From 81c667b58e78243df38dc2d7311cb285f7c1d4f4 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 22 Dec 2021 06:17:14 +0000 Subject: [PATCH 01/56] Additional networking metrics (#2549) Adds additional metrics for network monitoring and evaluation. Co-authored-by: Mark Mackey --- Cargo.lock | 773 ++++++++++-------- beacon_node/beacon_chain/Cargo.toml | 2 +- beacon_node/client/Cargo.toml | 2 +- beacon_node/client/src/builder.rs | 31 +- beacon_node/execution_layer/Cargo.toml | 2 +- beacon_node/http_api/src/lib.rs | 17 + beacon_node/http_metrics/src/lib.rs | 2 + beacon_node/http_metrics/src/metrics.rs | 7 + beacon_node/http_metrics/tests/tests.rs | 1 + beacon_node/lighthouse_network/Cargo.toml | 13 +- .../lighthouse_network/src/behaviour/mod.rs | 53 +- .../lighthouse_network/src/discovery/enr.rs | 4 +- .../lighthouse_network/src/discovery/mod.rs | 3 +- beacon_node/lighthouse_network/src/lib.rs | 9 +- beacon_node/lighthouse_network/src/metrics.rs | 91 ++- .../src/peer_manager/config.rs | 3 + .../src/peer_manager/mod.rs | 161 +++- .../src/peer_manager/network_behaviour.rs | 32 +- .../src/peer_manager/peerdb/client.rs | 4 +- .../src/peer_manager/peerdb/peer_info.rs | 18 - beacon_node/lighthouse_network/src/service.rs | 29 +- .../lighthouse_network/tests/common/mod.rs | 23 +- beacon_node/network/src/metrics.rs | 638 ++------------- beacon_node/network/src/service.rs | 42 +- beacon_node/network/src/service/tests.rs | 7 +- beacon_node/store/Cargo.toml | 2 +- common/lighthouse_metrics/src/lib.rs | 6 + lcli/src/generate_bootnode_enr.rs | 2 +- slasher/Cargo.toml | 2 +- 29 files changed, 849 insertions(+), 1130 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4fe2b3573fc..b7a14e1735f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -130,15 +130,6 @@ name = "amcl" version = "0.3.0" source = "git+https://github.com/sigp/milagro_bls?tag=v1.4.2#16655aa033175a90c10ef02aa144e2835de23aec" -[[package]] -name = "ansi_term" -version = "0.11.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" -dependencies = [ - "winapi", -] - [[package]] name = "ansi_term" version = "0.12.1" @@ -150,9 +141,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.47" +version = "1.0.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d9ff5d688f1c13395289f67db01d4826b46dd694e7580accdc3e8430f2d98e" +checksum = "8b26702f315f53b6071259e15dd9d64528213b44d61de1ec926eca7715d62203" [[package]] name = "arbitrary" @@ -195,9 +186,9 @@ checksum = "e22d1f4b888c298a027c99dc9048015fac177587de20fc30232a057dfbe24a21" [[package]] name = "async-trait" -version = "0.1.51" +version = "0.1.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44318e776df68115a881de9a8fd1b9e53368d7a4a5ce4cc48517da3393233a5e" +checksum = "061a7acccaa286c011ddc30970520b98fa40e00c9d644633fb26b5fc63a265e3" dependencies = [ "proc-macro2", "quote", @@ -291,7 +282,7 @@ checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" name = "beacon_chain" version = "0.2.0" dependencies = [ - "bitvec 0.19.5", + "bitvec 0.19.6", "bls", "derivative", "environment", @@ -380,9 +371,9 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.59.1" +version = "0.59.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "453c49e5950bb0eb63bb3df640e31618846c89d5b7faa54040d76e98e0134375" +checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" dependencies = [ "bitflags", "cexpr", @@ -415,9 +406,9 @@ dependencies = [ [[package]] name = "bitvec" -version = "0.19.5" +version = "0.19.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8942c8d352ae1838c9dda0b0ca2ab657696ef2232a20147cf1b30ae1a9cb4321" +checksum = 
"55f93d0ef3363c364d5976646a38f04cf67cfe1d4c8d160cdea02cab2c116b33" dependencies = [ "funty", "radium 0.5.3", @@ -444,7 +435,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0a4e37d16930f5459780f5621038b6382b9bb37c19016f39fb6b5808d831f174" dependencies = [ "crypto-mac 0.8.0", - "digest", + "digest 0.9.0", "opaque-debug", ] @@ -458,6 +449,15 @@ dependencies = [ "generic-array", ] +[[package]] +name = "block-buffer" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1d36a02058e76b040de25a4464ba1c80935655595b661505c8b39b664828b95" +dependencies = [ + "generic-array", +] + [[package]] name = "block-padding" version = "0.2.1" @@ -631,11 +631,11 @@ checksum = "22a9137b95ea06864e018375b72adfb7db6e6f68cfc8df5a04d00288050485ee" [[package]] name = "cexpr" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db507a7679252d2276ed0dd8113c6875ec56d3089f9225b2b42c30cc1f8e5c89" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" dependencies = [ - "nom 6.1.2", + "nom 7.1.0", ] [[package]] @@ -704,11 +704,11 @@ dependencies = [ [[package]] name = "clap" -version = "2.33.3" +version = "2.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002" +checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" dependencies = [ - "ansi_term 0.11.0", + "ansi_term", "atty", "bitflags", "strsim 0.8.0", @@ -807,6 +807,12 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "279bc8fc53f788a75c7804af68237d1fce02cde1e275a886a4b320604dc2aeda" +[[package]] +name = "const-oid" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3" + [[package]] name = "convert_case" version = "0.4.0" @@ -849,9 +855,9 @@ dependencies = [ [[package]] name = "crc32fast" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" +checksum = "738c290dfaea84fc1ca15ad9c168d083b05a714e1efddd8edaab678dc28d2836" dependencies = [ "cfg-if", ] @@ -942,6 +948,27 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" +[[package]] +name = "crypto-bigint" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03c6a1d5fa1de37e071642dfa44ec552ca5b299adb128fab16138e24b548fd21" +dependencies = [ + "generic-array", + "rand_core 0.6.3", + "subtle", + "zeroize", +] + +[[package]] +name = "crypto-common" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "683d6b536309245c849479fba3da410962a43ed8e51c26b729208ec0ac2798d0" +dependencies = [ + "generic-array", +] + [[package]] name = "crypto-mac" version = "0.8.0" @@ -970,7 +997,7 @@ checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1" dependencies = [ "bstr", "csv-core", - "itoa", + "itoa 0.4.8", "ryu", "serde", ] @@ -995,11 +1022,11 @@ dependencies = [ [[package]] name = "ctrlc" -version = "3.2.0" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "377c9b002a72a0b2c1a18c62e2f3864bdfea4a015e3683a96e24aa45dd6c02d1" +checksum = "a19c6cedffdc8c03a3346d723eb20bd85a13362bb96dc2ac000842c6381ec7bf" dependencies = [ - "nix 0.22.2", + "nix 0.23.1", "winapi", ] @@ -1010,7 +1037,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" dependencies = [ "byteorder", - "digest", + "digest 0.9.0", "rand_core 0.5.1", "subtle", "zeroize", @@ -1018,9 +1045,9 @@ dependencies = [ [[package]] name = "darling" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "757c0ded2af11d8e739c4daea1ac623dd1624b06c844cf3f5a39f1bdbd99bb12" +checksum = "d0d720b8683f8dd83c65155f0530560cba68cd2bf395f6513a483caee57ff7f4" dependencies = [ "darling_core", "darling_macro", @@ -1028,9 +1055,9 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c34d8efb62d0c2d7f60ece80f75e5c63c1588ba68032740494b0b9a996466e3" +checksum = "7a340f241d2ceed1deb47ae36c4144b2707ec7dd0b649f894cb39bb595986324" dependencies = [ "fnv", "ident_case", @@ -1042,9 +1069,9 @@ dependencies = [ [[package]] name = "darling_macro" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ade7bff147130fe5e6d39f089c6bd49ec0250f35d70b2eebf72afdfc919f15cc" +checksum = "72c41b3b7352feb3211a0d743dc5700a4e3b60f51bd2b368892d1e0f9a95f44b" dependencies = [ "darling_core", "quote", @@ -1087,7 +1114,7 @@ dependencies = [ "hex", "reqwest", "serde_json", - "sha2", + "sha2 0.9.8", "tree_hash", "types", ] @@ -1098,10 +1125,19 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2eeb9d92785d1facb50567852ce75d0858630630e7eabea59cf7eb7474051087" dependencies = [ - "const-oid", + "const-oid 0.5.2", "typenum", ] +[[package]] +name = "der" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6919815d73839e7ad218de758883aae3a257ba6759ce7a9992501efbb53d705c" +dependencies = [ + "const-oid 0.7.1", +] + [[package]] name = 
"derivative" version = "2.2.0" @@ -1146,6 +1182,17 @@ dependencies = [ "generic-array", ] +[[package]] +name = "digest" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b697d66081d42af4fba142d56918a3cb21dc8eb63372c6b85d14f44fb9c5979b" +dependencies = [ + "block-buffer 0.10.0", + "crypto-common", + "generic-array", +] + [[package]] name = "directory" version = "0.1.0" @@ -1198,14 +1245,14 @@ dependencies = [ [[package]] name = "discv5" -version = "0.1.0-beta.11" +version = "0.1.0-beta.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a4968631f2eb03ef8dff74fe355440bcf4bd1c514c4326325fc739640c4ec53" +checksum = "ed8f54486179d5a7f11e1f5526f49d925a411a96c1141a707bd5f071be2ab630" dependencies = [ "aes", "aes-gcm", "arrayvec 0.7.2", - "digest", + "digest 0.10.1", "enr", "fnv", "futures", @@ -1213,12 +1260,12 @@ dependencies = [ "hex", "hkdf", "lazy_static", - "libp2p-core 0.29.0", + "libp2p-core 0.30.0", "lru", "parking_lot", "rand 0.8.4", "rlp 0.5.1", - "sha2", + "sha2 0.9.8", "smallvec", "tokio", "tokio-stream", @@ -1241,12 +1288,24 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34d33b390ab82f2e1481e331dbd0530895640179d2128ef9a79cc690b78d1eba" dependencies = [ - "der", - "elliptic-curve", + "der 0.3.5", + "elliptic-curve 0.9.12", "hmac 0.11.0", "signature", ] +[[package]] +name = "ecdsa" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e91ae02c7618ee05108cd86a0be2f5586d1f0d965bede7ecfd46815f1b860227" +dependencies = [ + "der 0.5.1", + "elliptic-curve 0.11.6", + "rfc6979", + "signature", +] + [[package]] name = "ed25519" version = "1.3.0" @@ -1266,7 +1325,7 @@ dependencies = [ "ed25519", "rand 0.7.3", "serde", - "sha2", + "sha2 0.9.8", "zeroize", ] @@ -1313,20 +1372,37 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "c13e9b0c3c4170dcc2a12783746c4205d98e18957f57854251eea3f9750fe005" dependencies = [ "bitvec 0.20.4", - "ff", + "ff 0.9.0", "generic-array", - "group", + "group 0.9.0", "pkcs8", "rand_core 0.6.3", "subtle", "zeroize", ] +[[package]] +name = "elliptic-curve" +version = "0.11.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "decb3a27ea454a5f23f96eb182af0671c12694d64ecc33dada74edd1301f6cfc" +dependencies = [ + "crypto-bigint", + "der 0.5.1", + "ff 0.11.0", + "generic-array", + "group 0.11.0", + "rand_core 0.6.3", + "sec1", + "subtle", + "zeroize", +] + [[package]] name = "encoding_rs" -version = "0.8.29" +version = "0.8.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a74ea89a0a1b98f6332de42c95baff457ada66d1cb4030f9ff151b2041a1c746" +checksum = "7896dc8abb250ffdda33912550faa54c88ec8b998dec0b2c55ab224921ce11df" dependencies = [ "cfg-if", ] @@ -1500,7 +1576,7 @@ dependencies = [ "lazy_static", "ring", "rustc-hex", - "sha2", + "sha2 0.9.8", "wasm-bindgen-test", ] @@ -1513,7 +1589,7 @@ dependencies = [ "cpufeatures 0.1.5", "lazy_static", "ring", - "sha2", + "sha2 0.9.8", ] [[package]] @@ -1539,7 +1615,7 @@ dependencies = [ "hex", "num-bigint-dig", "ring", - "sha2", + "sha2 0.9.8", "zeroize", ] @@ -1558,7 +1634,7 @@ dependencies = [ "serde", "serde_json", "serde_repr", - "sha2", + "sha2 0.9.8", "tempfile", "unicode-normalization", "uuid", @@ -1826,6 +1902,16 @@ dependencies = [ "subtle", ] +[[package]] +name = "ff" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2958d04124b9f27f175eaeb9a9f383d026098aa837eadd8ba22c11f13a05b9e" +dependencies = [ + "rand_core 0.6.3", + "subtle", +] + [[package]] name = "ffi-opaque" version = "2.0.1" @@ -1874,12 +1960,6 @@ dependencies = [ "static_assertions", ] -[[package]] -name = "fixedbitset" -version = 
"0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d" - [[package]] name = "fixedbitset" version = "0.4.0" @@ -1960,9 +2040,9 @@ checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" [[package]] name = "futures" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12aa0eb539080d55c3f2d45a67c3b58b6b0773c1a3ca2dfec66d58c97fd66ca" +checksum = "28560757fe2bb34e79f907794bb6b22ae8b0e5c669b638a1132f2592b19035b4" dependencies = [ "futures-channel", "futures-core", @@ -1975,9 +2055,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5da6ba8c3bb3c165d3c7319fc1cc8304facf1fb8db99c5de877183c08a273888" +checksum = "ba3dda0b6588335f360afc675d0564c17a77a2bda81ca178a4b6081bd86c7f0b" dependencies = [ "futures-core", "futures-sink", @@ -1985,15 +2065,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d1c26957f23603395cd326b0ffe64124b818f4449552f960d815cfba83a53d" +checksum = "d0c8ff0461b82559810cdccfde3215c3f373807f5e5232b71479bff7bb2583d7" [[package]] name = "futures-executor" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45025be030969d763025784f7f355043dc6bc74093e4ecc5000ca4dc50d8745c" +checksum = "29d6d2ff5bb10fb95c85b8ce46538a2e5f5e7fdc755623a7d4529ab8a4ed9d2a" dependencies = [ "futures-core", "futures-task", @@ -2003,18 +2083,16 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"522de2a0fe3e380f1bc577ba0474108faf3f6b18321dbf60b3b9c39a75073377" +checksum = "b1f9d34af5a1aac6fb380f735fe510746c38067c5bf16c7fd250280503c971b2" [[package]] name = "futures-macro" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18e4a4b95cea4b4ccbcf1c5675ca7c4ee4e9e75eb79944d07defde18068f79bb" +checksum = "6dbd947adfffb0efc70599b3ddcf7b5597bb5fa9e245eb99f62b3a5f7bb8bd3c" dependencies = [ - "autocfg 1.0.1", - "proc-macro-hack", "proc-macro2", "quote", "syn", @@ -2027,21 +2105,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d383f0425d991a05e564c2f3ec150bd6dde863179c131dd60d8aa73a05434461" dependencies = [ "futures-io", - "rustls 0.20.1", + "rustls 0.20.2", "webpki 0.22.0", ] [[package]] name = "futures-sink" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36ea153c13024fe480590b3e3d4cad89a0cfacecc24577b68f86c6ced9c2bc11" +checksum = "e3055baccb68d74ff6480350f8d6eb8fcfa3aa11bdc1a1ae3afdd0514617d508" [[package]] name = "futures-task" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d3d00f4eddb73e498a54394f228cd55853bdf059259e8e7bc6e69d408892e99" +checksum = "6ee7c6485c30167ce4dfb83ac568a849fe53274c831081476ee13e0dce1aad72" [[package]] name = "futures-timer" @@ -2051,11 +2129,10 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" [[package]] name = "futures-util" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36568465210a3a6ee45e1f165136d68671471a501e632e9a98d96872222b5481" +checksum = "d9b5cf40b47a271f77a8b1bec03ca09044d99d2372c0de244e66430761127164" dependencies = [ - "autocfg 1.0.1", "futures-channel", "futures-core", "futures-io", @@ -2065,8 +2142,6 @@ 
dependencies = [ "memchr", "pin-project-lite 0.2.7", "pin-utils", - "proc-macro-hack", - "proc-macro-nested", "slab", ] @@ -2175,16 +2250,27 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61b3c1e8b4f1ca07e6605ea1be903a5f6956aec5c8a67fd44d56076631675ed8" dependencies = [ - "ff", + "ff 0.9.0", + "rand_core 0.6.3", + "subtle", +] + +[[package]] +name = "group" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc5ac374b108929de78460075f3dc439fa66df9d8fc77e8f12caa5165fcf0c89" +dependencies = [ + "ff 0.11.0", "rand_core 0.6.3", "subtle", ] [[package]] name = "h2" -version = "0.3.7" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fd819562fcebdac5afc5c113c3ec36f902840b70fd4fc458799c8ce4607ae55" +checksum = "8f072413d126e57991455e0a922b31e4c8ba7c2ffbebf6b78b4f8521397d65cd" dependencies = [ "bytes", "fnv", @@ -2293,7 +2379,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "01706d578d5c281058480e673ae4086a9f4710d8df1ad80a5b03e39ece5f886b" dependencies = [ - "digest", + "digest 0.9.0", "hmac 0.11.0", ] @@ -2304,7 +2390,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" dependencies = [ "crypto-mac 0.8.0", - "digest", + "digest 0.9.0", ] [[package]] @@ -2314,7 +2400,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" dependencies = [ "crypto-mac 0.11.1", - "digest", + "digest 0.9.0", ] [[package]] @@ -2323,7 +2409,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" dependencies = [ - 
"digest", + "digest 0.9.0", "generic-array", "hmac 0.8.1", ] @@ -2347,7 +2433,7 @@ checksum = "1323096b05d41827dadeaee54c9981958c0f94e670bc94ed80037d1a7b8b186b" dependencies = [ "bytes", "fnv", - "itoa", + "itoa 0.4.8", ] [[package]] @@ -2433,9 +2519,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.15" +version = "0.14.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "436ec0091e4f20e655156a30a0df3770fe2900aa301e548e08446ec794b6953c" +checksum = "b7ec3e62bdc98a2f0393a5048e4c30ef659440ea6e0e572965103e72bd836f55" dependencies = [ "bytes", "futures-channel", @@ -2446,7 +2532,7 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa", + "itoa 0.4.8", "pin-project-lite 0.2.7", "socket2 0.4.2", "tokio", @@ -2496,6 +2582,16 @@ dependencies = [ "winapi", ] +[[package]] +name = "if-addrs" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbc0fa01ffc752e9dbc72818cdb072cd028b86be5e09dd04c5a643704fe101a9" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "if-addrs-sys" version = "0.3.2" @@ -2641,9 +2737,9 @@ checksum = "68f2d64f2edebec4ce84ad108148e67e1064789bee435edc5b60ad398714a3a9" [[package]] name = "itertools" -version = "0.10.1" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69ddb889f9d0d08a67338271fa9b62996bc788c7796a5c18cf057420aaed5eaf" +checksum = "a9a9d19fa1e79b6215ff29b9d6880b706147f16e9b1dbb1e4e5947b5b02bc5e3" dependencies = [ "either", ] @@ -2654,6 +2750,12 @@ version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" +[[package]] +name = "itoa" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" + [[package]] name = "js-sys" version = "0.3.55" @@ -2685,9 +2787,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3e8e491ed22bc161583a1c77e42313672c483eba6bd9d7afec0f1131d0b9ce" dependencies = [ "cfg-if", - "ecdsa", - "elliptic-curve", - "sha2", + "ecdsa 0.11.1", + "elliptic-curve 0.9.12", + "sha2 0.9.8", ] [[package]] @@ -2769,9 +2871,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.107" +version = "0.2.112" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbe5e23404da5b4f555ef85ebed98fb4083e55a00c317800bc2a50ede9f3d219" +checksum = "1b03d17f364a3a042d5e5d46b053bbbf82c92c9430c592dd4c064dc6ee997125" [[package]] name = "libflate" @@ -2827,9 +2929,8 @@ dependencies = [ [[package]] name = "libp2p" -version = "0.41.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "782229f90bf7d5b12ee3ee08f7e160ba99f0d75eee7d118d9c1a688b13f6e64a" +version = "0.42.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "atomic", "bytes", @@ -2838,7 +2939,7 @@ dependencies = [ "getrandom 0.2.3", "instant", "lazy_static", - "libp2p-core 0.30.0", + "libp2p-core 0.31.0", "libp2p-dns", "libp2p-gossipsub", "libp2p-identify", @@ -2860,9 +2961,9 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af9b4abdeaa420593a297c8592f63fad4234f4b88dc9343b8fd8e736c35faa59" +checksum = "bef22d9bba1e8bcb7ec300073e6802943fe8abb8190431842262b5f1c30abba1" dependencies = [ "asn1_der", "bs58", @@ -2872,19 +2973,19 @@ dependencies = [ "futures", "futures-timer", "lazy_static", - "libsecp256k1 0.5.0", + "libsecp256k1 0.7.0", "log", "multiaddr", 
"multihash", - "multistream-select", + "multistream-select 0.10.4", "parking_lot", "pin-project 1.0.8", - "prost 0.8.0", - "prost-build 0.8.0", - "rand 0.7.3", + "prost", + "prost-build", + "rand 0.8.4", "ring", "rw-stream-sink", - "sha2", + "sha2 0.9.8", "smallvec", "thiserror", "unsigned-varint 0.7.1", @@ -2894,9 +2995,8 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.30.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef22d9bba1e8bcb7ec300073e6802943fe8abb8190431842262b5f1c30abba1" +version = "0.31.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "asn1_der", "bs58", @@ -2905,20 +3005,22 @@ dependencies = [ "fnv", "futures", "futures-timer", + "instant", "lazy_static", "libsecp256k1 0.7.0", "log", "multiaddr", "multihash", - "multistream-select", + "multistream-select 0.11.0", + "p256", "parking_lot", "pin-project 1.0.8", - "prost 0.9.0", - "prost-build 0.9.0", + "prost", + "prost-build", "rand 0.8.4", "ring", "rw-stream-sink", - "sha2", + "sha2 0.10.0", "smallvec", "thiserror", "unsigned-varint 0.7.1", @@ -2928,12 +3030,11 @@ dependencies = [ [[package]] name = "libp2p-dns" -version = "0.30.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb8f89d15cb6e3c5bc22afff7513b11bab7856f2872d3cfba86f7f63a06bc498" +version = "0.31.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "futures", - "libp2p-core 0.30.0", + "libp2p-core 0.31.0", "log", "smallvec", "trust-dns-resolver", @@ -2941,9 +3042,8 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" -version = "0.34.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"98942284cc1a91f24527a8b1e5bc06f7dd22fc6cee5be3d9bf5785bf902eb934" +version = "0.35.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "asynchronous-codec", "base64 0.13.0", @@ -2954,44 +3054,42 @@ dependencies = [ "futures-timer", "hex_fmt", "instant", - "libp2p-core 0.30.0", + "libp2p-core 0.31.0", "libp2p-swarm", "log", "open-metrics-client", "pin-project 1.0.8", - "prost 0.9.0", - "prost-build 0.9.0", + "prost", + "prost-build", "rand 0.7.3", "regex", - "sha2", + "sha2 0.10.0", "smallvec", "unsigned-varint 0.7.1", ] [[package]] name = "libp2p-identify" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec6d59e3f88435a83797fc3734f18385f6f54e0fe081e12543573364687c7db5" +version = "0.33.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "futures", "futures-timer", - "libp2p-core 0.30.0", + "libp2p-core 0.31.0", "libp2p-swarm", "log", "lru", - "prost 0.9.0", - "prost-build 0.9.0", + "prost", + "prost-build", "smallvec", ] [[package]] name = "libp2p-metrics" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d59f3be49edeecff13ef0d0dc28295ba4a33910611715f04236325d08e4119e0" +version = "0.3.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ - "libp2p-core 0.30.0", + "libp2p-core 0.31.0", "libp2p-gossipsub", "libp2p-identify", "libp2p-swarm", @@ -3000,14 +3098,13 @@ dependencies = [ [[package]] name = "libp2p-mplex" -version = "0.30.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7f2cd64ef597f40e14bfce0497f50ecb63dd6d201c61796daeb4227078834fbf" +version = "0.31.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "asynchronous-codec", "bytes", "futures", - "libp2p-core 0.30.0", + "libp2p-core 0.31.0", "log", "nohash-hasher", "parking_lot", @@ -3018,20 +3115,19 @@ dependencies = [ [[package]] name = "libp2p-noise" -version = "0.33.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8772c7a99088221bb7ca9c5c0574bf55046a7ab4c319f3619b275f28c8fb87a" +version = "0.34.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "bytes", "curve25519-dalek", "futures", "lazy_static", - "libp2p-core 0.30.0", + "libp2p-core 0.31.0", "log", - "prost 0.9.0", - "prost-build 0.9.0", + "prost", + "prost-build", "rand 0.8.4", - "sha2", + "sha2 0.10.0", "snow", "static_assertions", "x25519-dalek", @@ -3040,32 +3136,30 @@ dependencies = [ [[package]] name = "libp2p-plaintext" -version = "0.30.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fba1a6ff33e4a274c89a3b1d78b9f34f32af13265cc5c46c16938262d4e945a" +version = "0.31.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "asynchronous-codec", "bytes", "futures", - "libp2p-core 0.30.0", + "libp2p-core 0.31.0", "log", - "prost 0.9.0", - "prost-build 0.9.0", + "prost", + "prost-build", "unsigned-varint 0.7.1", "void", ] [[package]] name = "libp2p-swarm" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb84d40627cd109bbbf43da9269d4ef75903f42356c88d98b2b55c47c430c792" +version = "0.33.0" +source = 
"git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "either", "futures", "futures-timer", "instant", - "libp2p-core 0.30.0", + "libp2p-core 0.31.0", "log", "rand 0.7.3", "smallvec", @@ -3074,9 +3168,8 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" -version = "0.26.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dd93a7dad9b61c39797572e4fb4fdba8415d6348b4e745b3d4cb008f84331ab" +version = "0.26.1" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "quote", "syn", @@ -3084,16 +3177,15 @@ dependencies = [ [[package]] name = "libp2p-tcp" -version = "0.30.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7399c5b6361ef525d41c11fcf51635724f832baf5819b30d3d873eabb4fbae4b" +version = "0.31.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "futures", "futures-timer", - "if-addrs", + "if-addrs 0.7.0", "ipnet", "libc", - "libp2p-core 0.30.0", + "libp2p-core 0.31.0", "log", "socket2 0.4.2", "tokio", @@ -3101,14 +3193,13 @@ dependencies = [ [[package]] name = "libp2p-websocket" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa92005fbd67695715c821e1acfe4d7be9fd2d88738574e93d645c49ec2831c8" +version = "0.33.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "either", "futures", "futures-rustls", - "libp2p-core 0.30.0", + "libp2p-core 0.31.0", "log", "quicksink", "rw-stream-sink", @@ -3119,36 +3210,16 @@ dependencies = [ [[package]] name = 
"libp2p-yamux" -version = "0.34.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e7362abb8867d7187e7e93df17f460d554c997fc5c8ac57dc1259057f6889af" +version = "0.35.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "futures", - "libp2p-core 0.30.0", + "libp2p-core 0.31.0", "parking_lot", "thiserror", "yamux", ] -[[package]] -name = "libsecp256k1" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd1137239ab33b41aa9637a88a28249e5e70c40a42ccc92db7f12cc356c1fcd7" -dependencies = [ - "arrayref", - "base64 0.12.3", - "digest", - "hmac-drbg", - "libsecp256k1-core 0.2.2", - "libsecp256k1-gen-ecmult 0.2.1", - "libsecp256k1-gen-genmult 0.2.1", - "rand 0.7.3", - "serde", - "sha2", - "typenum", -] - [[package]] name = "libsecp256k1" version = "0.6.0" @@ -3157,14 +3228,14 @@ checksum = "c9d220bc1feda2ac231cb78c3d26f27676b8cf82c96971f7aeef3d0cf2797c73" dependencies = [ "arrayref", "base64 0.12.3", - "digest", + "digest 0.9.0", "hmac-drbg", "libsecp256k1-core 0.2.2", "libsecp256k1-gen-ecmult 0.2.1", "libsecp256k1-gen-genmult 0.2.1", "rand 0.7.3", "serde", - "sha2", + "sha2 0.9.8", "typenum", ] @@ -3176,14 +3247,14 @@ checksum = "b0452aac8bab02242429380e9b2f94ea20cea2b37e2c1777a1358799bbe97f37" dependencies = [ "arrayref", "base64 0.13.0", - "digest", + "digest 0.9.0", "hmac-drbg", "libsecp256k1-core 0.3.0", "libsecp256k1-gen-ecmult 0.3.0", "libsecp256k1-gen-genmult 0.3.0", "rand 0.8.4", "serde", - "sha2", + "sha2 0.9.8", "typenum", ] @@ -3194,7 +3265,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0f6ab710cec28cef759c5f18671a27dae2a5f952cdaaee1d8e2908cb2478a80" dependencies = [ "crunchy", - "digest", + "digest 0.9.0", "subtle", ] @@ -3205,7 +3276,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "5be9b9bb642d8522a44d533eab56c16c738301965504753b03ad1de3425d5451" dependencies = [ "crunchy", - "digest", + "digest 0.9.0", "subtle", ] @@ -3330,12 +3401,13 @@ dependencies = [ "lighthouse_metrics", "lighthouse_version", "lru", + "open-metrics-client", "parking_lot", "rand 0.7.3", "regex", "serde", "serde_derive", - "sha2", + "sha2 0.9.8", "slog", "slog-async", "slog-term", @@ -3408,9 +3480,9 @@ dependencies = [ [[package]] name = "lru" -version = "0.6.6" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ea2d928b485416e8908cff2d97d621db22b27f7b3b6729e438bcf42c671ba91" +checksum = "469898e909a1774d844793b347135a0cd344ca2f69d082013ecb8061a2229a3a" dependencies = [ "hashbrown", ] @@ -3464,9 +3536,9 @@ checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" [[package]] name = "matchers" -version = "0.0.1" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" dependencies = [ "regex-automata", ] @@ -3497,9 +3569,9 @@ checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" [[package]] name = "memoffset" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59accc507f1338036a0477ef61afdae33cde60840f4dfe481319ce3ad116ddf9" +checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" dependencies = [ "autocfg 1.0.1", ] @@ -3544,6 +3616,12 @@ dependencies = [ "unicase", ] +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + [[package]] 
name = "miniz_oxide" version = "0.4.4" @@ -3620,10 +3698,10 @@ version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "752a61cd890ff691b4411423d23816d5866dd5621e4d1c5687a53b94b5a979d8" dependencies = [ - "digest", + "digest 0.9.0", "generic-array", "multihash-derive", - "sha2", + "sha2 0.9.8", "unsigned-varint 0.7.1", ] @@ -3697,6 +3775,19 @@ dependencies = [ "unsigned-varint 0.7.1", ] +[[package]] +name = "multistream-select" +version = "0.11.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" +dependencies = [ + "bytes", + "futures", + "log", + "pin-project 1.0.8", + "smallvec", + "unsigned-varint 0.7.1", +] + [[package]] name = "native-tls" version = "0.2.8" @@ -3730,7 +3821,7 @@ dependencies = [ "genesis", "hashset_delay", "hex", - "if-addrs", + "if-addrs 0.6.7", "igd", "itertools", "lazy_static", @@ -3772,9 +3863,9 @@ dependencies = [ [[package]] name = "nix" -version = "0.22.2" +version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3bb9a13fa32bc5aeb64150cd3f32d6cf4c748f8f8a417cce5d2eb976a8370ba" +checksum = "9f866317acbd3a240710c63f065ffb1e4fd466259045ccb504130b7f668f35c6" dependencies = [ "bitflags", "cc", @@ -3811,13 +3902,12 @@ checksum = "cf51a729ecf40266a2368ad335a5fdde43471f545a967109cd62146ecf8b66ff" [[package]] name = "nom" -version = "6.1.2" +version = "7.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7413f999671bd4745a7b624bd370a569fb6bc574b23c83a3c5ed2e453f3d5e2" +checksum = "1b1d11e1ef389c76fe5b81bcaf2ea32cf88b62bc494e19f493d0b30e7a930109" dependencies = [ - "bitvec 0.19.5", - "funty", "memchr", + "minimal-lexical", "version_check", ] @@ -3892,9 +3982,9 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.13.0" +version = "1.13.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" +checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" dependencies = [ "hermit-abi", "libc", @@ -3911,9 +4001,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56" +checksum = "da32515d9f6e6e489d7bc9d84c71b060db7247dc035bbe44eac88cf87486d8d5" [[package]] name = "oorandom" @@ -3929,12 +4019,12 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "open-metrics-client" -version = "0.12.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7337d80c23c2d8b1349563981bc4fb531220733743ba8115454a67b181173f0d" +checksum = "9e224744b2e4da5b241857d2363a13bce60425f7b6ae2a5ff88d4d5557d9cc85" dependencies = [ "dtoa", - "itoa", + "itoa 0.4.8", "open-metrics-client-derive-text-encode", "owning_ref", ] @@ -4022,6 +4112,18 @@ dependencies = [ "stable_deref_trait", ] +[[package]] +name = "p256" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0e0c5310031b5d4528ac6534bccc1446c289ac45c47b277d5aa91089c5f74fa" +dependencies = [ + "ecdsa 0.13.3", + "elliptic-curve 0.11.6", + "sec1", + "sha2 0.9.8", +] + [[package]] name = "parity-scale-codec" version = "1.3.7" @@ -4130,23 +4232,13 @@ dependencies = [ "ucd-trie", ] -[[package]] -name = "petgraph" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "467d164a6de56270bd7c4d070df81d07beace25012d5103ced4e9ff08d6afdb7" -dependencies = [ - "fixedbitset 0.2.0", - "indexmap", -] - [[package]] name = "petgraph" version = "0.6.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "4a13a2fa9d0b63e5f22328828741e523766fff0ee9e779316902290dff3f824f" dependencies = [ - "fixedbitset 0.4.0", + "fixedbitset", "indexmap", ] @@ -4214,15 +4306,15 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9c2f795bc591cb3384cb64082a578b89207ac92bb89c9d98c1ea2ace7cd8110" dependencies = [ - "der", + "der 0.3.5", "spki", ] [[package]] name = "pkg-config" -version = "0.3.22" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12295df4f294471248581bc09bef3c38a5e46f1e36d6a37353621a0c6c357e1f" +checksum = "58893f751c9b0412871a09abd62ecd2a00298c6c83befa223ef98c52aef40cbe" [[package]] name = "platforms" @@ -4366,17 +4458,11 @@ version = "0.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" -[[package]] -name = "proc-macro-nested" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" - [[package]] name = "proc-macro2" -version = "1.0.32" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba508cc11742c0dc5c1659771673afbab7a0efab23aa17e854cbab0837ed0b43" +checksum = "2f84e92c0f7c9d58328b85a78557813e4bd845130db68d7184635344399423b1" dependencies = [ "unicode-xid", ] @@ -4408,16 +4494,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "prost" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de5e2533f59d08fcf364fd374ebda0692a70bd6d7e66ef97f306f45c6c5d8020" -dependencies = [ - "bytes", - "prost-derive 0.8.0", -] - [[package]] name = "prost" version = "0.9.0" @@ -4425,25 +4501,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "444879275cb4fd84958b1a1d5420d15e6fcf7c235fe47f053c9c2a80aceb6001" dependencies = [ "bytes", - "prost-derive 0.9.0", -] - -[[package]] -name = "prost-build" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "355f634b43cdd80724ee7848f95770e7e70eefa6dcf14fea676216573b8fd603" -dependencies = [ - "bytes", - "heck", - "itertools", - "log", - "multimap", - "petgraph 0.5.1", - "prost 0.8.0", - "prost-types 0.8.0", - "tempfile", - "which", + "prost-derive", ] [[package]] @@ -4458,27 +4516,14 @@ dependencies = [ "lazy_static", "log", "multimap", - "petgraph 0.6.0", - "prost 0.9.0", - "prost-types 0.9.0", + "petgraph", + "prost", + "prost-types", "regex", "tempfile", "which", ] -[[package]] -name = "prost-derive" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "600d2f334aa05acb02a755e217ef1ab6dea4d51b58b7846588b747edec04efba" -dependencies = [ - "anyhow", - "itertools", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "prost-derive" version = "0.9.0" @@ -4492,16 +4537,6 @@ dependencies = [ "syn", ] -[[package]] -name = "prost-types" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "603bbd6394701d13f3f25aada59c7de9d35a6a5887cfc156181234a44002771b" -dependencies = [ - "bytes", - "prost 0.8.0", -] - [[package]] name = "prost-types" version = "0.9.0" @@ -4509,7 +4544,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "534b7a0e836e3c482d2693070f982e39e7611da9695d4d1f5a4b186b51faef0a" dependencies = [ "bytes", - "prost 0.9.0", + "prost", ] [[package]] @@ -4807,9 +4842,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.6" +version = "0.11.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"66d2927ca2f685faf0fc620ac4834690d29e7abb153add10f5812eef20b5e280" +checksum = "7c4e0a76dc12a116108933f6301b95e83634e0c47b0afbed6abbaa0601e99258" dependencies = [ "base64 0.13.0", "bytes", @@ -4833,6 +4868,7 @@ dependencies = [ "serde_urlencoded", "tokio", "tokio-native-tls", + "tokio-util", "url", "wasm-bindgen", "wasm-bindgen-futures", @@ -4850,6 +4886,17 @@ dependencies = [ "quick-error", ] +[[package]] +name = "rfc6979" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96ef608575f6392792f9ecf7890c00086591d29a83910939d430753f7c050525" +dependencies = [ + "crypto-bigint", + "hmac 0.11.0", + "zeroize", +] + [[package]] name = "ring" version = "0.16.20" @@ -4975,9 +5022,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.20.1" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dac4581f0fc0e0efd529d069e8189ec7b90b8e7680e21beb35141bdc45f36040" +checksum = "d37e5e2290f3e040b594b1a9e04377c2c671f1a1cfd9bfdef82106ac1c113f84" dependencies = [ "log", "ring", @@ -4987,9 +5034,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61b3909d758bb75c79f23d4736fac9433868679d3ad2ea7a61e3c25cfda9a088" +checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f" [[package]] name = "rw-stream-sink" @@ -5004,9 +5051,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.5" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" +checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" [[package]] name = "safe_arith" @@ -5076,7 +5123,7 @@ dependencies = [ "hmac 0.11.0", "pbkdf2 0.8.0", "salsa20", - "sha2", + "sha2 0.9.8", ] [[package]] @@ -5099,6 +5146,18 @@ 
dependencies = [ "untrusted", ] +[[package]] +name = "sec1" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08da66b8b0965a5555b6bd6639e68ccba85e1e2506f5fbb089e93f8a04e1a2d1" +dependencies = [ + "der 0.5.1", + "generic-array", + "subtle", + "zeroize", +] + [[package]] name = "secp256k1" version = "0.20.3" @@ -5189,9 +5248,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.130" +version = "1.0.132" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913" +checksum = "8b9875c23cf305cd1fd7eb77234cbb705f21ea6a72c637a5c6db5fe4b8e7f008" dependencies = [ "serde_derive", ] @@ -5208,9 +5267,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.130" +version = "1.0.132" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7bc1a1ab1961464eae040d96713baa5a724a8152c1222492465b54322ec508b" +checksum = "ecc0db5cb2556c0e558887d9bbdcf6ac4471e83ff66cf696e5419024d1606276" dependencies = [ "proc-macro2", "quote", @@ -5219,11 +5278,11 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.71" +version = "1.0.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "063bf466a64011ac24040a49009724ee60a57da1b437617ceb32e53ad61bfb19" +checksum = "bcbd0344bc6533bc7ec56df11d42fb70f1b912351c0825ccb7211b59d8af7cf5" dependencies = [ - "itoa", + "itoa 1.0.1", "ryu", "serde", ] @@ -5246,19 +5305,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "edfa57a7f8d9c1d260a549e7224100f6c43d43f9103e06dd8b4095a9b2b43ce9" dependencies = [ "form_urlencoded", - "itoa", + "itoa 0.4.8", "ryu", "serde", ] [[package]] name = "serde_yaml" -version = "0.8.21" +version = "0.8.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d8c608a35705a5d3cdc9fbe403147647ff34b921f8e833e49306df898f9b20af" +checksum = "a4a521f2940385c165a24ee286aa8599633d162077a54bdcae2a6fd5a7bfa7a0" dependencies = [ - "dtoa", "indexmap", + "ryu", "serde", "yaml-rust", ] @@ -5269,10 +5328,10 @@ version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" dependencies = [ - "block-buffer", + "block-buffer 0.9.0", "cfg-if", "cpufeatures 0.2.1", - "digest", + "digest 0.9.0", "opaque-debug", ] @@ -5282,21 +5341,32 @@ version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b69f9a4c9740d74c5baa3fd2e547f9525fa8088a8a958e0ca2409a514e33f5fa" dependencies = [ - "block-buffer", + "block-buffer 0.9.0", "cfg-if", "cpufeatures 0.2.1", - "digest", + "digest 0.9.0", "opaque-debug", ] +[[package]] +name = "sha2" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "900d964dd36bb15bcf2f2b35694c072feab74969a54f2bbeec7a2d725d2bdcb6" +dependencies = [ + "cfg-if", + "cpufeatures 0.2.1", + "digest 0.10.1", +] + [[package]] name = "sha3" version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" dependencies = [ - "block-buffer", - "digest", + "block-buffer 0.9.0", + "digest 0.9.0", "keccak", "opaque-debug", ] @@ -5331,7 +5401,7 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2807892cfa58e081aa1f1111391c7a0649d4fa127a4ffbe34bcbfb35a1171a4" dependencies = [ - "digest", + "digest 0.9.0", "rand_core 0.6.3", ] @@ -5557,7 +5627,7 @@ dependencies = [ "rand_core 0.6.3", "ring", "rustc_version 0.3.3", - "sha2", + "sha2 0.9.8", "subtle", "x25519-dalek", ] @@ -5626,7 +5696,7 @@ version = "0.3.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "9dae7e047abc519c96350e9484a96c6bf1492348af912fd3446dd2dc323f6268" dependencies = [ - "der", + "der 0.3.5", ] [[package]] @@ -5960,7 +6030,7 @@ dependencies = [ "pbkdf2 0.4.0", "rand 0.7.3", "rustc-hash", - "sha2", + "sha2 0.9.8", "thiserror", "unicode-normalization", "wasm-bindgen", @@ -6012,11 +6082,10 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.14.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70e992e41e0d2fb9f755b37446f20900f64446ef54874f40a60c78f021ac6144" +checksum = "fbbf1c778ec206785635ce8ad57fe52b3009ae9e0c9f574a728f3049d3e55838" dependencies = [ - "autocfg 1.0.1", "bytes", "libc", "memchr", @@ -6042,9 +6111,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9efc1aba077437943f7515666aa2b882dfabfbfdf89c819ea75a8d6e9eaba5e" +checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" dependencies = [ "proc-macro2", "quote", @@ -6195,36 +6264,22 @@ dependencies = [ "tracing-core", ] -[[package]] -name = "tracing-serde" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb65ea441fbb84f9f6748fd496cf7f63ec9af5bca94dd86456978d055e8eb28b" -dependencies = [ - "serde", - "tracing-core", -] - [[package]] name = "tracing-subscriber" -version = "0.2.25" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e0d2eaa99c3c2e41547cfa109e910a68ea03823cccad4a0525dcbc9b01e8c71" +checksum = "245da694cc7fc4729f3f418b304cb57789f1bed2a78c575407ab8a23f53cb4d3" dependencies = [ - "ansi_term 0.12.1", - "chrono", + "ansi_term", "lazy_static", "matchers", "regex", - "serde", - "serde_json", 
"sharded-slab", "smallvec", "thread_local", "tracing", "tracing-core", "tracing-log", - "tracing-serde", ] [[package]] diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index c4bd3bf7b54..d4e187bd8d3 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -43,7 +43,7 @@ genesis = { path = "../genesis" } int_to_bytes = { path = "../../consensus/int_to_bytes" } rand = "0.7.3" proto_array = { path = "../../consensus/proto_array" } -lru = "0.6.0" +lru = "0.7.1" tempfile = "3.1.0" bitvec = "0.19.3" bls = { path = "../../crypto/bls" } diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index d2e673f6071..acb8376dbda 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -31,7 +31,7 @@ task_executor = { path = "../../common/task_executor" } environment = { path = "../../lighthouse/environment" } lazy_static = "1.4.0" lighthouse_metrics = { path = "../../common/lighthouse_metrics" } -time = "0.3.3" +time = "0.3.5" directory = {path = "../../common/directory"} http_api = { path = "../http_api" } http_metrics = { path = "../http_metrics" } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 30bc34dda49..d497af6485c 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -18,7 +18,7 @@ use eth2::{ }; use execution_layer::ExecutionLayer; use genesis::{interop_genesis_state, Eth1GenesisService, DEFAULT_ETH1_BLOCK_HASH}; -use lighthouse_network::NetworkGlobals; +use lighthouse_network::{open_metrics_client::registry::Registry, NetworkGlobals}; use monitoring_api::{MonitoringHttpClient, ProcessType}; use network::{NetworkConfig, NetworkMessage, NetworkService}; use slasher::Slasher; @@ -65,6 +65,7 @@ pub struct ClientBuilder { eth1_service: Option, network_globals: Option>>, network_send: Option>>, + gossipsub_registry: Option, db_path: Option, freezer_db_path: Option, http_api_config: 
http_api::Config, @@ -96,6 +97,7 @@ where eth1_service: None, network_globals: None, network_send: None, + gossipsub_registry: None, db_path: None, freezer_db_path: None, http_api_config: <_>::default(), @@ -448,13 +450,27 @@ where .ok_or("network requires a runtime_context")? .clone(); - let (network_globals, network_send) = - NetworkService::start(beacon_chain, config, context.executor) - .await - .map_err(|e| format!("Failed to start network: {:?}", e))?; + // If gossipsub metrics are required we build a registry to record them + let mut gossipsub_registry = if config.metrics_enabled { + Some(Registry::default()) + } else { + None + }; + + let (network_globals, network_send) = NetworkService::start( + beacon_chain, + config, + context.executor, + gossipsub_registry + .as_mut() + .map(|registry| registry.sub_registry_with_prefix("gossipsub")), + ) + .await + .map_err(|e| format!("Failed to start network: {:?}", e))?; self.network_globals = Some(network_globals); self.network_send = Some(network_send); + self.gossipsub_registry = gossipsub_registry; Ok(self) } @@ -562,13 +578,13 @@ where Ok(self) } - /// Consumers the builder, returning a `Client` if all necessary components have been + /// Consumes the builder, returning a `Client` if all necessary components have been /// specified. /// /// If type inference errors are being raised, see the comment on the definition of `Self`. 
#[allow(clippy::type_complexity)] pub fn build( - self, + mut self, ) -> Result>, String> { let runtime_context = self @@ -615,6 +631,7 @@ where chain: self.beacon_chain.clone(), db_path: self.db_path.clone(), freezer_db_path: self.freezer_db_path.clone(), + gossipsub_registry: self.gossipsub_registry.take().map(std::sync::Mutex::new), log: log.clone(), }); diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index ea09b1f7c71..c166024c060 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -23,7 +23,7 @@ bytes = "1.1.0" task_executor = { path = "../../common/task_executor" } hex = "0.4.2" eth2_ssz_types = "0.2.2" -lru = "0.6.0" +lru = "0.7.1" exit-future = "0.2.0" tree_hash = "0.4.1" tree_hash_derive = { path = "../../consensus/tree_hash_derive"} diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 4df5c940b9e..85c464466c1 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -2275,6 +2275,22 @@ pub fn serve( }) }); + // GET lighthouse/nat + let get_lighthouse_nat = warp::path("lighthouse") + .and(warp::path("nat")) + .and(warp::path::end()) + .and_then(|| { + blocking_json_task(move || { + Ok(api_types::GenericResponse::from( + lighthouse_network::metrics::NAT_OPEN + .as_ref() + .map(|v| v.get()) + .unwrap_or(0) + != 0, + )) + }) + }); + // GET lighthouse/peers let get_lighthouse_peers = warp::path("lighthouse") .and(warp::path("peers")) @@ -2622,6 +2638,7 @@ pub fn serve( .or(get_validator_sync_committee_contribution.boxed()) .or(get_lighthouse_health.boxed()) .or(get_lighthouse_syncing.boxed()) + .or(get_lighthouse_nat.boxed()) .or(get_lighthouse_peers.boxed()) .or(get_lighthouse_peers_connected.boxed()) .or(get_lighthouse_proto_array.boxed()) diff --git a/beacon_node/http_metrics/src/lib.rs b/beacon_node/http_metrics/src/lib.rs index 66c7a6a6f69..89e6a8e2d10 100644 --- a/beacon_node/http_metrics/src/lib.rs +++ 
b/beacon_node/http_metrics/src/lib.rs @@ -4,6 +4,7 @@ mod metrics; use beacon_chain::{BeaconChain, BeaconChainTypes}; +use lighthouse_network::open_metrics_client::registry::Registry; use lighthouse_version::version_with_platform; use serde::{Deserialize, Serialize}; use slog::{crit, info, Logger}; @@ -39,6 +40,7 @@ pub struct Context { pub chain: Option>>, pub db_path: Option, pub freezer_db_path: Option, + pub gossipsub_registry: Option>, pub log: Logger, } diff --git a/beacon_node/http_metrics/src/metrics.rs b/beacon_node/http_metrics/src/metrics.rs index c86211f3135..66c961956c8 100644 --- a/beacon_node/http_metrics/src/metrics.rs +++ b/beacon_node/http_metrics/src/metrics.rs @@ -1,6 +1,7 @@ use crate::Context; use beacon_chain::BeaconChainTypes; use lighthouse_metrics::{Encoder, TextEncoder}; +use lighthouse_network::open_metrics_client::encoding::text::encode; use malloc_utils::scrape_allocator_metrics; pub use lighthouse_metrics::*; @@ -51,6 +52,12 @@ pub fn gather_prometheus_metrics( encoder .encode(&lighthouse_metrics::gather(), &mut buffer) .unwrap(); + // encode gossipsub metrics also if they exist + if let Some(registry) = ctx.gossipsub_registry.as_ref() { + if let Ok(registry_locked) = registry.lock() { + let _ = encode(&mut buffer, ®istry_locked); + } + } String::from_utf8(buffer).map_err(|e| format!("Failed to encode prometheus info: {:?}", e)) } diff --git a/beacon_node/http_metrics/tests/tests.rs b/beacon_node/http_metrics/tests/tests.rs index 633b81115f3..fd8733cfe50 100644 --- a/beacon_node/http_metrics/tests/tests.rs +++ b/beacon_node/http_metrics/tests/tests.rs @@ -25,6 +25,7 @@ async fn returns_200_ok() { chain: None, db_path: None, freezer_db_path: None, + gossipsub_registry: None, log, }); diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index 7dcccd8ca2b..e148ae2db37 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -5,7 +5,7 @@ authors 
= ["Sigma Prime "] edition = "2018" [dependencies] -discv5 = { version = "0.1.0-beta.11", features = ["libp2p"] } +discv5 = { version = "0.1.0-beta.13", features = ["libp2p"] } unsigned-varint = { version = "0.6.0", features = ["codec"] } types = { path = "../../consensus/types" } hashset_delay = { path = "../../common/hashset_delay" } @@ -25,7 +25,7 @@ lazy_static = "1.4.0" lighthouse_metrics = { path = "../../common/lighthouse_metrics" } smallvec = "1.6.1" tokio-io-timeout = "1.1.1" -lru = "0.6.0" +lru = "0.7.1" parking_lot = "0.11.0" sha2 = "0.9.1" snap = "1.0.1" @@ -38,18 +38,21 @@ directory = { path = "../../common/directory" } regex = "1.3.9" strum = { version = "0.21.0", features = ["derive"] } superstruct = "0.3.0" +open-metrics-client = "0.13.0" [dependencies.libp2p] -version = "0.41.0" +# version = "0.41.0" default-features = false -features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns-tokio", "tcp-tokio"] +git = "https://github.com/libp2p/rust-libp2p" +# Latest libp2p master +rev = "17861d9cac121f7e448585a7f052d5eab4618826" +features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns-tokio", "tcp-tokio", "plaintext"] [dev-dependencies] slog-term = "2.6.0" slog-async = "2.5.0" tempfile = "3.1.0" exit-future = "0.2.0" -libp2p = { version = "0.41.0", default-features = false, features = ["plaintext"] } void = "1" [features] diff --git a/beacon_node/lighthouse_network/src/behaviour/mod.rs b/beacon_node/lighthouse_network/src/behaviour/mod.rs index 51699d236f7..f14d24aac49 100644 --- a/beacon_node/lighthouse_network/src/behaviour/mod.rs +++ b/beacon_node/lighthouse_network/src/behaviour/mod.rs @@ -8,18 +8,19 @@ use crate::peer_manager::{ ConnectionDirection, PeerManager, PeerManagerEvent, }; use crate::rpc::*; -use crate::service::METADATA_FILENAME; +use crate::service::{Context as ServiceContext, METADATA_FILENAME}; use crate::types::{ subnet_from_topic_hash, GossipEncoding, GossipKind, 
GossipTopic, SnappyTransform, Subnet, SubnetDiscovery, }; use crate::Eth2Enr; -use crate::{error, metrics, Enr, NetworkConfig, NetworkGlobals, PubsubMessage, TopicHash}; +use crate::{error, metrics, Enr, NetworkGlobals, PubsubMessage, TopicHash}; use libp2p::{ core::{ connection::ConnectionId, identity::Keypair, multiaddr::Protocol as MProtocol, Multiaddr, }, gossipsub::{ + metrics::Config as GossipsubMetricsConfig, subscription_filter::{MaxCountSubscriptionFilter, WhitelistSubscriptionFilter}, Gossipsub as BaseGossipsub, GossipsubEvent, IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId, @@ -45,7 +46,7 @@ use std::{ task::{Context, Poll}, }; use types::{ - consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, ChainSpec, EnrForkId, EthSpec, ForkContext, + consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, EnrForkId, EthSpec, ForkContext, SignedBeaconBlock, Slot, SubnetId, SyncSubnetId, }; @@ -182,14 +183,14 @@ pub struct Behaviour { impl Behaviour { pub async fn new( local_key: &Keypair, - mut config: NetworkConfig, + ctx: ServiceContext<'_>, network_globals: Arc>, log: &slog::Logger, - fork_context: Arc, - chain_spec: &ChainSpec, ) -> error::Result { let behaviour_log = log.new(o!()); + let mut config = ctx.config.clone(); + // Set up the Identify Behaviour let identify_config = if config.private { IdentifyConfig::new( @@ -215,25 +216,29 @@ impl Behaviour { .eth2() .expect("Local ENR must have a fork id"); - let possible_fork_digests = fork_context.all_fork_digests(); + let possible_fork_digests = ctx.fork_context.all_fork_digests(); let filter = MaxCountSubscriptionFilter { filter: Self::create_whitelist_filter( possible_fork_digests, - chain_spec.attestation_subnet_count, + ctx.chain_spec.attestation_subnet_count, SYNC_COMMITTEE_SUBNET_COUNT, ), max_subscribed_topics: 200, max_subscriptions_per_request: 150, // 148 in theory = (64 attestation + 4 sync committee + 6 core topics) * 2 }; - config.gs_config = gossipsub_config(fork_context.clone()); + 
config.gs_config = gossipsub_config(ctx.fork_context.clone()); + + // If metrics are enabled for gossipsub build the configuration + let gossipsub_metrics = ctx + .gossipsub_registry + .map(|registry| (registry, GossipsubMetricsConfig::default())); - // Build and configure the Gossipsub behaviour let snappy_transform = SnappyTransform::new(config.gs_config.max_transmit_size()); let mut gossipsub = Gossipsub::new_with_subscription_filter_and_transform( MessageAuthenticity::Anonymous, config.gs_config.clone(), - None, // No metrics for the time being + gossipsub_metrics, filter, snappy_transform, ) @@ -246,7 +251,7 @@ impl Behaviour { let thresholds = lighthouse_gossip_thresholds(); - let score_settings = PeerScoreSettings::new(chain_spec, &config.gs_config); + let score_settings = PeerScoreSettings::new(ctx.chain_spec, &config.gs_config); // Prepare scoring parameters let params = score_settings.get_peer_score_params( @@ -267,6 +272,7 @@ impl Behaviour { let peer_manager_cfg = PeerManagerCfg { discovery_enabled: !config.disable_discovery, + metrics_enabled: config.metrics_enabled, target_peer_count: config.target_peers, ..Default::default() }; @@ -274,7 +280,7 @@ impl Behaviour { Ok(Behaviour { // Sub-behaviours gossipsub, - eth2_rpc: RPC::new(fork_context.clone(), log.clone()), + eth2_rpc: RPC::new(ctx.fork_context.clone(), log.clone()), discovery, identify: Identify::new(identify_config), // Auxiliary fields @@ -287,7 +293,7 @@ impl Behaviour { network_dir: config.network_dir.clone(), log: behaviour_log, score_settings, - fork_context, + fork_context: ctx.fork_context, update_gossipsub_scores, }) } @@ -393,14 +399,15 @@ impl Behaviour { .remove(&topic); // unsubscribe from the topic - let topic: Topic = topic.into(); + let libp2p_topic: Topic = topic.clone().into(); - match self.gossipsub.unsubscribe(&topic) { + match self.gossipsub.unsubscribe(&libp2p_topic) { Err(_) => { - warn!(self.log, "Failed to unsubscribe from topic"; "topic" => %topic); + warn!(self.log, 
"Failed to unsubscribe from topic"; "topic" => %libp2p_topic); false } Ok(v) => { + // Inform the network debug!(self.log, "Unsubscribed to topic"; "topic" => %topic); v } @@ -732,6 +739,18 @@ impl Behaviour { /// Convenience function to propagate a request. fn propagate_request(&mut self, id: PeerRequestId, peer_id: PeerId, request: Request) { + // Increment metrics + match &request { + Request::Status(_) => { + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["status"]) + } + Request::BlocksByRange { .. } => { + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_range"]) + } + Request::BlocksByRoot { .. } => { + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_root"]) + } + } self.add_event(BehaviourEvent::RequestReceived { peer_id, id, diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index 3f2ae759b79..1d542a7f393 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -127,7 +127,7 @@ pub fn use_or_load_enr( pub fn build_or_load_enr( local_key: Keypair, config: &NetworkConfig, - enr_fork_id: EnrForkId, + enr_fork_id: &EnrForkId, log: &slog::Logger, ) -> Result { // Build the local ENR. 
@@ -163,7 +163,7 @@ pub fn create_enr_builder_from_config( pub fn build_enr( enr_key: &CombinedKey, config: &NetworkConfig, - enr_fork_id: EnrForkId, + enr_fork_id: &EnrForkId, ) -> Result { let mut builder = create_enr_builder_from_config(config, true); diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index ae7335b5caa..33e8c2c1704 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -1039,6 +1039,7 @@ impl NetworkBehaviour for Discovery { Discv5Event::SocketUpdated(socket) => { info!(self.log, "Address updated"; "ip" => %socket.ip(), "udp_port" => %socket.port()); metrics::inc_counter(&metrics::ADDRESS_UPDATE_COUNT); + metrics::check_nat(); // Discv5 will have updated our local ENR. We save the updated version // to disk. let enr = self.discv5.local_enr(); @@ -1096,7 +1097,7 @@ mod tests { ..Default::default() }; let enr_key: CombinedKey = CombinedKey::from_libp2p(&keypair).unwrap(); - let enr: Enr = build_enr::(&enr_key, &config, EnrForkId::default()).unwrap(); + let enr: Enr = build_enr::(&enr_key, &config, &EnrForkId::default()).unwrap(); let log = build_log(slog::Level::Debug, false); let globals = NetworkGlobals::new( enr, diff --git a/beacon_node/lighthouse_network/src/lib.rs b/beacon_node/lighthouse_network/src/lib.rs index 058b38ceb56..0460a42c8a9 100644 --- a/beacon_node/lighthouse_network/src/lib.rs +++ b/beacon_node/lighthouse_network/src/lib.rs @@ -10,7 +10,7 @@ mod config; #[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy pub mod discovery; -mod metrics; +pub mod metrics; pub mod peer_manager; pub mod rpc; mod service; @@ -66,13 +66,16 @@ pub use crate::types::{ error, Enr, EnrSyncCommitteeBitfield, GossipTopic, NetworkGlobals, PubsubMessage, Subnet, SubnetDiscovery, }; + +pub use open_metrics_client; + pub use behaviour::{BehaviourEvent, Gossipsub, PeerRequestId, 
Request, Response}; pub use config::Config as NetworkConfig; pub use discovery::{CombinedKeyExt, EnrExt, Eth2Enr}; pub use discv5; pub use libp2p; pub use libp2p::bandwidth::BandwidthSinks; -pub use libp2p::gossipsub::{MessageAcceptance, MessageId, Topic, TopicHash}; +pub use libp2p::gossipsub::{IdentTopic, MessageAcceptance, MessageId, Topic, TopicHash}; pub use libp2p::{core::ConnectedPoint, PeerId, Swarm}; pub use libp2p::{multiaddr, Multiaddr}; pub use metrics::scrape_discovery_metrics; @@ -82,4 +85,4 @@ pub use peer_manager::{ peerdb::PeerDB, ConnectionDirection, PeerConnectionStatus, PeerInfo, PeerManager, SyncInfo, SyncStatus, }; -pub use service::{load_private_key, Libp2pEvent, Service, NETWORK_KEY_FILENAME}; +pub use service::{load_private_key, Context, Libp2pEvent, Service, NETWORK_KEY_FILENAME}; diff --git a/beacon_node/lighthouse_network/src/metrics.rs b/beacon_node/lighthouse_network/src/metrics.rs index 4767f287f4c..b8fd8c58483 100644 --- a/beacon_node/lighthouse_network/src/metrics.rs +++ b/beacon_node/lighthouse_network/src/metrics.rs @@ -1,16 +1,19 @@ pub use lighthouse_metrics::*; lazy_static! { + pub static ref NAT_OPEN: Result = try_create_int_counter( + "nat_open", + "An estimate indicating if the local node is exposed to the internet." 
+ ); pub static ref ADDRESS_UPDATE_COUNT: Result = try_create_int_counter( "libp2p_address_update_total", "Count of libp2p socked updated events (when our view of our IP address has changed)" ); pub static ref PEERS_CONNECTED: Result = try_create_int_gauge( - "libp2p_peer_connected_peers_total", + "libp2p_peers", "Count of libp2p peers currently connected" ); - pub static ref PEERS_CONNECTED_INTEROP: Result = - try_create_int_gauge("libp2p_peers", "Count of libp2p peers currently connected"); + pub static ref PEER_CONNECT_EVENT_COUNT: Result = try_create_int_counter( "libp2p_peer_connect_event_total", "Count of libp2p peer connect events (not the current number of connected peers)" @@ -19,6 +22,14 @@ lazy_static! { "libp2p_peer_disconnect_event_total", "Count of libp2p peer disconnect events" ); + pub static ref DISCOVERY_SENT_BYTES: Result = try_create_int_gauge( + "discovery_sent_bytes", + "The number of bytes sent in discovery" + ); + pub static ref DISCOVERY_RECV_BYTES: Result = try_create_int_gauge( + "discovery_recv_bytes", + "The number of bytes received in discovery" + ); pub static ref DISCOVERY_QUEUE: Result = try_create_int_gauge( "discovery_queue_size", "The number of discovery queries awaiting execution" @@ -31,11 +42,7 @@ lazy_static! { "discovery_sessions", "The number of active discovery sessions with peers" ); - pub static ref DISCOVERY_REQS_IP: Result = try_create_float_gauge_vec( - "discovery_reqs_per_ip", - "Unsolicited discovery requests per ip per second", - &["Addresses"] - ); + pub static ref PEERS_PER_CLIENT: Result = try_create_int_gauge_vec( "libp2p_peers_per_client", "The connected peers via client implementation", @@ -57,6 +64,11 @@ lazy_static! 
{ "RPC errors per client", &["client", "rpc_error", "direction"] ); + pub static ref TOTAL_RPC_REQUESTS: Result = try_create_int_counter_vec( + "libp2p_rpc_requests_total", + "RPC requests total", + &["type"] + ); pub static ref PEER_ACTION_EVENTS_PER_CLIENT: Result = try_create_int_counter_vec( "libp2p_peer_actions_per_client", @@ -69,26 +81,57 @@ lazy_static! { "Gossipsub messages that we did not accept, per client", &["client", "validation_result"] ); + + pub static ref PEER_SCORE_DISTRIBUTION: Result = + try_create_int_gauge_vec( + "peer_score_distribution", + "The distribution of connected peer scores", + &["position"] + ); + + pub static ref PEER_SCORE_PER_CLIENT: Result = + try_create_float_gauge_vec( + "peer_score_per_client", + "Average score per client", + &["client"] + ); + + /* + * Inbound/Outbound peers + */ + /// The number of peers that dialed us. + pub static ref NETWORK_INBOUND_PEERS: Result = + try_create_int_gauge("network_inbound_peers","The number of peers that are currently connected that have dialed us."); + + /// The number of peers that we dialed us. + pub static ref NETWORK_OUTBOUND_PEERS: Result = + try_create_int_gauge("network_outbound_peers","The number of peers that are currently connected that we dialed."); +} + +/// Checks if we consider the NAT open. +/// +/// Conditions for an open NAT: +/// 1. We have 1 or more SOCKET_UPDATED messages. This occurs when discovery has a majority of +/// users reporting an external port and our ENR gets updated. +/// 2. We have 0 SOCKET_UPDATED messages (can be true if the port was correct on boot), then we +/// rely on whether we have any inbound messages. If we have no socket update messages, but +/// manage to get at least one inbound peer, we are exposed correctly. +pub fn check_nat() { + // NAT is already deemed open. 
+ if NAT_OPEN.as_ref().map(|v| v.get()).unwrap_or(0) != 0 { + return; + } + if ADDRESS_UPDATE_COUNT.as_ref().map(|v| v.get()).unwrap_or(0) == 0 + || NETWORK_INBOUND_PEERS.as_ref().map(|v| v.get()).unwrap_or(0) != 0_i64 + { + inc_counter(&NAT_OPEN); + } } pub fn scrape_discovery_metrics() { let metrics = discv5::metrics::Metrics::from(discv5::Discv5::raw_metrics()); - set_float_gauge(&DISCOVERY_REQS, metrics.unsolicited_requests_per_second); - set_gauge(&DISCOVERY_SESSIONS, metrics.active_sessions as i64); - - let process_gauge_vec = |gauge: &Result, metrics: discv5::metrics::Metrics| { - if let Ok(gauge_vec) = gauge { - gauge_vec.reset(); - for (ip, value) in metrics.requests_per_ip_per_second.iter() { - if let Ok(metric) = gauge_vec.get_metric_with_label_values(&[&format!("{:?}", ip)]) - { - metric.set(*value); - } - } - } - }; - - process_gauge_vec(&DISCOVERY_REQS_IP, metrics); + set_gauge(&DISCOVERY_SENT_BYTES, metrics.bytes_sent as i64); + set_gauge(&DISCOVERY_RECV_BYTES, metrics.bytes_recv as i64); } diff --git a/beacon_node/lighthouse_network/src/peer_manager/config.rs b/beacon_node/lighthouse_network/src/peer_manager/config.rs index aef8f96504c..6c5523de454 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/config.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/config.rs @@ -16,6 +16,8 @@ pub struct Config { /* Peer count related configurations */ /// Whether discovery is enabled. pub discovery_enabled: bool, + /// Whether metrics are enabled. + pub metrics_enabled: bool, /// Target number of peers to connect to. 
pub target_peer_count: usize, @@ -34,6 +36,7 @@ impl Default for Config { fn default() -> Self { Config { discovery_enabled: true, + metrics_enabled: false, target_peer_count: DEFAULT_TARGET_PEERS, status_interval: DEFAULT_STATUS_INTERVAL, ping_interval_inbound: DEFAULT_PING_INTERVAL_INBOUND, diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 8695d149696..202738c25f9 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -8,13 +8,14 @@ use crate::{Subnet, SubnetDiscovery}; use discv5::Enr; use hashset_delay::HashSetDelay; use libp2p::identify::IdentifyInfo; -use peerdb::{BanOperation, BanResult, ScoreUpdateResult}; +use peerdb::{client::ClientKind, BanOperation, BanResult, ScoreUpdateResult}; use slog::{debug, error, warn}; use smallvec::SmallVec; use std::{ sync::Arc, time::{Duration, Instant}, }; +use strum::IntoEnumIterator; use types::{EthSpec, SyncSubnetId}; pub use libp2p::core::{identity::Keypair, Multiaddr}; @@ -71,6 +72,8 @@ pub struct PeerManager { heartbeat: tokio::time::Interval, /// Keeps track of whether the discovery service is enabled or not. discovery_enabled: bool, + /// Keeps track if the current instance is reporting metrics or not. + metrics_enabled: bool, /// The logger associated with the `PeerManager`. 
log: slog::Logger, } @@ -111,6 +114,7 @@ impl PeerManager { ) -> error::Result { let config::Config { discovery_enabled, + metrics_enabled, target_peer_count, status_interval, ping_interval_inbound, @@ -130,6 +134,7 @@ impl PeerManager { sync_committee_subnets: Default::default(), heartbeat, discovery_enabled, + metrics_enabled, log: log.clone(), }) } @@ -378,19 +383,21 @@ impl PeerManager { "protocols" => ?info.protocols ); - // update the peer client kind metric - if let Some(v) = metrics::get_int_gauge( - &metrics::PEERS_PER_CLIENT, - &[&peer_info.client().kind.to_string()], + // update the peer client kind metric if the peer is connected + if matches!( + peer_info.connection_status(), + PeerConnectionStatus::Connected { .. } + | PeerConnectionStatus::Disconnecting { .. } ) { - v.inc() - }; - if let Some(v) = metrics::get_int_gauge( - &metrics::PEERS_PER_CLIENT, - &[&previous_kind.to_string()], - ) { - v.dec() - }; + metrics::inc_gauge_vec( + &metrics::PEERS_PER_CLIENT, + &[&peer_info.client().kind.to_string()], + ); + metrics::dec_gauge_vec( + &metrics::PEERS_PER_CLIENT, + &[&previous_kind.to_string()], + ); + } } } else { error!(self.log, "Received an Identify response from an unknown peer"; "peer_id" => peer_id.to_string()); @@ -606,6 +613,46 @@ impl PeerManager { } } + // This function updates metrics for all connected peers. + fn update_connected_peer_metrics(&self) { + // Do nothing if we don't have metrics enabled. + if !self.metrics_enabled { + return; + } + + let mut connected_peer_count = 0; + let mut inbound_connected_peers = 0; + let mut outbound_connected_peers = 0; + let mut clients_per_peer = HashMap::new(); + + for (_peer, peer_info) in self.network_globals.peers.read().connected_peers() { + connected_peer_count += 1; + if let PeerConnectionStatus::Connected { n_in, .. 
} = peer_info.connection_status() { + if *n_in > 0 { + inbound_connected_peers += 1; + } else { + outbound_connected_peers += 1; + } + } + *clients_per_peer + .entry(peer_info.client().kind.to_string()) + .or_default() += 1; + } + + metrics::set_gauge(&metrics::PEERS_CONNECTED, connected_peer_count); + metrics::set_gauge(&metrics::NETWORK_INBOUND_PEERS, inbound_connected_peers); + metrics::set_gauge(&metrics::NETWORK_OUTBOUND_PEERS, outbound_connected_peers); + + for client_kind in ClientKind::iter() { + let value = clients_per_peer.get(&client_kind.to_string()).unwrap_or(&0); + metrics::set_gauge_vec( + &metrics::PEERS_PER_CLIENT, + &[&client_kind.to_string()], + *value as i64, + ); + } + } + /* Internal functions */ /// Sets a peer as connected as long as their reputation allows it @@ -705,22 +752,6 @@ impl PeerManager { // increment prometheus metrics metrics::inc_counter(&metrics::PEER_CONNECT_EVENT_COUNT); metrics::set_gauge(&metrics::PEERS_CONNECTED, connected_peers); - metrics::set_gauge(&metrics::PEERS_CONNECTED_INTEROP, connected_peers); - - // Increment the PEERS_PER_CLIENT metric - if let Some(kind) = self - .network_globals - .peers - .read() - .peer_info(peer_id) - .map(|peer_info| peer_info.client().kind.clone()) - { - if let Some(v) = - metrics::get_int_gauge(&metrics::PEERS_PER_CLIENT, &[&kind.to_string()]) - { - v.inc() - }; - } true } @@ -802,6 +833,9 @@ impl PeerManager { self.handle_score_action(&peer_id, action, None); } + // Update peer score metrics; + self.update_peer_score_metrics(); + // Maintain minimum count for sync committee peers. self.maintain_sync_committee_peers(); @@ -840,6 +874,75 @@ impl PeerManager { self.disconnect_peer(peer_id, GoodbyeReason::TooManyPeers); } } + + // Update metrics related to peer scoring. 
+ fn update_peer_score_metrics(&self) { + if !self.metrics_enabled { + return; + } + // reset the gauges + let _ = metrics::PEER_SCORE_DISTRIBUTION + .as_ref() + .map(|gauge| gauge.reset()); + let _ = metrics::PEER_SCORE_PER_CLIENT + .as_ref() + .map(|gauge| gauge.reset()); + + let mut avg_score_per_client: HashMap = HashMap::with_capacity(5); + { + let peers_db_read_lock = self.network_globals.peers.read(); + let connected_peers = peers_db_read_lock.best_peers_by_status(PeerInfo::is_connected); + let total_peers = connected_peers.len(); + for (id, (_peer, peer_info)) in connected_peers.into_iter().enumerate() { + // First quartile + if id == 0 { + metrics::set_gauge_vec( + &metrics::PEER_SCORE_DISTRIBUTION, + &["1st"], + peer_info.score().score() as i64, + ); + } else if id == (total_peers * 3 / 4).saturating_sub(1) { + metrics::set_gauge_vec( + &metrics::PEER_SCORE_DISTRIBUTION, + &["3/4"], + peer_info.score().score() as i64, + ); + } else if id == (total_peers / 2).saturating_sub(1) { + metrics::set_gauge_vec( + &metrics::PEER_SCORE_DISTRIBUTION, + &["1/2"], + peer_info.score().score() as i64, + ); + } else if id == (total_peers / 4).saturating_sub(1) { + metrics::set_gauge_vec( + &metrics::PEER_SCORE_DISTRIBUTION, + &["1/4"], + peer_info.score().score() as i64, + ); + } else if id == total_peers.saturating_sub(1) { + metrics::set_gauge_vec( + &metrics::PEER_SCORE_DISTRIBUTION, + &["last"], + peer_info.score().score() as i64, + ); + } + + let mut score_peers: &mut (f64, usize) = avg_score_per_client + .entry(peer_info.client().kind.to_string()) + .or_default(); + score_peers.0 += peer_info.score().score(); + score_peers.1 += 1; + } + } // read lock ended + + for (client, (score, peers)) in avg_score_per_client { + metrics::set_float_gauge_vec( + &metrics::PEER_SCORE_PER_CLIENT, + &[&client.to_string()], + score / (peers as f64), + ); + } + } } enum ConnectingType { diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs 
b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index a11f3739ea7..d194deffd4f 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -111,8 +111,11 @@ impl NetworkBehaviour for PeerManager { endpoint: &ConnectedPoint, _failed_addresses: Option<&Vec>, ) { - // Log the connection debug!(self.log, "Connection established"; "peer_id" => %peer_id, "connection" => ?endpoint.to_endpoint()); + // Check NAT if metrics are enabled + if self.network_globals.local_enr.read().udp().is_some() { + metrics::check_nat(); + } // Check to make sure the peer is not supposed to be banned match self.ban_status(peer_id) { @@ -150,10 +153,8 @@ impl NetworkBehaviour for PeerManager { return; } - // Register the newly connected peer (regardless if we are about to disconnect them). // NOTE: We don't register peers that we are disconnecting immediately. The network service // does not need to know about these peers. - // let enr match endpoint { ConnectedPoint::Listener { send_back_addr, .. 
} => { self.inject_connect_ingoing(peer_id, send_back_addr.clone(), None); @@ -167,12 +168,9 @@ impl NetworkBehaviour for PeerManager { } } - let connected_peers = self.network_globals.connected_peers() as i64; - // increment prometheus metrics + self.update_connected_peer_metrics(); metrics::inc_counter(&metrics::PEER_CONNECT_EVENT_COUNT); - metrics::set_gauge(&metrics::PEERS_CONNECTED, connected_peers); - metrics::set_gauge(&metrics::PEERS_CONNECTED_INTEROP, connected_peers); } fn inject_disconnected(&mut self, peer_id: &PeerId) { @@ -190,21 +188,6 @@ impl NetworkBehaviour for PeerManager { self.events .push(PeerManagerEvent::PeerDisconnected(*peer_id)); debug!(self.log, "Peer disconnected"; "peer_id" => %peer_id); - - // Decrement the PEERS_PER_CLIENT metric - if let Some(kind) = self - .network_globals - .peers - .read() - .peer_info(peer_id) - .map(|info| info.client().kind.clone()) - { - if let Some(v) = - metrics::get_int_gauge(&metrics::PEERS_PER_CLIENT, &[&kind.to_string()]) - { - v.dec() - }; - } } // NOTE: It may be the case that a rejected node, due to too many peers is disconnected @@ -212,12 +195,9 @@ impl NetworkBehaviour for PeerManager { // reference so that peer manager can track this peer. 
self.inject_disconnect(peer_id); - let connected_peers = self.network_globals.connected_peers() as i64; - // Update the prometheus metrics + self.update_connected_peer_metrics(); metrics::inc_counter(&metrics::PEER_DISCONNECT_EVENT_COUNT); - metrics::set_gauge(&metrics::PEERS_CONNECTED, connected_peers); - metrics::set_gauge(&metrics::PEERS_CONNECTED_INTEROP, connected_peers); } fn inject_address_change( diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs index 8f1738ac688..7cc84516a07 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs @@ -4,7 +4,7 @@ use libp2p::identify::IdentifyInfo; use serde::Serialize; -use strum::{AsRefStr, AsStaticStr}; +use strum::{AsRefStr, AsStaticStr, EnumIter}; /// Various client and protocol information related to a node. #[derive(Clone, Debug, Serialize)] @@ -21,7 +21,7 @@ pub struct Client { pub agent_string: Option, } -#[derive(Clone, Debug, Serialize, PartialEq, AsRefStr, AsStaticStr)] +#[derive(Clone, Debug, Serialize, PartialEq, AsRefStr, AsStaticStr, EnumIter)] pub enum ClientKind { /// A lighthouse node (the best kind). 
Lighthouse, diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs index 3ff5dc04acf..941ca7e6c93 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs @@ -19,8 +19,6 @@ use PeerConnectionStatus::*; #[derive(Clone, Debug, Serialize)] #[serde(bound = "T: EthSpec")] pub struct PeerInfo { - /// The connection status of the peer - _status: PeerStatus, /// The peers reputation score: Score, /// Client managing this peer @@ -57,7 +55,6 @@ pub struct PeerInfo { impl Default for PeerInfo { fn default() -> PeerInfo { PeerInfo { - _status: Default::default(), score: Score::default(), client: Client::default(), connection_status: Default::default(), @@ -387,21 +384,6 @@ impl PeerInfo { } } -#[derive(Clone, Debug, Serialize)] -/// The current health status of the peer. -pub enum PeerStatus { - /// The peer is healthy. - Healthy, - /// The peer is clogged. It has not been responding to requests on time. - _Clogged, -} - -impl Default for PeerStatus { - fn default() -> Self { - PeerStatus::Healthy - } -} - /// Connection Direction of connection. 
#[derive(Debug, Clone, Serialize, AsRefStr)] #[strum(serialize_all = "snake_case")] diff --git a/beacon_node/lighthouse_network/src/service.rs b/beacon_node/lighthouse_network/src/service.rs index 60252385d99..23c19829065 100644 --- a/beacon_node/lighthouse_network/src/service.rs +++ b/beacon_node/lighthouse_network/src/service.rs @@ -20,6 +20,7 @@ use libp2p::{ swarm::{SwarmBuilder, SwarmEvent}, PeerId, Swarm, Transport, }; +use open_metrics_client::registry::Registry; use slog::{crit, debug, info, o, trace, warn, Logger}; use ssz::Decode; use std::fs::File; @@ -62,27 +63,34 @@ pub struct Service { pub log: Logger, } +pub struct Context<'a> { + pub config: &'a NetworkConfig, + pub enr_fork_id: EnrForkId, + pub fork_context: Arc, + pub chain_spec: &'a ChainSpec, + pub gossipsub_registry: Option<&'a mut Registry>, +} + impl Service { pub async fn new( executor: task_executor::TaskExecutor, - config: &NetworkConfig, - enr_fork_id: EnrForkId, + ctx: Context<'_>, log: &Logger, - fork_context: Arc, - chain_spec: &ChainSpec, ) -> error::Result<(Arc>, Self)> { let log = log.new(o!("service"=> "libp2p")); trace!(log, "Libp2p Service starting"); + let config = ctx.config; // initialise the node's ID let local_keypair = load_private_key(config, &log); // Create an ENR or load from disk if appropriate let enr = - enr::build_or_load_enr::(local_keypair.clone(), config, enr_fork_id, &log)?; + enr::build_or_load_enr::(local_keypair.clone(), config, &ctx.enr_fork_id, &log)?; let local_peer_id = enr.peer_id(); + // Construct the metadata let meta_data = load_or_build_metadata(&config.network_dir, &log); // set up a collection of variables accessible outside of the network crate @@ -113,15 +121,8 @@ impl Service { .map_err(|e| format!("Failed to build transport: {:?}", e))?; // Lighthouse network behaviour - let behaviour = Behaviour::new( - &local_keypair, - config.clone(), - network_globals.clone(), - &log, - fork_context, - chain_spec, - ) - .await?; + let behaviour = + 
Behaviour::new(&local_keypair, ctx, network_globals.clone(), &log).await?; // use the executor for libp2p struct Executor(task_executor::TaskExecutor); diff --git a/beacon_node/lighthouse_network/tests/common/mod.rs b/beacon_node/lighthouse_network/tests/common/mod.rs index 520921e87b9..7397fe7ea98 100644 --- a/beacon_node/lighthouse_network/tests/common/mod.rs +++ b/beacon_node/lighthouse_network/tests/common/mod.rs @@ -128,19 +128,18 @@ pub async fn build_libp2p_instance( let (signal, exit) = exit_future::signal(); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let executor = task_executor::TaskExecutor::new(rt, exit, log.clone(), shutdown_tx); - let fork_context = Arc::new(fork_context()); + let libp2p_context = lighthouse_network::Context { + config: &config, + enr_fork_id: EnrForkId::default(), + fork_context: Arc::new(fork_context()), + chain_spec: &ChainSpec::minimal(), + gossipsub_registry: None, + }; Libp2pInstance( - LibP2PService::new( - executor, - &config, - EnrForkId::default(), - &log, - fork_context, - &ChainSpec::minimal(), - ) - .await - .expect("should build libp2p instance") - .1, + LibP2PService::new(executor, libp2p_context, &log) + .await + .expect("should build libp2p instance") + .1, signal, ) } diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index 35c5b4dce14..a10d238764b 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -4,216 +4,42 @@ use beacon_chain::{ }; use fnv::FnvHashMap; pub use lighthouse_metrics::*; -use lighthouse_network::PubsubMessage; use lighthouse_network::{ - types::GossipKind, BandwidthSinks, GossipTopic, Gossipsub, NetworkGlobals, TopicHash, + types::GossipKind, BandwidthSinks, GossipTopic, Gossipsub, NetworkGlobals, }; -use std::{collections::HashMap, sync::Arc}; +use std::sync::Arc; use strum::AsStaticRef; -use types::{ - consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, subnet_id::subnet_id_to_string, - 
sync_subnet_id::sync_subnet_id_to_string, EthSpec, -}; +use types::EthSpec; lazy_static! { - /* - * Gossip subnets and scoring - */ - pub static ref PEERS_PER_PROTOCOL: Result = try_create_int_gauge_vec( - "gossipsub_peers_per_protocol", - "Peers via supported protocol", - &["protocol"] - ); - - pub static ref GOSSIPSUB_SUBSCRIBED_ATTESTATION_SUBNET_TOPIC: Result = try_create_int_gauge_vec( - "gossipsub_subscribed_attestation_subnets", - "Attestation subnets currently subscribed to", - &["subnet"] - ); - - pub static ref GOSSIPSUB_SUBSCRIBED_SYNC_SUBNET_TOPIC: Result = try_create_int_gauge_vec( - "gossipsub_subscribed_sync_subnets", - "Sync subnets currently subscribed to", - &["subnet"] - ); - - pub static ref GOSSIPSUB_SUBSCRIBED_PEERS_ATTESTATION_SUBNET_TOPIC: Result = try_create_int_gauge_vec( - "gossipsub_peers_per_attestation_subnet_topic_count", - "Peers subscribed per attestation subnet topic", - &["subnet"] - ); - - pub static ref GOSSIPSUB_SUBSCRIBED_PEERS_SYNC_SUBNET_TOPIC: Result = try_create_int_gauge_vec( - "gossipsub_peers_per_sync_subnet_topic_count", - "Peers subscribed per sync subnet topic", - &["subnet"] - ); - - pub static ref MESH_PEERS_PER_MAIN_TOPIC: Result = try_create_int_gauge_vec( - "gossipsub_mesh_peers_per_main_topic", - "Mesh peers per main topic", - &["topic_hash"] - ); - - pub static ref MESH_PEERS_PER_ATTESTATION_SUBNET_TOPIC: Result = try_create_int_gauge_vec( - "gossipsub_mesh_peers_per_subnet_topic", - "Mesh peers per subnet topic", - &["subnet"] - ); - - pub static ref MESH_PEERS_PER_SYNC_SUBNET_TOPIC: Result = try_create_int_gauge_vec( - "gossipsub_mesh_peers_per_subnet_topic", - "Mesh peers per subnet topic", - &["subnet"] - ); - - pub static ref AVG_GOSSIPSUB_PEER_SCORE_PER_MAIN_TOPIC: Result = try_create_float_gauge_vec( - "gossipsub_avg_peer_score_per_topic", - "Average peer's score per topic", - &["topic_hash"] - ); - - pub static ref AVG_GOSSIPSUB_PEER_SCORE_PER_ATTESTATION_SUBNET_TOPIC: Result = 
try_create_float_gauge_vec( - "gossipsub_avg_peer_score_per_attestation_subnet_topic", - "Average peer's score per attestation subnet topic", - &["subnet"] - ); - - pub static ref AVG_GOSSIPSUB_PEER_SCORE_PER_SYNC_SUBNET_TOPIC: Result = try_create_float_gauge_vec( - "gossipsub_avg_peer_score_per_sync_subnet_topic", - "Average peer's score per sync committee subnet topic", - &["subnet"] - ); - - pub static ref ATTESTATIONS_PUBLISHED_PER_SUBNET_PER_SLOT: Result = try_create_int_counter_vec( - "gossipsub_attestations_published_per_subnet_per_slot", - "Failed attestation publishes per subnet", - &["subnet"] - ); - - pub static ref SCORES_BELOW_ZERO_PER_CLIENT: Result = try_create_float_gauge_vec( - "gossipsub_scores_below_zero_per_client", - "Relative number of scores below zero per client", - &["Client"] - ); - pub static ref SCORES_BELOW_GOSSIP_THRESHOLD_PER_CLIENT: Result = try_create_float_gauge_vec( - "gossipsub_scores_below_gossip_threshold_per_client", - "Relative number of scores below gossip threshold per client", - &["Client"] - ); - pub static ref SCORES_BELOW_PUBLISH_THRESHOLD_PER_CLIENT: Result = try_create_float_gauge_vec( - "gossipsub_scores_below_publish_threshold_per_client", - "Relative number of scores below publish threshold per client", - &["Client"] - ); - pub static ref SCORES_BELOW_GREYLIST_THRESHOLD_PER_CLIENT: Result = try_create_float_gauge_vec( - "gossipsub_scores_below_greylist_threshold_per_client", - "Relative number of scores below greylist threshold per client", + pub static ref BEACON_BLOCK_MESH_PEERS_PER_CLIENT: Result = + try_create_int_gauge_vec( + "block_mesh_peers_per_client", + "Number of mesh peers for BeaconBlock topic per client", &["Client"] ); - pub static ref MIN_SCORES_PER_CLIENT: Result = try_create_float_gauge_vec( - "gossipsub_min_scores_per_client", - "Minimum scores per client", - &["Client"] - ); - pub static ref MEDIAN_SCORES_PER_CLIENT: Result = try_create_float_gauge_vec( - "gossipsub_median_scores_per_client", - 
"Median scores per client", - &["Client"] - ); - pub static ref MEAN_SCORES_PER_CLIENT: Result = try_create_float_gauge_vec( - "gossipsub_mean_scores_per_client", - "Mean scores per client", - &["Client"] - ); - pub static ref MAX_SCORES_PER_CLIENT: Result = try_create_float_gauge_vec( - "gossipsub_max_scores_per_client", - "Max scores per client", - &["Client"] - ); - pub static ref BEACON_BLOCK_MESH_PEERS_PER_CLIENT: Result = - try_create_int_gauge_vec( - "block_mesh_peers_per_client", - "Number of mesh peers for BeaconBlock topic per client", - &["Client"] - ); pub static ref BEACON_AGGREGATE_AND_PROOF_MESH_PEERS_PER_CLIENT: Result = try_create_int_gauge_vec( "beacon_aggregate_and_proof_mesh_peers_per_client", "Number of mesh peers for BeaconAggregateAndProof topic per client", &["Client"] ); -} - -lazy_static! { - /* - * Gossip Rx - */ - pub static ref GOSSIP_BLOCKS_RX: Result = try_create_int_counter( - "gossipsub_blocks_rx_total", - "Count of gossip blocks received" - ); - pub static ref GOSSIP_UNAGGREGATED_ATTESTATIONS_RX: Result = try_create_int_counter( - "gossipsub_unaggregated_attestations_rx_total", - "Count of gossip unaggregated attestations received" - ); - pub static ref GOSSIP_AGGREGATED_ATTESTATIONS_RX: Result = try_create_int_counter( - "gossipsub_aggregated_attestations_rx_total", - "Count of gossip aggregated attestations received" - ); - pub static ref GOSSIP_SYNC_COMMITTEE_MESSAGE_RX: Result = try_create_int_counter( - "gossipsub_sync_committee_message_rx_total", - "Count of gossip sync committee messages received" - ); - pub static ref GOSSIP_SYNC_COMMITTEE_CONTRIBUTION_RX: Result = try_create_int_counter( - "gossipsub_sync_committee_contribution_received_total", - "Count of gossip sync committee contributions received" - ); - - - /* - * Gossip Tx - */ - pub static ref GOSSIP_BLOCKS_TX: Result = try_create_int_counter( - "gossipsub_blocks_tx_total", - "Count of gossip blocks transmitted" - ); - pub static ref 
GOSSIP_UNAGGREGATED_ATTESTATIONS_TX: Result = try_create_int_counter( - "gossipsub_unaggregated_attestations_tx_total", - "Count of gossip unaggregated attestations transmitted" - ); - pub static ref GOSSIP_AGGREGATED_ATTESTATIONS_TX: Result = try_create_int_counter( - "gossipsub_aggregated_attestations_tx_total", - "Count of gossip aggregated attestations transmitted" - ); - pub static ref GOSSIP_SYNC_COMMITTEE_MESSAGE_TX: Result = try_create_int_counter( - "gossipsub_sync_committee_message_tx_total", - "Count of gossip sync committee messages transmitted" - ); - pub static ref GOSSIP_SYNC_COMMITTEE_CONTRIBUTION_TX: Result = try_create_int_counter( - "gossipsub_sync_committee_contribution_tx_total", - "Count of gossip sync committee contributions transmitted" - ); /* * Attestation subnet subscriptions */ pub static ref SUBNET_SUBSCRIPTION_REQUESTS: Result = try_create_int_counter( - "gossipsub_attestation_subnet_subscriptions_total", + "validator_attestation_subnet_subscriptions_total", "Count of validator attestation subscription requests." ); pub static ref SUBNET_SUBSCRIPTION_AGGREGATOR_REQUESTS: Result = try_create_int_counter( - "gossipsub_subnet_subscriptions_aggregator_total", + "validator_subnet_subscriptions_aggregator_total", "Count of validator subscription requests where the subscriber is an aggregator." ); - - /* - * Sync committee subnet subscriptions - */ - pub static ref SYNC_COMMITTEE_SUBSCRIPTION_REQUESTS: Result = try_create_int_counter( - "gossipsub_sync_committee_subnet_subscriptions_total", + pub static ref SYNC_COMMITTEE_SUBSCRIPTION_REQUESTS: Result = try_create_int_counter( + "validator_sync_committee_subnet_subscriptions_total", "Count of validator sync committee subscription requests." ); @@ -406,14 +232,13 @@ lazy_static! { "beacon_processor_sync_contribution_verified_total", "Total number of sync committee contributions verified for gossip." 
); + pub static ref BEACON_PROCESSOR_SYNC_CONTRIBUTION_IMPORTED_TOTAL: Result = try_create_int_counter( "beacon_processor_sync_contribution_imported_total", "Total number of sync committee contributions imported to fork choice, etc." ); -} - -lazy_static! { + /// Errors and Debugging Stats pub static ref GOSSIP_ATTESTATION_ERRORS_PER_TYPE: Result = try_create_int_counter_vec( "gossipsub_attestation_errors_per_type", @@ -426,8 +251,16 @@ lazy_static! { "Gossipsub sync_committee errors per error type", &["type"] ); +} + +lazy_static! { + + /* + * Bandwidth metrics + */ pub static ref INBOUND_LIBP2P_BYTES: Result = try_create_int_gauge("libp2p_inbound_bytes", "The inbound bandwidth over libp2p"); + pub static ref OUTBOUND_LIBP2P_BYTES: Result = try_create_int_gauge( "libp2p_outbound_bytes", "The outbound bandwidth over libp2p" @@ -436,18 +269,8 @@ lazy_static! { "libp2p_total_bandwidth", "The total inbound/outbound bandwidth over libp2p" ); -} -pub fn update_bandwidth_metrics(bandwidth: Arc) { - set_gauge(&INBOUND_LIBP2P_BYTES, bandwidth.total_inbound() as i64); - set_gauge(&OUTBOUND_LIBP2P_BYTES, bandwidth.total_outbound() as i64); - set_gauge( - &TOTAL_LIBP2P_BANDWIDTH, - (bandwidth.total_inbound() + bandwidth.total_outbound()) as i64, - ); -} -lazy_static! { /* * Sync related metrics */ @@ -489,11 +312,21 @@ lazy_static! { ); pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_EXPIRED_ATTESTATIONS: Result = try_create_int_counter( "beacon_processor_reprocessing_queue_expired_attestations", - "Number of queued attestations which have expired before a matching block has been found" + "Number of queued attestations which have expired before a matching block has been found." 
); pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_ATTESTATIONS: Result = try_create_int_counter( "beacon_processor_reprocessing_queue_matched_attestations", - "Number of queued attestations where as matching block has been imported" + "Number of queued attestations where as matching block has been imported." + ); + +} + +pub fn update_bandwidth_metrics(bandwidth: Arc) { + set_gauge(&INBOUND_LIBP2P_BYTES, bandwidth.total_inbound() as i64); + set_gauge(&OUTBOUND_LIBP2P_BYTES, bandwidth.total_outbound() as i64); + set_gauge( + &TOTAL_LIBP2P_BANDWIDTH, + (bandwidth.total_inbound() + bandwidth.total_outbound()) as i64, ); } @@ -505,402 +338,51 @@ pub fn register_sync_committee_error(error: &SyncCommitteeError) { inc_counter_vec(&GOSSIP_SYNC_COMMITTEE_ERRORS_PER_TYPE, &[error.as_ref()]); } -/// Inspects the `messages` that were being sent to the network and updates Prometheus metrics. -pub fn expose_publish_metrics(messages: &[PubsubMessage]) { - for message in messages { - match message { - PubsubMessage::BeaconBlock(_) => inc_counter(&GOSSIP_BLOCKS_TX), - PubsubMessage::Attestation(subnet_id) => { - inc_counter_vec( - &ATTESTATIONS_PUBLISHED_PER_SUBNET_PER_SLOT, - &[subnet_id.0.as_ref()], - ); - inc_counter(&GOSSIP_UNAGGREGATED_ATTESTATIONS_TX) - } - PubsubMessage::AggregateAndProofAttestation(_) => { - inc_counter(&GOSSIP_AGGREGATED_ATTESTATIONS_TX) - } - PubsubMessage::SyncCommitteeMessage(_) => { - inc_counter(&GOSSIP_SYNC_COMMITTEE_MESSAGE_TX) - } - PubsubMessage::SignedContributionAndProof(_) => { - inc_counter(&GOSSIP_SYNC_COMMITTEE_CONTRIBUTION_TX) - } - _ => {} - } - } -} - -/// Inspects a `message` received from the network and updates Prometheus metrics. 
-pub fn expose_receive_metrics(message: &PubsubMessage) { - match message { - PubsubMessage::BeaconBlock(_) => inc_counter(&GOSSIP_BLOCKS_RX), - PubsubMessage::Attestation(_) => inc_counter(&GOSSIP_UNAGGREGATED_ATTESTATIONS_RX), - PubsubMessage::AggregateAndProofAttestation(_) => { - inc_counter(&GOSSIP_AGGREGATED_ATTESTATIONS_RX) - } - PubsubMessage::SyncCommitteeMessage(_) => inc_counter(&GOSSIP_SYNC_COMMITTEE_MESSAGE_RX), - PubsubMessage::SignedContributionAndProof(_) => { - inc_counter(&GOSSIP_SYNC_COMMITTEE_CONTRIBUTION_RX) - } - _ => {} - } -} - pub fn update_gossip_metrics( gossipsub: &Gossipsub, network_globals: &Arc>, ) { - // Clear the metrics - let _ = PEERS_PER_PROTOCOL.as_ref().map(|gauge| gauge.reset()); - let _ = PEERS_PER_PROTOCOL.as_ref().map(|gauge| gauge.reset()); - let _ = MESH_PEERS_PER_MAIN_TOPIC - .as_ref() - .map(|gauge| gauge.reset()); - let _ = AVG_GOSSIPSUB_PEER_SCORE_PER_MAIN_TOPIC - .as_ref() - .map(|gauge| gauge.reset()); - let _ = AVG_GOSSIPSUB_PEER_SCORE_PER_ATTESTATION_SUBNET_TOPIC - .as_ref() - .map(|gauge| gauge.reset()); - let _ = AVG_GOSSIPSUB_PEER_SCORE_PER_SYNC_SUBNET_TOPIC - .as_ref() - .map(|gauge| gauge.reset()); - - let _ = SCORES_BELOW_ZERO_PER_CLIENT - .as_ref() - .map(|gauge| gauge.reset()); - let _ = SCORES_BELOW_GOSSIP_THRESHOLD_PER_CLIENT - .as_ref() - .map(|gauge| gauge.reset()); - let _ = SCORES_BELOW_PUBLISH_THRESHOLD_PER_CLIENT - .as_ref() - .map(|gauge| gauge.reset()); - let _ = SCORES_BELOW_GREYLIST_THRESHOLD_PER_CLIENT - .as_ref() - .map(|gauge| gauge.reset()); - let _ = MIN_SCORES_PER_CLIENT.as_ref().map(|gauge| gauge.reset()); - let _ = MEDIAN_SCORES_PER_CLIENT.as_ref().map(|gauge| gauge.reset()); - let _ = MEAN_SCORES_PER_CLIENT.as_ref().map(|gauge| gauge.reset()); - let _ = MAX_SCORES_PER_CLIENT.as_ref().map(|gauge| gauge.reset()); - - let _ = BEACON_BLOCK_MESH_PEERS_PER_CLIENT - .as_ref() - .map(|gauge| gauge.reset()); - let _ = BEACON_AGGREGATE_AND_PROOF_MESH_PEERS_PER_CLIENT - .as_ref() - .map(|gauge| 
gauge.reset()); - - // reset the mesh peers, showing all subnets - for subnet_id in 0..T::default_spec().attestation_subnet_count { - let _ = get_int_gauge( - &MESH_PEERS_PER_ATTESTATION_SUBNET_TOPIC, - &[subnet_id_to_string(subnet_id)], - ) - .map(|v| v.set(0)); - - let _ = get_int_gauge( - &GOSSIPSUB_SUBSCRIBED_ATTESTATION_SUBNET_TOPIC, - &[subnet_id_to_string(subnet_id)], - ) - .map(|v| v.set(0)); - - let _ = get_int_gauge( - &GOSSIPSUB_SUBSCRIBED_PEERS_ATTESTATION_SUBNET_TOPIC, - &[subnet_id_to_string(subnet_id)], - ) - .map(|v| v.set(0)); - } - - for subnet_id in 0..SYNC_COMMITTEE_SUBNET_COUNT { - let _ = get_int_gauge( - &MESH_PEERS_PER_SYNC_SUBNET_TOPIC, - &[sync_subnet_id_to_string(subnet_id)], - ) - .map(|v| v.set(0)); - - let _ = get_int_gauge( - &GOSSIPSUB_SUBSCRIBED_SYNC_SUBNET_TOPIC, - &[sync_subnet_id_to_string(subnet_id)], - ) - .map(|v| v.set(0)); - - let _ = get_int_gauge( - &GOSSIPSUB_SUBSCRIBED_PEERS_SYNC_SUBNET_TOPIC, - &[sync_subnet_id_to_string(subnet_id)], - ) - .map(|v| v.set(0)); - } - - // Subnet topics subscribed to + // Mesh peers per client for topic_hash in gossipsub.topics() { if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) { - if let GossipKind::Attestation(subnet_id) = topic.kind() { - let _ = get_int_gauge( - &GOSSIPSUB_SUBSCRIBED_ATTESTATION_SUBNET_TOPIC, - &[subnet_id_to_string(subnet_id.into())], - ) - .map(|v| v.set(1)); - } - } - } - - // Peers per subscribed subnet - let mut peers_per_topic: HashMap = HashMap::new(); - for (peer_id, topics) in gossipsub.all_peers() { - for topic_hash in topics { - *peers_per_topic.entry(topic_hash.clone()).or_default() += 1; - - if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) { - match topic.kind() { - GossipKind::Attestation(subnet_id) => { - if let Some(v) = get_int_gauge( - &GOSSIPSUB_SUBSCRIBED_PEERS_ATTESTATION_SUBNET_TOPIC, - &[subnet_id_to_string(subnet_id.into())], - ) { + match topic.kind() { + GossipKind::Attestation(_subnet_id) => {} + 
GossipKind::BeaconBlock => { + for peer_id in gossipsub.mesh_peers(topic_hash) { + let client = network_globals + .peers + .read() + .peer_info(peer_id) + .map(|peer_info| peer_info.client().kind.as_static()) + .unwrap_or_else(|| "Unknown"); + if let Some(v) = + get_int_gauge(&BEACON_BLOCK_MESH_PEERS_PER_CLIENT, &[client]) + { v.inc() }; - - // average peer scores - if let Some(score) = gossipsub.peer_score(peer_id) { - if let Some(v) = get_gauge( - &AVG_GOSSIPSUB_PEER_SCORE_PER_ATTESTATION_SUBNET_TOPIC, - &[subnet_id_to_string(subnet_id.into())], - ) { - v.add(score) - }; - } } - GossipKind::SyncCommitteeMessage(subnet_id) => { + } + GossipKind::BeaconAggregateAndProof => { + for peer_id in gossipsub.mesh_peers(topic_hash) { + let client = network_globals + .peers + .read() + .peer_info(peer_id) + .map(|peer_info| peer_info.client().kind.as_static()) + .unwrap_or_else(|| "Unknown"); if let Some(v) = get_int_gauge( - &GOSSIPSUB_SUBSCRIBED_PEERS_SYNC_SUBNET_TOPIC, - &[sync_subnet_id_to_string(subnet_id.into())], + &BEACON_AGGREGATE_AND_PROOF_MESH_PEERS_PER_CLIENT, + &[client], ) { v.inc() }; - - // average peer scores - if let Some(score) = gossipsub.peer_score(peer_id) { - if let Some(v) = get_gauge( - &AVG_GOSSIPSUB_PEER_SCORE_PER_SYNC_SUBNET_TOPIC, - &[sync_subnet_id_to_string(subnet_id.into())], - ) { - v.add(score) - }; - } - } - kind => { - // main topics - if let Some(score) = gossipsub.peer_score(peer_id) { - if let Some(v) = get_gauge( - &AVG_GOSSIPSUB_PEER_SCORE_PER_MAIN_TOPIC, - &[kind.as_ref()], - ) { - v.add(score) - }; - } } } + GossipKind::SyncCommitteeMessage(_subnet_id) => {} + _kind => {} } } } - // adjust to average scores by dividing by number of peers - for (topic_hash, peers) in peers_per_topic.iter() { - if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) { - match topic.kind() { - GossipKind::Attestation(subnet_id) => { - // average peer scores - if let Some(v) = get_gauge( - &AVG_GOSSIPSUB_PEER_SCORE_PER_ATTESTATION_SUBNET_TOPIC, - 
&[subnet_id_to_string(subnet_id.into())], - ) { - v.set(v.get() / (*peers as f64)) - }; - } - GossipKind::SyncCommitteeMessage(subnet_id) => { - // average peer scores - if let Some(v) = get_gauge( - &AVG_GOSSIPSUB_PEER_SCORE_PER_SYNC_SUBNET_TOPIC, - &[sync_subnet_id_to_string(subnet_id.into())], - ) { - v.set(v.get() / (*peers as f64)) - }; - } - kind => { - // main topics - if let Some(v) = - get_gauge(&AVG_GOSSIPSUB_PEER_SCORE_PER_MAIN_TOPIC, &[kind.as_ref()]) - { - v.set(v.get() / (*peers as f64)) - }; - } - } - } - } - - // mesh peers - for topic_hash in gossipsub.topics() { - let peers = gossipsub.mesh_peers(topic_hash).count(); - if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) { - match topic.kind() { - GossipKind::Attestation(subnet_id) => { - if let Some(v) = get_int_gauge( - &MESH_PEERS_PER_ATTESTATION_SUBNET_TOPIC, - &[subnet_id_to_string(subnet_id.into())], - ) { - v.set(peers as i64) - }; - } - GossipKind::SyncCommitteeMessage(subnet_id) => { - if let Some(v) = get_int_gauge( - &MESH_PEERS_PER_SYNC_SUBNET_TOPIC, - &[sync_subnet_id_to_string(subnet_id.into())], - ) { - v.set(peers as i64) - }; - } - kind => { - // main topics - if let Some(v) = get_int_gauge(&MESH_PEERS_PER_MAIN_TOPIC, &[kind.as_ref()]) { - v.set(peers as i64) - }; - } - } - } - } - - // protocol peers - let mut peers_per_protocol: HashMap<&'static str, i64> = HashMap::new(); - for (_peer, protocol) in gossipsub.peer_protocol() { - *peers_per_protocol - .entry(protocol.as_static_ref()) - .or_default() += 1; - } - - for (protocol, peers) in peers_per_protocol.iter() { - if let Some(v) = get_int_gauge(&PEERS_PER_PROTOCOL, &[protocol]) { - v.set(*peers) - }; - } - - let mut peer_to_client = HashMap::new(); - let mut scores_per_client: HashMap<&'static str, Vec> = HashMap::new(); - { - let peers = network_globals.peers.read(); - for (peer_id, _) in gossipsub.all_peers() { - let client = peers - .peer_info(peer_id) - .map(|peer_info| peer_info.client().kind.as_static()) - 
.unwrap_or_else(|| "Unknown"); - - peer_to_client.insert(peer_id, client); - let score = gossipsub.peer_score(peer_id).unwrap_or(0.0); - scores_per_client.entry(client).or_default().push(score); - } - } - - // mesh peers per client - for topic_hash in gossipsub.topics() { - if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) { - match topic.kind() { - GossipKind::BeaconBlock => { - for peer in gossipsub.mesh_peers(topic_hash) { - if let Some(client) = peer_to_client.get(peer) { - if let Some(v) = - get_int_gauge(&BEACON_BLOCK_MESH_PEERS_PER_CLIENT, &[client]) - { - v.inc() - }; - } - } - } - GossipKind::BeaconAggregateAndProof => { - for peer in gossipsub.mesh_peers(topic_hash) { - if let Some(client) = peer_to_client.get(peer) { - if let Some(v) = get_int_gauge( - &BEACON_AGGREGATE_AND_PROOF_MESH_PEERS_PER_CLIENT, - &[client], - ) { - v.inc() - }; - } - } - } - _ => (), - } - } - } - - for (client, scores) in scores_per_client.into_iter() { - let c = &[client]; - let len = scores.len(); - if len > 0 { - let mut below0 = 0; - let mut below_gossip_threshold = 0; - let mut below_publish_threshold = 0; - let mut below_greylist_threshold = 0; - let mut min = f64::INFINITY; - let mut sum = 0.0; - let mut max = f64::NEG_INFINITY; - - let count = scores.len() as f64; - - for &score in &scores { - if score < 0.0 { - below0 += 1; - } - if score < -4000.0 { - //TODO not hardcode - below_gossip_threshold += 1; - } - if score < -8000.0 { - //TODO not hardcode - below_publish_threshold += 1; - } - if score < -16000.0 { - //TODO not hardcode - below_greylist_threshold += 1; - } - if score < min { - min = score; - } - if score > max { - max = score; - } - sum += score; - } - - let median = if len == 0 { - 0.0 - } else if len % 2 == 0 { - (scores[len / 2 - 1] + scores[len / 2]) / 2.0 - } else { - scores[len / 2] - }; - - set_gauge_entry(&SCORES_BELOW_ZERO_PER_CLIENT, c, below0 as f64 / count); - set_gauge_entry( - &SCORES_BELOW_GOSSIP_THRESHOLD_PER_CLIENT, - c, - 
below_gossip_threshold as f64 / count, - ); - set_gauge_entry( - &SCORES_BELOW_PUBLISH_THRESHOLD_PER_CLIENT, - c, - below_publish_threshold as f64 / count, - ); - set_gauge_entry( - &SCORES_BELOW_GREYLIST_THRESHOLD_PER_CLIENT, - c, - below_greylist_threshold as f64 / count, - ); - - set_gauge_entry(&MIN_SCORES_PER_CLIENT, c, min); - set_gauge_entry(&MEDIAN_SCORES_PER_CLIENT, c, median); - set_gauge_entry(&MEAN_SCORES_PER_CLIENT, c, sum / count); - set_gauge_entry(&MAX_SCORES_PER_CLIENT, c, max); - } - } } pub fn update_sync_metrics(network_globals: &Arc>) { diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index ce8aca47250..485b0a98f5b 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -9,15 +9,18 @@ use crate::{ use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; use futures::future::OptionFuture; use futures::prelude::*; +use lighthouse_network::{ + open_metrics_client::registry::Registry, MessageAcceptance, Service as LibP2PService, +}; use lighthouse_network::{ rpc::{GoodbyeReason, RPCResponseErrorCode, RequestId}, - Libp2pEvent, PeerAction, PeerRequestId, PubsubMessage, ReportSource, Request, Response, Subnet, + Context, Libp2pEvent, PeerAction, PeerRequestId, PubsubMessage, ReportSource, Request, + Response, Subnet, }; use lighthouse_network::{ types::{GossipEncoding, GossipTopic}, BehaviourEvent, MessageId, NetworkGlobals, PeerId, }; -use lighthouse_network::{MessageAcceptance, Service as LibP2PService}; use slog::{crit, debug, error, info, o, trace, warn}; use std::{net::SocketAddr, pin::Pin, sync::Arc, time::Duration}; use store::HotColdDB; @@ -32,7 +35,7 @@ use types::{ mod tests; /// The interval (in seconds) that various network metrics will update. -const METRIC_UPDATE_INTERVAL: u64 = 1; +const METRIC_UPDATE_INTERVAL: u64 = 5; /// Number of slots before the fork when we should subscribe to the new fork topics. 
const SUBSCRIBE_DELAY_SLOTS: u64 = 2; /// Delay after a fork where we unsubscribe from pre-fork topics. @@ -154,6 +157,7 @@ impl NetworkService { beacon_chain: Arc>, config: &NetworkConfig, executor: task_executor::TaskExecutor, + gossipsub_registry: Option<&'_ mut Registry>, ) -> error::Result<( Arc>, mpsc::UnboundedSender>, @@ -199,16 +203,18 @@ impl NetworkService { debug!(network_log, "Current fork"; "fork_name" => ?fork_context.current_fork()); - // launch libp2p service - let (network_globals, mut libp2p) = LibP2PService::new( - executor.clone(), + // construct the libp2p service context + let service_context = Context { config, enr_fork_id, - &network_log, - fork_context.clone(), - &beacon_chain.spec, - ) - .await?; + fork_context: fork_context.clone(), + chain_spec: &beacon_chain.spec, + gossipsub_registry, + }; + + // launch libp2p service + let (network_globals, mut libp2p) = + LibP2PService::new(executor.clone(), service_context, &network_log).await?; // Repopulate the DHT with stored ENR's if discovery is not disabled. if !config.disable_discovery { @@ -324,21 +330,13 @@ fn spawn_service( // spawn on the current executor executor.spawn(async move { - let mut metric_update_counter = 0; loop { // build the futures to check simultaneously tokio::select! 
{ _ = service.metrics_update.tick(), if service.metrics_enabled => { // update various network metrics - metric_update_counter +=1; - if metric_update_counter % T::EthSpec::default_spec().seconds_per_slot == 0 { - // if a slot has occurred, reset the metrics - let _ = metrics::ATTESTATIONS_PUBLISHED_PER_SUBNET_PER_SLOT - .as_ref() - .map(|gauge| gauge.reset()); - } metrics::update_gossip_metrics::( - service.libp2p.swarm.behaviour_mut().gs(), + service.libp2p.swarm.behaviour().gs(), &service.network_globals, ); // update sync metrics @@ -445,7 +443,6 @@ fn spawn_service( "count" => messages.len(), "topics" => ?topic_kinds ); - metrics::expose_publish_metrics(&messages); service.libp2p.swarm.behaviour_mut().publish(messages); } NetworkMessage::ReportPeer { peer_id, action, source } => service.libp2p.report_peer(&peer_id, action, source), @@ -643,9 +640,6 @@ fn spawn_service( message, .. } => { - // Update prometheus metrics. - metrics::expose_receive_metrics(&message); - match message { // attestation information gets processed in the attestation service PubsubMessage::Attestation(ref subnet_and_attestation) => { diff --git a/beacon_node/network/src/service/tests.rs b/beacon_node/network/src/service/tests.rs index 33b190e4808..d78b1fe4f80 100644 --- a/beacon_node/network/src/service/tests.rs +++ b/beacon_node/network/src/service/tests.rs @@ -67,9 +67,10 @@ mod tests { // Create a new network service which implicitly gets dropped at the // end of the block. 
- let _network_service = NetworkService::start(beacon_chain.clone(), &config, executor) - .await - .unwrap(); + let _network_service = + NetworkService::start(beacon_chain.clone(), &config, executor, None) + .await + .unwrap(); drop(signal); }); diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index 121e22fc659..66a6cf5d28c 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -22,6 +22,6 @@ serde = "1.0.116" serde_derive = "1.0.116" lazy_static = "1.4.0" lighthouse_metrics = { path = "../../common/lighthouse_metrics" } -lru = "0.6.0" +lru = "0.7.1" sloggers = { version = "2.1.1", features = ["json"] } directory = { path = "../../common/directory" } diff --git a/common/lighthouse_metrics/src/lib.rs b/common/lighthouse_metrics/src/lib.rs index 4b7160ae05a..98973de1add 100644 --- a/common/lighthouse_metrics/src/lib.rs +++ b/common/lighthouse_metrics/src/lib.rs @@ -307,6 +307,12 @@ pub fn set_float_gauge(gauge: &Result, value: f64) { } } +pub fn set_float_gauge_vec(gauge_vec: &Result, name: &[&str], value: f64) { + if let Some(gauge) = get_gauge(gauge_vec, name) { + gauge.set(value); + } +} + pub fn inc_gauge(gauge: &Result) { if let Ok(gauge) = gauge { gauge.inc(); diff --git a/lcli/src/generate_bootnode_enr.rs b/lcli/src/generate_bootnode_enr.rs index cb65bb4380f..6f39392d121 100644 --- a/lcli/src/generate_bootnode_enr.rs +++ b/lcli/src/generate_bootnode_enr.rs @@ -39,7 +39,7 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { next_fork_version: genesis_fork_version, next_fork_epoch: Epoch::max_value(), // FAR_FUTURE_EPOCH }; - let enr = build_enr::(&enr_key, &config, enr_fork_id) + let enr = build_enr::(&enr_key, &config, &enr_fork_id) .map_err(|e| format!("Unable to create ENR: {:?}", e))?; fs::create_dir_all(&output_dir).map_err(|e| format!("Unable to create output-dir: {:?}", e))?; diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index 01beda7e9c0..c319c2de1a2 100644 --- a/slasher/Cargo.toml +++ 
b/slasher/Cargo.toml @@ -14,7 +14,7 @@ lazy_static = "1.4.0" lighthouse_metrics = { path = "../common/lighthouse_metrics" } filesystem = { path = "../common/filesystem" } mdbx = { package = "libmdbx", version = "0.1.0" } -lru = "0.6.6" +lru = "0.7.1" parking_lot = "0.11.0" rand = "0.7.3" safe_arith = { path = "../consensus/safe_arith" } From a0c5701e369c16a88ce623bc13a46c11c83aa465 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Wed, 22 Dec 2021 08:15:37 +0000 Subject: [PATCH 02/56] Only import blocks with valid execution payloads (#2869) ## Issue Addressed N/A ## Proposed Changes We are currently treating errors from the EL on `engine_executePayload` as `PayloadVerificationStatus::NotVerified`. This adds the block as a candidate head block in fork choice even if the EL explicitly rejected the block as invalid. `PayloadVerificationStatus::NotVerified` should be only returned when the EL explicitly returns "syncing" imo. This PR propagates an error instead of returning `NotVerified` on all EL errors.
--- beacon_node/beacon_chain/src/execution_payload.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 5896dbf3d8e..ed7095122a3 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -65,7 +65,7 @@ pub fn execute_payload( } ExecutePayloadResponseStatus::Syncing => Ok(PayloadVerificationStatus::NotVerified), }, - Err(_) => Ok(PayloadVerificationStatus::NotVerified), + Err(_) => Err(ExecutionPayloadError::RejectedByExecutionEngine.into()), } } From dfc8968201aefdf52d97ea899b8620d8e22d14a2 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Wed, 22 Dec 2021 18:55:42 +0000 Subject: [PATCH 03/56] Update rust version in `lcli` Dockerfile (#2876) The `lcli` docker build was no longer working on the old rust version Co-authored-by: realbigsean --- lcli/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lcli/Dockerfile b/lcli/Dockerfile index bddf39a43ae..5a4177ead90 100644 --- a/lcli/Dockerfile +++ b/lcli/Dockerfile @@ -1,7 +1,7 @@ # `lcli` requires the full project to be in scope, so this should be built either: # - from the `lighthouse` dir with the command: `docker build -f ./lcli/Dockerflie .` # - from the current directory with the command: `docker build -f ./Dockerfile ../` -FROM rust:1.53.0 AS builder +FROM rust:1.56.1-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake COPY . 
lighthouse ARG PORTABLE From 0b54ff17f209e2a627d9b5664bd25cf9566b948c Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 4 Jan 2022 20:46:44 +0000 Subject: [PATCH 04/56] Fix assert in slashing protection import (#2881) ## Issue Addressed There was an overeager assert in the import of slashing protection data here: https://github.com/sigp/lighthouse/blob/fff01b24ddedcd54486e374460855ca20d3dd232/validator_client/slashing_protection/src/slashing_database.rs#L939 We were asserting that if the import contained any blocks for a validator, then the database should contain only a single block for that validator due to pruning/consolidation. However, we would only prune if the import contained _relevant blocks_ (that would actually change the maximum slot): https://github.com/sigp/lighthouse/blob/fff01b24ddedcd54486e374460855ca20d3dd232/validator_client/slashing_protection/src/slashing_database.rs#L629-L633 This led to spurious failures (in the form of `ConsistencyError`s) when importing an interchange containing no new blocks for any of the validators. This wasn't hard to trigger, e.g. export and then immediately re-import the same file. ## Proposed Changes This PR fixes the issue by simplifying the import so that it's more like the import for attestations. I.e. we make the assert true by always pruning when the imported file contains blocks. In practice this doesn't have any downsides: if we import a new block then the behaviour is as before, except that we drop the `signing_root`. If we import an existing block or an old block then we prune the database to a single block. The only time this would be relevant is during extreme clock drift locally _plus_ import of a non-drifted interchange, which should occur infrequently. ## Additional Info I've also added `Arbitrary` implementations to the slashing protection types so that we can fuzz them. I have a fuzzer sitting in a separate directory which I may or may not commit in a subsequent PR.
There's a new test in the standard interchange tests v5.2.1 that checks for this issue: https://github.com/eth-clients/slashing-protection-interchange-tests/pull/12 --- Cargo.lock | 1 + Makefile | 5 ++- .../slashing_protection/Cargo.toml | 4 ++ validator_client/slashing_protection/Makefile | 2 +- .../src/bin/test_generator.rs | 13 ++++++ .../slashing_protection/src/interchange.rs | 5 +++ .../src/interchange_test.rs | 6 ++- .../src/slashing_database.rs | 45 ++++++++++--------- 8 files changed, 57 insertions(+), 24 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b7a14e1735f..d2d9f799b7a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5479,6 +5479,7 @@ dependencies = [ name = "slashing_protection" version = "0.1.0" dependencies = [ + "arbitrary", "eth2_serde_utils", "filesystem", "lazy_static", diff --git a/Makefile b/Makefile index 6856635ebdd..494f325d269 100644 --- a/Makefile +++ b/Makefile @@ -157,9 +157,10 @@ lint: make-ef-tests: make -C $(EF_TESTS) -# Verifies that state_processing feature arbitrary-fuzz will compile +# Verifies that crates compile with fuzzing features enabled arbitrary-fuzz: - cargo check --manifest-path=consensus/state_processing/Cargo.toml --features arbitrary-fuzz + cargo check -p state_processing --features arbitrary-fuzz + cargo check -p slashing_protection --features arbitrary-fuzz # Runs cargo audit (Audit Cargo.lock files for crates with security vulnerabilities reported to the RustSec Advisory Database) audit: diff --git a/validator_client/slashing_protection/Cargo.toml b/validator_client/slashing_protection/Cargo.toml index 9cfe0ab4ea3..634e49feea1 100644 --- a/validator_client/slashing_protection/Cargo.toml +++ b/validator_client/slashing_protection/Cargo.toml @@ -15,7 +15,11 @@ serde_derive = "1.0.116" serde_json = "1.0.58" eth2_serde_utils = "0.1.1" filesystem = { path = "../../common/filesystem" } +arbitrary = { version = "1.0", features = ["derive"], optional = true } [dev-dependencies] lazy_static = "1.4.0" 
rayon = "1.4.1" + +[features] +arbitrary-fuzz = ["arbitrary", "types/arbitrary-fuzz"] diff --git a/validator_client/slashing_protection/Makefile b/validator_client/slashing_protection/Makefile index 57875902604..ea51193a541 100644 --- a/validator_client/slashing_protection/Makefile +++ b/validator_client/slashing_protection/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v5.2.0 +TESTS_TAG := v5.2.1 GENERATE_DIR := generated-tests OUTPUT_DIR := interchange-tests TARBALL := $(OUTPUT_DIR)-$(TESTS_TAG).tar.gz diff --git a/validator_client/slashing_protection/src/bin/test_generator.rs b/validator_client/slashing_protection/src/bin/test_generator.rs index 2bca9727afc..b96dd8eb796 100644 --- a/validator_client/slashing_protection/src/bin/test_generator.rs +++ b/validator_client/slashing_protection/src/bin/test_generator.rs @@ -224,6 +224,19 @@ fn main() { .with_blocks(vec![(0, 20, false)]), ], ), + MultiTestCase::new( + "multiple_interchanges_single_validator_multiple_blocks_out_of_order", + vec![ + TestCase::new(interchange(vec![(0, vec![0], vec![])])).with_blocks(vec![ + (0, 10, true), + (0, 20, true), + (0, 30, true), + ]), + TestCase::new(interchange(vec![(0, vec![20], vec![])])) + .contains_slashable_data() + .with_blocks(vec![(0, 29, false)]), + ], + ), MultiTestCase::new( "multiple_interchanges_single_validator_fail_iff_imported", vec![ diff --git a/validator_client/slashing_protection/src/interchange.rs b/validator_client/slashing_protection/src/interchange.rs index a9185e5bb24..3793766b6aa 100644 --- a/validator_client/slashing_protection/src/interchange.rs +++ b/validator_client/slashing_protection/src/interchange.rs @@ -7,6 +7,7 @@ use types::{Epoch, Hash256, PublicKeyBytes, Slot}; #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] #[serde(deny_unknown_fields)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct InterchangeMetadata { #[serde(with = "eth2_serde_utils::quoted_u64::require_quotes")] pub interchange_format_version: u64, @@ 
-15,6 +16,7 @@ pub struct InterchangeMetadata { #[derive(Debug, Clone, PartialEq, Eq, Hash, Deserialize, Serialize)] #[serde(deny_unknown_fields)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct InterchangeData { pub pubkey: PublicKeyBytes, pub signed_blocks: Vec, @@ -23,6 +25,7 @@ pub struct InterchangeData { #[derive(Debug, Clone, PartialEq, Eq, Hash, Deserialize, Serialize)] #[serde(deny_unknown_fields)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct SignedBlock { #[serde(with = "eth2_serde_utils::quoted_u64::require_quotes")] pub slot: Slot, @@ -32,6 +35,7 @@ pub struct SignedBlock { #[derive(Debug, Clone, PartialEq, Eq, Hash, Deserialize, Serialize)] #[serde(deny_unknown_fields)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct SignedAttestation { #[serde(with = "eth2_serde_utils::quoted_u64::require_quotes")] pub source_epoch: Epoch, @@ -42,6 +46,7 @@ pub struct SignedAttestation { } #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct Interchange { pub metadata: InterchangeMetadata, pub data: Vec, diff --git a/validator_client/slashing_protection/src/interchange_test.rs b/validator_client/slashing_protection/src/interchange_test.rs index 6bd6ce38b3f..dc828773b9c 100644 --- a/validator_client/slashing_protection/src/interchange_test.rs +++ b/validator_client/slashing_protection/src/interchange_test.rs @@ -9,6 +9,7 @@ use tempfile::tempdir; use types::{Epoch, Hash256, PublicKeyBytes, Slot}; #[derive(Debug, Clone, Deserialize, Serialize)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct MultiTestCase { pub name: String, pub genesis_validators_root: Hash256, @@ -16,6 +17,7 @@ pub struct MultiTestCase { } #[derive(Debug, Clone, Deserialize, Serialize)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct TestCase { pub should_succeed: bool, pub 
contains_slashable_data: bool, @@ -25,6 +27,7 @@ pub struct TestCase { } #[derive(Debug, Clone, Deserialize, Serialize)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct TestBlock { pub pubkey: PublicKeyBytes, pub slot: Slot, @@ -33,6 +36,7 @@ pub struct TestBlock { } #[derive(Debug, Clone, Deserialize, Serialize)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct TestAttestation { pub pubkey: PublicKeyBytes, pub source_epoch: Epoch, @@ -230,7 +234,7 @@ impl TestCase { } } -fn check_minification_invariants(interchange: &Interchange, minified: &Interchange) { +pub fn check_minification_invariants(interchange: &Interchange, minified: &Interchange) { // Metadata should be unchanged. assert_eq!(interchange.metadata, minified.metadata); diff --git a/validator_client/slashing_protection/src/slashing_database.rs b/validator_client/slashing_protection/src/slashing_database.rs index 725aa6057dd..2b187f46eff 100644 --- a/validator_client/slashing_protection/src/slashing_database.rs +++ b/validator_client/slashing_protection/src/slashing_database.rs @@ -648,29 +648,17 @@ impl SlashingDatabase { // Summary of minimum and maximum messages pre-import. let prev_summary = self.validator_summary(pubkey, txn)?; - // If the interchange contains a new maximum slot block, import it. + // If the interchange contains any blocks, update the database with the new max slot. let max_block = record.signed_blocks.iter().max_by_key(|b| b.slot); if let Some(max_block) = max_block { - // Block is relevant if there are no previous blocks, or new block has slot greater than - // previous maximum. - if prev_summary - .max_block_slot - .map_or(true, |max_block_slot| max_block.slot > max_block_slot) - { - self.insert_block_proposal( - txn, - pubkey, - max_block.slot, - max_block - .signing_root - .map(SigningRoot::from) - .unwrap_or_default(), - )?; - - // Prune the database so that it contains *only* the new block. 
- self.prune_signed_blocks(&record.pubkey, max_block.slot, txn)?; - } + // Store new synthetic block with maximum slot and null signing root. Remove all other + // blocks. + let new_max_slot = max_or(prev_summary.max_block_slot, max_block.slot); + let signing_root = SigningRoot::default(); + + self.clear_signed_blocks(pubkey, txn)?; + self.insert_block_proposal(txn, pubkey, new_max_slot, signing_root)?; } // Find the attestations with max source and max target. Unless the input contains slashable @@ -901,6 +889,23 @@ impl SlashingDatabase { Ok(()) } + /// Remove all blocks signed by a given `public_key`. + /// + /// Dangerous, should only be used immediately before inserting a new block in the same + /// transaction. + fn clear_signed_blocks( + &self, + public_key: &PublicKeyBytes, + txn: &Transaction, + ) -> Result<(), NotSafe> { + let validator_id = self.get_validator_id_in_txn(txn, public_key)?; + txn.execute( + "DELETE FROM signed_blocks WHERE validator_id = ?1", + params![validator_id], + )?; + Ok(()) + } + /// Prune the signed attestations table for the given validator keys. pub fn prune_all_signed_attestations<'a>( &self, From fac117667b644705e82f1ab3bba2689b80e1b07f Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 6 Jan 2022 03:14:58 +0000 Subject: [PATCH 05/56] Update to superstruct v0.4.1 (#2886) ## Proposed Changes Update `superstruct` to bring in @realbigsean's fixes necessary for MEV-compatible private beacon block types (a la #2795). The refactoring is due to another change in superstruct that allows partial getters to be auto-generated. 
--- Cargo.lock | 4 ++-- beacon_node/beacon_chain/Cargo.toml | 2 +- beacon_node/beacon_chain/src/beacon_chain.rs | 7 +++++-- .../beacon_chain/src/execution_payload.rs | 3 ++- beacon_node/lighthouse_network/Cargo.toml | 2 +- consensus/fork_choice/src/fork_choice.rs | 2 +- .../src/per_block_processing.rs | 7 ++----- .../block_signature_verifier.rs | 2 +- consensus/types/Cargo.toml | 2 +- consensus/types/src/beacon_block.rs | 9 ++------- consensus/types/src/beacon_block_body.rs | 18 ------------------ testing/simulator/src/checks.rs | 2 +- 12 files changed, 19 insertions(+), 41 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d2d9f799b7a..17d83a0a4a2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5812,9 +5812,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "superstruct" -version = "0.3.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecffe12af481bd0b8950f90676d61fb1e5fc33f1f1c41ce5df11e83fb509aaab" +checksum = "4e623e69a04a6352677c1f892027e14e034dfc6c4aabed0a4a0be9c1a0a46cee" dependencies = [ "darling", "itertools", diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index d4e187bd8d3..9f3db09b742 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -58,7 +58,7 @@ strum = { version = "0.21.0", features = ["derive"] } logging = { path = "../../common/logging" } execution_layer = { path = "../execution_layer" } sensitive_url = { path = "../../common/sensitive_url" } -superstruct = "0.3.0" +superstruct = "0.4.0" [[test]] name = "beacon_chain_tests" diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 0dbff198181..eed4e4fb4b3 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1099,6 +1099,7 @@ impl BeaconChain { .message() .body() .execution_payload() + .ok() 
.map(|ep| ep.block_hash), }) }) @@ -2602,7 +2603,7 @@ impl BeaconChain { } // Register sync aggregate with validator monitor - if let Some(sync_aggregate) = block.body().sync_aggregate() { + if let Ok(sync_aggregate) = block.body().sync_aggregate() { // `SyncCommittee` for the sync_aggregate should correspond to the duty slot let duty_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); let sync_committee = self.sync_committee_at_epoch(duty_epoch)?; @@ -2643,7 +2644,7 @@ impl BeaconChain { block.body().attestations().len() as f64, ); - if let Some(sync_aggregate) = block.body().sync_aggregate() { + if let Ok(sync_aggregate) = block.body().sync_aggregate() { metrics::set_gauge( &metrics::BLOCK_SYNC_AGGREGATE_SET_BITS, sync_aggregate.num_set_bits() as i64, @@ -3241,6 +3242,7 @@ impl BeaconChain { .message() .body() .execution_payload() + .ok() .map(|ep| ep.block_hash); let is_merge_transition_complete = is_merge_transition_complete(&new_head.beacon_state); @@ -3528,6 +3530,7 @@ impl BeaconChain { .message() .body() .execution_payload() + .ok() .map(|ep| ep.block_hash) .unwrap_or_else(Hash256::zero); diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index ed7095122a3..c19bba61268 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -146,7 +146,7 @@ pub fn validate_execution_payload_for_gossip( chain: &BeaconChain, ) -> Result<(), BlockError> { // Only apply this validation if this is a merge beacon block. - if let Some(execution_payload) = block.body().execution_payload() { + if let Ok(execution_payload) = block.body().execution_payload() { // This logic should match `is_execution_enabled`. We use only the execution block hash of // the parent here in order to avoid loading the parent state during gossip verification. 
@@ -289,6 +289,7 @@ pub async fn prepare_execution_payload( .message() .body() .execution_payload() + .ok() .map(|ep| ep.block_hash) }; diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index e148ae2db37..31dfab271e6 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -37,7 +37,7 @@ rand = "0.7.3" directory = { path = "../../common/directory" } regex = "1.3.9" strum = { version = "0.21.0", features = ["derive"] } -superstruct = "0.3.0" +superstruct = "0.4.0" open-metrics-client = "0.13.0" [dependencies.libp2p] diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 86b32aab1a4..3ab07c6af12 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -589,7 +589,7 @@ where .on_verified_block(block, block_root, state) .map_err(Error::AfterBlockFailed)?; - let execution_status = if let Some(execution_payload) = block.body().execution_payload() { + let execution_status = if let Ok(execution_payload) = block.body().execution_payload() { let block_hash = execution_payload.block_hash; if block_hash == Hash256::zero() { diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index ed7275be080..857c7763325 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -148,10 +148,7 @@ pub fn per_block_processing( // `process_randao` as the former depends on the `randao_mix` computed with the reveal of the // previous block. 
if is_execution_enabled(state, block.body()) { - let payload = block - .body() - .execution_payload() - .ok_or(BlockProcessingError::IncorrectStateType)?; + let payload = block.body().execution_payload()?; process_execution_payload(state, payload, spec)?; } @@ -159,7 +156,7 @@ pub fn per_block_processing( process_eth1_data(state, block.body().eth1_data())?; process_operations(state, block.body(), proposer_index, verify_signatures, spec)?; - if let Some(sync_aggregate) = block.body().sync_aggregate() { + if let Ok(sync_aggregate) = block.body().sync_aggregate() { process_sync_aggregate( state, sync_aggregate, diff --git a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs index 3e7a799341f..28044a462c5 100644 --- a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs +++ b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs @@ -302,7 +302,7 @@ where /// Include the signature of the block's sync aggregate (if it exists) for verification. 
pub fn include_sync_aggregate(&mut self, block: &'a SignedBeaconBlock) -> Result<()> { - if let Some(sync_aggregate) = block.message().body().sync_aggregate() { + if let Ok(sync_aggregate) = block.message().body().sync_aggregate() { if let Some(signature_set) = sync_aggregate_signature_set( &self.decompressor, sync_aggregate, diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index f62fcf5999f..ba187fb9a85 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -43,7 +43,7 @@ regex = "1.3.9" lazy_static = "1.4.0" parking_lot = "0.11.1" itertools = "0.10.0" -superstruct = "0.3.0" +superstruct = "0.4.0" [dev-dependencies] criterion = "0.3.3" diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index bdd4142b497..a83be72a06e 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -237,13 +237,8 @@ impl<'a, T: EthSpec> BeaconBlockRef<'a, T> { /// Extracts a reference to an execution payload from a block, returning an error if the block /// is pre-merge. - pub fn execution_payload(&self) -> Result<&ExecutionPayload, InconsistentFork> { - self.body() - .execution_payload() - .ok_or_else(|| InconsistentFork { - fork_at_slot: ForkName::Merge, - object_fork: self.body().fork_name(), - }) + pub fn execution_payload(&self) -> Result<&ExecutionPayload, Error> { + self.body().execution_payload() } } diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index 3b417f5d0ba..d3d005462fd 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -50,24 +50,6 @@ pub struct BeaconBlockBody { } impl<'a, T: EthSpec> BeaconBlockBodyRef<'a, T> { - /// Access the sync aggregate from the block's body, if one exists. 
- pub fn sync_aggregate(self) -> Option<&'a SyncAggregate> { - match self { - BeaconBlockBodyRef::Base(_) => None, - BeaconBlockBodyRef::Altair(inner) => Some(&inner.sync_aggregate), - BeaconBlockBodyRef::Merge(inner) => Some(&inner.sync_aggregate), - } - } - - /// Access the execution payload from the block's body, if one exists. - pub fn execution_payload(self) -> Option<&'a ExecutionPayload> { - match self { - BeaconBlockBodyRef::Base(_) => None, - BeaconBlockBodyRef::Altair(_) => None, - BeaconBlockBodyRef::Merge(inner) => Some(&inner.execution_payload), - } - } - /// Get the fork_name of this object pub fn fork_name(self) -> ForkName { match self { diff --git a/testing/simulator/src/checks.rs b/testing/simulator/src/checks.rs index 42bf61384db..7ff387b9c6a 100644 --- a/testing/simulator/src/checks.rs +++ b/testing/simulator/src/checks.rs @@ -193,7 +193,7 @@ pub async fn verify_full_sync_aggregates_up_to( .map(|agg| agg.num_set_bits()) }) .map_err(|e| format!("Error while getting beacon block: {:?}", e))? - .ok_or(format!("Altair block {} should have sync aggregate", slot))?; + .map_err(|_| format!("Altair block {} should have sync aggregate", slot))?; if sync_aggregate_count != E::sync_committee_size() { return Err(format!( From f6b5b1a8be46ce21503ac2e91934438ded687321 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 6 Jan 2022 05:16:50 +0000 Subject: [PATCH 06/56] Use `?` debug formatting for block roots in beacon_chain.rs (#2890) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Issue Addressed NA ## Proposed Changes Ensures full roots are printed, rather than shortened versions like `0x935b…d376`. 
For example, it would be nice if we could do API queries based upon the roots shown in the `Beacon chain re-org` event: ``` Jan 05 12:36:52.224 WARN Beacon chain re-org reorg_distance: 2, new_slot: 2073184, new_head: 0x8a97…2dec, new_head_parent: 0xa985…7688, previous_slot: 2073183, previous_head: 0x935b…d376, service: beacon Jan 05 13:35:05.832 WARN Beacon chain re-org reorg_distance: 1, new_slot: 2073475, new_head: 0x9207…c6b9, new_head_parent: 0xb2ce…839b, previous_slot: 2073474, previous_head: 0x8066…92f7, service: beacon ``` ## Additional Info We should eventually fix this project-wide, however this is a short-term patch. --- beacon_node/beacon_chain/src/beacon_chain.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index eed4e4fb4b3..6edcb7d6c99 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3072,7 +3072,7 @@ impl BeaconChain { trace!( self.log, "Produced beacon block"; - "parent" => %block.parent_root(), + "parent" => ?block.parent_root(), "attestations" => block.body().attestations().len(), "slot" => block.slot() ); @@ -3178,10 +3178,10 @@ impl BeaconChain { warn!( self.log, "Beacon chain re-org"; - "previous_head" => %current_head.block_root, + "previous_head" => ?current_head.block_root, "previous_slot" => current_head.slot, - "new_head_parent" => %new_head.beacon_block.parent_root(), - "new_head" => %beacon_block_root, + "new_head_parent" => ?new_head.beacon_block.parent_root(), + "new_head" => ?beacon_block_root, "new_slot" => new_head.beacon_block.slot(), "reorg_distance" => reorg_distance, ); @@ -3189,11 +3189,11 @@ impl BeaconChain { debug!( self.log, "Head beacon block"; - "justified_root" => %new_head.beacon_state.current_justified_checkpoint().root, + "justified_root" => ?new_head.beacon_state.current_justified_checkpoint().root, "justified_epoch" => 
new_head.beacon_state.current_justified_checkpoint().epoch, - "finalized_root" => %new_head.beacon_state.finalized_checkpoint().root, + "finalized_root" => ?new_head.beacon_state.finalized_checkpoint().root, "finalized_epoch" => new_head.beacon_state.finalized_checkpoint().epoch, - "root" => %beacon_block_root, + "root" => ?beacon_block_root, "slot" => new_head.beacon_block.slot(), ); }; From 668477872e5142d98a45b3a41d854e55ce799d06 Mon Sep 17 00:00:00 2001 From: Philipp K Date: Fri, 7 Jan 2022 01:21:42 +0000 Subject: [PATCH 07/56] Allow value for beacon_node fee-recipient argument (#2884) ## Issue Addressed The fee-recipient argument of the beacon node does not allow a value to be specified: > $ lighthouse beacon_node --merge --fee-recipient "0x332E43696A505EF45b9319973785F837ce5267b9" > error: Found argument '0x332E43696A505EF45b9319973785F837ce5267b9' which wasn't expected, or isn't valid in this context > > USAGE: > lighthouse beacon_node --fee-recipient --merge > > For more information try --help ## Proposed Changes Allow specifying a value for the fee-recipient argument in beacon_node/src/cli.rs ## Additional Info I've added .takes_value(true) and successfully proposed a block in the kintsugi testnet with my own fee-recipient address instead of the hardcoded default. 
I think that was just missed as the argument does not make sense without a value :) Co-authored-by: pk910 Co-authored-by: Michael Sproul Co-authored-by: Michael Sproul --- beacon_node/src/cli.rs | 2 ++ lighthouse/tests/beacon_node.rs | 20 +++++++++++++++++++- 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 0b2cda91ef4..57de6c1b914 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -402,11 +402,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("fee-recipient") .long("fee-recipient") + .value_name("FEE-RECIPIENT") .help("Once the merge has happened, this address will receive transaction fees \ collected from any blocks produced by this node. Defaults to a junk \ address whilst the merge is in development stages. THE DEFAULT VALUE \ WILL BE REMOVED BEFORE THE MERGE ENTERS PRODUCTION") .requires("merge") + .takes_value(true) ) /* diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 73d5a20657d..6d03cafe10b 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -11,7 +11,7 @@ use std::process::Command; use std::str::FromStr; use std::string::ToString; use tempfile::TempDir; -use types::{Checkpoint, Epoch, Hash256}; +use types::{Address, Checkpoint, Epoch, Hash256}; const DEFAULT_ETH1_ENDPOINT: &str = "http://localhost:8545/"; @@ -206,6 +206,24 @@ fn eth1_purge_cache_flag() { .with_config(|config| assert!(config.eth1.purge_cache)); } +// Tests for Merge flags. +#[test] +fn merge_fee_recipient_flag() { + CommandLineTest::new() + .flag("merge", None) + .flag( + "fee-recipient", + Some("0x00000000219ab540356cbb839cbe05303d7705fa"), + ) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.suggested_fee_recipient, + Some(Address::from_str("0x00000000219ab540356cbb839cbe05303d7705fa").unwrap()) + ) + }); +} + // Tests for Network flags. 
#[test] fn network_dir_flag() { From 20941bc0f7869533c714ca796679cfd33007e4d9 Mon Sep 17 00:00:00 2001 From: Mac L Date: Fri, 7 Jan 2022 05:32:29 +0000 Subject: [PATCH 08/56] Fix off-by-one in block packing lcli (#2878) ## Issue Addressed The current `lcli` block packing code has an off-by-one where it would include an extra slot (the oldest slot) of attestations as "available" (this means there would be 33 slots of "available" attestations instead of 32). There is typically only single-digit attestations remaining from that slot and as such does not cause a significant change to the results although every efficiency will have been very slightly under-reported. ## Proposed Changes Prune the `available_attestation_set` before writing out the data instead of after. ## Additional Info This `lcli` code will soon be deprecated by a Lighthouse API (#2879) which will run significantly faster and will be used to hook into our upcoming monitoring platform #2873. --- lcli/src/etl/block_efficiency.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lcli/src/etl/block_efficiency.rs b/lcli/src/etl/block_efficiency.rs index 45452735dc0..87175ace892 100644 --- a/lcli/src/etl/block_efficiency.rs +++ b/lcli/src/etl/block_efficiency.rs @@ -274,6 +274,9 @@ pub async fn run(matches: &ArgMatches<'_>) -> Result<(), String> { // Add them to the set. included_attestations_set.extend(attestations_in_block.clone()); + // Remove expired available attestations. + available_attestations_set.retain(|x| x.slot >= (slot.as_u64().saturating_sub(32))); + // Don't write data from the initialization epoch. if epoch != initialization_epoch { let included = attestations_in_block.len(); @@ -309,9 +312,6 @@ pub async fn run(matches: &ArgMatches<'_>) -> Result<(), String> { } } } - - // Remove expired available attestations. 
- available_attestations_set.retain(|x| x.slot >= (slot.as_u64().saturating_sub(32))); } let mut offline = "None".to_string(); From daa3da3758c44284bf143dd1dff3ab1af5cbdc78 Mon Sep 17 00:00:00 2001 From: Akihito Nakano Date: Fri, 7 Jan 2022 05:32:33 +0000 Subject: [PATCH 09/56] Add tests for flags `enable-enr-auto-update` and `disable-packet-filter` (#2887) Resolves https://github.com/sigp/lighthouse/issues/2602 ## Issue Addressed https://github.com/sigp/lighthouse/pull/2749#issue-1037552417 > ## Open TODO > Add tests for boot_node flags `enable-enr-auto-update` and `disable-packet-filter`. They end up in [Discv5Config](https://github.com/mooori/lighthouse/blob/9ed2cba6bc3e41f08207cb0eeaf9e4aee40d05dd/boot_node/src/config.rs#L29), which doesn't support serde (de)serialization. ## Proposed Changes - Added tests for flags `enable-enr-auto-update` and `disable-packet-filter` - Instead of (de)serialize Discv5Config, added the two fields copied from Discv5Config to BootNodeConfigSerialization. --- boot_node/src/config.rs | 8 ++++++-- lighthouse/tests/boot_node.rs | 22 +++++++++++++++++++--- 2 files changed, 25 insertions(+), 5 deletions(-) diff --git a/boot_node/src/config.rs b/boot_node/src/config.rs index 1e550e60c44..4df7a5f235e 100644 --- a/boot_node/src/config.rs +++ b/boot_node/src/config.rs @@ -132,13 +132,15 @@ impl BootNodeConfig { /// The set of configuration parameters that can safely be (de)serialized. /// -/// Its fields are a subset of the fields of `BootNodeConfig`. +/// Its fields are a subset of the fields of `BootNodeConfig`, some of them are copied from `Discv5Config`. 
#[derive(Serialize, Deserialize)] pub struct BootNodeConfigSerialization { pub listen_socket: SocketAddr, // TODO: Generalise to multiaddr pub boot_nodes: Vec, pub local_enr: Enr, + pub disable_packet_filter: bool, + pub enable_enr_auto_update: bool, } impl BootNodeConfigSerialization { @@ -150,7 +152,7 @@ impl BootNodeConfigSerialization { boot_nodes, local_enr, local_key: _, - discv5_config: _, + discv5_config, phantom: _, } = config; @@ -158,6 +160,8 @@ impl BootNodeConfigSerialization { listen_socket: *listen_socket, boot_nodes: boot_nodes.clone(), local_enr: local_enr.clone(), + disable_packet_filter: !discv5_config.enable_packet_filter, + enable_enr_auto_update: discv5_config.enr_update, } } } diff --git a/lighthouse/tests/boot_node.rs b/lighthouse/tests/boot_node.rs index ac23002c376..7b3c3acb3ca 100644 --- a/lighthouse/tests/boot_node.rs +++ b/lighthouse/tests/boot_node.rs @@ -139,9 +139,25 @@ fn enr_port_flag() { }) } -// TODO add tests for flags `enable-enr-auto-update` and `disable-packet-filter`. -// -// These options end up in `Discv5Config`, which doesn't support serde (de)serialization. 
+#[test] +fn disable_packet_filter_flag() { + CommandLineTest::new() + .flag("disable-packet-filter", None) + .run_with_ip() + .with_config(|config| { + assert_eq!(config.disable_packet_filter, true); + }); +} + +#[test] +fn enable_enr_auto_update_flag() { + CommandLineTest::new() + .flag("enable-enr-auto-update", None) + .run_with_ip() + .with_config(|config| { + assert_eq!(config.enable_enr_auto_update, true); + }); +} #[test] fn network_dir_flag() { From ccdc10c288bfe1a52191e1375e2fbbd18eca932b Mon Sep 17 00:00:00 2001 From: Fredrik Svantes Date: Fri, 7 Jan 2022 05:32:34 +0000 Subject: [PATCH 10/56] Adjusting ARCHIVE_URL (#2892) Was renamed from eth2-clients to eth-clients --- validator_client/slashing_protection/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/validator_client/slashing_protection/Makefile b/validator_client/slashing_protection/Makefile index ea51193a541..e3d935b4c98 100644 --- a/validator_client/slashing_protection/Makefile +++ b/validator_client/slashing_protection/Makefile @@ -2,7 +2,7 @@ TESTS_TAG := v5.2.1 GENERATE_DIR := generated-tests OUTPUT_DIR := interchange-tests TARBALL := $(OUTPUT_DIR)-$(TESTS_TAG).tar.gz -ARCHIVE_URL := https://github.com/eth2-clients/slashing-protection-interchange-tests/tarball/$(TESTS_TAG) +ARCHIVE_URL := https://github.com/eth-clients/slashing-protection-interchange-tests/tarball/$(TESTS_TAG) ifeq ($(OS),Windows_NT) ifeq (, $(shell where rm)) From 65b1374b587483b11475aa37f0bd2899dd7d4bff Mon Sep 17 00:00:00 2001 From: Richard Patel Date: Sat, 8 Jan 2022 01:15:07 +0000 Subject: [PATCH 11/56] Document Homebrew package (#2885) ## Issue Addressed Resolves #2329 ## Proposed Changes Documents the recently added `lighthouse` Homebrew formula. 
## Additional Info NA Co-authored-by: Richard Patel Co-authored-by: Michael Sproul --- book/src/SUMMARY.md | 1 + book/src/homebrew.md | 36 ++++++++++++++++++++++++++++++++++++ book/src/installation.md | 4 ++++ 3 files changed, 41 insertions(+) create mode 100644 book/src/homebrew.md diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 93cec12401e..7552d42306c 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -10,6 +10,7 @@ * [Build from Source](./installation-source.md) * [Raspberry Pi 4](./pi.md) * [Cross-Compiling](./cross-compiling.md) + * [Homebrew](./homebrew.md) * [Key Management](./key-management.md) * [Create a wallet](./wallet-create.md) * [Create a validator](./validator-create.md) diff --git a/book/src/homebrew.md b/book/src/homebrew.md new file mode 100644 index 00000000000..317dc0e0fa6 --- /dev/null +++ b/book/src/homebrew.md @@ -0,0 +1,36 @@ +# Homebrew package + +Lighthouse is available on Linux and macOS via the [Homebrew package manager](https://brew.sh). + +Please note that this installation method is maintained by the Homebrew community. +It is not officially supported by the Lighthouse team. + +### Installation + +Install the latest version of the [`lighthouse`][formula] formula with: + +```bash +brew install lighthouse +``` + +### Usage + +If Homebrew is installed to your `PATH` (default), simply run: + +```bash +lighthouse --help +``` + +Alternatively, you can find the `lighthouse` binary at: + +```bash +"$(brew --prefix)/bin/lighthouse" --help +``` + +### Maintenance + +The [formula][] is kept up-to-date by the Homebrew community and a bot that listens for new releases. + +The package source can be found in the [homebrew-core](https://github.com/Homebrew/homebrew-core/blob/master/Formula/lighthouse.rb) repo. 
+ + [formula]: https://formulae.brew.sh/formula/lighthouse diff --git a/book/src/installation.md b/book/src/installation.md index 009bfc00c0f..38fbe6b7808 100644 --- a/book/src/installation.md +++ b/book/src/installation.md @@ -8,6 +8,10 @@ There are three core methods to obtain the Lighthouse application: - [Docker images](./docker.md). - [Building from source](./installation-source.md). +The community maintains additional installation methods (currently only one). + +- [Homebrew package](./homebrew.md). + Additionally, there are two extra guides for specific uses: - [Rapsberry Pi 4 guide](./pi.md). From 02e2fd2fb8cd27070e4acc39872bd4e7a38497de Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 11 Jan 2022 01:35:55 +0000 Subject: [PATCH 12/56] Add early attester cache (#2872) ## Issue Addressed NA ## Proposed Changes Introduces a cache to attestation to produce atop blocks which will become the head, but are not fully imported (e.g., not inserted into the database). Whilst attesting to a block before it's imported is rather easy, if we're going to produce that attestation then we also need to be able to: 1. Verify that attestation. 1. Respond to RPC requests for the `beacon_block_root`. Attestation verification (1) is *partially* covered. Since we prime the shuffling cache before we insert the block into the early attester cache, we should be fine for all typical use-cases. However, it is possible that the cache is washed out before we've managed to insert the state into the database and then attestation verification will fail with a "missing beacon state"-type error. Providing the block via RPC (2) is also partially covered, since we'll check the database *and* the early attester cache when responding a blocks-by-root request. However, we'll still omit the block from blocks-by-range requests (until the block lands in the DB). I *think* this is fine, since there's no guarantee that we return all blocks for those responses. 
Another important consideration is whether or not the *parent* of the early attester block is available in the database. If it were not, we might fail to respond to blocks-by-root requests that are iterating backwards to collect a chain of blocks. I argue that *we will always have the parent of the early attester block in the database.* This is because we are holding the fork-choice write-lock when inserting the block into the early attester cache and we do not drop that until the block is in the database. --- .../src/attestation_verification.rs | 14 +- .../beacon_chain/src/attester_cache.rs | 19 ++- beacon_node/beacon_chain/src/beacon_chain.rs | 90 ++++++++++ beacon_node/beacon_chain/src/builder.rs | 1 + .../beacon_chain/src/early_attester_cache.rs | 161 ++++++++++++++++++ beacon_node/beacon_chain/src/lib.rs | 1 + beacon_node/beacon_chain/src/metrics.rs | 8 + .../tests/attestation_production.rs | 18 ++ .../beacon_processor/worker/rpc_methods.rs | 2 +- 9 files changed, 304 insertions(+), 10 deletions(-) create mode 100644 beacon_node/beacon_chain/src/early_attester_cache.rs diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index c672ff6be6a..85d7b2b7d59 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -986,11 +986,17 @@ fn verify_head_block_is_known( attestation: &Attestation, max_skip_slots: Option, ) -> Result { - if let Some(block) = chain + let block_opt = chain .fork_choice .read() .get_block(&attestation.data.beacon_block_root) - { + .or_else(|| { + chain + .early_attester_cache + .get_proto_block(attestation.data.beacon_block_root) + }); + + if let Some(block) = block_opt { // Reject any block that exceeds our limit on skipped slots. 
if let Some(max_skip_slots) = max_skip_slots { if attestation.data.slot > block.slot + max_skip_slots { @@ -1242,7 +1248,9 @@ where // processing an attestation that does not include our latest finalized block in its chain. // // We do not delay consideration for later, we simply drop the attestation. - if !chain.fork_choice.read().contains_block(&target.root) { + if !chain.fork_choice.read().contains_block(&target.root) + && !chain.early_attester_cache.contains_block(target.root) + { return Err(Error::UnknownTargetRoot(target.root)); } diff --git a/beacon_node/beacon_chain/src/attester_cache.rs b/beacon_node/beacon_chain/src/attester_cache.rs index 01662efc135..24963a125d2 100644 --- a/beacon_node/beacon_chain/src/attester_cache.rs +++ b/beacon_node/beacon_chain/src/attester_cache.rs @@ -75,7 +75,7 @@ impl From for Error { /// Stores the minimal amount of data required to compute the committee length for any committee at any /// slot in a given `epoch`. -struct CommitteeLengths { +pub struct CommitteeLengths { /// The `epoch` to which the lengths pertain. epoch: Epoch, /// The length of the shuffling in `self.epoch`. @@ -84,7 +84,7 @@ struct CommitteeLengths { impl CommitteeLengths { /// Instantiate `Self` using `state.current_epoch()`. - fn new(state: &BeaconState, spec: &ChainSpec) -> Result { + pub fn new(state: &BeaconState, spec: &ChainSpec) -> Result { let active_validator_indices_len = if let Ok(committee_cache) = state.committee_cache(RelativeEpoch::Current) { @@ -101,8 +101,16 @@ impl CommitteeLengths { }) } + /// Get the count of committees per each slot of `self.epoch`. + pub fn get_committee_count_per_slot( + &self, + spec: &ChainSpec, + ) -> Result { + T::get_committee_count_per_slot(self.active_validator_indices_len, spec).map_err(Into::into) + } + /// Get the length of the committee at the given `slot` and `committee_index`. 
- fn get( + pub fn get_committee_length( &self, slot: Slot, committee_index: CommitteeIndex, @@ -120,8 +128,7 @@ impl CommitteeLengths { } let slots_per_epoch = slots_per_epoch as usize; - let committees_per_slot = - T::get_committee_count_per_slot(self.active_validator_indices_len, spec)?; + let committees_per_slot = self.get_committee_count_per_slot::(spec)?; let index_in_epoch = compute_committee_index_in_epoch( slot, slots_per_epoch, @@ -172,7 +179,7 @@ impl AttesterCacheValue { spec: &ChainSpec, ) -> Result<(JustifiedCheckpoint, CommitteeLength), Error> { self.committee_lengths - .get::(slot, committee_index, spec) + .get_committee_length::(slot, committee_index, spec) .map(|committee_length| (self.current_justified_checkpoint, committee_length)) } } diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 6edcb7d6c99..f2a2271542b 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -12,6 +12,7 @@ use crate::block_verification::{ IntoFullyVerifiedBlock, }; use crate::chain_config::ChainConfig; +use crate::early_attester_cache::EarlyAttesterCache; use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::eth1_chain::{Eth1Chain, Eth1ChainBackend}; use crate::events::ServerSentEventHandler; @@ -107,6 +108,9 @@ pub const OP_POOL_DB_KEY: Hash256 = Hash256::zero(); pub const ETH1_CACHE_DB_KEY: Hash256 = Hash256::zero(); pub const FORK_CHOICE_DB_KEY: Hash256 = Hash256::zero(); +/// Defines how old a block can be before it's no longer a candidate for the early attester cache. +const EARLY_ATTESTER_CACHE_HISTORIC_SLOTS: u64 = 4; + /// Defines the behaviour when a block/block-root for a skipped slot is requested. pub enum WhenSlotSkipped { /// If the slot is a skip slot, return `None`. @@ -328,6 +332,8 @@ pub struct BeaconChain { pub(crate) validator_pubkey_cache: TimeoutRwLock>, /// A cache used when producing attestations. 
pub(crate) attester_cache: Arc, + /// A cache used when producing attestations whilst the head block is still being imported. + pub early_attester_cache: EarlyAttesterCache, /// A cache used to keep track of various block timings. pub block_times_cache: Arc>, /// A list of any hard-coded forks that have been disabled. @@ -926,6 +932,28 @@ impl BeaconChain { )? } + /// Returns the block at the given root, if any. + /// + /// Will also check the early attester cache for the block. Because of this, there's no + /// guarantee that a block returned from this function has a `BeaconState` available in + /// `self.store`. The expected use for this function is *only* for returning blocks requested + /// from P2P peers. + /// + /// ## Errors + /// + /// May return a database error. + pub fn get_block_checking_early_attester_cache( + &self, + block_root: &Hash256, + ) -> Result>, Error> { + let block_opt = self + .store + .get_block(block_root)? + .or_else(|| self.early_attester_cache.get_block(*block_root)); + + Ok(block_opt) + } + /// Returns the block at the given root, if any. /// /// ## Errors @@ -1422,6 +1450,29 @@ impl BeaconChain { ) -> Result, Error> { let _total_timer = metrics::start_timer(&metrics::ATTESTATION_PRODUCTION_SECONDS); + // The early attester cache will return `Some(attestation)` in the scenario where there is a + // block being imported that will become the head block, but that block has not yet been + // inserted into the database and set as `self.canonical_head`. + // + // In effect, the early attester cache prevents slow database IO from causing missed + // head/target votes. + match self + .early_attester_cache + .try_attest(request_slot, request_index, &self.spec) + { + // The cache matched this request, return the value. + Ok(Some(attestation)) => return Ok(attestation), + // The cache did not match this request, proceed with the rest of this function. + Ok(None) => (), + // The cache returned an error. 
Log the error and proceed with the rest of this + // function. + Err(e) => warn!( + self.log, + "Early attester cache failed"; + "error" => ?e + ), + } + let slots_per_epoch = T::EthSpec::slots_per_epoch(); let request_epoch = request_slot.epoch(slots_per_epoch); @@ -2602,6 +2653,42 @@ impl BeaconChain { } } + // If the block is recent enough, check to see if it becomes the head block. If so, apply it + // to the early attester cache. This will allow attestations to the block without waiting + // for the block and state to be inserted to the database. + // + // Only performing this check on recent blocks avoids slowing down sync with lots of calls + // to fork choice `get_head`. + if block.slot() + EARLY_ATTESTER_CACHE_HISTORIC_SLOTS >= current_slot { + let new_head_root = fork_choice + .get_head(current_slot, &self.spec) + .map_err(BeaconChainError::from)?; + + if new_head_root == block_root { + if let Some(proto_block) = fork_choice.get_block(&block_root) { + if let Err(e) = self.early_attester_cache.add_head_block( + block_root, + signed_block.clone(), + proto_block, + &state, + &self.spec, + ) { + warn!( + self.log, + "Early attester cache insert failed"; + "error" => ?e + ); + } + } else { + warn!( + self.log, + "Early attester block missing"; + "block_root" => ?block_root + ); + } + } + } + // Register sync aggregate with validator monitor if let Ok(sync_aggregate) = block.body().sync_aggregate() { // `SyncCommittee` for the sync_aggregate should correspond to the duty slot @@ -3248,6 +3335,9 @@ impl BeaconChain { drop(lag_timer); + // Clear the early attester cache in case it conflicts with `self.canonical_head`. + self.early_attester_cache.clear(); + // Update the snapshot that stores the head of the chain at the time it received the // block. 
*self diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 54397a7d556..4662d05d3db 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -763,6 +763,7 @@ where block_times_cache: <_>::default(), validator_pubkey_cache: TimeoutRwLock::new(validator_pubkey_cache), attester_cache: <_>::default(), + early_attester_cache: <_>::default(), disabled_forks: self.disabled_forks, shutdown_sender: self .shutdown_sender diff --git a/beacon_node/beacon_chain/src/early_attester_cache.rs b/beacon_node/beacon_chain/src/early_attester_cache.rs new file mode 100644 index 00000000000..56dced94e62 --- /dev/null +++ b/beacon_node/beacon_chain/src/early_attester_cache.rs @@ -0,0 +1,161 @@ +use crate::{ + attester_cache::{CommitteeLengths, Error}, + metrics, +}; +use parking_lot::RwLock; +use proto_array::Block as ProtoBlock; +use types::*; + +pub struct CacheItem { + /* + * Values used to create attestations. + */ + epoch: Epoch, + committee_lengths: CommitteeLengths, + beacon_block_root: Hash256, + source: Checkpoint, + target: Checkpoint, + /* + * Values used to make the block available. + */ + block: SignedBeaconBlock, + proto_block: ProtoBlock, +} + +/// Provides a single-item cache which allows for attesting to blocks before those blocks have +/// reached the database. +/// +/// This cache stores enough information to allow Lighthouse to: +/// +/// - Produce an attestation without using `chain.canonical_head`. +/// - Verify that a block root exists (i.e., will be imported in the future) during attestation +/// verification. +/// - Provide a block which can be sent to peers via RPC. +#[derive(Default)] +pub struct EarlyAttesterCache { + item: RwLock>>, +} + +impl EarlyAttesterCache { + /// Removes the cached item, meaning that all future calls to `Self::try_attest` will return + /// `None` until a new cache item is added. 
+ pub fn clear(&self) { + *self.item.write() = None + } + + /// Updates the cache item, so that `Self::try_attest` will return `Some` when given suitable + /// parameters. + pub fn add_head_block( + &self, + beacon_block_root: Hash256, + block: SignedBeaconBlock, + proto_block: ProtoBlock, + state: &BeaconState, + spec: &ChainSpec, + ) -> Result<(), Error> { + let epoch = state.current_epoch(); + let committee_lengths = CommitteeLengths::new(state, spec)?; + let source = state.current_justified_checkpoint(); + let target_slot = epoch.start_slot(E::slots_per_epoch()); + let target = Checkpoint { + epoch, + root: if state.slot() <= target_slot { + beacon_block_root + } else { + *state.get_block_root(target_slot)? + }, + }; + + let item = CacheItem { + epoch, + committee_lengths, + beacon_block_root, + source, + target, + block, + proto_block, + }; + + *self.item.write() = Some(item); + + Ok(()) + } + + /// Will return `Some(attestation)` if all the following conditions are met: + /// + /// - There is a cache `item` present. + /// - If `request_slot` is in the same epoch as `item.epoch`. + /// - If `request_index` does not exceed `item.committee_count`. 
+ pub fn try_attest( + &self, + request_slot: Slot, + request_index: CommitteeIndex, + spec: &ChainSpec, + ) -> Result>, Error> { + let lock = self.item.read(); + let item = if let Some(item) = lock.as_ref() { + item + } else { + return Ok(None); + }; + + let request_epoch = request_slot.epoch(E::slots_per_epoch()); + if request_epoch != item.epoch { + return Ok(None); + } + + let committee_count = item + .committee_lengths + .get_committee_count_per_slot::(spec)?; + if request_index >= committee_count as u64 { + return Ok(None); + } + + let committee_len = + item.committee_lengths + .get_committee_length::(request_slot, request_index, spec)?; + + let attestation = Attestation { + aggregation_bits: BitList::with_capacity(committee_len) + .map_err(BeaconStateError::from)?, + data: AttestationData { + slot: request_slot, + index: request_index, + beacon_block_root: item.beacon_block_root, + source: item.source, + target: item.target, + }, + signature: AggregateSignature::empty(), + }; + + metrics::inc_counter(&metrics::BEACON_EARLY_ATTESTER_CACHE_HITS); + + Ok(Some(attestation)) + } + + /// Returns `true` if `block_root` matches the cached item. + pub fn contains_block(&self, block_root: Hash256) -> bool { + self.item + .read() + .as_ref() + .map_or(false, |item| item.beacon_block_root == block_root) + } + + /// Returns the block, if `block_root` matches the cached item. + pub fn get_block(&self, block_root: Hash256) -> Option> { + self.item + .read() + .as_ref() + .filter(|item| item.beacon_block_root == block_root) + .map(|item| item.block.clone()) + } + + /// Returns the proto-array block, if `block_root` matches the cached item. 
+ pub fn get_proto_block(&self, block_root: Hash256) -> Option { + self.item + .read() + .as_ref() + .filter(|item| item.beacon_block_root == block_root) + .map(|item| item.proto_block.clone()) + } +} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 513467cef83..768a8695515 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -9,6 +9,7 @@ mod block_times_cache; mod block_verification; pub mod builder; pub mod chain_config; +mod early_attester_cache; mod errors; pub mod eth1_chain; pub mod events; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 32ebe70921b..32dfc266f34 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -240,6 +240,14 @@ lazy_static! { pub static ref SHUFFLING_CACHE_MISSES: Result = try_create_int_counter("beacon_shuffling_cache_misses_total", "Count of times shuffling cache fulfils request"); + /* + * Early attester cache + */ + pub static ref BEACON_EARLY_ATTESTER_CACHE_HITS: Result = try_create_int_counter( + "beacon_early_attester_cache_hits", + "Count of times the early attester cache returns an attestation" + ); + /* * Attestation Production */ diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index 1ce2411c41d..4d862cbac72 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -122,6 +122,24 @@ fn produces_attestations() { ); assert_eq!(data.target.epoch, state.current_epoch(), "bad target epoch"); assert_eq!(data.target.root, target_root, "bad target root"); + + let early_attestation = { + let proto_block = chain.fork_choice.read().get_block(&block_root).unwrap(); + chain + .early_attester_cache + .add_head_block(block_root, block.clone(), proto_block, &state, &chain.spec) + .unwrap(); + chain + 
.early_attester_cache + .try_attest(slot, index, &chain.spec) + .unwrap() + .unwrap() + }; + + assert_eq!( + attestation, early_attestation, + "early attester cache inconsistent" + ); } } } diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index f3d49c2b425..f79a655745f 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -129,7 +129,7 @@ impl Worker { ) { let mut send_block_count = 0; for root in request.block_roots.iter() { - if let Ok(Some(block)) = self.chain.store.get_block(root) { + if let Ok(Some(block)) = self.chain.get_block_checking_early_attester_cache(root) { self.send_response( peer_id, Response::BlocksByRoot(Some(Box::new(block))), From 6976796162432fa346a453d357d1e915d27cd348 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 11 Jan 2022 01:35:56 +0000 Subject: [PATCH 13/56] Update dependencies including `sha2` (#2896) ## Proposed Changes Although the [security advisory](https://rustsec.org/advisories/RUSTSEC-2021-0100.html) only lists `sha2` 0.9.7 as vulnerable, the [changelog](https://github.com/RustCrypto/hashes/blob/master/sha2/CHANGELOG.md#099-2022-01-06) states that 0.9.8 is also vulnerable, and has been yanked. 
--- Cargo.lock | 249 +++++++++++++++++++++++++++-------------------------- 1 file changed, 129 insertions(+), 120 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 17d83a0a4a2..ec56aab4992 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -141,9 +141,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.51" +version = "1.0.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b26702f315f53b6071259e15dd9d64528213b44d61de1ec926eca7715d62203" +checksum = "84450d0b4a8bd1ba4144ce8ce718fbc5d071358b1e5384bace6536b3d1f2d5b3" [[package]] name = "arbitrary" @@ -205,7 +205,7 @@ dependencies = [ "futures-sink", "futures-util", "memchr", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.8", ] [[package]] @@ -550,9 +550,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.8.0" +version = "3.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f1e260c3a9040a7c19a12468758f4c16f31a81a1fe087482be9570ec864bb6c" +checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899" [[package]] name = "byte-slice-cast" @@ -769,9 +769,9 @@ dependencies = [ [[package]] name = "cmake" -version = "0.1.46" +version = "0.1.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7b858541263efe664aead4a5209a4ae5c5d2811167d4ed4ee0944503f8d2089" +checksum = "e8ad8cef104ac57b68b89df3208164d228503abbdce70f6880ffa3d970e7443a" dependencies = [ "cc", ] @@ -900,9 +900,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ed27e177f16d65f0f0c22a213e17c696ace5dd64b14258b52f9417ccb52db4" +checksum = "e54ea8bc3fb1ee042f5aace6e3c6e025d3874866da222930f70ce62aceba0bfa" dependencies = [ "cfg-if", "crossbeam-utils", @@ -921,9 +921,9 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.5" 
+version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec02e091aa634e2c3ada4a392989e7c3116673ef0ac5b72232439094d73b7fd" +checksum = "97242a70df9b89a65d0b6df3c4bf5b9ce03c5b7309019777fbde37e7537f8762" dependencies = [ "cfg-if", "crossbeam-utils", @@ -934,9 +934,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d82cfc11ce7f2c3faef78d8a684447b40d503d9681acebed6cb728d45940c4db" +checksum = "cfcae03edb34f947e64acdb1c33ec169824e20657e9ecb61cef6c8c74dcb8120" dependencies = [ "cfg-if", "lazy_static", @@ -1114,7 +1114,7 @@ dependencies = [ "hex", "reqwest", "serde_json", - "sha2 0.9.8", + "sha2 0.9.9", "tree_hash", "types", ] @@ -1265,7 +1265,7 @@ dependencies = [ "parking_lot", "rand 0.8.4", "rlp 0.5.1", - "sha2 0.9.8", + "sha2 0.9.9", "smallvec", "tokio", "tokio-stream", @@ -1296,9 +1296,9 @@ dependencies = [ [[package]] name = "ecdsa" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91ae02c7618ee05108cd86a0be2f5586d1f0d965bede7ecfd46815f1b860227" +checksum = "d0d69ae62e0ce582d56380743515fefaf1a8c70cec685d9677636d7e30ae9dc9" dependencies = [ "der 0.5.1", "elliptic-curve 0.11.6", @@ -1325,7 +1325,7 @@ dependencies = [ "ed25519", "rand 0.7.3", "serde", - "sha2 0.9.8", + "sha2 0.9.9", "zeroize", ] @@ -1576,7 +1576,7 @@ dependencies = [ "lazy_static", "ring", "rustc-hex", - "sha2 0.9.8", + "sha2 0.9.9", "wasm-bindgen-test", ] @@ -1589,7 +1589,7 @@ dependencies = [ "cpufeatures 0.1.5", "lazy_static", "ring", - "sha2 0.9.8", + "sha2 0.9.9", ] [[package]] @@ -1615,7 +1615,7 @@ dependencies = [ "hex", "num-bigint-dig", "ring", - "sha2 0.9.8", + "sha2 0.9.9", "zeroize", ] @@ -1634,7 +1634,7 @@ dependencies = [ "serde", "serde_json", "serde_repr", - "sha2 0.9.8", + "sha2 0.9.9", "tempfile", 
"unicode-normalization", "uuid", @@ -1891,6 +1891,15 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" +[[package]] +name = "fastrand" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "779d043b6a0b90cc4c0ed7ee380a6504394cee7efd7db050e3774eee387324b2" +dependencies = [ + "instant", +] + [[package]] name = "ff" version = "0.9.0" @@ -1962,9 +1971,9 @@ dependencies = [ [[package]] name = "fixedbitset" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "398ea4fabe40b9b0d885340a2a991a44c8a645624075ad966d21f88688e2b69e" +checksum = "279fb028e20b3c4c320317955b77c5e0c9701f05a1d309905d6fc702cdc5053e" [[package]] name = "flate2" @@ -2140,16 +2149,16 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.8", "pin-utils", "slab", ] [[package]] name = "generic-array" -version = "0.14.4" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" +checksum = "fd48d33ec7f05fbfa152300fdad764757cbded343c1aa1cff2fbaf4134851803" dependencies = [ "typenum", "version_check", @@ -2268,9 +2277,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.9" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f072413d126e57991455e0a922b31e4c8ba7c2ffbebf6b78b4f8521397d65cd" +checksum = "0c9de88456263e249e241fcd211d3954e2c9b0ef7ccfc235a444eb367cae3689" dependencies = [ "bytes", "fnv", @@ -2427,13 +2436,13 @@ dependencies = [ [[package]] name = "http" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1323096b05d41827dadeaee54c9981958c0f94e670bc94ed80037d1a7b8b186b" +checksum = "31f4c6746584866f0feabcc69893c5b51beef3831656a968ed7ae254cdc4fd03" dependencies = [ "bytes", "fnv", - "itoa 0.4.8", + "itoa 1.0.1", ] [[package]] @@ -2444,7 +2453,7 @@ checksum = "1ff4f84919677303da5f147645dbea6b1881f368d03ac84e1dc09031ebd7b2c6" dependencies = [ "bytes", "http", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.8", ] [[package]] @@ -2533,7 +2542,7 @@ dependencies = [ "httparse", "httpdate", "itoa 0.4.8", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.8", "socket2 0.4.2", "tokio", "tower-service", @@ -2673,9 +2682,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc633605454125dec4b66843673f01c7df2b89479b32e0ed634e43a91cff62a5" +checksum = "282a6247722caba404c065016bbfa522806e51714c34f5dfc3e4a3a46fcb4223" dependencies = [ "autocfg 1.0.1", "hashbrown", @@ -2789,7 +2798,7 @@ dependencies = [ "cfg-if", "ecdsa 0.11.1", "elliptic-curve 0.9.12", - "sha2 0.9.8", + "sha2 0.9.9", ] [[package]] @@ -2913,9 +2922,9 @@ checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" [[package]] name = "libmdbx" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75aa79307892c0000dd0a8169c4db5529d32ca2302587d552870903109b46925" +checksum = "c9a8a3723c12c5caa3f2a456b645063d1d8ffb1562895fa43746a999d205b0c6" dependencies = [ "bitflags", "byteorder", @@ -2954,7 +2963,7 @@ dependencies = [ "libp2p-yamux", "multiaddr", "parking_lot", - "pin-project 1.0.8", + "pin-project 1.0.10", "rand 0.7.3", "smallvec", ] @@ -2979,13 +2988,13 @@ dependencies = [ "multihash", "multistream-select 0.10.4", "parking_lot", - "pin-project 1.0.8", + "pin-project 1.0.10", "prost", "prost-build", "rand 0.8.4", "ring", "rw-stream-sink", - "sha2 0.9.8", + "sha2 0.9.9", "smallvec", 
"thiserror", "unsigned-varint 0.7.1", @@ -3014,13 +3023,13 @@ dependencies = [ "multistream-select 0.11.0", "p256", "parking_lot", - "pin-project 1.0.8", + "pin-project 1.0.10", "prost", "prost-build", "rand 0.8.4", "ring", "rw-stream-sink", - "sha2 0.10.0", + "sha2 0.10.1", "smallvec", "thiserror", "unsigned-varint 0.7.1", @@ -3058,12 +3067,12 @@ dependencies = [ "libp2p-swarm", "log", "open-metrics-client", - "pin-project 1.0.8", + "pin-project 1.0.10", "prost", "prost-build", "rand 0.7.3", "regex", - "sha2 0.10.0", + "sha2 0.10.1", "smallvec", "unsigned-varint 0.7.1", ] @@ -3127,7 +3136,7 @@ dependencies = [ "prost", "prost-build", "rand 0.8.4", - "sha2 0.10.0", + "sha2 0.10.1", "snow", "static_assertions", "x25519-dalek", @@ -3235,7 +3244,7 @@ dependencies = [ "libsecp256k1-gen-genmult 0.2.1", "rand 0.7.3", "serde", - "sha2 0.9.8", + "sha2 0.9.9", "typenum", ] @@ -3254,7 +3263,7 @@ dependencies = [ "libsecp256k1-gen-genmult 0.3.0", "rand 0.8.4", "serde", - "sha2 0.9.8", + "sha2 0.9.9", "typenum", ] @@ -3407,7 +3416,7 @@ dependencies = [ "regex", "serde", "serde_derive", - "sha2 0.9.8", + "sha2 0.9.9", "slog", "slog-async", "slog-term", @@ -3480,9 +3489,9 @@ dependencies = [ [[package]] name = "lru" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "469898e909a1774d844793b347135a0cd344ca2f69d082013ecb8061a2229a3a" +checksum = "274353858935c992b13c0ca408752e2121da852d07dec7ce5f108c77dfa14d1f" dependencies = [ "hashbrown", ] @@ -3551,9 +3560,9 @@ checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" [[package]] name = "mdbx-sys" -version = "0.11.1" +version = "0.11.4-git.20210105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6fb0496b0bc2274db9ae3ee92cf97bb29bf40e51b96ec1087a6374c4a42a05d" +checksum = "b21b3e0def3a5c880f6388ed2e33b695097c6b0eca039dae6010527b059f8be1" dependencies = [ "bindgen", "cc", @@ -3701,7 
+3710,7 @@ dependencies = [ "digest 0.9.0", "generic-array", "multihash-derive", - "sha2 0.9.8", + "sha2 0.9.9", "unsigned-varint 0.7.1", ] @@ -3770,7 +3779,7 @@ dependencies = [ "bytes", "futures", "log", - "pin-project 1.0.8", + "pin-project 1.0.10", "smallvec", "unsigned-varint 0.7.1", ] @@ -3783,7 +3792,7 @@ dependencies = [ "bytes", "futures", "log", - "pin-project 1.0.8", + "pin-project 1.0.10", "smallvec", "unsigned-varint 0.7.1", ] @@ -4118,10 +4127,10 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0e0c5310031b5d4528ac6534bccc1446c289ac45c47b277d5aa91089c5f74fa" dependencies = [ - "ecdsa 0.13.3", + "ecdsa 0.13.4", "elliptic-curve 0.11.6", "sec1", - "sha2 0.9.8", + "sha2 0.9.9", ] [[package]] @@ -4244,27 +4253,27 @@ dependencies = [ [[package]] name = "pin-project" -version = "0.4.28" +version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "918192b5c59119d51e0cd221f4d49dde9112824ba717369e903c97d076083d0f" +checksum = "9615c18d31137579e9ff063499264ddc1278e7b1982757ebc111028c4d1dc909" dependencies = [ - "pin-project-internal 0.4.28", + "pin-project-internal 0.4.29", ] [[package]] name = "pin-project" -version = "1.0.8" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "576bc800220cc65dac09e99e97b08b358cfab6e17078de8dc5fee223bd2d0c08" +checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e" dependencies = [ - "pin-project-internal 1.0.8", + "pin-project-internal 1.0.10", ] [[package]] name = "pin-project-internal" -version = "0.4.28" +version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be26700300be6d9d23264c73211d8190e755b6b5ca7a1b28230025511b52a5e" +checksum = "044964427019eed9d49d9d5bbce6047ef18f37100ea400912a9fa4a3523ab12a" dependencies = [ "proc-macro2", "quote", @@ -4273,9 +4282,9 @@ dependencies 
= [ [[package]] name = "pin-project-internal" -version = "1.0.8" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e8fe8163d14ce7f0cdac2e040116f22eac817edabff0be91e8aff7e9accf389" +checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb" dependencies = [ "proc-macro2", "quote", @@ -4290,9 +4299,9 @@ checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" [[package]] name = "pin-project-lite" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d31d11c69a6b52a174b42bdc0c30e5e11670f90788b2c471c31c1d17d449443" +checksum = "e280fbe77cc62c91527259e9442153f4688736748d24660126286329742b4c6c" [[package]] name = "pin-utils" @@ -4375,9 +4384,9 @@ dependencies = [ [[package]] name = "ppv-lite86" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed0cfbc8191465bed66e1718596ee0b0b35d5ee1f41c5df2189d0fe8bde535ba" +checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" [[package]] name = "primitive-types" @@ -4460,9 +4469,9 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro2" -version = "1.0.34" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f84e92c0f7c9d58328b85a78557813e4bd845130db68d7184635344399423b1" +checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029" dependencies = [ "unicode-xid", ] @@ -4625,9 +4634,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.10" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38bc8cc6a5f2e3655e0899c1b848643b2562f853f114bfec7be120678e3ace05" +checksum = "47aa80447ce4daf1717500037052af176af5d38cc3e571d9ec1c7353fc10c87d" dependencies = [ 
"proc-macro2", ] @@ -4862,7 +4871,7 @@ dependencies = [ "mime", "native-tls", "percent-encoding", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.8", "serde", "serde_json", "serde_urlencoded", @@ -5045,7 +5054,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4da5fcb054c46f5a5dff833b129285a93d3f0179531735e6c866e8cc307d2020" dependencies = [ "futures", - "pin-project 0.4.28", + "pin-project 0.4.29", "static_assertions", ] @@ -5123,7 +5132,7 @@ dependencies = [ "hmac 0.11.0", "pbkdf2 0.8.0", "salsa20", - "sha2 0.9.8", + "sha2 0.9.9", ] [[package]] @@ -5169,9 +5178,9 @@ dependencies = [ [[package]] name = "secp256k1-sys" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "827cb7cce42533829c792fc51b82fbf18b125b45a702ef2c8be77fce65463a7b" +checksum = "957da2573cde917463ece3570eab4a0b3f19de6f1646cde62e6fd3868f566036" dependencies = [ "cc", ] @@ -5248,9 +5257,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.132" +version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b9875c23cf305cd1fd7eb77234cbb705f21ea6a72c637a5c6db5fe4b8e7f008" +checksum = "97565067517b60e2d1ea8b268e59ce036de907ac523ad83a0475da04e818989a" dependencies = [ "serde_derive", ] @@ -5267,9 +5276,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.132" +version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecc0db5cb2556c0e558887d9bbdcf6ac4471e83ff66cf696e5419024d1606276" +checksum = "ed201699328568d8d08208fdd080e3ff594e6c422e438b6705905da01005d537" dependencies = [ "proc-macro2", "quote", @@ -5278,9 +5287,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.73" +version = "1.0.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bcbd0344bc6533bc7ec56df11d42fb70f1b912351c0825ccb7211b59d8af7cf5" +checksum = "ee2bb9cd061c5865d345bb02ca49fcef1391741b672b54a0bf7b679badec3142" dependencies = [ "itoa 1.0.1", "ryu", @@ -5337,9 +5346,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.9.8" +version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b69f9a4c9740d74c5baa3fd2e547f9525fa8088a8a958e0ca2409a514e33f5fa" +checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" dependencies = [ "block-buffer 0.9.0", "cfg-if", @@ -5350,9 +5359,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "900d964dd36bb15bcf2f2b35694c072feab74969a54f2bbeec7a2d725d2bdcb6" +checksum = "99c3bd8169c58782adad9290a9af5939994036b76187f7b4f0e6de91dbbfc0ec" dependencies = [ "cfg-if", "cpufeatures 0.2.1", @@ -5628,7 +5637,7 @@ dependencies = [ "rand_core 0.6.3", "ring", "rustc_version 0.3.3", - "sha2 0.9.8", + "sha2 0.9.9", "subtle", "x25519-dalek", ] @@ -5834,9 +5843,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.82" +version = "1.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8daf5dd0bb60cbd4137b1b587d2fc0ae729bc07cf01cd70b36a1ed5ade3b9d59" +checksum = "a684ac3dcd8913827e18cd09a68384ee66c1de24157e3c556c9ab16d85695fb7" dependencies = [ "proc-macro2", "quote", @@ -5894,13 +5903,13 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.2.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" +checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" dependencies = [ "cfg-if", + "fastrand", "libc", - "rand 0.8.4", "redox_syscall", "remove_dir_all", "winapi", @@ -6031,7 +6040,7 @@ dependencies = [ "pbkdf2 
0.4.0", "rand 0.7.3", "rustc-hash", - "sha2 0.9.8", + "sha2 0.9.9", "thiserror", "unicode-normalization", "wasm-bindgen", @@ -6094,7 +6103,7 @@ dependencies = [ "num_cpus", "once_cell", "parking_lot", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.8", "signal-hook-registry", "tokio-macros", "winapi", @@ -6102,11 +6111,11 @@ dependencies = [ [[package]] name = "tokio-io-timeout" -version = "1.1.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90c49f106be240de154571dd31fbe48acb10ba6c6dd6f6517ad603abffa42de9" +checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" dependencies = [ - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.8", "tokio", ] @@ -6149,7 +6158,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50145484efff8818b5ccd256697f36863f587da82cf8b409c53adf1e840798e3" dependencies = [ "futures-core", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.8", "tokio", "tokio-util", ] @@ -6162,7 +6171,7 @@ checksum = "e1a5f475f1b9d077ea1017ecbc60890fda8e54942d680ca0b1d2b47cfa2d861b" dependencies = [ "futures-util", "log", - "pin-project 1.0.8", + "pin-project 1.0.10", "tokio", "tungstenite 0.12.0", ] @@ -6175,7 +6184,7 @@ checksum = "511de3f85caf1c98983545490c3d09685fa8eb634e57eec22bb4db271f46cbd8" dependencies = [ "futures-util", "log", - "pin-project 1.0.8", + "pin-project 1.0.10", "tokio", "tungstenite 0.14.0", ] @@ -6191,7 +6200,7 @@ dependencies = [ "futures-io", "futures-sink", "log", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.8", "slab", "tokio", ] @@ -6219,7 +6228,7 @@ checksum = "375a639232caf30edfc78e8d89b2d4c375515393e7af7e16f01cd96917fb2105" dependencies = [ "cfg-if", "log", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.8", "tracing-attributes", "tracing-core", ] @@ -6250,7 +6259,7 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" dependencies = [ - "pin-project 1.0.8", + "pin-project 1.0.10", "tracing", ] @@ -6267,9 +6276,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.3" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "245da694cc7fc4729f3f418b304cb57789f1bed2a78c575407ab8a23f53cb4d3" +checksum = "5d81bfa81424cc98cb034b837c985b7a290f592e5b4322f353f94a0ab0f9f594" dependencies = [ "ansi_term", "lazy_static", @@ -6437,9 +6446,9 @@ dependencies = [ [[package]] name = "typenum" -version = "1.14.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b63708a265f51345575b27fe43f9500ad611579e764c79edbc2037b1121959ec" +checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" [[package]] name = "types" @@ -6710,9 +6719,9 @@ checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] name = "version_check" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "void" @@ -6756,7 +6765,7 @@ dependencies = [ "mime_guess", "multipart 0.17.1", "percent-encoding", - "pin-project 1.0.8", + "pin-project 1.0.10", "scoped-tls", "serde", "serde_json", @@ -6787,7 +6796,7 @@ dependencies = [ "mime_guess", "multipart 0.18.0", "percent-encoding", - "pin-project 1.0.8", + "pin-project 1.0.10", "scoped-tls", "serde", "serde_json", @@ -6949,7 +6958,7 @@ dependencies = [ "jsonrpc-core", "log", "parking_lot", - "pin-project 1.0.8", + "pin-project 1.0.10", "reqwest", "rlp 0.5.1", "secp256k1", @@ -7022,9 +7031,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.22.1" +version = "0.22.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c475786c6f47219345717a043a37ec04cb4bc185e28853adcc4fa0a947eba630" +checksum = "552ceb903e957524388c4d3475725ff2c8b7960922063af6ce53c9a43da07449" dependencies = [ "webpki 0.22.0", ] From 4848e531559d6b717e8431f9571a7b1b3557005b Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 11 Jan 2022 05:33:28 +0000 Subject: [PATCH 14/56] Avoid peer penalties on internal errors for batch block import (#2898) ## Issue Addressed NA ## Proposed Changes I've observed some Prater nodes (and potentially some mainnet nodes) banning peers due to validator pubkey cache lock timeouts. For the `BeaconChainError`-type of errors, they're caused by internal faults and we can't necessarily tell if the peer is bad or not. I think this is causing us to ban peers unnecessarily when running on under-resourced machines. ## Additional Info NA --- .../beacon_processor/worker/sync_methods.rs | 128 ++++++++++++++---- .../network/src/sync/backfill_sync/mod.rs | 35 +++-- beacon_node/network/src/sync/manager.rs | 5 +- .../network/src/sync/range_sync/chain.rs | 34 +++-- 4 files changed, 156 insertions(+), 46 deletions(-) diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index 6a75c2990a3..27e0a6711d0 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -7,7 +7,7 @@ use crate::sync::{BatchProcessResult, ChainId}; use beacon_chain::{ BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError, }; -use lighthouse_network::PeerId; +use lighthouse_network::{PeerAction, PeerId}; use slog::{crit, debug, error, info, trace, warn}; use tokio::sync::mpsc; use types::{Epoch, Hash256, SignedBeaconBlock}; @@ -23,6 +23,14 @@ pub enum ProcessId { ParentLookup(PeerId, Hash256), } +/// Returned when a chain segment import 
fails. +struct ChainSegmentFailed { + /// To be displayed in logs. + message: String, + /// Used to penalize peers. + peer_action: Option, +} + impl Worker { /// Attempt to process a block received from a direct RPC request, returning the processing /// result on the `result_tx` channel. @@ -123,9 +131,13 @@ impl Worker { "chain" => chain_id, "last_block_slot" => end_slot, "imported_blocks" => imported_blocks, - "error" => e, + "error" => %e.message, "service" => "sync"); - BatchProcessResult::Failed(imported_blocks > 0) + + BatchProcessResult::Failed { + imported_blocks: imported_blocks > 0, + peer_action: e.peer_action, + } } }; @@ -154,9 +166,12 @@ impl Worker { "batch_epoch" => epoch, "first_block_slot" => start_slot, "last_block_slot" => end_slot, - "error" => e, + "error" => %e.message, "service" => "sync"); - BatchProcessResult::Failed(false) + BatchProcessResult::Failed { + imported_blocks: false, + peer_action: e.peer_action, + } } }; @@ -175,7 +190,7 @@ impl Worker { // reverse match self.process_blocks(downloaded_blocks.iter().rev()) { (_, Err(e)) => { - debug!(self.log, "Parent lookup failed"; "last_peer_id" => %peer_id, "error" => e); + debug!(self.log, "Parent lookup failed"; "last_peer_id" => %peer_id, "error" => %e.message); self.send_sync_message(SyncMessage::ParentLookupFailed { peer_id, chain_head, @@ -193,7 +208,7 @@ impl Worker { fn process_blocks<'a>( &self, downloaded_blocks: impl Iterator>, - ) -> (usize, Result<(), String>) { + ) -> (usize, Result<(), ChainSegmentFailed>) { let blocks = downloaded_blocks.cloned().collect::>(); match self.chain.process_chain_segment(blocks) { ChainSegmentResult::Successful { imported_blocks } => { @@ -223,7 +238,7 @@ impl Worker { fn process_backfill_blocks( &self, blocks: &[SignedBeaconBlock], - ) -> (usize, Result<(), String>) { + ) -> (usize, Result<(), ChainSegmentFailed>) { match self.chain.import_historical_block_batch(blocks) { Ok(imported_blocks) => { metrics::inc_counter( @@ -250,7 +265,12 @@ impl 
Worker { "block_root" => ?block_root, "expected_root" => ?expected_block_root ); - String::from("mismatched_block_root") + + ChainSegmentFailed { + message: String::from("mismatched_block_root"), + // The peer is faulty if they send blocks with bad roots. + peer_action: Some(PeerAction::LowToleranceError), + } } HistoricalBlockError::InvalidSignature | HistoricalBlockError::SignatureSet(_) => { @@ -259,7 +279,12 @@ impl Worker { "Backfill batch processing error"; "error" => ?e ); - "invalid_signature".into() + + ChainSegmentFailed { + message: "invalid_signature".into(), + // The peer is faulty if they bad signatures. + peer_action: Some(PeerAction::LowToleranceError), + } } HistoricalBlockError::ValidatorPubkeyCacheTimeout => { warn!( @@ -267,25 +292,55 @@ impl Worker { "Backfill batch processing error"; "error" => "pubkey_cache_timeout" ); - "pubkey_cache_timeout".into() + + ChainSegmentFailed { + message: "pubkey_cache_timeout".into(), + // This is an internal error, do not penalize the peer. + peer_action: None, + } } HistoricalBlockError::NoAnchorInfo => { warn!(self.log, "Backfill not required"); - String::from("no_anchor_info") + + ChainSegmentFailed { + message: String::from("no_anchor_info"), + // There is no need to do a historical sync, this is not a fault of + // the peer. + peer_action: None, + } } - HistoricalBlockError::IndexOutOfBounds - | HistoricalBlockError::BlockOutOfRange { .. } => { + HistoricalBlockError::IndexOutOfBounds => { error!( self.log, - "Backfill batch processing error"; + "Backfill batch OOB error"; "error" => ?e, ); - String::from("logic_error") + ChainSegmentFailed { + message: String::from("logic_error"), + // This should never occur, don't penalize the peer. + peer_action: None, + } + } + HistoricalBlockError::BlockOutOfRange { .. } => { + error!( + self.log, + "Backfill batch error"; + "error" => ?e, + ); + ChainSegmentFailed { + message: String::from("unexpected_error"), + // This should never occur, don't penalize the peer. 
+ peer_action: None, + } } }, other => { warn!(self.log, "Backfill batch processing error"; "error" => ?other); - format!("{:?}", other) + ChainSegmentFailed { + message: format!("{:?}", other), + // This is an internal error, don't penalize the peer. + peer_action: None, + } } }; (0, Err(err)) @@ -312,15 +367,18 @@ impl Worker { } /// Helper function to handle a `BlockError` from `process_chain_segment` - fn handle_failed_chain_segment(&self, error: BlockError) -> Result<(), String> { + fn handle_failed_chain_segment( + &self, + error: BlockError, + ) -> Result<(), ChainSegmentFailed> { match error { BlockError::ParentUnknown(block) => { // blocks should be sequential and all parents should exist - - Err(format!( - "Block has an unknown parent: {}", - block.parent_root() - )) + Err(ChainSegmentFailed { + message: format!("Block has an unknown parent: {}", block.parent_root()), + // Peers are faulty if they send non-sequential blocks. + peer_action: Some(PeerAction::LowToleranceError), + }) } BlockError::BlockIsAlreadyKnown => { // This can happen for many reasons. Head sync's can download multiples and parent @@ -350,10 +408,14 @@ impl Worker { ); } - Err(format!( - "Block with slot {} is higher than the current slot {}", - block_slot, present_slot - )) + Err(ChainSegmentFailed { + message: format!( + "Block with slot {} is higher than the current slot {}", + block_slot, present_slot + ), + // Peers are faulty if they send blocks from the future. + peer_action: Some(PeerAction::LowToleranceError), + }) } BlockError::WouldRevertFinalizedSlot { .. } => { debug!(self.log, "Finalized or earlier block processed";); @@ -370,7 +432,11 @@ impl Worker { "outcome" => ?e, ); - Err(format!("Internal error whilst processing block: {:?}", e)) + Err(ChainSegmentFailed { + message: format!("Internal error whilst processing block: {:?}", e), + // Do not penalize peers for internal errors. 
+ peer_action: None, + }) } other => { debug!( @@ -379,7 +445,11 @@ impl Worker { "outcome" => %other, ); - Err(format!("Peer sent invalid block. Reason: {:?}", other)) + Err(ChainSegmentFailed { + message: format!("Peer sent invalid block. Reason: {:?}", other), + // Do not penalize peers for internal errors. + peer_action: None, + }) } } } diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index b9016b9fdcd..fc94eaca0d9 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -541,7 +541,15 @@ impl BackFillSync { // blocks to continue, and the chain is expecting a processing result that won't // arrive. To mitigate this, (fake) fail this processing so that the batch is // re-downloaded. - self.on_batch_process_result(network, batch_id, &BatchProcessResult::Failed(false)) + self.on_batch_process_result( + network, + batch_id, + &BatchProcessResult::Failed { + imported_blocks: false, + // The beacon processor queue is full, no need to penalize the peer. + peer_action: None, + }, + ) } else { Ok(ProcessResult::Successful) } @@ -621,7 +629,10 @@ impl BackFillSync { self.process_completed_batches(network) } } - BatchProcessResult::Failed(imported_blocks) => { + BatchProcessResult::Failed { + imported_blocks, + peer_action, + } => { let batch = match self.batches.get_mut(&batch_id) { Some(v) => v, None => { @@ -659,12 +670,20 @@ impl BackFillSync { // that it is likely all peers are sending invalid batches // repeatedly and are either malicious or faulty. We stop the backfill sync and // report all synced peers that have participated. - let action = PeerAction::LowToleranceError; - warn!(self.log, "Backfill batch failed to download. 
Penalizing peers"; - "score_adjustment" => %action, - "batch_epoch"=> batch_id); - for peer in self.participating_peers.drain() { - network.report_peer(peer, action); + warn!( + self.log, + "Backfill batch failed to download. Penalizing peers"; + "score_adjustment" => %peer_action + .as_ref() + .map(ToString::to_string) + .unwrap_or_else(|| "None".into()), + "batch_epoch"=> batch_id + ); + + if let Some(peer_action) = peer_action { + for peer in self.participating_peers.drain() { + network.report_peer(peer, *peer_action); + } } self.fail_sync(BackFillError::BatchProcessingFailed(batch_id)) .map(|_| ProcessResult::Successful) diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index f0726ca947b..f9055665ca6 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -137,7 +137,10 @@ pub enum BatchProcessResult { /// The batch was completed successfully. It carries whether the sent batch contained blocks. Success(bool), /// The batch processing failed. It carries whether the processing imported any block. - Failed(bool), + Failed { + imported_blocks: bool, + peer_action: Option, + }, } /// Maintains a sequential list of parents to lookup and the lookup's current state. diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index a1acac614ea..4b89808994b 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -313,7 +313,14 @@ impl SyncingChain { // blocks to continue, and the chain is expecting a processing result that won't // arrive. To mitigate this, (fake) fail this processing so that the batch is // re-downloaded. 
- self.on_batch_process_result(network, batch_id, &BatchProcessResult::Failed(false)) + self.on_batch_process_result( + network, + batch_id, + &BatchProcessResult::Failed { + imported_blocks: false, + peer_action: None, + }, + ) } else { Ok(KeepChain) } @@ -488,7 +495,10 @@ impl SyncingChain { self.process_completed_batches(network) } } - BatchProcessResult::Failed(imported_blocks) => { + BatchProcessResult::Failed { + imported_blocks, + peer_action, + } => { let batch = self.batches.get_mut(&batch_id).ok_or_else(|| { RemoveChain::WrongChainState(format!( "Batch not found for current processing target {}", @@ -511,12 +521,20 @@ impl SyncingChain { // report all peers. // There are some edge cases with forks that could land us in this situation. // This should be unlikely, so we tolerate these errors, but not often. - let action = PeerAction::LowToleranceError; - warn!(self.log, "Batch failed to download. Dropping chain scoring peers"; - "score_adjustment" => %action, - "batch_epoch"=> batch_id); - for (peer, _) in self.peers.drain() { - network.report_peer(peer, action); + warn!( + self.log, + "Batch failed to download. Dropping chain scoring peers"; + "score_adjustment" => %peer_action + .as_ref() + .map(ToString::to_string) + .unwrap_or_else(|| "None".into()), + "batch_epoch"=> batch_id + ); + + if let Some(peer_action) = peer_action { + for (peer, _) in self.peers.drain() { + network.report_peer(peer, *peer_action); + } } Err(RemoveChain::ChainFailed(batch_id)) } else { From b6560079636c9bedd3408bdcf0f359c742b7d9df Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 11 Jan 2022 05:33:29 +0000 Subject: [PATCH 15/56] Skip serializing proposer boost if null (#2899) ## Issue Addressed Restore compatibility between Lighthouse v2.0.1 VC and `unstable` BN in preparation for the next release. ## Proposed Changes * Don't serialize the `PROPOSER_SCORE_BOOST` as `null` because it breaks the `extra_fields: HashMap` used by the v2.0.1 VC. 
--- consensus/types/src/chain_spec.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 68a5175a91b..70845877d9f 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -659,6 +659,7 @@ pub struct Config { #[serde(with = "eth2_serde_utils::quoted_u64")] churn_limit_quotient: u64, + #[serde(skip_serializing_if = "Option::is_none")] proposer_score_boost: Option>, #[serde(with = "eth2_serde_utils::quoted_u64")] From 61f60bdf03cffe76fa9d66eac7f7c30ab000fab7 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 12 Jan 2022 02:36:24 +0000 Subject: [PATCH 16/56] Avoid penalizing peers for delays during processing (#2894) ## Issue Addressed NA ## Proposed Changes We have observed occasions were under-resourced nodes will receive messages that were valid *at the time*, but later become invalidated due to long waits for a `BeaconProcessor` worker. In this PR, we will check to see if the message was valid *at the time of receipt*. If it was initially valid but invalid now, we just ignore the message without penalizing the peer. ## Additional Info NA --- .../src/attestation_verification.rs | 18 ++-- .../src/sync_committee_verification.rs | 14 ++- .../beacon_processor/worker/gossip_methods.rs | 85 +++++++++++++++---- common/slot_clock/src/lib.rs | 14 +++ 4 files changed, 95 insertions(+), 36 deletions(-) diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index 85d7b2b7d59..fb05ef75526 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -452,7 +452,7 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { // MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance). // // We do not queue future attestations for later processing. 
- verify_propagation_slot_range(chain, attestation)?; + verify_propagation_slot_range(&chain.slot_clock, attestation)?; // Check the attestation's epoch matches its target. if attestation.data.slot.epoch(T::EthSpec::slots_per_epoch()) @@ -716,7 +716,7 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> { // MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance). // // We do not queue future attestations for later processing. - verify_propagation_slot_range(chain, attestation)?; + verify_propagation_slot_range(&chain.slot_clock, attestation)?; // Check to ensure that the attestation is "unaggregated". I.e., it has exactly one // aggregation bit set. @@ -1019,14 +1019,13 @@ fn verify_head_block_is_known( /// to the current slot of the `chain`. /// /// Accounts for `MAXIMUM_GOSSIP_CLOCK_DISPARITY`. -pub fn verify_propagation_slot_range( - chain: &BeaconChain, - attestation: &Attestation, +pub fn verify_propagation_slot_range( + slot_clock: &S, + attestation: &Attestation, ) -> Result<(), Error> { let attestation_slot = attestation.data.slot; - let latest_permissible_slot = chain - .slot_clock + let latest_permissible_slot = slot_clock .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) .ok_or(BeaconChainError::UnableToReadSlot)?; if attestation_slot > latest_permissible_slot { @@ -1037,11 +1036,10 @@ pub fn verify_propagation_slot_range( } // Taking advantage of saturating subtraction on `Slot`. - let earliest_permissible_slot = chain - .slot_clock + let earliest_permissible_slot = slot_clock .now_with_past_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) .ok_or(BeaconChainError::UnableToReadSlot)? 
- - T::EthSpec::slots_per_epoch(); + - E::slots_per_epoch(); if attestation_slot < earliest_permissible_slot { return Err(Error::PastSlot { attestation_slot, diff --git a/beacon_node/beacon_chain/src/sync_committee_verification.rs b/beacon_node/beacon_chain/src/sync_committee_verification.rs index 4bc5b439e12..fa7d4dcfed5 100644 --- a/beacon_node/beacon_chain/src/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/src/sync_committee_verification.rs @@ -273,7 +273,7 @@ impl VerifiedSyncContribution { let subcommittee_index = contribution.subcommittee_index as usize; // Ensure sync committee contribution is within the MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance. - verify_propagation_slot_range(chain, contribution)?; + verify_propagation_slot_range(&chain.slot_clock, contribution)?; // Validate subcommittee index. if contribution.subcommittee_index >= SYNC_COMMITTEE_SUBNET_COUNT { @@ -428,7 +428,7 @@ impl VerifiedSyncCommitteeMessage { // MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance). // // We do not queue future sync committee messages for later processing. - verify_propagation_slot_range(chain, &sync_message)?; + verify_propagation_slot_range(&chain.slot_clock, &sync_message)?; // Ensure the `subnet_id` is valid for the given validator. let pubkey = chain @@ -516,14 +516,13 @@ impl VerifiedSyncCommitteeMessage { /// to the current slot of the `chain`. /// /// Accounts for `MAXIMUM_GOSSIP_CLOCK_DISPARITY`. 
-pub fn verify_propagation_slot_range( - chain: &BeaconChain, +pub fn verify_propagation_slot_range( + slot_clock: &S, sync_contribution: &U, ) -> Result<(), Error> { let message_slot = sync_contribution.get_slot(); - let latest_permissible_slot = chain - .slot_clock + let latest_permissible_slot = slot_clock .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) .ok_or(BeaconChainError::UnableToReadSlot)?; if message_slot > latest_permissible_slot { @@ -533,8 +532,7 @@ pub fn verify_propagation_slot_range( }); } - let earliest_permissible_slot = chain - .slot_clock + let earliest_permissible_slot = slot_clock .now_with_past_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) .ok_or(BeaconChainError::UnableToReadSlot)?; diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index d18c96c0a73..1b7ef7aa9bc 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -2,9 +2,9 @@ use crate::{metrics, service::NetworkMessage, sync::SyncMessage}; use beacon_chain::store::Error; use beacon_chain::{ - attestation_verification::{Error as AttnError, VerifiedAttestation}, + attestation_verification::{self, Error as AttnError, VerifiedAttestation}, observed_operations::ObservationOutcome, - sync_committee_verification::Error as SyncCommitteeError, + sync_committee_verification::{self, Error as SyncCommitteeError}, validator_monitor::get_block_delay_ms, BeaconChainError, BeaconChainTypes, BlockError, ExecutionPayloadError, ForkChoiceError, GossipVerifiedBlock, @@ -19,7 +19,7 @@ use tokio::sync::mpsc; use types::{ Attestation, AttesterSlashing, EthSpec, Hash256, IndexedAttestation, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, - SubnetId, SyncCommitteeMessage, SyncSubnetId, + Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; use 
super::{ @@ -100,12 +100,7 @@ enum FailedAtt { impl FailedAtt { pub fn beacon_block_root(&self) -> &Hash256 { - match self { - FailedAtt::Unaggregate { attestation, .. } => &attestation.data.beacon_block_root, - FailedAtt::Aggregate { attestation, .. } => { - &attestation.message.aggregate.data.beacon_block_root - } - } + &self.attestation().data.beacon_block_root } pub fn kind(&self) -> &'static str { @@ -114,6 +109,13 @@ impl FailedAtt { FailedAtt::Aggregate { .. } => "aggregated", } } + + pub fn attestation(&self) -> &Attestation { + match self { + FailedAtt::Unaggregate { attestation, .. } => attestation, + FailedAtt::Aggregate { attestation, .. } => &attestation.message.aggregate, + } + } } /// Items required to verify a batch of unaggregated gossip attestations. @@ -410,6 +412,7 @@ impl Worker { }, reprocess_tx, error, + seen_timestamp, ); } } @@ -608,6 +611,7 @@ impl Worker { }, reprocess_tx, error, + seen_timestamp, ); } } @@ -1117,6 +1121,7 @@ impl Worker { subnet_id: SyncSubnetId, seen_timestamp: Duration, ) { + let message_slot = sync_signature.slot; let sync_signature = match self .chain .verify_sync_committee_message_for_gossip(sync_signature, subnet_id) @@ -1128,6 +1133,8 @@ impl Worker { message_id, "sync_signature", e, + message_slot, + seen_timestamp, ); return; } @@ -1177,6 +1184,7 @@ impl Worker { sync_contribution: SignedContributionAndProof, seen_timestamp: Duration, ) { + let contribution_slot = sync_contribution.message.contribution.slot; let sync_contribution = match self .chain .verify_sync_contribution_for_gossip(sync_contribution) @@ -1189,6 +1197,8 @@ impl Worker { message_id, "sync_contribution", e, + contribution_slot, + seen_timestamp, ); return; } @@ -1232,6 +1242,7 @@ impl Worker { failed_att: FailedAtt, reprocess_tx: Option>>, error: AttnError, + seen_timestamp: Duration, ) { let beacon_block_root = failed_att.beacon_block_root(); let attestation_type = failed_att.kind(); @@ -1239,8 +1250,7 @@ impl Worker { match &error { 
AttnError::FutureEpoch { .. } | AttnError::PastEpoch { .. } - | AttnError::FutureSlot { .. } - | AttnError::PastSlot { .. } => { + | AttnError::FutureSlot { .. } => { /* * These errors can be triggered by a mismatch between our slot and the peer. * @@ -1262,6 +1272,24 @@ impl Worker { // Do not propagate these messages. self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); } + AttnError::PastSlot { .. } => { + // Produce a slot clock frozen at the time we received the message from the + // network. + let seen_clock = &self.chain.slot_clock.freeze_at(seen_timestamp); + let hindsight_verification = + attestation_verification::verify_propagation_slot_range( + seen_clock, + failed_att.attestation(), + ); + + // Only penalize the peer if it would have been invalid at the moment we received + // it. + if hindsight_verification.is_err() { + self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + } + + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + } AttnError::InvalidSelectionProof { .. } | AttnError::InvalidSignature => { /* * These errors are caused by invalid signatures. @@ -1625,6 +1653,8 @@ impl Worker { message_id: MessageId, message_type: &str, error: SyncCommitteeError, + sync_committee_message_slot: Slot, + seen_timestamp: Duration, ) { metrics::register_sync_committee_error(&error); @@ -1650,10 +1680,7 @@ impl Worker { // Do not propagate these messages. self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); } - SyncCommitteeError::PastSlot { - message_slot, - earliest_permissible_slot, - } => { + SyncCommitteeError::PastSlot { .. } => { /* * This error can be triggered by a mismatch between our slot and the peer. * @@ -1667,12 +1694,34 @@ impl Worker { "type" => ?message_type, ); - // We tolerate messages that were just one slot late. - if *message_slot + 1 < *earliest_permissible_slot { + // Compute the slot when we received the message. 
+ let received_slot = self + .chain + .slot_clock + .slot_of(seen_timestamp) + .unwrap_or_else(|| self.chain.slot_clock.genesis_slot()); + + // The message is "excessively" late if it was more than one slot late. + let excessively_late = received_slot > sync_committee_message_slot + 1; + + // This closure will lazily produce a slot clock frozen at the time we received the + // message from the network and return a bool indicating if the message was invalid + // at the time of receipt too. + let invalid_in_hindsight = || { + let seen_clock = &self.chain.slot_clock.freeze_at(seen_timestamp); + let hindsight_verification = + sync_committee_verification::verify_propagation_slot_range( + seen_clock, + &sync_committee_message_slot, + ); + hindsight_verification.is_err() + }; + + // Penalize the peer if the message was more than one slot late + if excessively_late && invalid_in_hindsight() { self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); } - // Do not propagate these messages. self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); } SyncCommitteeError::EmptyAggregationBitfield => { diff --git a/common/slot_clock/src/lib.rs b/common/slot_clock/src/lib.rs index f50931c6f6a..183f5c9313d 100644 --- a/common/slot_clock/src/lib.rs +++ b/common/slot_clock/src/lib.rs @@ -112,4 +112,18 @@ pub trait SlotClock: Send + Sync + Sized + Clone { Duration::from_secs(duration_into_slot.as_secs() % seconds_per_slot) }) } + + /// Produces a *new* slot clock with the same configuration of `self`, except that clock is + /// "frozen" at the `freeze_at` time. + /// + /// This is useful for observing the slot clock at arbitrary fixed points in time. 
+ fn freeze_at(&self, freeze_at: Duration) -> ManualSlotClock { + let slot_clock = ManualSlotClock::new( + self.genesis_slot(), + self.genesis_duration(), + self.slot_duration(), + ); + slot_clock.set_current_time(freeze_at); + slot_clock + } } From aaa5344eab2c0bda90d0d4da3710982c05396814 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 12 Jan 2022 05:32:14 +0000 Subject: [PATCH 17/56] Add peer score adjustment msgs (#2901) ## Issue Addressed N/A ## Proposed Changes This PR adds the `msg` field to `Peer score adjusted` log messages. These `msg` fields help identify *why* a peer was banned. Example: ``` Jan 11 04:18:48.096 DEBG Peer score adjusted score: -100.00, peer_id: 16Uiu2HAmQskxKWWGYfginwZ51n5uDbhvjHYnvASK7PZ5gBdLmzWj, msg: attn_unknown_head, service: libp2p Jan 11 04:18:48.096 DEBG Peer score adjusted score: -27.86, peer_id: 16Uiu2HAmA7cCb3MemVDbK3MHZoSb7VN3cFUG3vuSZgnGesuVhPDE, msg: sync_past_slot, service: libp2p Jan 11 04:18:48.096 DEBG Peer score adjusted score: -100.00, peer_id: 16Uiu2HAmQskxKWWGYfginwZ51n5uDbhvjHYnvASK7PZ5gBdLmzWj, msg: attn_unknown_head, service: libp2p Jan 11 04:18:48.096 DEBG Peer score adjusted score: -28.86, peer_id: 16Uiu2HAmA7cCb3MemVDbK3MHZoSb7VN3cFUG3vuSZgnGesuVhPDE, msg: sync_past_slot, service: libp2p Jan 11 04:18:48.096 DEBG Peer score adjusted score: -29.86, peer_id: 16Uiu2HAmA7cCb3MemVDbK3MHZoSb7VN3cFUG3vuSZgnGesuVhPDE, msg: sync_past_slot, service: libp2p ``` There is also a `libp2p_report_peer_msgs_total` metrics which allows us to see count of reports per `msg` tag. 
## Additional Info NA --- .../src/attestation_verification.rs | 18 -- .../lighthouse_network/src/behaviour/mod.rs | 1 + beacon_node/lighthouse_network/src/metrics.rs | 9 + .../src/peer_manager/mod.rs | 19 +- .../src/peer_manager/peerdb.rs | 108 ++++++-- beacon_node/lighthouse_network/src/service.rs | 10 +- .../lighthouse_network/tests/pm_tests.rs | 3 +- .../beacon_processor/worker/gossip_methods.rs | 249 ++++++++++++++---- beacon_node/network/src/service.rs | 3 +- .../network/src/sync/backfill_sync/mod.rs | 14 +- beacon_node/network/src/sync/manager.rs | 37 ++- .../network/src/sync/network_context.rs | 3 +- .../network/src/sync/range_sync/chain.rs | 14 +- 13 files changed, 378 insertions(+), 110 deletions(-) diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index fb05ef75526..6692aa48cd2 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -183,24 +183,6 @@ pub enum Error { /// single-participant attestation from this validator for this epoch and should not observe /// another. PriorAttestationKnown { validator_index: u64, epoch: Epoch }, - /// The attestation is for an epoch in the future (with respect to the gossip clock disparity). - /// - /// ## Peer scoring - /// - /// Assuming the local clock is correct, the peer has sent an invalid message. - FutureEpoch { - attestation_epoch: Epoch, - current_epoch: Epoch, - }, - /// The attestation is for an epoch in the past (with respect to the gossip clock disparity). - /// - /// ## Peer scoring - /// - /// Assuming the local clock is correct, the peer has sent an invalid message. - PastEpoch { - attestation_epoch: Epoch, - current_epoch: Epoch, - }, /// The attestation is attesting to a state that is later than itself. (Viz., attesting to the /// future). 
/// diff --git a/beacon_node/lighthouse_network/src/behaviour/mod.rs b/beacon_node/lighthouse_network/src/behaviour/mod.rs index f14d24aac49..32a87166b2f 100644 --- a/beacon_node/lighthouse_network/src/behaviour/mod.rs +++ b/beacon_node/lighthouse_network/src/behaviour/mod.rs @@ -887,6 +887,7 @@ impl NetworkBehaviourEventProcess for Behaviour< PeerAction::LowToleranceError, ReportSource::Gossipsub, Some(GoodbyeReason::Unknown), + "does_not_support_gossipsub", ); } } diff --git a/beacon_node/lighthouse_network/src/metrics.rs b/beacon_node/lighthouse_network/src/metrics.rs index b8fd8c58483..1dfe0448b7a 100644 --- a/beacon_node/lighthouse_network/src/metrics.rs +++ b/beacon_node/lighthouse_network/src/metrics.rs @@ -106,6 +106,15 @@ lazy_static! { /// The number of peers that we dialed us. pub static ref NETWORK_OUTBOUND_PEERS: Result = try_create_int_gauge("network_outbound_peers","The number of peers that are currently connected that we dialed."); + + /* + * Peer Reporting + */ + pub static ref REPORT_PEER_MSGS: Result = try_create_int_counter_vec( + "libp2p_report_peer_msgs_total", + "Number of peer reports per msg", + &["msg"] + ); } /// Checks if we consider the NAT open. diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 202738c25f9..318bdfcdf31 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -155,7 +155,13 @@ impl PeerManager { } } - self.report_peer(peer_id, PeerAction::Fatal, source, Some(reason)); + self.report_peer( + peer_id, + PeerAction::Fatal, + source, + Some(reason), + "goodbye_peer", + ); } /// Reports a peer for some action. 
@@ -167,12 +173,13 @@ impl PeerManager { action: PeerAction, source: ReportSource, reason: Option, + msg: &'static str, ) { let action = self .network_globals .peers .write() - .report_peer(peer_id, action, source); + .report_peer(peer_id, action, source, msg); self.handle_score_action(peer_id, action, reason); } @@ -511,7 +518,13 @@ impl PeerManager { RPCError::Disconnected => return, // No penalty for a graceful disconnection }; - self.report_peer(peer_id, peer_action, ReportSource::RPC, None); + self.report_peer( + peer_id, + peer_action, + ReportSource::RPC, + None, + "handle_rpc_error", + ); } /// A ping request has been received. diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index 81c03eaf751..f70f35b689d 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -490,7 +490,10 @@ impl PeerDB { peer_id: &PeerId, action: PeerAction, source: ReportSource, + msg: &'static str, ) -> ScoreUpdateResult { + metrics::inc_counter_vec(&metrics::REPORT_PEER_MSGS, &[msg]); + match self.peers.get_mut(peer_id) { Some(info) => { let previous_state = info.score_state(); @@ -502,7 +505,13 @@ impl PeerDB { let result = Self::handle_score_transition(previous_state, peer_id, info, &self.log); if previous_state == info.score_state() { - debug!(self.log, "Peer score adjusted"; "peer_id" => %peer_id, "score" => %info.score()); + debug!( + self.log, + "Peer score adjusted"; + "msg" => %msg, + "peer_id" => %peer_id, + "score" => %info.score() + ); } match result { ScoreTransitionResult::Banned => { @@ -522,13 +531,23 @@ impl PeerDB { } ScoreTransitionResult::NoAction => ScoreUpdateResult::NoAction, ScoreTransitionResult::Unbanned => { - error!(self.log, "Report peer action lead to an unbanning"; "peer_id" => %peer_id); + error!( + self.log, + "Report peer action lead to an unbanning"; + "msg" => %msg, + "peer_id" => 
%peer_id + ); ScoreUpdateResult::NoAction } } } None => { - debug!(self.log, "Reporting a peer that doesn't exist"; "peer_id" =>%peer_id); + debug!( + self.log, + "Reporting a peer that doesn't exist"; + "msg" => %msg, + "peer_id" =>%peer_id + ); ScoreUpdateResult::NoAction } } @@ -1357,7 +1376,7 @@ mod tests { assert_eq!(pdb.banned_peers_count.banned_peers(), 0); for p in pdb.connected_peer_ids().cloned().collect::>() { - let _ = pdb.report_peer(&p, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer(&p, PeerAction::Fatal, ReportSource::PeerManager, ""); pdb.inject_disconnect(&p); } @@ -1426,9 +1445,19 @@ mod tests { pdb.inject_disconnect(&random_peer); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); - let _ = pdb.report_peer(&random_peer, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer( + &random_peer, + PeerAction::Fatal, + ReportSource::PeerManager, + "", + ); pdb.inject_disconnect(&random_peer); - let _ = pdb.report_peer(&random_peer, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer( + &random_peer, + PeerAction::Fatal, + ReportSource::PeerManager, + "", + ); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); pdb.inject_disconnect(&random_peer); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); @@ -1481,7 +1510,12 @@ mod tests { pdb.disconnected_peers, pdb.banned_peers_count.banned_peers ); // Disconnect and ban peer 2 - let _ = pdb.report_peer(&random_peer2, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer( + &random_peer2, + PeerAction::Fatal, + ReportSource::PeerManager, + "", + ); // Should be 1 disconnected peer and one peer in the process of being disconnected println!( "3:{},{}", @@ -1495,7 +1529,12 @@ mod tests { pdb.disconnected_peers, pdb.banned_peers_count.banned_peers ); // Now that the peer is disconnected, register the ban. 
- let _ = pdb.report_peer(&random_peer2, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer( + &random_peer2, + PeerAction::Fatal, + ReportSource::PeerManager, + "", + ); // There should be 1 disconnected peer and one banned peer. println!( "5:{},{}", @@ -1509,7 +1548,12 @@ mod tests { pdb.banned_peers().count() ); // Now ban peer 1. - let _ = pdb.report_peer(&random_peer1, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer( + &random_peer1, + PeerAction::Fatal, + ReportSource::PeerManager, + "", + ); // There should be no disconnected peers and 2 banned peers println!( "6:{},{}", @@ -1523,7 +1567,12 @@ mod tests { pdb.disconnected_peers, pdb.banned_peers_count.banned_peers ); // Same thing here. - let _ = pdb.report_peer(&random_peer1, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer( + &random_peer1, + PeerAction::Fatal, + ReportSource::PeerManager, + "", + ); println!( "8:{},{}", pdb.disconnected_peers, pdb.banned_peers_count.banned_peers @@ -1559,7 +1608,12 @@ mod tests { ); // Ban peer 3 - let _ = pdb.report_peer(&random_peer3, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer( + &random_peer3, + PeerAction::Fatal, + ReportSource::PeerManager, + "", + ); pdb.inject_disconnect(&random_peer3); // This should add a new banned peer, there should be 0 disconnected and 2 banned @@ -1576,7 +1630,12 @@ mod tests { ); // Ban peer 3 - let _ = pdb.report_peer(&random_peer3, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer( + &random_peer3, + PeerAction::Fatal, + ReportSource::PeerManager, + "", + ); pdb.inject_disconnect(&random_peer3); // Should still have 2 banned peers @@ -1606,7 +1665,12 @@ mod tests { ); // Ban peer 3 - let _ = pdb.report_peer(&random_peer3, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer( + &random_peer3, + PeerAction::Fatal, + ReportSource::PeerManager, + "", + ); pdb.inject_disconnect(&random_peer3); // 
Should have 1 disconnect (peer 2) and one banned (peer 3) @@ -1657,7 +1721,12 @@ mod tests { ); // Ban peer 0 - let _ = pdb.report_peer(&random_peer, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer( + &random_peer, + PeerAction::Fatal, + ReportSource::PeerManager, + "", + ); pdb.inject_disconnect(&random_peer); // Should have 1 disconnect ( peer 2) and two banned (peer0, peer 3) @@ -1709,7 +1778,7 @@ mod tests { let p5 = connect_peer_with_ips(&mut pdb, vec![ip5]); for p in &peers[..BANNED_PEERS_PER_IP_THRESHOLD + 1] { - let _ = pdb.report_peer(p, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer(p, PeerAction::Fatal, ReportSource::PeerManager, ""); pdb.inject_disconnect(p); } @@ -1725,6 +1794,7 @@ mod tests { &peers[BANNED_PEERS_PER_IP_THRESHOLD + 1], PeerAction::Fatal, ReportSource::PeerManager, + "", ); pdb.inject_disconnect(&peers[BANNED_PEERS_PER_IP_THRESHOLD + 1]); @@ -1777,7 +1847,7 @@ mod tests { // ban all peers for p in &peers { - let _ = pdb.report_peer(p, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer(p, PeerAction::Fatal, ReportSource::PeerManager, ""); pdb.inject_disconnect(p); } @@ -1806,7 +1876,7 @@ mod tests { socker_addr.push(Protocol::Tcp(8080)); for p in &peers { pdb.connect_ingoing(p, socker_addr.clone(), None); - let _ = pdb.report_peer(p, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer(p, PeerAction::Fatal, ReportSource::PeerManager, ""); pdb.inject_disconnect(p); } @@ -1823,7 +1893,7 @@ mod tests { // reban every peer except one for p in &peers[1..] 
{ - let _ = pdb.report_peer(p, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer(p, PeerAction::Fatal, ReportSource::PeerManager, ""); pdb.inject_disconnect(p); } @@ -1832,7 +1902,7 @@ mod tests { assert!(!pdb.ban_status(&p2).is_banned()); // reban last peer - let _ = pdb.report_peer(&peers[0], PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer(&peers[0], PeerAction::Fatal, ReportSource::PeerManager, ""); pdb.inject_disconnect(&peers[0]); //Ip's are banned again diff --git a/beacon_node/lighthouse_network/src/service.rs b/beacon_node/lighthouse_network/src/service.rs index 23c19829065..cbb11cae4bb 100644 --- a/beacon_node/lighthouse_network/src/service.rs +++ b/beacon_node/lighthouse_network/src/service.rs @@ -280,11 +280,17 @@ impl Service { } /// Report a peer's action. - pub fn report_peer(&mut self, peer_id: &PeerId, action: PeerAction, source: ReportSource) { + pub fn report_peer( + &mut self, + peer_id: &PeerId, + action: PeerAction, + source: ReportSource, + msg: &'static str, + ) { self.swarm .behaviour_mut() .peer_manager_mut() - .report_peer(peer_id, action, source, None); + .report_peer(peer_id, action, source, None, msg); } /// Disconnect and ban a peer, providing a reason. 
diff --git a/beacon_node/lighthouse_network/tests/pm_tests.rs b/beacon_node/lighthouse_network/tests/pm_tests.rs index 96f91797ad3..9b26e4939fa 100644 --- a/beacon_node/lighthouse_network/tests/pm_tests.rs +++ b/beacon_node/lighthouse_network/tests/pm_tests.rs @@ -167,7 +167,8 @@ async fn banned_peers_consistency() { &peer_id, PeerAction::Fatal, ReportSource::Processor, - None + None, + "" ); }, _ => {} diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 1b7ef7aa9bc..2b6ac02b622 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -180,11 +180,12 @@ impl Worker { /* Auxiliary functions */ /// Penalizes a peer for misbehaviour. - fn gossip_penalize_peer(&self, peer_id: PeerId, action: PeerAction) { + fn gossip_penalize_peer(&self, peer_id: PeerId, action: PeerAction, msg: &'static str) { self.send_network_message(NetworkMessage::ReportPeer { peer_id, action, source: ReportSource::Gossipsub, + msg, }) } @@ -738,16 +739,24 @@ impl Worker { self.send_sync_message(SyncMessage::UnknownBlock(peer_id, block)); return None; } + Err(e @ BlockError::BeaconChainError(_)) => { + debug!( + self.log, + "Gossip block beacon chain error"; + "error" => ?e, + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + return None; + } Err(e @ BlockError::FutureSlot { .. }) | Err(e @ BlockError::WouldRevertFinalizedSlot { .. }) | Err(e @ BlockError::BlockIsAlreadyKnown) | Err(e @ BlockError::RepeatProposal { .. }) - | Err(e @ BlockError::NotFinalizedDescendant { .. }) - | Err(e @ BlockError::BeaconChainError(_)) => { + | Err(e @ BlockError::NotFinalizedDescendant { .. }) => { debug!(self.log, "Could not verify block for gossip, ignoring the block"; "error" => %e); // Prevent recurring behaviour by penalizing the peer slightly. 
- self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError, "gossip_block_high"); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); return None; } @@ -780,7 +789,7 @@ impl Worker { warn!(self.log, "Could not verify block for gossip, rejecting the block"; "error" => %e); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError, "gossip_block_low"); return None; } }; @@ -931,7 +940,11 @@ impl Worker { "block root" => ?block.canonical_root(), "block slot" => block.slot() ); - self.gossip_penalize_peer(peer_id, PeerAction::MidToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::MidToleranceError, + "bad_gossip_block_ssz", + ); trace!( self.log, "Invalid gossip beacon block ssz"; @@ -973,7 +986,11 @@ impl Worker { // the fault on the peer. self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); // We still penalize a peer slightly to prevent overuse of invalids. - self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "invalid_gossip_exit", + ); return; } }; @@ -1032,7 +1049,11 @@ impl Worker { self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); // Penalize peer slightly for invalids. - self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "invalid_gossip_proposer_slashing", + ); return; } }; @@ -1083,7 +1104,11 @@ impl Worker { ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); // Penalize peer slightly for invalids. 
- self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "invalid_gossip_attester_slashing", + ); return; } }; @@ -1248,9 +1273,7 @@ impl Worker { let attestation_type = failed_att.kind(); metrics::register_attestation_error(&error); match &error { - AttnError::FutureEpoch { .. } - | AttnError::PastEpoch { .. } - | AttnError::FutureSlot { .. } => { + AttnError::FutureSlot { .. } => { /* * These errors can be triggered by a mismatch between our slot and the peer. * @@ -1267,7 +1290,11 @@ impl Worker { // Peers that are slow or not to spec can spam us with these messages draining our // bandwidth. We therefore penalize these peers when they do this. - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_future_slot", + ); // Do not propagate these messages. self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); @@ -1285,7 +1312,11 @@ impl Worker { // Only penalize the peer if it would have been invalid at the moment we received // it. if hindsight_verification.is_err() { - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_past_slot", + ); } self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); @@ -1297,7 +1328,11 @@ impl Worker { * The peer has published an invalid consensus message. 
*/ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_selection_proof", + ); } AttnError::EmptyAggregationBitfield => { /* @@ -1307,7 +1342,11 @@ impl Worker { * */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_empty_agg_bitfield", + ); } AttnError::AggregatorPubkeyUnknown(_) => { /* @@ -1324,7 +1363,11 @@ impl Worker { * The peer has published an invalid consensus message. */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_agg_pubkey", + ); } AttnError::AggregatorNotInCommittee { .. } => { /* @@ -1341,7 +1384,11 @@ impl Worker { * The peer has published an invalid consensus message. */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_agg_not_in_committee", + ); } AttnError::AttestationAlreadyKnown { .. } => { /* @@ -1417,7 +1464,11 @@ impl Worker { "type" => ?attestation_type, ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_val_index_too_high", + ); } AttnError::UnknownHeadBlock { beacon_block_root } => { trace!( @@ -1482,7 +1533,11 @@ impl Worker { } else { // We shouldn't make any further attempts to process this attestation. // Downscore the peer. 
- self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_unknown_head", + ); self.propagate_validation_result( message_id, peer_id, @@ -1510,7 +1565,11 @@ impl Worker { * The peer has published an invalid consensus message. */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_unknown_target", + ); } AttnError::BadTargetEpoch => { /* @@ -1520,7 +1579,11 @@ impl Worker { * The peer has published an invalid consensus message. */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_bad_target", + ); } AttnError::NoCommitteeForSlotAndIndex { .. } => { /* @@ -1529,7 +1592,11 @@ impl Worker { * The peer has published an invalid consensus message. */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_no_committee", + ); } AttnError::NotExactlyOneAggregationBitSet(_) => { /* @@ -1538,7 +1605,11 @@ impl Worker { * The peer has published an invalid consensus message. */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_too_many_agg_bits", + ); } AttnError::AttestsToFutureBlock { .. } => { /* @@ -1547,7 +1618,11 @@ impl Worker { * The peer has published an invalid consensus message. 
*/ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_future_block", + ); } AttnError::InvalidSubnetId { received, expected } => { /* @@ -1560,7 +1635,11 @@ impl Worker { "received" => ?received, ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_invalid_subnet_id", + ); } AttnError::Invalid(_) => { /* @@ -1569,7 +1648,11 @@ impl Worker { * The peer has published an invalid consensus message. */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_invalid_state_processing", + ); } AttnError::InvalidTargetEpoch { .. } => { /* @@ -1578,7 +1661,11 @@ impl Worker { * The peer has published an invalid consensus message. */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_invalid_target_epoch", + ); } AttnError::InvalidTargetRoot { .. } => { /* @@ -1587,7 +1674,11 @@ impl Worker { * The peer has published an invalid consensus message. 
*/ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_invalid_target_root", + ); } AttnError::TooManySkippedSlots { head_block_slot, @@ -1607,7 +1698,11 @@ impl Worker { // In this case we wish to penalize gossipsub peers that do this to avoid future // attestations that have too many skip slots. self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::MidToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::MidToleranceError, + "attn_too_many_skipped_slots", + ); } AttnError::BeaconChainError(BeaconChainError::DBError(Error::HotColdDBError( HotColdDBError::AttestationStateIsFinalized { .. }, @@ -1630,8 +1725,6 @@ impl Worker { "error" => ?e, ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); - // Penalize the peer slightly - self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); } } @@ -1675,7 +1768,11 @@ impl Worker { // Unlike attestations, we have a zero slot buffer in case of sync committee messages, // so we don't penalize heavily. - self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "sync_future_slot", + ); // Do not propagate these messages. 
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); @@ -1719,7 +1816,11 @@ impl Worker { // Penalize the peer if the message was more than one slot late if excessively_late && invalid_in_hindsight() { - self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "sync_past_slot", + ); } self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); @@ -1732,7 +1833,11 @@ impl Worker { * */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "sync_empty_agg_bitfield", + ); } SyncCommitteeError::InvalidSelectionProof { .. } | SyncCommitteeError::InvalidSignature => { @@ -1742,7 +1847,11 @@ impl Worker { * The peer has published an invalid consensus message. */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "sync_invalid_proof_or_sig", + ); } SyncCommitteeError::AggregatorNotInCommittee { .. } | SyncCommitteeError::AggregatorPubkeyUnknown(_) => { @@ -1753,7 +1862,11 @@ impl Worker { * The peer has published an invalid consensus message. 
*/ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "sync_bad_aggregator", + ); } SyncCommitteeError::SyncContributionAlreadyKnown(_) | SyncCommitteeError::AggregatorAlreadyKnown(_) => { @@ -1786,7 +1899,11 @@ impl Worker { "type" => ?message_type, ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "sync_unknown_validator", + ); } SyncCommitteeError::UnknownValidatorPubkey(_) => { debug!( @@ -1796,7 +1913,11 @@ impl Worker { "type" => ?message_type, ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "sync_unknown_validator_pubkey", + ); } SyncCommitteeError::InvalidSubnetId { received, expected } => { /* @@ -1809,7 +1930,11 @@ impl Worker { "received" => ?received, ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "sync_invalid_subnet_id", + ); } SyncCommitteeError::Invalid(_) => { /* @@ -1818,7 +1943,11 @@ impl Worker { * The peer has published an invalid consensus message. */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "sync_invalid_state_processing", + ); } SyncCommitteeError::PriorSyncCommitteeMessageKnown { .. 
} => { /* @@ -1834,7 +1963,11 @@ impl Worker { ); // We still penalize the peer slightly. We don't want this to be a recurring // behaviour. - self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "sync_prior_known", + ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); @@ -1855,8 +1988,6 @@ impl Worker { "error" => ?e, ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); - // Penalize the peer slightly - self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); } SyncCommitteeError::BeaconStateError(e) => { /* @@ -1874,7 +2005,11 @@ impl Worker { ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); // Penalize the peer slightly - self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "sync_beacon_state_error", + ); } SyncCommitteeError::ContributionError(e) => { error!( @@ -1885,7 +2020,11 @@ impl Worker { ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); // Penalize the peer slightly - self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "sync_contribution_error", + ); } SyncCommitteeError::SyncCommitteeError(e) => { error!( @@ -1896,7 +2035,11 @@ impl Worker { ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); // Penalize the peer slightly - self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "sync_committee_error", + ); } SyncCommitteeError::ArithError(e) => { /* @@ -1909,7 +2052,11 @@ impl Worker { "error" => ?e, ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); - self.gossip_penalize_peer(peer_id, 
PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "sync_arith_error", + ); } SyncCommitteeError::InvalidSubcommittee { .. } => { /* @@ -1917,7 +2064,11 @@ impl Worker { an invalid message. */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "sync_invalid_subcommittee", + ); } } debug!( diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 485b0a98f5b..35cf3fa90eb 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -96,6 +96,7 @@ pub enum NetworkMessage { peer_id: PeerId, action: PeerAction, source: ReportSource, + msg: &'static str, }, /// Disconnect an ban a peer, providing a reason. GoodbyePeer { @@ -445,7 +446,7 @@ fn spawn_service( ); service.libp2p.swarm.behaviour_mut().publish(messages); } - NetworkMessage::ReportPeer { peer_id, action, source } => service.libp2p.report_peer(&peer_id, action, source), + NetworkMessage::ReportPeer { peer_id, action, source, msg } => service.libp2p.report_peer(&peer_id, action, source, msg), NetworkMessage::GoodbyePeer { peer_id, reason, source } => service.libp2p.goodbye_peer(&peer_id, reason, source), NetworkMessage::AttestationSubscribe { subscriptions } => { if let Err(e) = service diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index fc94eaca0d9..610081319d6 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -682,7 +682,7 @@ impl BackFillSync { if let Some(peer_action) = peer_action { for peer in self.participating_peers.drain() { - network.report_peer(peer, *peer_action); + network.report_peer(peer, *peer_action, "backfill_batch_failed"); } } 
self.fail_sync(BackFillError::BatchProcessingFailed(batch_id)) @@ -804,7 +804,11 @@ impl BackFillSync { "batch_epoch" => id, "score_adjustment" => %action, "original_peer" => %attempt.peer_id, "new_peer" => %processed_attempt.peer_id ); - network.report_peer(attempt.peer_id, action); + network.report_peer( + attempt.peer_id, + action, + "backfill_reprocessed_original_peer", + ); } else { // The same peer corrected it's previous mistake. There was an error, so we // negative score the original peer. @@ -813,7 +817,11 @@ impl BackFillSync { "batch_epoch" => id, "score_adjustment" => %action, "original_peer" => %attempt.peer_id, "new_peer" => %processed_attempt.peer_id ); - network.report_peer(attempt.peer_id, action); + network.report_peer( + attempt.peer_id, + action, + "backfill_reprocessed_same_peer", + ); } } } diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index f9055665ca6..32f2a263674 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -369,8 +369,11 @@ impl SyncManager { } else { crit!(self.log, "Parent chain has no blocks"); } - self.network - .report_peer(peer_id, PeerAction::MidToleranceError); + self.network.report_peer( + peer_id, + PeerAction::MidToleranceError, + "bbroot_failed_chains", + ); return; } // add the block to response @@ -388,8 +391,11 @@ impl SyncManager { // tolerate this behaviour. if !single_block_request.block_returned { warn!(self.log, "Peer didn't respond with a block it referenced"; "referenced_block_hash" => %single_block_request.hash, "peer_id" => %peer_id); - self.network - .report_peer(peer_id, PeerAction::MidToleranceError); + self.network.report_peer( + peer_id, + PeerAction::MidToleranceError, + "bbroot_no_block", + ); } return; } @@ -512,8 +518,11 @@ impl SyncManager { warn!(self.log, "Single block lookup failed"; "outcome" => ?outcome); // This could be a range of errors. But we couldn't process the block. 
// For now we consider this a mid tolerance error. - self.network - .report_peer(peer_id, PeerAction::MidToleranceError); + self.network.report_peer( + peer_id, + PeerAction::MidToleranceError, + "single_block_lookup_failed", + ); } } } @@ -836,8 +845,11 @@ impl SyncManager { self.request_parent(parent_request); // We do not tolerate these kinds of errors. We will accept a few but these are signs // of a faulty peer. - self.network - .report_peer(peer, PeerAction::LowToleranceError); + self.network.report_peer( + peer, + PeerAction::LowToleranceError, + "parent_request_bad_hash", + ); } else { // The last block in the queue is the only one that has not attempted to be processed yet. // @@ -907,6 +919,7 @@ impl SyncManager { self.network.report_peer( parent_request.last_submitted_peer, PeerAction::MidToleranceError, + "parent_request_err", ); } } @@ -945,6 +958,7 @@ impl SyncManager { self.network.report_peer( parent_request.last_submitted_peer, PeerAction::LowToleranceError, + "request_parent_import_failed", ); return; // drop the request } @@ -1112,8 +1126,11 @@ impl SyncManager { // A peer sent an object (block or attestation) that referenced a parent. // The processing of this chain failed. self.failed_chains.insert(chain_head); - self.network - .report_peer(peer_id, PeerAction::MidToleranceError); + self.network.report_peer( + peer_id, + PeerAction::MidToleranceError, + "parent_lookup_failed", + ); } } } diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index e991e86e059..9415f210026 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -170,13 +170,14 @@ impl SyncNetworkContext { } /// Reports to the scoring algorithm the behaviour of a peer. 
- pub fn report_peer(&mut self, peer_id: PeerId, action: PeerAction) { + pub fn report_peer(&mut self, peer_id: PeerId, action: PeerAction, msg: &'static str) { debug!(self.log, "Sync reporting peer"; "peer_id" => %peer_id, "action" => %action); self.network_send .send(NetworkMessage::ReportPeer { peer_id, action, source: ReportSource::SyncService, + msg, }) .unwrap_or_else(|e| { warn!(self.log, "Could not report peer, channel failed"; "error"=> %e); diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index 4b89808994b..4474f1cc34e 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -533,7 +533,7 @@ impl SyncingChain { if let Some(peer_action) = peer_action { for (peer, _) in self.peers.drain() { - network.report_peer(peer, *peer_action); + network.report_peer(peer, *peer_action, "batch_failed"); } } Err(RemoveChain::ChainFailed(batch_id)) @@ -624,7 +624,11 @@ impl SyncingChain { "batch_epoch" => id, "score_adjustment" => %action, "original_peer" => %attempt.peer_id, "new_peer" => %processed_attempt.peer_id ); - network.report_peer(attempt.peer_id, action); + network.report_peer( + attempt.peer_id, + action, + "batch_reprocessed_original_peer", + ); } else { // The same peer corrected it's previous mistake. There was an error, so we // negative score the original peer. 
@@ -633,7 +637,11 @@ impl SyncingChain { "batch_epoch" => id, "score_adjustment" => %action, "original_peer" => %attempt.peer_id, "new_peer" => %processed_attempt.peer_id ); - network.report_peer(attempt.peer_id, action); + network.report_peer( + attempt.peer_id, + action, + "batch_reprocessed_same_peer", + ); } } } From f13e9c3d107495f865ac565ce042768377ab1ba5 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Wed, 12 Jan 2022 20:58:41 +0000 Subject: [PATCH 18/56] Antithesis docker workflow (#2877) ## Issue Addressed Automates a build and push to antithesis servers on merges to unstable. They run tests against lighthouse daily and have requested more frequent pushes. Currently we are just manually pushing stable images when we have a new release. ## Proposed Changes - Add a `Dockerfile.libvoidstar` - Add the `libvoidstar.so` binary - Add a new workflow to autmatically build and push on merges to unstable ## Additional Info Requires adding the following secrets -`ANTITHESIS_USERNAME` -`ANTITHESIS_PASSWORD` -`ANTITHESIS_REPOSITORY` -`ANTITHESIS_SERVER` Tested here: https://github.com/realbigsean/lighthouse/actions/runs/1612821446 Co-authored-by: realbigsean Co-authored-by: realbigsean --- .github/workflows/docker-antithesis.yml | 31 ++++++++++++++++++ testing/antithesis/Dockerfile.libvoidstar | 26 +++++++++++++++ testing/antithesis/libvoidstar/libvoidstar.so | Bin 0 -> 348192 bytes 3 files changed, 57 insertions(+) create mode 100644 .github/workflows/docker-antithesis.yml create mode 100644 testing/antithesis/Dockerfile.libvoidstar create mode 100644 testing/antithesis/libvoidstar/libvoidstar.so diff --git a/.github/workflows/docker-antithesis.yml b/.github/workflows/docker-antithesis.yml new file mode 100644 index 00000000000..b7b35d1207c --- /dev/null +++ b/.github/workflows/docker-antithesis.yml @@ -0,0 +1,31 @@ +name: docker antithesis + +on: + push: + branches: + - unstable + +env: + ANTITHESIS_PASSWORD: ${{ secrets.ANTITHESIS_PASSWORD }} + 
ANTITHESIS_USERNAME: ${{ secrets.ANTITHESIS_USERNAME }} + ANTITHESIS_SERVER: ${{ secrets.ANTITHESIS_SERVER }} + REPOSITORY: ${{ secrets.ANTITHESIS_REPOSITORY }} + IMAGE_NAME: lighthouse + TAG: libvoidstar + +jobs: + build-docker: + runs-on: ubuntu-18.04 + steps: + - uses: actions/checkout@v2 + - name: Update Rust + run: rustup update stable + - name: Dockerhub login + run: | + echo "${ANTITHESIS_PASSWORD}" | docker login --username ${ANTITHESIS_USERNAME} https://${ANTITHESIS_SERVER} --password-stdin + - name: Build AMD64 dockerfile (with push) + run: | + docker build \ + --tag ${ANTITHESIS_SERVER}/${REPOSITORY}/${IMAGE_NAME}:${TAG} \ + --file ./testing/antithesis/Dockerfile.libvoidstar . + docker push ${ANTITHESIS_SERVER}/${REPOSITORY}/${IMAGE_NAME}:${TAG} diff --git a/testing/antithesis/Dockerfile.libvoidstar b/testing/antithesis/Dockerfile.libvoidstar new file mode 100644 index 00000000000..d9084af3480 --- /dev/null +++ b/testing/antithesis/Dockerfile.libvoidstar @@ -0,0 +1,26 @@ +FROM rust:1.56.1-bullseye AS builder +RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev +COPY . 
lighthouse + +# build lighthouse directly with a cargo build command, bypassing the makefile +RUN cd lighthouse && LD_LIBRARY_PATH=/lighthouse/testing/antithesis/libvoidstar/ RUSTFLAGS="-Cpasses=sancov -Cllvm-args=-sanitizer-coverage-level=3 -Cllvm-args=-sanitizer-coverage-trace-pc-guard -Ccodegen-units=1 -Cdebuginfo=2 -L/lighthouse/testing/antithesis/libvoidstar/ -lvoidstar" cargo build --release --manifest-path lighthouse/Cargo.toml --target x86_64-unknown-linux-gnu --features modern --verbose --bin lighthouse + +# build lcli binary directly with cargo install command, bypassing the makefile +RUN cargo install --path /lighthouse/lcli --force --locked + +FROM ubuntu:latest +RUN apt-get update && apt-get -y upgrade && apt-get install -y --no-install-recommends \ + libssl-dev \ + ca-certificates \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +# create and move the libvoidstar file +RUN mkdir libvoidstar +COPY --from=builder /lighthouse/testing/antithesis/libvoidstar/libvoidstar.so /usr/lib/libvoidstar.so + +# set the env variable to avoid having to always set it +ENV LD_LIBRARY_PATH=/usr/lib +# move the lighthouse binary and lcli binary +COPY --from=builder /lighthouse/target/x86_64-unknown-linux-gnu/release/lighthouse /usr/local/bin/lighthouse +COPY --from=builder /lighthouse/target/release/lcli /usr/local/bin/lcli diff --git a/testing/antithesis/libvoidstar/libvoidstar.so b/testing/antithesis/libvoidstar/libvoidstar.so new file mode 100644 index 0000000000000000000000000000000000000000..0f8a0f23c3fb7a349788bc8d5532dc71bb821c68 GIT binary patch literal 348192 zcmeFa3!Ga=dG|lN>rI3c66FFYB!QCzC$i&ok+I__z9fQ>NZOUwlD#Vl zNo&g~7ZoT7UT7Bxt+=!mkWv*A5O+}msFW7H6sTxvKY~Ilcqxc-6L)dV`=C^Lf8q2tMO$aHp}!(EjNQsGoF@A}VNRG#zK{REIsp0DLn zcDb&)PFd`$K7ov_JlBqhVQqd*?UPbIWjZsTUF{{)_v~jf7pab@i&aoPj3JGg!+@?|rq`&`pr+M_&e!*k#TRqiU`xfn+lIf;hQAE_GRygB z^N(!e|FI4KQyczf8~!dE{(c+&2^;=t8~#r={PQ;an>PIWHvDHcd<2bnsd{nPaHkD_ zk_~^V4R_mcj}7;b(35{{mmCpLw-S{IU(N*zot; 
z@Xy%rFWT^L+VJn$@SY8S%!Xfz!Evd2d7=$}iVa_5!*8|WciHfhHvDRH+K)e+?Aa*ntxc^FX;HxHkz7OM^*egbo?)9 zZj2}nv*zbVns@blXp`pm>HK)~d9iDLGL(PD&uTjX&B^|2G!HYs{!H_N&gTu<{v(=~ zS^0#?4zvHCntQZgWal!SPlJ`u4Vu?kes*duSbUu=l+G9e|8EuMT=KtB%q{t`<@PVo z7FUT2VL=A6b%ANifqAFel9nE4GKw*Rx&2E18yBxeMaZ@%r==sX@j~pU`}-r}EcOf7 zm!J73CmttfpmJ_0<6n_Tpy4^h$8jyn{oGaZ_yyuU=riU1mFDp=5kMWt{Rdz5$G95C zMH^))Kg!;!zP*f@{z7pP> zfiJE*N5pmRr^&cS5KbUG6QQ%|*!x?z#qPT5+wcG0me)P0QvS-l&tLzZKXhN&Oz&{! z)Bjo=yX5rCy0^daW7)*#epcMH<0bp=IeE7Cf1dxDSG?#mM}JuTi;w-)1wY;vy6$6} zuK9BQe%O=Y9)vvz@^ctLo=>{C1>xBU^pW}9fUr}8yP!`**pDEu!_*tGF)DxPv)8h>8Qag$`#uZ5%CaptuS5tSY(rR!a1h}pg!>TW z=jjNO=GJfj5A~sFl;#J`R8W7bo1i{2kc$SJ{VmzHT?p49Jf%;CxLR-DXkoNT_hqzC zx>0Z766=3tTbCai*KWo34wDG;e(hSlEyZKmmYcWh{dLd__4aD$EqeP>trtV(E$7n^ za8Zk=BHWE2KYoOuxuxeAo05tArfe>iiEYYeqM6N}c>c_UXKwO=bpDo^$#l{?vpsrh z;$(jF_H7%^q!Q6=E|S?ao0!_RWyf!t)S7Vfne85L^4#RPv&qCQlXFuO(Q_W}sm60^Qoj5X;SUUX!(OXV!nTu}m+%h+{`E(*N;mw}*OlP-6W+x&uC!cdlrk_uqO(df> z>F2gg#j|tX*>vXg17|X)PCh5&IXydb`b=hWA~KuZvf-r6{f5bn6Sv5$3xv6i+0@2u zaw|G3Hz(z0Iv3BxB2l^Tk^9psQYdtCE|m<)63>M~B6RTh9ieC}6FZs6=3<%SckG=_ zC1b}UQ?oI3Z0RGR>3k$KlSoEp6A#41?S~KEwl_4nadP8U5ej7^$wUq~6Piw)3FR`8 z=~yT|9XdH5$wZeq97-UmjFXE@pAJpOPlskAiP^sWEg}@lBSI{nTgvv*SlOJJf8c@8 z)VW+NE6mO5R3aIgnN6hCZY+~YWkS(NE@IuEi)FKslQE6tna#8_v-8=w+B_M{g))(3 zG&R@XpN>Z|YA2DLNm*=~yF(^VpV>SaN=FhIovC#4q)M5E*=%Y)gOW=|=3)Y;nU?>= zl4pc0BI$iBgRF(-Qqg%-RWg-J%$)0Ekx$57L# zmK^F)UF4xua(b|XhSE=i7J$qn<1-eIkk)Jpp-?27jiDbfuQE87JC}}yrlc=}j3ceC zFSDRRp)*!fk*QQBC#H4NQddTFHk3eLg8YTj@n}XwXCqOFP9|o~W^lNl^~_mW4xvnR zs5mrbg{0}(j2V|IP>nqLxoUshr01-dPs$JRH0Gyjf0$P_ck=l9e!;=)7tW|r*I@C3 zYm{9#Rwg;HO2!!R`pwFJgj+x@deGBjFbH~r~Gm=9@hErFzy^vH&I^34b6RwS9Lk~8K?XQ7^nOP8K?Y* z887Jkrx~YqlV{wg^I2d#pm~w;y3YRs&SL;X*iaq&|Xzr#3Pr(MQvhC<7B7AxcIp$pEBcQr@}bdsWR@^K-FJ5I)Z+K!8HvSTn# zcHE2?v>gxQWXH=m+3_)6*LM7jlbrzLWGBeD_@yeJFymw=&N$ggGw#!N@{E(60^?+- z$aq29Szw&(lo%&FWyb5;PK9x@Q)Qg&)EF1PQsq-;oa{6hCp%5XecDcoakA59oa}TM zFK9bm#>q~Pak3*;4Y&Wgw&P%&>^K=GJ1)k>W2$@%#>tMGakArK+^6k$87DhF#>tMK 
z@q)G!V4Um(87Dho#_QTnoN=;~W}NKg8JADt<)^?n*(owkb`}`-X*(sx$xfMZvQuHa zpzTx{Cp$I9$xfZ|y0+6`oa{6iCp#_1gNyl z&~}`RlN}f1WXE8tM4@wT?(XPoQ=7$-YH#*J}RK4HemPMmSFlV&`u?c^CJI|atcPLc7lwzI%E z*(otjcFK&mwVev%WT(nF*{LyZT%gLQ&N$g=Fiv)wjEA+I7UN{6%{bZVFkaSnx{Q;Z z9^+(3TsYkR+uDwUakArNob0$5H!f7=V=zv3+>DbQ5948N$ICd`@i9(z{EU~iodDxx zC&)P22{Yc-cH)eaoiyWQC(pRyP~}r#oa_`CCp!y_hqav&<7B7IIN7N%Ue?7{lX0@+ zVw~(4jJLHNH{)c-%Q!vf@-c3#R?pM?HauX%gN%E$oiO9Zgt8xJ+^6HG84qZlXS{v6 zvQuE(^902g7#I3EMTv3pw`{{JHoVF>`CDT={Y;ggI^*PTgK_e=$#_%y+hRO@g|gpa z+@<~PGEV;XY`A#baJ!}W4#vq}C*y&=DnBm9$zOwU^4HCHUHj`{y!k|B-^aK|`|D@C zeYJ`oV7zdR;z7oR=3&Ok-#Fv-2b7&O<6&yIjFZ0w#;e-jBI9lCZ;5fg_P5M9)mO!a zS8aHWaq_p$c==}KZ-a5F-zMYaZ;SD=_P5QszJ9xmhqb>w#>ro?+NRywa3|yBuZ!{W zCgrceIQi>loc#4LzM%c}GG2I+vhQa+ul)@$PW}dMc-V%=87F_!j2G6a{Nx!Ye+!JK zbvrLIUeNw7FkaUERhjVx?Qex~^0#WkYc{;jIQiRP+_zi#+vHsP)nc6Lx6OE3``clB zLDz4Oae7Zci#6%}JNfI-i#R!V+He=+kZ8E-axAM2eIMr{Paq_prxKI1rWxTHa6&DZpxBd4+jFtvkD!DYh@#>rnd zw9THhKocv8QPJZPXr}zcN$=@R5&UL!oGTwfk;w8q(-!kJ}`GpjG zDvW!yzct3=+TS|kB+EX@Ap<7j!x28K-h6 zFi!D{jFZ0$jCarJcFXv}gzCr2jFZ0=#_QVOD&ti>{?<9y{x%pVznV6@Wy9NylfNCt z#RlbXmvNsi=N{wauefx$-Bz`~4#pSseA2}@tphL^Cx6{G++)MNjFZ1U#+%!ezkbHa z-vHy}Z;m+^O4hfN}CS$T-z+nDMmsH_o_6`bJr;`CDbYs_VDLcv$;eXS}NWt0v<ev*CWm$=?9u-K_FA$T;~MW}N(u zGw##=rWx<*_FP~*t^F-B9=J;7XMyqRwThP*7n+wDuj_VLVO(6K>{J=|XetCQ`Riib(Eb{XFKB-~ zjMI8AFXQB|&xZSLcz|*8H^_MP7Ugf4aq>6LIQg4q+^PM|Gw!)k*)K9q>(Uk&Cx1&e zyllfOjFZ1r#=}Qderk-9zjemR-v;AC``ctZt?ReVIIW-SFz(UsgSw1|^>x}~ysG1i zD~8)`L0^v!#_O6p85b9;a&R$D{u+#TohrVY@v5F*c^Rj5em=&je*HE)V8erqQ~ib+ zH`c0r#u=yjO*8J%<&bB*t^F-9UeNWsz_`%sv`UPVzhxU!G49d+`WdJE2W)uIhKCs^f8&f-x2gO3G~?uNp7F3tl}~~3s`j_YcwN_T ziE+R7x6C;CTe0C)8(w3a{H-(YlRwObPlIzkKWQ@F)c&>@FKd6>jQe!`b{P+Ae|wCR zzv2lt?be1n87F^Tj0fgaeHn~X{kjt{T#{S7cq{swJ$*oMa$ zCx6q7FX;X`&p7#8V4VCdGG5UBE^w~>Ei=BL{jD%g{#I>x&4$+*Cx07^8;4c>HW?>> zTa1&xZN}5u-wxwl?Qf59S|=@@XwzI1#zi!6KUk~Htub1(#_SeU_ z(ARH(aazwEWSr_ZY{TO=Jk2=yn`hj2K=-$t>+7`0IQhH4x%RijxKsOEVVu^TR~aXN zYc{-Y!yAl~zfH!|^UB{A<5a(G#>w9f<38 
z-HfO2Q2u%tCx5++lfOR3J=$MC;|u!!D9E@&uX_$NPX5Mic-n^N87F@WjJKbq{4Fw0 z{w^?1{+1Xww7+G>o7&$hNdP#!<&qgzb(d_)5_mAVb-0Gxvse4;WSr{PWy1{{?q;0)^)PPSs{HjbPX78BCx88n3+-=!@q+d@%y>Zi z8)uySP22Fi4KFZG{uUVz?^FIRFi!rK7$<+rjCU_pb}Ee5wZApS4(Ehp@Cw~na?zZ6`#>rnV#^Zp8}4JA{Pi>53@LvDjFZ1X z#>w9><7w@0obj-(-#p`P?Qel`^0#Qi7i@Tmaq_p!c>9R*x57C2TVwBd4e!|SF5~2HkMZt{mA~Se;dV>@Iv6K^ot$fbU5xv5dv-G()c$%H zCx5**+-Jl6jFZ0s#sl&XM&J`tNb$zPuh_uKFQ&A*PR6NzU5t~z2IJjJRJpksZ_@i;#_4;`KE}yk zzYP!A@F3&lZT}zCUzhxUrnN=Sk(Si*fST zV4VDQGhWyJdKee_`t>q?QOg|mg^zLiy+=Re?$zpd00NBD_v?d<`*i#;=XyWRIN3=v z9@KX7j8nb~jFX)r<7sVYfpN-5nQ^jTVZ5tNI~WgZ?qZzE-C#VeV?4%j5Q~P9`{3$R_c8ZL*seLj|c1nzsoigL% zB6Xcs7$-Yb#>q~NaYNgwGfs9IjFX)v<34St#W>Ydn{l$!VLYJibUD}iJ;uq7xNf+e zr?njizMeG_NpD_G^rnbpGp%2eh3A<7v&CjFbH~<5g|H z!}x->(`CG>d5>|j@3?-rz0q~zWZb9g(ZzUJbAxfR?_r#-A1~ttZO6xWS#v+*WIxC_ zT~}eo>)KA7@wVn^#>sx3ahh)w7%%GUw8%JJ*9(l7bo>(I)Ss6bCp#6+wVf*CWT(zJ z`O{!r_|$?2Cj8i%L7$-Y^#>K^YzREb|Bgi<}2{Z1~cH)eaoiyWQ zC(n36+bJ+kc8ZLXodw40+D?gavQuW9>{J*Rk5}bWWt{BP7$-Y*#(mmOgK^47lX0@s zV!WX3v>7Km9mdH{m+`u`(_@_c5>Fd$w`9k`xVS`>kCSoAhl_EtV=(U1cHE4U9WUdQ z&j91?JM_43!^4cz-;YQ$?$hOxXWWo~_!XZ5gyL#=|;(m+`XBPmgiRk61I@-iSLGr~J4WFY5dl zj2Co%+>FhI_}Cco<1uPTc#bbe}#Q#sddc!P2BtI2p(`_*E+d`#uP z&3Hlk)n%Oe-yY*_?U%T5xP2PhF9+k~m(zy37$?6B&Y!9B?`FJxui_rY#RiX z=cmTFPv@u3ctPij!&C$Dwhu9nM>6;+hg3gNwrI%m99s!<6u0c z?YI~xJEUYMugk~H;?HS29>(dsUd9tTzMt{5-VZQNc7lx0XghJn$qp&cf1bsUYC8qS z>AXe8!#aM6@w(nGGfs9YjNhm2)EFl_q&)u(7XMytr^z^-x5fC~I(~<7x*zE>PIh{X zAJujo`aXv8Np?tyQ~PnT_(?tAGZ?4yx*5MipV!NHSnvB7Cp&(|4{AF>#>oyT&wrf7 zKcMZT8K?8+8Q-Jh7a6bW{RPI!PKohd+D?UWvO~)AUt{riXghVr>AVfbx9a#U#>HCI z|Fjt=I~~R+w4EN~WQUaJ-=WV#e51DGWSq|HVtk#B?`Axp_dSf09WUcIX*+(#$qp&y zliGif#b2ZCgc+yv#u>j($Imlf*82s<$xe~+r)WDR#>oyT&wqu*zed}sGEV2MG49gw z8;p1Lev@&s(_;Jy+D?aYvO~)A-(&Hw(00VS;dvdM*TMLuI=+i>pWZhZCp&J&FV=Rv zjFTNwo_{}!zgpV~Fiz(UGJb)MA7^|)@243jJ9);(w4Ea3WQUaJzr^B?Xgg)b>AV%j zf2qfd8slxfUuT@`G#LMxw$oyq?2z*OcUb(Mw$o*t&f8=BhdRFF=Hd3{(fdxu$&QQh z?`b=3#>oyT&%c+&|E{*9XgdYQ>AXe8KcVB77_aO7GUH^Y!uZFvof_k0hm_~P!Qy{R+i5aR=WQ|m5got7 
zxO0PQKV8PjPLJ^qX*&*`j`B%%NQqPXak2Ox)OHNU>AY^n->2hy84v4yALC@l&-lBw zogm|6hm_|(&f-_KoiyWg-aO-P*YS&tSM~k^<7B7A_+M%}6~@U9DbIh6#eYcKsWVRJ zZ7}}lI)00Bu~D_3HsfTc!}uGtogU+4hm_~vq0d9Sr0qBvr}MfPe~pgsW;~$xJ&cnb zFXMlt?f4ldJEW9PYX3nN{}tL!m~lF9objTLpJ%+R_X~`Zog(9ZpzV|xCp)A({}mSh zMcPi4aXN2}@fYa$4aU2AzsWe+X)*p>ZKuOH*&*fm@3Hs~Xgfl`ccu7rUI*i6bbJ@% z^xoEBob0$6&uTkf#>oyT<%imjpT&O;<)^EjBT{_#UUfZ&S^WB+sCaS4o0_K?Z)=`s zysLSE^OCY(!`Zm{xC>+*Noa1ZB)RDHFWeenVF@vvw!{_+ni-eLUIzbM{ieD&Wc-eWwe;|q^E zKeeAf{$~~6!T5o{SKP_?2kVNv7{5fH*I=Cdax?z7HD$-c_^yvA?q&R=4aI$o_o|Bf z8GnY(e}M6?en`a+GXCMZ;$g<;Kd*S4@z;D&@igNZUCw#Nzxo9gzrgt0-lKSt@fW{S z@dd`8{5i!-jQ`>h#mkIeuIr`3_=|M@tBkMG?XbppQOB<{?$h{+ScTujUtmS@YwwaI1a4h0`@D%>^v{B8&Z?h0Al$Cv4%5BM6OKxGE`iFm2(| zH~Qo){9=O8f`wmV;YAC7yoE1V_@x$JvT&z`mo5A<3$IxCn*%#;Wt=#%fg>#;cW|FW8obOztO_G7Jie3_bl9P;bM!bJz3{#E!<(@>nz-9 z;Wt~j%fi=NxMATNEZlA3S6jHp!Z%vD*TS!~aG!-=Vc~uYe}aVvEc}TU9<=Z$S$NpO zT^1g<@T)95ZQ)P0@VtdzW8no0zt+Nw7XB0qU$F2^7GAP&kA;^le8R#j7Cvd=RSVy2 z;WZ22V&QcQ-)i9v3*TnpO$)!p!dn*ZweYru@38QWh2Lu7T?^l7;XMoAW#MA$p#AT* zaEFCI-NKy~?z3>0h3~O&!@_T~aJPl;w{VYzAGC0Ve{Kpj(_M{@u+M3{nv`0TqU<(cy(>$UDt}&%db_wZ3!`6!OQb#(mQqvE4-Q=ZBEclB*oy=+7z$6uu~ zY>aOe9(1=jZhyp?{uNUH!Pxja$Hd#llzWTW8zx3>dj9yrs*x{`{3)XU#n||3SBX!K z;pAT%8-M+(vH!J79;~fG=ZM44lT|BorNhyGd)!Yje82GWV_$o+c;pG=U%g7a^-0x{ zZ(JoldDS@3KVLPD?fpVr=|FW8z(7xZ-j6JcWg$&YABcqu(7Jf8CgPkla4? 
z5!ByD$Hu=gCcdiK>ii3SG9vy^7xM+*8@cL_My2&tSE_3$c- zZpHatHZqQ1y=acy_lx0t?!+-%AwL`y^^yC&HGE8V!!H^chiBE1`#v{x?3zDZjf0pKp>FJ3hMfy=~)F2?4U9)JI%mx=FQicS0( z5o|u_9RHP5n14dzbI$P>T_%3&l=wB5VJ0ezequak>P1K)_iq}%cW8f^%XO28;mnW!&8YPFe~*fl zSwx4&KRF`q8UNUjt;=3$ntA2O_-9AOce$}UWRf6D;<#~qkJu^uR&zXaUb*CWruxfO zuCeP(r&g(~uX@5V``3=%Iu!rfvAyTpzhL_tNB&FIxUPT;|9IpzBY1EmOH$S?^nz`F zIr0h>NoV5%|7S*DqT-CJI8Vwz+hea9`SeBNwXpFo7l|*+rem7HH~_uq(<7&a_`&$T zvctp|sKXaMGCKab)#9bAFm;l#FIV{T-bf2KTMu0^Uhs>5x}t{(zC3!NI(ot9FByOP zWhRlW5=Sn4;n@4e7cMeU3P_GQm>;Zij&B=zfw`;pFTfqKb>A_5`FW2&?&9ouk3V^I z;=IQ%To*g<@m1qD*&n}b#37#v9~(b2Xd`P!uFF5=2P5LMBjX zi({KWF_>9CYog+4Y$le8C8uMD63OU}9Y^nu%+ANekyJExXXaQglSrO)uN8+AxmYGL zduJN|=297PEFGIpL}ri8Pi1q7+11?nb}lCrahU+;^bMe@1D@!1A9XA z$!KgQk&H#%IJ9fc?EKo*0(O(Bvq>YJN+c22fIF!uaxUE2-2BuIV`lz=2SO)fx#@T$ zvsPy(7Tsa2iLM@sGL7`1nb|}dks`@xYR*W`&rQWLh&jJn?8#&z=Z>FC$H@I7k-7dh zib54&B9+Z$Vv#vUW7D&8$1P?NPsp5Yo($#sMX?)^sf2d! z+ATIllBwjmxzv1iqnJ%h%_q;w>N4gcvoop8Tr6tn;-T`w{+^=;3>9y^EXGL`eae_h zoLqfg17{+$q14PwHkJ!zXBJx-7|GAgZ58-uVggm4J2pQvlgNv`spJ_{7TN=~6|=xL ziH#x`&!oi)3=f*+edmBL@x)`Kt;fl#WNTu?cr9A4;8?+O;OVc83u< zaddBJ@0~}EA3Snmf9S-Kdk!AiXPa~~gkqmYBeYsTgU+Usk=aD*lJk?o5h-Wv1T?Z4x;&=jAiH|+`SuDVKSQwO-HghTqW7WNnAKaD1^&oV$$0$ zbt4*^jpbtfeY8DmIZav{%+P_QN?|%OJBw;VEu_-8@{-fC)>LhTPV5QoKe8`$=Yi0% z<9qfV>bsOorH2Y*QM1N1ADW(toUc93#*!yd<2z;b=TLsq=bdSI8p`cukXr<_2&R)+7)zYb;zc_Yy|826I$2C2)YPfz5dMix z=fwVG1YO|4nLW9jwU5*tgYFjweW&8oUzlA(awcV<`eO!~m!VpIa(;F;os-uL^#?{W zc2*rj^9Z51Q`3W8)x%h8EOf%`EJC-(at8x$G>BX*pObCg*tJ_-IzweI+aVkdq04Yj z&!a_W5ZCNMj?B;PlFj)>^bzwhX<@Z2ngg<bhM^71IIp!d7KwUfxru%mR?@S@~?~fmVDqSo?144ND!1jcg>5kR3P z@@8S*mWiOBGdIx@i%q-4hCvDHj^p0@RQuB%c0XIPQ|-6C+mBk>p4pzznC#mihPS19 zY3f~U%Tteoy2GIBdA*^=G(BO^k#0Y7A{5D-%!aaw2V!nDmr%WjVfLGbY&pidfj>L; z9yl0MrMlBX52ui=J<%w7=IpUF%4_$I9cfwidk^k+?=fy%Z&)G^OdyikJ=q>c^3Xxq z(ATXs?%iYTvYJ_R&ijNMO2tfMHX9Rb4o41)4TCN}8&72h9muBs)$rVG1=Y6X2q~|jG)ADQbH>o{Fw_Ap>ih>&k|p|ov)YW*d=51% zXKg2AtNX)>Oc+Ctfoj1RG-wM0w#%G4paRUnV{z+9#!g}|ITNEP5qi!TCZJKAHHzLZ 
zlQpj_uQ;%8zkw-*@jxt-(#A}d$YLgd9{QXSF;JTrq%le#$fV{1scga=7`Az zqaTe$&7RY2E9TZd%pA-Oby**kBOW@?T?12s+M)x>T-jSe1OiChN0WBL;{va_)%jM2uR zPOTXYVQf6TQT`Lk;#x+|mB%sA%;M~O(02%`1#}CT^{*3pl4TA$a&B*!6MSf%kIxnaEhMJxmOLp?v2yUPfnBS-7XHorfmKnz#?0RDgeE@C)WyPB5CuOWC zBCC4>9nYGhWk+vBz%)CCd8}$Pm@t{Q3B!|E%Z0J5Mf2Vylt@#@Bj*CU)@0EG;a-NI z#czfBJ(?cYqDbZtQ%QByO^!=+-dT-8?jrGbaimIU8B4dpg`6 z&(0g@!?GALu!DKAIg>NHp*yEe?G0p7xs=)M+>Pl=Dr2_46UihlC$m#iJyIfxshio_ zFh|?4d);94GOr+NQ+OOfo4fm4mNvIxw@5^H4kpCry^p1UtW`982S~Mz%L$z}=13T* zQn?%+11w$dxP1H5Udss<)quX8rm^FEg+4rcu?}8?{*7A6(3q7;q^&Jm^?UP4+4C*x z9js49V);aFtudcWJZC-@ibYSxvSw}Kxk&$kgLy-emD8HFVlT!rvx+c5LLV5*_iJF+ zu0>PXJ%NMA1V%kWKN2u78p>;N!a$F{E${S3}2Q%ybT^63K>N}~y$#?9NP2wO*GBa;J70S{bka509=e^S! zx_6@9I2%&WkZ~J^%K%S})lHbXr#s)NcI?1VHJdtV-gM!%O{F+$+_-}Kul2^!;X5>* zoBJ@Jp%=#ps3x#65m3*o&Eb%yk>#UV#p39FEUf+-Qc#N>C)*lc&?w0;HF||x`YR3$rv7U;eJoO zE?5(p5qLx?)@0rNTMDyboJnMI=yxoyPj22NtgiP&SDf#m^WdOS!+YZg-=gOS1)%NE$Y?G2X2xT&0-2SbNvz-lnoy3Su(8$ zZo3yHzu`n@sH<31E_yn!M4>G4lt;wmCVB35D3D3~AI(04=!)4mP zli8U~;ZeLvcMJ2z+@5DPzyz4?so!z#j zU+DJ_6V*VkT=0Dr@n;`df{ea(*BmbC?Z#(euki&7B7E zp`rLZvHshJ(?J0&eh=DzO}s>7>OXbznD2!26alXQ={+(%dbwU?Vsm&^v}bnqU~;IX zsM{v_Z%DrDz&#TtJnDH1I>a-H>6md6cRKs;JfM5E(3wQ+?2a8-`NljW`VWHS*lyg2 zUJlic`-Mss_om#&jfk6TMfrHYLl0gTbpe_4*w*v*aGGmigJ7nPp26 z%FYneZ$9S-Qty2*$XQ}9aqX%n66Y&#^%6tiEvMLnR~?u-9NmLAfhTTL?>^*kGa(<5 z&Wr3hG{QNdCSbz6|HBJPywEW30V5a`P+pch#;mw~HZ>KQWtS!<@$??UGBT|Bbvio# zo+JAX?>`zkaOVm6K6sfoFa0^+vSWanKP*vNOLcW>q@9^UWtg`;33=}=Z;~;S%tUZf zy!M>jy47sXdbI@#S@w$PwRFA6RzHik9kJ`t+@>S*C*wJM2V|#wy|~+avS|(xG$l>l z5tAz)Om;-yPn&l*$IQt)-pfUA!xRzSoY@KP?GHa@uXhlG>S4@m%$@57L(GN^{VotA zk)=8HTX-aqMTf56^$* z)5y_h5(9JWtmVbi`7a=vVJ*Ar0y8D+)F7QaxoF4&+pQmK)BKX2y5jvQh9q19`tFlv zMjOykVdVuyw<(F?2WIJMdKP8-_+q(-7I;Z7oA2&j24?rUwbO~1y!YCu8nJ$*h6uBx(msuKx8N~^`bYw72&M$sZrS6PUX*q>h)UR8zGyFb(#|}&=k>a>~6kwJG zTPKv6$_v~{?!~obcEB;LViV8pH$^!mwHP1dP&YvNTqo9_Q!g<>_2*ACp}-RsLwh0b z$|r;7s-!If*X$hb&vNo{#(G2CD6qBykAR{yaO1_LT6^dg&&N~?wHeD`F0a=s4c*Dw 
zcgM1iA9~>M>pznX$ro|wdswQTr>!|Tcz`f?_=UDG=q6M&Fvj|N3Ns0~Y} zzPQU=X3Az=w_(oOFb>mFQDJC;a59i=6fN-sw{ zvVq5pJ9b#!yG+kUvRNP+L)>dI?#oA~MsyZyW1<=L3S!-Q^La}MU7@UJ+1r`@v0R%q z=p*Etl%aWpx`oN5a=6^FRu?OKPsTzyx!MP#*z9coJ&r0?>Zfui6XNB*+ptGDCi7J zcgQH?z2aYA`0 z0hU=c9l{GGxhe)7jrl5WaIK(D&`BND^ZUVwDNA0Cn0R5FNa7kouV%Knh*>8%avPR! z%LQNN%i~*Q9jTYeTQS2{-vyv%Hq__IaXy=x$I9)He9p1fP&;^hlO2j@c67@vaZn3p z`j2wZ`RX@=Lys%Ov0QZcsl=ces8%w#me+^?ZmbP@1|;8nnk#^0e;kWmr|!_?n;!Lk zTRsCB8YBCos?D54bu)NopH9T^gi5}Pw?0V_SlTn4kA%#ZO(FRNIY)CTGJn0`01sHAj6%LG(4AS_(#mz``sIXoVhm9Iba5)Rj~th2$2I{men zp?pt2XFdTh2c5y<5;Dujy0B`eT#$hATHb2QO*!Bua#<7a*1X>y6gBV#@!#t&#&{lX z(Q}bxVtx+K8RxKkZ#K4LhxOY!!+Y+bl*h~&xLP7GdkV(lZ%7X_YM%0_3dl9Wp} z@tqJXR93I6&2=0rXfIUCgK}TK#hCM=p`uv25O?f2lfb%6_W`)B?rHBe`<(+xyxP@` zePYKByzo-@EupiS2o|(vb`IIzH$RuYTiQuwcHd`~&!9AxDWK)rp&n6mIbvM|CJ9uE zS^1?btkpG6$J7dVo5I97&fktBx@8Zp^rhl2zB-MwXv_L~P>xW$z~NrJ|CG%uyy&e zbET_xBYI3Wt&ndFB1y}oW0-60;Neo`i1(;5461|v4Dn+5*fhFlBZWthW<}U_!=V{; zu&OVX1*qE-bpX2C#?l$xYs)zDW33;(KxRc1w36#DaOb{bm^A8FX*-XvH;$lBie^&j z+wjas_eofXVP25OaXl_mS$5AF*4=8Z$VD;hmAdj>+7Z~>a;v_NlFK;c!d+_&bsbyV z+2Vd^aZ6J#m}p93cGvP!NXkj{X-v84H+nG1>96*sL0`TnpTaEvv>Zpx(qFcHQQO*d zPz@3L^a!yruG1gHG&FBu$+_&gu`W1j426>M* zYv?h;$Y8|77gyDKMO7g~@#IPbe5p3^fLivwMD%_?U^C82b7y52(x2m~)5sTS@?L52 zC6V2TrR=$L)DtXoNy(xy?=_dZ_V&oPoRR6=9dd$#7O&2~b02Ob@in^j#vXb5D!(L? 
zN*~7rNJi<~W>>mSv;6hW&Nbx9zSv9zuRP7u%Dd*>x2h~~t32LvmAwY#JFm-FZbvQK zv)Gn_Gi!W)e8!UgFt)AxWRKFzsE@G^dMq^wz}>4{U}Wj9&F-3hGm3s|iuUQZt<1|( zFU~Wt&em#AzLLfA2Rt0YcT7`w0FS|me%t9fo$tup`G*6FPkl>lZpd8f$g$k!P-tRe zi&|t(r`oU03pzd-3Pt-*qWVY0;JA8!95T&Vj*}fyZEtc*2#=7>^;jxNKUe2TD{WAE zWZHVhq2$pGqvE&@Ip~WK9z^&Q!WR*~h42f6hY=n{c+)+j;v|Cq#HiSyRc=%G!QF#x zVp1Pv{7&q9-!m#22nIp`KLg}NFc2IFRj?950U?gieHZK^co19&&3EEgO%Tcm?stp| zCqm;9q(dkn6cFMFP5gdJ8X<__LvSNF5$bOr6%~X4f&(FpP=6cFkKjP){uS&X1Q2`( zoeFFr6cBoU37ZH4q4QR71OuV|7x-xwgfKz?p@*Mp2_iTVYHxuZg!Y>eAEAa2M`*kW zapB)%@c9P_Uq|>ignvZ12;92=w_qPYknw)_rBU(me;W=}u;db)$CLZ(5$|9BU8R2z z+QF}=EEUg=y#n$7xx0K=K%D=Mki##)oX^hj@1OtpQfVi38YlFZ$miJ~jEWl&_FXt; zo~wy%O80TZc{9TE5#EQmN0Fah-y0QIBYX$pQwTp99}|V|qK&K|ympm}`$sxWcg#NR z0UhUWm$175cK%)4dJk;85Wk=Ei|;HB(?4I#mWpS`zO=&hYk#)%dENs4DwJ6md86R7 z>_7730jJ|qzha3){MkQ38IO#KD{*|e@HVl0Jl6_sS9Jp;4}?8y_=6>w^|;;k$1LgmmTkNEk{j@8spGHw)zbVY zXprCy$9|yT-&dPZ<+$yjpEPeASqky-sb*V>0Xn zlloOd?<4Mdo%Z86w(nZ}0^K!Z;yKu+xRmyMdJWRdBJ4wW81`O_@F>plAi^xdK7?x! zq>UdwbxeE;;UR?QB9L8qzT?;xtmZAPa6|YgZ}VF zlnH{||Dxs(X_f36H~hr$YY;xQW=tH0z5==vVHJXmBg1c3-3gk9Jb_P(y}d&&0s&b7QQ2oGCa$&P3(8AYmg5*#|N#j|o5?{fH3 z!8P#Ph91suwebPAZ#`z+{!_$z5yGRW6M?p}l5l2|iWh=jh|t|QCc+zV{`F(xwKuC{ z&xghl?$Y}^pf?~~jL>7}`6~9C7WOE%D+n(^DDNHkH zienuF`m8%LF&Vo3@SV5emz_dKaO1j-KQ!JVpKI9OMJ}IyVsrn&$P$*8PrtqY+gwX! zV)^tFw=DYQ0E?mJ(|hH6H2H4al8Qp><~kbctoqg33&3es;L`T{>Q zu!8gxw4!{eHn4*9oBL}jmZ*glq~A{Gx2$OId$D}_?dJ2&B~HD<`k$DjcgjoISpNJI zlh)Pu{fz4-uzdPS&r;u0COgZgpV&tEr!S^bTA76vq~EGnNYlk@O}>)!6MDIXybg!b zuI&7i6Y?wBi`GVJODo8~XYe~Zi%!3S{BJ)j-vKO{`IVL5=Ap$SEDI~i|KO`>Eb+?D zzm zvi#q2p4D$wQ?fW0eOBV%U^!KPRbAii3erznf3l&UQ0HX%^j`Y42cFcluzdQ7t?YMj zhU}~)J^Pu&Uo-vW1eQNwNih}*SijJvt*qeu9()Z<|8CFFsaNX1ckzKe5xdwS0P75aaR{+WvzihWu@|Co|tp~9_9>2M~{+oQ)Nv z-)8+9;z4{b_~6X`zLDQ3ee}0X9~-2nYRXI3?j_~sAVm_Kby zjRATrH|a4C@L#&oabRx(Ki`8@3Tow?w2xnk%q8&qPl{NKnf4|o@iXuE-sqBtaXviJ zQ47%J55=1EPpbtSpHyEOuuVUSRX_Ok(2)6cRcl(gE%R$0802>g{WSGb77>4|{AC04 zx2r7?P+mFQUz^_tQg*$vn5MDF&paj4ndK%U;hSLn&yfj_T$rU6Y2k+)@k1GjxlrF! 
zza8}xu#KVglUM~YgWO@him&3%uEfkx`iWaY`X`K5oPnYAlkf%SH|4>{YwoH%}IT67BhjIdGWU_=f zB?9>W&?zkE3gG`kvtmvJ@c*GXkrx5{e<&}`i2(jTbPh|l0{H*XjEIT={y!8I_?<8O ze+a)AhXXPk!aw_POol`FXCDsBaLC+9$_@M&hzQ87q)6ds!myDNCq)4NA3BM4NByd7 zgVvAl{xRVR;W;(5gdq1XV4;Ei3PKG*9{(!h&xY|%3t+$wW$Ar6x=j;fZ5qj9a6?#9yUW6G0Cqfbdf2>5f5Dp=95YK>ip~s*l zgf>DQ;hP9mgdZS0@nvJ;=g_A>8_*k}HE0uI4BKUB3!#E=5%zt5G$vk$5Jh+;LJ*;X z@Lq)1ApA9gj8jKAif})|I}i>dycFR8!kZCZxS+znK=-|BOnd|3>Q}=K^bY8Ap)$@E zY`d`?Mi|BR#n2K$7sr~=Z@p$r{O<=*UkDo!evJLgpizV;ARI&}BD@M=4&n6(M-gP4 zw;=cs?nQVQ;TD852-40A5JpO4;`jc1Ox*Sc1nBQUfBC0)A5$I^fj5nbFTZ6>eCN$5 z>(?Vce+EC_I3|99H2;oc&jyeD#hA!EG$yVCdoj}8|JE__`>z`l?}44W|MGVio`d{6 ziv0fmyHIEEMw@}Y9zpJJe$Q}tE7&U$MiEaQza6>{!Gm~5p|6J45mtZjw+pww59Rz3 z)WwI!#LJ*x`Rg&U4*LIx|L>K-7tj`{-Y)yF+IB;qjk-MmJy9DIe~0>w;!g+4xH9a( z{uMa>9IzhtUyc3OV*7gt6@q}d~KZX4gw%?1;L^y+U z^d7}^i0wBZ{w(yf&^F?I9rj(=UW?#Cygx*E1=#;&co^sZpA5h4^?MP@_e1X;6F)(j z*0suQ89t8vdvSf=kKjUCKLg=U5FSE!9>PZurV(VEe}L{nxEtYJ2(o_C2sa?S9N`MY zc_MkvAatkky zJ$CCVENXx1Dj_;w7s9}{tAphLP(L&b4MOwKqSUtuQTc`tCG5ALHK_Z)glIzj&<->X z6Yhdu-iLf;2X zLmN={+rhqz0F6VPYmkR7e1l#B^?V9(pp{1uAKHY*zlZeiLVmxG@`F}>fOPMM{t$Mg z{s`qE^~cDo)Stl4d!Rk|D)pyGC-rAI*L&gT&++%{rTzl-BlVX!kJMkm?)#vR;kZ=% zt-S@QBO{^-?Tn5HJe!~_%9j} z0cif>5mAA59zP;_(BP#b!v8_|=^POysQ2;_(ScU37!lsTM*1h%oDb@ShM_HJ6`H>m z`Me3|dkXRitwGDs;8RCL7utjR+^}Q7F0^qS>_Uq-pgiz3FaH|YgT|qGXbHLib=-*Z zfx4hAs2kda2BD61hz~WOd8ij!fd-&0XdLReX$0%$kY8vC8iZD%d1wP#gtnnIXb;+f zI&VfgH_8d>hWem>Xb>8NrlEOg5n6;c-VR*{e;`~ePZj|dmk zu^#0KbwmBoI5Z5cK=aTRbOG9dR-ldzI1khXZA1Ogp5zS4e zf_k7es2|#bc3{VU8~l?TT9h1Gfx7p?9@GnML;cVmGzfKikbkHfTG@yA(As{~H@-mB zIsm_*-rJEL8uX(*Od|b3oLA~I;6JnmEkWal;6JnnZ9prB5q~p|L-AJ#ME(xcE7Wxa z?M-s1a|_A~>W1d;M7=;A0i=gkpsuZme;53PHlPJ){3xz-sN)#yZo@v*3++MUQ1@}{ zKLUFv5Fh)lyI~jmL1-2GC1^*+hl*R^-#xGc^+UbTAT$7tL*vjqv;ZwaOVARu3avmJ z&<3;(Z9#j`4%F#|eW)Ai2%^5BE@%+yhNhujXc6j%mZ3pt4H}0wp+#s1>Ut*fwH@b! 
z7NOo};W~xJ?}hyxNcU{y4_bkiK-^r_GVs25s+-UY2gPeEJI7eTwwH$pw1 z!T198Lq7uzL%$BqL&Xg2K(B&Upp(!pbPDSDM++eO+&rVqCB2O{Xkn# z_a3B!`k?L{;zJwI5;Q)Kcu>a~T!*)zeL;gz|5+S|cAzb(dDz(td*_g!M-cD#P|xpx zzt2Pcf|sBR&<3;u^*{DQim4X6j&f%>72KS2A2`k{Gf5xM}aK`YP(v<_`S+t3cQ2X(y!`Mn(+ z8i017aj5&H@EhudmY{xU6`F@OpcQBvT7&kW-Xi?36u;2O< zV=m66y*->{2Agyy=CP45I8glZM_M0p`AD5xYW16&SCiV5bQvEP%pIj z7bq{N`>iNvs25t1$DuuF7L1PwrI&^WXKEkIk)60`%YLVM5#)bVcAE7S$;LETX2 zQJfd*hWepCXb>8N=Amh*<2|qgEkV0b_j_@kD$eshw1;Cj@B85g)b#MS1)a$d5b@ zZOY@&4m1xH&qO|C8!7SYfwM52aSWr zzld^ywxDszp#`Y-OUUPEkq@XF8ie|wacB^lhn5kq2(3YD&=#}>?Ls?H$FpD$YCv88 zj(kA9&>%GaA1EJa1=@uA+bADs2Ws4lbWktU@n!e}bwk_G8dN+R@xOw2&^*)!Z9wBt z*Z+aNN1!Ec~EK7xGx7uqrG`=N1Y99o3tp=IcU z&?@|X7}|yYE7bc>LWpnT`i8E9wxGwM#y<=3d}sjs6=)Im9p6GcqeUXdYUGE-B1tqgV3f-5A8tn zP!UFcpf0H6hlmHQLH$tgk6<6#f##t-=mOO7W0Vin1+7Ef&=%AS?Lz%fM+E1C8qhe@ z3(Z3V&>}PrEkO&=3bX{RL95UPv;l2F+t3cQ2kk+fQ#k)mP+m|M)CYA#gHSIt4fR8d z&>*x7jYDhDJhTZdLOakBR7~UiP#3fY^*|d?KePo6Lp#tsv)I(qOQ>VnpxZfFbY zg?6ESs3VH=Lk(yg>V@W^c%vzb&^WXNEkG;K60`=bLL1Nqv;}QLJJ24q2X)49{-45s zs0-?Yx}ia+7n+9pp+#sAT874w zEVkTq zZYW+@3okSd^+OBLAhZOHL#xm{v;i$b+t3oU2dzMzr*M9#8`^;Spe<+++JUB_J!lc? 
z_$B;@x}Y_v8`^|=p&h6nDo*44P!}`~^+5AbKePx9Lrc&+v;tj#)}R$=16qf+pe<+z z+J*L@j#-@lSMVR|f_kBDC|=Kt|I6O{$H!Gv|NnP4+je2=CP*nkf|cbAwq;o(vp~@M5$PzYK5v*3s$H#@-7f)X@gXVPGmZUKrxe zGx#mgVen39Klm_|Ljp$O819BcmqJZwcr5qbp{>xB(9&_da|1d8Eo~;9FyYX~!;mB2 zp}V2s!+Af)7YySOXzmuu1vUBJIG*=)Kv#Z<^ll=*(V$TdHFJ1}1GFDH2rWGVJwR7N z^KT|!ALjiV&_QS?wDm~JCHMsRpb2Pk3;8~ZJNeMTiR2erdo<;Qnz`g-KIJ`zcM?Dc zKY|{h{l`*{TacGWIiRJ-Q4Z(`bOgHLqd}v73;Ka3&{J(b<+v64G5De838VupJrRCr zVFCQDl=CF&2fFm*=tJnql=F+E^9j<2cAiT8Lsu3N4sHA-<-84gI`s^#okaPd=BIft z!R^E^AwD#GCgIS+Qo^B~lZg*qc@E)il;>RZ2rWF1aOl!<^!z2_O+i1<#;N2F`gn|R z$~6eB9-zF_hzH(Ug&jgiW+3klXq@z*g|jFpG+d1yp#7gEzjwl4gS|m(=TI-u!dmnV z&7F(=E+G9n!l8e@969(qBhclfmwN^Bz!T5}wDBtBeHr;zlRs!=EvjiME@^$2*pWOSfo2L!qod=LJME;gxUwkiJjy&P}A@ZQ+kI~m6^0kI?L07NE zPRQpVbO`>!b?D=6=o9D(8h(m$LvPtYy1$|xq502{&jj|-Nj`R9AJE!AQ%-2%PU3fw zpIwAQM|i)(Ak=(|{C&kRK4S1L6w)h&77r30&NAx3JF_|40?j=#%gFEM`%&P~{#?$o zK*QYU5BH$ABz*D%Tp_>TO3XyJ40O>p0}9=r|O3Y9ys zeb8a(N~qkM9f0O?hjtiR2;B`Wg@zYXE@(bf?%kF`JE7IkrO-xbKXd_f5V{nafc8Vf z+}j<77D7j$CN%dRpGyyGzhEGQhv=G`1EroVMP3UrH zEwmrn3LS^4WCK=p@qY03%LYu)` zp|#*kp{>v-p*jDJeCSfX7hZ&1XzRt`-zFcIl3!@$<-~(8_X_w#K6E+waD$u~<9j3Z z{2j`FE&R~dCh7%Rcs=3J1T=p+_zjd3ItXosnl~Z`x)eGD&22_s&_-zfcggn`s26A} zv>qD13B5pbZ-yV*4>cado?0j`bP#IF_xZ?|@6bMI<1N?+Gy&Z$-*3e(9wPtHQfT;# z@I#kE%@u^-#&_sSXeTrQ?IXPPOXwMzxPyFyk3jpO;XBC>G#8o>x**Fiz6bp>=|Bsi z#n4u$2_1yi3w{@Jp^ea$5)K`Jc0z|Gyq$XZKKWcoexM`JPH1Tl<$(70Qjb3%KR=>; z(2<9+hn3{#7t|M2&Z(`0Hgd*j7}^Tm4ISppTKNwt-_xWI9b8YjpkdBN6#vLj&jgDY zf!i)H0n!>EYn%!`KkKQ$WGVP()E!cT`vczf`A6((Or zZXJKy2uGarh|CuLhQUu1RK>f(i6`N7QRLA-T;#6dFPHH1p;mk=z0G{B20ub@{Rsbd z{%U>jz2NoWM`~Un!asowH-bk!coDepd+`{!pZq$_@0H(@L4J?sFZQIn0=$)UPx9dF z!27_lLg$hEZvpT0!C96v7Jx&XNBBcQOTHVIddLHBhQCy}si!7Y|5d84BwiWe{qz$> zI(!k|<#UaeXUAIYY&g+BB%mE5s75!y954}q%+2X92|Gf81TVy<5&w# z=idyz+6UjBLH=IwVC1$@|p$H9BRhrk~b zUi8}=oEFXPk@OdZV$s6(F|lazRfk6l0Z=mxsCioC^B%-h854P1kyrk=LnBY}T|@y_ zBky?0pMC_7f%k)d!Umy|ejWG_I4+;NJTWOxg_LJ{wD=32+*d|!Pc5Lt(SkeC0? 
zp_8{4dCkbf)pF-AMc+4j@;5Eg=0U8MEcuI%rM(_PdCQQO^LN9*)q5BD`%=dI?LuDt zA(U4(4tqe}A0&U%qB)C$Q=+-OS(VZJ9?PG&2R)z?(Om=ESXVF1Wq3Ev% zdDSCo3Vk4XS9s)2^OIMGysgOFhP;01*pvCAzjEg<>&$>hptK-~pGOB#|IhapU)n_- z@wfd8|CG=6BX|pV0{jFUgckAFBOMZh@{NM4{+0fxJ!^V2KSd{l7nxSj_t^1XNoxX3@rN!1$1;^5UjcmsGNxOZG{ z17F~SF9BZ)?)76h}Tx@+XBh}W2>5(r) z3ui`)XQ>WUOOkZABWLA)!}zGigb2PDd^LE12cLk64T5|1Uj#nngU7&!!6%EHmZ%!; zM1Gwz)j%5F0$zw0_Hj?TJ>ccwZhOvyX9ajExX85H#b=cdCSL{Hpu^qq1m6l?>`A8# zd=Iz@{y3kdUuX|9B;BI4I(@y#Xdk@BR_%;vVLVz~lhmllEg6q~Joywn4n6?xwW|j3 zVQ{Z~wt)}%;7hdplO=!p5&5gYx54klH-L|TyUSk$|5otb;0N2EjQ z!QJ&GHd6*(4qm}$vCl<8Q}R{#J(Nac!JUlp@G2V;>n+Y(} z0;Wg47%jyU^ziEpv_Myo{s`$G?xbJoq+coNliH%OF`fPmzR6caf;aglUm3Z=FL_Bf zkDjcL$?!*qTz7%+AUMtY0s z5AAtR@B!cWv+VdKM2Qi9g!p@O{2nVlgB9tow$dcaQsP`w<=$#fZNwWmI$$(NJpD|7 zE&m5-IYUkSE6U zRe_W|>{$1Mjy#qKJn|TQ{N=4fUN!Q*qvb6QBCl84rS@-WRrudte>oOGBFb@^rbHJ# zjAx-{Ir6-EC;(py{u3u1qUvrbl8RWZN-_>0)~#uMK(Sc>#l^^UU^8<%vBba(-G0THa>l^||C} zdmwH1d`IRIDQb)H{ zOAp{8rvd&>_=|;KKO(0MeD_BK#xlW_9NMiV=W~vnDyyZIz_JSd<;Mq%&j>&I*X2vj zH)lkPtD=R~ZWEF8w-awA@l<~*ad~81-7D#XpCy3uudiodof4scw91depOYUj+umpejCznw?;+rTA#cY70j33vkB?av9m3VfRnz5#p~9O68} zzg6>l=OKGEzZV~WH2$cM`~vVnAG{2Fzz44Z@Atu*z*qa=9pF+wUjJnoxYV~7Ujx1r zoMF#-q`aHK`@r3Po3yj-n*Sc*PPSX@Rj)(QkF$1}((P78f>S+^TIdmZB_tsF?o5?O z3reojrOV5_Zq$LiTJjrtP1FFKa_&q}i$s3#vua_YfRB5? zN5DTJxUvi7FEVP=PNqk5@D){g^JI|tWWadA&8ODmZpV5oKYTi)MIqykWiez&TUAAb zu|$8Cp-@CM@oxe0&UWO@W4y5jO6k9P2wzUPH@_>u6W|BRuhjcy_;$mGE3V4rZ%aM%3{BFX@G<(E_YuE__-0AKxYtQPIloKJ7g>%;DZaHN zp~_GEmG3dWs(<2#&j=WMC4JS;pnvzcs>W-*UR56K@*SsiyAyc@Ct%OW+jVgAZuOO? 
zW_{=u87;^g!I{84aUQ{Yz-5t5i2W4tVFf@gc$;hPvmTYzY%`# z{9zY(y$>FugR2FfB=Yqma`M0zfP4M#67W_ZJPzIr?p`MrISt^QK6o4Wa&T|DOTY(x z@KxZu!H=*r2^IMpz(;)Wt>DHv>G|KI`MvoZk8_X%zc)Vx;HBW+bj!fY!I@4ukL0HY z-1Nblz^lP+A4MVwe@6!XWt!hT|BS)E27Kwc0nfe=-HD7BQzNW+Sp&y~)`wW+TK|2a zy~wHmOu#r*GNK>J&xDhxUvPq*NAM!>K5$vavLC@?;7fh*I`HM-lZDx}FVq4)=!5rw z?*{kQ%L?%Dd8ziChsbr{2Dln$RJ%CeSvQw_ZPoI|@Y#L@-vgfTq$B;#cu5#}j|;QC zFZ4yF%$T(=#90eVwKM$oh2q3t;K`??*8tuM{%Mh`AHmzemxFuT*AnohKKLr|KJb%m zAyARC0bJf;;l;Ot4}iPNFa60L@P6=v?N162Ebqi{%aL@;BtdY*Igi*(4R|j26hW0= zWA6(!rR)o}!Ph*CJ1W9Q{ps~pH7~dKg=DWIWn)P9%c}l~pEEn@fA$2gb?RTjw-TOD z_-R^xPw+Y?T=Kt{@M6N<>r#SGD5U*@S4uqn2wnuf06gCYp@PT2JALpv@IDVN`DzjV z&jxt?CXcdPndMkV!X$hN;lqTptf|9O_k-5xa2cEY_Jg(%9=;&mu6BVNK6vO9`cKmH z+F>5}ZtyHgM?aEg33vj$Ilve4pSB+0o(I;;hP999u2CSag?cm9&{+~a@sExz*fvZI>Y6yV^6=N$ZgObz^{@Rv&Z!Ox2WrWQzp zm#RgEY3pBTly%Oz3}Tt@I3JR+JNx`8-xm80^Sea?!wFX z90xD{Tyi~IueZ>iN%wLqUDj63O#3iB#P1`%yPpyJSOMM%{xeBW^%H8Isn*-@C)K&Q zp!XQu)W|h<$3a)FcOR_z!5-v|An$vQJhkqSvK|<$v)of>-C<_r;!N^lD82R)x4dNj zrt5QV(!x^~7^nKn6a6khUO)1@dRPU%8vHSxZ})yr(yx|dm>$O(O`t5CV3gqb`f$1?B3gcu+?k@tY4k+iQ=d*F~w$}Y}SId+qhf||cc z{%VjnjJzMG%2W30*n{4MaoLxzJki4nH9Dpl0JA$ zWZw1fy!S{gY z`rzZ4_~d{`J^3sE5BuO{GEoB0@$lDxi+!*S;XE=vG->`WLF1|G63Km9#$!+SCF~M^ z74fA$77E~rpX{%^-Jj)XNc>&IUru~AZmIZw>k{lRq@=HgNwr>I^eN(BdC>bHv>hKu zWm@_^NC$FDufqS4a_L9f!ZPq?@Sh4sJ9F=YFx=^he=NcQ6a9Hw+CE4fQMV%}ft)Q` zPOt1UxX&GMPC#}SQvB>`_O7tgLsdr1Xz9#od0ef$i!7cR{$fmQ8XFFJf8-8dy~H9- z?)?$T_cG*GU!B|^ai1@Cjd%LQ1ZA>@x2eo`OV3yNWV_@dPjza)Q}VqR`P-0>E35SD z_G6R#I?j6evl;T8vQHxUt|>v^jic;qG0eDLkytv>i(@Ma%;LMiRb2QLDzzbo~eXCCRsgdhAoK3o0twfHZU zky+Y*X^`;t^muLH&ET2hEhBu0aJRoA>8$}D1ZNr}^MCv$R{FwK5xJQtde}nvFyRv= zTt9;E0^bJi^*2H+&e75R=DQ}RyT4B^JO8EA`C+`Tn zFNF`kL-_RgAo|Kuly7A&9;)aUK-%59fhw55k2rV-xDx?=*YwO(D(0x zmpS8)+UZc`hkrNx*9gDLA9bxQLY1HJ@;?7?(feM)`v|vsA`xQsw-d_fU%-V&+Lw(N zfeZf;9y|uV)JIMoxbf9=f4T*H#D~8Jd^h;fF8Rv-HUFu)J?VRwYP_Bv9a7C!^14~l z`&w#0Blvdkun)c$JO}(lNl!m{&xL)!T|)Ru!m}h?KZ36UUk)C!L8#yxz?Xu1 
z+v8U7K5*|mZ4Y=S_~9NoH@H*iKSL;DKOysl(|JT#&s|UQ^2VVhR>w~WYulB*WfSW$}F7R?6JXDT7 z``~%t#Xfimc%cs-2haDx8^CjY@HX%qAAAXT*au$)ZusCEz(>$?n*D?C_QCgnCw%bn z6^vg#cmepZ4_*c?_3KTy27J&5Zvr3i!8^eFeeh-At9|e_;46Lb&EU&@@a^DBeek{D zeLnaEHm*8-@FMU9K6nhg)d#NwZ}!1kz#Dz=9`Je}d6KKLqd!w260K0?0~AlQDSpV8M`rv!P`+V>TmDs-zUIZ@Vi?_Tn@KztZ4!qe1Zvk)g!F#~#eef0FwH~~L`dtTJ z4gN)`bo_|kAbVjwvVW00hYcjFdCnr1*u@^=i9W_iJpBkheww8ZFJ1sH`tagq;Gz#N zUIQ-r@ZwG2q7N_L0WSLR;>*BAA6|S7xah-+Zw42Ac=7GvrVqXsyxa$$fJ0yEgBO7p z``|I~LLa;iJl_Xz0nhcpd%$yi@D<=;AAB9S;e&4hAHhHJ>R}i7ZXY~k(jWNXdEnc8 z@DlK0A3P2|d=>a=AAAG2_=Dc^ZUtZNgYN-f>VuE3^7oeu zz&m~T%fQ7y^`=_`-s*!lfs4QFb*ZbgWz-xW*&EVBO_;zs92j2@`?t@R5 zVa;#6`78o2_Ti6#7y96J;Q2my3wW*%-UFWFgRcM&`{3)q4Ig|9_z3e(Z~k|I@Akn% zGqHakJP&-E4_*R3?1RU_hkWn`@IfEE4Sc`{Ujp9mgRcT#?SpRsU+IHy1z+xi?*U)x zgO6wNs?P^60PpnROr4D~@CD!le3tP??@uJ}U$XAoKGwT^Zq1ZsylEr8)L(Y!E8=D)0gDSsr`?_z3uadGM{^%jq9a6kM-k6!CEnct3cX;A;Mzyzk2@m|okr zj@Ao*!TFXy*5u}Im-jve^*MD86}Z;TYTy_D^P|GAAIX0cxcIAHyaQbPTQ9y0T>N1# zz6QL}2j2`nYw+F9~mr#e>=<0=m;sm1J!;qW-ey7j`0$k~OQ@^851aPCFF zuP3>GdSE$X_a(EjALM+O&zAkT*6CQv(FQaV+4%Z`M9; z(b%3by`jZf_XH7Y?WgZ0o#hWC&+F)OH0aUgKX+KAb{uG3lTz-o&oW*u3mAXY<#zXf z$^BWHjBG4B8Dj-{lSh+mu}i)7AaC`z0>;&nK~=BHu87ROB*&ZOZWcXFJOWI!R419F z{cRv#f_OJeJk?&E{iYhr0%@<-^#Zj|um}FqZ|ieKYTa0NzgfbhH?v=?Aae6Apnr$& zc9EORk6L$SrCH~}5-jI1+Ci@mBlC;|_w3SCt-b5Ah0@2aG4v^V#jhbLUg1S9Bri5%0(8@!Fht z>YSC6w)OrO;uU_E^<-@)J?ww5uCLx37))NooE{0zaKzXf-ck=Mh@bnQKDUvq2g@#a z+lWq0S*En-E%3L(zfKqazX>(u_o4r6 z`;&CqNXJ<5pG`;F>2}f?CY@b;7CYAclR6KZ>`&A=2kHF3W^KoDA_ogZJ&boWHGTd| z_5X9w@As19h~AH|>O(BZYNzK%+-C+PZ61l=Li~~U5r2)O|NZxtzUX6*q)+_kB}3@L zeST1#JD2k?oL5a*IGAZ)H*vYS)9!QO<(ZCeiP9`PE~~|#eI#J~R)$oM9WnmNx#aRM zkv$rQV>I@KRk52k;_qI~dYYepzUSz~>DN}lKlIb&dwzWNvDBjv*Ex$_#9#fWZa1lR zs!#nS$4Svw-skWaALAUkkG_)qhV6%BeI?-Qh*%{Sc}>XMy(VD1ad7hPA5GpmRCe+EBJ>)*FMZ1%JV?FUMF zdx_t-K0|tcb<$G@K!v}kj`p`9gT6T(==MW|zYcz5x%*7HJ@_W<8@Xi>80e+ z>i8tx?ZnUhHTMN{{M7RkgyJ7eZ?YKHG<7_y)n$R>K}QH+mim8+s=aJ7>ng^R!4&RwNccf?MSHlC%*jF 
zqRM}Y-Cf~WT16ylf-VV0I76kISI_wOznmxYDYx9R(dn8I-uR-8$4*wQ>D3W``Tudw zMe8B0zgO!5+Rv)a`lx?Bs0o!%{aE%b?PCM!EM*eD??CON$g6LwBNTfLUBUWWg7d&W z<;?n&){54tq*ns}@Q#4N&pM^qcUHFWTNT1gKh3j~_$|aA`3ruXU;Oh@;#YY!JAwFX zh@W^f_1>x2-DYrOxBFfqwB3?H-Qm z+3zjj{W9+NT*k@&VgC77?uL@L6?xVC=9%3OEe_6-^}boMuBGO|);b&AZ6R|(XMtu~ zo1obU|`8J(QQB4U>^x;PMTPrW5t$byZ7NIjLwLfuJ02+e==xHAKe}pmbn?@lM3p8%{QP@-lOdk zAx`2vqQ?R@0!B)L#=iydo9D4!aFwM*$B$w$OJ78CAhm)gnxVu!GwbRteW2qM(ub zN8TDssJ@K}95ntTyEKUCD3gN%^l#Ek9L`3i2z~<5_IPST>8b z$WJLG9aS~4mT_YPata%Q#;g!uGS;sir|N@$$WT{AMy>o!xE6W*#^FmJ;tTT9`VIV+ z_ObG+I~JXa(LicfA!1ZL)FG$uj-b&q)>n>NACaEM-TC1D>lvHV)hO4t2@$K1H`Ea{ z7H0X%)B8r!|6ZA%FM2iBVkEcjso-siEmyJ;AH{zh z@^81Q+wp0fmj5X2FmxUE&At#*Men@q1uI{;#M+M$dnquJ_jsiZC%-l$w~qRJ!kI*>QaZ(QD%lt)iSn|8~qvcHWkZwvCmOM=Gt zb-nq`KZ5spdQYYw3s7K6|H)E^%pVFczQ+6TM)N$rTN1)2o*eZ zy(Rxh51t1u^1b{e8TjKF_!~0txB2krk%uLkUzJzsEqT94!ql~?4h(Bba-k$CICs}B%ws}A?Z+XG&EfOr#bw8Fjdiooj+5HGI7z402r8#BaH z<=5ev^s|ESr5WOjem3ZEZ@OE-mmeVBULEd@H=&vH1qX;%qQkxM;^3C4`&brrd(5{x({<2d1>_ zl;Fo~H=Wd0s8NQjsvE<-+tM}%8mSN4>*3{j#8i&T;guep+$8@@B;|4#j!=yH4 zh?o@#{sf4PWXI1}{z{xA>yX#H{Lsh~eXc@Y^>@?z^$p-A_&6!2euRH3csaPZ5B4MY z9`I5h{_!`{--EmBU-%2ah2M*pfs1@EUIQ-q@#0P3l7BDW0bc8aF9Wal!PkH{`rw})Ui0`Po3TmABdPJb@pc`XMHFB>JiZj|u0QNovv625Mf@U5eS?;RyP zZ+_TCjKsoujVnFqjLW~om&6kBC%%z zllEp-d~)%}i*BKRcqnM_Y9A#}`{T)T6v)%RLo=O8$oGBcSyLmt+{VWp*ZWeE&Jxn; zToE+7oph4tzmofEdcIyB+~+HgB^!?7`6OgH^3p%;C7spZV}9e)->H2EIS0WMFQvm{ z7P`WM+{+khz7)Te`58|4!^jqWyWcaS)-jNGnWb%~v(&{_`t2cp`457|UvzwZ?k$6U zZ}Tl(Ji^Y1PpR$mM55n4q%%l5EV<}<^t`XZU61uQ`O2Ff$!f?n-;(UB{=b#+^oK!% zpDiBUUetT&(syC(@k!drD&!dtr}m%HPi+7XgXi-d{MgR2FPYP(3A|EqQrygserRDiI8;>_GxoZ%OfXd6bPPtNz=yym+aOf%7Vr@t zyazo0v7iz1@UH;R^}*MH=YZ!6Kkm$2J)hVDz7kydJ>Ws=MK7bN^!E@xOt^cVBKaDB zJNfnCqMrirT7GmGlGd*6enRXp4#R5rN`#N@xjooG!ZX!-DI~l}r|4ha z17bfC-lD@lE}%+}SMw>q)UwAVgclP3La|5QQx%*>cBVymTO^h%ad@PESqEQzf6$nv z?bd&N$ljmI%JJ{7tOX~LH~ve=`vvO{dcE!*d2hM{Z+U8Zi45s2Cq2>UD&!6DBj1nkS?paE zF`pJVJ-RKceN2x!-5tEwjUgLZ4-mlh;c@#o!1 zdhmCq^S65WMIUka^Ir&B@6AOY?ZIFUfe6KRn=Cp 
zgsSwH!M_~-Sprz~ul5EOOAM!eCD3X&nZQh-(;?tu``WuM1lz2?% zyydvwE{E{#f^Ro`On<$6{9vh4Kv?oQ;V#^$EkWo04@M+asvgN_3E@i#KOw-E)bjq; zE-y|i{xLFT2P>Md`imyw_5FtZ5RoKxD(i<8P`<|b>mj`QztLlmFDhzEe;13^r*uz0 z3ix+-^~6?w)>cW6!>Kn)0)Fzi7UJqY!@lw=qt(nbsDZR@wOZ0q_1})ZDAz^-NC$l_ zV*J$YE=N%1cQnDb8$NeCk@nL8p8HBVz6`wlcj@aLYrso=@Xg@G;D@Uet$!kaJ9r`Z z+X4k2lMJf->;=zxH65RTA&-D(St8Vbky8Y|8@xggRZgp1G4R5zskmzY;Pv43njd>k z_FvYESWXvyh;zYqSiEXcuEfcJyTHoN^ue%67n26wOflz?vmUkQG{Fk_!iJ7Ra- zX-9Xbwxc>&$1g-5zYiL#z4k}#GpQfj&J`j()Yb-~onxm&`^Hv*-{|@rd?(c%w|c0t zR{bNt{`FwW@2E78ZU=ZH_}Sujq}wS;k8>Mp@cwA$IMN}fKp-4#{}26_cHXiG`~4&R$8j0To9Y(?xA_-J z+ydnnY(QRaR+e$N7_3#_+Rp9?W)%cf$8U8*b6lU{<&pe^?q>WNlVwE3Blgq(P>TMu zE8@Ou_u57LfSAaD4?S;lYq>@8o=8rO6$ zYqg^-@OK_bd4yj-lCNFh3&1b-&zBly-TC@o-wx60uti>3C++p9EbH86y579~_1PK3 z)k_vdUJvqeaW^X!*rL1GiOk9WKzCGwy+Bi?WPkj~zcQEcq%Ttbo7pXAI-R zd=F*6zHh%ZhO%)lKA7V&R7A@yc{T7iRv7AihM~W|wr@XkDh>v!DXomxE&R3G;cush zFOm3Dbo@8d!`DcBJA7wqxa4;We@mwt#y!x`p9m+v`ZsuZ9WKW^>~WsRoq$Ji-@;ugvCK;?E~#Yk{x7%`o1AhHkg` z{O5wsn&BefnHz_eSf7)@K5a#X6Ky^?5gzcvEj^6Kj&ty}l^*`}dsT0?9z;(?V&``F z>#5-}DL>)G?dXz*99mB}wzeoZ<3nR(_hc;&RrZdl>=_$t9~ZppaE!~0Wc}f=({i$2 z44h_UO$(fs6MXD6k-^^*}YvbmjBwp z%!tU_gS^^bX?f3Y_1Et~%3FrKScvxe5?7zmbLbxAu?8}VJummIubG+Gz%$x2CtiFE z-9BEg+V^(y7yga5>l4Wz{gK!vW)JPWhFD9DE&i&-nXZbDA=4#VImwh{UVa{TN%srQ*IYW_P@ z>BO$rA#eA;wOtQd@>15i^u-JY+nAqS?=OK?Ad%91Zzr1ni_dLTmTI8*><>8&{cSZ9{qCN8N1ARta zR>#&NFa7)^nt1#3#*HF17&wbXBJ7UfmyL#CL z@*bSyys5|ug|Tn?Axy~#9X^_zmFaTOzsQLrM}8bw$=UK-rT@(Np(?%kK~ExQ33B>v zIo}>l&co?)w0^cCrx}4NKaHcw$()}8CUS+9TF$3Nlao0=O~~11%h@|TihdqRFBdDd zqMtR$Svt)y#)w^R98C^KF}!-Fca;8PFLLUq8^%BQ9O|>==ra_w7w7D4bMV`qO+f5M z;+HXT&o_1aD-RI=K~H?_MdG&+f14e@PTOMy{aR0Wc8sAj?N_n^Y z)~_DlRv|Cv0>)vH_ov@z{c%TVvAmlj?Oij>XM@~=_8H%hS0n8odApIP%KNY-Z!vqR z_L&PtMdz%1H9Nz#NW5JA9D0yjk={!eKb#ICm>Z={Yjb&>bHB~SmRGxnEy z_p6%qWwGlkExVqQ$*!fpFFJzx?T2`u1LJ1s9^|U|E%ShBGQaH|Gt-*hUZ$r3731*! 
zA3TkSJQBZ&_&NBeFX5MjZnWc1O_?8?%pNjeigO#L3O z(fc-3M*c(?3wF5ZuMJlYMJ z`}0LdQocf+?gHYg`bRHv7CA{td!8>} z$d-Wdd)6~T`3F&s>a4OXU%gC=i2f%W&HRe@ClrQ|_x6jU=|#r3asK1c%m|lCeHH|) zcHKrgyY~c)I?4Bg2TDhcdz=~x$j>YL)aTU5c-B5PBdGhB1;5U86 zRYYz9Pj|;EBUv~47ql`G>?8^Y?lDev=*Q_XRF?f@(ck$;SeGgBu|Yy+r!Ci1$}6ON z@%jVczIok41Om8ipIQhJE~;R~ePxBz;%9$dqy> zJYxdyI;)vd$b|p@RR@{U&4kOnSl>F!7=ep2z6~L>uIZB{3C{{>Wh9gCGU1t1_-Y0_ z3T6t(lmsuy^woy+v3^Y_6P`6GQ=<(&l|fXdEd8hPtOn`w{0bM`m?3}xjAsvjTaV*k zY0!8~<{P&@w{QQf)ZhM1&hM{Hbvvi2_t2`hZZ6^+SFfBnyy@`uFm1#+-HbQ*D@O4b zqT$(*;BEf+oqq6)NY=AKzktffMgFi1`9A4pTMNxmyPd9;v2xfV&iWl@vY$9HlDEX0x)@8dOnKt>URA# zw~tvA>dER2N;yS04R-v;lX7F=SKIiSBwhzT&&D4~;w|9U*!apM-UHrf;}0hB72wy} z_%aKZyvXCq_od-vzBwtI`oeR&p3X*2G4Vp5Ps)$;{Q?_rOyUjTpSSUONnGmbE*o!1 z;!;od+xV49d=>bkHeR2^H-L-1s{CA@#HF78*TyeR;(Ne9u~hTln#9L5Kb~UawfR>U$SvmKeq<_4qh@M zYl)$M4yg9&Q;%OCo2>njB z`6yHMt1df;f#=$|tNz5kb8NgesmB)Zu#HzI@gDG-7F)kB8G6KO-#+>KI!7_>_E+xB z-`3oK)&G7tWBKN|;x~KauOa>?$cNp3`{@5DSA4m5V08e*kw^4g!ry(ioPkvRfBYCN zKaw$jKMN(x$L*8k_$+y8wB_Dm$@R(Kd{=z(m)arQ`I}+Osm>_plP)=OvNEN^u;mol za!#`3_|(@QIC^W>m$R-X@g?tPvVP6-z7aYrsVA|g3vB%KBrf)Jn~k5G#9P4cV|`P} zIX;Q^fbX;Mqm%dw;V;(w*%qG6cZz?I_QMc%z89aF`ujmX_3)QqvfRFMO2|ju4Bei- zxh`cqZsk3{%CCLZ!hPiTy5y(5U|jMk^542KV6g3IgyNQ*WH}vwuF@rkdnBpjS)AYP zVB6jZ6=%%H7+3slne3$gf2`l_4NcA{=ZQd4U%Wrd-;cQK6V|=K(8n^$neUR*ce2NB z^R!;-X9d-{*wC*3QhEngkB@#%a>-+T)#9`5V5n95nNL}AeB%GXNR}(b-a(B20qZh2O9W510QJM z0}Xtjfe$qB|C9#I-g77I4jN|fm`@0qJkoNM**)e3UP)vw9B4Ug|F-Po7f6QJYYKlr z@t^U}H{m~JpV_tBeEN;^%%?}P%)mzT*ZW!~AmHm-KzRSQdD+LulJOUu+jU-+Vaz{$ zj@f?hE%N3gv+EBn!c2X4j^~Q(y}RvX=~eZv7ld86|1)Hx zB?n21f4pyh*K3mo65o@c;koPk*3UaF`}hu3WvUz(CS-LvaT5Q!))V8^)smD%xk)l0 zaojPCii<8Z+jkW=oWvwm8OI$?IwTj7 zQ2QDyk6k@G`x?u z<-65GeI&R)x4(T`AbZLB_T2#@eDX)s!A2DNF_KKv=Y?JWg6Gq0&MT$=o7ZYAIQKK8 zVYGb0>^|dMJ`w&3hwPE?I-!8Y7P#qg1^yC{T>l>=`*;el5B~ePq&8__$4|iA*~4zF z@Lwo-JMQrd{|E8!B7WCfNVwj^A(^RrRtdjAg8KL#*sQ)!vA(PHUtX`E(><|O`Xc`` z{aXHqJRBmw`e_yUt=E+N$$xM8#8<|AvP{`me*1Yw%fy|>Bn!1^9E*0C)uxa+`R_OV z73&YcYP1|{wvX9=4lSf8OsYq2zykvZ<<1(q5n8 
z$V1{tvMcR1kM{Z;fV9_aFx6f=$25^}Ev>e5%$4esEoRPz|78_mSJ+&*-^{LjT}<%f zTG%++OL*?3>l5>X0g;~FkxxPP5AFP&-H2(7;l$6eXyIH>qB~B3BdMPq4-hsl`Cak< z$<2RwD*yAmP{zf-V`pmo9j|)6KkxZI3V%h-9QE@(7ycKAlBkhgIVoL9c+voBsOs8| zCf+pL_l3>usjuz)NHYJ4>jGL6SE0B4LuS{X>8|2ke@X<&_`-qtuhBCVnBDP$aD#cO z)&I;t4CUm^y|kikePR{w*>M%GPwC?tw?5cNaqDkpYWdRjC%1!>`P%T&61j<(X> zF3~QRf45n2SJ4e7W(jbu1u%v&&{fn_b?v#(lwR+S0Ln}qNpdF*yfZe?5;D8*$eHvG zJ>L1ezNsr~;XrGy*&Qp$rAn<~XZ~bsB&k~;@A_Y}>rZH4Cl?tK*AKI;+}ltPP#yc2 zcy|K>K`uh#U4LYRylgKz{2{NFs_NRXQzeS$3{J7aZvcKgtGw`f=%KG`QkzdM@Vu$BAc8C+vK?>yNc4Bvz zw(k$MtfCNKV)RJ#|BJMH&p7Xyefys}S9pE^ka&wXgU#!km;L6z`pxcPYsTLF`^Elm zV&_NnD;zi@y9%?{W~a0*l$$cFzVmdT#WcGw4x8P#;w;R_#&Z!9I;N3h0$drBJba2> zVZpwBlVLn1gK^^a%P4Kv8yF!vOdR)TUS}}tX5KTi!|c5^kiGWT?N4WCuYGRkW1`_p z2*6Z#Tm#kq*89j`ROj!D*N{K1@Jaq&SZn3)Z(p$U_v)qY{C#N$`3tA!FY4rPg3jN^ zT=~1W;-ZSKS1RVjdv6RVW1S-oPicxK3+biAIZ<60A9BfzAxOS_fF5yk%Z=9xARNX! z<1p!X_l-G;#lvK-x1}H)>v}8R^=7QQbdxZkr1gmhC9Lb&?yOkXIqf4Sw9FTa4_8fo zw&jZR7jADE7e6IY)fFn}hke20^dS4qK*F3nbnE1I$Vp)S$Az8NWH*p(&uTH)~5lQyY=ZJX}n+N)pk;QCsOy0y;ivN z6FTJjxj~Z5pr5Z<*>UTqM)dO=OFxnLDStMf{%5eNYeKq?&aiZJf=5SXX*%k&^z+UL z`gzIH#SBL)%V}vAcBb@{SMUlv(N7+>m(}=Oep68s@4Y54$L#u>SWof0x1J`i z^<0yas3yT!*PoSwXz{V`;-vL75LRVb&!@$DF4fkvp{2@P_!qIBZDv=cVd(+SGR0QT zMi0gDq_Mc19!np8{5$&irB(L(|7B~T7=0w>@`6Co!s*S@#+e@^%O5(_SE z0OiLzwlSp(CUxL#FK4Fdz|~&<_)fCD+)vxFG*OHu5_5U6q?F&hF|GWg)V~~KkXrWb zJ?1mBIm*unW|CeeCEQ7vTM4y}5yaAs;3SU`oZ(e~yZnEn{4eVI|A$k4G?AFg3p%Cz zSKN?R{!!}xJ(i#JGL`=x!VXaWbR+nfM+cLFrT z&zX@$g+s_$)z)oNuYaK`V zj0Dq2G?Nk@C(Nw`R)~}zv7zO>^iCk((*TOS4M6w*f2I6ewEfc;=<=h9#9UsaEcO5F z=hMocx&GgG`AIZW`F}~6yZm#V@=urYryD_mM+YTd9mqPHrH9vcqlY1@^k1V-us#){ zk;H|($XfL9%C&d{$@w6oVTvnYrnmy7mLr*OJCcQ1gR+Za`G9?V3`cUFGe~Gh5?7#7 zQo~WoRenHW(ht~5fVfDfiW5Lu3@=raH?&lHsy;t3U$uYyTH9f)pxCD6E_%lQ;O&aeC@qnJ$PPd9=*=_Oq{kePt31Lgk}tMdQZD*sD% znZHJFV1NF&(nVtSpjH0HhGh9Kmi3rPFI8Mr)wNGL!#Njrz5OIFERfYDrt7Y46xL^0 z_)J-%k#(I*xPFzrHdL@OklpbJ@#4(j!*Q0O0y{s-`ukOhs>yHOTs`R}7G;F}RLx%1 z_2$X|YYZ%;GRv>%`YqB~p)of-Eo%qy-Z5Y6+qeI5S$j@|(c{8_FRq!{+jL%F=g(BV 
zJMv!T#X+K=oO#(djxVU{nwC?MeMD3ZEXlE>5S10iyLP9nHY`B4QPH)jV(87v?6v!M zn53I1+GMHdva1k_soVf{4VQft?^|D=Aj9uMU;nDL*hkgZsrTCY8m0^x_4Q-q9YTHW z<@ZlSPnTV(^#vV8U*@|suzy^fRM^8*najYAOSiA*HFv7Ms?zOi-&bvYt=xD3ecg_{ zL!+-Mv9HY=w7#y;`hp%}eLZ*4yV2JJntN1zZDc9aYLBhdc}Dx1iM&IouVw4CzUsBU zpodsr-<saI+LVY#y`$nogUas{8J;eIDw&vaF>m1EJ zs=m(TEP!QSxzu?^eeJyPkm{@KX|1ozw7#IJ`kM5T8V+UQu&V3dGCk0yIXJ7Kmc~#kWytwm3``m|&n{T+r&9^VGgj99C zu`<9OtTk>bg;n+55+L>so05a)*%)8H8a$t;fGG1P44!8OcCL2D&4-X@+g%>xW~Gdq zmntYXIdGnA4V>;}dh2{iMc2lPp*I*e-`Vjg>DTr=W$AI+rIsEqg37$K;^Ij!Ny#>8 zC5ql0MINjI|Nhye0w1##s1%o3U+Z<8gVWdH_u2Y-V8CBrHzUv1m-rb0N$eoJZ(e-sa_Lc8m*>~AhO!Tzu z2}@72=308X44T?Ll|_m7`V>Qhn$L?1>t|Xiu503i*Ex8QedCyqa8ZBKJC7etWGX^! zUgJ#)Z{j;t(xysk#xR}?@lS1Qa(1euz-<5HKC}Ie01ok_m&C2z@WZ6$SHtQ!DWRSD zYJJA*2j8nBr1pzRKlm4vUY_%Cgif~(Tl)SHg~_1r)2Zx()OXFwcd73ab%cY__r;v>vGn~g zYVg?@?r^B&XusrpX(;cojrk-PhrzRx8?{`$TOd56Znr;B~Zeqrf*k+}Eh`#zaA zygz;a`gfMTpW`Ne27S+>mJiavZ~or9)Av-(e=z!fh*J}mzJG-p{Pq3zibJaJEkC#P zJx!(uGTzCw$hs zcTyY_E?3Pth?C->{QGApTwJZuJ(Lr^n^gD7`Q zMW#=d{mX2`a$?j(#IBfwsE{|eSQYa3Uu3M1ChXuKE99%+Nmj@cc7@2SCbaVa`k$*~ z9kl*m;ViSI|83|bqyDpyYwQ30&2J`){dwoGDT>SyEG^A6XXv zWBeq?U%wZ*qwD)X^P?G}=eE_Bo?n}7>G=(4MU6TNIj1^t!x@D3p0V#ib>5qLNVGi- zU+@p7Hs$(xoLV1)na5HG0s|Naec3#17!uFhND~m zmKGa!F>^F`{_$^T-^l9!w4CfCI4?Rqhxuh#ZyKxq#_YZ<9PhpX{9kY0TF?K9w-)q< zjx~E{2Rg@@3tyc-d1L4pG~tT1K=Kmry(*`wefw^7QPsXVkk~#Xr&_N@dzI+yX|wCn zu-Ww@CvM}MxV>9d%F{d7B)*^y6}t-f|4RM~4wU~*KM}K#{Qs1Lgs$9g-fXMas;6nX zp2mu*(fE3A{n_lC<@EBzTk``0X79{E zXBJz9^Uv5AIwiHL%BiPD>gf;E)0lL{*=s-g&iX*hiPGtslV8rhi#ZN=7+*G} z1D^c6nLYitJN3Hc(9Z0&ubPv8mwor0ny2@MfaXzs_06iU=GS?St?G>z1S&eYMMHO8 zB1UX)>#m>I7w^3p{r{2n|M#l)7ij<7Upc?4gaum`_2|>8y6OtTGkRM-NsVwj;>E

-y=n=%Uh35N)6V}Tw_d5^WqOIhxfvPfZ1d3onVHU8M;y!Cd`gOpJQqFYqq zPV^Q|wNNKlD!#XTb-x^?eE<5PfB8%u6mY>8AGck|0TPF`yjX%=d(}2VYHj~o2LxYdq=Yu@nb4<*!#w2FUqq%f$>8f zNMrloZ2zC#eGC}!F^v4>0Nr7FEQ(iDnc~L~( zg4SE)u4T#LX4gN6Fk%JTnK8K|24tCmZ%*P0lR$?ZooC_o$_ z2oMCw65t$wEPyP4kN}?m2myou#t3i>z!-or0AmFRXRj?EThUoQ)(rfP{bX0IEg-98 z{H7GUTrrqZZ_g;Z+`-BbYtnKLV$y&`v)7`IPNfZ+!}|Ge?)(nLu0pR}iAm4x2#XIm zo^y_N725XsLH+$_>u>wULsx%s&L-OR#}Uf;k(Bd|cjoLEx5;71UVHRA&jh$Ut?orl z&#~`m4{_~cI`^Ai@>~E_Z5YSuZE>vReC9=F_w<~NG@eba;1%6mDz#>Ih+)8!j2KUVsONnNP=97<3Ju0o9#Zf+XjtlEAjM*l=@qY<;4+s512bxQ=C z(9N4Cy|m-EpLSW8y8k7|*qr%c`#y%OA7N+NYmdhp$?o_o5=B_|v*O)f!Yt+> z=$SzF!YX3MyY?q8UcY~TWyd@7>jSTLvZd}!Ce<}Jt6xt%lqPNdqhhr41MLH1x9ub0 z>~u%es(k}v?4@CulHz6Ns2OQ$a{4^M&`zaE z20+c8CnDmM^QL0sbPm4LlF zP9Y$1HZ~CN+Q%d=an5&x#*QDz9Y8LufBU4MQELya6_@XLp>wa>_#V9AWgOF~0(mRPUatV-% zU$4LaO|AmUcj;S>RGB2Ny340z;$L))qa8W@1%0P_6@bpaYPTA3rt@$(|8 z`<9#?bI+t_INqQiZ{H~6=Z+Zw$rA54_FVT@?k|^TKlu(5oy4Q3OJm>00GQ-vyo;nn zN57u*cAnu$?{@%63Fg9gTBcq|E!r0RnRxfnV#HPL|I3>*@}gDkf2OGqi6Kwkc&i%C z#dMQqYuO)ltn71Whi>=JNetpktdBURn2e_RD;}4YE>!=& z_q$`Sdc5yNkR`_+?-k-G^xg9BIJL}O9qN4mdk@Zky5fmL;Xl24c1rr$YY&%xj6r4w zin{=NC4Ztfx?P5xwa@LCk&@v#qA+!>*IG++d?)jiXS1JtPNqyOP|l-cF?**5-pNv) z6>DSk72Mb6RUVG!tPh+(dOqGefjQM5WN5AGdPmmo|7N?D&zoP~wNwnI>+6l!KrDOh zd9kj4xQ;4S4o(_q|DQn1dGYSqIFY;0#|js|n!Rv)kVxH?gU(U2?1jfiB~w=q%W)w4 zQn>RzqBsk*PV^Rz>HK%B>z_k^romSAhK?{NZ^~}(3Nau~{zvxR3-}<;tGTICwSnwY z7B-VPnQ3R={UmSqmf3dp-GReo+Df1N#1{zdw0OFICgrG_{1$I$>H1w~uY61V{C?hD zvsQgJdAUX4^^N2B{w)#OH7xJa>9`tS+U&k`f!XyfiD7nst5uOF)VBaif5HUlcrLr+ zHgb2q99Ui8TQJf9$;rcvRKZ|DS;Yf+8m>Al?8q zDyfQKQ5gkIG=UkJK@>r}AhuR)y@0|@qT(fz1Tyb9HCnA&udTIjTU)DbMXFU3E(v(S zfQqQ~ipm*A0kr~xn%`%wea@MgLlD~c^8BCQ|L5~aW}nMGYp=cb+H0@9)_2Dkv&E;j z21h`lX+tP2#8~y5^$N$&K%ZcE$TGd%EYqLmZL>_5dicdMoio|#VF|pQ?^RHAhLQV_1*9) zHGH2)GR{N1R`&sm`1WvM1#ieAo>6e0P~NyFx`gHMEAg|f`QrnPXE0HW?;_?$&%2 z2l}=`g1)T!>D}RO>W{+RSl7#m4|DAQdN6N>%jBE7zSf?~7EtCeN_qTAu99Vg5mIKg zNa!>HUoYuDoDHV`(ACm^?K|whpKJfU zLKjAoZ@FElgNOLIe%gi6{4R{Z-HYi$(|3{3%7VU=a(>@QIlu3uoZoj+&hI-ZyM5>M 
z7>Gngv964>7~7Ax;*!>Nst$$>yK|18<+Sw3uF>SCX!5>t8XOJ098KOKA{I?HNMA>j z$_V?TpR@gC&F@tjP3eVb%G^$whg14CdM*>AW?9FoU8nu)_3PZhV#PAE#**KJlPj28 zCq&A=#0M9Z9SId|K5o9+Ando_EoC!Lz8V9krG#(F)yaG&YQ{k{rH`MUJ~D8wjCR4p z$?V57Ir%$1eR(jDxebT1u?u49%2GtcuX%3oT2xe(+!{9OucCb&k-aO8`f0W5MvCSM zZM5cnpC689eUSbyvdXHOj9Q%~3Q*PReB?{j>cm?cx$6jLsKKb5{Y7BYSBhsanm!ek zv(c!WzwlP1ZVR5l-W_Pfq=NOv7}@BpJ5)`eZ;tF??Rlhn39Yg}8fo2_c#r2* z+kA|+!DTq|m~iL|tDaa7HZWZeI}o&J_r`dsEMV5-;XAP$3&CM`3mZW_cljE z%i}Yx`9mExmp2|SBu7GD#Lu_pUl}NGBu^=QC9cCS;$yjhx1t7(b0x0BFXAU~p(__q zIalI3{32e)g&-HuIalI3{370;3*ESY(zz1X;TMkJj&VGPtS_6Ai*#0b19qcW?qX(n zkabqYP<4$KNl*K;{(Fn~OvRt+wW7d81%5 zaq3NOWIkb9*+-_-c~j6D%qdct)$ltC^06WO&6`s~_X!nNz2Ho_6HI!7tA^57A@bt- zea0vtI`}W<7uV!aSaI#r!^!83eA2NayP}A$Gm1!NkWbvHLEXA^YH;^R`Yc?%%=PuZ zq-Vtgj33xxkTaVM$6=s7j4Behu=tS0B^(a0=6ge3!aN|PH|~ZHSN0l2BZUKvr>S39 z%4no;m&Vi7Ei7d;Qn+j5Y3dc0G8!oyY&=b!!cs;fg}XJLraoaQ8;x`dvq@D`ZYqf~ z)|g6!sXCP~g*43KT5itC{D#d3Gg6%?C$J=Q8`q{awPziB#c3`J`pg};@ZI^y^&2}p zv*!2dQPFsoGE@D>Ycp?)F7QAWqv_cgBHF_d@pmLUB6TkZ>~4P+-S^Cu(1<1wD5~@oK*2s?)JRuRf!qS^q7vXVUmycf{PP`#>Zd zQJ)7cl~G#etqquk%}9+Xug9!;i{QMgq}XA$<_RpL>@rV$ys_%Buq)k`O1GKc*nmcT z>3Zr0N4gJ}TIxEG?gPy)q#GPPDE!Q9Y^YpYdye$BHP$-{D2l>Dy&=`#52t$_gFdk8 z?}55V*%#rm%`sdSR9n4YI5qP7;p9m4RI8__K0_y~o^k}aym1&Rg>Lrr)K#jd*V+%})X8r4EB*p=bvx0H3FJg;webD~hpO`go zM3;`xdmR7?yW!`3c5biYFsYl^X zB{kC2`F+?_@tCMqXJTDu0R5(67eKC=$P2smkHWo|gnP}(y(=Drg9|c$G+i^lS66RK z%hd8o=06b$9CX-1Y-^2zBFr6~@E8Hf6;QO5ckOhF-ORM=CA;riypg`X$?j!-JZ5*5 zAIU}8edV8S^3MQ~y*Tb@TG9eho2r^j@j?5~QdBG7T&cAmuqPap)ZuUW``BTpw7!5`Saz%~*={f%x!3uT@{k>{D7?(OQXD zMAq0ijEwuz^UvTPU!8rD$**2?5(?wZnB-2I_iGLhm#vGR7)|qjBz@hnOm*2>WIxKs9B#m94{1;QV}7wt7dt+0jvPmN*EAw(%ST1jA_b zX1J`??ya(z@6+n^`Dob(^7MO1h2H~)2Y@A0b0FR?nyO$O8!5@0bCl>`r+pQOXIgc9Q)Re{fF+>hXZf4281^}A>2$%mUhcK!Z^lAM9g^l zTDw2qEh7E|lVC3$v-Sw~^|%s93>D`wVa7*{RHWm?WeDMjoCAGPQI5es7)(R>kmp zT}IXtrOCh_53-Mu>T&R&dsuF?2|gsPJ{(U^#iqr9_^@c|5+L+moEeLhy(r$|Na)q5 zHRb~W92_1_54S+qjIuA#FDX%qBDWRopTMFIqoMa}_T?StdGRIlPU1!LkbMmF7zvQk 
zy;0n`-+z01VdfEfd@yr&Z~pI5(DR@nh+AF43{#V6z3kpe)M0mFSzZQZH#k0ck^Smi z)5-$T#@V*BGI+m+Q0HHFe@k?dh$EA9ZybbH{VPZoEkBYK>J5y$>B!9zF2BZQQAoux zff{9ZjgIMvhSt@v;m{sy=t%T~zW#X<;mn&cya?vuNLhO5+Zt_+F`aX|HHP!MFO8N4*C}5bLt@kB9BuVQP*bQ$5l8LF`RlYT(&Eml5vaNhwGLIJ3h=* z!5&xNMneCHFLU@2F3Zp3b@}0AhJ*@!lE;rlo$+H$&EB#%_a_b$kKhnZt`lJ|U?==Ji*e%2{2B>xl12ffrj|t77E_P-$YvcFE?ZbmDzsRt_Ve{Z`$C$_ zD2Q5BA3&GR`)DWDZ^UU#4d7hKKBpZ}F=^rigs*pn4&vo7n^KQxY8(Pst-9Z;U!h&V z28!&-h8EVK^_a8TqoJkM!Eppcot8eJN33C4{PI|+<;Lo0VEMT8Sr(?zbOt(3H`cz? zc>&7yooLyzY;WzCa9sw6#^>=dlEqqi!hi$=6{PBo*?r8o3$x86TkX6#v~t8TiI>BK zVy(l`402}~+r7=_K?Jz1t{vlaGGv@Lg+@15T)=(WE3K)J&5Bl$&1Eu~Sq(?SH=W4a zj3IEic1SJFERuHIAE{f~k-gvHPb9faS=2+{t$(_6W<)|QvkI7Ti7?|TKQk`$F@s&k zf0qwc{|X%)O5&10JPWIS2vdm(4ox=dw@Tq@@_G`pJ z>I#_+Ylw1h7x2)~#_C`s-G^BbMZ+$lRe!5I>5RS5svCb04tyboB)0kl1fvbiwd7ZH z&sdyrp8>m{-9zVp<;TYOR3jI?6M*)x>$fr6w@@yu`Ta-im%uT<6#opB9qkMrR~}?P zmHCJL)U#=2&VIl-mwgqx^gM?$0X;XG$oz}AVD1yrgMEFrO1#^I20XywbOp<*6~t<^ zvtZsQyBpp|J5YYYP;P~`BtCbZE!a<%x!;=Ko_*5Dec6_R@9{CVXd}t5WLNlFv{8WR zobTRH0u8fc*sw8d*f1X-7e&M(vB1)3U|Be@H4<7HKQSEI8b8XKKTKNXjw@OCEsXC| zK4NI%b9tyA3%+RCQfFUXj9;!Mmbj$4`iU=#Zn!~s_S-xOjx~R4_UC!j7`$}s+|7vWm&ofY_B}W10<%x4P3o5b{ZZ&; zI)~o51@y}9y)Jm2s|NClDX$n-4-xYsejWKCR5_eJ zC4eNZ4hOQ5<8Rh)*8Ex6!}#;phU>Bc`d%`2oo{7- zsq%?g6HQMuX3ces9GN^cZpE3J3WS$7$ZqUQV#G@NgsxAO=EQ}c;APS|tNY_TO=@-c0}o{?~5uC z57XXoMbnqo+CRG12>!c{g77-Zn`ao8rg4s{G4pqOR-*bu-`t+-&uQA9nHk8J7u|t0 z<>`~FJ$;fR5WOc-bYS*G?Jv=L$PcHYp!b$sV>+!&1bvOt=qJYEFYS9z+vitwF1=xg z-Uh=@qfbC$V$yN+o_#o9s}O}EJ_Z!Zr4bZLObsMYiNH?D85@{AR@*Il#IRE7?d#Bc z5bMXpn!LG0h1%%{x|)741JD($_H4w_FF#ZIJp1K1BVxD(LM`F+HKNFf9Kq@B+EHq^ zBnHYJd(LiYLY_?Aa<)zB#Lh2J`EIXhV$BaV(S&t^JJdol)Fg5O(T4M3YxHYky7aWG zu9l@!Vs|diJl?vq8M8t_Dd zTY{ZH%Fw!SpiTF6BvB|@wkR5UAwnWil*7GW!d(d3&H7bYb6Oi-S;j6I!u=Bqhth2d24rK@29#HLxNzJj=Z3SXt`cvXFS7#sc z`d55l*{5^%m=Tia?RG+-h0gqmU-XGRJ0Y&R_i59xO)MkBZNx$2D;&;0B9Z>}fXr{$ z4^GJ0i)Lj+!VfhKqb7NU+M;q?2`ktQ)aXp@nGf(zYkX@(vbloY*F>qStJ_ERtJ#BN zoZ=&DNA82-xV^bj)~aT_NnrTd#Z|IvXM)Wcpfl03Tl!YNp#q7gV=1?@FK9Pl^KKJY 
z`r*QeR(d@zt%7hR(kf27gO;_i7g$up`F0w2dtPoxqc5N>S7s%VEARz>Ik1UnguEws zUqfcyOA?3d{uTm73+0!E*4_0X(_VjlVUp&;$=ny5sC%ypUM=worvC3PBXi#yxqNUFS6$c9ueui_u|#6@c_o)GU`$Qp8!ZmkeYK&cMiXFP<9ENBgYpt! zKTVACW&Y~=H+>G3uGuJol65Zx?5}TQCo3s{6zX9ot{q>h=ooM~Fzb-Hm+iY|B8bTr zUu|{*w;=X;&dUfrB;>Ls^rf4Sn@`Wm#Btjtij%Mny@Hde|8 z27kpQVuA8|59oMOk3#L%oNWvpWg3yFH8_x$sKBTRzTjhC<-Z#_hp2jjoy9dl z2Jn2n&J4bl&G>%R=XsJRag9qEFO_VUtZZ_>6~A3&W0SbPue%Zsm@miY%4W=Xg^xr3 z1~!BP>!Sf;&NkVTcq;QFC~ndw2o~LQ@=xXjX~mJx=JlVPZ=}m9SH=k#Ka-Uz3`)Kx z#t-Y0<5m9yQ5Q>JcOW`)JtS@nm#v0LHM3&)>PwixSg?#7z}m$AY6jMmLff6pnt;>@2HFiE^g zVM)A9^5W!>D-6JJ^0H#zOJ=@&#to;}JyvbMO@;?qGom}*wZCRpd-A!&VO#aHX}Pb; z>?1ONem#-RXACV|eP0+q@lTNr>1GFokg0y=aS@3T#Ii|D5jA4ixu>R;I-Ab0tGp=i zmTZAF9#HDoLz*AMY>GvICBeL==_g`o{7@;=dJ+$5&b63L525fxc*J^YzCuTL_7?a} z1ZcnNH}Ldohob_PWq&JdH~UrWf;tU(Nt~KY-|Smi$)!wv@#{^B&KuSoNxYK9^Evg+ z@tT6*G?Ct=NUt^TI$9oM7?s4*!GYt_!^^^DZ&(e#;i^!jOYJuMa0pYvt$KZ4_TY(| zJzk_5wURH@?ULV!td<6h$1fnakpbGY38qZ+e8p@Fbs)iq>HK+sVtZh-9BN6XF?R{m-8A#y|!8 zRhxCl3k$LSO<2EU@Fyla{sI=>;Ht)>!|AF3fe1qa zMH2Nt14IlA?DLrH7LyMjJjC@v=s|4dNEe$BHR!~D>XHJnmuhjgQgz~C;espB?0{X0 z{JR2u#8%pnnpHCS+|$z|56jMV))z!n;b~|*wIlmewX@KxhXw4PUWVS27(8!;y7Hnv zt*<9mifO9?Q|x4P;TJv~+jhA%Z(qbkcFxN-tE`BE9{svEB+|itk>m7=)unjxgo#@**;fB!K03j9w<6`;-!v6C+>G8 zDtot#=SbiU5vj4V7mh5A;yIN(5u+%^xX!QZ;cz6Gn__hlnVVlHz)0z6$@JLq{JOY$ z_IC5@J4vZ{3G=JfaHsIGjtQ>FYIsjf(^H)}gn_6C`+=h1w5L;=0-FDsl0iUCbl;K7bY05w_g4=IYnw~N zjItA-=#tky3d~>;#YB*s$t786TWgQVqJx6DgRloTN*8Ug$ICk4aLKF`#=scRP#5aR})JmK7U+kfL;=f-0HjXLMXBmNtgIyY|f z-#Ej$agG1R2#ff9^?}0gKI&Xw<-7iK=lZk0>o+*pf8)D8&AI+l-}Q0M^=jYsAvejoa>W(*ZVowPx4*=w$8NoFyHl$o$I}Q*OxojH+2;1h563) zcYN3HcdobkuFrL@KkBEVLy-66b20gKGUMqmd8C4cR{iUOwaQ}G{`b~L?hEdIv=gG)YQK4*Ol2(@F)WOK z^XqVEvj74Kn-c@POmdl>_TV8|a0yCEE$zUrHhJgS_y3jtX~mq_dH+1fQ{(MP1Z;ce z^!EL;ZE7d|GtFBj%X(v@2?>+4F4Sg8zTor}X3Gb}K3ny&-V(bd8v59p_oy~2w-|M- zjMdhh2jmWm#tUP>S`Zx_jHMDK>=Lmu=uU!Tyfhm6W@gFQbg#o$eB-DHy@Zd&js&ob zza7gn_H-c@8&YHmN68v{3KxvoGmRWGv3e?6g98e!9_KJ=cmE-0|HMM?S#!m|AhByl 
z6YR};Y~pF@K8IlWUr0|!WIVOFPspAkPjSE%{q>ta6!e$t>UM&FUbj!XKpf`m ztciac!PE6b<}9bjOj5Qvxl2MYh%~0NMlh_7)?kJi>?+bt>#cczlhrjI^0Lj0v`Fan zNa1Sf7~+oP2=7p{HScJ7G&N4v*k#esn#2qphijzM1KDWVnhKCM4aip{c>&Sw_!~0& z@ecfCzgj{v^^20W$`>_t+gVt--<5@xe9kPa_~hTI-1%Jm)>zrv_|4hdb-cP zP+-k@RX!eSsXm`5ibI#hjk~xkwY*WJI*H^T+sX;s|24Tw=`^125aN+%IMt z`GR;WuP0!-DMwxMEVngbeN>7WB_yK2~nnUAO|oTPP7 z*cMHmUphESJ%i+{{e@2@_O|n+rE-I%(IBZ&!^2Cmf7U_3c3&*HkS%nL*N?F`QXVVg z6OocwXlcwE(-IA=$Xvn_!s!R~&mmMb`$o%}iCBzUqnAny6pzJ{OEaT*a=iV}@4O-K z9s4~MP-gUgr~5shamcC|n{YSZ}ps_u!kr8J$H53O~z?)AgvkZ%dG6tY_P8Bj`fc=o9q-q{ z|MvEU^Sc~7(ti4EnerDE_U8}s`tyn)v%aX!kri!~_er1SdQ|a0>Cca)KcA3W3Ick3 zjkG^Oy;Zm;01t8c^J6gtPT+&tzl#51@vmhYN_$XuSpMl3Nuh85{{i~$to{!Aelf|V zuN3-neA%(2VdU3b0#b|FWz=!7mpcwtCU!IqF5P|{bR@n*Zn_9v$p1qH^76k*-_-A- z?}rnWz74zD=F74?8iMp1wW*o{O_3kQ^e1 zlA=b$Rzi)KV^Ay4l3umXD5Q@4Z%>bz{{0`NZ{UvU`}G8+@9|$2a8LGgT)Ff1bWH5Y zo@57icbO>mfd~34s(Yo6|DVM@SA>}7oUOL0i&*dc0NU~+>sN21nc z!x21Ig8^KHItJ38uXr9yXAL0weK!5FR`kI$@`z@)A3jU^Wu)2EC+N@x1^y!%0yV$c zK1^EwJJI&VFhKtP_Cdpl?`;17xBapYd!UB`{A(f2K>;SPuvX}Bae%FPgbFEpq}^WU zOKS<SCG6R#AiW;q`>CYkToD}vaYmn{nf?uERY3=XC3l8Gz^BMFi74DX+EO0K6 z1x}mDctLENfefC`hyESN;N19V8xxeO0aVLAD=o`hR*_52mhvN~6qJdF6RKE~5?Slu z2PcndJ4I_DzEcJp_`$hLh3t~}vY=EDlxER(f7!&F&^oGD%);OtjhoQ8T=>wNcWk87R#+?^1fs4_~_4%hxvEj7ydZy=j)x~4e#WBA^-lj+rKmV4Lv=j z@0@<4&$jEY9pBGQ!RDm}B3Qz(9peu#J2p>3bML#E+GY_UQ>4sy6PfQ^AmSqNSnUXi zIB3mPaJRbFpiECB1t}2mq})7ObCK3~j@06;cI+5Y%91U}DyM?DS-~j2s1T$04ca3Z z#ed;R!6=^GDMqok->aWc3(oz-Ivn4HpGe#%nQMt4t0~2#TxowCA(^9Y7qE%3gOtNW zS?4gAgVsE8u=(dyoi`m^;vj~a;u1^uWDe$OyZfDDxhQ7vC44@!4;M{*_K#mw{aXQw z2_boGL)+DI%4WLTK*_;Q_gw%wwD?xj$8M_6)4z|uQJNS|(KCe+*6FEZ;6b0uHJ7R(1Q=&?NAAS=JJH|7<1bCS|xh%jlzU=q{{vQ5)qdiQ`{DuBht5s{2dR^ zxGgwOujQESmmg%SK%OXj^6J_jm2iizXWeb^p*jW|8hms+-VAy{sL2Yl?{eic_FmyzjcaCXAU z>z5AesE`mZ@FFJT{n;ETdzVxjtKlBji^N-=2qe`~50}SksOlvIUc9;h`M6T9O%8f* z$6J$2jRHvzuZx7fwCdg_uY?pqYu>B;5YLPc^Ed_&Kw}YgN9w*UvF05IqGn(v09rVH zIvVe|l7C~Nb@8K2{!C?Kda_(kOGKu(5kss6kM_ 
zqN&LOSosTrE@$;I-To3K?OI!~teXa-3-}Oa>HRG(l9B|YJ-La4ujL*SOa6J%x z)B_O-yykcy4y#B_EJ~I{(i8DPydK|&VlX)#n1_itAzqL7pj@0BPh_n2q|+$+V^a~^ zv+s2?e9kqcex+64z=j*5R|}7X&-Nc^rt@OvCwi|OpNC4lfX>rOU@Ii8`;#{3l~e}Nvy(*u(;@~g#GCIb?XY!Qx!Ti!SR9rz2x zG#;M44S$~({w@Sc8V^q|`2-=eFNFgy2uK6`ttv`(jijsKFPIG!gUM6jZx#GqDEzG| z)-Y-1m2&l;R3IZdGXjeQz3lKVnw(sm5m=%~pj}TFTnM7CAp7ClssmqB#LV7!64s&_ z6<}$qh^F)*Yd$B6v^1*7iPFftz&0%+!}wQw%Hca;lFE9+;=21}FIU2elmC#=&1B1& zF5+p~KXrrGf$Mx1QuO<^he10wBT?TDmM0$uhEi^N-=8c5oU0>c93RN+&rNBk#1$+G zW-7Kxu^hkZcDjCH;oq%tf!`=paYV3fRLgP9|YRquJ6)@d(zy_wc!~#Z5U341ws5ja@x@1HBXcZ zw4vgIa!Stn0J98`8miVb{by+u2Xa{%I*xXYx?dGO$QE#Doz+PQKmM;+<%)foy_-9Fe-s#kjR!?Avvr7Hy5 zR|0w(zvhO~D`WU&^vYk+E03vOxt&KHy#fx9&N!c$8Rsiz91eYOW}GJZ#f-zALx5d{ z+>U}s@8zOR3=p}aO$zoCB7r43o2&H3ijoZE7(Mv1Xp@+1DNts%d0Dhc%*8WYlqWWc93O-hZnvZ9D{d!OJ97ha0%RXx{Jeo+ zhUq7Q>*m-$7?D36$z^vQ^1Ay4!Y7BV6n$TjVpdJ|l0hm5DIkAyH;b@cPsbWd;)h6| zBi5P!q!R@iSKf;ww+e{Mdz@dGS>Cy<-jXNIlXSI@$IJ{J#MCdNPXNy;GCEacv|@6h zkd{%^c4EJt&ng=VcrfMoo$}x#J{ro%FIRxcRBzrlOA&iz*CcS03bGb17LwO z03OTCqO*pr+@8J1(+JqhYlgjD&(5)TZ-^My%sa5w-9!@BwosRS>2UE$0gL;k1P`aL zf6V?we;Cl*_cWr|LC3Z7&~YDv%sh15+of25jyp%&@vi-Q_9Bd5t%Q(5oMxG!j$dt& zJ)lY&-mOjYkkfrHyi;9WJfeBPy06jIK9|HXtcmO|@rq^UFak|wi$G@J$$L#@+_P7p z{;2F)Q`z2BhF^?F&XA;HD$_)hUesnPm}4q9FIPd6sUT!3Sow3ef|C3SUXVuMk9&yM z#qailY}UB&!F$M+BzH0!VYw8plftVMoE5m6iyPiCKJeqTQL=ds-VPsl-K?t(K5$#5 zTK+UkvUi;PM9+WS2|h48pn@-4L;B-7V80xE;3wEf&H{(sKucQfn?w*?gy1_}gy5eU zyHaq1ml|;3%+)gV-FPYk9lS!?+KXs%#s6gm+9_VJF0HpJXT(<|Ea~)lgBL6-XMJO7 z-W#ki{rDkwe{VbdkRaw{bEcLX-t$8W@q&X=ySK4J>aTn$GtN|n92guDttb1E=RLvy z0w2YL(AnE1y!~DG+rb(B&ie=PT`yijcGg}c-1tZ}GI6=AQ-nU&IjizVjHr2mI;@nO3F!7xn9=1GS4?9yd2qhn0?^#56BA$IDmr^hdzO~zi ziR83|1DQ6yt4t?MR`3_mJVh|oc-F~l+!pi8)?QQ4BIMtCEjEE1~xj`sHRgIzG< zQza60FiwvH3>a|)aUYR$fvdcWcOqKar;%vZ%rxXu`b{o~0A$nb?~xxl$Ryl2*s-?sp=_(u66KK%bw$NmO;249eYawlb19~9shbIORpFOC-C7iSN4$6Fp= z@!$fy;vf;caf>y{EhbpkzHO+)fy+3vXVFIQdnTX4zUUvKpxj~TNjR3?e+zzbpCKN8 zvAaLm>HetFB$_+P^K8Hvdu-8b_V 
z*GzuEa)lvr+`NO;XY8JXI!1BF{w@AS(%8Nva*ryq>X&ZE;pSc7@V%9(Ltt?H8jrdE zILd94!(34#1P0iM+c(EISR+Be@^+k$c2{+ZM*L2X!5IyXN&}IQwhdv7AboPu#)%#6{!Kj z*QXwOBIgm92<| zmQ@Gwo&c=eM|S{3so*{A2Vx3IN}pNv^W{ZOj=e=<#$)NTO1`d`G$EWm=j&+bbxuR! zVrA+a7RX;^dpZ7tNa*=E;QN_i<%#=(_l6fP`FwUS<}-D0t_xPCj;Ra{O{`|WJ&`5L zoqV|D>o}(Hp^Ab!Pmz|fV^lw|7t$*FVVUTM6Jf1L^%#|EgKb@|ILd4WG;^~f+miEt zfj6AsWJ9;!MS?$kM|KrPp(B6h6>o!y?~zU?>iEyJsuN~KLd#}-2eX!2^%{>S%xa7hWsR|Jjc;>|-QVYfW4Q&;8=HN&IexBhGou?3~y~AnN z$8uKGH00^V9{|wYmspQ85Tw@lgvwL|A^Hr=KrbG{zz!>adM0q!uz~V?wp0G>R~hQh zQ6Q|jk*8Jh*}>ax&ZqA)p^wgS zm?8AFLf>bJHXnULobazXdz(j#{BY&dY=|=RTy~hMOly8uD7=cTPdEzd3!OC)`J6Rg z^y+Ep;ST}Fdb#gRys4g<4)0E=AQ~STvsTtjY$SJ5X{%)KzCKv&M~q!!^PYce3TIdvUdI* zMcMu6&(y514c;zTok9Djhtha(FGB(W>Ei9mBHPTo5)Z>}p3Gk>v%gk3G0gmk~~-GfY3nlIjt<`0ojfiw=SBF&XRccFQJGe0QZAIP#v z=w4N7=pMEsy1nt4qyH{5j+j|?O0R*WBbx<_P_wY@Dn4j}x)#e|gWsHKpO47=q44`u z10m_s?(y5Bdw@eX8Jauhx9S;}-!c<^7rmJjez1IbnI!T;dX7RgAEu|o%m%C*YNrz< zrJM&(`fRmooiW3P2KUJ)w<(q#5`*Z!1QMCj434|d ze2FXJ2+6GSSbF&Fz)JrWasUAaWoR#1I6C7^sf?eT`o1pOH`u>A*vQOBaloGH*+`PLiaf>`JxktB1~&TE zSuZqa>vdc>(5@0Bo3D?a7$~x|*jQEZYe||l>WG6yv1s7uLXX@76o0yq&Y$zT$l>>P zM5m`eh2D3SUIy-Vj8}S%U6id;dg)+94i@I{odw((jAy}jCN^BAb+KpE?nz+xv=`bv zv|r=6a`?___qa+zSCm9_{EPU`PYm#Ydv_h*Iq2d$BR~H&d}sBmI{3~xaOb8hE7WZ3jV25vFMa+mFzQJ`QE+?1Pb0l8%OLuEx?E_jtX|94VIialp-?@};h$}J5c7!Vn z@)v9_58T-SGM4ia47~F-3hzA2z&m4p`QV+a>YH=;&X2KeY5(*9PfWfO-}zUb{zg%B zitoIUC~%=q>>kaNW%SCfvb}A8aZ|pJ%Kxw8J3mfMG9;;c7*RthqjLl z*7R0<=Tz{{hGrMw*^J9h@txlzvZ!mdflYVuouzdzzKDgS_|8bNKjv86oVf6Sc)o-+ zxCilU*0F6~UP~U>GxecA{%^;74tmZ5+8f3%;V$Z_V4s6E#{l~rCF@Xt06vYgvJvEFam$^wujalBf=w=o z+{j7#924c?26qS;^agxh9vHOdjb6^~!y|TzoeB{a{`1o#6Wat#fSPG}3E2lLJqDOy zrR1@12TVYGRVVq_Gx+`vaS3Mj*Nokci#=NW^yG4hNpSvAF2Na`guDdj&d4e6Jiqzf z<0ZGR)BNTi5u1^H!oI4IngafSWV}Pj=fS9PWf+i@H{y@aNa(ew zbqZ$+eG{y{E*I{?*$u9ugNkEDms1CjENMfN>BnK_Loj9yWkNg}F>p6kOunZ3DT_aZ9W z%v3krtrN}JdwMg~&4=ntbu0fIKytl3wyz*K1wntTG+3!^!6$`lM$a)d6=d-*4<_YN!~as-@J!JEeA>f%jbLzKj> 
z;!O`#wV%s_S>)z#)bwksd{T5%kqbFpF9j@_){N|Lt1{r99pFs|6>s{Hpo=%19Mo2> zwYheT=7n{JH=Pdt)&rdOzeW3>ky>OTCN6@2Cux5H-t={Z*7)(J^Qz1~;u7xsWvT6A zH#^%$9%P$UulQbtFw|#9^z>s4Fx2dul?qyApUt5aE>?%>`~C$eZaSr#^;aSOntP)h z&oijMG)wk43UR7`kAuMJ&cqzHq!5hS-1H*muq zKDAXZmZ(X)0;BpH3gUe_7mVuJJHV*ETApEtO7OED@Gz=>#Z?^wATQZfdpV=Ps=rpA zR@C6!NMLiQa3mCI;9CY<8b=Rk7K~sP2aUuVoa&#lzTFW%^&AO+5~>uR`hko@Bj({# z-#}5;rviHA!BfkywL{Qq@To`fj3|dMrg{RdST_cmTF!AvZWCiVagNNk7>3x>hS0Kq z%8FEWz;H_lI`#Q7370Lj8h%FeMWDwdk35WtP^V(a(f|*TEDd1QIqoXJwze+)!FQ2^ zw^I0A6_;8usez^zd_Ozj)SdWszWjqsE%4MV2i~=BBp!0IfK#8Fz1i`j1U|*B6$J8~ zk9h79_#pVyLHndVQN9<@YENWbdD#Vg>irmjyNXXOf$7>g+v8Kq!M+YYHL(_(O?PjP zPmRxNNzEAwEgjn)pE}+xFwcSdDxe;h>{sc3#YAwigz2FV}n zNpd^ar23=uksxP9SC)>C2JBeC7K?*t>+SFHY^E0v7(2MFva~p7Z?rMB+4r93+8e!Z zReNJ?v1e}_a3F0;cUy9r+8YjkAfoaqNvFsX}g`z9}8Y~Fb^;LA+_`sFZ--LMCc3gvfF|W34e{{;X-CRdO>irWyWA5;6rvi z9Ng?*d2XUbd$`*gtDGK%wlFIe4t2e0Z$s;d3)z!e`}wyNk!VPXQh) znWH@QGEa@SFFrSK6v=v=JwrW@Q`De+SubI{cobRjxOl8T=S4%4JP*Gy?kYiIWjjE@ zSlK7a;dzEu%?wFQgLqYBQrG!mgMBa+7f$M6!YI<9st`x+W#a4vm$k2$At;-3J$nbQ zI*C5P>bnc(>cyTtpbr{un(Q)D z~5X#e{PQd{}Xtt7lF4LEW}%#n~%2|{0%X6_p#fz1g~Yr!&{wej}u~9g@~*u z-s&hW`0!Tqu$DZa`JBC;KW+-}R-Yq&nR&Gnyw#oIKaSk5r>RZfIy9m3gL7u>_M$^IH3Fv8qROIgWF!(^|WbiG=el&q-4;Ir~5|opV z%DT7Cj2DoRES}b)jO5?J_k3K|uptR38Y<_bb-xk)lcV# z;al86(;%0s)hj9;J1Hi>+jY&qJ#Nfr!y>4>Wv4OV@I;(*mO^*a^ zix^YWu(lpzak}uV+QiH@?ct(|9RjT`+$>^*svA)Vv}(Rb#$iQc_sqwxoQN_z*yE>w zpgP|ktKW8l^GNM-$W+4cQUA{V9uF9j8mLUXNFSahqAivNEhK7_^GxX^BnX7nvq}dG z%<3R{AM-^JRx{nDb#jAn*g;YiFtpr&Id}WuT37J}PgL~>;nEQ~twA7I50MaP0t#kU zM9SLpAXtejxs@XwGUJ(%biFG0s|ONEl*3Q3 ztiRBh#g`ef_FyOlZ-!MJXYTDw0#s2To(uymo+Z_Gt^Ez@O9XHmN5gJ%0(ptOgsR28 z^(i|s+OO_En46zD`TjB$yhb=x#aWpBEHLo|2=MvUB=a}In0=~}uaMv$)Yx_G{iS5! z)U63{o-;u8us1^Ruq62Zz6*>>RY`KcdDd#UOwz(h?vHYUxmn3pOqG*YE9LU(wAXn# z-d34xcN6?MCY}WU;TWL+&RKILsm@7pdr+iYQ)f9((3-cu)DEP*HE$}Z?GiwL{jH4? zo50MApFlW*y1HYvFs{;M{8u(=j=udqhepsY%^i=X{64P~skd+CE! 
zjo}m$*6O0x@XX#OClVW5qSdTkKYZ)(4Kub z7eEq%+liOM1mb}R!FW{NYfpC8ta_OV!)2=k$&y;D_m8CF+)WPO(>%@D(e2^D3eH%M zUu?}UY1}iM9#0ppil1%GA0KEuBS4wZ?^W?Jl)E&ZK|tyFu2SaY?eSe3&*(~-DR)); zaLU2PGtkiEyGeN;%H0~z=tjA_l)>37@7`F^U6PIIjz;?jJvpz! z?I(@}13aKZ`{_FFCP(%(Pt#98^e^X8(@*{2tAsT<{WMAYNy?_5CTTxO+4R#S?I$Ul zeww8HBxTc2leC|tZ2D<*cM)8L4p;#rqFt|~gFef=h11fEf9^)XpU{+l`i;E?P16L> zg3e2iM7kD+lj*A|*%yAxYFdYZmZK?sNRu`fHCEM%Hp*P322OdA9{QwRC@}QeXLV8T z$(k^ly0|eWCos008IfZ$mc>}$iczuFzT#$52CVtPQaw$uA}RHbOZOTED!>{+1z<4t zrNfj3(vOs`7Qg1@KZ%}oowz7dPAi{u1<4<~0N*<#Xy@8cm+6ex*I zhECF^wgl+XQ05KFobOGZ@U4>UekjW@{HtYIN!%4r`B@CNM%9XpkJX7!GsmDA$TKU6 z8hHd}dt*2`u@)0p^UGH3FX~3hi6&I~gUlx?t;vaZA*v$5OfMzOqAI9UAQQ@JMP-nz z-aT_T-&e!g9=0qq24hl1IP(_IiO_1z2rl9}Ax8YVobAU@S!+L)bVT+Xm01$j7d1A> z*W@$W&;?W>aYqv$mkgSA zthve0r$zAThtiVtY|pM)u%EGOU`n%ugD0hG%n+lWxmML^9|HGOKgXa?x}Y8b z)Ex`dUZZCDO6XH!edbH)208WH9=0-54H@EdO0Gg!VccYH+Lvsh36lBy`52fdxxg9u z;byhwr~Uo`+VawV@1rQ5@+Bn8y7uo!;)#tU$suyKNwAD^W>Jr-WQ)%%3NsTk3!}Is zFHk^`@E?)Gl>iDspeFVN+F{-97tAsxJ^ z{?vFelWxtKNdl1~!zPN2gQ=u@8aZ*3Zmcy$D^vz1@ZXS;U(8tV}3?qO7zQzs_rYeTfKxu$- zv#);YS%48;XqRgqjvGx)j zP8u;bNV_xCtU92TjSccVKen?$1UY20+7NX z2jGajmDv{#!|w&KUH&V?!C5I^a6_fA2?PYkZYDZKrfnuWzd?XrEn?zO0h$RF2&4HP z8rj$%d3ZPIxWtCcaWZbj@M;NWwn}ds4ei+v2XIZ4rb?j5UFKyfHUg*8gur1n2$D#m zQEi9K_Vl;ZcIb}jFwTLx5g(_L^;h0jt3h!;mLPID+z%85IyuJ*LH4x_3lzw~Ub2@E ztJA$`D@>vV9^!}SEJY&0;%20&dBHHd$!`DL46|^a$pI>uN72DhF^{!Oi)o-Lhc>g<>HsU&rc7IABqP_EMeweYqu(UuHIJ~zpeyv z4O`O}G3xt=2}q|G+saF55{l&VX140BJ&Wvm-rxl8s$#Ye7SW6hAuvTvjGuJ3p87pE zY}Walc;GI~`%3*TePw|EE0Pb~G%eTN^1ZtyxmSCohbGhkSGuvTd`kcaR5=Zo{X)&N zyOGTL%2IPrMSG*wGG+^L6<4$1FfZ_f=57!XF#P+ywJZFq zwRCjPrp*gzndiK_`j8Z$3vXqAN#=@ z?A0+N&*M`fxEt)HPPyDEFL263o$_y;@*pj5O>WG~?;$n@>3yYRe1DF=42LEZ$M>it z(VV_E`Zd(`JU1S?N%~(Nx}i8OVCl8Ay(rXAF_JxfLI2jHA-wk~-}YKz`3>FS6!eT9 znNLp-6w03rDf@MFDh->GIjJ{&pX08qGg(_mjMSW ze)X%lHqe~!!L}Mwv|PQ|Df6{F-|bv(g>PCcMA2p2#dTD1&|kV+2A$3_cv$n_gg6bo zd)AQorG45bAO;ZzYy2hA^p9B=ezcKHKRs&fULq}G$s_S|8=trFcMB4W?84fkc$rBr 
zGnM!4rgZ@XUsifwGp&{6@rYr|VW*FYfhB+_9{GvgaKWR&Wc1o+@OqOO@14AT)7B2F zsOW|n?h!V^MQhI<&kd#h_9pW8r&%+)P5;Xvg$j(kXqD9Qy8ukV4Wf z=Spk&pqUP7&uHXjqZYbHogZ#={SBev?>uXadD(a9moZH!EQxGB(3)0fxeVw7bS1}jS9#! z0#V5Qx4Uyc-S;;&~rCs zhn}DFvw)uK<-Nfk$m8Z`2RunGZ$w(%tSb^z<|IjFG3)qr!%skUV#!q32Nf zT}!>ZbC5jc(la6X#ZBL;B=)rhsSz&%sc^aTG3=-1L*`;`{z6`&%weg1{xK0rF=|^o z#t-$$Vg9%Xtz}%e`q7iWf4{!W$Nu_IYe{wuJ{eR%UjJdusqQ4Hm0R<>u&epoK;l}n z_e0-b6UHE7{9geJtoeOPB`Nnh1#b$c;<8o!uKlYUG2Vlvfka#)CE4HHJ=}JxV}{7k z?dlE$LgpBR{c}p;bhtgpQD*dZt@dN;6amPOldXR2r_YO}_O3l>F3T*A%0hFI_X<)$ zX+RRCNsBILIU(C>(~B7*wf1JtDnR>YQf#mI#)GDIC%4h~@!{m~QXoxAhYEmVns|GK zroy}`;7S-pwhwXhVF4@D%-p|ek81$uPdC$MG*2LLJb#7;5*5hLIl(TXc8H&6lD77g zsL`&{;&86ThtcBk8bKs<53=X?(0Yaj;?K#2a{FN}aG2c?ry*~Ovx5e$;cwyit(Yi^ zQKaNWwHA{vi!*Gpv`w4iZ;Mq1of5QPE?l;+M>d>U$eP#4l|Zw=TMcWJr&xA2{DNyT z6A7bA$jKLO!HBi?U(iOnUf5taZgRL_H8k+Ja>8m5hc(<7%jPKDpwpEbF}P7hZxyhE zbJP}3HdC3~|NeO?*j+w%xqKelW!=R_EVHdlOpsy!S~=B{8L#4DaXI4 z;cbMAnIBTLUtVM6WImImu=hUx`s78~%Wx5iFi2$xgUDj}rf86yqv4Ym z%}S~uTsgTMfnn#}3UU`B3qJA@+Vp1ns(Ly({0x% z)2=bJ>vY-CO5%v#=UwQ*V^qw~$HdkUanW)&iSr%(> zzy^Ez7Y7pL5~jnq>Fs)x*d#* zP4>;ov6f7vU|fvu>hRa97kz4YJdBGOQg=G+C*Lz-M0TdKnIX)$P`kj@H%L&P_)PY5 zRV7DtkQpcfB%!==HIK60p02wEIxtTC+!dgaMu0}cqTU_2zQ0p|j?8fg)lu%q&osEA zwZxw?ZJWr2?4$NO@0y04FAf9b=VJc4kr(DKA6@Z{Jk%reIBJdfp>8ddjRf_u@8g=T z9D-CRZ*c#Y&hs}p&)=H&{1xu=uLhuq*)hJG5L99>rYvi#Pp-#jATx-$G01*I9+bJn zTqw7H;XJv}|Kv^nC$Ds#yvzUO+5RU-J5NscKY5h@$pf4xEB#M)^*{M}k!jI>{wLqy zMnQ{~I!|uAPSy{yaGsFd_S_&wg&hN@pN_G=L`@2G*ZZPjlMC&>2nu%P0E7gC;FfLj z)CT*3A9-=xabA>Z>0av0?@@QYPmlWZiI{jXBpY+4y7oj6?iVMH4yTX$1=q9tyY`l- z>GY`4^6W74?8w^Lfuh7(-XK6PJ?b*Kb(py|s?w~$^4GhBcOg^;$}jseFUKP!J*v`_5~ei$4ZU{>C5LF$$LUescsYkk*hw;xV@#{X9k281J%-ft^BU|y&2<1&7+LPQ8p4WvYLC;lE&Tk*r7PBB{33R_5 zD6-if&Q+lDdDACDnW*!0XnpBmF2KpRMNzS9SpRqF5GBy6e@r{)vI|vzOcgq<`rpen zhQy1M!%qc{IO5bXvW7C#4_y+)n{5{aeeIHS-gmyoF26h zhGVDEh{kg5m&abvez{gksbhb}gFN-2>Gq5DVpq*=;%2p1%hxz}sG^Gd+VjHb22lkM zi4|^DF63vi0Ea90;eRLhHwvh>dq3UneJQ5<&e0X(9_Ohw&rIH}MXo1{3nLm;{^l`$ 
zQlp;W{;*B9OKD7*5-7tQ-;_m2-qjppU{S-qi7UIrYUuMYCw zn{>diX7|0<4K4ETPTn8f`TMP>4l1%gWBcI63AHy4qCd7le`2rJF@pjyKpQ;1zdVFa zM_Iij8bjZCM&8N)PhL;#C$mPu%Y)3z)0*`C7VrJ*ck%tf+rMwV>h({qKj2nD{}hxf zlAi&RN3_iGY*oh(5*Wc!>)C#6#nx_}o+wrPv1E_z?z#DQV>Hy9z$di+`IHqzpb5har9uFC+_Ii!Yri>QZdYSxZe*J{Pn48kXsb~FXE-zl-!lO}wOfB2 z57ln1jt2>D#n5jKJgc*7vWI=Tj9LKkkbxfI6M4Q{?TD%Ik){H9TJf54&kuB-pYJ~Z zN1jjoR_d(c$R*l!Z$bSA6t_M8sQdiG^8ADOdq7c%4}fMJ7sSWM_lD&3Y3;RN^{n2z zZe2y~)_tpY$Aj+5)v#sUuS}oPUQxTHXZ0|CJ;$$oYxb{OcRbsw%fi;P1mO1i51|8s zs4=lzJn_liQi<&U^jQ|Op2h#%t10`Yj6XJIPA^tx{nGd#<=4#kK^1>vmgMj{`FGf+ z+O3n~qiVN~jZZR^RLazGCG{UK=Tx6bfM}rNZ}?si?2Q#q$q(HGts z|Hr>VcKknn{9n!w@IBLev_17rhbnziCeFeXJmrCJI%I_}u2Z6s58WVWZ@^F6_Y+6L zk!;vW!-l;TO&@gt4;nv++>RzQyxfgz!d$`8;2Ftgn2cuY82ByuYu!yRW>BlSuMpCX@A* z-}04T@RgtTl^^ky@9~vWzVd8e`7&SmJYRXNuY97fe1xyOzpuQzue^;(y`cTR@>{<0 z3%>HxzVai!@;$zC%2%H4D_`a-pXV!&^_5Tbm5=b1_xF`|_m#J;_qE?we#=*W!B>9T zSAN7-zQ<;#5K^L*v8zVeB_@)5rB{=V|=zVfzDeeL&^-}04T@RgtTl^-cA zYczuKk;y7SOz9KHl#qIx9?$N>gyKl556<`!qhtVOkEV_%4Mz7zmW0z|`g4**S2>F^ zl6(n#DnN536?C)x+DokU*p^}+1?7fL43#d!PKL zKHtNwFZ5?E%U-sA|5D%gC;RAo*u3A}eLnaHr@p-5pIZ+er|Zdu)dSr1q&0s`&yii? 
z{qWXs>bJYzVSR1*!5V51>z!o&I`cW>&j95*Vm-T0?bfOB5zb;LMBfsA-%5@lp#Wlk zi+xWApX@D|sz0F|wyN43de7$mszeV52f=#w>_BpbQ`=s=4b-{R!@srI>qWI+O-;NC zG$`OWs}g%>m$~#3OfB`q2UyQ`Nw(D4L+e;}tdlGB^C%5yb{NUh>FPfj<5w;oSiS{U6o|txBrP9&6F|Bs0u-?NWa-xwut|w{Xk7+z$AMxRkXbEp2 zLB6-lu6MpC%Y7pmiC(O|LV9eS>LQbji@76$T^(PJR+Wm{QtzCKn=Tj-!IbHt#w+!H*La0jO@-rC`L*v(>6O+RdW#Li{+sBf-tVNh z*iSEbu4?m!Qx6p3FeGpT(Iq%Vq6=>#m$#^>nj>4kc}{NHEsIg?44tFG$^K6bMMp^B zKwCM%H1e}So>{>E{GL!O5+4qLJHE*Ng`MB!oqcam=ifUc|9gY9;$8XP_x|1Qy}u~0 zeY^g>r--id#M8l37wl-!B(n4TrEnM(1TKc%dkIP?b)T?`4fbxGGJJ^P_5_ETA9cd* zNFKLkx?VCZVom3$sMW%HnYvlytRZWN&KVjA1SIZFo>*-4L+9b*<^nuloUSmLcZ?ckeEGQkN$CVNhEc@q5ntUN&n9l8g6z@|6EU6gs>&j2oAx{r+?OFrGLn- z(SNK*KS2E9)SsmKaQXtPZp}u*&JpT9W{ces>Ys#f+O|=odnlVdCHC4DL+b@(2NdN* znoeU!p0Oa-l{0S=>uCDtFU~vP3nvHbT*Rp)W-LUK^`&0(NJwNLD45eC`<9RLMMZ_i z;P1bt(~bj^G{IgA?;@#twU&H2DbF7$M;khhaOk)~c*gmtrt55gNiSo8r*?0C=l;n{5BY~(cn%j3IS^M|G1EkN;Qni zi{#{bESq$n+i(S1&tBP4w|;2&B97zkGfCcI?ik9I^4f2ERJeSu9B(RychIu7%Cdb@!LJD(*!*kNZM*a?q?qlKKjb`231 zUk-HUI4AyCq*(CfQkSq9Fhe3{%>g->KcseCD7%8`USHDNWc>Zd6Q@mAY&3gHX%E5 zDwqwpi`(yt!B?xFW#bT4(yKIb)3-X3sw3$eO6>dagM{TkFR`v*n^mu9b%nz!!myI~ zuIwEA&jRfPG|dKq+tCW`p3PFnWtd&3(s;KnvYMnBp|+aQv@&x(gi3##L2#ISmQMPK z`L4W_94sKK-jG}pPSy#ZbcV`wn7zNL98mjIl|+N4kG)8(|55ky$8Hk}(tA9%klS~g z+m+^a*W0-LTW;Hn-q!&TuDh|g$f`e-9;rCHYFLLE&)=A^1F<6M!|LMJ%2JCo^nXrk zqHpM{cTP(kxkW|@Tej|q*~64MAdO3dWt&#|F;dqSh=x9jkKxoat6t7Hhb`<@Cc1=E zCyGUSfw4%fdAG^Eq(mFG+M7gZWqBvIu2nCAeKNOahc`u&vx=!IetPY7r4~R?|Fjx@ z0xP3pU-K??O4S?{^jmjbXFql1AYuS%0mke4X~{lp5S&^ZOm_lK(Ry zKse$9l|vVF)F_FdBnnC(XaWgNa1cDWL#nY@uBxuCuC5-Jtu6_a`ykSi z3`Wr?F`CiFnk0i2=I@o}wOhPOP$z-0RUfs8X};za&q>ZSgE`B!OSE`)`-d8NsGyFo z_|bsJC`nO&j(FPh3+0q>54uzta3{vlm%tY_%Te90M}oeh*ybgiWGz zLLJHrw~m6CN_H-}98LEhXYjdjj~mSJ@G(x(bB(03LPV?rZ0pQgjc#QYuG8ZMfG<$b zLB8X7;j{H!>obfc)9T8S1Jq8>MyR|omDtu<1Lb%Ln&=aLbrW}ID$B#KZj>*>1BshU zxa3Mi0{nv`(2yV`J2mEWqbr!m{Z5C4!9Eupet?dxeMzk+ldo=JrDV^GlY8rI`L_m% z;K(bp8ZkPMufsDlal7LxEZ}cPEVV4L5|R5+w*oPOxOkAU1^8DA4)SLN`a~YXE2Lr) 
z|HaKL7mWL@YQPO9p1++x^M7QfZ2XsuJVP+mdz(~Le~rupHK9UD9)oy9a0mpN#VPiu zql;3t$rJJ?{sbGrv|D>SG>ggH!`g3g1pOAsypdqJ+O@9N;+9vXOzl?lyU)Bs4nd{~ zCzq!{#U_xWGX5<@wiNP%X)cFPM2F#FUNA8Z8`0b9l{%D9aC!);?prVjC&aofO1?;0 zXEFqTNo!;nnN9~_ZX61!oQr|L{u(kL<_6Tl_EZ=a5cLntRull*DQaDN9L=4YtmaoC zRt4|31tR6y*i#0xmOy;S*mS^@pg3FyE6ly0dOM5u!Pv-Yj`goh<&^Y5;%w*})q0_B zy$FUTTZg_duNBHc*R0@BAd6&ww4aH_`e*yFeVsVS^Nad^Mw{BTX3TDvic*N1IISbn zvvG0p3&&ngfT0a4BpS+C^>>n0KQfGdV}65MDFQy`;+0i3`B$&^HDcz!WX!b&#J#n^ zs`hoPOQroU%%*PpOg4!jZh+wQeK-W4XcPQSf$R|c91{GM-o1f>(vE`?jFbl1GaKdu z#Re%x5%I#|gpzwgq0EI&H%A z3qn(;L(XeX%l12{wy1|{2R>;4RU1#oA4EP%m@j_(2k>p$Aie_|#dl5f8KLpLYfj7R z?I7B;9-@aef~f5W(sWJ3x9K0jw-xctQ=DZ~T_cl=61v;4J@29Mj7Dhu@{va(J{jj5 zYffwF61!EsK8Sv3fY=_s@ZxV>3H5FE`-mi$g+n9!+pIx)fBaGk?lynCJj*XEyry~R z;^|X?be2Q(X7vz0q!FSsK0@)a7hmwlt^aiR0~5ueRXCoOJo=SeuL8QGaVeHH%3*Ay;~uZS#%vDO1OmiK5|v~&7)7pCYtYou%cl)_&h?|{?NS$hC; zk2P0bl_0;(OXpy}|hh8{#^rwMODsR3EVY*QD9aA6 zvRwLtM_H!p$+uo4H>B#xRj!`=>;pY+qKt^ouu_$|58I?`acqIuh395voXri<$}?e9 zz5gKHyY05kbM{!x;b^2hibY#fe+&oWP;wLsz6x7 zS!dtYVm52(GVJ?~|Dk=4iSZBH_Z|P7eczr#YLD9gG5h}U_q;6gf0KS6JokT>et&(} zOFxzC2)QyP*OAf%Do9~q9ZnfT^2pIf^dYJ;2p{B2%Yw+t#aB^Q`i{pRbK|q=vhw;J zA}js*_IbQjvT_XVLnSA>AZ|zFtw&B0DCfwDElp7R$k{e3nE#WKV2E?F-91-Au16!p z;{4Jv);Xvqc4so@1q4js_4>IEs^&>}Tl*XZ?mx1(W!iNO|zYld$=7d^MAwMG$m3%Pc3`~^bf!by+dKR?q6e9G7kr ztA(*F=Ja}BtwVa**7&5qr}X|9vTW^j?tYJ2lQ9Xa+J6;(T4zqc`GmLNLu*|YpVKMc zzoWUTLOBSPe^6aWfIfV#Wov*V~1NHpz-J#efy1MvYJiFds5;w!@Z zM>kOg@9V`X5T2@Xp5p4*sI#jyvdULp^bO9}crBS}BF|4Li)H3~S-DkBG^U_?6HARL z?@Tl_m{00%% z+^)O=ZV531oDZdeh3;4xbA7HCsncXNX0I0J%f0JZ>zbE7nL?p@f)`B!^lwPT162HJv+OX?xFMxk}!qaZ32dls>kT1(}#dKh}610K! 
z_$CiOs`0hTXoGHkE4}-UJqik>^j^lw>HQ<@C3`>Fi-mmv1#B{L8W%>|m6(_S2g7W# z30P!Kwq$8v4Q5F8=moSPzVv#}=p5uCR+j(YE=J!{?RLE{@7Eqs-f8sH<3a(Rfq~r* z-kOtL`>I<1*YEFH?C-V>wKDnq(J;jjj1-caSIL@zC4sVK0RXL}-DyK7gqE${_ zLn>#;epQa^_i*&h9+JkL?UcT`!P8$%)kYDRQ3hR@lENKvLI4@w-* z3u>a|?_lEU>_Le~=~kC$3i$Nk_HkXbCn)E9{1Ko;H#D=Fz}Y8mT6zo}=X{mpnRj;s z8MuKB3WOJNJF{snGNhHlmzdoFj#@eyk?aFUmj=brL;9`7513U`f~L?ziJn*8ROe=$ zd>Iw-lWNyvU4iW-GXtHrXA3ejD?k z%c{qMr}0U;i;?RmH6In&^Ul0UHQvl$P9DoPSFx?h&?<#T^nZTKHJCiqX;4J9FL^T&(FNZ3ayxcq9SpJ;Dx zsP#Tx#%n0r?R77dY#ULgzatpd#V}zgx0Tjp_3~2;>yawRC#l{RXBuCq^+*6RWH+{$ zI6|bfA~}%_Sw*0S=+6e^$-CZ{`FyE)D~>SA9Ab-=oO)G0M{IuLsEGmg!>V=?0;0mb z`eFYRfU%$8VPZ9}aW@o`RlJE={Uy(Rt*L+;mWTUzWmh2aglq;Tu<4xaS0I@$`$`yx zjw;yu0Qv%LI0L)Zuc%-RXW!c1AuAxDczHG)CBfVl5;(w*ci^HZ%WWC(eMgZD#;5xc z-!L)Wucgtp9|W*TO~35y1p{#c?btw{{@F(CBv8oW2X;oR#ZBXrVx#v$Kup*?1A|J;sO)WC=#dB@(F6Q>f-M-w* zV9fdXSDb;#*_%G^hBvV(4jzE zkXY||=&N^;4wD&L%*guzoP<#3u&lwL|0Qm8gl$GYNn+X551mGcwXeDJDv(l6=x9p0 z!&y#J*w(ips2^MQA*s$)SJ#TUTI>eh&hWCY?_IEN1FHN`L>ugo}I8T7Kj zHwQCrb4+UXzS1arpO5=^*?_PNUz{OtoFuXcWp>jT}Nf8X*NXKa2)qA*M+mC4#E`ByHOR9 zxpcoG^(L%MJYpOLqAVDmiH|f%b^U=mueR z*7Nv1-PLcxuMc_To9P0a$Plny&GdV%=@j-A%g6!^`6E)tzcmeMXX6H|VR!uRNulbU z_V~kue%U{2!jubOL}OiyYm~-ZGO;&BO4uQooYtCQEJpNObjS{^0aqx$<)7pSL>3v* zKgtic-JOE@77~`4LB-bq)PH+?P2>D9$mR#17b#vu#Y7K(h97EQ@QgqC}J*2rTVV zIc3TPlP4e=xHV!?#B8C{hbum>(d6@e?)C_sYvh8^kcMk zoa!-Te!N|f(S{7}5S^3FRbdwun!;!c%xCI6M~eNEQVn`%PPSDk8k7DO`pZ)6AN;<_ zD=%UXSU*!_?n7!0$8)H@sQr4ehFs&vo+TaQ?--k&$0=OwK)yq@ip12Ir!F>+f37@w zf81S2Pu+~!ujG$nXSAbs5;DQqp4va-bsT9=>2o3mm%gC)I`3~q?@gq4cch6)>4?y+x27PvPYY&kZ+rcW3DrAq{c6N6z%ba; z4kHU-BEb#&fQpj{#2bqXy%=$!mu6c>$soetrvgj&W5-l@Q43v^HgC<{j~+Wl+6bdB z=%?a?zk&}OzxG3r@A~%3Bi0qDXJ#((>NjtE1(sx5y-Y{QQVoq49{Muo8+zDbk(oE& zq6kXIW)m6qv$Jcn&|VW9a$J)!>VgX|sVT`4t3@x-nu6W@R9$*`lCJBe#y zbsd~IVxH0?mZi|qx~|gJbsovn<5pkatz%7Oqq4gZVe5NY_Cc)Box9K_6Z+-aLF|_qtO;GeHurW5h(qZjc;PtgyxZ>l-}? 
zvh1Z?Bb=h=>r#r` z4bL^gAD&qe5%Z#-|3lU6Or5YX!9XxYeqQM5GVV`vxhGF?57jc(5}JLO&tJ38udpM`_=r=K@?IH)WY+b{SEJf5PTy>a{hbNbolopPIZ7K3yFXHFledB@pF z^e!?rf6&|OSzp%v!>ccpo*vEl`s!!u^MkTCB_ufhU-6%bP9`6GhsoMO--yaxAIdBd zEz*epEL}NYZ7!7|8yCeHI=|;H`nUBIWxgl|r{jhTW zQ(IMBqR!lKKZQd3jt=G^*i30exvNBZ!Q9|5X6intQi#($qq32>JM$i*@?q!mV1%0Y z>d%vlQp#iJ4CQI&d*?}|AJ0$Y{~OI)pVE~7FZhR3x7Z#2ZmH0+XGxpCU-4r}?b%{S z#t7MB$1pT%fn4Y^dGy*p!sM5E$azh|OU@ZKldItnCKtP`K3ywPDk}cwIW#X_o&8%` z4=ikizq>zKpT8SfzY1qLlSX@c(s+&CcG;?>XVQS8fG<$7@W1gxlM_?@`ZG=w&cyRZ z^i6uS!>R62>{ym=eWxx4eHrmp z7N614+;|jrmF)fE#^a7~J*>wQgVR937t`J^?3InU5nZo28M_}#kB4R;C%-)7k}D=m zop}85aI#K}^BgYOb}|CVW^}~)zLtSR)4fmZtt(5+>BOUcSSf>>Ff){AG<;^UEO9A9 z$*`vy9e~!#878uq6993_zzbjHIEz-5TAvADnZZ;v#qj|&ym4fDl`aknhLJVgxQZeB zI>dMokMN1od{(X7XI?AZo?!coAghruGQMIhOi%`Ed9IZ zd&nBPf|YT#beHr=N1{&H4hiS9<%Xfv-sZKB8j!bQnOI7A0PoIAZTq^3`5ByYY_s+W zv)`lK#a&~)g&j!O>0$E=PYt>L4riOKo~H?SWhs7H8S`|^i2C^)l!V&)FL|1b$X_w~ zOY>ziQm#Mc5P{_lM*$aZj63vL-fI2vU+|0kTe~}(vY^NFt!PTO^Z5$h{{LWi+g^sM zIOt)ccU&rZ*YcHGyEz<}W`C#o*S?Q<`WG6+zFAX*w_N|4wL>0+0bFM*O{+BPeax0R8p&Qy zM`ZZHH{oDxVB9)ZYz)sBwy1fV4pwNJ^^bDTB+@lVB#qi_l9lRWWXPow~Jvm zvq}&Hhtv8dPaCgf)C>|*2T-Et)<-Z7z70AVfI4j~DHYh1rUvze3Yi;rHA*8)=P(>& zmoS~BevXX}mW0A`8x^E~WfL~)YJk@`Pv@9>x( zZZsvr42JuBP4j&JseT;e%M;rNuHgEo`1ThUq2N8A0MTn*7|b{U6%>HDtvO7;#4v-) z5DRby2nnHef*{38w!Pj}%Y|4fKHwH|s1qMxM4u9B$dJ4Gd?8HC)_0X{tW!v@%=<|%(N}W9&KsYypY<<{GVl6# zF38Z>`gi*A+HqVpOyte6#D`xcoTe;ABYLJto!Xp&Phzg$US@zmFm}izvYNn*$-SFN zfy6;ePf<49#5itZPs~Mr6^XM?E6(9Fu35yP5jHfcK9eC7R-_aAZK6*jk!QD!=vo;> z%zhz{KZ5HGql(niJj{x>rdH%dIl{{0L0RwmxkOK7bWvZR>S+6T49R9aCv05C+B_%j zOlr1Dl=XCNwU__F@ATh$>?iB7)cMOgg5!JSy)wRxHk6?S9cQK9QHTrAFi9~nkS~Co zi8#_}%pZucKz?#ws#Ou+ip1N=ev6G|!+eolEsO!x_#OFfxP0GOy(P=H0k1NXS8PZQ zWT7UJIuv>bzgdXiKSbt5-$uO3GM21kmhR*gEZDRZS@TMb0Xz8!W%bVu)!%0MKE(TG z#rKtAR^oBx<6KbnT3lVXOG%jSN%mB-_P-y>TeM4vm(A5 z(Z(%)T*e#>3*LJLy~}{Q$*60wb}1j>h{Re6mj1;!x7zG}1-TjA0poX^DSW37-_sD5 zh38;fk=;B;vyPo0|=?j^En00A+wio09xwP!r3;ZO5-QQtjhmd{2?m1%cNJT{|4 
z3y?HEb+Gc>!3yRt$6&QF=AQ>6wG;^tF~Ars&}f|+FgzR5*og66CW@6M&sxmyO)#2e z#(<69p)Cb5OMRR8It$lyZz9wyq;yjm`(56`GH5K?3>W9sC8v`a__KLrA2{G`JRmpl z`v<}ee21&#!47~rU5d6S1z7OIPkbi)AU`+aHhbR(EZ-8Kca$a11=b39tJ28tEy|4n zZ}Oqk_?-aWEkzThzKyKY8knz3{uNcul}2(Rd)d*?@Q4WQ1w!waR)2-!@3Qy-<;JoS z!)V(&*JvBeZIeSXa;s7|9ziDYL7Zx_B&HA_OJH%$+_rP$X`GkGTQrWkPfHeE&x$Mi!LJJi8ruw6v zHDi_F-)HX7=Lz=WJbOog{{?xJ5_35X^2Wz}&}tQ~T(g&`Flu{1c|+Ww)II05TjTJzHdp?LKcIVA~}S>dLxU-d!b5S&K2u^cztgQ|t}=UTwaA zyEZf(`#ym5HxO-4@OCdKQyg+Y^s`LW2OIOJK|YmuQh$NB$&27M$|gAJBd-`~R>a%P zvIXbmwvf_ivGjf<6mPT27T}QVzPx9^-a1r* z0ODzLk?Nnk=!odyRd&gOP;Rqjm~At>uZ?Aa7Rk#uL@cG}-+@f{-oV3_K&Gkir}wR~ zY<8S(g1K2s2|ler8KC8qCNF{Hp%Ib1^-wiszIXWMfRY7Q z<~Au$Ua$d07W0Ww8ree?@Vz`l9sY)fcnhg>%6#itq)o|!Nx7Nn@9}1A+j89WhnBua zt-Y-EH5Ey@?+d;>5Q^a--q+Tq49NPNqI~4H%a=h%Kt=E;kXT(>y%nu)ky|4dNX{In zZ%k%9TaZx~%FX8g=Ib+*z>c(Mn?#?|5q7>8XUYlM{OgC1x%^u~{&j$^X{imUI{{>5yMT^m=hY-^r- zUSivhhgB-}tA&)|j=kEz{)e}I2uniP#lOS7()j*IJk1_HPIPw|4WmLJ3df5a%YaA0^n-o-D%P-1up~%RqHQLz>K1QGUvw0NCQ|&`A+uiI}N_r zWZy&jA!EWKKv3iRPtW)&zhdp(-t|c_#>c$rM$dZlS1d|&j<3OfS%latT%QpQh%TwW z5O+=^{>B14jd*}`ffITQo^O1>Ye zMb7D#W9LepYn6>e9Ml1rMVKzEe0T>F4{PzT|wL5t)lY zewjO!z!WMhs~51wVz!~=S*eF?8*o$@5e8eJ>-mAi=^!e$D#Q`tq3lUyUa!LqQ-*|g zRUv!|I5=niUL@YmnLi_XBVY5}pV}ZUK|QrD4oW@c6A9}|_Gp|ZmCCcN7d`x;>t%WB z1=8x}+Vu)Na;@={djMTLbJFm%C^W?La%mEH!c&4Cki&}^I%+Lck+%!wHP#7=)r#f^L;5_ZXIkZiLT&=|gNPMc+87 zM?S5H006d{Tr_a+TBi&#{B8Z=dC6lu35`*d5g<5}7@dW`2(YpJTPnZ&b!Ki71B2b6 z&p}#O`#bqnkYVXFgKiMY{A} zYd}Vwbb2S-2aQg@R7si=8s-laC!$|u*P1Du0{@Vv34{{okKQoLuhZR1-4{iV#ATK! zZ;k8g4|*)Ka?)c+{k4XpCt{C$gx=l3)UyhHkx7|9%@?q36>trN14Jan-h6pRh~}Mt z+|((|l7^CqY`R1X6! 
z&;aYdr#$1!_hc$_#kDe@(VqBKuXk~NegIDXJ|_KvoF=C=gEE5d_+Z9x*b7TT6G*x7 z6DCc-b~8?SRAz_g9D-?eKr`rCRD1^&^|V!Q#yX=xzSHORjH<1ItJ1?eFZQyO+F&GG zS+m`KIsB9u2A(+usLD>UzU{*`AhW7fxs@g6wPfcXp|Y1%#8$w+Io5cc!xU2i>JfW&$Sfcpr^ zp7GP|_sz_w^!u>1%=fl@*z+rYALOxf_wLt@Xu`T$F?A0O&*rJ5 zz=L|-Dr2MV@0nvco@Vou!XbDiQSdetoVYNEWxybkjq;0iA9&pi+{bwoM)Khz(_w%X zzNn3B{o2+0T=1#A%gZmcPt6q(Xa4(h=lxTU(DfII#+H0<4uETUkYO{@wDLal9TU#wutHv}*Ik+kA^9vAevUF_wIh z?ufFN*Y@$dX&#W5c|nG*W@W_}XV0stm|CehQ z+V-y4CpUWMKjr^WORf_13HHigFrMat*Er*)q%|=N=7%4hnyF{Peo)5OvExNQ8j%xa zJRpDZZ8cdPM$s&(r#(|AFp<`@Y1L8K4+%KM+apLykG{k?ZeZamY>58o+9DMP1zcv# zpk1NxIQ|ddfg&A|y9G;&h*^pWri%6$aaVfYNd#v^tMrR>b6(;{cTj&^^C{^;8GCR{ zD$;7DDS2jmVlZlaeX8CqcD+l}>M3Ja*Sm`{Dt3ZvkD;qB5#bVYQ(S`%ihvzbVA6Sm zO1?EO`}@*nLTc+w&UUi=Z<210DV1fZjp`DP1FZ+ElZZTjL;s&EIwp)R$nVi@4j`^<;si zqWxq$s64h6Nw+TNOqx8d8TRmStDA{Q*9^j`p|^;+n+3&$X~&=9JggS7sg=&;)OKj# z3yQuqW$|y&Na!GyKG}y{L+vsawC+&!}?<9Mkme2Wpv$#Fx!bw;Aq}~U!`U+EofwWNnKGvQ~ zuxg)7{5s|ZyY;}Dc)Pb6oX%A}FGb>0^k2=^agFy*FCEI@c zGa5(*POTu#=_TEUMeFg$ACEvRM>-k}#A?_vm}-Nzl|CqmjEzd|!(XHisVWU3>G$ZR zSywiolR{(out9yK{B2LNH(Cv}No$l*PE-X}jyrB5&v(d}h4I?;BT!{DqG!;&>ebWd zdV3YBAn^KnMz7C~CtIoY8LsdIk4SscK9dW2e%C%?#^#V&@Gp>PI`2`ovatfUohRE} zd^DIb|EFc3ANta(aJh9TkZ?UXrNfa>7k6~Mjby(W@Io^&F!T-5iAfa(ySxi9(~bcc zIZAijkpLgW4TfMt^X-wyqL=7^Sjj%T;XRpiI?LgWPVie6XYSBsyFxrR7ol(&TwHjN za}*ww!c;9UYF_zHDOd=|&4rEe6|NWx#6O~AFxO4zdl72#y`UuR^gRykB-?$2-VI7j z$S}+6l{m3fO1y*;W*0#-D;Gxe33Bi9LwKs^7d9*RE+_HKlb86H5$IY`r`*cVc+*H$ z#-P}a(2>Y~#oJC#1~KI+!opIPGt|eh!IL>Jl}PJFE==~6Ae@DaG=n2xFK=UVZ#)G#I^XdysFKW?Op+(teQ>gaKEiFx z9%!x1STX^kIT_i_FesDTn1iv#;CI-pA+#ZvpVXNn5x4{Gi5KP+I8|-BAe^9GFRYm~ z(akT1gCLRaSTvXAxcnpsY!u+v>~PT9-S6Vb?{@h>)`oj6 z;KFqSy*3o)Ck{~t`09b7McDaF8oSN&NF{OjGc-;B4Jh|GFg_NNK=1= zg8v|61s5t$ZMzX@#qQ8j)C{6S4&_Jt&G%v42je>kYWKm^I3H``PXb^K<4vqzZ5Wk+ zL1J6ii|Ch-o{Jw8dPa_Ahk*9hp1M58y9^b=b8*u_GUs`a4wG$D@&%IM<1Gq5ZTGQo zx!DnAt-t7bC(-##1$Rq1`=d(7ZO|zF9ee}i9^!t0bVC^EM0CUY0E6U=PYVU-BD91C z2LOZ?C*t=IpkVea02I&$TsZ+bO((ZO^aRu<$o&GWX|XW{ft>9p2#{SELm^2N9d3Fe 
zq=`KOQbUOpW5a}`T@bdc>3Ll{^0hGYJbbl!07LP|B#yp>iO-Tb)BCC`1AyZLcxzL< z2_UAA+TlP-H&J$wchru7$XMl|Al8FNw^eXk0dDOh7$=|ly`#(CIx9z-r=K9v*;*v+ zKS%n=m>lJ4K6(zRJjgGSKq;U&C`lN(8@xAZkLm~DEK+d#o+2dr9HZ@_b1~ph?hnC- zEBM3aIf6e2k=|u=(A1gXd+X}7;8)_Sm5nw4zf{4$zEI#_l?wk;?8iZV1W&-g3?@<7 zY^?|%mA!&c`S8%;R$7c}K>)w9%@4x_j7PGIS7)9E9I(RVj z6A*5N0@Pd^%(u*e(zI~=4WO$l6>aYam3n}AWPd7|^WbS@yrvflI_)08@T#uRDloI0 zepjmLVl-qFa@h&ZYk4X~7NVGhL1~P?f_FqOjPqpat{#8& z{4Su9KL0E(a7bvjR4u}bt#sV6Zfk?t+TY1%?);U^nLxpQUKp(e{q8Jz(~>#AqJ3L` zR(Rk)K7=D&Z+=_Z7O-#I3D`DYuDeI*(gDMZQEmnh9=;q8C>RhZO-?Km{J=JkiIZcG zwj{glDBMJG*>(Vc4j`dD^W$X-?!A}~ths2=Zf~S|{yTK6)zm|`QuVw)`Q3!1?>dZr^h>-+{Y|N*~r}1O^vGF zsKDK>WV?fhp`2?wg~>O7GZJtBWy%1&6+XP#1_VC*7na60M|5<%*Ea_Q-m-(JDE{O4 zD>zBWckL4VX5KWxuRR^T$#%syShQl^4Y&%wI{`)Eciqqi@mqSA;CBbd2XnJCZj0o3 zyA{O_4-;^I00v{gIk`2|GCbE0s&^Nv4}v151+WCOjbTpgjFpHF`B&TlQ$^ESZM`c*_!mH+8Z>Bl!SU$g>a(-6Q;ROIhQ1^;<+mK1)on_IY>z!%9T zbugrUGjN5Yx+bF$&7+61Vb;&t0^UQLS03VEYJLsBH+eJ%RVCu+(`H2T2O~N>8(-oU zCI-v{a?g(~U%T#*e&2{m!iqtO*w4RE&!_PDWY6<;>iIJeIBTfq`LpWzu^4Zwujlz4 z>iN8b1ltcFt{%Gf0K$#f6D@wmywK@i4*ky5@DBw1y+T$sZbj;OCEXdxb~97qzjr#v zZIB;hr{)W)rOE=@0mV=aFat1UDO&)VO6&o11NKX`8Bk~b(t^qx1^~#4$=`gJ!!7fi z%>2Rl+valU5-0TaqFoFvqGvy2n+m2x>-LVQgW(f>0 zzG2VoVT>5QWiev8j|l4D00CW0;EzmT=8l1!w6^{Z^$GywGhq^xH)0R4{bbH5s`24= z9q5eZsD&b>;k|JDE?_I+z1W9ajBn*psOrZ+c+?yY-G|mXdusBSh!w|c%47B4qz`|` z{AfJ|U2gNuV)Z->1Hu|A!>7jSM)mw7HBN8ikB1Ms0W!ykGY~1Q$EX{J2F}C@YG_0( z3K9C@49_`0T3vpthoiRVQ*yFCU60RjLu#~(EY(*Z_}K{)`}`qmwX(kvN=B_~V0cQ6-7J&Z#y)|5dw zl7lXTa6Y%1lR2Lp3ZyU=(V_Sa-m>2P#gT)G+XU|}Z9w1WJ#tp4p8pH_%X-bD2Mr3A z%0H{hUkWqAdTpDA1Ne8S=Qn}_tV;Gene!ZBwo;sNJ-^qf=V3^x^^5x6p8fh(eUD(w ztWVVUa?It{JNT~fOy;y~jR(u*L5b$l3fKY|oeMEl*rW4iIk?S&S&8P~;gVFQ;1ZOu z#c?r9EQCQoW`1ir|6&Cp%ezxJfL#7rH=<)o628WQ6K(7R`gR)^5yBTBl`2sJ&P0h% zJtfLfg5W0uy!AVjCa^g~6?+lIY*Y@@#Rjn0nXq|L>}Sv$&`2)3*l%SD+Tkqr98`g7 z_jHBXcu%|QQPLWNzNr$AC|LbGC7#f*-n|U4z}&jzm8xV%Psw?@_DP0NWDG_WEV`@z3wTI#}a)MQ3{+A_jpP)V~Is*vZmaR zrFflrG;q+%zJZWUJjNgc4*-`LGYd{V5N)Il!(4^^W=PgvBf3Hr17%mCb12jUh3t{M 
z2gR(j6}<$|v#^3tri}w=jsmpLh{nlOq8#TaK(&So=o|v-rb-B)ixr?XC?k|HPjCPg zI2fIv06pLV)QW&!Qe!ND@)e+~96(k-yK7gIhJuj=eA#~wpcN=)<*5<^=mQ8o%Jg&q z-LC+lYbQB??pJ`?cmQ2aK<|6H_BRFSdkDJf+HeP>37C%*M#B}LH7H~gyMTZWWC>_b zNIH;t%!Yw!vMpa9{GGhMhl-$-NNMO5YCnV{Sqn^R-jFn+@1@kjPoM(mUXWcI;G|-^$byKO;VXW}Y<)TqL`Df*P=DMMVo`J+F2jg&J6gGI zbDoo_DwsGC4OgIq8lo3UR4ejJ*x>;f2qyb#JX_g#n%Y969sP9kSS_N(5RR9!MOw6N$)V=zUEET30}mjz^jx1Pj||HvnYnzSbvd z&hnw8c^TeDx~30U09$W7p8UQ!?^1APVzRv}23l63L6G~VENSi^EwOi0p5bGc%v=ET zF#A8Dr#;NRO*8xXUpve`548h*gxPO*nSHg)SBlxsLka1Qxu$RWsJ#ed6Z9)W5pyx# zHpJ|m@Y2id^H45@*^TH`3NM@CU*DR-@E3!_^msj}rqgvT9FXP-07r%wzD(VwgC9xP zGZdaTdg|>i2H*cC?mgIdEGM1BKq|if6dRN1+WDwrzKyaTzWAUvIQ8&h_<95oXk02Df8Xgn!?c5NZ-OphBCm{S39ysJ)?K2N#m7=cL-;AvGL8S`x!sTW0u0UXL zrfwM(Lu_DcAF?WUYm#NFfi|LlidL(u9c9atD)UDftBE;iRr?9cp(44#Z@r?nv z-y*ww#{7Fh!*H*OMvRw&apxLGI|9D&u4$EL1(E@5PaA=vH$q1mOQvO(g!fH1q9YM+ zfN#SPC#k-jZbTnP-NGGa@3FkX3*d2fqIsYXt}1I9?lmUV50u49dHJZ7;a$PXpBesM zxC18(%$6vRluV%mJjJJlCJ}^P1ALB}D<$@I-WtiWM7avYtKfsEGyjRHD1c)%*^)&G zNsmsFJO7qrOR@75pPcxAqau#n6_cuQ3t&kbnYsdzYDCKz68o-#YMYzgFQ}_S4`z=6 zJfJUas6H6cNfL2{&6A<2#C6Y_3a6WJ)nAX4Ap1~*tgGEBil7mjE}(R-o6$jZQ& zy}iNsSH)A5{*Rai$Q&}O2UBL%HWzdU|=^~ET0IMj`C#-tU2crW}$c+9Fc5^TU`E0X6SxnBHm;AKQF0qN1p zvRt2aB)+mglK+bC$87V=9#mo6TZ9%lA{g(bGGXxud0HkcYcy(`g(G0VIN1|#-jtC6 z{pVbu8^Lc)@3fph`aA>z*0@wi%k)2qflP=YH6J_)*tlW2SBggsVY#e;G}35mHdEb2lh&}9Ayt5TJ^BEH$YTa188KemhE%G+$V#&754)$$TRod`Z&0O=mf zt)u%%NAMk#iqH30nKQBucSRz`3*gZNV%iXDi*q(Z^UQ~!IVJLrpj9Vl!0b6l;f^yV z_PHqa&bTH*GV8;N7rE_%M&|83H7Qiqpv!dA1Y3KS9p|L0c1o=}I<@Lao~k0nh@VpN zyVp@pWWPXA1RaqcC=a`qmyU#L#3mM#R7z$_WJ5&IF$R&Ll#Sfw;AW=04){oB%5fMb zPiD#sEY3UF57gv+fz0O)caq%ziI#Tra|m#424k?LMbbF)e>*Rw6G-@}@TZ)THZxLj z{s=up%w4w1X9*tFM^wE0B@umy=~uyZFul<^6}kC5CISxut`n|CRhQq9tSKd!O6E<; zltIRR9@ZXW@3D!~2J})Rb!oQ5mnE7$GF;zH-Tx2XP5mn{le?)~<4fsl_<;8UcT-<% z-G%SmXVhr#IUxn@_wknF=FKO{2t%HCQR8+g^NE(?R8R|FC^ZWm?X2>mZAP@02I7jP zoE3XQk!@y_`k^+a*TalAe_fb23?wnnHJJa?>T2^h%YTW5ye&8U{kOE=N4oIXmQ^R- zZVlgi&-J%RWQrSeaVPe&d^FB+=z5Vy!5oGkq8haJxgJ&v+yJ9!)ky?wmN|!y)U9h9 
z>HUgt7WHI)#pl#2y6V~dZNIFMm(Y1oci&BhHFO)Q3O~4aaBl`~a2fzNYm6v+!5^)^ zLNiWUZ({D90G|bT)>>=R2l-Du??1zPt2MQK|FhWs8mkq(g8178f8Sbg&nEDx`M0;6 z5N(e=j~wTb@1a75r`|a#VP(w&@ec#}YiPG1YgTr6*H9z62*Vz+3d6gQ7OXQ_AW{d@ z>J!j4+^fupVLr(S&-V2PoBchE#RnBPB1^j6BTjAvMmdpmu~{YLa- z{z?vk@bJC$0u}mtm$SUBs-3vrMQD8>^Uv1<~}8{+}(Y-5S=V@-@)iL^*>1mc50z1_Z<1H-d3XI7Sl zXE!OY#5DlRvv8>>)`1*rTG89hRp;;`Q&?L-Lna!sMhoH{eHzUuMz_vKLr#=Qa(r$Z z%wOBhz}cj586vu1IKjU*7k2tL=5u3!l%_)HpJcLk6_agCW3tO(g;c~><91(b8c7s? zM~zkWvE;03Vddq(VNeWhW9YCHjt|12Q{uvObAY$TEbC?uY`nU5b5L8EG_xHm6DhIg z?bcaSaceld=d>KrWrWWA$jW0!BLnSKWu_Ng2*EGQPNS(@>>K6pT zx64CE(Ohi6Qhd1$5U>54`F=k0|D3_zt+8sLPVM!Svd8zi$ND1Qk3izURFx6n(gvqv zqR=ylk_6ZO2IUdn9Wqr}xYJier!Lc&f3vE_r!mnd zvwa~>tVV1--h{961&!EBe8c$QL{>BH04t=^TdR_H@pe$+5tV|L;**?)9JKpLO=CVz z6ze>)2cVlWUT<@I7+0^e`^wzZxnwKi^9jVlGCTvhXT(`)t`VJzY)vHPr4@3FuG?BV3KL5>yfWk3Lo|hMEjjK8W6AqBquYU@AA> zDUQ3x!o5JSLjkyOdF_LG9u@uf8_JuPBl1A?JG8k1H;E@^;^f?qp?Ar`2!kJj>)_kN zOb)d-mJIQQch5k!A$YgLyT=<*gKlosCq^2wJ9k$kPN72ik#P70bJC<5BR?zGTzEbi zA6Jzx1F51LBf*uE@ZBSaj0m1bb2P{cZ`wr(q0o?nM+guYW4Z zK2rD<%6$a51C+Fupg8p+^wrFkB#05c9j!1t-Hf^e%TyTbWDyS1M|qAQ+^fQf@z5jm zLhJ_=FEMVdF~3I8q73XCc*9$OvTDkKpL7Qw8+0G2qR@rm2%?rAUUeZ!k4^wXzhJ2P zLTg0PJ(w(ki2gPRGf4iL3LJ!seh1DVWwBzn-~!R*kg_Vb0IlTVtvP&D8hQhO9w#Us zM$GeK7)YEa)r7!5VtKA|BqwuT7^W2KN{t!SeITAKluptPart57nfRGNmMii5Ps4fMBxkhv;OW=;zRIV@YOlId&yLykaPg&HOvah|;bV zUGib^IYzV^>`@GftHEDko9yB;6E`5h$u?e;T6DZ_rJm!Y;rY>v(y1 zg)j2$=8|w75PcM=fIL8E4n&)Cf?qGrPE_5u-DTBri*j-2g%limeM}mDc!tdI*AXNbTd8*MbRQ z>7#|E5txin;1Z-x=;$kpkL^S%96eW2;VyO!J_U^XZ{{?{2;0LJ+YCO%aT~vs;8-G) zAvFtmcsw3fByKAJIu%8?k>`!*5EhJlmt7H;=hnAWn#_JE_x+)!5o@DBuIHQN6@Ye1 zpy)Tmvf#0?q#xvLx78Hi%M)W;mm-FIU!<;yQS~vN$UW~)CtK&5M?+MCiPn_cqtHpR zSA01+4D%fNh&y({+CbWfSs-rV4y!q5Zl!C$xJL8~FeUqZrSuu55tySaH_vQ+r6SBV z#1SKx7bpUlP+Q7(p3N+?hC9q+VfWDCZlsBdz6AawI}Z+J7oUY_D~!3OoOK(%v(2?w z9axi54=PM$r_);Oa7O88cj7sU7XCBgFt8~ z3P+)l2vE;rx60VTUt3pGf?M48X>=|x!F=~Q@uUI}RiLzhBlV&KK%Nq-j}#k*p{l`% 
zLUm+X%Q@~yYvergSx*ToaCK0J^45>|u;ZTs$$l7jyTR^dD1a=J)OGRJxv1tad`Yw}bTDGl92u{iD*s+dC9UAka$Dsb}AX|pD5*;`Dl~IZ^t;a!p+j`^vrR&{;kw)*hWFy7c z9EQ5#UgsFGcBH+w6=Jl-NE2x3zsW+gc|~&cZe@gTM+l3W#wv|b!T1(;=MjsR7nSja zImh*v>O9$E6Z}X|snCjf|6$p#55~v(Ij<39vgfggZ35Re6P+bj0iXrjf?65gU0KP@ z)Wqvyu&|iT$mbE)Px5?3ata7|m^SH8z4>%kogwX@Y0P57f1#sP03bmnX{tjHcZKbTO}63-Jz5W4~;!TcI7 zhAS{_MV`j27fd8dq@7opI4>CA5X^rkP+ixg_F&js6^YjC6eA;(d{NEJQ!5e&1K)v2 zy1Q%omsoXCQP7sn(zfhj+LrYU2He(;y?S~wYXkg1Ff>%Jkdy%H+oBbrmy9I^$ul;E zcNH4*pJxBBM6y%l#6j>N7IuX{K+Iw{skuTGRr3!QmBwyXafkt|{J$q2yyWp@6g6-Zugc7S|qGQo&t$+xU2Mr=1t&)H2;?nn97 z>_Q{q;~Y?*v(g$=wHh>1(7fT36hWtum}i()lr}jYr+Er z-9CXnb1<)9?;RvR+4_w$WAtTCL&==Q2zq&?oZ^Ntk#O3+haqyxtbnu4kvAw;AN?Y& zxM7)PEV&r-?q>5C=zgB@F9sxYBxhgEI$Z^{U|$m63xVq*XrzjGA3jU=}j<2%r z;9InzDSSzPh5XfcY@VloOy`dqXaniCX=aVp=MMH6<>DI~suSN(zMSgut~z z$~eqmfT_k}Xag{2rogBK|5)F#yDpte1&h_9OAi7Bv3fX?7nh=3(FWX3CSiRtpooUR z53zl|LY`8t}WmZBy? zU#kZ#QHVmk*?j60T8Ak3$U+~-3E0c12$y%Tl9Hlnt)eJ?_--=j4lD@*W>G_4uDm;~m3-e<+vZ31(*ACj$DfEZ+ zmF(GXCrEZPDXaLw>VU49w}UF?$|5m4)AtwPaF>7#+CztmJrp#e9YDqKt`SD`Z6FZd zH88}R1mN`dy5Mi_3!{|&A-G5rW3Z1f6&ijP6ug*L+0fLXelovpHy6M#Q1+3^r0nh~ zd=B`|oCZ`2v|5>UxBSU|S&{A(Y}Ahd)5J6{_eXx3?8w1iWhw9U?lvlf%m| zK;5g!e-hH2Vu94L7H2F=6uyZ1I2cPgZL=NZc$uLj;pJf7Z5mqWa;i}!_49eHj}J%S zX8YhQeUTlgN1HBZlQv%}$_qzvqp;U)P zz};PW5o41M(>@^0>+J*4A;x09gBRm)J-%F9+Fy5ggx`W|6Rbp1htHi8^wE;&s z_Sw7k2V+8fD4^?Uy+b(H(cI{06PyMh1)S;?pL9-37R`F!Nk z2_P6FaXMVv8$(UQy~{&A%cEOi|Irpk&u*b^luog2CWBBm-463(Ec$9!IQAdM5&eXA z*&K);WIoAdgKf^O!tEpG6+u49xw%A^pNEN62SqU!D}jqUTGh1F$PbA#p#YUdleir3 z&OwfkQ&|<{^>Z62*)(t}8THJL70H7?48-9ezP!zv((J!vW@c6teP~37q5Hx3&f2zG zPoQfEj)EQm@PhDFEqx)JDcM}d>fMB~T zzpE*2;j`|!qinsrYFz{nL_-?h)R+_e!>hY;5 zZRxXK0|m5RtSN2fv)*xkL;i|$SsO=pAa21pp}^-S+^r?X$Ok)G-PsT4ygUk8y97)W zOpMDVO7J0DD3~$G18)ke;+u7?(4RdsA3302pU{~bP?cKq{6Jg;Q3rX2@faZYQO9-0 zpeWS`aw}~Cs#^(wmd)Z5gzaIUj7JNB1YAw@&%W#fnI^_dTUx&Wd5#qKc(~xfF!;^2 zhQbDW{rHI?vF6aj@kI`%?bauNh2yKO>tF}bYb4$x?c2yUPf6k}1&nc%oX)hhkBG;L 
z(7~9PASl{LLzh4qU9FVPVKh+1tf9x^3FR3or)LPea_VNt^VLp~Nwv9(1GAp~AJGDt{RQ<$v$+1wl zW*|N!yTV69@48czT|#1!c*h9w&coifgERtEr&b<7q$=$!&3cpF=;s(M2)ir?j@V?E zu`XbB&saP10ovDoh560w-<3pIDK0nsRu026=m!P;KXOKSt+uNmebr*G{*I}_wpZ0i zvl}mB|J7Y8)XsC(ziM0uv{K`WgjRW_|KKL>D}H>Whe3V~N}9)ZSDdYUqK(?8cC;|I zqv6+qV|0cQyAMVJ=rrAkeJ$TI&%pjPzQJmQf2z4T_*m()NXWf1aUx5d&}{`5#Bgih zj7bq5%VQ8SReQ4NedP-!!>yFI5MLV>|*v_&WzylKjE z9K$xCC5B^Kw+-H7IIE;MUu_Yad9xzcAuWYiM)Z83?l?Xhq8IihZ2cL6p4e_~qy0en zr|Zb&AQj}YqyP?}i;P%nMWq?=oCte=5XeU~3j;g2pEVOT8CR`h3sD1fMaIaC@a3fI zj~umRDO>^MHcW?GS$tWkBN6Z6ci|^8^*_L#CXfF3By)4VyU0-Ko5bd?&!6Og~izJcC$?vyw?F7 zWANNoIYx9IFp`b_+4!PVNfRt2WVm~xq=V2-ca+!2NIVI*y*u3k$GdMIZAbH-)0r{f~QiVZ`Yyc!g6JQMmmUs(jF7$D9We6Ta=nH!&MqLagXiF>m z16~b{aGEpS=49{4{0`n2y9W-R=BYhA&8LgWrw>_YW3mtTx(LUN(5@B42kkG3UsrMK z&SWv`B#bNA=XZpKBA93;v<&$s7(Ws5m~CnwaqS&=?P1J9t6OIRo*LbMHf2W`v&RdV zdo`qLRUFYG^R;~RPt|9Zqp}4LZ-CUA?H{rO5YzQ97A03hXAa634{JuY9B9=eoQ?(& z^aymw%u&V?UojNTmmNfHK!NN!XZgNM=Va5Jg3xJsqhoroi zskeiQk1?vKh*4r(S_y2GRvNJizC;j!i0RM_liAsAF=8wcMe9~P(|)DsA^I7G%I3qS zc&1nobI%VR7DRN+v(POfOFy3Ca^i8KW?%x06_sh#0GL8}Lh*q{)rGDQQzbHSM6W(s&M59?R1H=o3erlX4?v*70XXY4@T0M0y00`j z;J)GxMwL%NyDbfx!nvY2*QnYI0Yk6eYoJ6i!m6fqBs(-mnmz`gnBdN0>|Z?2N-{j) z$~4wU9Hv<#*|TChwrs#5*gDU2GEZ7_j{-omp!r0!2?GbZgDvg^K0i>JvAqOQ0#JISH+1E z2~~p1YP^EQc}7)t0#D|=oW^76WP0^44GHL`QxQWS-Z#sLeFWSjcDd^Wn9UHh@=g|UKS(VgWrN%8-v^raQ^WUIK>^04ZRrAYiANI)>qEJW;ryS1^7trnRxCKct|hq*q4Sf@wwI z48L^@`lJ1Lj-o6PdqZvHWU;?#(zyf*otj@WAq=|1*X^nE=r%1VNUmu_(PTN5dxl&^V;>bS9b=V=aJ!Spc&^m6 zjGFwetXuP~B7SeT8YT^>-dm^+E*jCsTE$WxVjSs#@Cd%U(f^acWAE?-dVDmv$MQz0EwBehFQv^3B>!-j-$`Z7)-eXtA`q-F!$OsbK6pK1&;lt=y|dX4ki~o`1{yG_iP2I6E>PGVfV(!`>@K{=h$oJR8+U&cwI0Y*w&ZocqY_oXOH`7!3P_iJx-|yHFUlU|+cPNM4e^q$Q5#ua zkP5N44RON$AUej*r(D87J0nx8)n(X{hog`>A}b z+wvEyoys1xyRkBY=9GhBC-&8@$i$hElaRfXygQjoKrN*|;d|N=+&=efR6U8gOnEG~ z$PyeL@MOF0$8lkVi1uXXY`9AB6|?TjOmI>+M^XB%yVyn}YGs<+`QJZw@NeSkq)AwE zG*)>lW=uV1C%csMbiTV&S#i**SlZYoOB3n&cJopRG*XIlJHMxK)jFzbcT&`f^9aOq 
zxJY)O!O%z#d%8%T@`lqx4{uy6nzdmW8Rj(Jv(r|mvZsfG_EY(7x8=WJ+Qo3E8QX1~ z%8EQ=e{wClt@us`3CdqZo(=q-MxLQgBO-Lr&S<3Fp_{$rdHF=ttIvbClBLju#e8R) z2PdP6MtSh>tx>@~UlG7Mo;>)(u?`Pj{yKZvAP;sR>Wz%h-IN6-`p>yM=oulEA-~}f zI@hgS*Y#CJu~8>T~%TlpimE1t>|Ma?x_xtXVO zgEEr8pUMl6qEr)cq&o?Di1@vOwjyG&Ck$!15O4kl|9-Gc<3CisI0y{(Qm-NB8GG0#4(yu@vGIE-QpC_4H?V0rBi-&sfMs z0w`GIX7UWoCrF80j4;S@>Ew1Z=T#Ketra%V+H>_=!&2VU+I4KrGaQ|OzCA^3`JTXW zep3D_9rzZ%r*-KjB)9c+Nly{~NYR1+I!>9uY%9ohb&_WdU>1ofEDO@=PuQHj7?{9@ zU<&gHb@OA`DD4g`sEaL?@+tK2W@ zu3;(u+@WF5L`_4LZx(gu^4as>xs%n&wbEhpK0IS60`2Rg_$;k!5vLLFWHldJ&YjC! zVW7MWak{6ScL-mLQ~$2~-tPNjM4WmkabnoSf)O2SN)GW^53#U^vobw^=JOpmYn#Qt z3gPK|mWJ?pW&n4EMbEC+yy8XpT#T=`ojU>3=HSB!QzLxSvHaeyPa_CtSicY%55jG+ zp0EMEFNkh8`+5+r=Jzy&k4Z;ZmYM6{@*->zkw$o^KhJimxp=(H=IJ)edDO4*jc|1P z;B1U;!8)ZlWW;(3?$uBl*mq0A=`07Qo<1M~y3-Ew@MSh&7IQPsY#gyqT*RU{D^wrI zO6X}~^2C0CJnjN{b4f!$uA;J!zft&!)rVjhH8YA|aA-&w@Q9)qhzIofsSU+=b>xf9 z#XLQM%kt6|tC4@nh+QVpNYJ84VnF|w9E&$p)y!f9p)-3gz%=3aTo#V3$ik7t z+dm^-P)o@%5|9!6mfXZM)JAjzz87wiRM^L1{~%>wep-or+_>sRX*vKmh6Fdr?%5l1 zAlA2*TfE%v{jhAo*aEn>+JPIn%v8%$M0H~AP$aS>se-IAXcC#68XQ;VVb;77a;fQ= zy9+RP(d|HHtFLprt2iS_;ZDwRj^h3P8xmY8-s}DN?kL{f^%XC@1tTedO31XzeTn)y z*{%RPv*-`K5P^6bEcUrk4<8@hbtC*%gbg(!2gmTBvXX-&d^DQfJbgPylx@Shbhumj zQ+HkDVfYUFsr-XW)xoY!>S_67QKAjE{F7VxeP~sUw#T5oXFrwax|O@QXnR^7yPwJ( z-O5{C|CFclQBv9KKeCf(+#!llW(3=fv?Esra17ia7{C)SfYuAJE|7LBb`UgxOwIK$Ue z;4nu;_18%IwAqMu#2ioGVBYtrsgh#|*hv7ZVp%QdKfe`*n5&i#vsdIkEOxB|LS_4dI0Q5UT`&6)+s(&mSZMqGe3sSocfX_fz1<9)h5_1zTB*aQ?Pf$G5dH3S8&bvA*!FMr%d-P&q*0Pp!lbp}^niCF5x*V>bH+o9;e*6c4T})}_i;c_1A@ zNEcO(6-W~*d+9RAOSjbQXYGX>9eahbkG442H1WdtjPEhyFLmEPu+nJ*x!(DHk`ZeT zu~qFDvCXcC4BX_D-Dlm1vgVMlf%WP97{HHGeiZZLWPY5$kK_1}$B(1wt`?)vmf90 zJ5Cs!#-#6 z-IU7nA=}u2&-)xhp-P=2Q@F;U8%u#$9k*F@97h~=bQ}d-2_Ok5qqqSsBe>tTQE>qg zfxMq@)xEtW!QbzFp7;Mee|${ed#mcysZ*y;ojP^uRO(X}`rZk39hKGzAx`ArXj&ic zIk*KE-dgOO0iOv1DxW12opo1xL_Hkt6Ww}-@@ZMFn5iho>u~OJ?lblSouvwY=s7CC zt$njMLy7yfyR7XqG6q>6vFQ^)W4ErzV{?tF$*K!wA27DnqaIf4(9@@UF9rQ2{UH!j 
z)d%dWFbDnI%t4bYq|^sut-zjGhMsSH{!-`TVPln$yVg^+^_{W-hn}uwQq9iO#tAun zgL8c$WKl@AJXa{}6P$P=%Iq#2avpX@c*3yy@ZOqyvr)_<6H%2bQva%v|34f2#m?X5 z+JbB|5wG@6ecxbsyv(aDd?C}oeq>nAJ&X7x;9^E~Tbc68QMyW;Eh{Cj}{%yBK|E5w!ON=jbD*5KE z4gbhzuc2MFa%85Jc;IOT_=+1nx9@SBe*o2=(Nwb%ByE}DC=>0cj5gb7v0P2d`6SCy zBi^^|9r8c6cXqbDUbI^Qa{70(Ee z->^%=Wy7ZVo5VuHIK6i#Z3ewE4aC$zF)C$vovG7_m@h_F{KtOzmLX%UJ>h3Fc>UMIYiS-{YxXqmNE$!MzFRrH z23{w6tm-<*ji-^F=&{o~mkI%XXn(! zp#X|P%CsVM*cD6?V~Vv9R|nXbCY)k@iav<`KtzG(c*w+*;k*r9U!o9W&2q=bC5@Hr z_i>q0X?BI>JOn{L0x+ZL)_K0&!f&xcx*y%d&H`|3+hPFmX9HnnNV61N?K{%_J?a z76zZcKqmc4%@gsl6*z%$Kio7s53kHogj{!jf0+7qNLvQk8gv|57EH(&zR3T%fw;qrjgw^ZQTDl|1!uO$md93^GL zVRTQh)RH`FQFLqd)%&1AEhNZ+fBx%% z@%{f0@O{(%p*NrF^jKg42uSOID|K0A^Mh(ap338Ie$m*oUM$NTN@7-1;d@?FVH^fE z)@Ewt*)wZILFB~*G?=KbP(erVS?I5G4rNG6+_soPL6WV@p*ikpT%kYd2h?QWFX1!y zWJfWi#aitueIRQl@3~gZNETB4*rrK>pD8mwM5^`q#pl1brG{+=4% zjlWl;9}g>;7J1p8$5YWQEcQKmrcA(n_guZzn+E~R8~=?O%=Vb8b0KZ6^Upt{yw}tA zd|{wBBp50m&UkTa?Y`KBiu=qS)k~W`ZMFDPC;oNdS3>@Pi&D4) z9j&#gyE7FnLv*~+iZbRogLj6Gdp>9Bc#50vf}P}B@{rAUh@0=&o#Z?8F`I7(bFS+3 z?<8M;Lgk2btKEE`VLY|{Y}&o2e}1h`p7}DW#@2tO9kPdcRL6}51;asRj4Sk9^jYKa z=x+*8oBEJYQvN1IxHCtOj^e@g`Rqk1Y6GVnPC+xIy&fC7tCbxbqUhV+4e@~f&bjhp zl-Xs72%SNaWU$Pu5Fc55{Y|^_E}V(o)6|V;tP@RJw_1chWUJX5E{e*NLH8SF@X-B) z7lZD5JXmz!et~pJzbK_7*4&BH_8xfIV@)?O_gGn$tQrC0OdLYg#o6L}WCq{-{@?td z+y7`F>aPo^JZHRF{(cRy<269{DSH&VtB;zA7d^NKK)D#Kl zW@XTfpg#mKGBW|BvquR>(gN>@R!^gS>C9?B8k9Y=nmSn^=H}Dw`1>usd$@QUnvX{Y z--nD@rMvUx?zUa+Te@!7k6!!Td~0@+?;ck^{@Km<*iQ1D>yFo{ZoV6Ll5g+3-Trs; zU9gjUXBY!Ycjpi{-?2N%H|lQ|-j3aDd;L4f_v4>!zSVBN&%W=PE{FZy=KH6cZ_!Tj zJ?qW~v)p`g)Xw1_%g4R==<)FpaH6Eirp@}bB4IBvKWwtx4*d+T*z?W%^wtm`(`%7C zw~dBN4DT>|_BFHHwBCq|tc-X@IAqRO=gb$^Q>TZ=12B#^o`Svf-1oRm8c$VEn+*kY z=u>*8q$&+3+mkKmXLEFj{dYBY6@O!mQ_*p9d8zdftK%kF^rLbD9mM= z7S$c8%aQf@6nWnrzh@#RM3G;qzC4;0-C^5%jGMzaYUkzHXiTEro!#6V2X%t6$mPn7 zCD<&t-CX}2*_=nPW6=Xr<0Zh)`<7R? 
z^Tks{FOY(s*$^Q&3Rz?(odtaqTm z>$*;b0zaIyT!x{0uy~ss4vBkT`zAYHH$ezp-gvc#&~2Q+bmc|gabV>xWGx#CGh^^f zrI>&XgT=d>%iuPPCEea!42W<2GYd^v6GE_6O}4Gu8&+njVl`w}zz+aJ1Z!3}lsqz~ zu0%JipMC#pjJNvP51XU9mrcyG@EYU1EYPdM_1uLSe~l|J2Q79j^DOL=XZjK zmrc?xlU0-6WdL}qI>l4+%rRK#&?Aj`-{_M+_`)Ab%eFf9)r7y!Sn}HfBCWB`Epl6n zi@k3n{XL9E(}RU(xKiuHlJuXgy$^V|Lj0>DH|3AW*}pd<(nBM4w*P7Bn&XJHUh66cdxfx;k5*UubVT z$7b;9-331ObgiQIb7nJv6(YJ7qufOEfMw;uPC%zwL7V~l6e~~b-=q8Qnx;}fmW||T z^wu7nFBX7g-+gf4uxoR#?|y2#XyYe2A5?IW$YO$F3UYiG9gbJem-*C$N9%4@rU$SeO)Ywib$JRsyhWOVaQ^*8V7=Hob}nM=8V#Ofpd+gtc%Cf531 z>sw>S;4nL;`PO;jbEyZ`vy>}M(SgjZC0G>Q`TaDRnv#8cjQBvr~b^L_5>&Fn_CE#9Da~HR{MW0 zJK-C~m@to-MSHA6&ngZlbaY`B-YF)k>k_kWbT{|{@jG;U0oBc}9Djqs-Lo3iL8*O*S$w|;5#E>p*mW|5?C#i>XZ zxnsBr!&cMHI7hg-9piGQ+`iPvZ0?$SyxhK2W%lzvW`5b9n`=^gXTN@YvzNJVik*D+ zXD;J-$NJD9Clu$3nk~itS-N^eeHBVx)!XU0gbwk)FuGeHQLwpwl&^$n$C`Zj)ZKkw z@4gqh@2lMRBKN(;eedDEKkmNwbl>lF-;3S%x$b)}_dTk2SSA!dwz2v@robk$~9GmM;6Nzx<(%@&;VOd0kZLHp4W0yy#?*ZQM zp77pND1V|qt)1|;tM6Vpc-Lm|zA6{*OMvC!o!^xu{#gyG$adv!&M@Jf-<1a|J*O)N zxbI$97P;?US8mc2LVm9+KXKo^u6)aV_quYK`|fq+Q|^0jw`;ZD61-j9cN6Q2_df1> zvzy-6eUG^BySnd}xbJ@VeT@4~KNwbRaNqm6?^SxwVWoq4k51o>F4;#Zv8B7KH{S1> zm2AB3sa)RWvC;{dE}6}~EuzM<((7i%L+>_Ye5#oTHEIh}zcaP`K;XQG$rGL4tn{iLM)SE8AXqjY(mka2w+@L9;a>x%;R1Sy<;H zsv~-_u`H~BQfBX*yEbn}Auww`t_^hWcYQ&Q7_m`o>!57RFGcpREN z>)>4SG`|};l1Tu^ZZ|+D`=e%kr~Rj|>f^+`#>wG`OeqEGeAI@Y%L~*w^*rlx^adSi zOlvVP*hx4|v&o!3T6A>Yz=zqzr#Wmy^@}A~*)xGU4LY@0EEU$yanIDV^%W}b#Z2}* z4qbKr1!t6-oi5;DE$u!#z+T`etfiIlv3)wXElR6_=kDi!?QeP?`e>C|_dF9Y7O#zF z7ykuq?$^Dbt$lO#hS?KOPJ=2la54FTi^aWS@lEw|8Q7A#BX`{=`t@|qptsdLP4R>+ z+)pjkO)Kl|bb})X!GwmDPqM)db)T+}`!2pz)jW6mHPOu6=kDYE&8yw#LL&LRuARC3IgfVXsAwf60Z&fy8t! 
znHkx8ST{G2E8|uP#kzIlCiOs02KA=AHZvLilg+RT5SktYyte$FGT#|)i7|MR!MF*r z;OZ%ddlyjunT&KCsI7L6(g0-4qUM^udRS?R2Hmv&vvUHmhGHFXTH4eSxl3OSNP!dB zT#%RLfLymQALQ69NYi2i)or%9CswUJR;>P0H87E9ke<<@hRtq^bv?WRVr=&f#cqt{ zTBv?Tc}et|;-3EI=`bTV6XA<=A_REJ`WWrvZyvAj_>~-tIzB3fIxhXjKmVxOAxu27 zTuDihh8?J09Qh1kvdE~1oaQ~VyYt8f#;xLktD-V7$=!S*)2C>>y6;JfsQejYe3ra) z`>{k#$M)EsI$2{RYwzQ_m((~ud3GVaD_`!rV&6G`U2FVj#BbY2N3RQshkMYsFwS!D{i~tc@Z!`T#Vd(3!R~ml<3^p;Q!?0{ zkyCzQe))^s@^sHLlcoTA;c>@JntaKW3&V)n3GuUvTSxY0_?XVxMIDveKZidEre_wl z!oHg`P|HNJrm9SqT1&{CQ0#FtukDx_seYxM)7~g~!T*hA~F}w^xeoCTz%g)9Sq1HcZ zA63Cf1FPW`1m*X!GzvF{s<+sf?+Ch*ATFUSHznU&r<@+}wdlU&8_zUc)_{0xbxy~# zJXgM(82HH9^r6zHgd_mNCon_#xrA&J=L4ZqWBj}Hy*7RQGl!SiCXoE;gm`~6%LW3f zPh{P9MRDqA(+4Dd=b__HH?98I9kl(#*1|&vX%;g=pQ~iUy*$}hkr+N?9#;+!o4#Nk z6QD!(d4-(w;1}!2uwm%p&+@FP?Q-&PVSOcjd*P!+F%ya&`jlyI?d1#am;D$>)WW5n zOTS_kV^7I@Upm*Lls5aIj-gZqs!%Saa1YmEuj15@Ts}h%&&%PnWL4kWwTO^_@8vx3 zDbn4d-rPVbCV-JPr7mfSM~x|wC|C3e|kXk2`&WL@nN)qlveg; zy^^fD)0l0AUw7v}ggj4O3vw!#p5+|zx&~3sKQsGi=80{9Td$M7_>ONJ0k^xC4pGo4#+Qp3kW__di`h8b1YgErBOw(ve zczEgmaxPkyo}QSLXr_ zpQs{sl7wQZ6#&VZZzT_Xx&6)iMfvaPe>!7Ux&0cuu1^s@e$Zx2Vq_mQqVDG8@urM{ z(vz$5tIqQ2U{O6IGv!Y&hH+l`ryjC0^B_a>2G$Uhif)kU&DYjCvhsL;zn=QCo-h7> zBe@-MQqjlunTfxlc<_1I`ym-WcNq__ug-%(k4hc>k<_YM=$MDkuEHmy_S$*Ls=Y%y zpro~}JAMKTpo@u%P46vrT3_*oneC5XXZs`3B540p ze{f-OPJit0^~doX%gF5y;|wp|+t=nce$rLaI%xNlOD?=ZZSJFC0DY5H=R`B@=$m@- zgZ~BH4ad97jiry|SAFOI9o;kYC&$6V89e%>4)^d#R^4+==KC(G{@L#%jA`HWOP|!X z^)B9`|F1gJ|7}LkQfCDBa9APd-gfKh3h?|$u4o#`thR-_q$fbwP~tM?q8SBO`kSt! zO-xo5je|DmFj98a6igmN$blS=+-`cra^yn~=Tqiqu3|x_5c)^~3eNfmaD7q-M&qKe zN>@ag`e)4&7$d8+;b7%ewpLF+%Zvl96TLa)=nl0$sWebVkQ`x9mk9&)12aakf(#rs znz6W*2*AH?e}=3DA^-5tL)|Yc!zj-3-`_M2S_YEGNJ%IONnE3s{M?&u_cIln*)|k! 
za~9AZu_1SCo24fK!l`>kaVXQ`OCGWW1 zVmA3oY)#unmpWg*FLl|an5)e;dfIF}3ss7isM?*oU-EcW9P6I1E6F?a zS*-B)pSrG6D5Zr3PSYq&HAh^+0@@s+AxE;WUuMzdfZs>>QH$Hr$WXaZ-j5pmE>~sD zAi=AjbnRy)2VBC(!7T&uHXVeBKlppH3sxYSwrfV!J`uc^FJ z(ax&ZaVy(L=Hqkr3zr&E{<(5TLt*H(T$oH9&X)`?D@4|{6B_J*KZgL0y(s5Di81hVi4+~=}yD7o8+m5t5q-K zpWnncMZumHh+kXI!K{mQciJDz3v^a2*;GDH$R%$opKFR@z%oZixHYNRnSzlmkf4w( zR&A|KG&(hiEk@c6HPiFChLPCL;SY#*{X?m zk;>n%$z(KMTmcgAHR#+{UPcYJ^ljx^_-01%1TrWgkQgxaWjc0HlL0DQ8q?e#4O+I4 z?vn%0AugVQIEm^wa0#eR(@h){@XvzU1<@05(32h%y^bpfn`aA$SQ{Ri=ANw2oLWz&sbwQ(U&bEw)*<=v60_tnQ;?4=a??3hkM)Q&RQ$rX1T{P zq#>Y4%emw-P;Fa|Doagf8A2>(4vl?gEOp)jrEhkDVw6=tsMP@F25V15AMFX9)dsq` zVQ;l_i(%e+vbm;_2SiKIB`S){(%ce=SR*dU<}h;upSj9ihY46Mxl}XAGLY0=={-FA zQ)b*n#u>=%5vSv5JL9ja={d`0L(OQbb3f&dwj<0pJKFHPqLZAx`INfCNG{eh&MtcW zt$GM_Yf}|GEh;D^;r07ON9&Bc0AerH*VIiE)BrJ8cr79 zclh-~p5WKQ48NXzgV@B%{4120g$|?nTjB4YJ-wvExQ^ZCZ97$kaVWuLekO?%M-L|JdYZrbZ&d==R1B{=kCy9G8+_s1TBK3~b}>Iy z!`Q%)9Pe`oD-)ad`fT2*Ht+s6Z-tw8{lS^MW3qYgHrNb!Ljx^Y_1?P{?vv!z{At#) z$(ldhrAf^>5`g-e{pY6t2}cvsckPtEG@I2t!ItO$dRu$uGw%H?sJ0BGOnUp#V4%<)abXbNr@NwUlh#J(zZ9$d8D#g}h+VKg3E6;a9ulsV!{}1%OxvTQ=obvwz zy-(>>K9^V3a`>l-^=sxc@8d7cc{lvi=JC&t=elUm^*Q(?abCzHXg>eF&{g?MbILdV zborTGmG@=KFI9W?%6%Ul@&hOt`#4IwI-|SNG}y(b^DC0jz8_6zvpIG3S>E1dRGtSgDVEDRF*-j!JT7`7@xTiJ55^zB&{5W64@~Mkx8K z?o3-pvke(syrEk#_M*?3aF2=(=!rv%SryzYpvj{4E&LGF;Me{IC*eO=^A)SB2Cb|X zXa_1?j{fCefKmHKlfCf9K=sD(@!A`y_AdzFg13r=wtvB5oCT+@sUP&gg!oC_{0mmF zB$zXJ#f19!MXbIrDx47So$kXTvI!ve;%}xaDt5lU+bFthw@J@S4)_hVKOH4cThT>4 zM)@xgA26E7losoD9ok6k{t0z));tz?xAbGo+g0QWBnz((#ETB(G5vwZWMj-}8tZjK zFq^#o&@!_bSEUAGW1s_n?zCe7;v^0MgQS$P2tqsWVzw<@ z&H6PM|IXc*DR#c3N!CaZ-bJhHP`m{v#HAtsV;@PWxqyOh@&}kobX>K~+50Y2CjF{G z*|p!`9g`e5ZWE1?ow|D6ls=*4{pyrZ^=DJ|5oi!`7w%_+M~1$kzVZSX zqcA3a&|&KNW7YG4_!|F$*XZZD!RoidXUN=Ay*lz{y>G>YIB?lJ2?huKH@NtVZg9cR zjUFEyv^Eq!ViwyN!Nra~H*7PlNY>1uzucCr!^j57wB9kW0^QQ@_!sn`U?T-5P$vD7 z*Rk$zF1;$r^uM6jpg(k`R}n}!wW>Z*^uf3PzoFNepwf|Ezoa`|dOb$ZcTKN>6zq~- z8A0ySFHpUMsKME8Uq!+TTU`3et1*5hqR_ 
zqiwVhcmeu8)FJ&+Fa2Jd9`BGI^wOg?{n8HUgS_-{Ha*xOy_c6>YtsjHNKfHYZtyF! z>AgCnukg|fY3!B84f~Q$- zVdbhX5AchHifKE5XBP)0d14xBSCaB5iEBx*VziGM62ms)3m*s?OdDgf)5f6)xryw| zW#qHgv}Lj-FBFXQD*Lm-IpcTkdainANw_y_0_+7n+s9#g8Br#=d#L8=-C`q#`PxQQ zG7l8AjyT0fJha%%%5LGkbKu9%EQ_B~*+xb(EuG7A-hd7m_&NFi7w{vi1jWw3fqeMV zhckN(n@5(EM$iv7htG!KcfST5lT~k^TG0Van_ut72bwZ$b1nJy2vnz$JemaZ=@X$F zrWZTAqgLUPm&xJTamlYUm0qLpj)=cfH-^Ybb!RlhUliZLA#yA#X+UqA7D!A6-gU*! zQa05LJihuvLoX{1hpiM<+?+&rVi9W#M44w4JAcT@{5F}(!e{ZfB79nGc&#rseVDI4 zeiaHLUoL1HUS9~MGq5~7aelb1C4Fmv*_Gb~p8gY?evB_Xi{i(mqnhXHbV+pc6!F#Y zA?mW!1nD{%|Ek#Z%5IT+T8E$FOW!U*BzrIhUZy>nguTdv^#pe_8``GT{wDcHhG?}K z6pFVyttfo*Z~Q&q**{>?z_>EG*!idZew$9|;5x^>m?~HuDf2h|+dfqg7K*3XjQYa* z_!UF^O;7L^lv#LiOG6xe;cKRuP@Q4pQ2ZM#iDpN=B08h7fcQgXj?M@bv^CVr7E5=S z?zX2*uxzs4s@@VA8X6iYj(i-O0cqZ+mSfUuO%3(&R0brz5|#Od+t-_YzUt$lZ)zAs zGx|zhY`ZUVS6hRjWqR&hYF78e8tAtf!{qBy;cuQwZalua`I|52skPx0UrOQXEPaFV zHP+>(F79%-QNd<%0^n63B}VpV$ zgr7Su`-OZcTHnCZrSrNyt$_=P>ii8DZkFOQ)3XlY5v=~Nf7WdzS?^|rZ8jI*o2_*Z zXk?gaU&yzWHNd=X_3^J=KrWEzj8h6CrvtSFa8rYMWy34-kV!wEWUk8R(UH%@Dx9L;xtT!&Jo9Z^dSQ`FqAmNIQ_Y#oNa3hkYkz1%ouap=X zFbp5m!7JRbYroo6-C!-G7#9i>wI!S=W99SxYX>a;epUSe>ti4EjIAHnYjFfoJiIhe zePU_&5+PfULXU%>*}IHV5`nha--H^*Cd(1d%ciRPsL@C~ZG~>@t2~UXX)u@spTC)% ztagiHfR1mzfY8T!cIs`^A6iIEyf?Z;hLs;KlZScqo)$5c@@lb^vWoWEG$;iJ$R zmM+-lLX}B5bt+KJ_yB1db`FT=9{S`p))|8hfoODJE`$SaFk$rDrU>vP5n?OWE`p!Qu+9M-BZ zC!^$9qM@v{!8Z8lV0CMF*H{A$M#QAwPeL5gltV=FZ4)nh9tZD?Adu##?HEuXCxM+Vik zj%@S=hhBiUUo(Kgy&~I4Z+^$eZVkr}MmBQOhkyR0(Cqs19*KtjEV<7Lp&yk=?qQrL zLMwXBS}GR*P`r7GFWe*0eV0H2r@0xW6kHkm5Wl+}$nSbgDErnel;{g3F9FA%_5KEF zRlc{`U-HifKJ}g2$e*)9y_T!Kx?XREsxL0**xGZHsz;?lxL(foWt`1TW{8(lH}Kt+ z_t#Y-a3wvZT9q)wFjzG6DhFp&1rQ%eIEvQ-9-6`KyMks=2ZnrKgJoU(ZHpb4$s;dZ zR1YAU_c{1uJt=8lWl%Fn7KgIfO+BzKdL2S{pQhLS=7;ug$_IrDHuh7u(@X!I4hunz7a$l4pRole=$}f}z`vs*;1x;b(S*?Hm z7g93hRTZipU*>OO)vYI#j14@+rc24V3MFF{+)@YEH71F)t%=sRQR+J6@yFJZCP&qR z^kJGWrmOq^sr;K8)b;oCSU(#l(O|5#BvgG&xqp^JeZj<;VutweTwdrI?B0hp0L*Z_ 
zVFa$vc%R6GA>Rj~#Qp?^sOR1t>@;=lqbs7*Pr*U_&p`6eAKlpWdiY*crs<)Ergr?- z>1fpsB7=oUd`mWhk_wWY+mdOgFQENBzA+yrLhmJa#0s|P%d7f9gVm^hkPHJIk_@a&2{dx zbbWjsK5(0tp!qB{1efdT(|Y>Q7d}6wJ!DsZm6Gdk8c2zt&S!D_fSU7bJpu64OO;Ya z632!Tr&LCYTpj0q+<;5!+F!FZzqOq`K`0oWmwJgu`tAP()ayK`Po%;ID)UcWwB2Vy z_QqBf7+2x=@Oppq7?M5JJrsxEnuqHPy0yQ#nkP+2sY)K^s@;*^maF#X!~Y9B*RYFV zTL0J7N*+!Bvuuz?oZ_Oic7tz9PU7xjA|(~Uj#p;(yIz#6w)U3uIBA_TnvH$_x1{H; zx2ts;TRTXTA7H(mUEi(R+4?c^{m$2S+P}hN9NrDPWpMEpv|G-FCwiaUy+0mFC(P?+ z2u)9hFe_H!0?DC|ULE#gK^m-3;YI zacs+9IWyrgtLTPjHZtRBokBmI(8xJaP8G4h@-$v2qw$UjB~D}}xU4i-?f9EMrjdc6 zM{H*m?{Ct1n-J1RopmI2y-Jm$k*RAp>I%tlE}XugSY7NBM4Z-k_Ax7|!RcxoPFuFy2@e6!+RU)(vB|ZdjJOl@G2Qx9y^8 zf0J>GPY#%~$VF3?oM$UBrOu@k{{92*@-jf~`q}+CqF;|Y*wN%<$}@LDF;r;DvWBv^ zn9L~^qG#E$8gPMEQlPsDwCe}710spPF=`ou8&|$q%bEg(XDn`G)icJdCW~8{EN*hY zk;QE+z)TX?!eM{27Png1>u-|Lv0>$KXEm6J$0|*4D-@EmV}`Y=Wksg=YBZZG0}caM zGqk?PNOdF!O<7N!N+n{M<8bk>GvnJznmq`E2CW07kv=>@=YUzCX)(yECrT-8oAJ7I zuDjOq%v0iww&BmVSdK_Y zflzs=PN4>?;T6m_%%JSUd`f+WeyAy-_?q@nl@MSUO*j>eFnplGdh$UANpIAljERxQ z;bO+l)Ld)0+=EQDED>Ji^%!4dIX^>uk;U@ODoa0a)+N`L*ZLxl+4Olf{XV5vr2nLJ zuxSwYep+V)o@XS*<_{N@69PZ(+ADOROM_6_EFyr}q)8Jo$%X{8$Z5d~Q{@h;nBeSCyxT!=hcr}U*JUEk@TbX%4c%u}HH z%;IqGtUWX~6C>2d_$kM@Js9G~00N{0nELA6RSpv$$}MU-YlmkfO<3-%XzLQv z*4(kSsB`p3%KKt5Vfakh7bz?J!FQ1WI~TbXA00?eL&03EjZqxp_!o$MwuQKa)AY=Q zIOvC1D!Mq;{oBC9Y<3x#&KQs~Ri<7Bj90nfCq{1c8eJwF?Lc9tM6aNBwr z=^03r1d=D;%hDyXC(2GFKtNK}Lol2&bF8M8nRxgrv-4g*sI{TFg*7-HjsC#*@vBxT zj%QDfx-6$uY@$51hi_DAN;;4JDMLIt6ihyXS zG}^iQFVsSZ9~?-O2Abau4-6!yk8{{sX=llYfF53aPR{vPOhfY7VRpbP;JbvUx_CbFJFghN@r@-0ru(<`{ldX=o@tAm7NS z&+Eps$>Y4ahwVnCA0};=Jd>h6`YSPVuA0-WeHHW7d8z3j!F+7CT+qKf_Iup2-%43h z2(U1-EcFCmMgGy+r9&H+Q&IFwQpZ+IL$57F2PkR-cT7nLdpO# zrb%sLVB>r-ZJblMFVD7}9nu>rAWnc-or3{4%onZ-B_D`VlE@IB0m;Pm$=aJ3zEl#q z2fI=Qh0>Y(dr*ZEmvFxsK+-Q}2f!FLkojuny@BMF_d1*lfu$hJo8&W%{qb(vW`#ll zW>9DPfV}ZhcZRgmVka!dg{&vITyiYD$ei(+lfVN9A{?scNj&41Ana)fio$2aho>Hx 
zUmnbwUo-<{Unk5BB@RljaYcd&jzoV>H@@xo(X9iXaiPWfvP^_`z7U@$hR#%Ka~`-MhIICHdW&-^4R(I5Kg|e3Q?53;qdolVINZOXIF#_m*87L`@!$9!6`<3+ zdLM4_Il;LY2f>6dm^`gG{&nnw$&tO)qs6sNn}5qWd$gZD4L)XGjs8&LZ@yh%$nr_Z zw_N5*#*Y`T6z32D>Xv@W)t3$2!t~!={U_*OWYcfA>A~@OElkgH(?i^9ykc<6TvW*2 z6q}$izN#*^{<`2B>$%>yXQ)>TI`Fg)nNw^1O|o4RU-AV1jXIhdY-{0fJ2~TDlaF@z zn{j4S-{?qlPKBj11e|YONvp8dm51my$VBE8lvrMaCR+8O+ zSHy076)|wXUlWXPqA!B+m7(M@$5ZK6?3n)8Cw|riSKvpS!3QF2mcD{DaStq1=q#%s zL%@Hcn9B6!)OTxVMbzt$Q2pU|*6+C7T*0DA9aW-f8l-lf&2d*5;jV&&To$#L+dshTmi{O9Yv1PdgGPnZ3aBEp z0*Vkv?8PD3bq@kh!yb7@EQv*$hMd9F8H&F$xZOFFC+CJKV%fU0r2o2BfRId$ta;_C zUW``TA7{J61MxoPv(G5sEsw%1gyS1ITUi`i?+di`G&Qx2WGa_b9a*N|wviQ@Mrc)q zzv(?Nh<&=Nzxi<<5O~fph&8o%6#R_7tgp1Em?Wmr5~q1jlMR<~=OuCmV;c|`IyMqt z798}V=rt-}Zp6U(+&{n4X_ojzsjXGbfW&Ex9Wawi2OBPzy*R9_1@!yw!J;ly-){!#>P39-|nnLpqM8@Ejo*M;Ujyr@ukiayqKc_syY~7Y{%{YRPR`# zwOzHFfBv;}T;iB=XBbd~Z^-v;sQRO+(Z=Q#;e#Md|4?^EjfPHHY<&+}iT7B0WHh?F zzv<7iN|2|Rdv>kQW4>YFC8e|0&f&KB#(CYsYhvsB#J2Pyt{0=0?w9VJUZTnxlNZ;} zn9Fh4Ox6r0z++?lllaou`XVYGqAiBqV;>c>ro^4y{P^|HsP5exisr|_^C1ekuK-CncY4(?Q$GL=ZDL8m;I>ezu?FC z?T=*U{5gneQ98cx#*lAI`}&iTx~o-X9NjQAj$7w2Lso(H&a^^{)-7_o>7vH?22U$z{2*<{F^0xsAKDj5v(CAKc;iys7kV>suo~=B8gB8ms2Kw7!%`u50y)$eD=%RN>@8U3j1X`RG)RRI2Tx}aS}t_LzqXXLImZPO zSI^Td{gmd3$C=>SWDCV#CD?$l(1lgFXH=k9VfhmA!K4hKN1wrBq4+^8An3zvkIX=} zW#%V#!0t`oX1d<(_fR}g%0M-pce>Fc8M&}UC|;U!s7x5DRH~n89f_g%Ct{m;MQXVw zg5O>aH&8d27!E=zOf*asir1ETkhdU5G!bjRpIOvQzk$3dVTdN(72F*qW7cgg#f$yT z?JyMjF6W|n2tJ=(D80c z)d$W=bSERAqKEA}cUkE)D-hIYa28D>E%gM?X_t%a!6*{^y2@$7_h=@ zsltuhdOoE4p-M$;OOMD)pb=SSj!>5J7tdqWq#H8J{CkATOX?8wZ*ndyRKC`n4s+6X zy7RBuJ4nyT^j{YK4O#dE)nuTU(GnbAU;gH|b^b|bl&=lMF(-k7ITKcHceZ~?44CTgB9l?ME-vqh%KzivVTJhH^|FV3#cVL0cgfjG zdqo`kD2;IpA3q3IrPG&73^ZyuDos1TclvU~(%JGgEdb9|Nv$`QIga~2ipfgf7xJAY z&`m*NILMOBaMxm$kgaJm4g;ISxmvt(VfRs-aNv{IGR@X{rQzzxj0j) z(fJL3)W+afyfi`i#^kIhc%~n9&LqhmDm$EN(fr4}q`WAAx~OprQXRsU1c=jj6NSz6 zvdkGwp488&D_7yo(|5ZVaP8rfu#!5uarkHOMg0eGmO-mDTBAvW5|Y;E5~>UjOG-^kV5s{j+|lN 
zR9?w9POgP90#IvNAW?MTLh&9)N(UrIZ)-Qh5jQmQ6kJ9^@{i@_h%w0a`4u+fvoNg< z8i9T1k#Z4_{$mPt#uyKvubdj}Vw!JFxGRb(i?pX9p~S&IEGQ_LOXBQbn&}Lozr&e9 z{`fNL>W?nURNwr2(!@YX3O*wTeXU=UPnOF^Gms2Hjgxj)6_LH{aOsXhB3w-RP?W!^ zFzpc=u?0}@1v-b`M5}jR1VO5K6g`z@A7$JX7;j}E&3670CKnmP8BT}00x3CQpEI`6 z)gr|Am(mKfp;0y{@^+Qbd#<)XZC(3YiXPm& zjMkBBe+y>sTrg9P@@l;v7~+u>a#0ho_AMy%IhLDeivU17YUsYd&L^AczUta-?dg5U z(xxm@=E-7!)3+a;pVznL0MGKpP|Fwo=FcgFqH+C;VjVZ$bKM2!8DGXHNE4<2`?_vh zdrHoa61LL<$>++Wyol13FMwscGY0#BI*7f45nSQuRDn?rGJiQW51BhAs_=7`(dcXe zV-j{I@f%;-8O^4%sUuwn-2bxJ$bozQNqKNj0Is1!7a$bk%{lUtTwPFlci#A!2Gi;E zKmM;BM$u%;na+8;n2tQtqM^~W`%_+b`16r@g3+Vy+?$c7A#xI&uAwneK?KS?{Zr#( zv>hLYlI-j_#n@6PseRi<3Y+orF{Txbmzid~glA^QN?7p$5RuLShHqrNLY@W~C}sFp zlE^5jnA($#+n0_shgk0d6f_(P@@GD2ke9MWTUbSCWN_t2VC5OgI&Klg@JJZmHks$gc0pk zjT16i9v_>>IYrc%$298j$S5;3#1(l%!+_c+18O32!yEI|`3yW}>g)tB{J8VzMH)i{ zr$h#e8ht^~#qA}=n;qB9!}9=M3P7@ci0C9MuHlLZ;R-wFyJYPD0g1at-rw{U;8Vv*!Y29-G$5-1G}q($_+n~( z{^FgNe@x}IzYI$M{g>*)-w9z}v{$vHH(GFVz(0PWGvj^d!l|KT(J3doNY*{ARU+`u z13sR!&RhICkMl>ue>hmTX1let!T z1$J=SBR%7~upphK+!u&z>ayv_cD?*>7wTe^Qu zQU?qU1KKxb#ncp-lEMUU{M&S+UF)l|x^iOgaj=t`&%;5ciJOMyt>AH6Zy5@-? zag4tyPL|Xk_ygoCgcel(9==WQ0kmsujbq&JKS#B4rJL${Z(IjKDTv{r>64ukZ8_6* zzmR9 zy^qpH#AScFa`3ynzG_~>YQb}lcrCaBCibQm1`FB7UO*ZL05F7!+(u-ry!K2IBPdI1J%1HkFH2#Wu?%`hy{QdIr_h<3n_OOS4xhKS} zHbAL6&;MzxW}l<(Z-PH1-#IMO;yeY1xWnMD8OTqdIvdJZ!CnVg7FD?0wS;#uGKQ7P zIqmCnw`1oT!$@g-Q$7#Lyw4?Em&>PFb=D(DCkH!Z<>Mn;9?fTIr2)@j-LC74zE zP%(tIhn)jM5u%ZvP#NFerOwm8gvefd#z0`W*dRbf|Am8S*CAB&5^HEXyboeN^x8!o zjb=?UUe(0c$$a(=eRp1J3OG)fD{f<Tz^9bM&a79N5%Lep`+BciTf=YpxrX zt%5N{wmVgetH972fEUxE)P0o3-_e5?ty%jt_oGLCHmB${9zeeyxPSBDR*k69oXben zitQ~xrcoej<{ZPB{+{3|Gcb|tZ!fxy!mUL&2^K@`IcDpH52${&e~PB^6;mD^JH6zY zd9a(@;~t{FSoZq+#lGPSKaTxbYk!v8AFC#;8@$=+g{FmV&9)+EYt!xv*6gO5BR%I7 z?ZU&-QQ-LL(cn0KYz~eaf!12&@Rz{jY;k7CWG=r%Yg&~fa`>F0MLa-}5m4k-@HTu? 
z6=d*3k$B3Ic`?Jt&}6FW`xd1oLzAefZbj1y7y%QlIlv%!#GInNEs`7^Df;3;56M>>4U+Xnz|wR3cv!At zG&_yCSiZABRC%4h!t!a058HI0`Mg20C47G7BaDOvckmaS^=OY!CEXj=`hFcw3cTsU zP&%u9)Wm>{lLZ|pF0DNZpXEjr-Ga6u-{#id?Enmyu4*gS+ z?H?u)uYW4P!K&%?P?mR$J~?{SH&V!qz=_s03uNTTIYl#h06|DobXo?%EhAk7zwwt8 zGS@jX&vsEfS|~m~CI`hDgJPA7;=(L4U7={;;dJN@lbrLslJC))FIhzUB0rx~^cfGJ z#qqnMXEaKs?o^%1n1*gJ%a>#IP0c>1eOxU=>7GjPD$T{~0U)#%{Yd~}ng!Y1c7HBc zzD!dl$(P^oK&_H5^_g0a(^ABfFA*v>ef2X{_~02iggC}jc(_~PbGAa{2z)nqMKCtA zxM1OBj3|G@;?5#M^4qkZb+|8bDDCF~OzPDBzj^KdNJ|Ra{s!ogR-qfw+IwedEQb*f)1m) z71{7&jsjGWt^b!Qr20qm;MRXgrvA5vX6qm8)!$e3&pADZ9($Vl{kiogYPEW24_On& zdAVofG+r$~ywCF5@WUD&Tz>e+U%j?JezM08?~id?{(ID1rzE%LKT)!^=yn0h<%hAf zJ69pH<6%d%<}&4s95<)vVjigS49I;C_*>++%;MIb5~?R7ivpilXxr zx(w1h!Y03VB-z#+t391@f%vP=pu!I7@8n1&7*1P?U1iz5LIb->)wlbqlb1m8A4Ye~ zA715LJW4vUpr7jHR~-o?EB^x3dx!T_EQDSCoc1a? zsl?we!2h(1z2?{we6wGg@-9(;&Q-}|&3S@*r-5r$8V4KvB$V&N1OWNIV1bWnkUE1r z=g@+p?wK6p4>37jHaVQ5$w2Zpqha>lt{<+$zG1v*d`p(b(VCAR6CKFY@we-C_8=4w!3$~=BV4_~P(prBjz&Hti{{GAz!`M9=qu+SlWE}Lg0%S4qM+~n zP`v68^Y$@s?}g$Am+__}4%A59a(QWt9~q3FZzxX>l)_YHl-ek*E?M(63w!5q)0!|L z$%~%8htEc^S6NB28a>bmcJF3VwD8os8rNEBd#BBb*`FwyktzFMOaHq~S?5u5L!_D1 z2$8PiM?{Ko6gD@cg?4PRv}5Y=k{$v*iGLRzfA1c_LezbqlYx?0Jnk zANIV6U#EsYSz*ajhxY&G>W=M??`hj_erdmbFLQs#y1$q(Oum5oJI?)OuI0PiewzhR zVX`6~&2OdqJJHQ*+HaGFxJk9{?fkS;iwbC5K}5a@@I)wab+NG* zX(62W{YJyfCu0%C)Y|I&fK^X^e*{_Jw18POt1wIH=(83~_xkuI4k5}u5J=p(MaAPI zYW*cQF67Z)a?5h_bH`fq^A|@y{my5b@ys%Rzf;gYdSpi(W2Gc@?1&5<=G^F`vTV9% z4>pstyM)QE(wQI2Xiugs5@K@Tbv`-CXsYa=Bk$#n#;KVB_v}G`$eCyd9q|S^0*9T8 z3r~E%$9)}Ufjyn?K&j*85J`D6j|{+;Ev&pY2J2PyhypI;DvM3!%lVp*q8yNy$`>Yp zEt{@Jj<6G}mwWX>@b!7-W6k;@NG-O_l}FALm9|j_%Azs3AiTHP2Q;0AkE3&XmGpRj z^Ec2B>H2#ll(VZT;cuR!Z;7HwMknh#nIE~DR4<>hSNhPbE++lb=!IyI#5EXOeD&A- zEt#6;&>b2|O|3vq58CNRI)%}CuHhk=n2AjdcSNFeY8(6SxCu!OYSwwl#>9l;XXL+u za}MUSUycM9qRmUh>7VrEVrk6mJjosVgHV$A`Xta*t}lDtS-_uIqW8wv= zT&>Cl0^LlmJEh&Md z4aXXwzeK8lJSmnq|M-q;HLt`7VHjXDiy!U2lz^nm1fRhKV;aa06GUA)1roQJIIw|) zyCK{CHg^5VAiYHVb7JTOFUF_z+vL3 z6R4#med;TRK3-iRokcTbdrs-?H>! 
zI-?MCqUi&9otEC0zs2c2_&YYuy>JZ`>E8UUOrtP2)P}}CSKc4^q3%P1T6@IhqS%0F zsV^s*NCWe1JqmB4f!UM&S{};uP{u=r9x8ZXLX{&=qM=p~0u#_fEe{MRJg0aVtA|lM zjMKwd9uQa>IEjYIdYH(=G(AkN^OrQ=390>q>=4)cZ1+9NZ}hqHclhCyMpE3BG>IxX zEs*>p2VN1oU&ycAwO_@C7K9J*&u=n?P(~w1!U6<}LQQ2C$T2YDFDlAbAhx|Mki5b- zWo7DBpt$?B$%c)wEnoU?ke^Ck$Ko< zgT9ywt1s2y{IoGj8|$Wx%}=XVnu5@%Ty1{ZK&6$rX{GsTy45RH=%xko(>`J7rwpe9 z6Kg`Y`TiSpL0C$1PW6q;PkU5pkGp9V`DwpX8ZyU}o1C8(AuY}QbTCtX!nsPgNC~6z z6SSY54l1EIKjCLeFg%AhYcAgMhfeRQ5@q=bx&}CqfUyY!DUr*10uJYal^h(-1Cux$ z&I8gL4(FkQ2Q+8nq1I5@7@vV|Ear~Gf{t<*%@_mC~oV#+7I3w0zMoc^Ns}xED#N<}6;G)y_tq^j6;R zo6eNu>50U^`)}5dRQH*?o0(#^RR$yM(1IK5|A8*Si zQFf7JRkr*Uro2@D^b1V!SK?_vRej8ox9{)Dx!vJW+Tv`g1#+_HKL?xSFU%UN??C3G zXHW+UoYf>-eTdD@jo8Z))v>nrSbL%WMk9tf5S|<`@Nx=|ANEYZx2FBGKq5#N7W!mE z`Yr3!C7ftJCpGf`1aDojCv?99HmGj7Oa(om8*4#C8QYUr&KXHD4(Oe;G#G1{Y!Z1u zHrX%8FZ+P_vkwS6`+!6<4^GtkfE9*Vt(GVh_*dq6T$if-F*lj#a* z*@n7K>!0bZpT19&nQrS#U(_v}Nc))di=m!$_>Ct(%9r^=`A0J4Z?xrCTR)?jERE^* z`Dg@o#g={{Z+((Ce_MZ|l{$Ta{Krm7*iYkDbS*=SDRLfiFUxu;pw_|YTs+B>1RpT- zwKJy%p6Vy!yChRvo!y+l*k%z zStmad81(U70KMh79MEwC`g#l6-xQ)Ss5d6`Bh>rgx5CCk!0ar{WXy$zV!-m>@a^ts zVuQ%-pZ{+3#{>May1gmF2qwH13S=Qd1J#5E;XUaqi`ML~8vM;DR0VU2`tu?^_5D2o zg4Qe`5)j4y$HrDfYrbVU?u+a`r)UEYSW=Yr-(*c6Miah(QJkvWCbW{Pwde&Eax>2& zGp)FBj@OFKSmN0cr30R&yWA$|QlOMvnSpwTpsuUt*`xVn19hUHs{W%b$YU}fKj~=* z1IUS4kpJBckh@!u^#XZQF35ofhm`D zzin=^-hrUQx@es}J|0GvX0qfiSL#LB!$QgF9O==qirDs_`){1+SA&aonW%~uBRA$5 zWi47WgJn8BG^a#8G=dl(>7f8i6x(swy$)mXjV-&DL8AdJ2AKQI8s%$_WVyRer& zCT8mOhP9WrLB?q<29d!ngIBovhk^H_g|erv`i-v!5@$EYZw~a@h@q8}XYU5uTAAzH zInptB#pWfo9F}~=ZIBpmi3rncxt6lUf$^6f9`HZr=*B8BU%WIvu`pWm*E>zqj-kWn z6y3p#ns(s;nik}fZCVp-DDxNXs}SC%eP7U(J8$2s>gX=o7l;Y3tAU@zBU&@iBC(*K zkl4*d;=TSLv64@@^qnWTrv58_vi?(dUO)Qd!+;j0b2-AcFJYnn-_RQZTaxEd*$#Ja zq_;89p*d3Eav!7kh&v3?x107J$cu=+L1S(=_&K+|3)O?By)$^)IlV71J>8j~GE^R0 z60Mp42UG2`T}9@Hc~P|yRr?N~77YPI=gePFAp^bXOZ8D4YXizG9x!Yn#M`&# zKaSNpWZ&{=&AArx`3#^rMW?xt_Yvf)_~f5|8KF9i44Wv;v0H%Sfso-4Tk2nZRB9hy zg1#?>lKhubt$a0w5)&)@j{zXIw8$;@IhswRmS98A9@ar_S!qBgV 
zr=8Ppe^P_J-t06UgRyP!>qWGLFbfjkxjkA$kJ9zN3VQ5|P<%@+JJlAL+a9*SX}137 zdGgOMiLZ%$#`eh$9m2v*_l?k`4X+! zc$;bcHAWbH!iz}u0IZa{#T7<&|IW5wfNlGE`ib@{Ul-$5Ez6Zh6&ZM6K$D%rdzIA8 zeo&5lv*Z6nIx54NRu1ibn`zYq!$E#t)T*~>P-;4#@OfNe+LLx`HS%yt?8EEWg&O}l zm`fhENFFY+^6tE}|0YwRTcM4_4#T9>J34l4df3)Vl-6v# zr;6wB$&SS+9qr0Lwad`+3)=7#?Ns$y1FSxStm_`3lIKO7JPD1INwpB9H_%d6hTlpEJ{j^VHF*tGgn9 zArzbuM@h#!*FyZWA>wH+#C-(u-+b~f$RGbm6FdHGsi~Of82`L@;~y+i3BKB1n;$3* z;GsnyqI{w&YSU2k;GBK5=EL8awp?Xktl>p%xlaIQ^NH?j@$3PHeANzVK|QT5vP z^R#pR^($W&{Aa2m=dJEUqYz1G4bF8}kib9}B- zzMbOJFN;sEeAcLq*0kJWuvu+5N--5YuPSLS$?LO81H(4Ov>}^7|AQYcgEb1_o=-j?;3tFgp3_de>>NtO7*mayi z$FLJ-_}uy9N9KxK{v^Cx47{DmyS-GYn!!1Hl z(+3B*2#Fn2w|jjs*U*2VX{r1#X7IFgd@k5IKFF)RHSAit(s%E%Q2)QscXKA3nwt#$ zPd3dxiWkv;TQSWQ7aDDCf^F||_|Vcn$8onlbj1`dT1Ue_x(D zj<<}ywox6tQ+@3z0m69GxFV>dTyc9~SjDKLq~KA8E*UwjqcRcfOcM&szFi z!PCy^bE@)r{z$p{QV#wo)W#;0J7wfB?d-V0zY*N#6wTm8_>UC+7x4+h$V|eUIjt2` zAOs6_E~F|Z~C$Q1=zD7rEOI{JS`Bu zToAV!5OV~=ZN%vo%IFLZq5j~|Mt}kUOcQ{XLjmSd;WPsvC;%c^e_MJ$28W?Lz=0{L zAoU3321J!W%*X|?s{zr&1MwOo7b3iVwSGj!M|yDt?P`i{{hp%#r@bqIj;c!2ud-m+ zEI?XW+fo4&gphz>0-~v~RY3(JwuoyIvJpEZv68U$bQ=gkin0{#7Hy|n5tr$)7imT# zGKxuLYg)Q#V>3vP8d0$xC7Q-IA}*QlyUR;ep>xL5ea_4|^WHgC|NZX0|6Trfw|AHS z=0>x&-$@Dl9{(ct&64#2i{!n%AHl!K@?$c@$3=*I>2F|z=&ti2@W@p_`?|pI zjOO1W_#f2##gf0o;%_;H7lv<8c{zMIQK!kdHiZjKSIKl$Gu-D!N9EKb!p+g)_LbrG zvvOL3f?&2sL@~-F_o`^_K7u-ehr~_z-blFS(y3_m`uhQhvAKUaz^Yk=zR{?&{%)d#L1ILGG8Lx$_0LM{}Pi zxd&U^0lW{h%@q00Cij|X?m>dPkLLa}>qs#9aGlQgb8JD@N$$`82JRWr+^?-;zF)z= z$oB)1`$3C41-rP+*^>K3awkV~Zx-BlYwjB)_l*|!UyxceMRMOk?tPa;No}#<&ez-n zCHDx6dn-z=`CF7DltMST1JT^W1ot4#{RaI7li#e>`MxF@DZeW$q?SnTdC}be*~omq zhJTUoM^l4; z!$bS;Ct+p&r<16RGm$-0C`-ZlPs?fI=Wx1~PFG9OKWaJkN4sT?lD0w1HZT;;D5gY` zS6Wt+u;px*mtlM^MR<$lRP-G<&7o6=bjr1yu7GrRx#_e=I$dfxW%L4=46?)&f9f1A7DCWfg`dU={sWCclB4nMR1@ixy7>o9QAn@)?kbXPrCiW zvTs7hcVT78h@``QlV!hwvYJhMoLx^|Y1#j01okL(_K) z7M>fX-?|2+&e`-6FyIUb0K2{403qb>ijbMxzQ_@R5^G)gqoSA3^!n7gLWbEJVS@3! 
z8uF9z6tWN*2&-2utB+A85tcmallb+4oyWj%OEWv^_D&Sd;Vy)zd!b)7u)aKwf7X|# zg=omXKA*3=i(u--I(qUoOn}nGFgXox537^D0az^w^>4?rBvu!_Kx%XI1fK15&Z}H)mhMJWmT>1RDNAmi#xWqvCv42Jv&$0mNA#gBXW|xOWW0`-=QK z@|qzWCx3!w5`I$7#Y*$NXo16(3@mx}u(t_|#ml0#}rymLR|cLxsmg6D#7!+fw?>&`((Ug-7|bFYChJCwHqB$&?w zxP?QK`D>W@d6pOWL(h!TCSUd`=Q?gAg3|Kua#Q`xz+0F*ZhzlX-0kUo8WWzgu+9GU z=owFU*UMZ2I)EjK$W7EE%cvsPKtn7HNfDef%WPpJvi)KBZzF1sr&p|EEVBY{xe`t8FmQ zY)ehE7htm|_B@Jv5van+=b!@GRVG*6)lEhtKU)TNu~XqS3^fqYr;t0u_#-S}qfq?~ zNERydhm0sVa=*p!m}Cf|+JYfZG7LjYEK7vxlMD}741URQzl>poWO&i3OQAVjGL%^i zZplz63P%43N-vVM_c&wVPDb+tMHn18*0|>3gj7y$_b| z9rg6hQwS?8-wUPh+okU>kZff45j6eo9aZp+G3k#l6zK^605rS^f6@C{p;VA67#>XI z{Os>=aXFgKe)3>!0NY()?Fn5!n4kPzJ!M^#M^E+{+%DD|9>n+W4KQ)}bWh`ZSTBi) z0{Nz={XkW*s$Ff674+v#%hX~==WLqiq}IW%PCg_aVC%}1#)xM=HwA(e(9Fr?4` zjd%OJjUP1t4?r|hk=C=YSe9xXIR(W}| zE-E^XZg4*O6KB&r`s;mK*20%L!PlaiKJLAUDO^*oGZHlHq1DwD)2u5ex~R9>HQ|o7pVcsChBB6aRPQ zo(3xv5lXhvWcwM|yq^xzb^~d*fp)>%(>RJdEy4NYR&yQ#5}}+fbJn!XmqBTkH{obr zCs-1^7sY0U_sN*qDmbt6=>)_dlUM35Fo(c{dt)DO`|23CCc1r*3^_FJOZ0}2_{OOQ zZXk2M)Ys$qqc`xmH_)aN!ST6y2!0_ZZ|~SRrqk^i<>rWWyNGU_8(=}y@)hNAFcKxp zqyY)YDJMTvqaolp7-O{iMcTu#cm*M2w^zRm&6Oinv#6O$fOL( z{{!<>l9JMrC}w}Lc@*}7(H9zlBWEusDh{vNLKl>UfJ%;kiur3;IiE8k=b-E*q5#6J zCyoouyWx!cF0cX1d;kL-wcfezFV3bZNS-fL;J`XjD7OIzVxe1j4LY{&j`-bD!x^XZ&TeMDxeZOk z6rKjwO+-sKZmh!^9x}aE=$aMpptrdK!_S^-@Ib3MlNOC_Rz;;Eck?E;@3v47nqP?$x36h%>IE7VRakB7zTrlcw|G=C5nzLyh z{q?keRL=d9c>VrXN`is`e#>ciofIpR}4c z!eZkBM?)(#-k;;Vvlj%%JT~r2z_l}YC0}_cKQ+980& zzYYd#rVAPHICsr)n0rw*t>ohMMwNQ!+6u&HcEAUu`Kl;yC5TVTFHOdrsP-Gh^&dR* zOZjGD7FN1{vGMxj=;Zl`E6`VXk_^no1wAY2qQMChbSI=ujq_EV`tRrO+=eW zrQ2BHEM!R@r$9czg$iU53=#nMMZ(-mIph$028F@oPbK(3fyD%GDFE#p4)aw7mJ;ky zAcbIu0yr?@Fn_54PJuYghZS%U+^awa!8!oZPTv;uF5bW^i(D`3fLFIQ&b51Kouj)X zGmRzl(_y+{`~_Vvc0Pv}-Ip#XUHBl|kO%W7?Pnc8Ua(KhCnv{E&Zfuka(uYqXw7a8 zucCGWV*uB@1TR<&q3?ZPj3!rgh!OELe12o-krK8>#A<-sWHA&Jc#^|(*xW~EQYTzmPP%!6SD+}!Nm6(7^aCBE=5lC z0QXdp&ci{;ut+lOkPMT)U>+P`5Ch!ZmhVTT?^Nk~qx8K1TSeVFlIfcR+|8En+okUo 
zd{rYcOEC|C#BAwi*xbyF0q*v}A|2s}K*RL{j9g^#XG;E^n6L!@2pQXWe3-g-3}tM) z!lwpsOdPYHc7FouRRFhzwnHl!j^y^dUF@B0fg=u@LG|npG03_$1&Rh|1veQLFCey z(}dDmD8pl+ifRRfKE9qg3Z8$2ju0FGM>(Cos|sZ~dk|0H)#DEL-$tDPc|Bww&*2;!U1_yTy* zh;O_v(5b)mll$Mwh&iuy?>nMZIE4CyvNIii#btF!oOz4(Q;sirv>d;ZeulzNWV_0u zxE1>pw$hmpLu`F({2*4pb`I6qulooE${WDtfU8mr=Q;yixN>#?j)_k@*SYo~IF>d3 zXb4<>1Ec%JR0N7$eW(;!xQ(PA5Sj+=B|{y-i9?)22`0NmI#YIyhohJ;M8>yaR1)q$ z3y|4j_G6%WeC-L&ACMdTUfPj0XY${xE5W^iXE15_Az0_aHtb0}-y1Bz;mLOA+Si3; zzcf$i%7NS;*tj1S-rybh#=Ky=1M1(Q?Erdw#awz+_`PI6p%DpQ9Mkm#ZXGf|I4aE- zD06v(S9?PJFqCP94tS`wZn!c5aZkl)(G!}1?U?w~bvDn##Vj+hHwyro(|?cvlq8S` zxy}%$&yGQ@U^tPQ$s4-6aYsG;ree78M{doE^6~Zu?5E-892@Xt|GvJzH+Tz<=AqUJ8}7k-7FvVfJMocQ&r0W$?;TLTgNTHkx1&JQ(ULJ&l;0K&ynN$jNPV!ExBM_Yu1l z$QJrVj>A(i; zjNHYvS!lzZO+z78ytf$r8%S6R?jK}bLmn_NY*I82H7 zwOD$Tk|=Lqa4`vKMn!X@I8a*5PF~>sNU~6(;tQnu(jvIS1ydF^1F*RZ;#}&AY2zuF zb-YnGyeowg82Qm+5}w&QfB`02zLQuRN4c%T*ayRu6F^bALs~80Ka;*+lD=n4-|veV zL8Uw74U}bs9Nq&O-h#iF@{D^+qslN;56TNAJPlLPP6Q$JJQf!WUcO)f&Jso64V5>J zKf|kNKs?UoxoALgMFZl=*?1e;k6h7ycxG-~Ep+U8uz8aWNE;UMavHwut{Z`Y@dvJ2g#)G;S;-9`+Ilk~)UPy~!-NNcfqVSSIW@ zQ)n&@AH-^&a)pDmdtTbHDq{0M4hj|vR95&=Tw}-jdj%o7gn2_&%t7m!Jd4jT zc=j*i%j9yP`(3P8S^ZDAybpTF;po_(6h5dPs*?j%p&sfeW(wo#Wq!RAbrLFGZ+73p z*?64wz8q6PZ1iHv#}J_cE@~YNLL9I!cZkuA4-;y@%ypIR%gx{`naIMC!ex-zNjurl zQQ#@Ox^qfiXxzDI_LD?+d=wpUW0rw?4@1cdQD9L5d)jzhIL8y{;eWwf_6FypRh*4h z5#iuUrA!3TkD9n!&CT%R33*V=P!BhHk{W)O(C}FzzLeOxV-_h?m@Gzu-hHk3s_OQp zwdMtmsremmV=KC`sRkw?aR>;S1nN)XhK9VfL%2Hwn=m(dvpX?A%Lv5$L?uipe1j(j z{w!vsKN1tPr#W?Eo7LWeaRb2f-I0`cc37ZJJ@^1R&38i_-ejj z+{8c_w~yg&x(7YIUshq@c0$e<>06jGf0xs>2V2@(d%qkV@;Gcm7V{4xyuVrP;|nk$ zy((mC=;4I&8BhBO==5?n{hS_q(#yDZc^$ajh$wc6P< zlI?)Q^rrolmv6`Hex3;zjzKDD+w=v76xy`}X!08-}tx(|+ zJFXu%NUbs(lSYL+&oq3Nj)?`NW|OnI6<6vsVF4-J-65LVjVbO83x-CZf)GM>HWLlw z_E#bFnQ5Kw4ReNb6$`Z!jkMS|J_oCr-oPAJB-6cjPKF}_rT5?!0ihvLtdYTR%ydM* zF67VKxZ4qxsP&a7Q24G2=LN6uQ4xhVa569Lle|DRx`693tc)wqO&OTHL0yYcKLm~T zZ^oDWx}#|U&fvxW5S>AXSXt^{jv9^5;0++v8F0bp8O!%Q()V`hJ6rlb 
zD!x$J8Hn%U^_K4<>DzlqbToaX?<263D`*%XY-Ll6u45x;xRf9?y-lu@-hvm7#A+xWgy9Odo1;+h>8m4K`NHl83Z9{fD!g}RUJK$nLjuRAbSmM33N zU8D1R@UY153M2+xb6*wtU5&WhJGf9Ga{hD!`yF$6)O_CP=6bf<8ecC;5qdBoL#MYXn|P%$IH!3cQ*aeG+zr3hXCF z2WFT#0^dfAkqR~|3cQ4vUt*?9;2Vki6Auu0Au-17hG_^qmso5x?8FeOdvq@G0J?Vw zJe3%ql*qrp6N&MOjQk5cmiRp4Z316RjOifcU*KWH-yx=O7W@krtU0>4W9U1CGv1H@RL zHOx*lD0u(GF5(V>cM@Mjd`RH!#1|8{2>cZBCB)kV-by@*c#FUf5sxO`B=CL2V~8=h zLwp;FFD0%Kcr9@%aiPGgiPMPl1@;q6gi6;3Q;Bl~K0!Q_ zC16XymVhk*TLQKOYzf#Buq9wiz?Oh50b2sL1Z)Y|60jv;OTd_C16XymVhk*TLQKOYzf#BuqE(+Tmn=y@{fG%@cuq&+VZpI z_iZF#mzQrNt+V~w@;h55-^Rw4-?x$0*?#}O$?t3l*f#&05}=|oKDr!gx=a}~UJ-}a z#^L)ErlK=GR4B&>DyXVSbvRT!$ESrF+3;w8t|p18c#aPhvhl$+4}yO^HQM2!;x;}V z|12pL|!@l4PJ_I6gXF<;~cj7^%pPkB(18WB3M0dn&%;Q==F^`AVi| zztBBy7*xE+=X=z6hlh&e_@o#zBr1yIqr;~nIr&I;Dvsl$<)DH!GLFfRRj`In;Z7xw z#VX-YDJSGJSTRCln7~>NGW37ke`ki4P;8jXRr=N|x>?aD6>U~@ucC()J+5e%qP^ac z`95FKF^W!6bhe_46fIM9xuWY8-K^-7iZ&~{SJA_Y9#^zW(O&PW_!S+a=oCd~E4oP0 zGDVjwx?a)Eiax1mv!Z(yJ*?<)MY|O3^`44f(J_inQFOMVixe$Wbh)DI72T}plZrMg zx>wP|iXKhk$!O0PQPdvpV&Efie?nq>U-EIoNk;Q#CFYrnz&4EP^pptdH- zhGn!0pNbY~e<)GfQ_&%xams(8@((B;R%`mtkF&o?*%zvC&XwMchm?Iof()OEA^F^{ z!r!9&Q(-2bPw`{;4a%O1Hu*4q+HX_g|Jr`nFl_mr$yQ8{O(rvHF5R_mAw%c&M{@5q2#IC#SuC^arQ=f zMO8&zL6P5Iw8}_FsjATCy3&<(MtXTQ{)(%YEi0{(D7~f<27hT~ku;IrNMBN0YowQ~ zs;aH?8|kG>3(EXO%SsEDmH?Gk)u*e{TmV>6Rafe-f#;&SA_kN0uU5f^yX4+&5AV^mPQ^?VaWb!Zz3YU}Ek2nWGlTwPIBP*zz{BSS+F1^%L{lImp=msQq7R>EgZ zRk<|O!cT2=y&rjiR7w#6iURUg>MtrUEm&4vQV$UdmQugJ+F!7&w6+#5(yF4WtlF}v zs;;XjTV?TngMf_xUN**cBCo#E<)xLG>BZ48i#H-tBBB+L(pg?zQBqr1h|o<7XY0y_m8DVLR3p#jnS2n%iBi~bqFuQz>$xRUyt`R zJ*^xxzm8v1wr5P2=GWs(P4)N^Uc{rr(YOf)hNb!S_*qjue%5?C{aXH1fE&%P$IqJT z@iY0!6VK1~m2NuznymnMH1+6#yR<$_{)50wek$_i)AGw$I%Ju|`roAuie3vE9siLb zM24Yh(nSJVe_HO^;$;}3`8%>DzowPV+NY#C|Js)0dUEtsBrOR^GmdF2In?KaW;b$R zisskjdrf=33nWW?`u{DCUw;p18Ylj<kx}GpZ zKAKM=!&nJJbpBl`UQG|_ft_NA=l=-yx^1Et{T*dd298IUFU_yt!-a5*j=y7~%(tdF zWR7|?zqT3=Lo~nsKGxI|TYS&T|7`FxOf5enN5XgeTRF;g#+ Date: Thu, 13 Jan 2022 03:08:38 +0000 Subject: [PATCH 19/56] Remove 
penalty for attesting to unknown head (#2903) ## Issue Addressed - Resolves https://github.com/sigp/lighthouse/issues/2902 ## Proposed Changes As documented in https://github.com/sigp/lighthouse/issues/2902, there are some cases where we will score peers very harshly for sending attestations to an unknown head. This PR removes the penalty when an attestation for an unknown head is received, queued for block look-up, then popped from the queue without the head block being known. This prevents peers from being penalized for an unknown block when that peer was never actually asked for the block. Peer penalties should still be applied to the peers who *do* get the request for the block and fail to respond with a valid block. As such, peers who send us attestations to non-existent heads should eventually be booted. ## Additional Info - [ ] Need to confirm that a timeout for a bbroot request will incur a penalty. --- .../src/beacon_processor/worker/gossip_methods.rs | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 2b6ac02b622..9ece18d02cf 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -1532,12 +1532,9 @@ impl Worker { } } else { // We shouldn't make any further attempts to process this attestation. - // Downscore the peer. - self.gossip_penalize_peer( - peer_id, - PeerAction::LowToleranceError, - "attn_unknown_head", - ); + // + // Don't downscore the peer since it's not clear if we requested this head + // block from them or not. 
self.propagate_validation_result( message_id, peer_id, From e8887ffea052c831f1be5f7b9570b0a5ca4f7998 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 13 Jan 2022 22:39:58 +0000 Subject: [PATCH 20/56] Rust 1.58 lints (#2906) ## Issue Addressed Closes #2616 ## Proposed Changes * Fixes for new Rust 1.58.0 lints * Enable the `fn_to_numeric_cast_any` (#2616) --- Makefile | 1 + beacon_node/execution_layer/src/engine_api/http.rs | 3 +-- beacon_node/src/config.rs | 2 +- validator_client/src/initialized_validators.rs | 5 +---- 4 files changed, 4 insertions(+), 7 deletions(-) diff --git a/Makefile b/Makefile index 494f325d269..a4b880b8065 100644 --- a/Makefile +++ b/Makefile @@ -144,6 +144,7 @@ test-full: cargo-fmt test-release test-debug test-ef # Clippy lints are opt-in per-crate for now. By default, everything is allowed except for performance and correctness lints. lint: cargo clippy --workspace --tests -- \ + -D clippy::fn_to_numeric_cast_any \ -D warnings \ -A clippy::from-over-into \ -A clippy::upper-case-acronyms \ diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 96a50ee2e01..c7c60a90062 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -233,8 +233,7 @@ mod test { if request_json != expected_json { panic!( "json mismatch!\n\nobserved: {}\n\nexpected: {}\n\n", - request_json.to_string(), - expected_json.to_string() + request_json, expected_json, ) } self diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index ce2f65e70b4..f65e6471fdc 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -714,7 +714,7 @@ pub fn set_network_config( None } }) { - addr.push_str(&format!(":{}", enr_udp_port.to_string())); + addr.push_str(&format!(":{}", enr_udp_port)); } else { return Err( "enr-udp-port must be set for node to be discoverable with dns address" diff --git 
a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index 57585e2672f..72e651f7d18 100644 --- a/validator_client/src/initialized_validators.rs +++ b/validator_client/src/initialized_validators.rs @@ -282,10 +282,7 @@ pub fn load_pem_certificate>(pem_path: P) -> Result Result { - Url::parse(base_url)?.join(&format!( - "api/v1/eth2/sign/{}", - voting_public_key.to_string() - )) + Url::parse(base_url)?.join(&format!("api/v1/eth2/sign/{}", voting_public_key)) } /// Try to unlock `keystore` at `keystore_path` by prompting the user via `stdin`. From 6883e1bfb616a04f01b894efae9a7640098d9106 Mon Sep 17 00:00:00 2001 From: Akihito Nakano Date: Fri, 14 Jan 2022 00:38:04 +0000 Subject: [PATCH 21/56] Fix broken links in book (#2912) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Issue Addressed https://github.com/sigp/lighthouse/issues/2889 ## Additional Info I have checked that linkcheck has succeeded on the book built locally. 👌 ```shell $ cd book $ mdbook serve --open ... 2022-01-14 01:13:40 [INFO] (mdbook::book): Book building has started 2022-01-14 01:13:40 [INFO] (mdbook::book): Running the html backend $ linkcheck http://localhost:3000 Perfect. Checked 4495 links, 80 destination URLs (76 ignored). ``` Also I'll tackle running linkcheck on CI in another pull request. --- book/src/faq.md | 8 ++------ book/src/intro.md | 2 +- book/src/pi.md | 2 +- book/src/redundancy.md | 2 +- book/src/validator-inclusion.md | 2 +- 5 files changed, 6 insertions(+), 10 deletions(-) diff --git a/book/src/faq.md b/book/src/faq.md index ae43aec20e9..419f95dcbd3 100644 --- a/book/src/faq.md +++ b/book/src/faq.md @@ -55,14 +55,10 @@ voting period the validator might have to wait ~3.4 hours for next voting period. In times of very, very severe network issues, the network may even fail to vote in new Eth1 blocks, stopping all new validator deposits! 
-> Note: you can see the list of validators included in the beacon chain using -> our REST API: [/beacon/validators/all](./http/beacon.md#beaconvalidatorsall) - #### 2. Waiting for a validator to be activated If a validator has provided an invalid public key or signature, they will -_never_ be activated or even show up in -[/beacon/validators/all](./http/beacon.html#beaconvalidatorsall). +_never_ be activated. They will simply be forgotten by the beacon chain! But, if those parameters were correct, once the Eth1 delays have elapsed and the validator appears in the beacon chain, there's _another_ delay before the validator becomes "active" @@ -143,7 +139,7 @@ See [here](./slashing-protection.md#misplaced-slashing-database). If you are updating to new release binaries, it will be the same process as described [here.](./installation-binaries.md) -If you are updating by rebuilding from source, see [here.](./installation-source.md#updating-lighthouse) +If you are updating by rebuilding from source, see [here.](./installation-source.md#update-lighthouse) If you are running the docker image provided by Sigma Prime on Dockerhub, you can update to specific versions, for example: diff --git a/book/src/intro.md b/book/src/intro.md index d3a95c86311..b31deeef884 100644 --- a/book/src/intro.md +++ b/book/src/intro.md @@ -20,7 +20,7 @@ You may read this book from start to finish, or jump to some of these topics: - Follow the [Installation Guide](./installation.md) to install Lighthouse. - Learn about [becoming a mainnet validator](./mainnet-validator.md). - Get hacking with the [Development Environment Guide](./setup.md). -- Utilize the whole stack by starting a [local testnet](./local-testnets.md). +- Utilize the whole stack by starting a [local testnet](./setup.md#local-testnets). - Query the [RESTful HTTP API](./api.md) using `curl`. 
diff --git a/book/src/pi.md b/book/src/pi.md index 6bc274c9a36..24796d394e3 100644 --- a/book/src/pi.md +++ b/book/src/pi.md @@ -22,7 +22,7 @@ terminal and an Internet connection are necessary. ### 2. Install Packages -Install the [Ubuntu Dependencies](installation.md#dependencies-ubuntu). +Install the [Ubuntu Dependencies](installation-source.md#ubuntu). (I.e., run the `sudo apt install ...` command at that link). > Tips: diff --git a/book/src/redundancy.md b/book/src/redundancy.md index a50e3243748..b01a01dd268 100644 --- a/book/src/redundancy.md +++ b/book/src/redundancy.md @@ -86,7 +86,7 @@ now processing, validating, aggregating and forwarding *all* attestations, whereas previously it was likely only doing a fraction of this work. Without these flags, subscription to attestation subnets and aggregation of attestations is only performed for validators which [explicitly request -subscriptions](subscribe-api). +subscriptions][subscribe-api]. There are 64 subnets and each validator will result in a subscription to *at least* one subnet. So, using the two aforementioned flags will result in diff --git a/book/src/validator-inclusion.md b/book/src/validator-inclusion.md index 72e2e379c72..67e17fecad9 100644 --- a/book/src/validator-inclusion.md +++ b/book/src/validator-inclusion.md @@ -98,7 +98,7 @@ curl -X GET "http://localhost:5052/lighthouse/validator_inclusion/0/global" -H Returns a per-validator summary of how that validator performed during the current epoch. -The [Global Votes](#consensusglobal_votes) endpoint is the summation of all of these +The [Global Votes](#global) endpoint is the summation of all of these individual values, please see it for definitions of terms like "current_epoch", "previous_epoch" and "target_attester". 
From db95255aebf429850a3785fc5ddc03f8410e3b9c Mon Sep 17 00:00:00 2001 From: Age Manning Date: Fri, 14 Jan 2022 04:07:17 +0000 Subject: [PATCH 22/56] Remove gitter from readme (#2914) We dont check gitter, so shouldn't refer users to it. --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index 8c536752349..00900b8c3d7 100644 --- a/README.md +++ b/README.md @@ -66,8 +66,7 @@ of the Lighthouse book. ## Contact The best place for discussion is the [Lighthouse Discord -server](https://discord.gg/cyAszAh). Alternatively, you may use the -[sigp/lighthouse gitter](https://gitter.im/sigp/lighthouse). +server](https://discord.gg/cyAszAh). Sign up to the [Lighthouse Development Updates](http://eepurl.com/dh9Lvb) mailing list for email notifications about releases, network status and other important information. From 6f4102aab6a11aaf21e2f9df041343f7318fca85 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Fri, 14 Jan 2022 05:42:47 +0000 Subject: [PATCH 23/56] Network performance tuning (#2608) There is a pretty significant tradeoff between bandwidth and speed of gossipsub messages. We can reduce our bandwidth usage considerably at the cost of minimally delaying gossipsub messages. The impact of delaying messages has not been analyzed thoroughly yet, however this PR in conjunction with some gossipsub updates show considerable bandwidth reduction. This PR allows the user to set a CLI value (`network-load`) which is an integer in the range of 1 of 5 depending on their bandwidth appetite. 1 represents the least bandwidth but slowest message recieving and 5 represents the most bandwidth and fastest received message time. For low-bandwidth users it is likely to be more efficient to use a lower value. The default is set to 3, which currently represents a reduced bandwidth usage compared to previous version of this PR. The previous lighthouse versions are equivalent to setting the `network-load` CLI to 4. 
This PR is awaiting a few gossipsub updates before we can get it into lighthouse. --- Cargo.lock | 27 +++--- .../lighthouse_network/src/behaviour/mod.rs | 7 +- beacon_node/lighthouse_network/src/config.rs | 87 +++++++++++++++++-- .../lighthouse_network/src/discovery/mod.rs | 7 +- .../src/peer_manager/mod.rs | 2 +- beacon_node/lighthouse_network/src/service.rs | 3 +- beacon_node/src/cli.rs | 9 ++ beacon_node/src/config.rs | 7 ++ 8 files changed, 120 insertions(+), 29 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ec56aab4992..bc53b134fae 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -111,7 +111,7 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.3", + "getrandom 0.2.4", "once_cell", "version_check", ] @@ -2200,9 +2200,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" +checksum = "418d37c8b1d42553c93648be529cb70f920d3baf8ef469b74b9638df426e0b4c" dependencies = [ "cfg-if", "libc", @@ -2945,7 +2945,7 @@ dependencies = [ "bytes", "futures", "futures-timer", - "getrandom 0.2.3", + "getrandom 0.2.4", "instant", "lazy_static", "libp2p-core 0.31.0", @@ -4065,9 +4065,9 @@ dependencies = [ [[package]] name = "openssl-probe" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" @@ -4740,7 +4740,7 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" dependencies = [ - "getrandom 0.2.3", + "getrandom 0.2.4", ] [[package]] @@ -4810,7 +4810,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64" dependencies = [ - "getrandom 0.2.3", + "getrandom 0.2.4", "redox_syscall", ] @@ -4851,15 +4851,16 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.8" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c4e0a76dc12a116108933f6301b95e83634e0c47b0afbed6abbaa0601e99258" +checksum = "87f242f1488a539a79bac6dbe7c8609ae43b7914b7736210f239a37cccb32525" dependencies = [ "base64 0.13.0", "bytes", "encoding_rs", "futures-core", "futures-util", + "h2", "http", "http-body", "hyper", @@ -5614,9 +5615,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ecab6c735a6bb4139c0caafd0cc3635748bbb3acf4550e8138122099251f309" +checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" [[package]] name = "snap" @@ -6633,7 +6634,7 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom 0.2.3", + "getrandom 0.2.4", "serde", ] diff --git a/beacon_node/lighthouse_network/src/behaviour/mod.rs b/beacon_node/lighthouse_network/src/behaviour/mod.rs index 32a87166b2f..61ba855f6a4 100644 --- a/beacon_node/lighthouse_network/src/behaviour/mod.rs +++ b/beacon_node/lighthouse_network/src/behaviour/mod.rs @@ -2,7 +2,7 @@ use crate::behaviour::gossipsub_scoring_parameters::{ lighthouse_gossip_thresholds, PeerScoreSettings, }; use crate::config::gossipsub_config; -use 
crate::discovery::{subnet_predicate, Discovery, DiscoveryEvent, TARGET_SUBNET_PEERS}; +use crate::discovery::{subnet_predicate, Discovery, DiscoveryEvent}; use crate::peer_manager::{ config::Config as PeerManagerCfg, peerdb::score::PeerAction, peerdb::score::ReportSource, ConnectionDirection, PeerManager, PeerManagerEvent, @@ -52,6 +52,9 @@ use types::{ pub mod gossipsub_scoring_parameters; +/// The number of peers we target per subnet for discovery queries. +pub const TARGET_SUBNET_PEERS: usize = 2; + const MAX_IDENTIFY_ADDRESSES: usize = 10; /// Identifier of requests sent by a peer. @@ -227,7 +230,7 @@ impl Behaviour { max_subscriptions_per_request: 150, // 148 in theory = (64 attestation + 4 sync committee + 6 core topics) * 2 }; - config.gs_config = gossipsub_config(ctx.fork_context.clone()); + config.gs_config = gossipsub_config(config.network_load, ctx.fork_context.clone()); // If metrics are enabled for gossipsub build the configuration let gossipsub_metrics = ctx diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 789242e8d49..4cafcf62b1f 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -20,8 +20,6 @@ use types::{ForkContext, ForkName}; const GOSSIP_MAX_SIZE: usize = 1_048_576; // 1M /// The maximum transmit size of gossip messages in bytes post-merge. const GOSSIP_MAX_SIZE_POST_MERGE: usize = 10 * 1_048_576; // 10M -/// This is a constant to be used in discovery. The lower bound of the gossipsub mesh. -pub const MESH_N_LOW: usize = 6; /// The cache time is set to accommodate the circulation time of an attestation. /// @@ -116,6 +114,10 @@ pub struct Config { /// runtime. pub import_all_attestations: bool, + /// A setting specifying a range of values that tune the network parameters of lighthouse. The + /// lower the value the less bandwidth used, but the slower messages will be received. 
+ pub network_load: u8, + /// Indicates if the user has set the network to be in private mode. Currently this /// prevents sending client identifying information over identify. pub private: bool, @@ -197,6 +199,7 @@ impl Default for Config { client_version: lighthouse_version::version_with_platform(), disable_discovery: false, upnp_enabled: true, + network_load: 3, private: false, subscribe_all_subnets: false, import_all_attestations: false, @@ -207,8 +210,72 @@ impl Default for Config { } } +/// Controls sizes of gossipsub meshes to tune a Lighthouse node's bandwidth/performance. +pub struct NetworkLoad { + pub name: &'static str, + pub mesh_n_low: usize, + pub outbound_min: usize, + pub mesh_n: usize, + pub mesh_n_high: usize, + pub gossip_lazy: usize, + pub history_gossip: usize, +} + +impl From for NetworkLoad { + fn from(load: u8) -> NetworkLoad { + match load { + 1 => NetworkLoad { + name: "Low", + mesh_n_low: 1, + outbound_min: 1, + mesh_n: 3, + mesh_n_high: 4, + gossip_lazy: 3, + history_gossip: 12, + }, + 2 => NetworkLoad { + name: "Low", + mesh_n_low: 2, + outbound_min: 2, + mesh_n: 4, + mesh_n_high: 8, + gossip_lazy: 3, + history_gossip: 12, + }, + 3 => NetworkLoad { + name: "Average", + mesh_n_low: 3, + outbound_min: 2, + mesh_n: 5, + mesh_n_high: 10, + gossip_lazy: 3, + history_gossip: 12, + }, + 4 => NetworkLoad { + name: "Average", + mesh_n_low: 4, + outbound_min: 3, + mesh_n: 8, + mesh_n_high: 12, + gossip_lazy: 3, + history_gossip: 12, + }, + // 5 and above + _ => NetworkLoad { + name: "High", + mesh_n_low: 5, + outbound_min: 3, + mesh_n: 10, + mesh_n_high: 15, + gossip_lazy: 5, + history_gossip: 12, + }, + } + } +} + /// Return a Lighthouse specific `GossipsubConfig` where the `message_id_fn` depends on the current fork. 
-pub fn gossipsub_config(fork_context: Arc) -> GossipsubConfig { +pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> GossipsubConfig { // The function used to generate a gossipsub message id // We use the first 8 bytes of SHA256(data) for content addressing let fast_gossip_message_id = @@ -250,17 +317,21 @@ pub fn gossipsub_config(fork_context: Arc) -> GossipsubConfig { )[..20], ) }; + + let load = NetworkLoad::from(network_load); + GossipsubConfigBuilder::default() .max_transmit_size(gossip_max_size(is_merge_enabled)) .heartbeat_interval(Duration::from_millis(700)) - .mesh_n(8) - .mesh_n_low(MESH_N_LOW) - .mesh_n_high(12) - .gossip_lazy(6) + .mesh_n(load.mesh_n) + .mesh_n_low(load.mesh_n_low) + .mesh_outbound_min(load.outbound_min) + .mesh_n_high(load.mesh_n_high) + .gossip_lazy(load.gossip_lazy) .fanout_ttl(Duration::from_secs(60)) .history_length(12) .max_messages_per_rpc(Some(500)) // Responses to IWANT can be quite large - .history_gossip(3) + .history_gossip(load.history_gossip) .validate_messages() // require validation before propagation .validation_mode(ValidationMode::Anonymous) .duplicate_cache_time(DUPLICATE_CACHE_TIME) diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 33e8c2c1704..34c29a44d17 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -7,7 +7,8 @@ pub(crate) mod enr; pub mod enr_ext; // Allow external use of the lighthouse ENR builder -use crate::{config, metrics}; +use crate::behaviour::TARGET_SUBNET_PEERS; +use crate::metrics; use crate::{error, Enr, NetworkConfig, NetworkGlobals, Subnet, SubnetDiscovery}; use discv5::{enr::NodeId, Discv5, Discv5Event}; pub use enr::{ @@ -47,8 +48,6 @@ pub use subnet_predicate::subnet_predicate; /// Local ENR storage filename. 
pub const ENR_FILENAME: &str = "enr.dat"; -/// Target number of peers we'd like to have connected to a given long-lived subnet. -pub const TARGET_SUBNET_PEERS: usize = config::MESH_N_LOW; /// Target number of peers to search for given a grouped subnet query. const TARGET_PEERS_FOR_GROUPED_QUERY: usize = 6; /// Number of times to attempt a discovery request. @@ -692,7 +691,7 @@ impl Discovery { return false; } - let target_peers = TARGET_SUBNET_PEERS - peers_on_subnet; + let target_peers = TARGET_SUBNET_PEERS.saturating_sub(peers_on_subnet); trace!(self.log, "Discovery query started for subnet"; "subnet_query" => ?subnet_query, "connected_peers_on_subnet" => peers_on_subnet, diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 318bdfcdf31..6b8f6fff608 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -1,6 +1,6 @@ //! Implementation of Lighthouse's peer management system. 
-use crate::discovery::TARGET_SUBNET_PEERS; +use crate::behaviour::TARGET_SUBNET_PEERS; use crate::rpc::{GoodbyeReason, MetaData, Protocol, RPCError, RPCResponseErrorCode}; use crate::{error, metrics, Gossipsub}; use crate::{NetworkGlobals, PeerId}; diff --git a/beacon_node/lighthouse_network/src/service.rs b/beacon_node/lighthouse_network/src/service.rs index cbb11cae4bb..0ccdd28fdff 100644 --- a/beacon_node/lighthouse_network/src/service.rs +++ b/beacon_node/lighthouse_network/src/service.rs @@ -1,6 +1,7 @@ use crate::behaviour::{ save_metadata_to_disk, Behaviour, BehaviourEvent, PeerRequestId, Request, Response, }; +use crate::config::NetworkLoad; use crate::discovery::enr; use crate::multiaddr::Protocol; use crate::rpc::{ @@ -107,7 +108,7 @@ impl Service { &log, )); - info!(log, "Libp2p Service"; "peer_id" => %enr.peer_id()); + info!(log, "Libp2p Starting"; "peer_id" => %enr.peer_id(), "bandwidth_config" => format!("{}-{}", config.network_load, NetworkLoad::from(config.network_load).name)); let discovery_string = if config.disable_discovery { "None".into() } else { diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 57de6c1b914..4c2960c9d6a 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -104,6 +104,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .help("One or more comma-delimited base64-encoded ENR's to bootstrap the p2p network. Multiaddr is also supported.") .takes_value(true), ) + .arg( + Arg::with_name("network-load") + .long("network-load") + .value_name("INTEGER") + .help("Lighthouse's network can be tuned for bandwidth/performance. Setting this to a high value, will increase the bandwidth lighthouse uses, increasing the likelihood of redundant information in exchange for faster communication. This can increase profit of validators marginally by receiving messages faster on the network. Lower values decrease bandwidth usage, but makes communication slower which can lead to validator performance reduction. 
Values are in the range [1,5].") + .default_value("3") + .set(clap::ArgSettings::Hidden) + .takes_value(true), + ) .arg( Arg::with_name("disable-upnp") .long("disable-upnp") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index f65e6471fdc..df5cf143704 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -626,6 +626,13 @@ pub fn set_network_config( config.discovery_port = port; } + if let Some(value) = cli_args.value_of("network-load") { + let network_load = value + .parse::() + .map_err(|_| format!("Invalid integer: {}", value))?; + config.network_load = network_load; + } + if let Some(boot_enr_str) = cli_args.value_of("boot-nodes") { let mut enrs: Vec = vec![]; let mut multiaddrs: Vec = vec![]; From 1c667ad3cae662cfccd596e5fb4ffd8be142fc10 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Fri, 14 Jan 2022 05:42:48 +0000 Subject: [PATCH 24/56] PeerDB Status unknown bug fix (#2907) ## Issue Addressed The PeerDB was getting out of sync with the number of disconnected peers compared to the actual count. As this value determines how many we store in our cache, over time the cache was depleting and we were removing peers immediately resulting in errors that manifest as unknown peers for some operations. The error occurs when dialing a peer fails, we were not correctly updating the peerdb counter because the increment to the counter was placed in the wrong order and was therefore not incrementing the count. This PR corrects this. --- .../src/peer_manager/peerdb.rs | 100 +++++++++++++----- 1 file changed, 71 insertions(+), 29 deletions(-) diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index f70f35b689d..bd735c02eb3 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -609,28 +609,8 @@ impl PeerDB { /// A peer is being dialed. 
// VISIBILITY: Only the peer manager can adjust the connection state - // TODO: Remove the internal logic in favour of using the update_connection_state() function. - // This will be compatible once the ENR parameter is removed in the imminent behaviour tests PR. pub(super) fn dialing_peer(&mut self, peer_id: &PeerId, enr: Option) { - let info = self.peers.entry(*peer_id).or_default(); - if let Some(enr) = enr { - info.set_enr(enr); - } - - if let Err(e) = info.set_dialing_peer() { - error!(self.log, "{}", e; "peer_id" => %peer_id); - } - - // If the peer was banned, remove the banned peer and addresses. - if info.is_banned() { - self.banned_peers_count - .remove_banned_peer(info.seen_ip_addresses()); - } - - // If the peer was disconnected, reduce the disconnected peer count. - if info.is_disconnected() { - self.disconnected_peers = self.disconnected_peers().count().saturating_sub(1); - } + self.update_connection_state(peer_id, NewConnectionState::Dialing { enr }); } /// Sets a peer as connected with an ingoing connection. @@ -686,7 +666,9 @@ impl PeerDB { // connection state for an unknown peer. if !matches!( new_state, - NewConnectionState::Connected { .. } | NewConnectionState::Disconnecting { .. } + NewConnectionState::Connected { .. } + | NewConnectionState::Disconnecting { .. } + | NewConnectionState::Dialing { .. 
} ) { warn!(log_ref, "Updating state of unknown peer"; "peer_id" => %peer_id, "new_state" => ?new_state); @@ -708,7 +690,11 @@ impl PeerDB { // Handle all the possible state changes match (info.connection_status().clone(), new_state) { - /* Handle the transition to a connected state */ + /* CONNECTED + * + * + * Handles the transition to a connected state + */ ( current_state, NewConnectionState::Connected { @@ -765,7 +751,47 @@ impl PeerDB { } } - /* Handle the transition to the disconnected state */ + /* DIALING + * + * + * Handles the transition to a dialing state + */ + (old_state, NewConnectionState::Dialing { enr }) => { + match old_state { + PeerConnectionStatus::Banned { .. } => { + warn!(self.log, "Dialing a banned peer"; "peer_id" => %peer_id); + self.banned_peers_count + .remove_banned_peer(info.seen_ip_addresses()); + } + PeerConnectionStatus::Disconnected { .. } => { + self.disconnected_peers = self.disconnected_peers.saturating_sub(1); + } + PeerConnectionStatus::Connected { .. } => { + warn!(self.log, "Dialing an already connected peer"; "peer_id" => %peer_id) + } + PeerConnectionStatus::Dialing { .. } => { + warn!(self.log, "Dialing an already dialing peer"; "peer_id" => %peer_id) + } + PeerConnectionStatus::Disconnecting { .. } => { + warn!(self.log, "Dialing a disconnecting peer"; "peer_id" => %peer_id) + } + PeerConnectionStatus::Unknown => {} // default behaviour + } + // Update the ENR if one is known. + if let Some(enr) = enr { + info.set_enr(enr); + } + + if let Err(e) = info.set_dialing_peer() { + error!(self.log, "{}", e; "peer_id" => %peer_id); + } + } + + /* DISCONNECTED + * + * + * Handle the transition to the disconnected state + */ (old_state, NewConnectionState::Disconnected) => { // Remove all subnets for disconnected peers. 
info.clear_subnets(); @@ -799,7 +825,11 @@ impl PeerDB { } } - /* Handle the transition to the disconnecting state */ + /* DISCONNECTING + * + * + * Handles the transition to a disconnecting state + */ (PeerConnectionStatus::Banned { .. }, NewConnectionState::Disconnecting { to_ban }) => { error!(self.log, "Disconnecting from a banned peer"; "peer_id" => %peer_id); info.set_connection_status(PeerConnectionStatus::Disconnecting { to_ban }); @@ -821,7 +851,11 @@ impl PeerDB { info.set_connection_status(PeerConnectionStatus::Disconnecting { to_ban }); } - /* Handle transitioning to the banned state */ + /* BANNED + * + * + * Handles the transition to a banned state + */ (PeerConnectionStatus::Disconnected { .. }, NewConnectionState::Banned) => { // It is possible to ban a peer that is currently disconnected. This can occur when // there are many events that score it poorly and are processed after it has disconnected. @@ -879,7 +913,11 @@ impl PeerDB { return Some(BanOperation::ReadyToBan(banned_ips)); } - /* Handle the connection state of unbanning a peer */ + /* UNBANNED + * + * + * Handles the transition to an unbanned state + */ (old_state, NewConnectionState::Unbanned) => { if matches!(info.score_state(), ScoreState::Banned) { error!(self.log, "Unbanning a banned peer"; "peer_id" => %peer_id); @@ -899,8 +937,7 @@ impl PeerDB { // Increment the disconnected count and reduce the banned count self.banned_peers_count .remove_banned_peer(info.seen_ip_addresses()); - self.disconnected_peers = - self.disconnected_peers().count().saturating_add(1); + self.disconnected_peers = self.disconnected_peers.saturating_add(1); } } } @@ -1059,6 +1096,11 @@ enum NewConnectionState { /// Whether the peer should be banned after the disconnect occurs. to_ban: bool, }, + /// We are dialing this peer. + Dialing { + /// An optional known ENR for the peer we are dialing. + enr: Option, + }, /// The peer has been disconnected from our local node. 
Disconnected, /// The peer has been banned and actions to shift the peer to the banned state should be From ceeab02e3a6a4f518a89392f76c7574fcd17d6df Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 14 Jan 2022 07:20:54 +0000 Subject: [PATCH 25/56] Lazy hashing for SignedBeaconBlock in sync (#2916) ## Proposed Changes Allocate less memory in sync by hashing the `SignedBeaconBlock`s in a batch directly, rather than going via SSZ bytes. Credit to @paulhauner for finding this source of temporary allocations. --- Cargo.lock | 1 + .../network/src/sync/range_sync/batch.rs | 3 +-- consensus/ssz_types/Cargo.toml | 1 + consensus/ssz_types/src/bitfield.rs | 4 +++- consensus/ssz_types/src/fixed_vector.rs | 4 +++- consensus/ssz_types/src/variable_list.rs | 4 +++- consensus/types/src/attestation.rs | 6 +++++- consensus/types/src/beacon_block.rs | 9 ++++++--- consensus/types/src/beacon_block_body.rs | 9 ++++++--- consensus/types/src/deposit.rs | 4 +++- consensus/types/src/deposit_data.rs | 4 +++- consensus/types/src/execution_payload.rs | 4 +++- consensus/types/src/graffiti.rs | 2 +- consensus/types/src/signed_beacon_block.rs | 9 ++++++--- .../types/src/signed_beacon_block_header.rs | 18 +----------------- consensus/types/src/signed_voluntary_exit.rs | 4 +++- consensus/types/src/sync_aggregate.rs | 6 +++++- consensus/types/src/voluntary_exit.rs | 4 +++- crypto/bls/src/generic_aggregate_signature.rs | 13 +++++++++++++ crypto/bls/src/generic_signature.rs | 8 ++++++++ crypto/bls/src/generic_signature_bytes.rs | 7 +++++++ crypto/bls/src/impls/fake_crypto.rs | 8 ++++++++ 22 files changed, 93 insertions(+), 39 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bc53b134fae..bef8b437ba7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1701,6 +1701,7 @@ name = "eth2_ssz_types" version = "0.2.2" dependencies = [ "arbitrary", + "derivative", "eth2_serde_utils", "eth2_ssz", "serde", diff --git a/beacon_node/network/src/sync/range_sync/batch.rs 
b/beacon_node/network/src/sync/range_sync/batch.rs index 70e27b5a0ac..e0b15cb498e 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -1,7 +1,6 @@ use crate::sync::RequestId; use lighthouse_network::rpc::methods::BlocksByRangeRequest; use lighthouse_network::PeerId; -use ssz::Encode; use std::collections::HashSet; use std::hash::{Hash, Hasher}; use std::ops::Sub; @@ -390,7 +389,7 @@ impl Attempt { #[allow(clippy::ptr_arg)] fn new(peer_id: PeerId, blocks: &Vec>) -> Self { let mut hasher = std::collections::hash_map::DefaultHasher::new(); - blocks.as_ssz_bytes().hash(&mut hasher); + blocks.hash(&mut hasher); let hash = hasher.finish(); Attempt { peer_id, hash } } diff --git a/consensus/ssz_types/Cargo.toml b/consensus/ssz_types/Cargo.toml index 4d4b073f4a1..b71de4ccdbe 100644 --- a/consensus/ssz_types/Cargo.toml +++ b/consensus/ssz_types/Cargo.toml @@ -17,6 +17,7 @@ eth2_serde_utils = "0.1.1" eth2_ssz = "0.4.1" typenum = "1.12.0" arbitrary = { version = "1.0", features = ["derive"], optional = true } +derivative = "2.1.1" [dev-dependencies] serde_json = "1.0.58" diff --git a/consensus/ssz_types/src/bitfield.rs b/consensus/ssz_types/src/bitfield.rs index afecd8ce7d8..dfad3aedcb8 100644 --- a/consensus/ssz_types/src/bitfield.rs +++ b/consensus/ssz_types/src/bitfield.rs @@ -1,6 +1,7 @@ use crate::tree_hash::bitfield_bytes_tree_hash_root; use crate::Error; use core::marker::PhantomData; +use derivative::Derivative; use eth2_serde_utils::hex::{encode as hex_encode, PrefixedHexVisitor}; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; @@ -87,7 +88,8 @@ pub type BitVector = Bitfield>; /// The internal representation of the bitfield is the same as that required by SSZ. The lowest /// byte (by `Vec` index) stores the lowest bit-indices and the right-most bit stores the lowest /// bit-index. E.g., `vec![0b0000_0001, 0b0000_0010]` has bits `0, 9` set. 
-#[derive(Clone, Debug, PartialEq)] +#[derive(Clone, Debug, Derivative)] +#[derivative(PartialEq, Hash(bound = ""))] pub struct Bitfield { bytes: Vec, len: usize, diff --git a/consensus/ssz_types/src/fixed_vector.rs b/consensus/ssz_types/src/fixed_vector.rs index 8b8d660fb9e..ca5d40f14fa 100644 --- a/consensus/ssz_types/src/fixed_vector.rs +++ b/consensus/ssz_types/src/fixed_vector.rs @@ -1,5 +1,6 @@ use crate::tree_hash::vec_tree_hash_root; use crate::Error; +use derivative::Derivative; use serde_derive::{Deserialize, Serialize}; use std::marker::PhantomData; use std::ops::{Deref, DerefMut, Index, IndexMut}; @@ -44,7 +45,8 @@ pub use typenum; /// let long: FixedVector<_, typenum::U5> = FixedVector::from(base); /// assert_eq!(&long[..], &[1, 2, 3, 4, 0]); /// ``` -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, Derivative)] +#[derivative(PartialEq, Hash(bound = "T: std::hash::Hash"))] #[serde(transparent)] pub struct FixedVector { vec: Vec, diff --git a/consensus/ssz_types/src/variable_list.rs b/consensus/ssz_types/src/variable_list.rs index 242a55b2c94..1414d12c8c3 100644 --- a/consensus/ssz_types/src/variable_list.rs +++ b/consensus/ssz_types/src/variable_list.rs @@ -1,5 +1,6 @@ use crate::tree_hash::vec_tree_hash_root; use crate::Error; +use derivative::Derivative; use serde_derive::{Deserialize, Serialize}; use std::marker::PhantomData; use std::ops::{Deref, DerefMut, Index, IndexMut}; @@ -46,7 +47,8 @@ pub use typenum; /// // Push a value to if it _does_ exceed the maximum. 
/// assert!(long.push(6).is_err()); /// ``` -#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, Derivative)] +#[derivative(PartialEq, Eq, Hash(bound = "T: std::hash::Hash"))] #[serde(transparent)] pub struct VariableList { vec: Vec, diff --git a/consensus/types/src/attestation.rs b/consensus/types/src/attestation.rs index 66d9e78a85b..1c9ec3bc4da 100644 --- a/consensus/types/src/attestation.rs +++ b/consensus/types/src/attestation.rs @@ -1,3 +1,4 @@ +use derivative::Derivative; use safe_arith::ArithError; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -23,7 +24,10 @@ pub enum Error { /// /// Spec v0.12.1 #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, Derivative, +)] +#[derivative(PartialEq, Hash(bound = "T: EthSpec"))] #[serde(bound = "T: EthSpec")] pub struct Attestation { pub aggregation_bits: BitList, diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index a83be72a06e..e524f0c1272 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -5,6 +5,7 @@ use crate::beacon_block_body::{ use crate::test_utils::TestRandom; use crate::*; use bls::Signature; +use derivative::Derivative; use serde_derive::{Deserialize, Serialize}; use ssz::{Decode, DecodeError}; use ssz_derive::{Decode, Encode}; @@ -19,15 +20,16 @@ use tree_hash_derive::TreeHash; variant_attributes( derive( Debug, - PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, - TestRandom + TestRandom, + Derivative, ), + derivative(PartialEq, Hash(bound = "T: EthSpec")), serde(bound = "T: EthSpec", deny_unknown_fields), cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)), ), @@ -36,7 +38,8 @@ use 
tree_hash_derive::TreeHash; tree_hash(enum_behaviour = "transparent") ) )] -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, TreeHash)] +#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative)] +#[derivative(PartialEq, Hash(bound = "T: EthSpec"))] #[serde(untagged)] #[serde(bound = "T: EthSpec")] #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index d3d005462fd..c4df4f27717 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -1,5 +1,6 @@ use crate::test_utils::TestRandom; use crate::*; +use derivative::Derivative; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use ssz_types::VariableList; @@ -15,22 +16,24 @@ use tree_hash_derive::TreeHash; variant_attributes( derive( Debug, - PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, - TestRandom + TestRandom, + Derivative, ), + derivative(PartialEq, Hash(bound = "T: EthSpec")), serde(bound = "T: EthSpec", deny_unknown_fields), cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)) ), cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") )] -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, Derivative)] +#[derivative(PartialEq, Hash(bound = "T: EthSpec"))] #[serde(untagged)] #[serde(bound = "T: EthSpec")] #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] diff --git a/consensus/types/src/deposit.rs b/consensus/types/src/deposit.rs index 4b201360abf..a347cf675cf 100644 --- a/consensus/types/src/deposit.rs +++ b/consensus/types/src/deposit.rs @@ -12,7 +12,9 @@ pub const DEPOSIT_TREE_DEPTH: usize = 32; /// /// Spec v0.12.1 #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] 
-#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, +)] pub struct Deposit { pub proof: FixedVector, pub data: DepositData, diff --git a/consensus/types/src/deposit_data.rs b/consensus/types/src/deposit_data.rs index d984f168f1b..6c5444e110f 100644 --- a/consensus/types/src/deposit_data.rs +++ b/consensus/types/src/deposit_data.rs @@ -11,7 +11,9 @@ use tree_hash_derive::TreeHash; /// /// Spec v0.12.1 #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, +)] pub struct DepositData { pub pubkey: PublicKeyBytes, pub withdrawal_credentials: Hash256, diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 1b29fb34f7b..2fb253f12c1 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -1,4 +1,5 @@ use crate::{test_utils::TestRandom, *}; +use derivative::Derivative; use serde_derive::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; @@ -9,8 +10,9 @@ pub type Transaction = VariableList; #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Default, Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + Default, Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, Derivative, )] +#[derivative(PartialEq, Hash(bound = "T: EthSpec"))] #[serde(bound = "T: EthSpec")] pub struct ExecutionPayload { pub parent_hash: Hash256, diff --git a/consensus/types/src/graffiti.rs b/consensus/types/src/graffiti.rs index cecd6c2018e..f5f74b601b0 100644 --- a/consensus/types/src/graffiti.rs +++ 
b/consensus/types/src/graffiti.rs @@ -12,7 +12,7 @@ use tree_hash::TreeHash; pub const GRAFFITI_BYTES_LEN: usize = 32; /// The 32-byte `graffiti` field on a beacon block. -#[derive(Default, Debug, PartialEq, Clone, Copy, Serialize, Deserialize)] +#[derive(Default, Debug, PartialEq, Hash, Clone, Copy, Serialize, Deserialize)] #[serde(transparent)] #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] pub struct Graffiti(#[serde(with = "serde_graffiti")] pub [u8; GRAFFITI_BYTES_LEN]); diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 383805f97fb..8d7df0cb02c 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -1,5 +1,6 @@ use crate::*; use bls::Signature; +use derivative::Derivative; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use std::fmt; @@ -41,19 +42,21 @@ impl From for Hash256 { variant_attributes( derive( Debug, - PartialEq, Clone, Serialize, Deserialize, Encode, Decode, - TreeHash + TreeHash, + Derivative, ), + derivative(PartialEq, Hash(bound = "E: EthSpec")), cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)), serde(bound = "E: EthSpec") ) )] -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, TreeHash)] +#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative)] +#[derivative(PartialEq, Hash(bound = "E: EthSpec"))] #[serde(untagged)] #[serde(bound = "E: EthSpec")] #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] diff --git a/consensus/types/src/signed_beacon_block_header.rs b/consensus/types/src/signed_beacon_block_header.rs index df7888ec25b..dc786beb6e9 100644 --- a/consensus/types/src/signed_beacon_block_header.rs +++ b/consensus/types/src/signed_beacon_block_header.rs @@ -2,11 +2,8 @@ use crate::{ test_utils::TestRandom, BeaconBlockHeader, ChainSpec, Domain, EthSpec, Fork, Hash256, PublicKey, Signature, SignedRoot, }; -use 
derivative::Derivative; use serde_derive::{Deserialize, Serialize}; -use ssz::Encode; use ssz_derive::{Decode, Encode}; -use std::hash::{Hash, Hasher}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; @@ -15,26 +12,13 @@ use tree_hash_derive::TreeHash; /// Spec v0.12.1 #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Derivative, Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] -#[derivative(PartialEq, Eq)] pub struct SignedBeaconBlockHeader { pub message: BeaconBlockHeader, pub signature: Signature, } -/// Implementation of non-crypto-secure `Hash`, for use with `HashMap` and `HashSet`. -/// -/// Guarantees `header1 == header2 -> hash(header1) == hash(header2)`. -/// -/// Used in the slasher. -impl Hash for SignedBeaconBlockHeader { - fn hash(&self, state: &mut H) { - self.message.hash(state); - self.signature.as_ssz_bytes().hash(state); - } -} - impl SignedBeaconBlockHeader { /// Verify that this block header was signed by `pubkey`. 
pub fn verify_signature( diff --git a/consensus/types/src/signed_voluntary_exit.rs b/consensus/types/src/signed_voluntary_exit.rs index 49a9b534559..69f0e6e2c9f 100644 --- a/consensus/types/src/signed_voluntary_exit.rs +++ b/consensus/types/src/signed_voluntary_exit.rs @@ -10,7 +10,9 @@ use tree_hash_derive::TreeHash; /// /// Spec v0.12.1 #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, +)] pub struct SignedVoluntaryExit { pub message: VoluntaryExit, pub signature: Signature, diff --git a/consensus/types/src/sync_aggregate.rs b/consensus/types/src/sync_aggregate.rs index 781c67374eb..2292b021118 100644 --- a/consensus/types/src/sync_aggregate.rs +++ b/consensus/types/src/sync_aggregate.rs @@ -1,6 +1,7 @@ use crate::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use crate::test_utils::TestRandom; use crate::{AggregateSignature, BitVector, EthSpec, SyncCommitteeContribution}; +use derivative::Derivative; use safe_arith::{ArithError, SafeArith}; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -20,7 +21,10 @@ impl From for Error { } #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, Derivative, +)] +#[derivative(PartialEq, Hash(bound = "T: EthSpec"))] #[serde(bound = "T: EthSpec")] pub struct SyncAggregate { pub sync_committee_bits: BitVector, diff --git a/consensus/types/src/voluntary_exit.rs b/consensus/types/src/voluntary_exit.rs index 66d2f009479..cc10632d07c 100644 --- a/consensus/types/src/voluntary_exit.rs +++ b/consensus/types/src/voluntary_exit.rs @@ -12,7 +12,9 @@ use tree_hash_derive::TreeHash; /// /// 
Spec v0.12.1 #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, +)] pub struct VoluntaryExit { /// Earliest epoch when voluntary exit can be processed. pub epoch: Epoch, diff --git a/crypto/bls/src/generic_aggregate_signature.rs b/crypto/bls/src/generic_aggregate_signature.rs index 2001de042b4..fdb59626fb2 100644 --- a/crypto/bls/src/generic_aggregate_signature.rs +++ b/crypto/bls/src/generic_aggregate_signature.rs @@ -9,6 +9,7 @@ use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; use ssz::{Decode, Encode}; use std::fmt; +use std::hash::{Hash, Hasher}; use std::marker::PhantomData; use tree_hash::TreeHash; @@ -264,6 +265,18 @@ where impl_tree_hash!(SIGNATURE_BYTES_LEN); } +/// Hashes the `self.serialize()` bytes. +#[allow(clippy::derive_hash_xor_eq)] +impl Hash for GenericAggregateSignature +where + Sig: TSignature, + AggSig: TAggregateSignature, +{ + fn hash(&self, state: &mut H) { + self.serialize().hash(state); + } +} + impl fmt::Display for GenericAggregateSignature where Sig: TSignature, diff --git a/crypto/bls/src/generic_signature.rs b/crypto/bls/src/generic_signature.rs index f3aeeb5598f..10ef75fc680 100644 --- a/crypto/bls/src/generic_signature.rs +++ b/crypto/bls/src/generic_signature.rs @@ -7,6 +7,7 @@ use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; use ssz::{Decode, Encode}; use std::fmt; +use std::hash::{Hash, Hasher}; use std::marker::PhantomData; use tree_hash::TreeHash; @@ -145,6 +146,13 @@ impl> TreeHash for GenericSignature> Hash for GenericSignature { + fn hash(&self, state: &mut H) { + self.serialize().hash(state); + } +} + impl> fmt::Display for GenericSignature { impl_display!(); } diff --git a/crypto/bls/src/generic_signature_bytes.rs 
b/crypto/bls/src/generic_signature_bytes.rs index b5c0284971c..aa33c90d0c3 100644 --- a/crypto/bls/src/generic_signature_bytes.rs +++ b/crypto/bls/src/generic_signature_bytes.rs @@ -9,6 +9,7 @@ use serde::ser::{Serialize, Serializer}; use ssz::{Decode, Encode}; use std::convert::TryInto; use std::fmt; +use std::hash::{Hash, Hasher}; use std::marker::PhantomData; use tree_hash::TreeHash; @@ -84,6 +85,12 @@ impl PartialEq for GenericSignatureBytes { } } +impl Hash for GenericSignatureBytes { + fn hash(&self, hasher: &mut H) { + self.bytes.hash(hasher); + } +} + /// Serializes the `GenericSignature` in compressed form, storing the bytes in the newly created `Self`. impl From> for GenericSignatureBytes where diff --git a/crypto/bls/src/impls/fake_crypto.rs b/crypto/bls/src/impls/fake_crypto.rs index 35582df380e..f2d8b79b986 100644 --- a/crypto/bls/src/impls/fake_crypto.rs +++ b/crypto/bls/src/impls/fake_crypto.rs @@ -113,6 +113,14 @@ impl PartialEq for Signature { } } +impl Eq for Signature {} + +impl std::hash::Hash for Signature { + fn hash(&self, hasher: &mut H) { + self.0.hash(hasher); + } +} + #[derive(Clone)] pub struct AggregateSignature([u8; SIGNATURE_BYTES_LEN]); From c11253a82f5d93a8bf26b92efda958b7c31e5728 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 14 Jan 2022 07:20:55 +0000 Subject: [PATCH 26/56] Remove grandparents from snapshot cache (#2917) ## Issue Addressed NA ## Proposed Changes In https://github.com/sigp/lighthouse/pull/2832 we made some changes to the `SnapshotCache` to help deal with the one-block reorgs seen on mainnet (and testnets). I believe the change in #2832 is good and we should keep it, but I think that in its present form it is causing the `SnapshotCache` to hold onto states that it doesn't need anymore. For example, a skip slot will result in one more `BeaconSnapshot` being stored in the cache. This PR adds a new type of pruning that happens after a block is inserted to the cache. 
We will remove any snapshot from the cache that is a *grandparent* of the block being imported. Since we know the grandparent has two valid blocks built atop it, it is not at risk from a one-block re-org. ## Additional Info NA --- beacon_node/beacon_chain/src/beacon_chain.rs | 7 +++ beacon_node/beacon_chain/src/metrics.rs | 18 ++++++++ .../beacon_chain/src/snapshot_cache.rs | 44 +++++++++++++++++-- 3 files changed, 65 insertions(+), 4 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index f2a2271542b..4198425a7e7 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2796,6 +2796,7 @@ impl BeaconChain { beacon_block_root: block_root, }, None, + &self.spec, ) }) .unwrap_or_else(|e| { @@ -3740,6 +3741,12 @@ impl BeaconChain { .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) .map(|mut snapshot_cache| { snapshot_cache.prune(new_finalized_checkpoint.epoch); + debug!( + self.log, + "Snapshot cache pruned"; + "new_len" => snapshot_cache.len(), + "remaining_roots" => ?snapshot_cache.beacon_block_roots(), + ); }) .unwrap_or_else(|| { error!( diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 32dfc266f34..28eacad5590 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -4,8 +4,12 @@ use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; use lazy_static::lazy_static; pub use lighthouse_metrics::*; use slot_clock::SlotClock; +use std::time::Duration; use types::{BeaconState, Epoch, EthSpec, Hash256, Slot}; +/// The maximum time to wait for the snapshot cache lock during a metrics scrape. +const SNAPSHOT_CACHE_TIMEOUT: Duration = Duration::from_millis(100); + lazy_static! { /* * Block Processing @@ -18,6 +22,10 @@ lazy_static! 
{ "beacon_block_processing_successes_total", "Count of blocks processed without error" ); + pub static ref BLOCK_PROCESSING_SNAPSHOT_CACHE_SIZE: Result = try_create_int_gauge( + "beacon_block_processing_snapshot_cache_size", + "Count snapshots in the snapshot cache" + ); pub static ref BLOCK_PROCESSING_SNAPSHOT_CACHE_MISSES: Result = try_create_int_counter( "beacon_block_processing_snapshot_cache_misses", "Count of snapshot cache misses" @@ -913,6 +921,16 @@ pub fn scrape_for_metrics(beacon_chain: &BeaconChain) { let attestation_stats = beacon_chain.op_pool.attestation_stats(); + if let Some(snapshot_cache) = beacon_chain + .snapshot_cache + .try_write_for(SNAPSHOT_CACHE_TIMEOUT) + { + set_gauge( + &BLOCK_PROCESSING_SNAPSHOT_CACHE_SIZE, + snapshot_cache.len() as i64, + ) + } + set_gauge_by_usize( &OP_POOL_NUM_ATTESTATIONS, attestation_stats.num_attestations, diff --git a/beacon_node/beacon_chain/src/snapshot_cache.rs b/beacon_node/beacon_chain/src/snapshot_cache.rs index 4f7124de341..f4bbae8a32e 100644 --- a/beacon_node/beacon_chain/src/snapshot_cache.rs +++ b/beacon_node/beacon_chain/src/snapshot_cache.rs @@ -1,4 +1,5 @@ use crate::BeaconSnapshot; +use itertools::process_results; use std::cmp; use std::time::Duration; use types::{ @@ -164,9 +165,25 @@ impl SnapshotCache { } } + /// The block roots of all snapshots contained in `self`. + pub fn beacon_block_roots(&self) -> Vec { + self.snapshots.iter().map(|s| s.beacon_block_root).collect() + } + + /// The number of snapshots contained in `self`. + pub fn len(&self) -> usize { + self.snapshots.len() + } + /// Insert a snapshot, potentially removing an existing snapshot if `self` is at capacity (see /// struct-level documentation for more info). 
- pub fn insert(&mut self, snapshot: BeaconSnapshot, pre_state: Option>) { + pub fn insert( + &mut self, + snapshot: BeaconSnapshot, + pre_state: Option>, + spec: &ChainSpec, + ) { + let parent_root = snapshot.beacon_block.message().parent_root(); let item = CacheItem { beacon_block: snapshot.beacon_block, beacon_block_root: snapshot.beacon_block_root, @@ -174,6 +191,25 @@ impl SnapshotCache { pre_state, }; + // Remove the grandparent of the block that was just inserted. + // + // Assuming it's unlikely to see re-orgs deeper than one block, this method helps keep the + // cache small by removing any states that already have more than one descendant. + // + // Remove the grandparent first to free up room in the cache. + let grandparent_result = + process_results(item.beacon_state.rev_iter_block_roots(spec), |iter| { + iter.map(|(_slot, root)| root) + .find(|root| *root != item.beacon_block_root && *root != parent_root) + }); + if let Ok(Some(grandparent_root)) = grandparent_result { + let head_block_root = self.head_block_root; + self.snapshots.retain(|snapshot| { + let root = snapshot.beacon_block_root; + root == head_block_root || root != grandparent_root + }); + } + if self.snapshots.len() < self.max_len { self.snapshots.push(item); } else { @@ -384,7 +420,7 @@ mod test { *snapshot.beacon_state.slot_mut() = Slot::from(i * MainnetEthSpec::slots_per_epoch() + 1); - cache.insert(snapshot, None); + cache.insert(snapshot, None, &spec); assert_eq!( cache.snapshots.len(), @@ -402,7 +438,7 @@ mod test { // 2 2 // 3 3 assert_eq!(cache.snapshots.len(), CACHE_SIZE); - cache.insert(get_snapshot(42), None); + cache.insert(get_snapshot(42), None, &spec); assert_eq!(cache.snapshots.len(), CACHE_SIZE); assert!( @@ -462,7 +498,7 @@ mod test { // Over-fill the cache so it needs to eject some old values on insert. 
for i in 0..CACHE_SIZE as u64 { - cache.insert(get_snapshot(u64::max_value() - i), None); + cache.insert(get_snapshot(u64::max_value() - i), None, &spec); } // Ensure that the new head value was not removed from the cache. From a26b8802da79734ffc4057cda25a5cd6334d1b11 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 16 Jan 2022 23:25:25 +0000 Subject: [PATCH 27/56] Release v2.1.0-rc.0 (#2905) ## Issue Addressed NA ## Proposed Changes Bump version tags to `v2.1.0-rc.0`. ## Additional Info NA --- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 2 +- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bef8b437ba7..e00a3dd8e5c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -331,7 +331,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "2.0.1" +version = "2.1.0-rc.0" dependencies = [ "beacon_chain", "clap", @@ -497,7 +497,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "2.0.1" +version = "2.1.0-rc.0" dependencies = [ "beacon_node", "clap", @@ -2825,7 +2825,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "2.0.1" +version = "2.1.0-rc.0" dependencies = [ "account_utils", "bls", @@ -3350,7 +3350,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "2.0.1" +version = "2.1.0-rc.0" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 80f9182efe7..5ec1f2f412e 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "2.0.1" +version = "2.1.0-rc.0" authors = ["Paul Hauner ", "Age Manning "] edition = "2018" diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index 5892f59f562..60f99329580 100644 --- a/common/lighthouse_version/src/lib.rs 
+++ b/common/lighthouse_version/src/lib.rs @@ -16,7 +16,7 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v2.0.1-", + prefix = "Lighthouse/v2.1.0-rc.0-", fallback = "unknown" ); diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index af58d5e8c4d..46a16fb269c 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "2.0.1" +version = "2.1.0-rc.0" authors = ["Paul Hauner "] edition = "2018" diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 9725155e9c3..bd8c755b185 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "2.0.1" +version = "2.1.0-rc.0" authors = ["Sigma Prime "] edition = "2018" autotests = false From a836e180f9ad51f31767c5ffb33bebdeff1a9f3f Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 17 Jan 2022 03:25:30 +0000 Subject: [PATCH 28/56] Release v2.1.0-rc.1 (#2921) ## Proposed Changes New release candidate to address Windows build failure for rc.0 --- .github/workflows/release.yml | 13 +++++++++++++ Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 2 +- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 7 files changed, 22 insertions(+), 9 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index bca28dbe2a4..4c57b8b1e7f 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -130,6 +130,19 @@ jobs: profile: minimal override: true + # ============================== + # Windows dependencies + # ============================== + + - uses: KyleMayes/install-llvm-action@v1 + if: startsWith(matrix.arch, 'x86_64-windows') + with: + version: "13.0" + directory: ${{ runner.temp }}/llvm + - name: Set LIBCLANG_PATH + if: 
startsWith(matrix.arch, 'x86_64-windows') + run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV + # ============================== # Builds # ============================== diff --git a/Cargo.lock b/Cargo.lock index e00a3dd8e5c..e393d6ea185 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -331,7 +331,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "2.1.0-rc.0" +version = "2.1.0-rc.1" dependencies = [ "beacon_chain", "clap", @@ -497,7 +497,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "2.1.0-rc.0" +version = "2.1.0-rc.1" dependencies = [ "beacon_node", "clap", @@ -2825,7 +2825,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "2.1.0-rc.0" +version = "2.1.0-rc.1" dependencies = [ "account_utils", "bls", @@ -3350,7 +3350,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "2.1.0-rc.0" +version = "2.1.0-rc.1" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 5ec1f2f412e..f8d8c8be5ca 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "2.1.0-rc.0" +version = "2.1.0-rc.1" authors = ["Paul Hauner ", "Age Manning "] edition = "2018" diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index 60f99329580..ddb258d76fe 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -16,7 +16,7 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v2.1.0-rc.0-", + prefix = "Lighthouse/v2.1.0-rc.1-", fallback = "unknown" ); diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 46a16fb269c..d4ab41a3b2a 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = 
"Lighthouse CLI (modeled after zcli)" -version = "2.1.0-rc.0" +version = "2.1.0-rc.1" authors = ["Paul Hauner "] edition = "2018" diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index bd8c755b185..9511c1b496d 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "2.1.0-rc.0" +version = "2.1.0-rc.1" authors = ["Sigma Prime "] edition = "2018" autotests = false From 9ed92d6e7830893670e2176a4e3bfd571f344cbf Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 17 Jan 2022 06:09:58 +0000 Subject: [PATCH 29/56] Use "release candidate" in book (#2920) ## Issue Addressed NA ## Proposed Changes Since we use the `rc` (release candidate) tag in our version strings, it seems consistent if we also use "release candidate" in the book rather than "pre-release". Notably, Github adds a "pre-release" tag to release when we request. I think it's OK that Github uses that term whilst we consistently use "release candidate". Our docs indicate that the terms are interchangeable. 
## Additional Info I hope to use the new docs link in the `v2.1.0` release, so it would be nice if we can merge this soon :pray: --- book/src/advanced-pre-releases.md | 40 ++--------------------- book/src/advanced-release-candidates.md | 43 +++++++++++++++++++++++++ 2 files changed, 45 insertions(+), 38 deletions(-) create mode 100644 book/src/advanced-release-candidates.md diff --git a/book/src/advanced-pre-releases.md b/book/src/advanced-pre-releases.md index 3d1b14d1b18..b90bd631d41 100644 --- a/book/src/advanced-pre-releases.md +++ b/book/src/advanced-pre-releases.md @@ -1,40 +1,4 @@ # Pre-Releases -[sigp/lighthouse]: https://github.com/sigp/lighthouse -[sigp/lighthouse/releases/latest]: https://github.com/sigp/lighthouse/releases/latest -[sigp/lighthouse/releases]: https://github.com/sigp/lighthouse/releases -[`v1.4.0-rc.0`]: https://github.com/sigp/lighthouse/releases/tag/v1.4.0-rc.0 -[`v1.4.0`]: https://github.com/sigp/lighthouse/releases/tag/v1.4.0 - -From time-to-time, Lighthouse *pre-releases* will be published on the [sigp/lighthouse] repository. -These releases have passed the usual automated testing, however the developers would like to see it -running "in the wild" in a variety of configurations before declaring it an official, stable -release. Pre-releases are also used by developers to get feedback from users regarding the -ergonomics of new features or changes. - -Github will clearly show such releases as a "Pre-release" and they *will not* show up on -[sigp/lighthouse/releases/latest]. However, pre-releases *will* show up on the -[sigp/lighthouse/releases] page, so **please pay attention to avoid the pre-releases when you're -looking for stable Lighthouse**. - -### Examples - -[`v1.4.0-rc.0`] has `rc` (release candidate) in the version string and is therefore a pre-release. This -release is *not* stable and is *not* intended for critical tasks on mainnet (e.g., staking). 
- -However, [`v1.4.0`] is considered stable since it is not marked as a pre-release and does not -contain `rc` in the version string. This release is intended for use on mainnet. - -## When to use a pre-release - -Users may wish to try a pre-release for the following reasons: - -- To preview new features before they are officially released. -- To help detect bugs and regressions before they reach production. -- To provide feedback on annoyances before they make it into a release and become harder to change or revert. - -## When *not* to use a pre-release - -It is not recommended to use pre-releases for any critical tasks on mainnet (e.g., staking). To test -critical features, try one of the testnets (e.g., Prater). - +Pre-releases are now referred to as [Release Candidates](./advanced-release-candidates.md). The terms may +be used interchangeably. diff --git a/book/src/advanced-release-candidates.md b/book/src/advanced-release-candidates.md new file mode 100644 index 00000000000..842bc484041 --- /dev/null +++ b/book/src/advanced-release-candidates.md @@ -0,0 +1,43 @@ +# Release Candidates + +[sigp/lighthouse]: https://github.com/sigp/lighthouse +[sigp/lighthouse/releases/latest]: https://github.com/sigp/lighthouse/releases/latest +[sigp/lighthouse/releases]: https://github.com/sigp/lighthouse/releases +[`v1.4.0-rc.0`]: https://github.com/sigp/lighthouse/releases/tag/v1.4.0-rc.0 +[`v1.4.0`]: https://github.com/sigp/lighthouse/releases/tag/v1.4.0 + +From time-to-time, Lighthouse *release candidates* will be published on the [sigp/lighthouse] +repository. These releases have passed the usual automated testing, however the developers would +like to see it running "in the wild" in a variety of configurations before declaring it an official, +stable release. Release candidates are also used by developers to get feedback from users regarding the +ergonomics of new features or changes.
+ +Github will clearly show such releases as a "Pre-release" and they *will not* show up on +[sigp/lighthouse/releases/latest]. However, release candidates *will* show up on the +[sigp/lighthouse/releases] page, so **please pay attention to avoid the release candidates when +you're looking for stable Lighthouse**. + +From time to time, Lighthouse may use the terms "release candidate" and "pre release" +interchangeably. A pre release is identical to a release candidate. + +### Examples + +[`v1.4.0-rc.0`] has `rc` in the version string and is therefore a release candidate. This release is +*not* stable and is *not* intended for critical tasks on mainnet (e.g., staking). + +However, [`v1.4.0`] is considered stable since it is not marked as a release candidate and does not +contain `rc` in the version string. This release is intended for use on mainnet. + +## When to use a release candidate + +Users may wish to try a release candidate for the following reasons: + +- To preview new features before they are officially released. +- To help detect bugs and regressions before they reach production. +- To provide feedback on annoyances before they make it into a release and become harder to change or revert. + +## When *not* to use a release candidate + +It is not recommended to use release candidates for any critical tasks on mainnet (e.g., staking). +To test critical features, try one of the testnets (e.g., Prater). + From ef7351ddfecbb35cd23dde11835d58d4d4e689a8 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 19 Jan 2022 00:24:19 +0000 Subject: [PATCH 30/56] Update to spec v1.1.8 (#2893) ## Proposed Changes Change the canonical fork name for the merge to Bellatrix. Keep other merge naming the same to avoid churn. I've also fixed and enabled the `fork` and `transition` tests for Bellatrix, and the v1.1.7 fork choice tests. Additionally, the `BellatrixPreset` has been added with tests. It gets served via the `/config/spec` API endpoint along with the other presets. 
--- beacon_node/beacon_chain/tests/merge.rs | 10 +- .../lighthouse_network/tests/common/mod.rs | 2 +- beacon_node/src/config.rs | 2 +- .../mainnet/config.yaml | 4 +- .../prater/config.yaml | 4 +- .../pyrmont/config.yaml | 4 +- consensus/state_processing/src/genesis.rs | 8 +- .../src/per_slot_processing.rs | 6 +- consensus/state_processing/src/upgrade.rs | 2 +- .../state_processing/src/upgrade/merge.rs | 4 +- .../types/presets/mainnet/bellatrix.yaml | 21 ++++ .../types/presets/minimal/bellatrix.yaml | 21 ++++ consensus/types/src/beacon_block.rs | 2 +- consensus/types/src/chain_spec.rs | 54 +++++------ consensus/types/src/config_and_preset.rs | 6 +- consensus/types/src/fork_context.rs | 9 +- consensus/types/src/fork_name.rs | 17 +++- consensus/types/src/lib.rs | 2 +- consensus/types/src/preset.rs | 37 +++++++ lcli/src/new_testnet.rs | 2 +- .../environment/tests/testnet_dir/config.yaml | 4 +- testing/ef_tests/Makefile | 2 +- testing/ef_tests/check_all_files_accessed.py | 15 +-- testing/ef_tests/src/cases/fork.rs | 10 +- testing/ef_tests/src/cases/fork_choice.rs | 13 +-- testing/ef_tests/src/cases/operations.rs | 1 - testing/ef_tests/src/cases/transition.rs | 8 +- testing/ef_tests/src/handler.rs | 96 ++++++------------- testing/ef_tests/tests/tests.rs | 18 ++-- 29 files changed, 213 insertions(+), 171 deletions(-) create mode 100644 consensus/types/presets/mainnet/bellatrix.yaml create mode 100644 consensus/types/presets/minimal/bellatrix.yaml diff --git a/beacon_node/beacon_chain/tests/merge.rs b/beacon_node/beacon_chain/tests/merge.rs index 35dda493e19..43ee2372b65 100644 --- a/beacon_node/beacon_chain/tests/merge.rs +++ b/beacon_node/beacon_chain/tests/merge.rs @@ -30,11 +30,11 @@ fn verify_execution_payload_chain(chain: &[ExecutionPayload]) { #[should_panic] fn merge_with_terminal_block_hash_override() { let altair_fork_epoch = Epoch::new(0); - let merge_fork_epoch = Epoch::new(0); + let bellatrix_fork_epoch = Epoch::new(0); let mut spec = E::default_spec(); 
spec.altair_fork_epoch = Some(altair_fork_epoch); - spec.merge_fork_epoch = Some(merge_fork_epoch); + spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch); let genesis_pow_block_hash = generate_pow_block( spec.terminal_total_difficulty, @@ -95,12 +95,12 @@ fn merge_with_terminal_block_hash_override() { fn base_altair_merge_with_terminal_block_after_fork() { let altair_fork_epoch = Epoch::new(4); let altair_fork_slot = altair_fork_epoch.start_slot(E::slots_per_epoch()); - let merge_fork_epoch = Epoch::new(8); - let merge_fork_slot = merge_fork_epoch.start_slot(E::slots_per_epoch()); + let bellatrix_fork_epoch = Epoch::new(8); + let merge_fork_slot = bellatrix_fork_epoch.start_slot(E::slots_per_epoch()); let mut spec = E::default_spec(); spec.altair_fork_epoch = Some(altair_fork_epoch); - spec.merge_fork_epoch = Some(merge_fork_epoch); + spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch); let mut execution_payloads = vec![]; diff --git a/beacon_node/lighthouse_network/tests/common/mod.rs b/beacon_node/lighthouse_network/tests/common/mod.rs index 7397fe7ea98..7deb2108b07 100644 --- a/beacon_node/lighthouse_network/tests/common/mod.rs +++ b/beacon_node/lighthouse_network/tests/common/mod.rs @@ -29,7 +29,7 @@ pub fn fork_context() -> ForkContext { // Set fork_epoch to `Some` to ensure that the `ForkContext` object // includes altair in the list of forks chain_spec.altair_fork_epoch = Some(types::Epoch::new(42)); - chain_spec.merge_fork_epoch = Some(types::Epoch::new(84)); + chain_spec.bellatrix_fork_epoch = Some(types::Epoch::new(84)); ForkContext::new::(types::Slot::new(0), Hash256::zero(), &chain_spec) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index df5cf143704..20408229311 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -469,7 +469,7 @@ pub fn get_config( } client_config.chain.max_network_size = - lighthouse_network::gossip_max_size(spec.merge_fork_epoch.is_some()); + 
lighthouse_network::gossip_max_size(spec.bellatrix_fork_epoch.is_some()); if cli_args.is_present("slasher") { let slasher_dir = if let Some(slasher_dir) = cli_args.value_of("slasher-dir") { diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index 4d17356ced3..b889b828870 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -33,8 +33,8 @@ GENESIS_DELAY: 604800 ALTAIR_FORK_VERSION: 0x01000000 ALTAIR_FORK_EPOCH: 74240 # Merge -MERGE_FORK_VERSION: 0x02000000 -MERGE_FORK_EPOCH: 18446744073709551615 +BELLATRIX_FORK_VERSION: 0x02000000 +BELLATRIX_FORK_EPOCH: 18446744073709551615 # Sharding SHARDING_FORK_VERSION: 0x03000000 SHARDING_FORK_EPOCH: 18446744073709551615 diff --git a/common/eth2_network_config/built_in_network_configs/prater/config.yaml b/common/eth2_network_config/built_in_network_configs/prater/config.yaml index aa375ab2ea4..72a106f36a0 100644 --- a/common/eth2_network_config/built_in_network_configs/prater/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/prater/config.yaml @@ -33,8 +33,8 @@ GENESIS_DELAY: 1919188 ALTAIR_FORK_VERSION: 0x01001020 ALTAIR_FORK_EPOCH: 36660 # Merge -MERGE_FORK_VERSION: 0x02001020 -MERGE_FORK_EPOCH: 18446744073709551615 +BELLATRIX_FORK_VERSION: 0x02001020 +BELLATRIX_FORK_EPOCH: 18446744073709551615 # Sharding SHARDING_FORK_VERSION: 0x03001020 SHARDING_FORK_EPOCH: 18446744073709551615 diff --git a/common/eth2_network_config/built_in_network_configs/pyrmont/config.yaml b/common/eth2_network_config/built_in_network_configs/pyrmont/config.yaml index b5f8415805f..913671c2bea 100644 --- a/common/eth2_network_config/built_in_network_configs/pyrmont/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/pyrmont/config.yaml @@ -33,8 +33,8 @@ GENESIS_DELAY: 432000 ALTAIR_FORK_VERSION: 
0x01002009 ALTAIR_FORK_EPOCH: 61650 # Merge -MERGE_FORK_VERSION: 0x02002009 -MERGE_FORK_EPOCH: 18446744073709551615 +BELLATRIX_FORK_VERSION: 0x02002009 +BELLATRIX_FORK_EPOCH: 18446744073709551615 # Sharding SHARDING_FORK_VERSION: 0x03002009 SHARDING_FORK_EPOCH: 18446744073709551615 diff --git a/consensus/state_processing/src/genesis.rs b/consensus/state_processing/src/genesis.rs index 1bb88c84d16..fb2c9bfa7d0 100644 --- a/consensus/state_processing/src/genesis.rs +++ b/consensus/state_processing/src/genesis.rs @@ -2,7 +2,7 @@ use super::per_block_processing::{ errors::BlockProcessingError, process_operations::process_deposit, }; use crate::common::DepositDataTree; -use crate::upgrade::{upgrade_to_altair, upgrade_to_merge}; +use crate::upgrade::{upgrade_to_altair, upgrade_to_bellatrix}; use safe_arith::{ArithError, SafeArith}; use tree_hash::TreeHash; use types::DEPOSIT_TREE_DEPTH; @@ -58,13 +58,13 @@ pub fn initialize_beacon_state_from_eth1( // Similarly, perform an upgrade to the merge if configured from genesis. if spec - .merge_fork_epoch + .bellatrix_fork_epoch .map_or(false, |fork_epoch| fork_epoch == T::genesis_epoch()) { - upgrade_to_merge(&mut state, spec)?; + upgrade_to_bellatrix(&mut state, spec)?; // Remove intermediate Altair fork from `state.fork`. - state.fork_mut().previous_version = spec.merge_fork_version; + state.fork_mut().previous_version = spec.bellatrix_fork_version; // Override latest execution payload header. 
// See https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/merge/beacon-chain.md#testing diff --git a/consensus/state_processing/src/per_slot_processing.rs b/consensus/state_processing/src/per_slot_processing.rs index 454cee5ffb1..9018db65bcd 100644 --- a/consensus/state_processing/src/per_slot_processing.rs +++ b/consensus/state_processing/src/per_slot_processing.rs @@ -1,4 +1,4 @@ -use crate::upgrade::{upgrade_to_altair, upgrade_to_merge}; +use crate::upgrade::{upgrade_to_altair, upgrade_to_bellatrix}; use crate::{per_epoch_processing::EpochProcessingSummary, *}; use safe_arith::{ArithError, SafeArith}; use types::*; @@ -52,8 +52,8 @@ pub fn per_slot_processing( upgrade_to_altair(state, spec)?; } // If the Merge fork epoch is reached, perform an irregular state upgrade. - if spec.merge_fork_epoch == Some(state.current_epoch()) { - upgrade_to_merge(state, spec)?; + if spec.bellatrix_fork_epoch == Some(state.current_epoch()) { + upgrade_to_bellatrix(state, spec)?; } } diff --git a/consensus/state_processing/src/upgrade.rs b/consensus/state_processing/src/upgrade.rs index fda1a714af3..fdf13c82818 100644 --- a/consensus/state_processing/src/upgrade.rs +++ b/consensus/state_processing/src/upgrade.rs @@ -2,4 +2,4 @@ pub mod altair; pub mod merge; pub use altair::upgrade_to_altair; -pub use merge::upgrade_to_merge; +pub use merge::upgrade_to_bellatrix; diff --git a/consensus/state_processing/src/upgrade/merge.rs b/consensus/state_processing/src/upgrade/merge.rs index c41987609e3..2e4ed441a47 100644 --- a/consensus/state_processing/src/upgrade/merge.rs +++ b/consensus/state_processing/src/upgrade/merge.rs @@ -5,7 +5,7 @@ use types::{ }; /// Transform a `Altair` state into an `Merge` state. 
-pub fn upgrade_to_merge( +pub fn upgrade_to_bellatrix( pre_state: &mut BeaconState, spec: &ChainSpec, ) -> Result<(), Error> { @@ -24,7 +24,7 @@ pub fn upgrade_to_merge( slot: pre.slot, fork: Fork { previous_version: pre.fork.current_version, - current_version: spec.merge_fork_version, + current_version: spec.bellatrix_fork_version, epoch, }, // History diff --git a/consensus/types/presets/mainnet/bellatrix.yaml b/consensus/types/presets/mainnet/bellatrix.yaml new file mode 100644 index 00000000000..7ae61b732f1 --- /dev/null +++ b/consensus/types/presets/mainnet/bellatrix.yaml @@ -0,0 +1,21 @@ +# Mainnet preset - Bellatrix + +# Updated penalty values +# --------------------------------------------------------------- +# 2**24 (= 16,777,216) +INACTIVITY_PENALTY_QUOTIENT_BELLATRIX: 16777216 +# 2**5 (= 32) +MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX: 32 +# 3 +PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX: 3 + +# Execution +# --------------------------------------------------------------- +# 2**30 (= 1,073,741,824) +MAX_BYTES_PER_TRANSACTION: 1073741824 +# 2**20 (= 1,048,576) +MAX_TRANSACTIONS_PER_PAYLOAD: 1048576 +# 2**8 (= 256) +BYTES_PER_LOGS_BLOOM: 256 +# 2**5 (= 32) +MAX_EXTRA_DATA_BYTES: 32 diff --git a/consensus/types/presets/minimal/bellatrix.yaml b/consensus/types/presets/minimal/bellatrix.yaml new file mode 100644 index 00000000000..3417985fad1 --- /dev/null +++ b/consensus/types/presets/minimal/bellatrix.yaml @@ -0,0 +1,21 @@ +# Minimal preset - Bellatrix + +# Updated penalty values +# --------------------------------------------------------------- +# 2**24 (= 16,777,216) +INACTIVITY_PENALTY_QUOTIENT_BELLATRIX: 16777216 +# 2**5 (= 32) +MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX: 32 +# 3 +PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX: 3 + +# Execution +# --------------------------------------------------------------- +# 2**30 (= 1,073,741,824) +MAX_BYTES_PER_TRANSACTION: 1073741824 +# 2**20 (= 1,048,576) +MAX_TRANSACTIONS_PER_PAYLOAD: 1048576 +# 2**8 (= 256) 
+BYTES_PER_LOGS_BLOOM: 256 +# 2**5 (= 32) +MAX_EXTRA_DATA_BYTES: 32 diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index e524f0c1272..0026db0ee7c 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -69,7 +69,7 @@ impl<'a, T: EthSpec> SignedRoot for BeaconBlockRef<'a, T> {} impl BeaconBlock { /// Returns an empty block to be used during genesis. pub fn empty(spec: &ChainSpec) -> Self { - if spec.merge_fork_epoch == Some(T::genesis_epoch()) { + if spec.bellatrix_fork_epoch == Some(T::genesis_epoch()) { Self::Merge(BeaconBlockMerge::empty(spec)) } else if spec.altair_fork_epoch == Some(T::genesis_epoch()) { Self::Altair(BeaconBlockAltair::empty(spec)) diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 70845877d9f..f5ed2717c51 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -132,12 +132,12 @@ pub struct ChainSpec { /* * Merge hard fork params */ - pub inactivity_penalty_quotient_merge: u64, - pub min_slashing_penalty_quotient_merge: u64, - pub proportional_slashing_multiplier_merge: u64, - pub merge_fork_version: [u8; 4], + pub inactivity_penalty_quotient_bellatrix: u64, + pub min_slashing_penalty_quotient_bellatrix: u64, + pub proportional_slashing_multiplier_bellatrix: u64, + pub bellatrix_fork_version: [u8; 4], /// The Merge fork epoch is optional, with `None` representing "Merge never happens". - pub merge_fork_epoch: Option, + pub bellatrix_fork_epoch: Option, pub terminal_total_difficulty: Uint256, pub terminal_block_hash: Hash256, pub terminal_block_hash_activation_epoch: Epoch, @@ -217,7 +217,7 @@ impl ChainSpec { /// Returns the name of the fork which is active at `epoch`. 
pub fn fork_name_at_epoch(&self, epoch: Epoch) -> ForkName { - match self.merge_fork_epoch { + match self.bellatrix_fork_epoch { Some(fork_epoch) if epoch >= fork_epoch => ForkName::Merge, _ => match self.altair_fork_epoch { Some(fork_epoch) if epoch >= fork_epoch => ForkName::Altair, @@ -231,7 +231,7 @@ impl ChainSpec { match fork_name { ForkName::Base => self.genesis_fork_version, ForkName::Altair => self.altair_fork_version, - ForkName::Merge => self.merge_fork_version, + ForkName::Merge => self.bellatrix_fork_version, } } @@ -240,7 +240,7 @@ impl ChainSpec { match fork_name { ForkName::Base => Some(Epoch::new(0)), ForkName::Altair => self.altair_fork_epoch, - ForkName::Merge => self.merge_fork_epoch, + ForkName::Merge => self.bellatrix_fork_epoch, } } @@ -249,7 +249,7 @@ impl ChainSpec { match state { BeaconState::Base(_) => self.inactivity_penalty_quotient, BeaconState::Altair(_) => self.inactivity_penalty_quotient_altair, - BeaconState::Merge(_) => self.inactivity_penalty_quotient_merge, + BeaconState::Merge(_) => self.inactivity_penalty_quotient_bellatrix, } } @@ -261,7 +261,7 @@ impl ChainSpec { match state { BeaconState::Base(_) => self.proportional_slashing_multiplier, BeaconState::Altair(_) => self.proportional_slashing_multiplier_altair, - BeaconState::Merge(_) => self.proportional_slashing_multiplier_merge, + BeaconState::Merge(_) => self.proportional_slashing_multiplier_bellatrix, } } @@ -273,7 +273,7 @@ impl ChainSpec { match state { BeaconState::Base(_) => self.min_slashing_penalty_quotient, BeaconState::Altair(_) => self.min_slashing_penalty_quotient_altair, - BeaconState::Merge(_) => self.min_slashing_penalty_quotient_merge, + BeaconState::Merge(_) => self.min_slashing_penalty_quotient_bellatrix, } } @@ -526,13 +526,13 @@ impl ChainSpec { /* * Merge hard fork params */ - inactivity_penalty_quotient_merge: u64::checked_pow(2, 24) + inactivity_penalty_quotient_bellatrix: u64::checked_pow(2, 24) .expect("pow does not overflow"), - 
min_slashing_penalty_quotient_merge: u64::checked_pow(2, 5) + min_slashing_penalty_quotient_bellatrix: u64::checked_pow(2, 5) .expect("pow does not overflow"), - proportional_slashing_multiplier_merge: 3, - merge_fork_version: [0x02, 0x00, 0x00, 0x00], - merge_fork_epoch: None, + proportional_slashing_multiplier_bellatrix: 3, + bellatrix_fork_version: [0x02, 0x00, 0x00, 0x00], + bellatrix_fork_epoch: None, terminal_total_difficulty: Uint256::MAX .checked_sub(Uint256::from(2u64.pow(10))) .expect("subtraction does not overflow") @@ -583,8 +583,8 @@ impl ChainSpec { altair_fork_version: [0x01, 0x00, 0x00, 0x01], altair_fork_epoch: None, // Merge - merge_fork_version: [0x02, 0x00, 0x00, 0x01], - merge_fork_epoch: None, + bellatrix_fork_version: [0x02, 0x00, 0x00, 0x01], + bellatrix_fork_epoch: None, // Other network_id: 2, // lighthouse testnet network id deposit_chain_id: 5, @@ -632,10 +632,10 @@ pub struct Config { pub altair_fork_epoch: Option>, #[serde(with = "eth2_serde_utils::bytes_4_hex")] - merge_fork_version: [u8; 4], + bellatrix_fork_version: [u8; 4], #[serde(serialize_with = "serialize_fork_epoch")] #[serde(deserialize_with = "deserialize_fork_epoch")] - pub merge_fork_epoch: Option>, + pub bellatrix_fork_epoch: Option>, #[serde(with = "eth2_serde_utils::quoted_u64")] seconds_per_slot: u64, @@ -734,9 +734,9 @@ impl Config { altair_fork_epoch: spec .altair_fork_epoch .map(|epoch| MaybeQuoted { value: epoch }), - merge_fork_version: spec.merge_fork_version, - merge_fork_epoch: spec - .merge_fork_epoch + bellatrix_fork_version: spec.bellatrix_fork_version, + bellatrix_fork_epoch: spec + .bellatrix_fork_epoch .map(|epoch| MaybeQuoted { value: epoch }), seconds_per_slot: spec.seconds_per_slot, @@ -779,8 +779,8 @@ impl Config { genesis_delay, altair_fork_version, altair_fork_epoch, - merge_fork_epoch, - merge_fork_version, + bellatrix_fork_epoch, + bellatrix_fork_version, seconds_per_slot, seconds_per_eth1_block, min_validator_withdrawability_delay, @@ -808,8 
+808,8 @@ impl Config { genesis_delay, altair_fork_version, altair_fork_epoch: altair_fork_epoch.map(|q| q.value), - merge_fork_epoch: merge_fork_epoch.map(|q| q.value), - merge_fork_version, + bellatrix_fork_epoch: bellatrix_fork_epoch.map(|q| q.value), + bellatrix_fork_version, seconds_per_slot, seconds_per_eth1_block, min_validator_withdrawability_delay, diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/config_and_preset.rs index 16d36c850c3..18c559ca2cc 100644 --- a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/config_and_preset.rs @@ -1,4 +1,4 @@ -use crate::{AltairPreset, BasePreset, ChainSpec, Config, EthSpec}; +use crate::{AltairPreset, BasePreset, BellatrixPreset, ChainSpec, Config, EthSpec}; use serde_derive::{Deserialize, Serialize}; use std::collections::HashMap; @@ -14,6 +14,8 @@ pub struct ConfigAndPreset { pub base_preset: BasePreset, #[serde(flatten)] pub altair_preset: AltairPreset, + #[serde(flatten)] + pub bellatrix_preset: BellatrixPreset, /// The `extra_fields` map allows us to gracefully decode fields intended for future hard forks. #[serde(flatten)] @@ -25,12 +27,14 @@ impl ConfigAndPreset { let config = Config::from_chain_spec::(spec); let base_preset = BasePreset::from_chain_spec::(spec); let altair_preset = AltairPreset::from_chain_spec::(spec); + let bellatrix_preset = BellatrixPreset::from_chain_spec::(spec); let extra_fields = HashMap::new(); Self { config, base_preset, altair_preset, + bellatrix_preset, extra_fields, } } diff --git a/consensus/types/src/fork_context.rs b/consensus/types/src/fork_context.rs index 88a2f31264b..52b9294c8ca 100644 --- a/consensus/types/src/fork_context.rs +++ b/consensus/types/src/fork_context.rs @@ -36,11 +36,14 @@ impl ForkContext { } // Only add Merge to list of forks if it's enabled - // Note: `merge_fork_epoch == None` implies merge hasn't been activated yet on the config. 
- if spec.merge_fork_epoch.is_some() { + // Note: `bellatrix_fork_epoch == None` implies merge hasn't been activated yet on the config. + if spec.bellatrix_fork_epoch.is_some() { fork_to_digest.push(( ForkName::Merge, - ChainSpec::compute_fork_digest(spec.merge_fork_version, genesis_validators_root), + ChainSpec::compute_fork_digest( + spec.bellatrix_fork_version, + genesis_validators_root, + ), )); } diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork_name.rs index 54cc7a2451e..4a2e7620874 100644 --- a/consensus/types/src/fork_name.rs +++ b/consensus/types/src/fork_name.rs @@ -25,17 +25,17 @@ impl ForkName { match self { ForkName::Base => { spec.altair_fork_epoch = None; - spec.merge_fork_epoch = None; + spec.bellatrix_fork_epoch = None; spec } ForkName::Altair => { spec.altair_fork_epoch = Some(Epoch::new(0)); - spec.merge_fork_epoch = None; + spec.bellatrix_fork_epoch = None; spec } ForkName::Merge => { spec.altair_fork_epoch = Some(Epoch::new(0)); - spec.merge_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); spec } } @@ -112,7 +112,7 @@ impl FromStr for ForkName { Ok(match fork_name.to_lowercase().as_ref() { "phase0" | "base" => ForkName::Base, "altair" => ForkName::Altair, - "merge" => ForkName::Merge, + "bellatrix" | "merge" => ForkName::Merge, _ => return Err(()), }) } @@ -123,7 +123,7 @@ impl Display for ForkName { match self { ForkName::Base => "phase0".fmt(f), ForkName::Altair => "altair".fmt(f), - ForkName::Merge => "merge".fmt(f), + ForkName::Merge => "bellatrix".fmt(f), } } } @@ -181,4 +181,11 @@ mod test { assert_eq!(ForkName::from_str("NO_NAME"), Err(())); assert_eq!(ForkName::from_str("no_name"), Err(())); } + + #[test] + fn fork_name_bellatrix_or_merge() { + assert_eq!(ForkName::from_str("bellatrix"), Ok(ForkName::Merge)); + assert_eq!(ForkName::from_str("merge"), Ok(ForkName::Merge)); + assert_eq!(ForkName::Merge.to_string(), "bellatrix"); + } } diff --git a/consensus/types/src/lib.rs 
b/consensus/types/src/lib.rs index 5b1d3707ae8..5e27b667481 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -125,7 +125,7 @@ pub use crate::indexed_attestation::IndexedAttestation; pub use crate::participation_flags::ParticipationFlags; pub use crate::participation_list::ParticipationList; pub use crate::pending_attestation::PendingAttestation; -pub use crate::preset::{AltairPreset, BasePreset}; +pub use crate::preset::{AltairPreset, BasePreset, BellatrixPreset}; pub use crate::proposer_slashing::ProposerSlashing; pub use crate::relative_epoch::{Error as RelativeEpochError, RelativeEpoch}; pub use crate::selection_proof::SelectionProof; diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index 030c1234059..ccda1a06a06 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -150,6 +150,40 @@ impl AltairPreset { } } +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +#[serde(rename_all = "UPPERCASE")] +pub struct BellatrixPreset { + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub inactivity_penalty_quotient_bellatrix: u64, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub min_slashing_penalty_quotient_bellatrix: u64, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub proportional_slashing_multiplier_bellatrix: u64, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub max_bytes_per_transaction: u64, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub max_transactions_per_payload: u64, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub bytes_per_logs_bloom: u64, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub max_extra_data_bytes: u64, +} + +impl BellatrixPreset { + pub fn from_chain_spec(spec: &ChainSpec) -> Self { + Self { + inactivity_penalty_quotient_bellatrix: spec.inactivity_penalty_quotient_bellatrix, + min_slashing_penalty_quotient_bellatrix: spec.min_slashing_penalty_quotient_bellatrix, + proportional_slashing_multiplier_bellatrix: spec + 
.proportional_slashing_multiplier_bellatrix, + max_bytes_per_transaction: T::max_bytes_per_transaction() as u64, + max_transactions_per_payload: T::max_transactions_per_payload() as u64, + bytes_per_logs_bloom: T::bytes_per_logs_bloom() as u64, + max_extra_data_bytes: T::max_extra_data_bytes() as u64, + } + } +} + #[cfg(test)] mod test { use super::*; @@ -182,6 +216,9 @@ mod test { let altair: AltairPreset = preset_from_file(&preset_name, "altair.yaml"); assert_eq!(altair, AltairPreset::from_chain_spec::(&spec)); + + let bellatrix: BellatrixPreset = preset_from_file(&preset_name, "bellatrix.yaml"); + assert_eq!(bellatrix, BellatrixPreset::from_chain_spec::(&spec)); } #[test] diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs index 630d65963a0..83dcc2e7198 100644 --- a/lcli/src/new_testnet.rs +++ b/lcli/src/new_testnet.rs @@ -63,7 +63,7 @@ pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul } if let Some(fork_epoch) = parse_optional(matches, "merge-fork-epoch")? 
{ - spec.merge_fork_epoch = Some(fork_epoch); + spec.bellatrix_fork_epoch = Some(fork_epoch); } let genesis_state_bytes = if matches.is_present("interop-genesis-state") { diff --git a/lighthouse/environment/tests/testnet_dir/config.yaml b/lighthouse/environment/tests/testnet_dir/config.yaml index ac5403efdbf..8424a2fdc34 100644 --- a/lighthouse/environment/tests/testnet_dir/config.yaml +++ b/lighthouse/environment/tests/testnet_dir/config.yaml @@ -33,8 +33,8 @@ GENESIS_DELAY: 604800 ALTAIR_FORK_VERSION: 0x01000000 ALTAIR_FORK_EPOCH: 18446744073709551615 # Merge -MERGE_FORK_VERSION: 0x02000000 -MERGE_FORK_EPOCH: 18446744073709551615 +BELLATRIX_FORK_VERSION: 0x02000000 +BELLATRIX_FORK_EPOCH: 18446744073709551615 # Sharding SHARDING_FORK_VERSION: 0x03000000 SHARDING_FORK_EPOCH: 18446744073709551615 diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index 8c2a0f10e35..3cd6d17c0c4 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.1.6 +TESTS_TAG := v1.1.8 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index ce9e1d6b4ed..2eb4ce5407c 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -30,18 +30,11 @@ # LightClientUpdate "tests/.*/.*/ssz_static/LightClientUpdate", # LightClientSnapshot - "tests/minimal/altair/ssz_static/LightClientSnapshot", - "tests/mainnet/altair/ssz_static/LightClientSnapshot", - "tests/minimal/merge/ssz_static/LightClientSnapshot", - "tests/mainnet/merge/ssz_static/LightClientSnapshot", + "tests/.*/.*/ssz_static/LightClientSnapshot", # Merkle-proof tests for light clients - "tests/mainnet/altair/merkle/single_proof", - "tests/minimal/altair/merkle/single_proof", - "tests/mainnet/merge/merkle/single_proof", - "tests/minimal/merge/merkle/single_proof", - # FIXME(merge): Merge 
transition tests are now available but not yet passing - "tests/mainnet/merge/transition/", - "tests/minimal/merge/transition/", + "tests/.*/.*/merkle/single_proof", + # One of the EF researchers likes to pack the tarballs on a Mac + ".*\.DS_Store.*" ] def normalize_path(path): diff --git a/testing/ef_tests/src/cases/fork.rs b/testing/ef_tests/src/cases/fork.rs index 868e4a0c5ae..ae12447abf3 100644 --- a/testing/ef_tests/src/cases/fork.rs +++ b/testing/ef_tests/src/cases/fork.rs @@ -3,7 +3,7 @@ use crate::case_result::compare_beacon_state_results_without_caches; use crate::cases::common::previous_fork; use crate::decode::{ssz_decode_state, yaml_decode_file}; use serde_derive::Deserialize; -use state_processing::upgrade::upgrade_to_altair; +use state_processing::upgrade::{upgrade_to_altair, upgrade_to_bellatrix}; use types::{BeaconState, ForkName}; #[derive(Debug, Clone, Default, Deserialize)] @@ -49,10 +49,7 @@ impl Case for ForkTest { fn is_enabled_for_fork(fork_name: ForkName) -> bool { // Upgrades exist targeting all forks except phase0/base. // Fork tests also need BLS. 
- // FIXME(merge): enable merge tests once available - cfg!(not(feature = "fake_crypto")) - && fork_name != ForkName::Base - && fork_name != ForkName::Merge + cfg!(not(feature = "fake_crypto")) && fork_name != ForkName::Base } fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> { @@ -61,8 +58,9 @@ impl Case for ForkTest { let spec = &E::default_spec(); let mut result = match fork_name { + ForkName::Base => panic!("phase0 not supported"), ForkName::Altair => upgrade_to_altair(&mut result_state, spec).map(|_| result_state), - _ => panic!("unknown fork: {:?}", fork_name), + ForkName::Merge => upgrade_to_bellatrix(&mut result_state, spec).map(|_| result_state), }; compare_beacon_state_results_without_caches(&mut result, &mut expected) diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index ecdfebc2863..608429a9cb2 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -154,15 +154,10 @@ impl Case for ForkChoiceTest { fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> { let tester = Tester::new(self, fork_choice_spec::(fork_name))?; - // TODO(merge): enable these tests before production. - // This test will fail until this PR is merged and released: - // - // https://github.com/ethereum/consensus-specs/pull/2760 - if self.description == "shorter_chain_but_heavier_weight" - // This test is skipped until we can do retrospective confirmations of the terminal - // block after an optimistic sync. - || self.description == "block_lookup_failed" - { + // TODO(merge): re-enable this test before production. + // This test is skipped until we can do retrospective confirmations of the terminal + // block after an optimistic sync. 
+ if self.description == "block_lookup_failed" { return Err(Error::SkippedKnownFailure); }; diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index d833846e471..195df7f3822 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -239,7 +239,6 @@ impl Operation for ExecutionPayload { spec: &ChainSpec, extra: &Operations, ) -> Result<(), BlockProcessingError> { - // FIXME(merge): we may want to plumb the validity bool into state processing let valid = extra .execution_metadata .as_ref() diff --git a/testing/ef_tests/src/cases/transition.rs b/testing/ef_tests/src/cases/transition.rs index 8e6ba226731..d2b1bb2c624 100644 --- a/testing/ef_tests/src/cases/transition.rs +++ b/testing/ef_tests/src/cases/transition.rs @@ -39,7 +39,8 @@ impl LoadCase for TransitionTest { spec.altair_fork_epoch = Some(metadata.fork_epoch); } ForkName::Merge => { - spec.merge_fork_epoch = Some(metadata.fork_epoch); + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(metadata.fork_epoch); } } @@ -73,10 +74,7 @@ impl Case for TransitionTest { fn is_enabled_for_fork(fork_name: ForkName) -> bool { // Upgrades exist targeting all forks except phase0/base. // Transition tests also need BLS. 
- // FIXME(merge): Merge transition tests are now available but not yet passing - cfg!(not(feature = "fake_crypto")) - && fork_name != ForkName::Base - && fork_name != ForkName::Merge + cfg!(not(feature = "fake_crypto")) && fork_name != ForkName::Base } fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index a1d5b0916df..636119cdbaf 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -2,7 +2,7 @@ use crate::cases::{self, Case, Cases, EpochTransition, LoadCase, Operation}; use crate::type_name; use crate::type_name::TypeName; use derivative::Derivative; -use std::fs; +use std::fs::{self, DirEntry}; use std::marker::PhantomData; use std::path::PathBuf; use types::{BeaconState, EthSpec, ForkName}; @@ -31,30 +31,27 @@ pub trait Handler { } fn run_for_fork(&self, fork_name: ForkName) { - let fork_name_str = match fork_name { - ForkName::Base => "phase0", - ForkName::Altair => "altair", - ForkName::Merge => "merge", - }; + let fork_name_str = fork_name.to_string(); let handler_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")) .join("consensus-spec-tests") .join("tests") .join(Self::config_name()) - .join(fork_name_str) + .join(&fork_name_str) .join(Self::runner_name()) .join(self.handler_name()); // Iterate through test suites + let as_directory = |entry: Result| -> Option { + entry + .ok() + .filter(|e| e.file_type().map(|ty| ty.is_dir()).unwrap_or(false)) + }; let test_cases = fs::read_dir(&handler_path) .expect("handler dir exists") - .flat_map(|entry| { - entry - .ok() - .filter(|e| e.file_type().map(|ty| ty.is_dir()).unwrap_or(false)) - }) + .filter_map(as_directory) .flat_map(|suite| fs::read_dir(suite.path()).expect("suite dir exists")) - .flat_map(Result::ok) + .filter_map(as_directory) .map(|test_case_dir| { let path = test_case_dir.path(); let case = Self::Case::load_from_dir(&path, fork_name).expect("test should 
load"); @@ -439,63 +436,21 @@ impl Handler for FinalityHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] -pub struct ForkChoiceGetHeadHandler(PhantomData); - -impl Handler for ForkChoiceGetHeadHandler { - type Case = cases::ForkChoiceTest; - - fn config_name() -> &'static str { - E::name() - } - - fn runner_name() -> &'static str { - "fork_choice" - } - - fn handler_name(&self) -> String { - "get_head".into() - } - - fn is_enabled_for_fork(&self, _fork_name: ForkName) -> bool { - // These tests check block validity (which may include signatures) and there is no need to - // run them with fake crypto. - cfg!(not(feature = "fake_crypto")) - } +pub struct ForkChoiceHandler { + handler_name: String, + _phantom: PhantomData, } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] -pub struct ForkChoiceOnBlockHandler(PhantomData); - -impl Handler for ForkChoiceOnBlockHandler { - type Case = cases::ForkChoiceTest; - - fn config_name() -> &'static str { - E::name() - } - - fn runner_name() -> &'static str { - "fork_choice" - } - - fn handler_name(&self) -> String { - "on_block".into() - } - - fn is_enabled_for_fork(&self, _fork_name: ForkName) -> bool { - // These tests check block validity (which may include signatures) and there is no need to - // run them with fake crypto. 
- cfg!(not(feature = "fake_crypto")) +impl ForkChoiceHandler { + pub fn new(handler_name: &str) -> Self { + Self { + handler_name: handler_name.into(), + _phantom: PhantomData, + } } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] -pub struct ForkChoiceOnMergeBlockHandler(PhantomData); - -impl Handler for ForkChoiceOnMergeBlockHandler { +impl Handler for ForkChoiceHandler { type Case = cases::ForkChoiceTest; fn config_name() -> &'static str { @@ -507,15 +462,20 @@ impl Handler for ForkChoiceOnMergeBlockHandler { } fn handler_name(&self) -> String { - "on_merge_block".into() + self.handler_name.clone() } fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { + // Merge block tests are only enabled for Bellatrix or later. + if self.handler_name == "on_merge_block" + && (fork_name == ForkName::Base || fork_name == ForkName::Altair) + { + return false; + } + // These tests check block validity (which may include signatures) and there is no need to // run them with fake crypto. cfg!(not(feature = "fake_crypto")) - // These tests only exist for the merge. 
- && fork_name == ForkName::Merge } } diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 2201bc5ee86..bdefec0014d 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -413,20 +413,26 @@ fn finality() { #[test] fn fork_choice_get_head() { - ForkChoiceGetHeadHandler::::default().run(); - ForkChoiceGetHeadHandler::::default().run(); + ForkChoiceHandler::::new("get_head").run(); + ForkChoiceHandler::::new("get_head").run(); } #[test] fn fork_choice_on_block() { - ForkChoiceOnBlockHandler::::default().run(); - ForkChoiceOnBlockHandler::::default().run(); + ForkChoiceHandler::::new("on_block").run(); + ForkChoiceHandler::::new("on_block").run(); } #[test] fn fork_choice_on_merge_block() { - ForkChoiceOnMergeBlockHandler::::default().run(); - ForkChoiceOnMergeBlockHandler::::default().run(); + ForkChoiceHandler::::new("on_merge_block").run(); + ForkChoiceHandler::::new("on_merge_block").run(); +} + +#[test] +fn fork_choice_ex_ante() { + ForkChoiceHandler::::new("ex_ante").run(); + ForkChoiceHandler::::new("ex_ante").run(); } #[test] From 95b3183cb41f4d2fc97becf8a1c97c68af3a9760 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 20 Jan 2022 01:31:53 +0000 Subject: [PATCH 31/56] Make /config/spec backwards compat for VC (#2934) ## Proposed Changes Restore compatibility with beacon nodes using the `MERGE` naming by: 1. Adding defaults for the Bellatrix `Config` fields 2. Not attempting to read (or serve) the Bellatrix preset on `/config/spec`. 
I've confirmed that this works with Infura, and just logs a warning: ``` Jan 20 10:51:31.078 INFO Connected to beacon node endpoint: https://eth2-beacon-mainnet.infura.io/, version: teku/v22.1.0/linux-x86_64/-eclipseadoptium-openjdk64bitservervm-java-17 Jan 20 10:51:31.344 WARN Beacon node config does not match exactly, advice: check that the BN is updated and configured for any upcoming forks, endpoint: https://eth2-beacon-mainnet.infura.io/ Jan 20 10:51:31.344 INFO Initialized beacon node connections available: 1, total: 1 ``` --- consensus/types/src/chain_spec.rs | 13 +++++++++++++ consensus/types/src/config_and_preset.rs | 10 +++++----- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index f5ed2717c51..0bd0acb9633 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -631,8 +631,12 @@ pub struct Config { #[serde(deserialize_with = "deserialize_fork_epoch")] pub altair_fork_epoch: Option>, + // TODO(merge): remove this default + #[serde(default = "default_bellatrix_fork_version")] #[serde(with = "eth2_serde_utils::bytes_4_hex")] bellatrix_fork_version: [u8; 4], + // TODO(merge): remove this default + #[serde(default = "default_bellatrix_fork_epoch")] #[serde(serialize_with = "serialize_fork_epoch")] #[serde(deserialize_with = "deserialize_fork_epoch")] pub bellatrix_fork_epoch: Option>, @@ -669,6 +673,15 @@ pub struct Config { deposit_contract_address: Address, } +fn default_bellatrix_fork_version() -> [u8; 4] { + // This value shouldn't be used. 
+ [0xff, 0xff, 0xff, 0xff] +} + +fn default_bellatrix_fork_epoch() -> Option> { + None +} + impl Default for Config { fn default() -> Self { let chain_spec = MainnetEthSpec::default_spec(); diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/config_and_preset.rs index 18c559ca2cc..d367cfc49d2 100644 --- a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/config_and_preset.rs @@ -14,9 +14,9 @@ pub struct ConfigAndPreset { pub base_preset: BasePreset, #[serde(flatten)] pub altair_preset: AltairPreset, - #[serde(flatten)] - pub bellatrix_preset: BellatrixPreset, - + // TODO(merge): re-enable + // #[serde(flatten)] + // pub bellatrix_preset: BellatrixPreset, /// The `extra_fields` map allows us to gracefully decode fields intended for future hard forks. #[serde(flatten)] pub extra_fields: HashMap, @@ -27,14 +27,14 @@ impl ConfigAndPreset { let config = Config::from_chain_spec::(spec); let base_preset = BasePreset::from_chain_spec::(spec); let altair_preset = AltairPreset::from_chain_spec::(spec); - let bellatrix_preset = BellatrixPreset::from_chain_spec::(spec); + // TODO(merge): re-enable + let _bellatrix_preset = BellatrixPreset::from_chain_spec::(spec); let extra_fields = HashMap::new(); Self { config, base_preset, altair_preset, - bellatrix_preset, extra_fields, } } From 79db2d4deb6a47947699d8a4a39347c19ee6e5d6 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 20 Jan 2022 03:39:41 +0000 Subject: [PATCH 32/56] v2.1.0 (#2928) ## Issue Addressed NA ## Proposed Changes Bump to `v2.1.0`. 
## Additional Info NA --- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 2 +- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e393d6ea185..bdb82218718 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -331,7 +331,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "2.1.0-rc.1" +version = "2.1.0" dependencies = [ "beacon_chain", "clap", @@ -497,7 +497,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "2.1.0-rc.1" +version = "2.1.0" dependencies = [ "beacon_node", "clap", @@ -2825,7 +2825,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "2.1.0-rc.1" +version = "2.1.0" dependencies = [ "account_utils", "bls", @@ -3350,7 +3350,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "2.1.0-rc.1" +version = "2.1.0" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index f8d8c8be5ca..eecef0349e9 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "2.1.0-rc.1" +version = "2.1.0" authors = ["Paul Hauner ", "Age Manning "] edition = "2018" diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index ddb258d76fe..6f2baf132c2 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -16,7 +16,7 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v2.1.0-rc.1-", + prefix = "Lighthouse/v2.1.0-", fallback = "unknown" ); diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index d4ab41a3b2a..a6062e5b8cb 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" 
description = "Lighthouse CLI (modeled after zcli)" -version = "2.1.0-rc.1" +version = "2.1.0" authors = ["Paul Hauner "] edition = "2018" diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 9511c1b496d..787b992a22d 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "2.1.0-rc.1" +version = "2.1.0" authors = ["Sigma Prime "] edition = "2018" autotests = false From d06f87486a5d0a2f29053fe4b19b743dea9d865b Mon Sep 17 00:00:00 2001 From: Mac L Date: Thu, 20 Jan 2022 09:14:19 +0000 Subject: [PATCH 33/56] Support duplicate keys in HTTP API query strings (#2908) ## Issues Addressed Closes #2739 Closes #2812 ## Proposed Changes Support the deserialization of query strings containing duplicate keys into their corresponding types. As `warp` does not support this feature natively (as discussed in #2739), it relies on the external library [`serde_array_query`](https://github.com/sigp/serde_array_query) (written by @michaelsproul) This is backwards compatible meaning that both of the following requests will produce the same output: ``` curl "http://localhost:5052/eth/v1/events?topics=head,block" ``` ``` curl "http://localhost:5052/eth/v1/events?topics=head&topics=block" ``` ## Additional Info Certain error messages have changed slightly. This only affects endpoints which accept multiple values. For example: ``` {"code":400,"message":"BAD_REQUEST: invalid query: Invalid query string","stacktraces":[]} ``` is now ``` {"code":400,"message":"BAD_REQUEST: unable to parse query","stacktraces":[]} ``` The serve order of the endpoints `get_beacon_state_validators` and `get_beacon_state_validators_id` have flipped: ```rust .or(get_beacon_state_validators_id.boxed()) .or(get_beacon_state_validators.boxed()) ``` This is to ensure proper error messages when filter fallback occurs due to the use of the `and_then` filter. 
## Future Work - Cleanup / remove filter fallback behaviour by substituting `and_then` with `then` where appropriate. - Add regression tests for HTTP API error messages. ## Credits - @mooori for doing the ground work of investigating possible solutions within the existing Rust ecosystem. - @michaelsproul for writing [`serde_array_query`](https://github.com/sigp/serde_array_query) and for helping debug the behaviour of the `warp` filter fallback leading to incorrect error messages. --- Cargo.lock | 11 +++++ beacon_node/http_api/src/lib.rs | 47 +++++++++++-------- common/eth2/src/types.rs | 81 +++++++++++++++++++++++++++------ common/warp_utils/Cargo.toml | 1 + common/warp_utils/src/lib.rs | 1 + common/warp_utils/src/query.rs | 22 +++++++++ 6 files changed, 130 insertions(+), 33 deletions(-) create mode 100644 common/warp_utils/src/query.rs diff --git a/Cargo.lock b/Cargo.lock index bdb82218718..4d487ae701a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5266,6 +5266,16 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "serde_array_query" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d89c6e82b1005b33d5b2bbc47096800e5ad6b67ef5636f9c13ad29a6935734a7" +dependencies = [ + "serde", + "serde_urlencoded", +] + [[package]] name = "serde_cbor" version = "0.11.2" @@ -6823,6 +6833,7 @@ dependencies = [ "lighthouse_metrics", "safe_arith", "serde", + "serde_array_query", "state_processing", "tokio", "types", diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 85c464466c1..b0907a30c1b 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -55,7 +55,10 @@ use warp::http::StatusCode; use warp::sse::Event; use warp::Reply; use warp::{http::Response, Filter}; -use warp_utils::task::{blocking_json_task, blocking_task}; +use warp_utils::{ + query::multi_key_query, + task::{blocking_json_task, blocking_task}, +}; const API_PREFIX: &str = 
"eth"; @@ -505,12 +508,13 @@ pub fn serve( .clone() .and(warp::path("validator_balances")) .and(warp::path::end()) - .and(warp::query::()) + .and(multi_key_query::()) .and_then( |state_id: StateId, chain: Arc>, - query: api_types::ValidatorBalancesQuery| { + query_res: Result| { blocking_json_task(move || { + let query = query_res?; state_id .map_state(&chain, |state| { Ok(state @@ -521,7 +525,7 @@ pub fn serve( // filter by validator id(s) if provided .filter(|(index, (validator, _))| { query.id.as_ref().map_or(true, |ids| { - ids.0.iter().any(|id| match id { + ids.iter().any(|id| match id { ValidatorId::PublicKey(pubkey) => { &validator.pubkey == pubkey } @@ -548,11 +552,14 @@ pub fn serve( let get_beacon_state_validators = beacon_states_path .clone() .and(warp::path("validators")) - .and(warp::query::()) .and(warp::path::end()) + .and(multi_key_query::()) .and_then( - |state_id: StateId, chain: Arc>, query: api_types::ValidatorsQuery| { + |state_id: StateId, + chain: Arc>, + query_res: Result| { blocking_json_task(move || { + let query = query_res?; state_id .map_state(&chain, |state| { let epoch = state.current_epoch(); @@ -566,7 +573,7 @@ pub fn serve( // filter by validator id(s) if provided .filter(|(index, (validator, _))| { query.id.as_ref().map_or(true, |ids| { - ids.0.iter().any(|id| match id { + ids.iter().any(|id| match id { ValidatorId::PublicKey(pubkey) => { &validator.pubkey == pubkey } @@ -586,8 +593,8 @@ pub fn serve( let status_matches = query.status.as_ref().map_or(true, |statuses| { - statuses.0.contains(&status) - || statuses.0.contains(&status.superstatus()) + statuses.contains(&status) + || statuses.contains(&status.superstatus()) }); if status_matches { @@ -1721,11 +1728,13 @@ pub fn serve( .and(warp::path("node")) .and(warp::path("peers")) .and(warp::path::end()) - .and(warp::query::()) + .and(multi_key_query::()) .and(network_globals.clone()) .and_then( - |query: api_types::PeersQuery, network_globals: Arc>| { + |query_res: Result, + 
network_globals: Arc>| { blocking_json_task(move || { + let query = query_res?; let mut peers: Vec = Vec::new(); network_globals .peers @@ -1755,11 +1764,11 @@ pub fn serve( ); let state_matches = query.state.as_ref().map_or(true, |states| { - states.0.iter().any(|state_param| *state_param == state) + states.iter().any(|state_param| *state_param == state) }); let direction_matches = query.direction.as_ref().map_or(true, |directions| { - directions.0.iter().any(|dir_param| *dir_param == direction) + directions.iter().any(|dir_param| *dir_param == direction) }); if state_matches && direction_matches { @@ -2534,16 +2543,18 @@ pub fn serve( let get_events = eth1_v1 .and(warp::path("events")) .and(warp::path::end()) - .and(warp::query::()) + .and(multi_key_query::()) .and(chain_filter) .and_then( - |topics: api_types::EventQuery, chain: Arc>| { + |topics_res: Result, + chain: Arc>| { blocking_task(move || { + let topics = topics_res?; // for each topic subscribed spawn a new subscription - let mut receivers = Vec::with_capacity(topics.topics.0.len()); + let mut receivers = Vec::with_capacity(topics.topics.len()); if let Some(event_handler) = chain.event_handler.as_ref() { - for topic in topics.topics.0.clone() { + for topic in topics.topics { let receiver = match topic { api_types::EventTopic::Head => event_handler.subscribe_head(), api_types::EventTopic::Block => event_handler.subscribe_block(), @@ -2606,8 +2617,8 @@ pub fn serve( .or(get_beacon_state_fork.boxed()) .or(get_beacon_state_finality_checkpoints.boxed()) .or(get_beacon_state_validator_balances.boxed()) - .or(get_beacon_state_validators.boxed()) .or(get_beacon_state_validators_id.boxed()) + .or(get_beacon_state_validators.boxed()) .or(get_beacon_state_committees.boxed()) .or(get_beacon_state_sync_committees.boxed()) .or(get_beacon_headers.boxed()) diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index be65dd8776c..169a8de59e5 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs 
@@ -428,10 +428,13 @@ pub struct AttestationPoolQuery { pub committee_index: Option, } -#[derive(Deserialize)] +#[derive(Debug, Deserialize)] +#[serde(deny_unknown_fields)] pub struct ValidatorsQuery { - pub id: Option>, - pub status: Option>, + #[serde(default, deserialize_with = "option_query_vec")] + pub id: Option>, + #[serde(default, deserialize_with = "option_query_vec")] + pub status: Option>, } #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] @@ -520,27 +523,68 @@ pub struct SyncingData { #[derive(Clone, PartialEq, Debug, Deserialize)] #[serde(try_from = "String", bound = "T: FromStr")] -pub struct QueryVec(pub Vec); +pub struct QueryVec { + values: Vec, +} + +fn query_vec<'de, D, T>(deserializer: D) -> Result, D::Error> +where + D: serde::Deserializer<'de>, + T: FromStr, +{ + let vec: Vec> = Deserialize::deserialize(deserializer)?; + Ok(Vec::from(QueryVec::from(vec))) +} + +fn option_query_vec<'de, D, T>(deserializer: D) -> Result>, D::Error> +where + D: serde::Deserializer<'de>, + T: FromStr, +{ + let vec: Vec> = Deserialize::deserialize(deserializer)?; + if vec.is_empty() { + return Ok(None); + } + + Ok(Some(Vec::from(QueryVec::from(vec)))) +} + +impl From>> for QueryVec { + fn from(vecs: Vec>) -> Self { + Self { + values: vecs.into_iter().flat_map(|qv| qv.values).collect(), + } + } +} impl TryFrom for QueryVec { type Error = String; fn try_from(string: String) -> Result { if string.is_empty() { - return Ok(Self(vec![])); + return Ok(Self { values: vec![] }); } - string - .split(',') - .map(|s| s.parse().map_err(|_| "unable to parse".to_string())) - .collect::, String>>() - .map(Self) + Ok(Self { + values: string + .split(',') + .map(|s| s.parse().map_err(|_| "unable to parse query".to_string())) + .collect::, String>>()?, + }) + } +} + +impl From> for Vec { + fn from(vec: QueryVec) -> Vec { + vec.values } } #[derive(Clone, Deserialize)] +#[serde(deny_unknown_fields)] pub struct ValidatorBalancesQuery { - pub id: Option>, + #[serde(default, 
deserialize_with = "option_query_vec")] + pub id: Option>, } #[derive(Clone, Serialize, Deserialize)] @@ -602,9 +646,12 @@ pub struct BeaconCommitteeSubscription { } #[derive(Deserialize)] +#[serde(deny_unknown_fields)] pub struct PeersQuery { - pub state: Option>, - pub direction: Option>, + #[serde(default, deserialize_with = "option_query_vec")] + pub state: Option>, + #[serde(default, deserialize_with = "option_query_vec")] + pub direction: Option>, } #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] @@ -858,8 +905,10 @@ impl EventKind { } #[derive(Clone, Deserialize)] +#[serde(deny_unknown_fields)] pub struct EventQuery { - pub topics: QueryVec, + #[serde(deserialize_with = "query_vec")] + pub topics: Vec, } #[derive(Debug, Clone, Copy, PartialEq, Deserialize)] @@ -961,7 +1010,9 @@ mod tests { fn query_vec() { assert_eq!( QueryVec::try_from("0,1,2".to_string()).unwrap(), - QueryVec(vec![0_u64, 1, 2]) + QueryVec { + values: vec![0_u64, 1, 2] + } ); } } diff --git a/common/warp_utils/Cargo.toml b/common/warp_utils/Cargo.toml index f99d7773b9a..09b6f125fce 100644 --- a/common/warp_utils/Cargo.toml +++ b/common/warp_utils/Cargo.toml @@ -18,3 +18,4 @@ tokio = { version = "1.14.0", features = ["sync"] } headers = "0.3.2" lighthouse_metrics = { path = "../lighthouse_metrics" } lazy_static = "1.4.0" +serde_array_query = "0.1.0" diff --git a/common/warp_utils/src/lib.rs b/common/warp_utils/src/lib.rs index 5f37dde87de..346361b18fe 100644 --- a/common/warp_utils/src/lib.rs +++ b/common/warp_utils/src/lib.rs @@ -3,5 +3,6 @@ pub mod cors; pub mod metrics; +pub mod query; pub mod reject; pub mod task; diff --git a/common/warp_utils/src/query.rs b/common/warp_utils/src/query.rs new file mode 100644 index 00000000000..c5ed5c5f128 --- /dev/null +++ b/common/warp_utils/src/query.rs @@ -0,0 +1,22 @@ +use crate::reject::custom_bad_request; +use serde::Deserialize; +use warp::Filter; + +// Custom query filter using `serde_array_query`. 
+// This allows duplicate keys inside query strings. +pub fn multi_key_query<'de, T: Deserialize<'de>>( +) -> impl warp::Filter,), Error = std::convert::Infallible> + Copy +{ + raw_query().then(|query_str: String| async move { + serde_array_query::from_str(&query_str).map_err(|e| custom_bad_request(e.to_string())) + }) +} + +// This ensures that empty query strings are still accepted. +// This is because warp::filters::query::raw() does not allow empty query strings +// but warp::query::() does. +fn raw_query() -> impl Filter + Copy { + warp::filters::query::raw() + .or(warp::any().map(String::default)) + .unify() +} From 0116c8d464f9e0c968b3e776a957ca50f12ba0fa Mon Sep 17 00:00:00 2001 From: eklm Date: Thu, 20 Jan 2022 09:14:21 +0000 Subject: [PATCH 34/56] Change type of extra fields in ConfigAndPreset (#2913) ## Issue Addressed #2900 ## Proposed Changes Change type of extra_fields in ConfigAndPreset so it can contain non string values (inside serde_json::Value) --- Cargo.lock | 1 + consensus/types/Cargo.toml | 1 + consensus/types/src/config_and_preset.rs | 10 ++++++++-- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4d487ae701a..bbf8de27e02 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6495,6 +6495,7 @@ dependencies = [ "safe_arith", "serde", "serde_derive", + "serde_json", "serde_yaml", "slog", "state_processing", diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index ba187fb9a85..bc013fe42d3 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -44,6 +44,7 @@ lazy_static = "1.4.0" parking_lot = "0.11.1" itertools = "0.10.0" superstruct = "0.4.0" +serde_json = "1.0.74" [dev-dependencies] criterion = "0.3.3" diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/config_and_preset.rs index d367cfc49d2..affda1a061d 100644 --- a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/config_and_preset.rs @@ -1,5 +1,6 @@ use crate::{AltairPreset, 
BasePreset, BellatrixPreset, ChainSpec, Config, EthSpec}; use serde_derive::{Deserialize, Serialize}; +use serde_json::Value; use std::collections::HashMap; /// Fusion of a runtime-config with the compile-time preset values. @@ -19,7 +20,7 @@ pub struct ConfigAndPreset { // pub bellatrix_preset: BellatrixPreset, /// The `extra_fields` map allows us to gracefully decode fields intended for future hard forks. #[serde(flatten)] - pub extra_fields: HashMap, + pub extra_fields: HashMap, } impl ConfigAndPreset { @@ -83,7 +84,7 @@ impl ConfigAndPreset { ), ]; for (key, value) in fields { - self.extra_fields.insert(key.to_uppercase(), value); + self.extra_fields.insert(key.to_uppercase(), value.into()); } } } @@ -107,8 +108,13 @@ mod test { let mut yamlconfig = ConfigAndPreset::from_chain_spec::(&mainnet_spec); let (k1, v1) = ("SAMPLE_HARDFORK_KEY1", "123456789"); let (k2, v2) = ("SAMPLE_HARDFORK_KEY2", "987654321"); + let (k3, v3) = ("SAMPLE_HARDFORK_KEY3", 32); + let (k4, v4) = ("SAMPLE_HARDFORK_KEY4", Value::Null); yamlconfig.extra_fields.insert(k1.into(), v1.into()); yamlconfig.extra_fields.insert(k2.into(), v2.into()); + yamlconfig.extra_fields.insert(k3.into(), v3.into()); + yamlconfig.extra_fields.insert(k4.into(), v4); + serde_yaml::to_writer(writer, &yamlconfig).expect("failed to write or serialize"); let reader = OpenOptions::new() From a8ae9c84189e3098d138da14391ec04c766f00b2 Mon Sep 17 00:00:00 2001 From: Akihito Nakano Date: Thu, 20 Jan 2022 09:14:23 +0000 Subject: [PATCH 35/56] Add linkcheck workflow (#2918) ## Issue Addressed Resolves #2889 ## Additional Info I have checked that the `linkcheck` workflow runs and detects broken links as expected, in https://github.com/ackintosh/lighthouse/pull/1. 
--- .github/workflows/linkcheck.yml | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 .github/workflows/linkcheck.yml diff --git a/.github/workflows/linkcheck.yml b/.github/workflows/linkcheck.yml new file mode 100644 index 00000000000..c23ee8df36a --- /dev/null +++ b/.github/workflows/linkcheck.yml @@ -0,0 +1,30 @@ +name: linkcheck + +on: + push: + branches: + - unstable + pull_request: + paths: + - 'book/**' + +jobs: + linkcheck: + name: Check broken links + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v2 + + - name: Create docker network + run: docker network create book + + - name: Run mdbook server + run: docker run -v ${{ github.workspace }}/book:/book --network book --name book -p 3000:3000 -d peaceiris/mdbook:latest serve --hostname 0.0.0.0 + + - name: Print logs + run: docker logs book + + - name: Run linkcheck + run: docker run --network book tennox/linkcheck:latest book:3000 From fc7a1a7dc77e1db55d8b63b6dfa84cf8397b7a25 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 20 Jan 2022 09:14:25 +0000 Subject: [PATCH 36/56] Allow disconnected states to introduce new peers without warning (#2922) ## Issue Addressed We emit a warning to verify that all peer connection state information is consistent. A warning is given under one edge case; We try to dial a peer with peer-id X and multiaddr Y. The peer responds to multiaddr Y with a different peer-id, Z. The dialing to the peer fails, but libp2p injects the failed attempt as peer-id Z. In this instance, our PeerDB tries to add a new peer in the disconnected state under a previously unknown peer-id. This is harmless and so this PR permits this behaviour without logging a warning. 
--- beacon_node/lighthouse_network/src/peer_manager/peerdb.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index bd735c02eb3..cddff1218cd 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -666,9 +666,11 @@ impl PeerDB { // connection state for an unknown peer. if !matches!( new_state, - NewConnectionState::Connected { .. } - | NewConnectionState::Disconnecting { .. } - | NewConnectionState::Dialing { .. } + NewConnectionState::Connected { .. } // We have established a new connection (peer may not have been seen before) + | NewConnectionState::Disconnecting { .. }// We are disconnecting from a peer that may not have been registered before + | NewConnectionState::Dialing { .. } // We are dialing a potentially new peer + | NewConnectionState::Disconnected { .. } // Dialing a peer that responds by a different ID can be immediately + // disconnected without having being stored in the db before ) { warn!(log_ref, "Updating state of unknown peer"; "peer_id" => %peer_id, "new_state" => ?new_state); From f0f327af0c47d6024ca7b3ee06902093830e0c41 Mon Sep 17 00:00:00 2001 From: Rishi Kumar Ray Date: Thu, 20 Jan 2022 09:14:26 +0000 Subject: [PATCH 37/56] Removed all disable_forks (#2925) #2923 Which issue # does this PR address? There's a redundant field on the BeaconChain called disabled_forks that was once part of our fork-aware networking (#953) but which is no longer used and could be deleted. so Removed all references to disabled_forks so that the code compiles and git grep disabled_forks returns no results. ## Proposed Changes Please list or describe the changes introduced by this PR. 
Removed all references of disabled_forks Co-authored-by: Divma <26765164+divagant-martian@users.noreply.github.com> --- beacon_node/beacon_chain/src/beacon_chain.rs | 2 -- beacon_node/beacon_chain/src/builder.rs | 10 ---------- beacon_node/client/src/builder.rs | 2 -- beacon_node/client/src/config.rs | 3 --- 4 files changed, 17 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 4198425a7e7..4e1d54dc136 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -336,8 +336,6 @@ pub struct BeaconChain { pub early_attester_cache: EarlyAttesterCache, /// A cache used to keep track of various block timings. pub block_times_cache: Arc>, - /// A list of any hard-coded forks that have been disabled. - pub disabled_forks: Vec, /// Sender given to tasks, so that if they encounter a state in which execution cannot /// continue they can request that everything shuts down. pub shutdown_sender: Sender, diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 4662d05d3db..24a9a916bba 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -84,7 +84,6 @@ pub struct BeaconChainBuilder { validator_pubkey_cache: Option>, spec: ChainSpec, chain_config: ChainConfig, - disabled_forks: Vec, log: Option, graffiti: Graffiti, slasher: Option>>, @@ -122,7 +121,6 @@ where slot_clock: None, shutdown_sender: None, head_tracker: None, - disabled_forks: Vec::new(), validator_pubkey_cache: None, spec: TEthSpec::default_spec(), chain_config: ChainConfig::default(), @@ -184,13 +182,6 @@ where self.log = Some(log); self } - - /// Sets a list of hard-coded forks that will not be activated. - pub fn disabled_forks(mut self, disabled_forks: Vec) -> Self { - self.disabled_forks = disabled_forks; - self - } - /// Attempt to load an existing eth1 cache from the builder's `Store`. 
pub fn get_persisted_eth1_backend(&self) -> Result, String> { let store = self @@ -764,7 +755,6 @@ where validator_pubkey_cache: TimeoutRwLock::new(validator_pubkey_cache), attester_cache: <_>::default(), early_attester_cache: <_>::default(), - disabled_forks: self.disabled_forks, shutdown_sender: self .shutdown_sender .ok_or("Cannot build without a shutdown sender.")?, diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index d497af6485c..550d89125eb 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -135,7 +135,6 @@ where let chain_spec = self.chain_spec.clone(); let runtime_context = self.runtime_context.clone(); let eth_spec_instance = self.eth_spec_instance.clone(); - let disabled_forks = config.disabled_forks.clone(); let chain_config = config.chain.clone(); let graffiti = config.graffiti; @@ -169,7 +168,6 @@ where .store(store) .custom_spec(spec.clone()) .chain_config(chain_config) - .disabled_forks(disabled_forks) .graffiti(graffiti) .event_handler(event_handler) .execution_layer(execution_layer) diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index f4519e05c87..97689622600 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -58,8 +58,6 @@ pub struct Config { /// This is the method used for the 2019 client interop in Canada. pub dummy_eth1_backend: bool, pub sync_eth1_chain: bool, - /// A list of hard-coded forks that will be disabled. - pub disabled_forks: Vec, /// Graffiti to be inserted everytime we create a block. pub graffiti: Graffiti, /// When true, automatically monitor validators using the HTTP API. 
@@ -98,7 +96,6 @@ impl Default for Config { eth1: <_>::default(), execution_endpoints: None, suggested_fee_recipient: None, - disabled_forks: Vec::new(), graffiti: Graffiti::default(), http_api: <_>::default(), http_metrics: <_>::default(), From 799aedd6319b13032afdca03d845263e27f098d0 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Thu, 20 Jan 2022 23:05:42 +0000 Subject: [PATCH 38/56] Add default config options for transition constants (#2940) ## Issue Addressed Continuation to #2934 ## Proposed Changes Currently, we have the transition fields in the config (`TERMINAL_TOTAL_DIFFICULTY`, `TERMINAL_BLOCK_HASH` and `TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH`) as mandatory fields. This is causing compatibility issues with other client BN's (nimbus and teku v22.1.0) which don't return these fields on a `eth/v1/config/spec` api call. Since we don't use this values until the merge, I think it's okay to have default values set for these fields as well to ensure compatibility. --- consensus/types/src/chain_spec.rs | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 0bd0acb9633..f191eb86710 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -611,9 +611,15 @@ pub struct Config { #[serde(default)] pub preset_base: String, + // TODO(merge): remove this default + #[serde(default = "default_terminal_total_difficulty")] #[serde(with = "eth2_serde_utils::quoted_u256")] pub terminal_total_difficulty: Uint256, + // TODO(merge): remove this default + #[serde(default = "default_terminal_block_hash")] pub terminal_block_hash: Hash256, + // TODO(merge): remove this default + #[serde(default = "default_terminal_block_hash_activation_epoch")] pub terminal_block_hash_activation_epoch: Epoch, #[serde(with = "eth2_serde_utils::quoted_u64")] @@ -682,6 +688,20 @@ fn default_bellatrix_fork_epoch() -> Option> { None } +fn default_terminal_total_difficulty() -> 
Uint256 { + "115792089237316195423570985008687907853269984665640564039457584007913129638912" + .parse() + .unwrap() +} + +fn default_terminal_block_hash() -> Hash256 { + Hash256::zero() +} + +fn default_terminal_block_hash_activation_epoch() -> Epoch { + Epoch::new(u64::MAX) +} + impl Default for Config { fn default() -> Self { let chain_spec = MainnetEthSpec::default_spec(); From f35a33716be6b90e39d5f7bde6b00ec8fbfa7acd Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 21 Jan 2022 06:07:20 +0000 Subject: [PATCH 39/56] Quote validator indices when posting duties (#2943) ## Proposed Changes This PR establishes compatibility between Lighthouse's VC and Nimbus's BN. Lighthouse was previously `POST`ing unquoted lists of validator indices to the attester and sync duties endpoints which were (correctly) not accepted by Nimbus. These lists had slipped through the cracks because they didn't have an explicit wrapper type to add `serde` annotations to. I've added the `ValidatorIndexDataRef` newtype in order to implement the modified serialisation behaviour. ## Testing Combined with https://github.com/sigp/lighthouse/pull/2940, I've confirmed that this PR allows my Lighthouse VC on Prater to validate with the public Nimbus BN listed here: https://github.com/status-im/nimbus-eth2#quickly-test-your-tooling-against-nimbus. I haven't had a block proposal yet, but attestations and sync committee messages are working. ## Additional Info This may also provide compatibility with Prysm BNs but I haven't had a chance to test that yet. 
--- common/eth2/src/lib.rs | 16 ++++++++++++---- common/eth2/src/types.rs | 7 +++++++ 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index bdad6728667..153667d7e95 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -1256,8 +1256,12 @@ impl BeaconNodeHttpClient { .push("attester") .push(&epoch.to_string()); - self.post_with_timeout_and_response(path, &indices, self.timeouts.attester_duties) - .await + self.post_with_timeout_and_response( + path, + &ValidatorIndexDataRef(indices), + self.timeouts.attester_duties, + ) + .await } /// `POST validator/aggregate_and_proofs` @@ -1356,8 +1360,12 @@ impl BeaconNodeHttpClient { .push("sync") .push(&epoch.to_string()); - self.post_with_timeout_and_response(path, &indices, self.timeouts.sync_duties) - .await + self.post_with_timeout_and_response( + path, + &ValidatorIndexDataRef(indices), + self.timeouts.sync_duties, + ) + .await } } diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 169a8de59e5..a761b9ed124 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -591,6 +591,13 @@ pub struct ValidatorBalancesQuery { #[serde(transparent)] pub struct ValidatorIndexData(#[serde(with = "eth2_serde_utils::quoted_u64_vec")] pub Vec); +/// Borrowed variant of `ValidatorIndexData`, for serializing/sending. +#[derive(Clone, Copy, Serialize)] +#[serde(transparent)] +pub struct ValidatorIndexDataRef<'a>( + #[serde(serialize_with = "eth2_serde_utils::quoted_u64_vec::serialize")] pub &'a [u64], +); + #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct AttesterData { pub pubkey: PublicKeyBytes, From ca29b580a24adca1dfa647e22160130900e85376 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Mon, 24 Jan 2022 12:08:00 +0000 Subject: [PATCH 40/56] Increase target subnet peers (#2948) In the latest release we decreased the target number of subnet peers. 
It appears this could be causing issues in some cases and so reverting it back to the previous number it wise. A larger PR that follows this will address some other related discovery issues and peer management around subnet peer discovery. --- beacon_node/lighthouse_network/src/behaviour/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/lighthouse_network/src/behaviour/mod.rs b/beacon_node/lighthouse_network/src/behaviour/mod.rs index 61ba855f6a4..2a799610947 100644 --- a/beacon_node/lighthouse_network/src/behaviour/mod.rs +++ b/beacon_node/lighthouse_network/src/behaviour/mod.rs @@ -53,7 +53,7 @@ use types::{ pub mod gossipsub_scoring_parameters; /// The number of peers we target per subnet for discovery queries. -pub const TARGET_SUBNET_PEERS: usize = 2; +pub const TARGET_SUBNET_PEERS: usize = 6; const MAX_IDENTIFY_ADDRESSES: usize = 10; From b9b3ea70de6e78a586a6860ac32f4f3a9ccb62a6 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Mon, 24 Jan 2022 22:33:02 +0000 Subject: [PATCH 41/56] Fix metric name for monitoring (#2950) ## Issue Addressed Resolves #2949 ## Proposed Changes Fix metric naming for libp2p peer count. 
--- common/monitoring_api/src/gather.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/common/monitoring_api/src/gather.rs b/common/monitoring_api/src/gather.rs index 16965f43cdf..8699a8cf2c9 100644 --- a/common/monitoring_api/src/gather.rs +++ b/common/monitoring_api/src/gather.rs @@ -67,11 +67,7 @@ const BEACON_PROCESS_METRICS: &[JsonMetric] = &[ "disk_beaconchain_bytes_total", JsonType::Integer, ), - JsonMetric::new( - "libp2p_peer_connected_peers_total", - "network_peers_connected", - JsonType::Integer, - ), + JsonMetric::new("libp2p_peers", "network_peers_connected", JsonType::Integer), JsonMetric::new( "libp2p_outbound_bytes", "network_libp2p_bytes_total_transmit", From 69288f6164154c870bfeff69ff27dfc6f9fbadb3 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 24 Jan 2022 22:33:04 +0000 Subject: [PATCH 42/56] VC: don't warn if BN config doesn't match exactly (#2952) ## Proposed Changes Remove the check for exact equality on the beacon node spec when polling `/config/spec` from the VC. This check was always overzealous, and mostly served to check that the BN was configured for upcoming forks. I've replaced it by explicit checks of the `altair_fork_epoch` and `bellatrix_fork_epoch` instead. ## Additional Info We should come back to this and clean it up so that we can retain compatibility while removing the field `default`s we installed. 
--- Cargo.lock | 4 ++-- validator_client/src/beacon_node_fallback.rs | 19 ++++++++----------- 2 files changed, 10 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bbf8de27e02..e16f4996ff7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5994,9 +5994,9 @@ dependencies = [ [[package]] name = "thread_local" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8018d24e04c95ac8790716a5987d0fec4f8b27249ffa0f7d33f1369bdfb88cbd" +checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" dependencies = [ "once_cell", ] diff --git a/validator_client/src/beacon_node_fallback.rs b/validator_client/src/beacon_node_fallback.rs index 487b5744d07..18780c3092c 100644 --- a/validator_client/src/beacon_node_fallback.rs +++ b/validator_client/src/beacon_node_fallback.rs @@ -253,22 +253,19 @@ impl CandidateBeaconNode { "our_genesis_fork" => ?spec.genesis_fork_version, ); return Err(CandidateError::Incompatible); - } else if *spec != beacon_node_spec { + } else if beacon_node_spec.altair_fork_epoch != spec.altair_fork_epoch { warn!( log, - "Beacon node config does not match exactly"; + "Beacon node has mismatched Altair fork epoch"; "endpoint" => %self.beacon_node, - "advice" => "check that the BN is updated and configured for any upcoming forks", + "endpoint_altair_fork_epoch" => ?beacon_node_spec.altair_fork_epoch, ); - debug!( - log, - "Beacon node config"; - "config" => ?beacon_node_spec, - ); - debug!( + } else if beacon_node_spec.bellatrix_fork_epoch != spec.bellatrix_fork_epoch { + warn!( log, - "Our config"; - "config" => ?spec, + "Beacon node has mismatched Bellatrix fork epoch"; + "endpoint" => %self.beacon_node, + "endpoint_bellatrix_fork_epoch" => ?beacon_node_spec.bellatrix_fork_epoch, ); } From 5f628a71d4b2a7e7761b30e726338bba07617cd2 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 25 Jan 2022 00:46:24 +0000 Subject: [PATCH 43/56] v2.1.1 (#2951) ## 
Issue Addressed NA ## Proposed Changes - Bump Lighthouse version to v2.1.1 - Update `thread_local` from v1.1.3 to v1.1.4 to address https://rustsec.org/advisories/RUSTSEC-2022-0006 ## Additional Info - ~~Blocked on #2950~~ - ~~Blocked on #2952~~ --- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 2 +- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e16f4996ff7..586cdaf1817 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -331,7 +331,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "2.1.0" +version = "2.1.1" dependencies = [ "beacon_chain", "clap", @@ -497,7 +497,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "2.1.0" +version = "2.1.1" dependencies = [ "beacon_node", "clap", @@ -2825,7 +2825,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "2.1.0" +version = "2.1.1" dependencies = [ "account_utils", "bls", @@ -3350,7 +3350,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "2.1.0" +version = "2.1.1" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index eecef0349e9..c8cd5152af0 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "2.1.0" +version = "2.1.1" authors = ["Paul Hauner ", "Age Manning "] edition = "2018" diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index 6f2baf132c2..a66ff66e5ce 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -16,7 +16,7 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v2.1.0-", + prefix = "Lighthouse/v2.1.1-", 
fallback = "unknown" ); diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index a6062e5b8cb..2b9541de3f6 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "2.1.0" +version = "2.1.1" authors = ["Paul Hauner "] edition = "2018" diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 787b992a22d..130322e0e9c 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "2.1.0" +version = "2.1.1" authors = ["Sigma Prime "] edition = "2018" autotests = false From 150931950dd2dd531d0643314ae9e3c46503cdf3 Mon Sep 17 00:00:00 2001 From: Akihito Nakano Date: Wed, 26 Jan 2022 23:14:20 +0000 Subject: [PATCH 44/56] Fix errors from local testnet scripts on MacOS (#2919) ## Issue Addressed Resolves https://github.com/sigp/lighthouse/issues/2763 ## Proposed Changes - Add a workflow which tests that local testnet starts successfully - Added `set` option into the scripts in order to fail fast so that we can notice errors during starting local testnet. - Fix errors on MacOS - The redirect `&>>` is supported since bash v4 but the version bundled in macOS(11.6.1) is v3. 
https://github.com/sigp/lighthouse/pull/2919/commits/a54f119c9b1839fd0909792d219858e727e120a2 --- .github/workflows/local-testnet.yml | 50 ++++++++++++++++++++ scripts/local_testnet/beacon_node.sh | 2 + scripts/local_testnet/bootnode.sh | 2 + scripts/local_testnet/clean.sh | 2 + scripts/local_testnet/ganache_test_node.sh | 2 + scripts/local_testnet/kill_processes.sh | 2 + scripts/local_testnet/print_logs.sh | 17 +++++++ scripts/local_testnet/reset_genesis_time.sh | 2 + scripts/local_testnet/start_local_testnet.sh | 8 ++-- scripts/local_testnet/stop_local_testnet.sh | 2 + scripts/local_testnet/validator_client.sh | 2 + scripts/local_testnet/vars.env | 3 ++ 12 files changed, 91 insertions(+), 3 deletions(-) create mode 100644 .github/workflows/local-testnet.yml create mode 100755 scripts/local_testnet/print_logs.sh diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml new file mode 100644 index 00000000000..f97b271c35f --- /dev/null +++ b/.github/workflows/local-testnet.yml @@ -0,0 +1,50 @@ +# Test that local testnet starts successfully. 
+name: local testnet + +on: + push: + branches: + - unstable + pull_request: + +jobs: + run-local-testnet: + strategy: + matrix: + os: + - ubuntu-18.04 + - macos-latest + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v1 + + - name: Install ganache + run: npm install ganache-cli@latest --global + + # https://github.com/actions/cache/blob/main/examples.md#rust---cargo + - uses: actions/cache@v2 + id: cache-cargo + with: + path: | + ~/.cargo/bin/ + ~/.cargo/registry/index/ + ~/.cargo/registry/cache/ + ~/.cargo/git/db/ + target/ + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + + - name: Install lighthouse + if: steps.cache-cargo.outputs.cache-hit != 'true' + run: make && make install-lcli + + - name: Start local testnet + run: ./start_local_testnet.sh + working-directory: scripts/local_testnet + + - name: Print logs + run: ./print_logs.sh + working-directory: scripts/local_testnet + + - name: Stop local testnet + run: ./stop_local_testnet.sh + working-directory: scripts/local_testnet diff --git a/scripts/local_testnet/beacon_node.sh b/scripts/local_testnet/beacon_node.sh index 883c6660294..8151aac2490 100755 --- a/scripts/local_testnet/beacon_node.sh +++ b/scripts/local_testnet/beacon_node.sh @@ -4,6 +4,8 @@ # Starts a beacon node based upon a genesis state created by `./setup.sh`. # +set -Eeuo pipefail + source ./vars.env SUBSCRIBE_ALL_SUBNETS= diff --git a/scripts/local_testnet/bootnode.sh b/scripts/local_testnet/bootnode.sh index bef207a6947..ca02a24140f 100755 --- a/scripts/local_testnet/bootnode.sh +++ b/scripts/local_testnet/bootnode.sh @@ -5,6 +5,8 @@ # Starts a bootnode from the generated enr. 
# +set -Eeuo pipefail + source ./vars.env echo "Generating bootnode enr" diff --git a/scripts/local_testnet/clean.sh b/scripts/local_testnet/clean.sh index bc4db74c619..b01b1a2dffb 100755 --- a/scripts/local_testnet/clean.sh +++ b/scripts/local_testnet/clean.sh @@ -4,6 +4,8 @@ # Deletes all files associated with the local testnet. # +set -Eeuo pipefail + source ./vars.env if [ -d $DATADIR ]; then diff --git a/scripts/local_testnet/ganache_test_node.sh b/scripts/local_testnet/ganache_test_node.sh index 762700dbd63..69edc1e7704 100755 --- a/scripts/local_testnet/ganache_test_node.sh +++ b/scripts/local_testnet/ganache_test_node.sh @@ -1,5 +1,7 @@ #!/usr/bin/env bash +set -Eeuo pipefail + source ./vars.env exec ganache-cli \ diff --git a/scripts/local_testnet/kill_processes.sh b/scripts/local_testnet/kill_processes.sh index c729a1645ae..4f52a5f256e 100755 --- a/scripts/local_testnet/kill_processes.sh +++ b/scripts/local_testnet/kill_processes.sh @@ -1,6 +1,8 @@ #!/usr/bin/env bash # Kill processes +set -Eeuo pipefail + # First parameter is the file with # one pid per line. 
if [ -f "$1" ]; then diff --git a/scripts/local_testnet/print_logs.sh b/scripts/local_testnet/print_logs.sh new file mode 100755 index 00000000000..2a9e7822a6f --- /dev/null +++ b/scripts/local_testnet/print_logs.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +# Print the tail of all the logs output from local testnet + +set -Eeuo pipefail + +source ./vars.env + +for f in "$TESTNET_DIR"/*.log +do + [[ -e "$f" ]] || break # handle the case of no *.log files + echo "=============================================================================" + echo "$f" + echo "=============================================================================" + tail "$f" + echo "" +done diff --git a/scripts/local_testnet/reset_genesis_time.sh b/scripts/local_testnet/reset_genesis_time.sh index c7332e327ed..68c8fb6b4cb 100755 --- a/scripts/local_testnet/reset_genesis_time.sh +++ b/scripts/local_testnet/reset_genesis_time.sh @@ -4,6 +4,8 @@ # Resets the beacon state genesis time to now. # +set -Eeuo pipefail + source ./vars.env NOW=$(date +%s) diff --git a/scripts/local_testnet/start_local_testnet.sh b/scripts/local_testnet/start_local_testnet.sh index cdae9b2ba2d..7126e4c5dc0 100755 --- a/scripts/local_testnet/start_local_testnet.sh +++ b/scripts/local_testnet/start_local_testnet.sh @@ -1,6 +1,8 @@ #!/usr/bin/env bash # Start all processes necessary to create a local testnet +set -Eeuo pipefail + source ./vars.env # VC_COUNT is defaulted in vars.env @@ -49,7 +51,7 @@ for (( bn=1; bn<=$BN_COUNT; bn++ )); do done for (( vc=1; vc<=$VC_COUNT; vc++ )); do touch $LOG_DIR/validator_node_$vc.log -done +done # Sleep with a message sleeping() { @@ -67,7 +69,7 @@ execute_command() { EX_NAME=$2 shift shift - CMD="$EX_NAME $@ &>> $LOG_DIR/$LOG_NAME" + CMD="$EX_NAME $@ >> $LOG_DIR/$LOG_NAME 2>&1" echo "executing: $CMD" echo "$CMD" > "$LOG_DIR/$LOG_NAME" eval "$CMD &" @@ -89,7 +91,7 @@ execute_command_add_PID() { # Delay to let ganache-cli to get started execute_command_add_PID ganache_test_node.log 
./ganache_test_node.sh -sleeping 2 +sleeping 10 # Delay to get data setup execute_command setup.log ./setup.sh diff --git a/scripts/local_testnet/stop_local_testnet.sh b/scripts/local_testnet/stop_local_testnet.sh index 47f390ba766..b1c3188ee3a 100755 --- a/scripts/local_testnet/stop_local_testnet.sh +++ b/scripts/local_testnet/stop_local_testnet.sh @@ -1,6 +1,8 @@ #!/usr/bin/env bash # Stop all processes that were started with start_local_testnet.sh +set -Eeuo pipefail + source ./vars.env PID_FILE=$TESTNET_DIR/PIDS.pid diff --git a/scripts/local_testnet/validator_client.sh b/scripts/local_testnet/validator_client.sh index 6755384be59..5aa75dfe2d0 100755 --- a/scripts/local_testnet/validator_client.sh +++ b/scripts/local_testnet/validator_client.sh @@ -6,6 +6,8 @@ # # Usage: ./validator_client.sh +set -Eeuo pipefail + source ./vars.env DEBUG_LEVEL=${3:-info} diff --git a/scripts/local_testnet/vars.env b/scripts/local_testnet/vars.env index f88e9eb716e..208fbb6d856 100644 --- a/scripts/local_testnet/vars.env +++ b/scripts/local_testnet/vars.env @@ -43,3 +43,6 @@ SECONDS_PER_SLOT=3 # Seconds per Eth1 block SECONDS_PER_ETH1_BLOCK=1 + +# Command line arguments for validator client +VC_ARGS="" From 9964f5afe5d810025fe6c3901674f6c92f31ce52 Mon Sep 17 00:00:00 2001 From: Divma Date: Wed, 26 Jan 2022 23:14:22 +0000 Subject: [PATCH 45/56] Document why we hash downloaded blocks for both sync algs (#2927) ## Proposed Changes Initially the idea was to remove hashing of blocks in backfill sync. After considering it more, we conclude that we need to do it in both (forward and backfill) anyway. But since we forgot why we were doing it in the first place, this PR documents this logic. 
Future us should find it useful Co-authored-by: Divma <26765164+divagant-martian@users.noreply.github.com> --- .../network/src/sync/backfill_sync/mod.rs | 13 ++++-- beacon_node/network/src/sync/manager.rs | 2 +- .../network/src/sync/range_sync/batch.rs | 42 ++++++++++++++++--- 3 files changed, 47 insertions(+), 10 deletions(-) diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index 610081319d6..0c34eef274f 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -54,6 +54,13 @@ impl BatchConfig for BackFillBatchConfig { fn max_batch_processing_attempts() -> u8 { MAX_BATCH_PROCESSING_ATTEMPTS } + fn batch_attempt_hash(blocks: &[SignedBeaconBlock]) -> u64 { + use std::collections::hash_map::DefaultHasher; + use std::hash::{Hash, Hasher}; + let mut hasher = DefaultHasher::new(); + blocks.hash(&mut hasher); + hasher.finish() + } } /// Return type when attempting to start the backfill sync process. @@ -119,7 +126,7 @@ pub struct BackFillSync { /// Batches validated by this chain. validated_batches: u64, - /// We keep track of peer that are participating in the backfill sync. Unlike RangeSync, + /// We keep track of peers that are participating in the backfill sync. Unlike RangeSync, /// BackFillSync uses all synced peers to download the chain from. If BackFillSync fails, we don't /// want to penalize all our synced peers, so we use this variable to keep track of peers that /// have participated and only penalize these peers if backfill sync fails. @@ -539,7 +546,7 @@ impl BackFillSync { "error" => %e, "batch" => self.processing_target); // This is unlikely to happen but it would stall syncing since the batch now has no // blocks to continue, and the chain is expecting a processing result that won't - // arrive. To mitigate this, (fake) fail this processing so that the batch is + // arrive. 
To mitigate this, (fake) fail this processing so that the batch is // re-downloaded. self.on_batch_process_result( network, @@ -795,7 +802,7 @@ impl BackFillSync { for attempt in batch.attempts() { // The validated batch has been re-processed if attempt.hash != processed_attempt.hash { - // The re-downloaded version was different + // The re-downloaded version was different. if processed_attempt.peer_id != attempt.peer_id { // A different peer sent the correct batch, the previous peer did not // We negatively score the original peer. diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 32f2a263674..960dd12afce 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -131,7 +131,7 @@ pub enum SyncRequestType { RangeSync(Epoch, ChainId), } -/// The result of processing a multiple blocks (a chain segment). +/// The result of processing multiple blocks (a chain segment). #[derive(Debug)] pub enum BatchProcessResult { /// The batch was completed successfully. It carries whether the sent batch contained blocks. diff --git a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index e0b15cb498e..7239081ad13 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -19,6 +19,34 @@ pub trait BatchConfig { fn max_batch_download_attempts() -> u8; /// The max batch processing attempts. fn max_batch_processing_attempts() -> u8; + /// Hashing function of a batch's attempt. Used for scoring purposes. + /// + /// When a batch fails processing, it is possible that the batch is wrong (faulty or + /// incomplete) or that a previous one is wrong. For this reason we need to re-download and + /// re-process the batches awaiting validation and the current one. 
Consider this scenario: + /// + /// ```ignore + /// BatchA BatchB BatchC BatchD + /// -----X Empty Empty Y----- + /// ``` + /// + /// BatchA declares that it refers X, but BatchD declares that it's first block is Y. There is no + /// way to know if BatchD is faulty/incomplete or if batches B and/or C are missing blocks. It is + /// also possible that BatchA belongs to a different chain to the rest starting in some block + /// midway in the batch's range. For this reason, the four batches would need to be re-downloaded + /// and re-processed. + /// + /// If batchD was actually good, it will still register two processing attempts for the same set of + /// blocks. In this case, we don't want to penalize the peer that provided the first version, since + /// it's equal to the successfully processed one. + /// + /// The function `batch_attempt_hash` provides a way to compare two batch attempts without + /// storing the full set of blocks. + /// + /// Note that simpler hashing functions considered in the past (hash of first block, hash of last + /// block, number of received blocks) are not good enough to differentiate attempts. For this + /// reason, we hash the complete set of blocks both in RangeSync and BackFillSync. + fn batch_attempt_hash(blocks: &[SignedBeaconBlock]) -> u64; } pub struct RangeSyncBatchConfig {} @@ -30,6 +58,11 @@ impl BatchConfig for RangeSyncBatchConfig { fn max_batch_processing_attempts() -> u8 { MAX_BATCH_PROCESSING_ATTEMPTS } + fn batch_attempt_hash(blocks: &[SignedBeaconBlock]) -> u64 { + let mut hasher = std::collections::hash_map::DefaultHasher::new(); + blocks.hash(&mut hasher); + hasher.finish() + } } /// Error type of a batch in a wrong state. 
@@ -300,7 +333,7 @@ impl BatchInfo { pub fn start_processing(&mut self) -> Result>, WrongState> { match self.state.poison() { BatchState::AwaitingProcessing(peer, blocks) => { - self.state = BatchState::Processing(Attempt::new(peer, &blocks)); + self.state = BatchState::Processing(Attempt::new::(peer, &blocks)); Ok(blocks) } BatchState::Poisoned => unreachable!("Poisoned batch"), @@ -386,11 +419,8 @@ pub struct Attempt { } impl Attempt { - #[allow(clippy::ptr_arg)] - fn new(peer_id: PeerId, blocks: &Vec>) -> Self { - let mut hasher = std::collections::hash_map::DefaultHasher::new(); - blocks.hash(&mut hasher); - let hash = hasher.finish(); + fn new(peer_id: PeerId, blocks: &[SignedBeaconBlock]) -> Self { + let hash = B::batch_attempt_hash(blocks); Attempt { peer_id, hash } } } From f2b1e096b2d57abe03e002dea71dab6679a49765 Mon Sep 17 00:00:00 2001 From: Divma Date: Wed, 26 Jan 2022 23:14:23 +0000 Subject: [PATCH 46/56] Code quality improvents to the network service (#2932) Checking how to priorize the polling of the network I moved most of the service code to functions. This change I think it's worth on it's own for code quality since inside the `tokio::select` many tools don't work (cargo fmt, sometimes clippy, and sometimes even the compiler's errors get wack). 
This is functionally equivalent to the previous code, just better organized --- beacon_node/network/src/service.rs | 888 ++++++++++++++++------------- 1 file changed, 496 insertions(+), 392 deletions(-) diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 35cf3fa90eb..c6f68d5faa6 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -7,6 +7,7 @@ use crate::{ NetworkConfig, }; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use futures::channel::mpsc::Sender; use futures::future::OptionFuture; use futures::prelude::*; use lighthouse_network::{ @@ -279,7 +280,7 @@ impl NetworkService { log: network_log, }; - spawn_service(executor, network_service); + network_service.spawn_service(executor); Ok((network_globals, network_send)) } @@ -320,428 +321,531 @@ impl NetworkService { result } -} -fn spawn_service( - executor: task_executor::TaskExecutor, - mut service: NetworkService, -) { - let mut shutdown_sender = executor.shutdown_sender(); - - // spawn on the current executor - executor.spawn(async move { - - loop { - // build the futures to check simultaneously - tokio::select! 
{ - _ = service.metrics_update.tick(), if service.metrics_enabled => { - // update various network metrics - metrics::update_gossip_metrics::( - service.libp2p.swarm.behaviour().gs(), - &service.network_globals, - ); - // update sync metrics - metrics::update_sync_metrics(&service.network_globals); + fn send_to_router(&mut self, msg: RouterMessage) { + if let Err(mpsc::error::SendError(msg)) = self.router_send.send(msg) { + debug!(self.log, "Failed to send msg to router"; "msg" => ?msg); + } + } - } - _ = service.gossipsub_parameter_update.tick() => { - if let Ok(slot) = service.beacon_chain.slot() { - if let Some(active_validators) = service.beacon_chain.with_head(|head| { - Ok::<_, BeaconChainError>( - head - .beacon_state - .get_cached_active_validator_indices(RelativeEpoch::Current) - .map(|indices| indices.len()) - .ok() - .or_else(|| { - // if active validator cached was not build we count the - // active validators - service - .beacon_chain - .epoch() - .ok() - .map(|current_epoch| { - head - .beacon_state - .validators() - .iter() - .filter(|validator| - validator.is_active_at(current_epoch) - ) - .count() - }) - }) - ) - }).unwrap_or(None) { - if service.libp2p.swarm.behaviour_mut().update_gossipsub_parameters(active_validators, slot).is_err() { - error!( - service.log, - "Failed to update gossipsub parameters"; - "active_validators" => active_validators - ); - } + fn spawn_service(mut self, executor: task_executor::TaskExecutor) { + let mut shutdown_sender = executor.shutdown_sender(); + + // spawn on the current executor + let service_fut = async move { + loop { + tokio::select! 
{ + _ = self.metrics_update.tick(), if self.metrics_enabled => { + // update various network metrics + metrics::update_gossip_metrics::( + self.libp2p.swarm.behaviour().gs(), + &self.network_globals, + ); + // update sync metrics + metrics::update_sync_metrics(&self.network_globals); + } + + _ = self.gossipsub_parameter_update.tick() => self.update_gossipsub_parameters(), + + // handle a message sent to the network + Some(msg) = self.network_recv.recv() => self.on_network_msg(msg, &mut shutdown_sender).await, + + // process any attestation service events + Some(msg) = self.attestation_service.next() => self.on_attestation_service_msg(msg), + + // process any sync committee service events + Some(msg) = self.sync_committee_service.next() => self.on_sync_commitee_service_message(msg), + + event = self.libp2p.next_event() => self.on_libp2p_event(event, &mut shutdown_sender).await, + + Some(_) = &mut self.next_fork_update => self.update_next_fork(), + + Some(_) = &mut self.next_unsubscribe => { + let new_enr_fork_id = self.beacon_chain.enr_fork_id(); + self.libp2p.swarm.behaviour_mut().unsubscribe_from_fork_topics_except(new_enr_fork_id.fork_digest); + info!(self.log, "Unsubscribed from old fork topics"); + self.next_unsubscribe = Box::pin(None.into()); + } + + Some(_) = &mut self.next_fork_subscriptions => { + if let Some((fork_name, _)) = self.beacon_chain.duration_to_next_fork() { + let fork_version = self.beacon_chain.spec.fork_version_for_name(fork_name); + let fork_digest = ChainSpec::compute_fork_digest(fork_version, self.beacon_chain.genesis_validators_root); + info!(self.log, "Subscribing to new fork topics"); + self.libp2p.swarm.behaviour_mut().subscribe_new_fork_topics(fork_digest); + self.next_fork_subscriptions = Box::pin(None.into()); + } + else { + error!(self.log, "Fork subscription scheduled but no fork scheduled"); } } } - // handle a message sent to the network - Some(message) = service.network_recv.recv() => { + 
metrics::update_bandwidth_metrics(self.libp2p.bandwidth.clone()); + } + }; + executor.spawn(service_fut, "network"); + } + + /// Handle an event received from the network. + async fn on_libp2p_event( + &mut self, + ev: Libp2pEvent, + shutdown_sender: &mut Sender, + ) { + match ev { + Libp2pEvent::Behaviour(event) => match event { + BehaviourEvent::PeerConnectedOutgoing(peer_id) => { + self.send_to_router(RouterMessage::PeerDialed(peer_id)); + } + BehaviourEvent::PeerConnectedIncoming(_) + | BehaviourEvent::PeerBanned(_) + | BehaviourEvent::PeerUnbanned(_) => { + // No action required for these events. + } + BehaviourEvent::PeerDisconnected(peer_id) => { + self.send_to_router(RouterMessage::PeerDisconnected(peer_id)); + } + BehaviourEvent::RequestReceived { + peer_id, + id, + request, + } => { + self.send_to_router(RouterMessage::RPCRequestReceived { + peer_id, + id, + request, + }); + } + BehaviourEvent::ResponseReceived { + peer_id, + id, + response, + } => { + self.send_to_router(RouterMessage::RPCResponseReceived { + peer_id, + request_id: id, + response, + }); + } + BehaviourEvent::RPCFailed { id, peer_id } => { + self.send_to_router(RouterMessage::RPCFailed { + peer_id, + request_id: id, + }); + } + BehaviourEvent::StatusPeer(peer_id) => { + self.send_to_router(RouterMessage::StatusPeer(peer_id)); + } + BehaviourEvent::PubsubMessage { + id, + source, + message, + .. 
+ } => { match message { - NetworkMessage::SendRequest{ peer_id, request, request_id } => { - service.libp2p.send_request(peer_id, request_id, request); - } - NetworkMessage::SendResponse{ peer_id, response, id } => { - service.libp2p.send_response(peer_id, id, response); - } - NetworkMessage::SendErrorResponse{ peer_id, error, id, reason } => { - service.libp2p.respond_with_error(peer_id, id, error, reason); - } - NetworkMessage::UPnPMappingEstablished { tcp_socket, udp_socket} => { - service.upnp_mappings = (tcp_socket.map(|s| s.port()), udp_socket.map(|s| s.port())); - // If there is an external TCP port update, modify our local ENR. - if let Some(tcp_socket) = tcp_socket { - if let Err(e) = service.libp2p.swarm.behaviour_mut().discovery_mut().update_enr_tcp_port(tcp_socket.port()) { - warn!(service.log, "Failed to update ENR"; "error" => e); - } - } - // if the discovery service is not auto-updating, update it with the - // UPnP mappings - if !service.discovery_auto_update { - if let Some(udp_socket) = udp_socket { - if let Err(e) = service.libp2p.swarm.behaviour_mut().discovery_mut().update_enr_udp_socket(udp_socket) { - warn!(service.log, "Failed to update ENR"; "error" => e); - } - } - } - }, - NetworkMessage::ValidationResult { - propagation_source, - message_id, - validation_result, - } => { - trace!(service.log, "Propagating gossipsub message"; - "propagation_peer" => ?propagation_source, - "message_id" => %message_id, - "validation_result" => ?validation_result - ); - service - .libp2p - .swarm - .behaviour_mut() - .report_message_validation_result( - &propagation_source, message_id, validation_result - ); - } - NetworkMessage::Publish { messages } => { - let mut topic_kinds = Vec::new(); - for message in &messages { - if !topic_kinds.contains(&message.kind()) { - topic_kinds.push(message.kind()); - } - } - debug!( - service.log, - "Sending pubsub messages"; - "count" => messages.len(), - "topics" => ?topic_kinds - ); - 
service.libp2p.swarm.behaviour_mut().publish(messages); - } - NetworkMessage::ReportPeer { peer_id, action, source, msg } => service.libp2p.report_peer(&peer_id, action, source, msg), - NetworkMessage::GoodbyePeer { peer_id, reason, source } => service.libp2p.goodbye_peer(&peer_id, reason, source), - NetworkMessage::AttestationSubscribe { subscriptions } => { - if let Err(e) = service + // attestation information gets processed in the attestation service + PubsubMessage::Attestation(ref subnet_and_attestation) => { + let subnet = subnet_and_attestation.0; + let attestation = &subnet_and_attestation.1; + // checks if we have an aggregator for the slot. If so, we should process + // the attestation, else we just just propagate the Attestation. + let should_process = self .attestation_service - .validator_subscriptions(subscriptions) { - warn!(service.log, "Attestation validator subscription failed"; "error" => e); - } + .should_process_attestation(subnet, attestation); + self.send_to_router(RouterMessage::PubsubMessage( + id, + source, + message, + should_process, + )); } - NetworkMessage::SyncCommitteeSubscribe { subscriptions } => { - if let Err(e) = service - .sync_committee_service - .validator_subscriptions(subscriptions) { - warn!(service.log, "Sync committee calidator subscription failed"; "error" => e); - } + _ => { + // all else is sent to the router + self.send_to_router(RouterMessage::PubsubMessage( + id, source, message, true, + )); } - NetworkMessage::SubscribeCoreTopics => { - if service.shutdown_after_sync { - let _ = shutdown_sender - .send(ShutdownReason::Success( - "Beacon node completed sync. 
Shutting down as --shutdown-after-sync flag is enabled")) - .await - .map_err(|e| warn!( - service.log, - "failed to send a shutdown signal"; - "error" => %e - )); - return; - } - let mut subscribed_topics: Vec = vec![]; - for topic_kind in lighthouse_network::types::CORE_TOPICS.iter() { - for fork_digest in service.required_gossip_fork_digests() { - let topic = GossipTopic::new(topic_kind.clone(), GossipEncoding::default(), fork_digest); - if service.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) { - subscribed_topics.push(topic); - } else { - warn!(service.log, "Could not subscribe to topic"; "topic" => %topic); - } - } - } - - // If we are to subscribe to all subnets we do it here - if service.subscribe_all_subnets { - for subnet_id in 0..<::EthSpec as EthSpec>::SubnetBitfieldLength::to_u64() { - let subnet = Subnet::Attestation(SubnetId::new(subnet_id)); - // Update the ENR bitfield - service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, true); - for fork_digest in service.required_gossip_fork_digests() { - let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - if service.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) { - subscribed_topics.push(topic); - } else { - warn!(service.log, "Could not subscribe to topic"; "topic" => %topic); - } - } - } - for subnet_id in 0..<::EthSpec as EthSpec>::SyncCommitteeSubnetCount::to_u64() { - let subnet = Subnet::SyncCommittee(SyncSubnetId::new(subnet_id)); - // Update the ENR bitfield - service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, true); - for fork_digest in service.required_gossip_fork_digests() { - let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - if service.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) { - subscribed_topics.push(topic); - } else { - warn!(service.log, "Could not subscribe to topic"; "topic" => %topic); - } - } - } - } + } + } + }, + Libp2pEvent::NewListenAddr(multiaddr) => { + 
self.network_globals + .listen_multiaddrs + .write() + .push(multiaddr); + } + Libp2pEvent::ZeroListeners => { + let _ = shutdown_sender + .send(ShutdownReason::Failure( + "All listeners are closed. Unable to listen", + )) + .await + .map_err(|e| { + warn!( + self.log, + "failed to send a shutdown signal"; + "error" => %e + ) + }); + } + } + } - if !subscribed_topics.is_empty() { - info!( - service.log, - "Subscribed to topics"; - "topics" => ?subscribed_topics.into_iter().map(|topic| format!("{}", topic)).collect::>() - ); - } - } + /// Handle a message sent to the network service. + async fn on_network_msg( + &mut self, + msg: NetworkMessage, + shutdown_sender: &mut Sender, + ) { + match msg { + NetworkMessage::SendRequest { + peer_id, + request, + request_id, + } => { + self.libp2p.send_request(peer_id, request_id, request); + } + NetworkMessage::SendResponse { + peer_id, + response, + id, + } => { + self.libp2p.send_response(peer_id, id, response); + } + NetworkMessage::SendErrorResponse { + peer_id, + error, + id, + reason, + } => { + self.libp2p.respond_with_error(peer_id, id, error, reason); + } + NetworkMessage::UPnPMappingEstablished { + tcp_socket, + udp_socket, + } => { + self.upnp_mappings = (tcp_socket.map(|s| s.port()), udp_socket.map(|s| s.port())); + // If there is an external TCP port update, modify our local ENR. 
+ if let Some(tcp_socket) = tcp_socket { + if let Err(e) = self + .libp2p + .swarm + .behaviour_mut() + .discovery_mut() + .update_enr_tcp_port(tcp_socket.port()) + { + warn!(self.log, "Failed to update ENR"; "error" => e); } } - // process any attestation service events - Some(attestation_service_message) = service.attestation_service.next() => { - match attestation_service_message { - SubnetServiceMessage::Subscribe(subnet) => { - for fork_digest in service.required_gossip_fork_digests() { - let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - service.libp2p.swarm.behaviour_mut().subscribe(topic); - } - } - SubnetServiceMessage::Unsubscribe(subnet) => { - for fork_digest in service.required_gossip_fork_digests() { - let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - service.libp2p.swarm.behaviour_mut().unsubscribe(topic); - } - } - SubnetServiceMessage::EnrAdd(subnet) => { - service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, true); - } - SubnetServiceMessage::EnrRemove(subnet) => { - service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, false); - } - SubnetServiceMessage::DiscoverPeers(subnets_to_discover) => { - service.libp2p.swarm.behaviour_mut().discover_subnet_peers(subnets_to_discover); + // if the discovery service is not auto-updating, update it with the + // UPnP mappings + if !self.discovery_auto_update { + if let Some(udp_socket) = udp_socket { + if let Err(e) = self + .libp2p + .swarm + .behaviour_mut() + .discovery_mut() + .update_enr_udp_socket(udp_socket) + { + warn!(self.log, "Failed to update ENR"; "error" => e); } } } - // process any sync committee service events - Some(sync_committee_service_message) = service.sync_committee_service.next() => { - match sync_committee_service_message { - SubnetServiceMessage::Subscribe(subnet) => { - for fork_digest in service.required_gossip_fork_digests() { - let topic = GossipTopic::new(subnet.into(), 
GossipEncoding::default(), fork_digest); - service.libp2p.swarm.behaviour_mut().subscribe(topic); - } - } - SubnetServiceMessage::Unsubscribe(subnet) => { - for fork_digest in service.required_gossip_fork_digests() { - let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - service.libp2p.swarm.behaviour_mut().unsubscribe(topic); - } - } - SubnetServiceMessage::EnrAdd(subnet) => { - service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, true); - } - SubnetServiceMessage::EnrRemove(subnet) => { - service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, false); - } - SubnetServiceMessage::DiscoverPeers(subnets_to_discover) => { - service.libp2p.swarm.behaviour_mut().discover_subnet_peers(subnets_to_discover); + } + NetworkMessage::ValidationResult { + propagation_source, + message_id, + validation_result, + } => { + trace!(self.log, "Propagating gossipsub message"; + "propagation_peer" => ?propagation_source, + "message_id" => %message_id, + "validation_result" => ?validation_result + ); + self.libp2p + .swarm + .behaviour_mut() + .report_message_validation_result( + &propagation_source, + message_id, + validation_result, + ); + } + NetworkMessage::Publish { messages } => { + let mut topic_kinds = Vec::new(); + for message in &messages { + if !topic_kinds.contains(&message.kind()) { + topic_kinds.push(message.kind()); + } + } + debug!( + self.log, + "Sending pubsub messages"; + "count" => messages.len(), + "topics" => ?topic_kinds + ); + self.libp2p.swarm.behaviour_mut().publish(messages); + } + NetworkMessage::ReportPeer { + peer_id, + action, + source, + msg, + } => self.libp2p.report_peer(&peer_id, action, source, msg), + NetworkMessage::GoodbyePeer { + peer_id, + reason, + source, + } => self.libp2p.goodbye_peer(&peer_id, reason, source), + NetworkMessage::AttestationSubscribe { subscriptions } => { + if let Err(e) = self + .attestation_service + .validator_subscriptions(subscriptions) + { + warn!(self.log, 
"Attestation validator subscription failed"; "error" => e); + } + } + NetworkMessage::SyncCommitteeSubscribe { subscriptions } => { + if let Err(e) = self + .sync_committee_service + .validator_subscriptions(subscriptions) + { + warn!(self.log, "Sync committee calidator subscription failed"; "error" => e); + } + } + NetworkMessage::SubscribeCoreTopics => { + if self.shutdown_after_sync { + if let Err(e) = shutdown_sender + .send(ShutdownReason::Success( + "Beacon node completed sync. \ + Shutting down as --shutdown-after-sync flag is enabled", + )) + .await + { + warn!( + self.log, + "failed to send a shutdown signal"; + "error" => %e + ) + } + return; + } + let mut subscribed_topics: Vec = vec![]; + for topic_kind in lighthouse_network::types::CORE_TOPICS.iter() { + for fork_digest in self.required_gossip_fork_digests() { + let topic = GossipTopic::new( + topic_kind.clone(), + GossipEncoding::default(), + fork_digest, + ); + if self.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) { + subscribed_topics.push(topic); + } else { + warn!(self.log, "Could not subscribe to topic"; "topic" => %topic); } } } - libp2p_event = service.libp2p.next_event() => { - // poll the swarm - match libp2p_event { - Libp2pEvent::Behaviour(event) => match event { - BehaviourEvent::PeerConnectedOutgoing(peer_id) => { - let _ = service - .router_send - .send(RouterMessage::PeerDialed(peer_id)) - .map_err(|_| { - debug!(service.log, "Failed to send peer dialed to router"); }); - }, - BehaviourEvent::PeerConnectedIncoming(_) | BehaviourEvent::PeerBanned(_) | BehaviourEvent::PeerUnbanned(_) => { - // No action required for these events. 
- }, - BehaviourEvent::PeerDisconnected(peer_id) => { - let _ = service - .router_send - .send(RouterMessage::PeerDisconnected(peer_id)) - .map_err(|_| { - debug!(service.log, "Failed to send peer disconnect to router"); - }); - }, - BehaviourEvent::RequestReceived{peer_id, id, request} => { - let _ = service - .router_send - .send(RouterMessage::RPCRequestReceived{peer_id, id, request}) - .map_err(|_| { - debug!(service.log, "Failed to send RPC to router"); - }); - } - BehaviourEvent::ResponseReceived{peer_id, id, response} => { - let _ = service - .router_send - .send(RouterMessage::RPCResponseReceived{ peer_id, request_id: id, response }) - .map_err(|_| { - debug!(service.log, "Failed to send RPC to router"); - }); + // If we are to subscribe to all subnets we do it here + if self.subscribe_all_subnets { + for subnet_id in 0..<::EthSpec as EthSpec>::SubnetBitfieldLength::to_u64() { + let subnet = Subnet::Attestation(SubnetId::new(subnet_id)); + // Update the ENR bitfield + self.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, true); + for fork_digest in self.required_gossip_fork_digests() { + let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); + if self.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) { + subscribed_topics.push(topic); + } else { + warn!(self.log, "Could not subscribe to topic"; "topic" => %topic); } - BehaviourEvent::RPCFailed{id, peer_id} => { - let _ = service - .router_send - .send(RouterMessage::RPCFailed{ peer_id, request_id: id}) - .map_err(|_| { - debug!(service.log, "Failed to send RPC to router"); - }); - - } - BehaviourEvent::StatusPeer(peer_id) => { - let _ = service - .router_send - .send(RouterMessage::StatusPeer(peer_id)) - .map_err(|_| { - debug!(service.log, "Failed to send re-status peer to router"); - }); - } - BehaviourEvent::PubsubMessage { - id, - source, - message, - .. 
- } => { - match message { - // attestation information gets processed in the attestation service - PubsubMessage::Attestation(ref subnet_and_attestation) => { - let subnet = subnet_and_attestation.0; - let attestation = &subnet_and_attestation.1; - // checks if we have an aggregator for the slot. If so, we should process - // the attestation, else we just just propagate the Attestation. - let should_process = service.attestation_service.should_process_attestation( - subnet, - attestation, - ); - let _ = service - .router_send - .send(RouterMessage::PubsubMessage(id, source, message, should_process)) - .map_err(|_| { - debug!(service.log, "Failed to send pubsub message to router"); - }); - } - _ => { - // all else is sent to the router - let _ = service - .router_send - .send(RouterMessage::PubsubMessage(id, source, message, true)) - .map_err(|_| { - debug!(service.log, "Failed to send pubsub message to router"); - }); - } - } - } - } - Libp2pEvent::NewListenAddr(multiaddr) => { - service.network_globals.listen_multiaddrs.write().push(multiaddr); - } - Libp2pEvent::ZeroListeners => { - let _ = shutdown_sender - .send(ShutdownReason::Failure("All listeners are closed. 
Unable to listen")) - .await - .map_err(|e| warn!( - service.log, - "failed to send a shutdown signal"; - "error" => %e - )); } } - } - Some(_) = &mut service.next_fork_update => { - let new_enr_fork_id = service.beacon_chain.enr_fork_id(); - - let fork_context = &service.fork_context; - if let Some(new_fork_name) = fork_context.from_context_bytes(new_enr_fork_id.fork_digest) { - info!( - service.log, - "Transitioned to new fork"; - "old_fork" => ?fork_context.current_fork(), - "new_fork" => ?new_fork_name, - ); - fork_context.update_current_fork(*new_fork_name); - - service - .libp2p + let subnet_max = <::EthSpec as EthSpec>::SyncCommitteeSubnetCount::to_u64(); + for subnet_id in 0..subnet_max { + let subnet = Subnet::SyncCommittee(SyncSubnetId::new(subnet_id)); + // Update the ENR bitfield + self.libp2p .swarm .behaviour_mut() - .update_fork_version(new_enr_fork_id.clone()); - // Reinitialize the next_fork_update - service.next_fork_update = Box::pin(next_fork_delay(&service.beacon_chain).into()); - - // Set the next_unsubscribe delay. - let epoch_duration = service.beacon_chain.spec.seconds_per_slot * T::EthSpec::slots_per_epoch(); - let unsubscribe_delay = Duration::from_secs(UNSUBSCRIBE_DELAY_EPOCHS * epoch_duration); - - // Update the `next_fork_subscriptions` timer if the next fork is known. 
- service.next_fork_subscriptions = Box::pin(next_fork_subscriptions_delay(&service.beacon_chain).into()); - service.next_unsubscribe = Box::pin(Some(tokio::time::sleep(unsubscribe_delay)).into()); - info!(service.log, "Network will unsubscribe from old fork gossip topics in a few epochs"; "remaining_epochs" => UNSUBSCRIBE_DELAY_EPOCHS); - } else { - crit!(service.log, "Unknown new enr fork id"; "new_fork_id" => ?new_enr_fork_id); + .update_enr_subnet(subnet, true); + for fork_digest in self.required_gossip_fork_digests() { + let topic = GossipTopic::new( + subnet.into(), + GossipEncoding::default(), + fork_digest, + ); + if self.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) { + subscribed_topics.push(topic); + } else { + warn!(self.log, "Could not subscribe to topic"; "topic" => %topic); + } + } } + } + + if !subscribed_topics.is_empty() { + info!( + self.log, + "Subscribed to topics"; + "topics" => ?subscribed_topics.into_iter().map(|topic| format!("{}", topic)).collect::>() + ); + } + } + } + } + fn update_gossipsub_parameters(&mut self) { + if let Ok(slot) = self.beacon_chain.slot() { + if let Some(active_validators) = self + .beacon_chain + .with_head(|head| { + Ok::<_, BeaconChainError>( + head.beacon_state + .get_cached_active_validator_indices(RelativeEpoch::Current) + .map(|indices| indices.len()) + .ok() + .or_else(|| { + // if active validator cached was not build we count the + // active validators + self.beacon_chain.epoch().ok().map(|current_epoch| { + head.beacon_state + .validators() + .iter() + .filter(|validator| validator.is_active_at(current_epoch)) + .count() + }) + }), + ) + }) + .unwrap_or(None) + { + if self + .libp2p + .swarm + .behaviour_mut() + .update_gossipsub_parameters(active_validators, slot) + .is_err() + { + error!( + self.log, + "Failed to update gossipsub parameters"; + "active_validators" => active_validators + ); } - Some(_) = &mut service.next_unsubscribe => { - let new_enr_fork_id = service.beacon_chain.enr_fork_id(); 
- service.libp2p.swarm.behaviour_mut().unsubscribe_from_fork_topics_except(new_enr_fork_id.fork_digest); - info!(service.log, "Unsubscribed from old fork topics"); - service.next_unsubscribe = Box::pin(None.into()); + } + } + } + + fn on_attestation_service_msg(&mut self, msg: SubnetServiceMessage) { + match msg { + SubnetServiceMessage::Subscribe(subnet) => { + for fork_digest in self.required_gossip_fork_digests() { + let topic = + GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); + self.libp2p.swarm.behaviour_mut().subscribe(topic); } - Some(_) = &mut service.next_fork_subscriptions => { - if let Some((fork_name, _)) = service.beacon_chain.duration_to_next_fork() { - let fork_version = service.beacon_chain.spec.fork_version_for_name(fork_name); - let fork_digest = ChainSpec::compute_fork_digest(fork_version, service.beacon_chain.genesis_validators_root); - info!(service.log, "Subscribing to new fork topics"); - service.libp2p.swarm.behaviour_mut().subscribe_new_fork_topics(fork_digest); - service.next_fork_subscriptions = Box::pin(None.into()); - } - else { - error!(service.log, "Fork subscription scheduled but no fork scheduled"); - } + } + SubnetServiceMessage::Unsubscribe(subnet) => { + for fork_digest in self.required_gossip_fork_digests() { + let topic = + GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); + self.libp2p.swarm.behaviour_mut().unsubscribe(topic); } } - metrics::update_bandwidth_metrics(service.libp2p.bandwidth.clone()); + SubnetServiceMessage::EnrAdd(subnet) => { + self.libp2p + .swarm + .behaviour_mut() + .update_enr_subnet(subnet, true); + } + SubnetServiceMessage::EnrRemove(subnet) => { + self.libp2p + .swarm + .behaviour_mut() + .update_enr_subnet(subnet, false); + } + SubnetServiceMessage::DiscoverPeers(subnets_to_discover) => { + self.libp2p + .swarm + .behaviour_mut() + .discover_subnet_peers(subnets_to_discover); + } } - }, "network"); + } + + fn on_sync_commitee_service_message(&mut self, 
msg: SubnetServiceMessage) { + match msg { + SubnetServiceMessage::Subscribe(subnet) => { + for fork_digest in self.required_gossip_fork_digests() { + let topic = + GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); + self.libp2p.swarm.behaviour_mut().subscribe(topic); + } + } + SubnetServiceMessage::Unsubscribe(subnet) => { + for fork_digest in self.required_gossip_fork_digests() { + let topic = + GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); + self.libp2p.swarm.behaviour_mut().unsubscribe(topic); + } + } + SubnetServiceMessage::EnrAdd(subnet) => { + self.libp2p + .swarm + .behaviour_mut() + .update_enr_subnet(subnet, true); + } + SubnetServiceMessage::EnrRemove(subnet) => { + self.libp2p + .swarm + .behaviour_mut() + .update_enr_subnet(subnet, false); + } + SubnetServiceMessage::DiscoverPeers(subnets_to_discover) => { + self.libp2p + .swarm + .behaviour_mut() + .discover_subnet_peers(subnets_to_discover); + } + } + } + + fn update_next_fork(&mut self) { + let new_enr_fork_id = self.beacon_chain.enr_fork_id(); + + let fork_context = &self.fork_context; + if let Some(new_fork_name) = fork_context.from_context_bytes(new_enr_fork_id.fork_digest) { + info!( + self.log, + "Transitioned to new fork"; + "old_fork" => ?fork_context.current_fork(), + "new_fork" => ?new_fork_name, + ); + fork_context.update_current_fork(*new_fork_name); + + self.libp2p + .swarm + .behaviour_mut() + .update_fork_version(new_enr_fork_id); + // Reinitialize the next_fork_update + self.next_fork_update = Box::pin(next_fork_delay(&self.beacon_chain).into()); + + // Set the next_unsubscribe delay. + let epoch_duration = + self.beacon_chain.spec.seconds_per_slot * T::EthSpec::slots_per_epoch(); + let unsubscribe_delay = Duration::from_secs(UNSUBSCRIBE_DELAY_EPOCHS * epoch_duration); + + // Update the `next_fork_subscriptions` timer if the next fork is known. 
+ self.next_fork_subscriptions = + Box::pin(next_fork_subscriptions_delay(&self.beacon_chain).into()); + self.next_unsubscribe = Box::pin(Some(tokio::time::sleep(unsubscribe_delay)).into()); + info!(self.log, "Network will unsubscribe from old fork gossip topics in a few epochs"; "remaining_epochs" => UNSUBSCRIBE_DELAY_EPOCHS); + } else { + crit!(self.log, "Unknown new enr fork id"; "new_fork_id" => ?new_enr_fork_id); + } + } } /// Returns a `Sleep` that triggers after the next change in the beacon chain fork version. From 85d73d5443c0000d91e300dbeda6788b9d124e22 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 26 Jan 2022 23:14:24 +0000 Subject: [PATCH 47/56] Set mmap threshold to 128KB in malloc utils (#2937) ## Issue Addressed Closes https://github.com/sigp/lighthouse/issues/2857 ## Proposed Changes Explicitly set GNU malloc's MMAP_THRESHOLD to 128KB, disabling dynamic adjustments. For rationale see the linked issue. --- common/malloc_utils/src/glibc.rs | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/common/malloc_utils/src/glibc.rs b/common/malloc_utils/src/glibc.rs index 402cdc27aa8..681849a78ce 100644 --- a/common/malloc_utils/src/glibc.rs +++ b/common/malloc_utils/src/glibc.rs @@ -11,22 +11,20 @@ use std::env; use std::os::raw::c_int; use std::result::Result; -/// The value to be provided to `malloc_mmap_threshold`. +/// The optimal mmap threshold for Lighthouse seems to be around 128KB. /// -/// Value chosen so that values of the validators tree hash cache will *not* be allocated via -/// `mmap`. -/// -/// The size of a single chunk is: -/// -/// NODES_PER_VALIDATOR * VALIDATORS_PER_ARENA * 32 = 15 * 4096 * 32 = 1.875 MiB -const OPTIMAL_MMAP_THRESHOLD: c_int = 2 * 1_024 * 1_024; +/// By default GNU malloc will start with a threshold of 128KB and adjust it upwards, but we've +/// found that the upwards adjustments tend to result in heap fragmentation. 
Explicitly setting the +/// threshold to 128KB disables the dynamic adjustments and encourages `mmap` usage, which keeps the +/// heap size under control. +const OPTIMAL_MMAP_THRESHOLD: c_int = 128 * 1_024; /// Constants used to configure malloc internals. /// /// Source: /// /// https://github.com/lattera/glibc/blob/895ef79e04a953cac1493863bcae29ad85657ee1/malloc/malloc.h#L115-L123 -const M_MMAP_THRESHOLD: c_int = -4; +const M_MMAP_THRESHOLD: c_int = -3; /// Environment variables used to configure malloc. /// @@ -134,8 +132,8 @@ fn env_var_present(name: &str) -> bool { /// ## Resources /// /// - https://man7.org/linux/man-pages/man3/mallopt.3.html -fn malloc_mmap_threshold(num_arenas: c_int) -> Result<(), c_int> { - into_result(mallopt(M_MMAP_THRESHOLD, num_arenas)) +fn malloc_mmap_threshold(threshold: c_int) -> Result<(), c_int> { + into_result(mallopt(M_MMAP_THRESHOLD, threshold)) } fn mallopt(param: c_int, val: c_int) -> c_int { From 013a3cc3e08cd7dd76df27efa70716d5c0155be4 Mon Sep 17 00:00:00 2001 From: Mac L Date: Wed, 26 Jan 2022 23:14:25 +0000 Subject: [PATCH 48/56] Add flag to disable confirmation when performing voluntary exits (#2955) ## Issue Addressed Currently performing a voluntary exit prompts for manual confirmation. This prevents automation of exits. ## Proposed Changes Add the flag `--no-confirmation` to the account manager when performing voluntary exits to bypass this manual confirmation. 
--- account_manager/src/validator/exit.rs | 33 ++++++++++++++++++++------- 1 file changed, 25 insertions(+), 8 deletions(-) diff --git a/account_manager/src/validator/exit.rs b/account_manager/src/validator/exit.rs index 221c31caf61..ca8cab5bd31 100644 --- a/account_manager/src/validator/exit.rs +++ b/account_manager/src/validator/exit.rs @@ -21,6 +21,7 @@ pub const KEYSTORE_FLAG: &str = "keystore"; pub const PASSWORD_FILE_FLAG: &str = "password-file"; pub const BEACON_SERVER_FLAG: &str = "beacon-node"; pub const NO_WAIT: &str = "no-wait"; +pub const NO_CONFIRMATION: &str = "no-confirmation"; pub const PASSWORD_PROMPT: &str = "Enter the keystore password"; pub const DEFAULT_BEACON_NODE: &str = "http://localhost:5052/"; @@ -59,6 +60,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .long(NO_WAIT) .help("Exits after publishing the voluntary exit without waiting for confirmation that the exit was included in the beacon chain") ) + .arg( + Arg::with_name(NO_CONFIRMATION) + .long(NO_CONFIRMATION) + .help("Exits without prompting for confirmation that you understand the implications of a voluntary exit. This should be used with caution") + ) .arg( Arg::with_name(STDIN_INPUTS_FLAG) .takes_value(false) @@ -75,6 +81,7 @@ pub fn cli_run(matches: &ArgMatches, env: Environment) -> Result< let stdin_inputs = cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG); let no_wait = matches.is_present(NO_WAIT); + let no_confirmation = matches.is_present(NO_CONFIRMATION); let spec = env.eth2_config().spec.clone(); let server_url: String = clap_utils::parse_required(matches, BEACON_SERVER_FLAG)?; @@ -97,12 +104,14 @@ pub fn cli_run(matches: &ArgMatches, env: Environment) -> Result< stdin_inputs, ð2_network_config, no_wait, + no_confirmation, ))?; Ok(()) } /// Gets the keypair and validator_index for every validator and calls `publish_voluntary_exit` on it. 
+#[allow(clippy::too_many_arguments)] async fn publish_voluntary_exit( keystore_path: &Path, password_file_path: Option<&PathBuf>, @@ -111,6 +120,7 @@ async fn publish_voluntary_exit( stdin_inputs: bool, eth2_network_config: &Eth2NetworkConfig, no_wait: bool, + no_confirmation: bool, ) -> Result<(), String> { let genesis_data = get_geneisis_data(client).await?; let testnet_genesis_root = eth2_network_config @@ -149,15 +159,22 @@ async fn publish_voluntary_exit( "Publishing a voluntary exit for validator: {} \n", keypair.pk ); - eprintln!("WARNING: THIS IS AN IRREVERSIBLE OPERATION\n"); - eprintln!("{}\n", PROMPT); - eprintln!( - "PLEASE VISIT {} TO MAKE SURE YOU UNDERSTAND THE IMPLICATIONS OF A VOLUNTARY EXIT.", - WEBSITE_URL - ); - eprintln!("Enter the exit phrase from the above URL to confirm the voluntary exit: "); + if !no_confirmation { + eprintln!("WARNING: THIS IS AN IRREVERSIBLE OPERATION\n"); + eprintln!("{}\n", PROMPT); + eprintln!( + "PLEASE VISIT {} TO MAKE SURE YOU UNDERSTAND THE IMPLICATIONS OF A VOLUNTARY EXIT.", + WEBSITE_URL + ); + eprintln!("Enter the exit phrase from the above URL to confirm the voluntary exit: "); + } + + let confirmation = if !no_confirmation { + account_utils::read_input_from_user(stdin_inputs)? + } else { + CONFIRMATION_PHRASE.to_string() + }; - let confirmation = account_utils::read_input_from_user(stdin_inputs)?; if confirmation == CONFIRMATION_PHRASE { // Sign and publish the voluntary exit to network let signed_voluntary_exit = voluntary_exit.sign( From e70daaa3b6ee70eb6a6f2c0a1759062943e08259 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 27 Jan 2022 01:06:02 +0000 Subject: [PATCH 49/56] Implement API for block rewards (#2628) ## Proposed Changes Add an API endpoint for retrieving detailed information about block rewards. For information on usage see [the docs](https://github.com/sigp/lighthouse/blob/block-rewards-api/book/src/api-lighthouse.md#lighthouseblock_rewards), and the source. 
--- beacon_node/beacon_chain/src/block_reward.rs | 97 +++++++++++++++++++ .../beacon_chain/src/block_verification.rs | 13 +++ beacon_node/beacon_chain/src/errors.rs | 3 + beacon_node/beacon_chain/src/events.rs | 13 +++ beacon_node/beacon_chain/src/lib.rs | 1 + beacon_node/http_api/src/block_rewards.rs | 80 +++++++++++++++ beacon_node/http_api/src/lib.rs | 15 +++ beacon_node/operation_pool/src/lib.rs | 5 +- book/src/api-lighthouse.md | 42 +++++++- common/eth2/src/lib.rs | 2 + common/eth2/src/lighthouse.rs | 3 + common/eth2/src/lighthouse/block_rewards.rs | 54 +++++++++++ common/eth2/src/types.rs | 17 ++++ .../altair/sync_committee.rs | 37 ++++--- 14 files changed, 366 insertions(+), 16 deletions(-) create mode 100644 beacon_node/beacon_chain/src/block_reward.rs create mode 100644 beacon_node/http_api/src/block_rewards.rs create mode 100644 common/eth2/src/lighthouse/block_rewards.rs diff --git a/beacon_node/beacon_chain/src/block_reward.rs b/beacon_node/beacon_chain/src/block_reward.rs new file mode 100644 index 00000000000..83b204113fe --- /dev/null +++ b/beacon_node/beacon_chain/src/block_reward.rs @@ -0,0 +1,97 @@ +use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use eth2::lighthouse::{AttestationRewards, BlockReward, BlockRewardMeta}; +use operation_pool::{AttMaxCover, MaxCover}; +use state_processing::per_block_processing::altair::sync_committee::compute_sync_aggregate_rewards; +use types::{BeaconBlockRef, BeaconState, EthSpec, Hash256, RelativeEpoch}; + +impl BeaconChain { + pub fn compute_block_reward( + &self, + block: BeaconBlockRef<'_, T::EthSpec>, + block_root: Hash256, + state: &BeaconState, + ) -> Result { + if block.slot() != state.slot() { + return Err(BeaconChainError::BlockRewardSlotError); + } + + let active_indices = state.get_cached_active_validator_indices(RelativeEpoch::Current)?; + let total_active_balance = state.get_total_balance(active_indices, &self.spec)?; + let mut per_attestation_rewards = block + .body() + .attestations() 
+ .iter() + .map(|att| { + AttMaxCover::new(att, state, total_active_balance, &self.spec) + .ok_or(BeaconChainError::BlockRewardAttestationError) + }) + .collect::, _>>()?; + + // Update the attestation rewards for each previous attestation included. + // This is O(n^2) in the number of attestations n. + for i in 0..per_attestation_rewards.len() { + let (updated, to_update) = per_attestation_rewards.split_at_mut(i + 1); + let latest_att = &updated[i]; + + for att in to_update { + att.update_covering_set(latest_att.object(), latest_att.covering_set()); + } + } + + let mut prev_epoch_total = 0; + let mut curr_epoch_total = 0; + + for cover in &per_attestation_rewards { + for &reward in cover.fresh_validators_rewards.values() { + if cover.att.data.slot.epoch(T::EthSpec::slots_per_epoch()) == state.current_epoch() + { + curr_epoch_total += reward; + } else { + prev_epoch_total += reward; + } + } + } + + let attestation_total = prev_epoch_total + curr_epoch_total; + + // Drop the covers. + let per_attestation_rewards = per_attestation_rewards + .into_iter() + .map(|cover| cover.fresh_validators_rewards) + .collect(); + + let attestation_rewards = AttestationRewards { + total: attestation_total, + prev_epoch_total, + curr_epoch_total, + per_attestation_rewards, + }; + + // Sync committee rewards. 
+ let sync_committee_rewards = if let Ok(sync_aggregate) = block.body().sync_aggregate() { + let (_, proposer_reward_per_bit) = compute_sync_aggregate_rewards(state, &self.spec) + .map_err(|_| BeaconChainError::BlockRewardSyncError)?; + sync_aggregate.sync_committee_bits.num_set_bits() as u64 * proposer_reward_per_bit + } else { + 0 + }; + + // Total, metadata + let total = attestation_total + sync_committee_rewards; + + let meta = BlockRewardMeta { + slot: block.slot(), + parent_slot: state.latest_block_header().slot, + proposer_index: block.proposer_index(), + graffiti: block.body().graffiti().as_utf8_lossy(), + }; + + Ok(BlockReward { + total, + block_root, + meta, + attestation_rewards, + sync_committee_rewards, + }) + } +} diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index c6d937c81e9..a4a1dc31b95 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -53,6 +53,7 @@ use crate::{ }, metrics, BeaconChain, BeaconChainError, BeaconChainTypes, }; +use eth2::types::EventKind; use fork_choice::{ForkChoice, ForkChoiceStore, PayloadVerificationStatus}; use parking_lot::RwLockReadGuard; use proto_array::Block as ProtoBlock; @@ -1165,6 +1166,18 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { metrics::stop_timer(committee_timer); + /* + * If we have block reward listeners, compute the block reward and push it to the + * event handler. + */ + if let Some(ref event_handler) = chain.event_handler { + if event_handler.has_block_reward_subscribers() { + let block_reward = + chain.compute_block_reward(block.message(), block_root, &state)?; + event_handler.register(EventKind::BlockReward(block_reward)); + } + } + /* * Perform `per_block_processing` on the block and state, returning early if the block is * invalid. 
diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 70e288ec265..6920c06039d 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -137,6 +137,9 @@ pub enum BeaconChainError { AltairForkDisabled, ExecutionLayerMissing, ExecutionForkChoiceUpdateFailed(execution_layer::Error), + BlockRewardSlotError, + BlockRewardAttestationError, + BlockRewardSyncError, HeadMissingFromForkChoice(Hash256), FinalizedBlockMissingFromForkChoice(Hash256), InvalidFinalizedPayloadShutdownError(TrySendError), diff --git a/beacon_node/beacon_chain/src/events.rs b/beacon_node/beacon_chain/src/events.rs index 459ccb457f9..6f4415ef4f3 100644 --- a/beacon_node/beacon_chain/src/events.rs +++ b/beacon_node/beacon_chain/src/events.rs @@ -15,6 +15,7 @@ pub struct ServerSentEventHandler { chain_reorg_tx: Sender>, contribution_tx: Sender>, late_head: Sender>, + block_reward_tx: Sender>, log: Logger, } @@ -32,6 +33,7 @@ impl ServerSentEventHandler { let (chain_reorg_tx, _) = broadcast::channel(capacity); let (contribution_tx, _) = broadcast::channel(capacity); let (late_head, _) = broadcast::channel(capacity); + let (block_reward_tx, _) = broadcast::channel(capacity); Self { attestation_tx, @@ -42,6 +44,7 @@ impl ServerSentEventHandler { chain_reorg_tx, contribution_tx, late_head, + block_reward_tx, log, } } @@ -67,6 +70,8 @@ impl ServerSentEventHandler { .map(|count| trace!(self.log, "Registering server-sent contribution and proof event"; "receiver_count" => count)), EventKind::LateHead(late_head) => self.late_head.send(EventKind::LateHead(late_head)) .map(|count| trace!(self.log, "Registering server-sent late head event"; "receiver_count" => count)), + EventKind::BlockReward(block_reward) => self.block_reward_tx.send(EventKind::BlockReward(block_reward)) + .map(|count| trace!(self.log, "Registering server-sent contribution and proof event"; "receiver_count" => count)), }; if let Err(SendError(event)) = result 
{ trace!(self.log, "No receivers registered to listen for event"; "event" => ?event); @@ -105,6 +110,10 @@ impl ServerSentEventHandler { self.late_head.subscribe() } + pub fn subscribe_block_reward(&self) -> Receiver> { + self.block_reward_tx.subscribe() + } + pub fn has_attestation_subscribers(&self) -> bool { self.attestation_tx.receiver_count() > 0 } @@ -136,4 +145,8 @@ impl ServerSentEventHandler { pub fn has_late_head_subscribers(&self) -> bool { self.late_head.receiver_count() > 0 } + + pub fn has_block_reward_subscribers(&self) -> bool { + self.block_reward_tx.receiver_count() > 0 + } } diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 768a8695515..aff8657e86c 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -5,6 +5,7 @@ mod beacon_chain; mod beacon_fork_choice_store; mod beacon_proposer_cache; mod beacon_snapshot; +pub mod block_reward; mod block_times_cache; mod block_verification; pub mod builder; diff --git a/beacon_node/http_api/src/block_rewards.rs b/beacon_node/http_api/src/block_rewards.rs new file mode 100644 index 00000000000..154773aa95c --- /dev/null +++ b/beacon_node/http_api/src/block_rewards.rs @@ -0,0 +1,80 @@ +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; +use eth2::lighthouse::{BlockReward, BlockRewardsQuery}; +use slog::{warn, Logger}; +use state_processing::BlockReplayer; +use std::sync::Arc; +use warp_utils::reject::{beacon_chain_error, beacon_state_error, custom_bad_request}; + +pub fn get_block_rewards( + query: BlockRewardsQuery, + chain: Arc>, + log: Logger, +) -> Result, warp::Rejection> { + let start_slot = query.start_slot; + let end_slot = query.end_slot; + let prior_slot = start_slot - 1; + + if start_slot > end_slot || start_slot == 0 { + return Err(custom_bad_request(format!( + "invalid start and end: {}, {}", + start_slot, end_slot + ))); + } + + let end_block_root = chain + 
.block_root_at_slot(end_slot, WhenSlotSkipped::Prev) + .map_err(beacon_chain_error)? + .ok_or_else(|| custom_bad_request(format!("block at end slot {} unknown", end_slot)))?; + + let blocks = chain + .store + .load_blocks_to_replay(start_slot, end_slot, end_block_root) + .map_err(|e| beacon_chain_error(e.into()))?; + + let state_root = chain + .state_root_at_slot(prior_slot) + .map_err(beacon_chain_error)? + .ok_or_else(|| custom_bad_request(format!("prior state at slot {} unknown", prior_slot)))?; + + let mut state = chain + .get_state(&state_root, Some(prior_slot)) + .and_then(|maybe_state| maybe_state.ok_or(BeaconChainError::MissingBeaconState(state_root))) + .map_err(beacon_chain_error)?; + + state + .build_all_caches(&chain.spec) + .map_err(beacon_state_error)?; + + let mut block_rewards = Vec::with_capacity(blocks.len()); + + let block_replayer = BlockReplayer::new(state, &chain.spec) + .pre_block_hook(Box::new(|state, block| { + // Compute block reward. + let block_reward = + chain.compute_block_reward(block.message(), block.canonical_root(), state)?; + block_rewards.push(block_reward); + Ok(()) + })) + .state_root_iter( + chain + .forwards_iter_state_roots_until(prior_slot, end_slot) + .map_err(beacon_chain_error)?, + ) + .no_signature_verification() + .minimal_block_root_verification() + .apply_blocks(blocks, None) + .map_err(beacon_chain_error)?; + + if block_replayer.state_root_miss() { + warn!( + log, + "Block reward state root miss"; + "start_slot" => start_slot, + "end_slot" => end_slot, + ); + } + + drop(block_replayer); + + Ok(block_rewards) +} diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index b0907a30c1b..deadf68543d 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -7,6 +7,7 @@ mod attester_duties; mod block_id; +mod block_rewards; mod database; mod metrics; mod proposer_duties; @@ -2540,6 +2541,16 @@ pub fn serve( }, ); + let get_lighthouse_block_rewards = 
warp::path("lighthouse") + .and(warp::path("block_rewards")) + .and(warp::query::()) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and(log_filter.clone()) + .and_then(|query, chain, log| { + blocking_json_task(move || block_rewards::get_block_rewards(query, chain, log)) + }); + let get_events = eth1_v1 .and(warp::path("events")) .and(warp::path::end()) @@ -2576,6 +2587,9 @@ pub fn serve( api_types::EventTopic::LateHead => { event_handler.subscribe_late_head() } + api_types::EventTopic::BlockReward => { + event_handler.subscribe_block_reward() + } }; receivers.push(BroadcastStream::new(receiver).map(|msg| { @@ -2661,6 +2675,7 @@ pub fn serve( .or(get_lighthouse_beacon_states_ssz.boxed()) .or(get_lighthouse_staking.boxed()) .or(get_lighthouse_database_info.boxed()) + .or(get_lighthouse_block_rewards.boxed()) .or(get_events.boxed()), ) .or(warp::post().and( diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 2cc3ffaf6be..c9b252ca116 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -6,15 +6,16 @@ mod metrics; mod persistence; mod sync_aggregate_id; +pub use attestation::AttMaxCover; +pub use max_cover::MaxCover; pub use persistence::{ PersistedOperationPool, PersistedOperationPoolAltair, PersistedOperationPoolBase, }; use crate::sync_aggregate_id::SyncAggregateId; -use attestation::AttMaxCover; use attestation_id::AttestationId; use attester_slashing::AttesterSlashingMaxCover; -use max_cover::{maximum_cover, MaxCover}; +use max_cover::maximum_cover; use parking_lot::RwLock; use state_processing::per_block_processing::errors::AttestationValidationError; use state_processing::per_block_processing::{ diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index 8ea35f7348a..7836ac14a48 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -407,4 +407,44 @@ The endpoint will return immediately. 
 See the beacon node logs for an indication ### `/lighthouse/database/historical_blocks` Manually provide `SignedBeaconBlock`s to backfill the database. This is intended -for use by Lighthouse developers during testing only. \ No newline at end of file +for use by Lighthouse developers during testing only. + +### `/lighthouse/block_rewards` + +Fetch information about the block rewards paid to proposers for a range of consecutive blocks. + +Two query parameters are required: + +* `start_slot` (inclusive): the slot of the first block to compute rewards for. +* `end_slot` (inclusive): the slot of the last block to compute rewards for. + +Example: + +```bash +curl "http://localhost:5052/lighthouse/block_rewards?start_slot=1&end_slot=32" | jq +``` + +```json +[ + { + "block_root": "0x51576c2fcf0ab68d7d93c65e6828e620efbb391730511ffa35584d6c30e51410", + "attestation_rewards": { + "total": 4941156, + }, + .. + }, + .. +] +``` + +Caveats: + +* Presently only attestation and sync committee rewards are computed; slashing rewards are not. +* The output format is verbose and subject to change. Please see [`BlockReward`][block_reward_src] + in the source. +* For maximum efficiency the `start_slot` should satisfy `start_slot % slots_per_restore_point == 1`. + This is because the state _prior_ to the `start_slot` needs to be loaded from the database, and + loading a state on a boundary is most efficient. + +[block_reward_src]: +https://github.com/sigp/lighthouse/tree/unstable/common/eth2/src/lighthouse/block_reward.rs \ No newline at end of file diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 153667d7e95..8dc808c2653 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -9,6 +9,7 @@ #[cfg(feature = "lighthouse")] pub mod lighthouse; +#[cfg(feature = "lighthouse")] pub mod lighthouse_vc; pub mod mixin; pub mod types; @@ -245,6 +246,7 @@ impl BeaconNodeHttpClient { } /// Perform a HTTP POST request, returning a JSON response. 
+ #[cfg(feature = "lighthouse")] async fn post_with_response( &self, url: U, diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index a8993a39c5c..10601556fa1 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -1,5 +1,7 @@ //! This module contains endpoints that are non-standard and only available on Lighthouse servers. +mod block_rewards; + use crate::{ ok_or_error, types::{BeaconState, ChainSpec, Epoch, EthSpec, GenericResponse, ValidatorId}, @@ -12,6 +14,7 @@ use ssz::four_byte_option_impl; use ssz_derive::{Decode, Encode}; use store::{AnchorInfo, Split}; +pub use block_rewards::{AttestationRewards, BlockReward, BlockRewardMeta, BlockRewardsQuery}; pub use lighthouse_network::{types::SyncState, PeerInfo}; // Define "legacy" implementations of `Option` which use four bytes for encoding the union diff --git a/common/eth2/src/lighthouse/block_rewards.rs b/common/eth2/src/lighthouse/block_rewards.rs new file mode 100644 index 00000000000..186cbd888cf --- /dev/null +++ b/common/eth2/src/lighthouse/block_rewards.rs @@ -0,0 +1,54 @@ +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use types::{Hash256, Slot}; + +/// Details about the rewards paid to a block proposer for proposing a block. +/// +/// All rewards in GWei. +/// +/// Presently this counts attestation and sync committee rewards, but in future should +/// be expanded to include information on slashings too. +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct BlockReward { + /// Sum of all reward components. + pub total: u64, + /// Block root of the block that these rewards are for. + pub block_root: Hash256, + /// Metadata about the block, particularly reward-relevant metadata. + pub meta: BlockRewardMeta, + /// Rewards due to attestations. + pub attestation_rewards: AttestationRewards, + /// Sum of rewards due to sync committee signatures. 
+ pub sync_committee_rewards: u64, +} + +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct BlockRewardMeta { + pub slot: Slot, + pub parent_slot: Slot, + pub proposer_index: u64, + pub graffiti: String, +} + +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct AttestationRewards { + /// Total block reward from attestations included. + pub total: u64, + /// Total rewards from previous epoch attestations. + pub prev_epoch_total: u64, + /// Total rewards from current epoch attestations. + pub curr_epoch_total: u64, + /// Vec of attestation rewards for each attestation included. + /// + /// Each element of the vec is a map from validator index to reward. + pub per_attestation_rewards: Vec>, +} + +/// Query parameters for the `/lighthouse/block_rewards` endpoint. +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct BlockRewardsQuery { + /// Lower slot limit for block rewards returned (inclusive). + pub start_slot: Slot, + /// Upper slot limit for block rewards returned (inclusive). + pub end_slot: Slot, +} diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index a761b9ed124..78567ad83c1 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -10,6 +10,9 @@ use std::str::{from_utf8, FromStr}; use std::time::Duration; pub use types::*; +#[cfg(feature = "lighthouse")] +use crate::lighthouse::BlockReward; + /// An API error serializable to JSON. 
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(untagged)] @@ -839,6 +842,8 @@ pub enum EventKind { ChainReorg(SseChainReorg), ContributionAndProof(Box>), LateHead(SseLateHead), + #[cfg(feature = "lighthouse")] + BlockReward(BlockReward), } impl EventKind { @@ -852,6 +857,8 @@ impl EventKind { EventKind::ChainReorg(_) => "chain_reorg", EventKind::ContributionAndProof(_) => "contribution_and_proof", EventKind::LateHead(_) => "late_head", + #[cfg(feature = "lighthouse")] + EventKind::BlockReward(_) => "block_reward", } } @@ -904,6 +911,10 @@ impl EventKind { ServerError::InvalidServerSentEvent(format!("Contribution and Proof: {:?}", e)) })?, ))), + #[cfg(feature = "lighthouse")] + "block_reward" => Ok(EventKind::BlockReward(serde_json::from_str(data).map_err( + |e| ServerError::InvalidServerSentEvent(format!("Block Reward: {:?}", e)), + )?)), _ => Err(ServerError::InvalidServerSentEvent( "Could not parse event tag".to_string(), )), @@ -929,6 +940,8 @@ pub enum EventTopic { ChainReorg, ContributionAndProof, LateHead, + #[cfg(feature = "lighthouse")] + BlockReward, } impl FromStr for EventTopic { @@ -944,6 +957,8 @@ impl FromStr for EventTopic { "chain_reorg" => Ok(EventTopic::ChainReorg), "contribution_and_proof" => Ok(EventTopic::ContributionAndProof), "late_head" => Ok(EventTopic::LateHead), + #[cfg(feature = "lighthouse")] + "block_reward" => Ok(EventTopic::BlockReward), _ => Err("event topic cannot be parsed.".to_string()), } } @@ -960,6 +975,8 @@ impl fmt::Display for EventTopic { EventTopic::ChainReorg => write!(f, "chain_reorg"), EventTopic::ContributionAndProof => write!(f, "contribution_and_proof"), EventTopic::LateHead => write!(f, "late_head"), + #[cfg(feature = "lighthouse")] + EventTopic::BlockReward => write!(f, "block_reward"), } } } diff --git a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs index 31386a8fb12..8358003e4b4 
100644 --- a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs +++ b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs @@ -42,19 +42,7 @@ pub fn process_sync_aggregate( } // Compute participant and proposer rewards - let total_active_balance = state.get_total_active_balance()?; - let total_active_increments = - total_active_balance.safe_div(spec.effective_balance_increment)?; - let total_base_rewards = get_base_reward_per_increment(total_active_balance, spec)? - .safe_mul(total_active_increments)?; - let max_participant_rewards = total_base_rewards - .safe_mul(SYNC_REWARD_WEIGHT)? - .safe_div(WEIGHT_DENOMINATOR)? - .safe_div(T::slots_per_epoch())?; - let participant_reward = max_participant_rewards.safe_div(T::SyncCommitteeSize::to_u64())?; - let proposer_reward = participant_reward - .safe_mul(PROPOSER_WEIGHT)? - .safe_div(WEIGHT_DENOMINATOR.safe_sub(PROPOSER_WEIGHT)?)?; + let (participant_reward, proposer_reward) = compute_sync_aggregate_rewards(state, spec)?; // Apply participant and proposer rewards let committee_indices = state.get_sync_committee_indices(¤t_sync_committee)?; @@ -73,3 +61,26 @@ pub fn process_sync_aggregate( Ok(()) } + +/// Compute the `(participant_reward, proposer_reward)` for a sync aggregate. +/// +/// The `state` should be the pre-state from the same slot as the block containing the aggregate. +pub fn compute_sync_aggregate_rewards( + state: &BeaconState, + spec: &ChainSpec, +) -> Result<(u64, u64), BlockProcessingError> { + let total_active_balance = state.get_total_active_balance()?; + let total_active_increments = + total_active_balance.safe_div(spec.effective_balance_increment)?; + let total_base_rewards = get_base_reward_per_increment(total_active_balance, spec)? + .safe_mul(total_active_increments)?; + let max_participant_rewards = total_base_rewards + .safe_mul(SYNC_REWARD_WEIGHT)? + .safe_div(WEIGHT_DENOMINATOR)? 
+ .safe_div(T::slots_per_epoch())?; + let participant_reward = max_participant_rewards.safe_div(T::SyncCommitteeSize::to_u64())?; + let proposer_reward = participant_reward + .safe_mul(PROPOSER_WEIGHT)? + .safe_div(WEIGHT_DENOMINATOR.safe_sub(PROPOSER_WEIGHT)?)?; + Ok((participant_reward, proposer_reward)) +} From 782abdcab5e79294b1e7e41544f23d2a9936aead Mon Sep 17 00:00:00 2001 From: tim gretler Date: Thu, 27 Jan 2022 01:06:04 +0000 Subject: [PATCH 50/56] Outaded flag in lighthouse book (#2965) ## Proposed Changes Outdated flag. Need to use `--wallet-name` instead. --- book/src/validator-create.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/book/src/validator-create.md b/book/src/validator-create.md index 73fff42dfe3..91af60078a4 100644 --- a/book/src/validator-create.md +++ b/book/src/validator-create.md @@ -75,7 +75,7 @@ The example assumes that the `wally` wallet was generated from the [wallet](./wallet-create.md) example. ```bash -lighthouse --network pyrmont account validator create --name wally --wallet-password wally.pass --count 1 +lighthouse --network pyrmont account validator create --wallet-name wally --wallet-password wally.pass --count 1 ``` This command will: From 2489470d208c0368f474520d63ca4484f316f060 Mon Sep 17 00:00:00 2001 From: Mac L Date: Thu, 27 Jan 2022 18:08:52 +1100 Subject: [PATCH 51/56] Add attestation performance api --- .../http_api/src/attestation_performance.rs | 210 ++++++++++++++++++ beacon_node/http_api/src/lib.rs | 16 ++ common/eth2/src/lighthouse.rs | 4 + .../src/lighthouse/attestation_performance.rs | 38 ++++ 4 files changed, 268 insertions(+) create mode 100644 beacon_node/http_api/src/attestation_performance.rs create mode 100644 common/eth2/src/lighthouse/attestation_performance.rs diff --git a/beacon_node/http_api/src/attestation_performance.rs b/beacon_node/http_api/src/attestation_performance.rs new file mode 100644 index 00000000000..ea39cd279c4 --- /dev/null +++ 
b/beacon_node/http_api/src/attestation_performance.rs @@ -0,0 +1,210 @@ +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use eth2::lighthouse::{ + AttestationPerformance, AttestationPerformanceQuery, AttestationPerformanceStatistics, +}; +use state_processing::{ + per_epoch_processing::altair::participation_cache::Error as ParticipationCacheError, + per_epoch_processing::EpochProcessingSummary, BlockReplayError, BlockReplayer, +}; +use std::sync::Arc; +use types::{BeaconState, BeaconStateError, EthSpec, Hash256, SignedBeaconBlock, Slot}; +use warp_utils::reject::{beacon_chain_error, custom_bad_request, custom_server_error}; + +const MAX_REQUEST_RANGE_EPOCHS: usize = 100; +const BLOCK_ROOT_CHUNK_SIZE: usize = 100; + +#[derive(Debug)] +enum AttestationPerformanceError { + BlockReplay(BlockReplayError), + BeaconState(BeaconStateError), + ParticipationCache(ParticipationCacheError), + UnableToFindValidator(usize), +} + +impl From for AttestationPerformanceError { + fn from(e: BlockReplayError) -> Self { + Self::BlockReplay(e) + } +} + +impl From for AttestationPerformanceError { + fn from(e: BeaconStateError) -> Self { + Self::BeaconState(e) + } +} + +impl From for AttestationPerformanceError { + fn from(e: ParticipationCacheError) -> Self { + Self::ParticipationCache(e) + } +} + +pub fn get_attestation_performance( + target: String, + query: AttestationPerformanceQuery, + chain: Arc>, +) -> Result, warp::Rejection> { + let spec = &chain.spec; + // We increment by 2 here so that when we build the state from the `prior_slot` it is + // still 1 epoch ahead of the first epoch we want to analyse. + // This ensures the `.is_previous_epoch_X` functions on `EpochProcessingSummary` return results + // for the correct epoch. 
+ let start_epoch = query.start_epoch + 2; + let start_slot = start_epoch.start_slot(T::EthSpec::slots_per_epoch()); + let prior_slot = start_slot - 1; + + let end_epoch = query.end_epoch + 2; + let end_slot = end_epoch.end_slot(T::EthSpec::slots_per_epoch()); + + // Check query is valid + if start_epoch > end_epoch { + // || start_epoch == 0 { + return Err(custom_bad_request(format!( + "invalid start and end epochs: {}, {}", + query.start_epoch, query.end_epoch + ))); + } + + // The response size can grow exceptionally large therefore we should check that the + // query is within permitted bounds to prevent potential OOM errors. + if (end_epoch - start_epoch).as_usize() > MAX_REQUEST_RANGE_EPOCHS { + return Err(custom_bad_request(format!( + "end_epoch must not exceed start_epoch by more than 100 epochs. start: {}, end: {}", + query.start_epoch, query.end_epoch + ))); + } + + let head_state = chain.head_beacon_state().map_err(beacon_chain_error)?; + + let validator_indices = (0..=head_state.validators().len() as u64).collect(); + + // Either use the global validator set, or the specified index. + let index_range = if target.to_lowercase() == "global" { + validator_indices + } else { + vec![target.parse::().map_err(|_| { + custom_bad_request(format!( + "invalid validator index: {:?}", + target.to_lowercase() + )) + })?] + }; + + // Load block roots. + let mut block_roots: Vec = chain + .forwards_iter_block_roots_until(start_slot, end_slot) + .map_err(beacon_chain_error)? + .collect::, _>>() + .map_err(beacon_chain_error)? + .iter() + .map(|(root, _)| *root) + .collect(); + block_roots.dedup(); + + // Load first block so we can get it's parent. 
+ let first_block_root = block_roots + .first() + .ok_or_else(|| custom_server_error("No blocks roots could be loaded".to_string()))?; + let first_block = chain + .get_block(first_block_root) + .and_then(|maybe_block| { + maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*first_block_root)) + }) + .map_err(beacon_chain_error)?; + + // Load the block of the prior slot which will be used to build the starting state. + let prior_block = chain + .get_block(&first_block.parent_root()) + .and_then(|maybe_block| { + maybe_block + .ok_or_else(|| BeaconChainError::MissingBeaconBlock(first_block.parent_root())) + }) + .map_err(beacon_chain_error)?; + + // Load state for block replay. + let state_root = prior_block.state_root(); + let state = chain + .get_state(&state_root, Some(prior_slot)) + .and_then(|maybe_state| maybe_state.ok_or(BeaconChainError::MissingBeaconState(state_root))) + .map_err(beacon_chain_error)?; + + // Allocate an AttestationPerformance vector for each validator in the range. + let mut perfs: Vec = + AttestationPerformance::initialize(index_range.clone()); + + let post_slot_hook = |state: &mut BeaconState, + summary: Option>, + _is_skip_slot: bool| + -> Result<(), AttestationPerformanceError> { + // If a `summary` was not output then an epoch boundary was not crossed + // so we move onto the next slot. + if let Some(summary) = summary { + for (position, i) in index_range.iter().enumerate() { + let index = *i as usize; + + let val = perfs + .get_mut(position) + .ok_or(AttestationPerformanceError::UnableToFindValidator(index))?; + + // We are two epochs ahead since the summary is generated for + // `state.previous_epoch()` then `summary.is_previous_epoch_X` functions return + // data for the epoch before that. 
+ let epoch = state.previous_epoch().as_u64() - 1; + + let is_active = summary.is_active_unslashed_in_previous_epoch(index); + + let received_source_reward = summary.is_previous_epoch_source_attester(index)?; + + let received_head_reward = summary.is_previous_epoch_head_attester(index)?; + + let received_target_reward = summary.is_previous_epoch_target_attester(index)?; + + let inclusion_delay = summary + .previous_epoch_inclusion_info(index) + .map(|info| info.delay); + + let perf = AttestationPerformanceStatistics { + active: is_active, + head: received_head_reward, + target: received_target_reward, + source: received_source_reward, + delay: inclusion_delay, + }; + + val.epochs.insert(epoch, perf); + } + } + Ok(()) + }; + + // Initialize block replayer + let mut replayer = BlockReplayer::new(state, spec) + .no_state_root_iter() + .no_signature_verification() + .minimal_block_root_verification() + .post_slot_hook(Box::new(post_slot_hook)); + + // Iterate through block roots in chunks to reduce load on memory. + for block_root_chunks in block_roots.chunks(BLOCK_ROOT_CHUNK_SIZE) { + // Load blocks from the block root chunks. + let blocks = block_root_chunks + .iter() + .map(|root| { + chain + .get_block(root) + .and_then(|maybe_block| { + maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*root)) + }) + .map_err(beacon_chain_error) + }) + .collect::>, _>>()?; + + replayer = replayer + .apply_blocks(blocks, None) + .map_err(|e| custom_server_error(format!("{:?}", e)))?; + } + + drop(replayer); + + Ok(perfs) +} diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index deadf68543d..4617c364a7c 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -5,6 +5,7 @@ //! There are also some additional, non-standard endpoints behind the `/lighthouse/` path which are //! used for development. 
+mod attestation_performance; mod attester_duties; mod block_id; mod block_rewards; @@ -2549,6 +2550,20 @@ pub fn serve( .and(log_filter.clone()) .and_then(|query, chain, log| { blocking_json_task(move || block_rewards::get_block_rewards(query, chain, log)) + + // GET lighthouse/analysis/attestation_performance/{index} + let get_lighthouse_attestation_performance = warp::path("lighthouse") + .and(warp::path("analysis")) + .and(warp::path("attestation_performance")) + .and(warp::path::param::()) + .and(warp::query::()) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then(|target, query, chain: Arc>| { + blocking_json_task(move || { + attestation_performance::get_attestation_performance(target, query, chain) + }) + }); let get_events = eth1_v1 @@ -2676,6 +2691,7 @@ pub fn serve( .or(get_lighthouse_staking.boxed()) .or(get_lighthouse_database_info.boxed()) .or(get_lighthouse_block_rewards.boxed()) + .or(get_lighthouse_attestation_performance.boxed()) .or(get_events.boxed()), ) .or(warp::post().and( diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 10601556fa1..adf73d8b923 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -1,5 +1,6 @@ //! This module contains endpoints that are non-standard and only available on Lighthouse servers. 
+mod attestation_performance; mod block_rewards; use crate::{ @@ -14,6 +15,9 @@ use ssz::four_byte_option_impl; use ssz_derive::{Decode, Encode}; use store::{AnchorInfo, Split}; +pub use attestation_performance::{ + AttestationPerformance, AttestationPerformanceQuery, AttestationPerformanceStatistics, +}; pub use block_rewards::{AttestationRewards, BlockReward, BlockRewardMeta, BlockRewardsQuery}; pub use lighthouse_network::{types::SyncState, PeerInfo}; diff --git a/common/eth2/src/lighthouse/attestation_performance.rs b/common/eth2/src/lighthouse/attestation_performance.rs new file mode 100644 index 00000000000..72ee82c7670 --- /dev/null +++ b/common/eth2/src/lighthouse/attestation_performance.rs @@ -0,0 +1,38 @@ +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use types::Epoch; + +#[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)] +pub struct AttestationPerformanceStatistics { + pub active: bool, + pub head: bool, + pub target: bool, + pub source: bool, + pub delay: Option, +} + +#[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)] +pub struct AttestationPerformance { + pub index: u64, + pub epochs: HashMap, +} + +impl AttestationPerformance { + pub fn initialize(indices: Vec) -> Vec { + let mut vec = Vec::with_capacity(indices.len()); + for index in indices { + vec.push(Self { + index, + ..Default::default() + }) + } + vec + } +} + +/// Query parameters for the `/lighthouse/analysis/attestation_performance` endpoint. 
+#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct AttestationPerformanceQuery { + pub start_epoch: Epoch, + pub end_epoch: Epoch, +} From 66c8d42b6703156320af75fedcaf3efe2adcbb3d Mon Sep 17 00:00:00 2001 From: Mac L Date: Tue, 18 Jan 2022 08:50:12 +1100 Subject: [PATCH 52/56] Implement suggestions --- .../http_api/src/attestation_performance.rs | 21 +++++++------------ 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/beacon_node/http_api/src/attestation_performance.rs b/beacon_node/http_api/src/attestation_performance.rs index ea39cd279c4..b3a608a95b1 100644 --- a/beacon_node/http_api/src/attestation_performance.rs +++ b/beacon_node/http_api/src/attestation_performance.rs @@ -7,7 +7,7 @@ use state_processing::{ per_epoch_processing::EpochProcessingSummary, BlockReplayError, BlockReplayer, }; use std::sync::Arc; -use types::{BeaconState, BeaconStateError, EthSpec, Hash256, SignedBeaconBlock, Slot}; +use types::{BeaconState, BeaconStateError, EthSpec, Hash256, SignedBeaconBlock}; use warp_utils::reject::{beacon_chain_error, custom_bad_request, custom_server_error}; const MAX_REQUEST_RANGE_EPOCHS: usize = 100; @@ -58,7 +58,6 @@ pub fn get_attestation_performance( // Check query is valid if start_epoch > end_epoch { - // || start_epoch == 0 { return Err(custom_bad_request(format!( "invalid start and end epochs: {}, {}", query.start_epoch, query.end_epoch @@ -74,13 +73,11 @@ pub fn get_attestation_performance( ))); } - let head_state = chain.head_beacon_state().map_err(beacon_chain_error)?; - - let validator_indices = (0..=head_state.validators().len() as u64).collect(); - // Either use the global validator set, or the specified index. let index_range = if target.to_lowercase() == "global" { - validator_indices + chain + .with_head(|head| Ok((0..head.beacon_state.validators().len() as u64).collect())) + .map_err(beacon_chain_error)? 
} else { vec![target.parse::().map_err(|_| { custom_bad_request(format!( @@ -94,14 +91,12 @@ pub fn get_attestation_performance( let mut block_roots: Vec = chain .forwards_iter_block_roots_until(start_slot, end_slot) .map_err(beacon_chain_error)? - .collect::, _>>() - .map_err(beacon_chain_error)? - .iter() - .map(|(root, _)| *root) - .collect(); + .map(|res| res.map(|(root, _)| root)) + .collect::,_ >>() + .map_err(beacon_chain_error)?; block_roots.dedup(); - // Load first block so we can get it's parent. + // Load first block so we can get its parent. let first_block_root = block_roots .first() .ok_or_else(|| custom_server_error("No blocks roots could be loaded".to_string()))?; From 6ce6e21ede86fa6f9b551058c55ce1cd5fc13782 Mon Sep 17 00:00:00 2001 From: Mac L Date: Tue, 18 Jan 2022 08:55:15 +1100 Subject: [PATCH 53/56] Formatting --- beacon_node/http_api/src/attestation_performance.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/beacon_node/http_api/src/attestation_performance.rs b/beacon_node/http_api/src/attestation_performance.rs index b3a608a95b1..f4c63d9cc63 100644 --- a/beacon_node/http_api/src/attestation_performance.rs +++ b/beacon_node/http_api/src/attestation_performance.rs @@ -76,8 +76,8 @@ pub fn get_attestation_performance( // Either use the global validator set, or the specified index. let index_range = if target.to_lowercase() == "global" { chain - .with_head(|head| Ok((0..head.beacon_state.validators().len() as u64).collect())) - .map_err(beacon_chain_error)? + .with_head(|head| Ok((0..head.beacon_state.validators().len() as u64).collect())) + .map_err(beacon_chain_error)? } else { vec![target.parse::().map_err(|_| { custom_bad_request(format!( @@ -92,7 +92,7 @@ pub fn get_attestation_performance( .forwards_iter_block_roots_until(start_slot, end_slot) .map_err(beacon_chain_error)? 
.map(|res| res.map(|(root, _)| root)) - .collect::,_ >>() + .collect::, _>>() .map_err(beacon_chain_error)?; block_roots.dedup(); From 2cdf92812123850bf8a20fc5be168f0e9f1e12c1 Mon Sep 17 00:00:00 2001 From: Mac L Date: Tue, 18 Jan 2022 12:40:01 +1100 Subject: [PATCH 54/56] Skip serializing if None --- common/eth2/src/lighthouse/attestation_performance.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/common/eth2/src/lighthouse/attestation_performance.rs b/common/eth2/src/lighthouse/attestation_performance.rs index 72ee82c7670..5ce1d90a38d 100644 --- a/common/eth2/src/lighthouse/attestation_performance.rs +++ b/common/eth2/src/lighthouse/attestation_performance.rs @@ -8,6 +8,7 @@ pub struct AttestationPerformanceStatistics { pub head: bool, pub target: bool, pub source: bool, + #[serde(skip_serializing_if = "Option::is_none")] pub delay: Option, } From 354499acf6c9feb2abbd38ae74d6d5188f6a3f73 Mon Sep 17 00:00:00 2001 From: Mac L Date: Wed, 19 Jan 2022 16:36:28 +1100 Subject: [PATCH 55/56] Add additional check on query --- .../http_api/src/attestation_performance.rs | 23 ++++++++++++++----- 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/beacon_node/http_api/src/attestation_performance.rs b/beacon_node/http_api/src/attestation_performance.rs index f4c63d9cc63..5cd9894adef 100644 --- a/beacon_node/http_api/src/attestation_performance.rs +++ b/beacon_node/http_api/src/attestation_performance.rs @@ -56,10 +56,19 @@ pub fn get_attestation_performance( let end_epoch = query.end_epoch + 2; let end_slot = end_epoch.end_slot(T::EthSpec::slots_per_epoch()); - // Check query is valid + // Ensure end_epoch is smaller than the current epoch - 1. + let current_epoch = chain.epoch().map_err(beacon_chain_error)?; + if query.end_epoch >= current_epoch - 1 { + return Err(custom_bad_request(format!( + "end_epoch must be less than the current epoch - 1. current: {}, end: {}", + current_epoch, query.end_epoch + ))); + } + + // Check query is valid. 
if start_epoch > end_epoch { return Err(custom_bad_request(format!( - "invalid start and end epochs: {}, {}", + "start_epoch must not be larger than end_epoch. start: {}, end: {}", query.start_epoch, query.end_epoch ))); } @@ -81,7 +90,7 @@ pub fn get_attestation_performance( } else { vec![target.parse::().map_err(|_| { custom_bad_request(format!( - "invalid validator index: {:?}", + "Invalid validator index: {:?}", target.to_lowercase() )) })?] @@ -97,9 +106,11 @@ pub fn get_attestation_performance( block_roots.dedup(); // Load first block so we can get its parent. - let first_block_root = block_roots - .first() - .ok_or_else(|| custom_server_error("No blocks roots could be loaded".to_string()))?; + let first_block_root = block_roots.first().ok_or_else(|| { + custom_server_error( + "No blocks roots could be loaded. Ensure the beacon node is synced.".to_string(), + ) + })?; let first_block = chain .get_block(first_block_root) .and_then(|maybe_block| { From 8fc449b1664eb77e06f7b79bc0824ecee26291cd Mon Sep 17 00:00:00 2001 From: Mac L Date: Thu, 27 Jan 2022 18:12:59 +1100 Subject: [PATCH 56/56] Fix rebase --- beacon_node/http_api/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 4617c364a7c..b37638f60fc 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -2550,6 +2550,7 @@ pub fn serve( .and(log_filter.clone()) .and_then(|query, chain, log| { blocking_json_task(move || block_rewards::get_block_rewards(query, chain, log)) + }); // GET lighthouse/analysis/attestation_performance/{index} let get_lighthouse_attestation_performance = warp::path("lighthouse") @@ -2563,7 +2564,6 @@ pub fn serve( blocking_json_task(move || { attestation_performance::get_attestation_performance(target, query, chain) }) - }); let get_events = eth1_v1