diff --git a/.github/workflows/docker-antithesis.yml b/.github/workflows/docker-antithesis.yml
new file mode 100644
index 00000000000..b7b35d1207c
--- /dev/null
+++ b/.github/workflows/docker-antithesis.yml
@@ -0,0 +1,31 @@
+name: docker antithesis
+
+on:
+  push:
+    branches:
+      - unstable
+
+env:
+  ANTITHESIS_PASSWORD: ${{ secrets.ANTITHESIS_PASSWORD }}
+  ANTITHESIS_USERNAME: ${{ secrets.ANTITHESIS_USERNAME }}
+  ANTITHESIS_SERVER: ${{ secrets.ANTITHESIS_SERVER }}
+  REPOSITORY: ${{ secrets.ANTITHESIS_REPOSITORY }}
+  IMAGE_NAME: lighthouse
+  TAG: libvoidstar
+
+jobs:
+  build-docker:
+    runs-on: ubuntu-18.04
+    steps:
+      - uses: actions/checkout@v2
+      - name: Update Rust
+        run: rustup update stable
+      - name: Dockerhub login
+        run: |
+          echo "${ANTITHESIS_PASSWORD}" | docker login --username ${ANTITHESIS_USERNAME} https://${ANTITHESIS_SERVER} --password-stdin
+      - name: Build AMD64 dockerfile (with push)
+        run: |
+          docker build \
+            --tag ${ANTITHESIS_SERVER}/${REPOSITORY}/${IMAGE_NAME}:${TAG} \
+            --file ./testing/antithesis/Dockerfile.libvoidstar .
+          docker push ${ANTITHESIS_SERVER}/${REPOSITORY}/${IMAGE_NAME}:${TAG}
diff --git a/.github/workflows/linkcheck.yml b/.github/workflows/linkcheck.yml
new file mode 100644
index 00000000000..c23ee8df36a
--- /dev/null
+++ b/.github/workflows/linkcheck.yml
@@ -0,0 +1,30 @@
+name: linkcheck
+
+on:
+  push:
+    branches:
+      - unstable
+  pull_request:
+    paths:
+      - 'book/**'
+
+jobs:
+  linkcheck:
+    name: Check broken links
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v2
+
+      - name: Create docker network
+        run: docker network create book
+
+      - name: Run mdbook server
+        run: docker run -v ${{ github.workspace }}/book:/book --network book --name book -p 3000:3000 -d peaceiris/mdbook:latest serve --hostname 0.0.0.0
+
+      - name: Print logs
+        run: docker logs book
+
+      - name: Run linkcheck
+        run: docker run --network book tennox/linkcheck:latest book:3000
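The linkcheck job above serves the book from one container and crawls it from a second container over a shared Docker network, so a broken-link failure reported by CI can be reproduced locally with the same images. A minimal sketch, assuming Docker is installed and the commands are run from the repository root (the image names, network name, and port are taken from the workflow; everything else here is illustrative):

    docker network create book
    # Serve ./book on port 3000, reachable from other containers as "book".
    docker run -v "$PWD/book:/book" --network book --name book -p 3000:3000 -d \
        peaceiris/mdbook:latest serve --hostname 0.0.0.0
    # Crawl the served book and report broken links.
    docker run --network book tennox/linkcheck:latest book:3000
    # Clean up.
    docker rm -f book && docker network rm book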
diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml
new file mode 100644
index 00000000000..f97b271c35f
--- /dev/null
+++ b/.github/workflows/local-testnet.yml
@@ -0,0 +1,50 @@
+# Test that local testnet starts successfully.
+
+name: local testnet
+
+on:
+  push:
+    branches:
+      - unstable
+  pull_request:
+
+jobs:
+  run-local-testnet:
+    strategy:
+      matrix:
+        os:
+          - ubuntu-18.04
+          - macos-latest
+    runs-on: ${{ matrix.os }}
+    steps:
+      - uses: actions/checkout@v1
+
+      - name: Install ganache
+        run: npm install ganache-cli@latest --global
+
+      # https://github.com/actions/cache/blob/main/examples.md#rust---cargo
+      - uses: actions/cache@v2
+        id: cache-cargo
+        with:
+          path: |
+            ~/.cargo/bin/
+            ~/.cargo/registry/index/
+            ~/.cargo/registry/cache/
+            ~/.cargo/git/db/
+            target/
+          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
+
+      - name: Install lighthouse
+        if: steps.cache-cargo.outputs.cache-hit != 'true'
+        run: make && make install-lcli
+
+      - name: Start local testnet
+        run: ./start_local_testnet.sh
+        working-directory: scripts/local_testnet
+
+      - name: Print logs
+        run: ./print_logs.sh
+        working-directory: scripts/local_testnet
+
+      - name: Stop local testnet
+        run: ./stop_local_testnet.sh
+        working-directory: scripts/local_testnet
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index bca28dbe2a4..4c57b8b1e7f 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -130,6 +130,19 @@ jobs:
           profile: minimal
           override: true
 
+      # ==============================
+      # Windows dependencies
+      # ==============================
+
+      - uses: KyleMayes/install-llvm-action@v1
+        if: startsWith(matrix.arch, 'x86_64-windows')
+        with:
+          version: "13.0"
+          directory: ${{ runner.temp }}/llvm
+      - name: Set LIBCLANG_PATH
+        if: startsWith(matrix.arch, 'x86_64-windows')
+        run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV
+
       # ==============================
       # Builds
       # ==============================
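The `Set LIBCLANG_PATH` step exists because bindgen loads libclang at build time (its `bindgen` dependency shows up via `mdbx-sys` in the Cargo.lock changes below), and the Windows runners need to be told where install-llvm-action put it. The `run:` line is PowerShell; a rough expansion of what it computes, assuming clang ends up on the runner's PATH as the following step itself implies (`gcm` is the built-in alias for `Get-Command`, and appending to `$env:GITHUB_ENV` persists the variable for later steps):

    # Locate clang.exe as installed by install-llvm-action, then strip the
    # file name, leaving the directory that contains libclang.
    $clangDir = (Get-Command clang).Source -replace "clang.exe"
    # Write into $GITHUB_ENV so subsequent build steps see LIBCLANG_PATH.
    echo "LIBCLANG_PATH=$clangDir" >> $env:GITHUB_ENV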
"pin-project-lite 0.2.7", + "pin-project-lite 0.2.8", ] [[package]] @@ -291,7 +282,7 @@ checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" name = "beacon_chain" version = "0.2.0" dependencies = [ - "bitvec 0.19.5", + "bitvec 0.19.6", "bls", "derivative", "environment", @@ -340,7 +331,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "2.0.1" +version = "2.1.1" dependencies = [ "beacon_chain", "clap", @@ -380,9 +371,9 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.59.1" +version = "0.59.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "453c49e5950bb0eb63bb3df640e31618846c89d5b7faa54040d76e98e0134375" +checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" dependencies = [ "bitflags", "cexpr", @@ -415,9 +406,9 @@ dependencies = [ [[package]] name = "bitvec" -version = "0.19.5" +version = "0.19.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8942c8d352ae1838c9dda0b0ca2ab657696ef2232a20147cf1b30ae1a9cb4321" +checksum = "55f93d0ef3363c364d5976646a38f04cf67cfe1d4c8d160cdea02cab2c116b33" dependencies = [ "funty", "radium 0.5.3", @@ -444,7 +435,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0a4e37d16930f5459780f5621038b6382b9bb37c19016f39fb6b5808d831f174" dependencies = [ "crypto-mac 0.8.0", - "digest", + "digest 0.9.0", "opaque-debug", ] @@ -458,6 +449,15 @@ dependencies = [ "generic-array", ] +[[package]] +name = "block-buffer" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1d36a02058e76b040de25a4464ba1c80935655595b661505c8b39b664828b95" +dependencies = [ + "generic-array", +] + [[package]] name = "block-padding" version = "0.2.1" @@ -497,7 +497,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "2.0.1" +version = "2.1.1" dependencies = [ "beacon_node", "clap", @@ -550,9 +550,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.8.0" +version = "3.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f1e260c3a9040a7c19a12468758f4c16f31a81a1fe087482be9570ec864bb6c" +checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899" [[package]] name = "byte-slice-cast" @@ -631,11 +631,11 @@ checksum = "22a9137b95ea06864e018375b72adfb7db6e6f68cfc8df5a04d00288050485ee" [[package]] name = "cexpr" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db507a7679252d2276ed0dd8113c6875ec56d3089f9225b2b42c30cc1f8e5c89" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" dependencies = [ - "nom 6.1.2", + "nom 7.1.0", ] [[package]] @@ -704,11 +704,11 @@ dependencies = [ [[package]] name = "clap" -version = "2.33.3" +version = "2.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002" +checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" dependencies = [ - "ansi_term 0.11.0", + "ansi_term", "atty", "bitflags", "strsim 0.8.0", @@ -769,9 +769,9 @@ dependencies = [ [[package]] name = "cmake" -version = "0.1.46" +version = "0.1.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7b858541263efe664aead4a5209a4ae5c5d2811167d4ed4ee0944503f8d2089" +checksum = 
"e8ad8cef104ac57b68b89df3208164d228503abbdce70f6880ffa3d970e7443a" dependencies = [ "cc", ] @@ -807,6 +807,12 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "279bc8fc53f788a75c7804af68237d1fce02cde1e275a886a4b320604dc2aeda" +[[package]] +name = "const-oid" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3" + [[package]] name = "convert_case" version = "0.4.0" @@ -849,9 +855,9 @@ dependencies = [ [[package]] name = "crc32fast" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" +checksum = "738c290dfaea84fc1ca15ad9c168d083b05a714e1efddd8edaab678dc28d2836" dependencies = [ "cfg-if", ] @@ -894,9 +900,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ed27e177f16d65f0f0c22a213e17c696ace5dd64b14258b52f9417ccb52db4" +checksum = "e54ea8bc3fb1ee042f5aace6e3c6e025d3874866da222930f70ce62aceba0bfa" dependencies = [ "cfg-if", "crossbeam-utils", @@ -915,9 +921,9 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.5" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec02e091aa634e2c3ada4a392989e7c3116673ef0ac5b72232439094d73b7fd" +checksum = "97242a70df9b89a65d0b6df3c4bf5b9ce03c5b7309019777fbde37e7537f8762" dependencies = [ "cfg-if", "crossbeam-utils", @@ -928,9 +934,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d82cfc11ce7f2c3faef78d8a684447b40d503d9681acebed6cb728d45940c4db" +checksum = "cfcae03edb34f947e64acdb1c33ec169824e20657e9ecb61cef6c8c74dcb8120" dependencies = [ "cfg-if", "lazy_static", @@ -942,6 +948,27 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" +[[package]] +name = "crypto-bigint" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03c6a1d5fa1de37e071642dfa44ec552ca5b299adb128fab16138e24b548fd21" +dependencies = [ + "generic-array", + "rand_core 0.6.3", + "subtle", + "zeroize", +] + +[[package]] +name = "crypto-common" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "683d6b536309245c849479fba3da410962a43ed8e51c26b729208ec0ac2798d0" +dependencies = [ + "generic-array", +] + [[package]] name = "crypto-mac" version = "0.8.0" @@ -970,7 +997,7 @@ checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1" dependencies = [ "bstr", "csv-core", - "itoa", + "itoa 0.4.8", "ryu", "serde", ] @@ -995,11 +1022,11 @@ dependencies = [ [[package]] name = "ctrlc" -version = "3.2.0" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "377c9b002a72a0b2c1a18c62e2f3864bdfea4a015e3683a96e24aa45dd6c02d1" +checksum = "a19c6cedffdc8c03a3346d723eb20bd85a13362bb96dc2ac000842c6381ec7bf" dependencies = [ - "nix 0.22.2", + "nix 0.23.1", "winapi", ] @@ -1010,7 +1037,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" dependencies = [ "byteorder", - "digest", + "digest 0.9.0", "rand_core 0.5.1", "subtle", "zeroize", @@ -1018,9 +1045,9 @@ dependencies = [ [[package]] name = "darling" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "757c0ded2af11d8e739c4daea1ac623dd1624b06c844cf3f5a39f1bdbd99bb12" +checksum = "d0d720b8683f8dd83c65155f0530560cba68cd2bf395f6513a483caee57ff7f4" dependencies = [ "darling_core", "darling_macro", @@ -1028,9 +1055,9 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c34d8efb62d0c2d7f60ece80f75e5c63c1588ba68032740494b0b9a996466e3" +checksum = "7a340f241d2ceed1deb47ae36c4144b2707ec7dd0b649f894cb39bb595986324" dependencies = [ "fnv", "ident_case", @@ -1042,9 +1069,9 @@ dependencies = [ [[package]] name = "darling_macro" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ade7bff147130fe5e6d39f089c6bd49ec0250f35d70b2eebf72afdfc919f15cc" +checksum = "72c41b3b7352feb3211a0d743dc5700a4e3b60f51bd2b368892d1e0f9a95f44b" dependencies = [ "darling_core", "quote", @@ -1087,7 +1114,7 @@ dependencies = [ "hex", "reqwest", "serde_json", - "sha2", + "sha2 0.9.9", "tree_hash", "types", ] @@ -1098,10 +1125,19 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2eeb9d92785d1facb50567852ce75d0858630630e7eabea59cf7eb7474051087" dependencies = [ - "const-oid", + "const-oid 0.5.2", "typenum", ] +[[package]] +name = "der" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6919815d73839e7ad218de758883aae3a257ba6759ce7a9992501efbb53d705c" +dependencies = [ + "const-oid 0.7.1", +] + [[package]] name = "derivative" version = "2.2.0" @@ -1146,6 +1182,17 @@ dependencies = [ "generic-array", ] +[[package]] +name = "digest" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b697d66081d42af4fba142d56918a3cb21dc8eb63372c6b85d14f44fb9c5979b" +dependencies = [ + "block-buffer 0.10.0", + "crypto-common", + "generic-array", +] + [[package]] name = "directory" version = "0.1.0" @@ -1198,14 +1245,14 @@ dependencies = [ [[package]] name = "discv5" -version = "0.1.0-beta.11" +version = "0.1.0-beta.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a4968631f2eb03ef8dff74fe355440bcf4bd1c514c4326325fc739640c4ec53" +checksum = "ed8f54486179d5a7f11e1f5526f49d925a411a96c1141a707bd5f071be2ab630" dependencies = [ "aes", "aes-gcm", "arrayvec 0.7.2", - "digest", + "digest 0.10.1", "enr", "fnv", "futures", @@ -1213,12 +1260,12 @@ dependencies = [ "hex", "hkdf", "lazy_static", - "libp2p-core 0.29.0", + "libp2p-core 0.30.0", "lru", "parking_lot", "rand 0.8.4", "rlp 0.5.1", - "sha2", + "sha2 0.9.9", "smallvec", "tokio", "tokio-stream", @@ -1241,12 +1288,24 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34d33b390ab82f2e1481e331dbd0530895640179d2128ef9a79cc690b78d1eba" dependencies = [ - "der", - "elliptic-curve", + "der 0.3.5", + "elliptic-curve 0.9.12", "hmac 0.11.0", "signature", ] +[[package]] +name = 
"ecdsa" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0d69ae62e0ce582d56380743515fefaf1a8c70cec685d9677636d7e30ae9dc9" +dependencies = [ + "der 0.5.1", + "elliptic-curve 0.11.6", + "rfc6979", + "signature", +] + [[package]] name = "ed25519" version = "1.3.0" @@ -1266,7 +1325,7 @@ dependencies = [ "ed25519", "rand 0.7.3", "serde", - "sha2", + "sha2 0.9.9", "zeroize", ] @@ -1313,20 +1372,37 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c13e9b0c3c4170dcc2a12783746c4205d98e18957f57854251eea3f9750fe005" dependencies = [ "bitvec 0.20.4", - "ff", + "ff 0.9.0", "generic-array", - "group", + "group 0.9.0", "pkcs8", "rand_core 0.6.3", "subtle", "zeroize", ] +[[package]] +name = "elliptic-curve" +version = "0.11.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "decb3a27ea454a5f23f96eb182af0671c12694d64ecc33dada74edd1301f6cfc" +dependencies = [ + "crypto-bigint", + "der 0.5.1", + "ff 0.11.0", + "generic-array", + "group 0.11.0", + "rand_core 0.6.3", + "sec1", + "subtle", + "zeroize", +] + [[package]] name = "encoding_rs" -version = "0.8.29" +version = "0.8.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a74ea89a0a1b98f6332de42c95baff457ada66d1cb4030f9ff151b2041a1c746" +checksum = "7896dc8abb250ffdda33912550faa54c88ec8b998dec0b2c55ab224921ce11df" dependencies = [ "cfg-if", ] @@ -1500,7 +1576,7 @@ dependencies = [ "lazy_static", "ring", "rustc-hex", - "sha2", + "sha2 0.9.9", "wasm-bindgen-test", ] @@ -1513,7 +1589,7 @@ dependencies = [ "cpufeatures 0.1.5", "lazy_static", "ring", - "sha2", + "sha2 0.9.9", ] [[package]] @@ -1539,7 +1615,7 @@ dependencies = [ "hex", "num-bigint-dig", "ring", - "sha2", + "sha2 0.9.9", "zeroize", ] @@ -1558,7 +1634,7 @@ dependencies = [ "serde", "serde_json", "serde_repr", - "sha2", + "sha2 0.9.9", "tempfile", "unicode-normalization", "uuid", @@ -1625,6 +1701,7 @@ name = "eth2_ssz_types" version = "0.2.2" dependencies = [ "arbitrary", + "derivative", "eth2_serde_utils", "eth2_ssz", "serde", @@ -1815,6 +1892,15 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" +[[package]] +name = "fastrand" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "779d043b6a0b90cc4c0ed7ee380a6504394cee7efd7db050e3774eee387324b2" +dependencies = [ + "instant", +] + [[package]] name = "ff" version = "0.9.0" @@ -1826,6 +1912,16 @@ dependencies = [ "subtle", ] +[[package]] +name = "ff" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2958d04124b9f27f175eaeb9a9f383d026098aa837eadd8ba22c11f13a05b9e" +dependencies = [ + "rand_core 0.6.3", + "subtle", +] + [[package]] name = "ffi-opaque" version = "2.0.1" @@ -1876,15 +1972,9 @@ dependencies = [ [[package]] name = "fixedbitset" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d" - -[[package]] -name = "fixedbitset" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "398ea4fabe40b9b0d885340a2a991a44c8a645624075ad966d21f88688e2b69e" +checksum = 
"279fb028e20b3c4c320317955b77c5e0c9701f05a1d309905d6fc702cdc5053e" [[package]] name = "flate2" @@ -1960,9 +2050,9 @@ checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" [[package]] name = "futures" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12aa0eb539080d55c3f2d45a67c3b58b6b0773c1a3ca2dfec66d58c97fd66ca" +checksum = "28560757fe2bb34e79f907794bb6b22ae8b0e5c669b638a1132f2592b19035b4" dependencies = [ "futures-channel", "futures-core", @@ -1975,9 +2065,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5da6ba8c3bb3c165d3c7319fc1cc8304facf1fb8db99c5de877183c08a273888" +checksum = "ba3dda0b6588335f360afc675d0564c17a77a2bda81ca178a4b6081bd86c7f0b" dependencies = [ "futures-core", "futures-sink", @@ -1985,15 +2075,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d1c26957f23603395cd326b0ffe64124b818f4449552f960d815cfba83a53d" +checksum = "d0c8ff0461b82559810cdccfde3215c3f373807f5e5232b71479bff7bb2583d7" [[package]] name = "futures-executor" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45025be030969d763025784f7f355043dc6bc74093e4ecc5000ca4dc50d8745c" +checksum = "29d6d2ff5bb10fb95c85b8ce46538a2e5f5e7fdc755623a7d4529ab8a4ed9d2a" dependencies = [ "futures-core", "futures-task", @@ -2003,18 +2093,16 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "522de2a0fe3e380f1bc577ba0474108faf3f6b18321dbf60b3b9c39a75073377" +checksum = "b1f9d34af5a1aac6fb380f735fe510746c38067c5bf16c7fd250280503c971b2" [[package]] name = "futures-macro" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18e4a4b95cea4b4ccbcf1c5675ca7c4ee4e9e75eb79944d07defde18068f79bb" +checksum = "6dbd947adfffb0efc70599b3ddcf7b5597bb5fa9e245eb99f62b3a5f7bb8bd3c" dependencies = [ - "autocfg 1.0.1", - "proc-macro-hack", "proc-macro2", "quote", "syn", @@ -2027,21 +2115,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d383f0425d991a05e564c2f3ec150bd6dde863179c131dd60d8aa73a05434461" dependencies = [ "futures-io", - "rustls 0.20.1", + "rustls 0.20.2", "webpki 0.22.0", ] [[package]] name = "futures-sink" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36ea153c13024fe480590b3e3d4cad89a0cfacecc24577b68f86c6ced9c2bc11" +checksum = "e3055baccb68d74ff6480350f8d6eb8fcfa3aa11bdc1a1ae3afdd0514617d508" [[package]] name = "futures-task" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d3d00f4eddb73e498a54394f228cd55853bdf059259e8e7bc6e69d408892e99" +checksum = "6ee7c6485c30167ce4dfb83ac568a849fe53274c831081476ee13e0dce1aad72" [[package]] name = "futures-timer" @@ -2051,11 +2139,10 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" [[package]] name = "futures-util" -version = "0.3.17" +version = "0.3.19" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "36568465210a3a6ee45e1f165136d68671471a501e632e9a98d96872222b5481" +checksum = "d9b5cf40b47a271f77a8b1bec03ca09044d99d2372c0de244e66430761127164" dependencies = [ - "autocfg 1.0.1", "futures-channel", "futures-core", "futures-io", @@ -2063,18 +2150,16 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.8", "pin-utils", - "proc-macro-hack", - "proc-macro-nested", "slab", ] [[package]] name = "generic-array" -version = "0.14.4" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" +checksum = "fd48d33ec7f05fbfa152300fdad764757cbded343c1aa1cff2fbaf4134851803" dependencies = [ "typenum", "version_check", @@ -2116,9 +2201,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" +checksum = "418d37c8b1d42553c93648be529cb70f920d3baf8ef469b74b9638df426e0b4c" dependencies = [ "cfg-if", "libc", @@ -2175,16 +2260,27 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61b3c1e8b4f1ca07e6605ea1be903a5f6956aec5c8a67fd44d56076631675ed8" dependencies = [ - "ff", + "ff 0.9.0", + "rand_core 0.6.3", + "subtle", +] + +[[package]] +name = "group" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc5ac374b108929de78460075f3dc439fa66df9d8fc77e8f12caa5165fcf0c89" +dependencies = [ + "ff 0.11.0", "rand_core 0.6.3", "subtle", ] [[package]] name = "h2" -version = "0.3.7" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fd819562fcebdac5afc5c113c3ec36f902840b70fd4fc458799c8ce4607ae55" +checksum = "0c9de88456263e249e241fcd211d3954e2c9b0ef7ccfc235a444eb367cae3689" dependencies = [ "bytes", "fnv", @@ -2293,7 +2389,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "01706d578d5c281058480e673ae4086a9f4710d8df1ad80a5b03e39ece5f886b" dependencies = [ - "digest", + "digest 0.9.0", "hmac 0.11.0", ] @@ -2304,7 +2400,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" dependencies = [ "crypto-mac 0.8.0", - "digest", + "digest 0.9.0", ] [[package]] @@ -2314,7 +2410,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" dependencies = [ "crypto-mac 0.11.1", - "digest", + "digest 0.9.0", ] [[package]] @@ -2323,7 +2419,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" dependencies = [ - "digest", + "digest 0.9.0", "generic-array", "hmac 0.8.1", ] @@ -2341,13 +2437,13 @@ dependencies = [ [[package]] name = "http" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1323096b05d41827dadeaee54c9981958c0f94e670bc94ed80037d1a7b8b186b" +checksum = "31f4c6746584866f0feabcc69893c5b51beef3831656a968ed7ae254cdc4fd03" dependencies = 
[ "bytes", "fnv", - "itoa", + "itoa 1.0.1", ] [[package]] @@ -2358,7 +2454,7 @@ checksum = "1ff4f84919677303da5f147645dbea6b1881f368d03ac84e1dc09031ebd7b2c6" dependencies = [ "bytes", "http", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.8", ] [[package]] @@ -2433,9 +2529,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.15" +version = "0.14.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "436ec0091e4f20e655156a30a0df3770fe2900aa301e548e08446ec794b6953c" +checksum = "b7ec3e62bdc98a2f0393a5048e4c30ef659440ea6e0e572965103e72bd836f55" dependencies = [ "bytes", "futures-channel", @@ -2446,8 +2542,8 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa", - "pin-project-lite 0.2.7", + "itoa 0.4.8", + "pin-project-lite 0.2.8", "socket2 0.4.2", "tokio", "tower-service", @@ -2496,6 +2592,16 @@ dependencies = [ "winapi", ] +[[package]] +name = "if-addrs" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbc0fa01ffc752e9dbc72818cdb072cd028b86be5e09dd04c5a643704fe101a9" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "if-addrs-sys" version = "0.3.2" @@ -2577,9 +2683,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc633605454125dec4b66843673f01c7df2b89479b32e0ed634e43a91cff62a5" +checksum = "282a6247722caba404c065016bbfa522806e51714c34f5dfc3e4a3a46fcb4223" dependencies = [ "autocfg 1.0.1", "hashbrown", @@ -2641,9 +2747,9 @@ checksum = "68f2d64f2edebec4ce84ad108148e67e1064789bee435edc5b60ad398714a3a9" [[package]] name = "itertools" -version = "0.10.1" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69ddb889f9d0d08a67338271fa9b62996bc788c7796a5c18cf057420aaed5eaf" +checksum = "a9a9d19fa1e79b6215ff29b9d6880b706147f16e9b1dbb1e4e5947b5b02bc5e3" dependencies = [ "either", ] @@ -2654,6 +2760,12 @@ version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" +[[package]] +name = "itoa" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" + [[package]] name = "js-sys" version = "0.3.55" @@ -2685,9 +2797,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3e8e491ed22bc161583a1c77e42313672c483eba6bd9d7afec0f1131d0b9ce" dependencies = [ "cfg-if", - "ecdsa", - "elliptic-curve", - "sha2", + "ecdsa 0.11.1", + "elliptic-curve 0.9.12", + "sha2 0.9.9", ] [[package]] @@ -2713,7 +2825,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "2.0.1" +version = "2.1.1" dependencies = [ "account_utils", "bls", @@ -2769,9 +2881,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.107" +version = "0.2.112" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbe5e23404da5b4f555ef85ebed98fb4083e55a00c317800bc2a50ede9f3d219" +checksum = "1b03d17f364a3a042d5e5d46b053bbbf82c92c9430c592dd4c064dc6ee997125" [[package]] name = "libflate" @@ -2811,9 +2923,9 @@ checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" 
[[package]] name = "libmdbx" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75aa79307892c0000dd0a8169c4db5529d32ca2302587d552870903109b46925" +checksum = "c9a8a3723c12c5caa3f2a456b645063d1d8ffb1562895fa43746a999d205b0c6" dependencies = [ "bitflags", "byteorder", @@ -2827,18 +2939,17 @@ dependencies = [ [[package]] name = "libp2p" -version = "0.41.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "782229f90bf7d5b12ee3ee08f7e160ba99f0d75eee7d118d9c1a688b13f6e64a" +version = "0.42.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "atomic", "bytes", "futures", "futures-timer", - "getrandom 0.2.3", + "getrandom 0.2.4", "instant", "lazy_static", - "libp2p-core 0.30.0", + "libp2p-core 0.31.0", "libp2p-dns", "libp2p-gossipsub", "libp2p-identify", @@ -2853,16 +2964,16 @@ dependencies = [ "libp2p-yamux", "multiaddr", "parking_lot", - "pin-project 1.0.8", + "pin-project 1.0.10", "rand 0.7.3", "smallvec", ] [[package]] name = "libp2p-core" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af9b4abdeaa420593a297c8592f63fad4234f4b88dc9343b8fd8e736c35faa59" +checksum = "bef22d9bba1e8bcb7ec300073e6802943fe8abb8190431842262b5f1c30abba1" dependencies = [ "asn1_der", "bs58", @@ -2872,19 +2983,19 @@ dependencies = [ "futures", "futures-timer", "lazy_static", - "libsecp256k1 0.5.0", + "libsecp256k1 0.7.0", "log", "multiaddr", "multihash", - "multistream-select", + "multistream-select 0.10.4", "parking_lot", - "pin-project 1.0.8", - "prost 0.8.0", - "prost-build 0.8.0", - "rand 0.7.3", + "pin-project 1.0.10", + "prost", + "prost-build", + "rand 0.8.4", "ring", "rw-stream-sink", - "sha2", + "sha2 0.9.9", "smallvec", "thiserror", "unsigned-varint 0.7.1", @@ -2894,9 +3005,8 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.30.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef22d9bba1e8bcb7ec300073e6802943fe8abb8190431842262b5f1c30abba1" +version = "0.31.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "asn1_der", "bs58", @@ -2905,20 +3015,22 @@ dependencies = [ "fnv", "futures", "futures-timer", + "instant", "lazy_static", "libsecp256k1 0.7.0", "log", "multiaddr", "multihash", - "multistream-select", + "multistream-select 0.11.0", + "p256", "parking_lot", - "pin-project 1.0.8", - "prost 0.9.0", - "prost-build 0.9.0", + "pin-project 1.0.10", + "prost", + "prost-build", "rand 0.8.4", "ring", "rw-stream-sink", - "sha2", + "sha2 0.10.1", "smallvec", "thiserror", "unsigned-varint 0.7.1", @@ -2928,12 +3040,11 @@ dependencies = [ [[package]] name = "libp2p-dns" -version = "0.30.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb8f89d15cb6e3c5bc22afff7513b11bab7856f2872d3cfba86f7f63a06bc498" +version = "0.31.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "futures", - "libp2p-core 0.30.0", + "libp2p-core 0.31.0", "log", "smallvec", "trust-dns-resolver", @@ -2941,9 +3052,8 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" -version 
= "0.34.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98942284cc1a91f24527a8b1e5bc06f7dd22fc6cee5be3d9bf5785bf902eb934" +version = "0.35.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "asynchronous-codec", "base64 0.13.0", @@ -2954,44 +3064,42 @@ dependencies = [ "futures-timer", "hex_fmt", "instant", - "libp2p-core 0.30.0", + "libp2p-core 0.31.0", "libp2p-swarm", "log", "open-metrics-client", - "pin-project 1.0.8", - "prost 0.9.0", - "prost-build 0.9.0", + "pin-project 1.0.10", + "prost", + "prost-build", "rand 0.7.3", "regex", - "sha2", + "sha2 0.10.1", "smallvec", "unsigned-varint 0.7.1", ] [[package]] name = "libp2p-identify" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec6d59e3f88435a83797fc3734f18385f6f54e0fe081e12543573364687c7db5" +version = "0.33.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "futures", "futures-timer", - "libp2p-core 0.30.0", + "libp2p-core 0.31.0", "libp2p-swarm", "log", "lru", - "prost 0.9.0", - "prost-build 0.9.0", + "prost", + "prost-build", "smallvec", ] [[package]] name = "libp2p-metrics" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d59f3be49edeecff13ef0d0dc28295ba4a33910611715f04236325d08e4119e0" +version = "0.3.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ - "libp2p-core 0.30.0", + "libp2p-core 0.31.0", "libp2p-gossipsub", "libp2p-identify", "libp2p-swarm", @@ -3000,14 +3108,13 @@ dependencies = [ [[package]] name = "libp2p-mplex" -version = "0.30.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f2cd64ef597f40e14bfce0497f50ecb63dd6d201c61796daeb4227078834fbf" +version = "0.31.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "asynchronous-codec", "bytes", "futures", - "libp2p-core 0.30.0", + "libp2p-core 0.31.0", "log", "nohash-hasher", "parking_lot", @@ -3018,20 +3125,19 @@ dependencies = [ [[package]] name = "libp2p-noise" -version = "0.33.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8772c7a99088221bb7ca9c5c0574bf55046a7ab4c319f3619b275f28c8fb87a" +version = "0.34.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "bytes", "curve25519-dalek", "futures", "lazy_static", - "libp2p-core 0.30.0", + "libp2p-core 0.31.0", "log", - "prost 0.9.0", - "prost-build 0.9.0", + "prost", + "prost-build", "rand 0.8.4", - "sha2", + "sha2 0.10.1", "snow", "static_assertions", "x25519-dalek", @@ -3040,32 +3146,30 @@ dependencies = [ [[package]] name = "libp2p-plaintext" -version = "0.30.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fba1a6ff33e4a274c89a3b1d78b9f34f32af13265cc5c46c16938262d4e945a" +version = "0.31.0" +source = 
"git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "asynchronous-codec", "bytes", "futures", - "libp2p-core 0.30.0", + "libp2p-core 0.31.0", "log", - "prost 0.9.0", - "prost-build 0.9.0", + "prost", + "prost-build", "unsigned-varint 0.7.1", "void", ] [[package]] name = "libp2p-swarm" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb84d40627cd109bbbf43da9269d4ef75903f42356c88d98b2b55c47c430c792" +version = "0.33.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "either", "futures", "futures-timer", "instant", - "libp2p-core 0.30.0", + "libp2p-core 0.31.0", "log", "rand 0.7.3", "smallvec", @@ -3074,9 +3178,8 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" -version = "0.26.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dd93a7dad9b61c39797572e4fb4fdba8415d6348b4e745b3d4cb008f84331ab" +version = "0.26.1" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "quote", "syn", @@ -3084,16 +3187,15 @@ dependencies = [ [[package]] name = "libp2p-tcp" -version = "0.30.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7399c5b6361ef525d41c11fcf51635724f832baf5819b30d3d873eabb4fbae4b" +version = "0.31.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "futures", "futures-timer", - "if-addrs", + "if-addrs 0.7.0", "ipnet", "libc", - "libp2p-core 0.30.0", + "libp2p-core 0.31.0", "log", "socket2 0.4.2", "tokio", @@ -3101,14 +3203,13 @@ dependencies = [ [[package]] name = "libp2p-websocket" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa92005fbd67695715c821e1acfe4d7be9fd2d88738574e93d645c49ec2831c8" +version = "0.33.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "either", "futures", "futures-rustls", - "libp2p-core 0.30.0", + "libp2p-core 0.31.0", "log", "quicksink", "rw-stream-sink", @@ -3119,36 +3220,16 @@ dependencies = [ [[package]] name = "libp2p-yamux" -version = "0.34.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e7362abb8867d7187e7e93df17f460d554c997fc5c8ac57dc1259057f6889af" +version = "0.35.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "futures", - "libp2p-core 0.30.0", + "libp2p-core 0.31.0", "parking_lot", "thiserror", "yamux", ] -[[package]] -name = "libsecp256k1" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd1137239ab33b41aa9637a88a28249e5e70c40a42ccc92db7f12cc356c1fcd7" -dependencies = [ - "arrayref", - "base64 0.12.3", - "digest", - "hmac-drbg", - "libsecp256k1-core 0.2.2", - "libsecp256k1-gen-ecmult 0.2.1", - "libsecp256k1-gen-genmult 0.2.1", - "rand 0.7.3", - "serde", - "sha2", - "typenum", -] - [[package]] name = 
"libsecp256k1" version = "0.6.0" @@ -3157,14 +3238,14 @@ checksum = "c9d220bc1feda2ac231cb78c3d26f27676b8cf82c96971f7aeef3d0cf2797c73" dependencies = [ "arrayref", "base64 0.12.3", - "digest", + "digest 0.9.0", "hmac-drbg", "libsecp256k1-core 0.2.2", "libsecp256k1-gen-ecmult 0.2.1", "libsecp256k1-gen-genmult 0.2.1", "rand 0.7.3", "serde", - "sha2", + "sha2 0.9.9", "typenum", ] @@ -3176,14 +3257,14 @@ checksum = "b0452aac8bab02242429380e9b2f94ea20cea2b37e2c1777a1358799bbe97f37" dependencies = [ "arrayref", "base64 0.13.0", - "digest", + "digest 0.9.0", "hmac-drbg", "libsecp256k1-core 0.3.0", "libsecp256k1-gen-ecmult 0.3.0", "libsecp256k1-gen-genmult 0.3.0", "rand 0.8.4", "serde", - "sha2", + "sha2 0.9.9", "typenum", ] @@ -3194,7 +3275,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0f6ab710cec28cef759c5f18671a27dae2a5f952cdaaee1d8e2908cb2478a80" dependencies = [ "crunchy", - "digest", + "digest 0.9.0", "subtle", ] @@ -3205,7 +3286,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5be9b9bb642d8522a44d533eab56c16c738301965504753b03ad1de3425d5451" dependencies = [ "crunchy", - "digest", + "digest 0.9.0", "subtle", ] @@ -3269,7 +3350,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "2.0.1" +version = "2.1.1" dependencies = [ "account_manager", "account_utils", @@ -3330,12 +3411,13 @@ dependencies = [ "lighthouse_metrics", "lighthouse_version", "lru", + "open-metrics-client", "parking_lot", "rand 0.7.3", "regex", "serde", "serde_derive", - "sha2", + "sha2 0.9.9", "slog", "slog-async", "slog-term", @@ -3408,9 +3490,9 @@ dependencies = [ [[package]] name = "lru" -version = "0.6.6" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ea2d928b485416e8908cff2d97d621db22b27f7b3b6729e438bcf42c671ba91" +checksum = "274353858935c992b13c0ca408752e2121da852d07dec7ce5f108c77dfa14d1f" dependencies = [ "hashbrown", ] @@ -3464,9 +3546,9 @@ checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" [[package]] name = "matchers" -version = "0.0.1" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" dependencies = [ "regex-automata", ] @@ -3479,9 +3561,9 @@ checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" [[package]] name = "mdbx-sys" -version = "0.11.1" +version = "0.11.4-git.20210105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6fb0496b0bc2274db9ae3ee92cf97bb29bf40e51b96ec1087a6374c4a42a05d" +checksum = "b21b3e0def3a5c880f6388ed2e33b695097c6b0eca039dae6010527b059f8be1" dependencies = [ "bindgen", "cc", @@ -3497,9 +3579,9 @@ checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" [[package]] name = "memoffset" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59accc507f1338036a0477ef61afdae33cde60840f4dfe481319ce3ad116ddf9" +checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" dependencies = [ "autocfg 1.0.1", ] @@ -3544,6 +3626,12 @@ dependencies = [ "unicase", ] +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + [[package]] name = "miniz_oxide" version = "0.4.4" @@ -3620,10 +3708,10 @@ version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "752a61cd890ff691b4411423d23816d5866dd5621e4d1c5687a53b94b5a979d8" dependencies = [ - "digest", + "digest 0.9.0", "generic-array", "multihash-derive", - "sha2", + "sha2 0.9.9", "unsigned-varint 0.7.1", ] @@ -3692,7 +3780,20 @@ dependencies = [ "bytes", "futures", "log", - "pin-project 1.0.8", + "pin-project 1.0.10", + "smallvec", + "unsigned-varint 0.7.1", +] + +[[package]] +name = "multistream-select" +version = "0.11.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" +dependencies = [ + "bytes", + "futures", + "log", + "pin-project 1.0.10", "smallvec", "unsigned-varint 0.7.1", ] @@ -3730,7 +3831,7 @@ dependencies = [ "genesis", "hashset_delay", "hex", - "if-addrs", + "if-addrs 0.6.7", "igd", "itertools", "lazy_static", @@ -3772,9 +3873,9 @@ dependencies = [ [[package]] name = "nix" -version = "0.22.2" +version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3bb9a13fa32bc5aeb64150cd3f32d6cf4c748f8f8a417cce5d2eb976a8370ba" +checksum = "9f866317acbd3a240710c63f065ffb1e4fd466259045ccb504130b7f668f35c6" dependencies = [ "bitflags", "cc", @@ -3811,13 +3912,12 @@ checksum = "cf51a729ecf40266a2368ad335a5fdde43471f545a967109cd62146ecf8b66ff" [[package]] name = "nom" -version = "6.1.2" +version = "7.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7413f999671bd4745a7b624bd370a569fb6bc574b23c83a3c5ed2e453f3d5e2" +checksum = "1b1d11e1ef389c76fe5b81bcaf2ea32cf88b62bc494e19f493d0b30e7a930109" dependencies = [ - "bitvec 0.19.5", - "funty", "memchr", + "minimal-lexical", "version_check", ] @@ -3892,9 +3992,9 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.13.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" +checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" dependencies = [ "hermit-abi", "libc", @@ -3911,9 +4011,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56" +checksum = "da32515d9f6e6e489d7bc9d84c71b060db7247dc035bbe44eac88cf87486d8d5" [[package]] name = "oorandom" @@ -3929,12 +4029,12 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "open-metrics-client" -version = "0.12.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7337d80c23c2d8b1349563981bc4fb531220733743ba8115454a67b181173f0d" +checksum = "9e224744b2e4da5b241857d2363a13bce60425f7b6ae2a5ff88d4d5557d9cc85" dependencies = [ "dtoa", - "itoa", + "itoa 0.4.8", "open-metrics-client-derive-text-encode", "owning_ref", ] @@ -3966,9 +4066,9 @@ dependencies = [ [[package]] name = "openssl-probe" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" +checksum = 
"ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" @@ -4022,6 +4122,18 @@ dependencies = [ "stable_deref_trait", ] +[[package]] +name = "p256" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0e0c5310031b5d4528ac6534bccc1446c289ac45c47b277d5aa91089c5f74fa" +dependencies = [ + "ecdsa 0.13.4", + "elliptic-curve 0.11.6", + "sec1", + "sha2 0.9.9", +] + [[package]] name = "parity-scale-codec" version = "1.3.7" @@ -4130,49 +4242,39 @@ dependencies = [ "ucd-trie", ] -[[package]] -name = "petgraph" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "467d164a6de56270bd7c4d070df81d07beace25012d5103ced4e9ff08d6afdb7" -dependencies = [ - "fixedbitset 0.2.0", - "indexmap", -] - [[package]] name = "petgraph" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a13a2fa9d0b63e5f22328828741e523766fff0ee9e779316902290dff3f824f" dependencies = [ - "fixedbitset 0.4.0", + "fixedbitset", "indexmap", ] [[package]] name = "pin-project" -version = "0.4.28" +version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "918192b5c59119d51e0cd221f4d49dde9112824ba717369e903c97d076083d0f" +checksum = "9615c18d31137579e9ff063499264ddc1278e7b1982757ebc111028c4d1dc909" dependencies = [ - "pin-project-internal 0.4.28", + "pin-project-internal 0.4.29", ] [[package]] name = "pin-project" -version = "1.0.8" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "576bc800220cc65dac09e99e97b08b358cfab6e17078de8dc5fee223bd2d0c08" +checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e" dependencies = [ - "pin-project-internal 1.0.8", + "pin-project-internal 1.0.10", ] [[package]] name = "pin-project-internal" -version = "0.4.28" +version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be26700300be6d9d23264c73211d8190e755b6b5ca7a1b28230025511b52a5e" +checksum = "044964427019eed9d49d9d5bbce6047ef18f37100ea400912a9fa4a3523ab12a" dependencies = [ "proc-macro2", "quote", @@ -4181,9 +4283,9 @@ dependencies = [ [[package]] name = "pin-project-internal" -version = "1.0.8" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e8fe8163d14ce7f0cdac2e040116f22eac817edabff0be91e8aff7e9accf389" +checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb" dependencies = [ "proc-macro2", "quote", @@ -4198,9 +4300,9 @@ checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" [[package]] name = "pin-project-lite" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d31d11c69a6b52a174b42bdc0c30e5e11670f90788b2c471c31c1d17d449443" +checksum = "e280fbe77cc62c91527259e9442153f4688736748d24660126286329742b4c6c" [[package]] name = "pin-utils" @@ -4214,15 +4316,15 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9c2f795bc591cb3384cb64082a578b89207ac92bb89c9d98c1ea2ace7cd8110" dependencies = [ - "der", + "der 0.3.5", "spki", ] [[package]] name = "pkg-config" -version = "0.3.22" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"12295df4f294471248581bc09bef3c38a5e46f1e36d6a37353621a0c6c357e1f" +checksum = "58893f751c9b0412871a09abd62ecd2a00298c6c83befa223ef98c52aef40cbe" [[package]] name = "platforms" @@ -4283,9 +4385,9 @@ dependencies = [ [[package]] name = "ppv-lite86" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed0cfbc8191465bed66e1718596ee0b0b35d5ee1f41c5df2189d0fe8bde535ba" +checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" [[package]] name = "primitive-types" @@ -4366,17 +4468,11 @@ version = "0.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" -[[package]] -name = "proc-macro-nested" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" - [[package]] name = "proc-macro2" -version = "1.0.32" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba508cc11742c0dc5c1659771673afbab7a0efab23aa17e854cbab0837ed0b43" +checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029" dependencies = [ "unicode-xid", ] @@ -4408,16 +4504,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "prost" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de5e2533f59d08fcf364fd374ebda0692a70bd6d7e66ef97f306f45c6c5d8020" -dependencies = [ - "bytes", - "prost-derive 0.8.0", -] - [[package]] name = "prost" version = "0.9.0" @@ -4425,25 +4511,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "444879275cb4fd84958b1a1d5420d15e6fcf7c235fe47f053c9c2a80aceb6001" dependencies = [ "bytes", - "prost-derive 0.9.0", -] - -[[package]] -name = "prost-build" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "355f634b43cdd80724ee7848f95770e7e70eefa6dcf14fea676216573b8fd603" -dependencies = [ - "bytes", - "heck", - "itertools", - "log", - "multimap", - "petgraph 0.5.1", - "prost 0.8.0", - "prost-types 0.8.0", - "tempfile", - "which", + "prost-derive", ] [[package]] @@ -4458,27 +4526,14 @@ dependencies = [ "lazy_static", "log", "multimap", - "petgraph 0.6.0", - "prost 0.9.0", - "prost-types 0.9.0", + "petgraph", + "prost", + "prost-types", "regex", "tempfile", "which", ] -[[package]] -name = "prost-derive" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "600d2f334aa05acb02a755e217ef1ab6dea4d51b58b7846588b747edec04efba" -dependencies = [ - "anyhow", - "itertools", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "prost-derive" version = "0.9.0" @@ -4492,16 +4547,6 @@ dependencies = [ "syn", ] -[[package]] -name = "prost-types" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "603bbd6394701d13f3f25aada59c7de9d35a6a5887cfc156181234a44002771b" -dependencies = [ - "bytes", - "prost 0.8.0", -] - [[package]] name = "prost-types" version = "0.9.0" @@ -4509,7 +4554,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "534b7a0e836e3c482d2693070f982e39e7611da9695d4d1f5a4b186b51faef0a" dependencies = [ "bytes", - "prost 0.9.0", + "prost", ] [[package]] @@ -4590,9 +4635,9 @@ dependencies = [ 
[[package]] name = "quote" -version = "1.0.10" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38bc8cc6a5f2e3655e0899c1b848643b2562f853f114bfec7be120678e3ace05" +checksum = "47aa80447ce4daf1717500037052af176af5d38cc3e571d9ec1c7353fc10c87d" dependencies = [ "proc-macro2", ] @@ -4696,7 +4741,7 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" dependencies = [ - "getrandom 0.2.3", + "getrandom 0.2.4", ] [[package]] @@ -4766,7 +4811,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64" dependencies = [ - "getrandom 0.2.3", + "getrandom 0.2.4", "redox_syscall", ] @@ -4807,15 +4852,16 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.6" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66d2927ca2f685faf0fc620ac4834690d29e7abb153add10f5812eef20b5e280" +checksum = "87f242f1488a539a79bac6dbe7c8609ae43b7914b7736210f239a37cccb32525" dependencies = [ "base64 0.13.0", "bytes", "encoding_rs", "futures-core", "futures-util", + "h2", "http", "http-body", "hyper", @@ -4827,12 +4873,13 @@ dependencies = [ "mime", "native-tls", "percent-encoding", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.8", "serde", "serde_json", "serde_urlencoded", "tokio", "tokio-native-tls", + "tokio-util", "url", "wasm-bindgen", "wasm-bindgen-futures", @@ -4850,6 +4897,17 @@ dependencies = [ "quick-error", ] +[[package]] +name = "rfc6979" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96ef608575f6392792f9ecf7890c00086591d29a83910939d430753f7c050525" +dependencies = [ + "crypto-bigint", + "hmac 0.11.0", + "zeroize", +] + [[package]] name = "ring" version = "0.16.20" @@ -4975,9 +5033,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.20.1" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dac4581f0fc0e0efd529d069e8189ec7b90b8e7680e21beb35141bdc45f36040" +checksum = "d37e5e2290f3e040b594b1a9e04377c2c671f1a1cfd9bfdef82106ac1c113f84" dependencies = [ "log", "ring", @@ -4987,9 +5045,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61b3909d758bb75c79f23d4736fac9433868679d3ad2ea7a61e3c25cfda9a088" +checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f" [[package]] name = "rw-stream-sink" @@ -4998,15 +5056,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4da5fcb054c46f5a5dff833b129285a93d3f0179531735e6c866e8cc307d2020" dependencies = [ "futures", - "pin-project 0.4.28", + "pin-project 0.4.29", "static_assertions", ] [[package]] name = "ryu" -version = "1.0.5" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" +checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" [[package]] name = "safe_arith" @@ -5076,7 +5134,7 @@ dependencies = [ "hmac 0.11.0", "pbkdf2 0.8.0", "salsa20", - "sha2", + "sha2 0.9.9", ] [[package]] @@ -5099,6 +5157,18 @@ dependencies = [ 
"untrusted", ] +[[package]] +name = "sec1" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08da66b8b0965a5555b6bd6639e68ccba85e1e2506f5fbb089e93f8a04e1a2d1" +dependencies = [ + "der 0.5.1", + "generic-array", + "subtle", + "zeroize", +] + [[package]] name = "secp256k1" version = "0.20.3" @@ -5110,9 +5180,9 @@ dependencies = [ [[package]] name = "secp256k1-sys" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "827cb7cce42533829c792fc51b82fbf18b125b45a702ef2c8be77fce65463a7b" +checksum = "957da2573cde917463ece3570eab4a0b3f19de6f1646cde62e6fd3868f566036" dependencies = [ "cc", ] @@ -5189,13 +5259,23 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.130" +version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913" +checksum = "97565067517b60e2d1ea8b268e59ce036de907ac523ad83a0475da04e818989a" dependencies = [ "serde_derive", ] +[[package]] +name = "serde_array_query" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d89c6e82b1005b33d5b2bbc47096800e5ad6b67ef5636f9c13ad29a6935734a7" +dependencies = [ + "serde", + "serde_urlencoded", +] + [[package]] name = "serde_cbor" version = "0.11.2" @@ -5208,9 +5288,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.130" +version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7bc1a1ab1961464eae040d96713baa5a724a8152c1222492465b54322ec508b" +checksum = "ed201699328568d8d08208fdd080e3ff594e6c422e438b6705905da01005d537" dependencies = [ "proc-macro2", "quote", @@ -5219,11 +5299,11 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.71" +version = "1.0.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "063bf466a64011ac24040a49009724ee60a57da1b437617ceb32e53ad61bfb19" +checksum = "ee2bb9cd061c5865d345bb02ca49fcef1391741b672b54a0bf7b679badec3142" dependencies = [ - "itoa", + "itoa 1.0.1", "ryu", "serde", ] @@ -5246,19 +5326,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "edfa57a7f8d9c1d260a549e7224100f6c43d43f9103e06dd8b4095a9b2b43ce9" dependencies = [ "form_urlencoded", - "itoa", + "itoa 0.4.8", "ryu", "serde", ] [[package]] name = "serde_yaml" -version = "0.8.21" +version = "0.8.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8c608a35705a5d3cdc9fbe403147647ff34b921f8e833e49306df898f9b20af" +checksum = "a4a521f2940385c165a24ee286aa8599633d162077a54bdcae2a6fd5a7bfa7a0" dependencies = [ - "dtoa", "indexmap", + "ryu", "serde", "yaml-rust", ] @@ -5269,34 +5349,45 @@ version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" dependencies = [ - "block-buffer", + "block-buffer 0.9.0", "cfg-if", "cpufeatures 0.2.1", - "digest", + "digest 0.9.0", "opaque-debug", ] [[package]] name = "sha2" -version = "0.9.8" +version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b69f9a4c9740d74c5baa3fd2e547f9525fa8088a8a958e0ca2409a514e33f5fa" +checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" dependencies = [ - "block-buffer", 
+ "block-buffer 0.9.0", "cfg-if", "cpufeatures 0.2.1", - "digest", + "digest 0.9.0", "opaque-debug", ] +[[package]] +name = "sha2" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99c3bd8169c58782adad9290a9af5939994036b76187f7b4f0e6de91dbbfc0ec" +dependencies = [ + "cfg-if", + "cpufeatures 0.2.1", + "digest 0.10.1", +] + [[package]] name = "sha3" version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" dependencies = [ - "block-buffer", - "digest", + "block-buffer 0.9.0", + "digest 0.9.0", "keccak", "opaque-debug", ] @@ -5331,7 +5422,7 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2807892cfa58e081aa1f1111391c7a0649d4fa127a4ffbe34bcbfb35a1171a4" dependencies = [ - "digest", + "digest 0.9.0", "rand_core 0.6.3", ] @@ -5409,6 +5500,7 @@ dependencies = [ name = "slashing_protection" version = "0.1.0" dependencies = [ + "arbitrary", "eth2_serde_utils", "filesystem", "lazy_static", @@ -5534,9 +5626,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ecab6c735a6bb4139c0caafd0cc3635748bbb3acf4550e8138122099251f309" +checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" [[package]] name = "snap" @@ -5557,7 +5649,7 @@ dependencies = [ "rand_core 0.6.3", "ring", "rustc_version 0.3.3", - "sha2", + "sha2 0.9.9", "subtle", "x25519-dalek", ] @@ -5626,7 +5718,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9dae7e047abc519c96350e9484a96c6bf1492348af912fd3446dd2dc323f6268" dependencies = [ - "der", + "der 0.3.5", ] [[package]] @@ -5741,9 +5833,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "superstruct" -version = "0.3.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecffe12af481bd0b8950f90676d61fb1e5fc33f1f1c41ce5df11e83fb509aaab" +checksum = "4e623e69a04a6352677c1f892027e14e034dfc6c4aabed0a4a0be9c1a0a46cee" dependencies = [ "darling", "itertools", @@ -5763,9 +5855,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.82" +version = "1.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8daf5dd0bb60cbd4137b1b587d2fc0ae729bc07cf01cd70b36a1ed5ade3b9d59" +checksum = "a684ac3dcd8913827e18cd09a68384ee66c1de24157e3c556c9ab16d85695fb7" dependencies = [ "proc-macro2", "quote", @@ -5823,13 +5915,13 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.2.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" +checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" dependencies = [ "cfg-if", + "fastrand", "libc", - "rand 0.8.4", "redox_syscall", "remove_dir_all", "winapi", @@ -5902,9 +5994,9 @@ dependencies = [ [[package]] name = "thread_local" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8018d24e04c95ac8790716a5987d0fec4f8b27249ffa0f7d33f1369bdfb88cbd" +checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" dependencies = [ 
"once_cell", ] @@ -5960,7 +6052,7 @@ dependencies = [ "pbkdf2 0.4.0", "rand 0.7.3", "rustc-hash", - "sha2", + "sha2 0.9.9", "thiserror", "unicode-normalization", "wasm-bindgen", @@ -6012,11 +6104,10 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.14.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70e992e41e0d2fb9f755b37446f20900f64446ef54874f40a60c78f021ac6144" +checksum = "fbbf1c778ec206785635ce8ad57fe52b3009ae9e0c9f574a728f3049d3e55838" dependencies = [ - "autocfg 1.0.1", "bytes", "libc", "memchr", @@ -6024,7 +6115,7 @@ dependencies = [ "num_cpus", "once_cell", "parking_lot", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.8", "signal-hook-registry", "tokio-macros", "winapi", @@ -6032,19 +6123,19 @@ dependencies = [ [[package]] name = "tokio-io-timeout" -version = "1.1.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90c49f106be240de154571dd31fbe48acb10ba6c6dd6f6517ad603abffa42de9" +checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" dependencies = [ - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.8", "tokio", ] [[package]] name = "tokio-macros" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9efc1aba077437943f7515666aa2b882dfabfbfdf89c819ea75a8d6e9eaba5e" +checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" dependencies = [ "proc-macro2", "quote", @@ -6079,7 +6170,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50145484efff8818b5ccd256697f36863f587da82cf8b409c53adf1e840798e3" dependencies = [ "futures-core", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.8", "tokio", "tokio-util", ] @@ -6092,7 +6183,7 @@ checksum = "e1a5f475f1b9d077ea1017ecbc60890fda8e54942d680ca0b1d2b47cfa2d861b" dependencies = [ "futures-util", "log", - "pin-project 1.0.8", + "pin-project 1.0.10", "tokio", "tungstenite 0.12.0", ] @@ -6105,7 +6196,7 @@ checksum = "511de3f85caf1c98983545490c3d09685fa8eb634e57eec22bb4db271f46cbd8" dependencies = [ "futures-util", "log", - "pin-project 1.0.8", + "pin-project 1.0.10", "tokio", "tungstenite 0.14.0", ] @@ -6121,7 +6212,7 @@ dependencies = [ "futures-io", "futures-sink", "log", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.8", "slab", "tokio", ] @@ -6149,7 +6240,7 @@ checksum = "375a639232caf30edfc78e8d89b2d4c375515393e7af7e16f01cd96917fb2105" dependencies = [ "cfg-if", "log", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.8", "tracing-attributes", "tracing-core", ] @@ -6180,7 +6271,7 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" dependencies = [ - "pin-project 1.0.8", + "pin-project 1.0.10", "tracing", ] @@ -6195,36 +6286,22 @@ dependencies = [ "tracing-core", ] -[[package]] -name = "tracing-serde" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb65ea441fbb84f9f6748fd496cf7f63ec9af5bca94dd86456978d055e8eb28b" -dependencies = [ - "serde", - "tracing-core", -] - [[package]] name = "tracing-subscriber" -version = "0.2.25" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0e0d2eaa99c3c2e41547cfa109e910a68ea03823cccad4a0525dcbc9b01e8c71" +checksum = "5d81bfa81424cc98cb034b837c985b7a290f592e5b4322f353f94a0ab0f9f594" dependencies = [ - "ansi_term 0.12.1", - "chrono", + "ansi_term", "lazy_static", "matchers", "regex", - "serde", - "serde_json", "sharded-slab", "smallvec", "thread_local", "tracing", "tracing-core", "tracing-log", - "tracing-serde", ] [[package]] @@ -6381,9 +6458,9 @@ dependencies = [ [[package]] name = "typenum" -version = "1.14.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b63708a265f51345575b27fe43f9500ad611579e764c79edbc2037b1121959ec" +checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" [[package]] name = "types" @@ -6418,6 +6495,7 @@ dependencies = [ "safe_arith", "serde", "serde_derive", + "serde_json", "serde_yaml", "slog", "state_processing", @@ -6568,7 +6646,7 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom 0.2.3", + "getrandom 0.2.4", "serde", ] @@ -6654,9 +6732,9 @@ checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] name = "version_check" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "void" @@ -6700,7 +6778,7 @@ dependencies = [ "mime_guess", "multipart 0.17.1", "percent-encoding", - "pin-project 1.0.8", + "pin-project 1.0.10", "scoped-tls", "serde", "serde_json", @@ -6731,7 +6809,7 @@ dependencies = [ "mime_guess", "multipart 0.18.0", "percent-encoding", - "pin-project 1.0.8", + "pin-project 1.0.10", "scoped-tls", "serde", "serde_json", @@ -6756,6 +6834,7 @@ dependencies = [ "lighthouse_metrics", "safe_arith", "serde", + "serde_array_query", "state_processing", "tokio", "types", @@ -6893,7 +6972,7 @@ dependencies = [ "jsonrpc-core", "log", "parking_lot", - "pin-project 1.0.8", + "pin-project 1.0.10", "reqwest", "rlp 0.5.1", "secp256k1", @@ -6966,9 +7045,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.22.1" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c475786c6f47219345717a043a37ec04cb4bc185e28853adcc4fa0a947eba630" +checksum = "552ceb903e957524388c4d3475725ff2c8b7960922063af6ce53c9a43da07449" dependencies = [ "webpki 0.22.0", ] diff --git a/Makefile b/Makefile index 6856635ebdd..a4b880b8065 100644 --- a/Makefile +++ b/Makefile @@ -144,6 +144,7 @@ test-full: cargo-fmt test-release test-debug test-ef # Clippy lints are opt-in per-crate for now. By default, everything is allowed except for performance and correctness lints. 
lint: cargo clippy --workspace --tests -- \ + -D clippy::fn_to_numeric_cast_any \ -D warnings \ -A clippy::from-over-into \ -A clippy::upper-case-acronyms \ @@ -157,9 +158,10 @@ lint: make-ef-tests: make -C $(EF_TESTS) -# Verifies that state_processing feature arbitrary-fuzz will compile +# Verifies that crates compile with fuzzing features enabled arbitrary-fuzz: - cargo check --manifest-path=consensus/state_processing/Cargo.toml --features arbitrary-fuzz + cargo check -p state_processing --features arbitrary-fuzz + cargo check -p slashing_protection --features arbitrary-fuzz # Runs cargo audit (Audit Cargo.lock files for crates with security vulnerabilities reported to the RustSec Advisory Database) audit: diff --git a/README.md b/README.md index 8c536752349..00900b8c3d7 100644 --- a/README.md +++ b/README.md @@ -66,8 +66,7 @@ of the Lighthouse book. ## Contact The best place for discussion is the [Lighthouse Discord -server](https://discord.gg/cyAszAh). Alternatively, you may use the -[sigp/lighthouse gitter](https://gitter.im/sigp/lighthouse). +server](https://discord.gg/cyAszAh). Sign up to the [Lighthouse Development Updates](http://eepurl.com/dh9Lvb) mailing list for email notifications about releases, network status and other important information. diff --git a/account_manager/src/validator/exit.rs b/account_manager/src/validator/exit.rs index 221c31caf61..ca8cab5bd31 100644 --- a/account_manager/src/validator/exit.rs +++ b/account_manager/src/validator/exit.rs @@ -21,6 +21,7 @@ pub const KEYSTORE_FLAG: &str = "keystore"; pub const PASSWORD_FILE_FLAG: &str = "password-file"; pub const BEACON_SERVER_FLAG: &str = "beacon-node"; pub const NO_WAIT: &str = "no-wait"; +pub const NO_CONFIRMATION: &str = "no-confirmation"; pub const PASSWORD_PROMPT: &str = "Enter the keystore password"; pub const DEFAULT_BEACON_NODE: &str = "http://localhost:5052/"; @@ -59,6 +60,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .long(NO_WAIT) .help("Exits after publishing the voluntary exit without waiting for confirmation that the exit was included in the beacon chain") ) + .arg( + Arg::with_name(NO_CONFIRMATION) + .long(NO_CONFIRMATION) + .help("Exits without prompting for confirmation that you understand the implications of a voluntary exit. This should be used with caution") + ) .arg( Arg::with_name(STDIN_INPUTS_FLAG) .takes_value(false) @@ -75,6 +81,7 @@ pub fn cli_run(matches: &ArgMatches, env: Environment) -> Result< let stdin_inputs = cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG); let no_wait = matches.is_present(NO_WAIT); + let no_confirmation = matches.is_present(NO_CONFIRMATION); let spec = env.eth2_config().spec.clone(); let server_url: String = clap_utils::parse_required(matches, BEACON_SERVER_FLAG)?; @@ -97,12 +104,14 @@ pub fn cli_run(matches: &ArgMatches, env: Environment) -> Result< stdin_inputs, ð2_network_config, no_wait, + no_confirmation, ))?; Ok(()) } /// Gets the keypair and validator_index for every validator and calls `publish_voluntary_exit` on it. 
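///
/// (With the new `--no-confirmation` flag added in this diff, the interactive prompt below is
/// skipped and the confirmation phrase is supplied programmatically; see the `no_confirmation`
/// branch further down.)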
+#[allow(clippy::too_many_arguments)] async fn publish_voluntary_exit( keystore_path: &Path, password_file_path: Option<&PathBuf>, @@ -111,6 +120,7 @@ async fn publish_voluntary_exit( stdin_inputs: bool, eth2_network_config: &Eth2NetworkConfig, no_wait: bool, + no_confirmation: bool, ) -> Result<(), String> { let genesis_data = get_geneisis_data(client).await?; let testnet_genesis_root = eth2_network_config @@ -149,15 +159,22 @@ async fn publish_voluntary_exit( "Publishing a voluntary exit for validator: {} \n", keypair.pk ); - eprintln!("WARNING: THIS IS AN IRREVERSIBLE OPERATION\n"); - eprintln!("{}\n", PROMPT); - eprintln!( - "PLEASE VISIT {} TO MAKE SURE YOU UNDERSTAND THE IMPLICATIONS OF A VOLUNTARY EXIT.", - WEBSITE_URL - ); - eprintln!("Enter the exit phrase from the above URL to confirm the voluntary exit: "); + if !no_confirmation { + eprintln!("WARNING: THIS IS AN IRREVERSIBLE OPERATION\n"); + eprintln!("{}\n", PROMPT); + eprintln!( + "PLEASE VISIT {} TO MAKE SURE YOU UNDERSTAND THE IMPLICATIONS OF A VOLUNTARY EXIT.", + WEBSITE_URL + ); + eprintln!("Enter the exit phrase from the above URL to confirm the voluntary exit: "); + } + + let confirmation = if !no_confirmation { + account_utils::read_input_from_user(stdin_inputs)? + } else { + CONFIRMATION_PHRASE.to_string() + }; - let confirmation = account_utils::read_input_from_user(stdin_inputs)?; if confirmation == CONFIRMATION_PHRASE { // Sign and publish the voluntary exit to network let signed_voluntary_exit = voluntary_exit.sign( diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 80f9182efe7..c8cd5152af0 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "2.0.1" +version = "2.1.1" authors = ["Paul Hauner ", "Age Manning IndexedAggregatedAttestation<'a, T> { // MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance). // // We do not queue future attestations for later processing. - verify_propagation_slot_range(chain, attestation)?; + verify_propagation_slot_range(&chain.slot_clock, attestation)?; // Check the attestation's epoch matches its target. if attestation.data.slot.epoch(T::EthSpec::slots_per_epoch()) @@ -716,7 +698,7 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> { // MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance). // // We do not queue future attestations for later processing. - verify_propagation_slot_range(chain, attestation)?; + verify_propagation_slot_range(&chain.slot_clock, attestation)?; // Check to ensure that the attestation is "unaggregated". I.e., it has exactly one // aggregation bit set. @@ -986,11 +968,17 @@ fn verify_head_block_is_known( attestation: &Attestation, max_skip_slots: Option, ) -> Result { - if let Some(block) = chain + let block_opt = chain .fork_choice .read() .get_block(&attestation.data.beacon_block_root) - { + .or_else(|| { + chain + .early_attester_cache + .get_proto_block(attestation.data.beacon_block_root) + }); + + if let Some(block) = block_opt { // Reject any block that exceeds our limit on skipped slots. if let Some(max_skip_slots) = max_skip_slots { if attestation.data.slot > block.slot + max_skip_slots { @@ -1013,14 +1001,13 @@ fn verify_head_block_is_known( /// to the current slot of the `chain`. /// /// Accounts for `MAXIMUM_GOSSIP_CLOCK_DISPARITY`. 
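///
/// As a rough sketch of the window this enforces (assuming mainnet's 32 slots per epoch): an
/// attestation for slot `S` is accepted iff `now - 32 - disparity <= S <= now + disparity`,
/// where `now` is read from the slot clock and `disparity` is `MAXIMUM_GOSSIP_CLOCK_DISPARITY`.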
-pub fn verify_propagation_slot_range( - chain: &BeaconChain, - attestation: &Attestation, +pub fn verify_propagation_slot_range( + slot_clock: &S, + attestation: &Attestation, ) -> Result<(), Error> { let attestation_slot = attestation.data.slot; - let latest_permissible_slot = chain - .slot_clock + let latest_permissible_slot = slot_clock .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) .ok_or(BeaconChainError::UnableToReadSlot)?; if attestation_slot > latest_permissible_slot { @@ -1031,11 +1018,10 @@ pub fn verify_propagation_slot_range( } // Taking advantage of saturating subtraction on `Slot`. - let earliest_permissible_slot = chain - .slot_clock + let earliest_permissible_slot = slot_clock .now_with_past_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) .ok_or(BeaconChainError::UnableToReadSlot)? - - T::EthSpec::slots_per_epoch(); + - E::slots_per_epoch(); if attestation_slot < earliest_permissible_slot { return Err(Error::PastSlot { attestation_slot, @@ -1242,7 +1228,9 @@ where // processing an attestation that does not include our latest finalized block in its chain. // // We do not delay consideration for later, we simply drop the attestation. - if !chain.fork_choice.read().contains_block(&target.root) { + if !chain.fork_choice.read().contains_block(&target.root) + && !chain.early_attester_cache.contains_block(target.root) + { return Err(Error::UnknownTargetRoot(target.root)); } diff --git a/beacon_node/beacon_chain/src/attester_cache.rs b/beacon_node/beacon_chain/src/attester_cache.rs index 01662efc135..24963a125d2 100644 --- a/beacon_node/beacon_chain/src/attester_cache.rs +++ b/beacon_node/beacon_chain/src/attester_cache.rs @@ -75,7 +75,7 @@ impl From for Error { /// Stores the minimal amount of data required to compute the committee length for any committee at any /// slot in a given `epoch`. -struct CommitteeLengths { +pub struct CommitteeLengths { /// The `epoch` to which the lengths pertain. epoch: Epoch, /// The length of the shuffling in `self.epoch`. @@ -84,7 +84,7 @@ struct CommitteeLengths { impl CommitteeLengths { /// Instantiate `Self` using `state.current_epoch()`. - fn new(state: &BeaconState, spec: &ChainSpec) -> Result { + pub fn new(state: &BeaconState, spec: &ChainSpec) -> Result { let active_validator_indices_len = if let Ok(committee_cache) = state.committee_cache(RelativeEpoch::Current) { @@ -101,8 +101,16 @@ impl CommitteeLengths { }) } + /// Get the count of committees per slot of `self.epoch`. + pub fn get_committee_count_per_slot( + &self, + spec: &ChainSpec, + ) -> Result { + T::get_committee_count_per_slot(self.active_validator_indices_len, spec).map_err(Into::into) + } + /// Get the length of the committee at the given `slot` and `committee_index`.
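///
/// (The committee is addressed by its epoch-wide index, roughly
/// `(slot % slots_per_epoch) * committees_per_slot + committee_index`; see the call to
/// `compute_committee_index_in_epoch` in the body.)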
- fn get( + pub fn get_committee_length( &self, slot: Slot, committee_index: CommitteeIndex, @@ -120,8 +128,7 @@ impl CommitteeLengths { } let slots_per_epoch = slots_per_epoch as usize; - let committees_per_slot = - T::get_committee_count_per_slot(self.active_validator_indices_len, spec)?; + let committees_per_slot = self.get_committee_count_per_slot::(spec)?; let index_in_epoch = compute_committee_index_in_epoch( slot, slots_per_epoch, @@ -172,7 +179,7 @@ impl AttesterCacheValue { spec: &ChainSpec, ) -> Result<(JustifiedCheckpoint, CommitteeLength), Error> { self.committee_lengths - .get::(slot, committee_index, spec) + .get_committee_length::(slot, committee_index, spec) .map(|committee_length| (self.current_justified_checkpoint, committee_length)) } } diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 0dbff198181..4e1d54dc136 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -12,6 +12,7 @@ use crate::block_verification::{ IntoFullyVerifiedBlock, }; use crate::chain_config::ChainConfig; +use crate::early_attester_cache::EarlyAttesterCache; use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::eth1_chain::{Eth1Chain, Eth1ChainBackend}; use crate::events::ServerSentEventHandler; @@ -107,6 +108,9 @@ pub const OP_POOL_DB_KEY: Hash256 = Hash256::zero(); pub const ETH1_CACHE_DB_KEY: Hash256 = Hash256::zero(); pub const FORK_CHOICE_DB_KEY: Hash256 = Hash256::zero(); +/// Defines how old a block can be before it's no longer a candidate for the early attester cache. +const EARLY_ATTESTER_CACHE_HISTORIC_SLOTS: u64 = 4; + /// Defines the behaviour when a block/block-root for a skipped slot is requested. pub enum WhenSlotSkipped { /// If the slot is a skip slot, return `None`. @@ -328,10 +332,10 @@ pub struct BeaconChain { pub(crate) validator_pubkey_cache: TimeoutRwLock>, /// A cache used when producing attestations. pub(crate) attester_cache: Arc, + /// A cache used when producing attestations whilst the head block is still being imported. + pub early_attester_cache: EarlyAttesterCache, /// A cache used to keep track of various block timings. pub block_times_cache: Arc>, - /// A list of any hard-coded forks that have been disabled. - pub disabled_forks: Vec, /// Sender given to tasks, so that if they encounter a state in which execution cannot /// continue they can request that everything shuts down. pub shutdown_sender: Sender, @@ -926,6 +930,28 @@ impl BeaconChain { )? } + /// Returns the block at the given root, if any. + /// + /// Will also check the early attester cache for the block. Because of this, there's no + /// guarantee that a block returned from this function has a `BeaconState` available in + /// `self.store`. The expected use for this function is *only* for returning blocks requested + /// from P2P peers. + /// + /// ## Errors + /// + /// May return a database error. + pub fn get_block_checking_early_attester_cache( + &self, + block_root: &Hash256, + ) -> Result>, Error> { + let block_opt = self + .store + .get_block(block_root)? + .or_else(|| self.early_attester_cache.get_block(*block_root)); + + Ok(block_opt) + } + /// Returns the block at the given root, if any. 
/// /// ## Errors @@ -1099,6 +1125,7 @@ impl BeaconChain { .message() .body() .execution_payload() + .ok() .map(|ep| ep.block_hash), }) }) @@ -1421,6 +1448,29 @@ impl BeaconChain { ) -> Result, Error> { let _total_timer = metrics::start_timer(&metrics::ATTESTATION_PRODUCTION_SECONDS); + // The early attester cache will return `Some(attestation)` in the scenario where there is a + // block being imported that will become the head block, but that block has not yet been + // inserted into the database and set as `self.canonical_head`. + // + // In effect, the early attester cache prevents slow database IO from causing missed + // head/target votes. + match self + .early_attester_cache + .try_attest(request_slot, request_index, &self.spec) + { + // The cache matched this request, return the value. + Ok(Some(attestation)) => return Ok(attestation), + // The cache did not match this request, proceed with the rest of this function. + Ok(None) => (), + // The cache returned an error. Log the error and proceed with the rest of this + // function. + Err(e) => warn!( + self.log, + "Early attester cache failed"; + "error" => ?e + ), + } + let slots_per_epoch = T::EthSpec::slots_per_epoch(); let request_epoch = request_slot.epoch(slots_per_epoch); @@ -2601,8 +2651,44 @@ impl BeaconChain { } } + // If the block is recent enough, check to see if it becomes the head block. If so, apply it + // to the early attester cache. This will allow attestations to the block without waiting + // for the block and state to be inserted to the database. + // + // Only performing this check on recent blocks avoids slowing down sync with lots of calls + // to fork choice `get_head`. + if block.slot() + EARLY_ATTESTER_CACHE_HISTORIC_SLOTS >= current_slot { + let new_head_root = fork_choice + .get_head(current_slot, &self.spec) + .map_err(BeaconChainError::from)?; + + if new_head_root == block_root { + if let Some(proto_block) = fork_choice.get_block(&block_root) { + if let Err(e) = self.early_attester_cache.add_head_block( + block_root, + signed_block.clone(), + proto_block, + &state, + &self.spec, + ) { + warn!( + self.log, + "Early attester cache insert failed"; + "error" => ?e + ); + } + } else { + warn!( + self.log, + "Early attester block missing"; + "block_root" => ?block_root + ); + } + } + } + // Register sync aggregate with validator monitor - if let Some(sync_aggregate) = block.body().sync_aggregate() { + if let Ok(sync_aggregate) = block.body().sync_aggregate() { // `SyncCommittee` for the sync_aggregate should correspond to the duty slot let duty_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); let sync_committee = self.sync_committee_at_epoch(duty_epoch)?; @@ -2643,7 +2729,7 @@ impl BeaconChain { block.body().attestations().len() as f64, ); - if let Some(sync_aggregate) = block.body().sync_aggregate() { + if let Ok(sync_aggregate) = block.body().sync_aggregate() { metrics::set_gauge( &metrics::BLOCK_SYNC_AGGREGATE_SET_BITS, sync_aggregate.num_set_bits() as i64, @@ -2708,6 +2794,7 @@ impl BeaconChain { beacon_block_root: block_root, }, None, + &self.spec, ) }) .unwrap_or_else(|e| { @@ -3071,7 +3158,7 @@ impl BeaconChain { trace!( self.log, "Produced beacon block"; - "parent" => %block.parent_root(), + "parent" => ?block.parent_root(), "attestations" => block.body().attestations().len(), "slot" => block.slot() ); @@ -3177,10 +3264,10 @@ impl BeaconChain { warn!( self.log, "Beacon chain re-org"; - "previous_head" => %current_head.block_root, + "previous_head" => ?current_head.block_root, "previous_slot" 
=> current_head.slot, - "new_head_parent" => %new_head.beacon_block.parent_root(), - "new_head" => %beacon_block_root, + "new_head_parent" => ?new_head.beacon_block.parent_root(), + "new_head" => ?beacon_block_root, "new_slot" => new_head.beacon_block.slot(), "reorg_distance" => reorg_distance, ); @@ -3188,11 +3275,11 @@ impl BeaconChain { debug!( self.log, "Head beacon block"; - "justified_root" => %new_head.beacon_state.current_justified_checkpoint().root, + "justified_root" => ?new_head.beacon_state.current_justified_checkpoint().root, "justified_epoch" => new_head.beacon_state.current_justified_checkpoint().epoch, - "finalized_root" => %new_head.beacon_state.finalized_checkpoint().root, + "finalized_root" => ?new_head.beacon_state.finalized_checkpoint().root, "finalized_epoch" => new_head.beacon_state.finalized_checkpoint().epoch, - "root" => %beacon_block_root, + "root" => ?beacon_block_root, "slot" => new_head.beacon_block.slot(), ); }; @@ -3241,11 +3328,15 @@ impl BeaconChain { .message() .body() .execution_payload() + .ok() .map(|ep| ep.block_hash); let is_merge_transition_complete = is_merge_transition_complete(&new_head.beacon_state); drop(lag_timer); + // Clear the early attester cache in case it conflicts with `self.canonical_head`. + self.early_attester_cache.clear(); + // Update the snapshot that stores the head of the chain at the time it received the // block. *self @@ -3528,6 +3619,7 @@ impl BeaconChain { .message() .body() .execution_payload() + .ok() .map(|ep| ep.block_hash) .unwrap_or_else(Hash256::zero); @@ -3647,6 +3739,12 @@ impl BeaconChain { .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) .map(|mut snapshot_cache| { snapshot_cache.prune(new_finalized_checkpoint.epoch); + debug!( + self.log, + "Snapshot cache pruned"; + "new_len" => snapshot_cache.len(), + "remaining_roots" => ?snapshot_cache.beacon_block_roots(), + ); }) .unwrap_or_else(|| { error!( diff --git a/beacon_node/beacon_chain/src/block_reward.rs b/beacon_node/beacon_chain/src/block_reward.rs new file mode 100644 index 00000000000..83b204113fe --- /dev/null +++ b/beacon_node/beacon_chain/src/block_reward.rs @@ -0,0 +1,97 @@ +use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use eth2::lighthouse::{AttestationRewards, BlockReward, BlockRewardMeta}; +use operation_pool::{AttMaxCover, MaxCover}; +use state_processing::per_block_processing::altair::sync_committee::compute_sync_aggregate_rewards; +use types::{BeaconBlockRef, BeaconState, EthSpec, Hash256, RelativeEpoch}; + +impl BeaconChain { + pub fn compute_block_reward( + &self, + block: BeaconBlockRef<'_, T::EthSpec>, + block_root: Hash256, + state: &BeaconState, + ) -> Result { + if block.slot() != state.slot() { + return Err(BeaconChainError::BlockRewardSlotError); + } + + let active_indices = state.get_cached_active_validator_indices(RelativeEpoch::Current)?; + let total_active_balance = state.get_total_balance(active_indices, &self.spec)?; + let mut per_attestation_rewards = block + .body() + .attestations() + .iter() + .map(|att| { + AttMaxCover::new(att, state, total_active_balance, &self.spec) + .ok_or(BeaconChainError::BlockRewardAttestationError) + }) + .collect::, _>>()?; + + // Update the attestation rewards for each previous attestation included. + // This is O(n^2) in the number of attestations n. 
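+        //
+        // Sketch of the intent: if two attestations in the block both freshly cover
+        // validator `v`, only the earlier one earns the reward for `v`, so
+        // `update_covering_set` strips `v` from every later attestation's reward set.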
+ for i in 0..per_attestation_rewards.len() { + let (updated, to_update) = per_attestation_rewards.split_at_mut(i + 1); + let latest_att = &updated[i]; + + for att in to_update { + att.update_covering_set(latest_att.object(), latest_att.covering_set()); + } + } + + let mut prev_epoch_total = 0; + let mut curr_epoch_total = 0; + + for cover in &per_attestation_rewards { + for &reward in cover.fresh_validators_rewards.values() { + if cover.att.data.slot.epoch(T::EthSpec::slots_per_epoch()) == state.current_epoch() + { + curr_epoch_total += reward; + } else { + prev_epoch_total += reward; + } + } + } + + let attestation_total = prev_epoch_total + curr_epoch_total; + + // Drop the covers. + let per_attestation_rewards = per_attestation_rewards + .into_iter() + .map(|cover| cover.fresh_validators_rewards) + .collect(); + + let attestation_rewards = AttestationRewards { + total: attestation_total, + prev_epoch_total, + curr_epoch_total, + per_attestation_rewards, + }; + + // Sync committee rewards. + let sync_committee_rewards = if let Ok(sync_aggregate) = block.body().sync_aggregate() { + let (_, proposer_reward_per_bit) = compute_sync_aggregate_rewards(state, &self.spec) + .map_err(|_| BeaconChainError::BlockRewardSyncError)?; + sync_aggregate.sync_committee_bits.num_set_bits() as u64 * proposer_reward_per_bit + } else { + 0 + }; + + // Total, metadata + let total = attestation_total + sync_committee_rewards; + + let meta = BlockRewardMeta { + slot: block.slot(), + parent_slot: state.latest_block_header().slot, + proposer_index: block.proposer_index(), + graffiti: block.body().graffiti().as_utf8_lossy(), + }; + + Ok(BlockReward { + total, + block_root, + meta, + attestation_rewards, + sync_committee_rewards, + }) + } +} diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index c6d937c81e9..a4a1dc31b95 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -53,6 +53,7 @@ use crate::{ }, metrics, BeaconChain, BeaconChainError, BeaconChainTypes, }; +use eth2::types::EventKind; use fork_choice::{ForkChoice, ForkChoiceStore, PayloadVerificationStatus}; use parking_lot::RwLockReadGuard; use proto_array::Block as ProtoBlock; @@ -1165,6 +1166,18 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { metrics::stop_timer(committee_timer); + /* + * If we have block reward listeners, compute the block reward and push it to the + * event handler. + */ + if let Some(ref event_handler) = chain.event_handler { + if event_handler.has_block_reward_subscribers() { + let block_reward = + chain.compute_block_reward(block.message(), block_root, &state)?; + event_handler.register(EventKind::BlockReward(block_reward)); + } + } + /* * Perform `per_block_processing` on the block and state, returning early if the block is * invalid. 
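The plumbing above (computing a `BlockReward` during block verification and registering it with the event handler) pairs with the `events.rs` changes later in this diff. A minimal sketch of an in-process consumer follows; the function name `log_block_rewards` and the printing are illustrative, not part of this diff:

use beacon_chain::events::ServerSentEventHandler;
use eth2::types::EventKind;
use types::EthSpec;

// Hypothetical consumer of the new block-reward events (sketch only).
async fn log_block_rewards<T: EthSpec>(handler: &ServerSentEventHandler<T>) {
    // `subscribe_block_reward` (added in events.rs below) yields a tokio broadcast receiver.
    let mut rx = handler.subscribe_block_reward();
    while let Ok(event) = rx.recv().await {
        if let EventKind::BlockReward(reward) = event {
            // `total` sums the attestation and sync-committee components computed
            // by `compute_block_reward` above.
            println!("block {:?} reward: {} Gwei", reward.block_root, reward.total);
        }
    }
}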
diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 54397a7d556..24a9a916bba 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -84,7 +84,6 @@ pub struct BeaconChainBuilder { validator_pubkey_cache: Option>, spec: ChainSpec, chain_config: ChainConfig, - disabled_forks: Vec, log: Option, graffiti: Graffiti, slasher: Option>>, @@ -122,7 +121,6 @@ where slot_clock: None, shutdown_sender: None, head_tracker: None, - disabled_forks: Vec::new(), validator_pubkey_cache: None, spec: TEthSpec::default_spec(), chain_config: ChainConfig::default(), @@ -184,13 +182,6 @@ where self.log = Some(log); self } - - /// Sets a list of hard-coded forks that will not be activated. - pub fn disabled_forks(mut self, disabled_forks: Vec) -> Self { - self.disabled_forks = disabled_forks; - self - } - /// Attempt to load an existing eth1 cache from the builder's `Store`. pub fn get_persisted_eth1_backend(&self) -> Result, String> { let store = self @@ -763,7 +754,7 @@ where block_times_cache: <_>::default(), validator_pubkey_cache: TimeoutRwLock::new(validator_pubkey_cache), attester_cache: <_>::default(), - disabled_forks: self.disabled_forks, + early_attester_cache: <_>::default(), shutdown_sender: self .shutdown_sender .ok_or("Cannot build without a shutdown sender.")?, diff --git a/beacon_node/beacon_chain/src/early_attester_cache.rs b/beacon_node/beacon_chain/src/early_attester_cache.rs new file mode 100644 index 00000000000..56dced94e62 --- /dev/null +++ b/beacon_node/beacon_chain/src/early_attester_cache.rs @@ -0,0 +1,161 @@ +use crate::{ + attester_cache::{CommitteeLengths, Error}, + metrics, +}; +use parking_lot::RwLock; +use proto_array::Block as ProtoBlock; +use types::*; + +pub struct CacheItem { + /* + * Values used to create attestations. + */ + epoch: Epoch, + committee_lengths: CommitteeLengths, + beacon_block_root: Hash256, + source: Checkpoint, + target: Checkpoint, + /* + * Values used to make the block available. + */ + block: SignedBeaconBlock, + proto_block: ProtoBlock, +} + +/// Provides a single-item cache which allows for attesting to blocks before those blocks have +/// reached the database. +/// +/// This cache stores enough information to allow Lighthouse to: +/// +/// - Produce an attestation without using `chain.canonical_head`. +/// - Verify that a block root exists (i.e., will be imported in the future) during attestation +/// verification. +/// - Provide a block which can be sent to peers via RPC. +#[derive(Default)] +pub struct EarlyAttesterCache { + item: RwLock>>, +} + +impl EarlyAttesterCache { + /// Removes the cached item, meaning that all future calls to `Self::try_attest` will return + /// `None` until a new cache item is added. + pub fn clear(&self) { + *self.item.write() = None + } + + /// Updates the cache item, so that `Self::try_attest` will return `Some` when given suitable + /// parameters. + pub fn add_head_block( + &self, + beacon_block_root: Hash256, + block: SignedBeaconBlock, + proto_block: ProtoBlock, + state: &BeaconState, + spec: &ChainSpec, + ) -> Result<(), Error> { + let epoch = state.current_epoch(); + let committee_lengths = CommitteeLengths::new(state, spec)?; + let source = state.current_justified_checkpoint(); + let target_slot = epoch.start_slot(E::slots_per_epoch()); + let target = Checkpoint { + epoch, + root: if state.slot() <= target_slot { + beacon_block_root + } else { + *state.get_block_root(target_slot)?
+ }, + }; + + let item = CacheItem { + epoch, + committee_lengths, + beacon_block_root, + source, + target, + block, + proto_block, + }; + + *self.item.write() = Some(item); + + Ok(()) + } + + /// Will return `Some(attestation)` if all the following conditions are met: + /// + /// - There is a cache `item` present. + /// - `request_slot` is in the same epoch as `item.epoch`. + /// - `request_index` does not exceed `item.committee_count`. + pub fn try_attest( + &self, + request_slot: Slot, + request_index: CommitteeIndex, + spec: &ChainSpec, + ) -> Result>, Error> { + let lock = self.item.read(); + let item = if let Some(item) = lock.as_ref() { + item + } else { + return Ok(None); + }; + + let request_epoch = request_slot.epoch(E::slots_per_epoch()); + if request_epoch != item.epoch { + return Ok(None); + } + + let committee_count = item + .committee_lengths + .get_committee_count_per_slot::(spec)?; + if request_index >= committee_count as u64 { + return Ok(None); + } + + let committee_len = + item.committee_lengths + .get_committee_length::(request_slot, request_index, spec)?; + + let attestation = Attestation { + aggregation_bits: BitList::with_capacity(committee_len) + .map_err(BeaconStateError::from)?, + data: AttestationData { + slot: request_slot, + index: request_index, + beacon_block_root: item.beacon_block_root, + source: item.source, + target: item.target, + }, + signature: AggregateSignature::empty(), + }; + + metrics::inc_counter(&metrics::BEACON_EARLY_ATTESTER_CACHE_HITS); + + Ok(Some(attestation)) + } + + /// Returns `true` if `block_root` matches the cached item. + pub fn contains_block(&self, block_root: Hash256) -> bool { + self.item + .read() + .as_ref() + .map_or(false, |item| item.beacon_block_root == block_root) + } + + /// Returns the block, if `block_root` matches the cached item. + pub fn get_block(&self, block_root: Hash256) -> Option> { + self.item + .read() + .as_ref() + .filter(|item| item.beacon_block_root == block_root) + .map(|item| item.block.clone()) + } + + /// Returns the proto-array block, if `block_root` matches the cached item.
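+ /// (Attestation verification uses this so a head block that is still being imported
+ /// already counts as "known"; see `verify_head_block_is_known` in this diff.)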
+ pub fn get_proto_block(&self, block_root: Hash256) -> Option { + self.item + .read() + .as_ref() + .filter(|item| item.beacon_block_root == block_root) + .map(|item| item.proto_block.clone()) + } +} diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 70e288ec265..6920c06039d 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -137,6 +137,9 @@ pub enum BeaconChainError { AltairForkDisabled, ExecutionLayerMissing, ExecutionForkChoiceUpdateFailed(execution_layer::Error), + BlockRewardSlotError, + BlockRewardAttestationError, + BlockRewardSyncError, HeadMissingFromForkChoice(Hash256), FinalizedBlockMissingFromForkChoice(Hash256), InvalidFinalizedPayloadShutdownError(TrySendError), diff --git a/beacon_node/beacon_chain/src/events.rs b/beacon_node/beacon_chain/src/events.rs index 459ccb457f9..6f4415ef4f3 100644 --- a/beacon_node/beacon_chain/src/events.rs +++ b/beacon_node/beacon_chain/src/events.rs @@ -15,6 +15,7 @@ pub struct ServerSentEventHandler { chain_reorg_tx: Sender>, contribution_tx: Sender>, late_head: Sender>, + block_reward_tx: Sender>, log: Logger, } @@ -32,6 +33,7 @@ impl ServerSentEventHandler { let (chain_reorg_tx, _) = broadcast::channel(capacity); let (contribution_tx, _) = broadcast::channel(capacity); let (late_head, _) = broadcast::channel(capacity); + let (block_reward_tx, _) = broadcast::channel(capacity); Self { attestation_tx, @@ -42,6 +44,7 @@ impl ServerSentEventHandler { chain_reorg_tx, contribution_tx, late_head, + block_reward_tx, log, } } @@ -67,6 +70,8 @@ impl ServerSentEventHandler { .map(|count| trace!(self.log, "Registering server-sent contribution and proof event"; "receiver_count" => count)), EventKind::LateHead(late_head) => self.late_head.send(EventKind::LateHead(late_head)) .map(|count| trace!(self.log, "Registering server-sent late head event"; "receiver_count" => count)), + EventKind::BlockReward(block_reward) => self.block_reward_tx.send(EventKind::BlockReward(block_reward)) + .map(|count| trace!(self.log, "Registering server-sent block reward event"; "receiver_count" => count)), }; if let Err(SendError(event)) = result { trace!(self.log, "No receivers registered to listen for event"; "event" => ?event); @@ -105,6 +110,10 @@ impl ServerSentEventHandler { self.late_head.subscribe() } + pub fn subscribe_block_reward(&self) -> Receiver> { + self.block_reward_tx.subscribe() + } + pub fn has_attestation_subscribers(&self) -> bool { self.attestation_tx.receiver_count() > 0 } @@ -136,4 +145,8 @@ impl ServerSentEventHandler { pub fn has_late_head_subscribers(&self) -> bool { self.late_head.receiver_count() > 0 } + + pub fn has_block_reward_subscribers(&self) -> bool { + self.block_reward_tx.receiver_count() > 0 + } } diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 5896dbf3d8e..c19bba61268 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -65,7 +65,7 @@ pub fn execute_payload( } ExecutePayloadResponseStatus::Syncing => Ok(PayloadVerificationStatus::NotVerified), }, - Err(_) => Ok(PayloadVerificationStatus::NotVerified), + Err(_) => Err(ExecutionPayloadError::RejectedByExecutionEngine.into()), } } @@ -146,7 +146,7 @@ pub fn validate_execution_payload_for_gossip( chain: &BeaconChain, ) -> Result<(), BlockError> { // Only apply this validation if this is a merge beacon block.
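// (`execution_payload()` now returns a `Result` rather than an `Option`, so pre-merge
// block bodies take the `Err` branch here instead of `None`.)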
- if let Some(execution_payload) = block.body().execution_payload() { + if let Ok(execution_payload) = block.body().execution_payload() { // This logic should match `is_execution_enabled`. We use only the execution block hash of // the parent here in order to avoid loading the parent state during gossip verification. @@ -289,6 +289,7 @@ pub async fn prepare_execution_payload( .message() .body() .execution_payload() + .ok() .map(|ep| ep.block_hash) }; diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 513467cef83..aff8657e86c 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -5,10 +5,12 @@ mod beacon_chain; mod beacon_fork_choice_store; mod beacon_proposer_cache; mod beacon_snapshot; +pub mod block_reward; mod block_times_cache; mod block_verification; pub mod builder; pub mod chain_config; +mod early_attester_cache; mod errors; pub mod eth1_chain; pub mod events; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 32ebe70921b..28eacad5590 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -4,8 +4,12 @@ use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; use lazy_static::lazy_static; pub use lighthouse_metrics::*; use slot_clock::SlotClock; +use std::time::Duration; use types::{BeaconState, Epoch, EthSpec, Hash256, Slot}; +/// The maximum time to wait for the snapshot cache lock during a metrics scrape. +const SNAPSHOT_CACHE_TIMEOUT: Duration = Duration::from_millis(100); + lazy_static! { /* * Block Processing */ @@ -18,6 +22,10 @@ lazy_static! { "beacon_block_processing_successes_total", "Count of blocks processed without error" ); + pub static ref BLOCK_PROCESSING_SNAPSHOT_CACHE_SIZE: Result = try_create_int_gauge( + "beacon_block_processing_snapshot_cache_size", + "Count of snapshots in the snapshot cache" + ); pub static ref BLOCK_PROCESSING_SNAPSHOT_CACHE_MISSES: Result = try_create_int_counter( "beacon_block_processing_snapshot_cache_misses", "Count of snapshot cache misses" @@ -240,6 +248,14 @@ lazy_static! { pub static ref SHUFFLING_CACHE_MISSES: Result = try_create_int_counter("beacon_shuffling_cache_misses_total", "Count of times shuffling cache fulfils request"); + /* + * Early attester cache + */ + pub static ref BEACON_EARLY_ATTESTER_CACHE_HITS: Result = try_create_int_counter( + "beacon_early_attester_cache_hits", + "Count of times the early attester cache returns an attestation" + ); + /* * Attestation Production */ @@ -905,6 +921,16 @@ pub fn scrape_for_metrics(beacon_chain: &BeaconChain) { let attestation_stats = beacon_chain.op_pool.attestation_stats(); + if let Some(snapshot_cache) = beacon_chain + .snapshot_cache + .try_write_for(SNAPSHOT_CACHE_TIMEOUT) + { + set_gauge( + &BLOCK_PROCESSING_SNAPSHOT_CACHE_SIZE, + snapshot_cache.len() as i64, + ) + } + + set_gauge_by_usize( &OP_POOL_NUM_ATTESTATIONS, attestation_stats.num_attestations, diff --git a/beacon_node/beacon_chain/src/snapshot_cache.rs b/beacon_node/beacon_chain/src/snapshot_cache.rs index 4f7124de341..f4bbae8a32e 100644 --- a/beacon_node/beacon_chain/src/snapshot_cache.rs +++ b/beacon_node/beacon_chain/src/snapshot_cache.rs @@ -1,4 +1,5 @@ use crate::BeaconSnapshot; +use itertools::process_results; use std::cmp; use std::time::Duration; use types::{ @@ -164,9 +165,25 @@ impl SnapshotCache { } } + /// The block roots of all snapshots contained in `self`.
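+ /// (Exposed for the "Snapshot cache pruned" debug log added in `beacon_chain.rs`.)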
+ pub fn beacon_block_roots(&self) -> Vec { + self.snapshots.iter().map(|s| s.beacon_block_root).collect() + } + + /// The number of snapshots contained in `self`. + pub fn len(&self) -> usize { + self.snapshots.len() + } + /// Insert a snapshot, potentially removing an existing snapshot if `self` is at capacity (see /// struct-level documentation for more info). - pub fn insert(&mut self, snapshot: BeaconSnapshot, pre_state: Option>) { + pub fn insert( + &mut self, + snapshot: BeaconSnapshot, + pre_state: Option>, + spec: &ChainSpec, + ) { + let parent_root = snapshot.beacon_block.message().parent_root(); let item = CacheItem { beacon_block: snapshot.beacon_block, beacon_block_root: snapshot.beacon_block_root, @@ -174,6 +191,25 @@ impl SnapshotCache { pre_state, }; + // Remove the grandparent of the block that was just inserted. + // + // Assuming it's unlikely to see re-orgs deeper than one block, this method helps keep the + // cache small by removing any states that already have more than one descendant. + // + // Remove the grandparent first to free up room in the cache. + let grandparent_result = + process_results(item.beacon_state.rev_iter_block_roots(spec), |iter| { + iter.map(|(_slot, root)| root) + .find(|root| *root != item.beacon_block_root && *root != parent_root) + }); + if let Ok(Some(grandparent_root)) = grandparent_result { + let head_block_root = self.head_block_root; + self.snapshots.retain(|snapshot| { + let root = snapshot.beacon_block_root; + root == head_block_root || root != grandparent_root + }); + } + if self.snapshots.len() < self.max_len { self.snapshots.push(item); } else { @@ -384,7 +420,7 @@ mod test { *snapshot.beacon_state.slot_mut() = Slot::from(i * MainnetEthSpec::slots_per_epoch() + 1); - cache.insert(snapshot, None); + cache.insert(snapshot, None, &spec); assert_eq!( cache.snapshots.len(), @@ -402,7 +438,7 @@ mod test { // 2 2 // 3 3 assert_eq!(cache.snapshots.len(), CACHE_SIZE); - cache.insert(get_snapshot(42), None); + cache.insert(get_snapshot(42), None, &spec); assert_eq!(cache.snapshots.len(), CACHE_SIZE); assert!( @@ -462,7 +498,7 @@ mod test { // Over-fill the cache so it needs to eject some old values on insert. for i in 0..CACHE_SIZE as u64 { - cache.insert(get_snapshot(u64::max_value() - i), None); + cache.insert(get_snapshot(u64::max_value() - i), None, &spec); } // Ensure that the new head value was not removed from the cache. diff --git a/beacon_node/beacon_chain/src/sync_committee_verification.rs b/beacon_node/beacon_chain/src/sync_committee_verification.rs index 4bc5b439e12..fa7d4dcfed5 100644 --- a/beacon_node/beacon_chain/src/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/src/sync_committee_verification.rs @@ -273,7 +273,7 @@ impl VerifiedSyncContribution { let subcommittee_index = contribution.subcommittee_index as usize; // Ensure sync committee contribution is within the MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance. - verify_propagation_slot_range(chain, contribution)?; + verify_propagation_slot_range(&chain.slot_clock, contribution)?; // Validate subcommittee index. if contribution.subcommittee_index >= SYNC_COMMITTEE_SUBNET_COUNT { @@ -428,7 +428,7 @@ impl VerifiedSyncCommitteeMessage { // MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance). // // We do not queue future sync committee messages for later processing. - verify_propagation_slot_range(chain, &sync_message)?; + verify_propagation_slot_range(&chain.slot_clock, &sync_message)?; // Ensure the `subnet_id` is valid for the given validator. 
let pubkey = chain @@ -516,14 +516,13 @@ impl VerifiedSyncCommitteeMessage { /// to the current slot of the `chain`. /// /// Accounts for `MAXIMUM_GOSSIP_CLOCK_DISPARITY`. -pub fn verify_propagation_slot_range( - chain: &BeaconChain, +pub fn verify_propagation_slot_range( + slot_clock: &S, sync_contribution: &U, ) -> Result<(), Error> { let message_slot = sync_contribution.get_slot(); - let latest_permissible_slot = chain - .slot_clock + let latest_permissible_slot = slot_clock .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) .ok_or(BeaconChainError::UnableToReadSlot)?; if message_slot > latest_permissible_slot { @@ -533,8 +532,7 @@ pub fn verify_propagation_slot_range( }); } - let earliest_permissible_slot = chain - .slot_clock + let earliest_permissible_slot = slot_clock .now_with_past_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) .ok_or(BeaconChainError::UnableToReadSlot)?; diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index 1ce2411c41d..4d862cbac72 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -122,6 +122,24 @@ fn produces_attestations() { ); assert_eq!(data.target.epoch, state.current_epoch(), "bad target epoch"); assert_eq!(data.target.root, target_root, "bad target root"); + + let early_attestation = { + let proto_block = chain.fork_choice.read().get_block(&block_root).unwrap(); + chain + .early_attester_cache + .add_head_block(block_root, block.clone(), proto_block, &state, &chain.spec) + .unwrap(); + chain + .early_attester_cache + .try_attest(slot, index, &chain.spec) + .unwrap() + .unwrap() + }; + + assert_eq!( + attestation, early_attestation, + "early attester cache inconsistent" + ); } } } diff --git a/beacon_node/beacon_chain/tests/merge.rs b/beacon_node/beacon_chain/tests/merge.rs index 35dda493e19..43ee2372b65 100644 --- a/beacon_node/beacon_chain/tests/merge.rs +++ b/beacon_node/beacon_chain/tests/merge.rs @@ -30,11 +30,11 @@ fn verify_execution_payload_chain(chain: &[ExecutionPayload]) { #[should_panic] fn merge_with_terminal_block_hash_override() { let altair_fork_epoch = Epoch::new(0); - let merge_fork_epoch = Epoch::new(0); + let bellatrix_fork_epoch = Epoch::new(0); let mut spec = E::default_spec(); spec.altair_fork_epoch = Some(altair_fork_epoch); - spec.merge_fork_epoch = Some(merge_fork_epoch); + spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch); let genesis_pow_block_hash = generate_pow_block( spec.terminal_total_difficulty, @@ -95,12 +95,12 @@ fn merge_with_terminal_block_hash_override() { fn base_altair_merge_with_terminal_block_after_fork() { let altair_fork_epoch = Epoch::new(4); let altair_fork_slot = altair_fork_epoch.start_slot(E::slots_per_epoch()); - let merge_fork_epoch = Epoch::new(8); - let merge_fork_slot = merge_fork_epoch.start_slot(E::slots_per_epoch()); + let bellatrix_fork_epoch = Epoch::new(8); + let merge_fork_slot = bellatrix_fork_epoch.start_slot(E::slots_per_epoch()); let mut spec = E::default_spec(); spec.altair_fork_epoch = Some(altair_fork_epoch); - spec.merge_fork_epoch = Some(merge_fork_epoch); + spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch); let mut execution_payloads = vec![]; diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index d2e673f6071..acb8376dbda 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -31,7 +31,7 @@ task_executor = { path = "../../common/task_executor" } environment = 
{ path = "../../lighthouse/environment" } lazy_static = "1.4.0" lighthouse_metrics = { path = "../../common/lighthouse_metrics" } -time = "0.3.3" +time = "0.3.5" directory = {path = "../../common/directory"} http_api = { path = "../http_api" } http_metrics = { path = "../http_metrics" } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 30bc34dda49..550d89125eb 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -18,7 +18,7 @@ use eth2::{ }; use execution_layer::ExecutionLayer; use genesis::{interop_genesis_state, Eth1GenesisService, DEFAULT_ETH1_BLOCK_HASH}; -use lighthouse_network::NetworkGlobals; +use lighthouse_network::{open_metrics_client::registry::Registry, NetworkGlobals}; use monitoring_api::{MonitoringHttpClient, ProcessType}; use network::{NetworkConfig, NetworkMessage, NetworkService}; use slasher::Slasher; @@ -65,6 +65,7 @@ pub struct ClientBuilder { eth1_service: Option, network_globals: Option>>, network_send: Option>>, + gossipsub_registry: Option, db_path: Option, freezer_db_path: Option, http_api_config: http_api::Config, @@ -96,6 +97,7 @@ where eth1_service: None, network_globals: None, network_send: None, + gossipsub_registry: None, db_path: None, freezer_db_path: None, http_api_config: <_>::default(), @@ -133,7 +135,6 @@ where let chain_spec = self.chain_spec.clone(); let runtime_context = self.runtime_context.clone(); let eth_spec_instance = self.eth_spec_instance.clone(); - let disabled_forks = config.disabled_forks.clone(); let chain_config = config.chain.clone(); let graffiti = config.graffiti; @@ -167,7 +168,6 @@ where .store(store) .custom_spec(spec.clone()) .chain_config(chain_config) - .disabled_forks(disabled_forks) .graffiti(graffiti) .event_handler(event_handler) .execution_layer(execution_layer) @@ -448,13 +448,27 @@ where .ok_or("network requires a runtime_context")? .clone(); - let (network_globals, network_send) = - NetworkService::start(beacon_chain, config, context.executor) - .await - .map_err(|e| format!("Failed to start network: {:?}", e))?; + // If gossipsub metrics are required we build a registry to record them + let mut gossipsub_registry = if config.metrics_enabled { + Some(Registry::default()) + } else { + None + }; + + let (network_globals, network_send) = NetworkService::start( + beacon_chain, + config, + context.executor, + gossipsub_registry + .as_mut() + .map(|registry| registry.sub_registry_with_prefix("gossipsub")), + ) + .await + .map_err(|e| format!("Failed to start network: {:?}", e))?; self.network_globals = Some(network_globals); self.network_send = Some(network_send); + self.gossipsub_registry = gossipsub_registry; Ok(self) } @@ -562,13 +576,13 @@ where Ok(self) } - /// Consumers the builder, returning a `Client` if all necessary components have been + /// Consumes the builder, returning a `Client` if all necessary components have been /// specified. /// /// If type inference errors are being raised, see the comment on the definition of `Self`. 
#[allow(clippy::type_complexity)] pub fn build( - self, + mut self, ) -> Result>, String> { let runtime_context = self @@ -615,6 +629,7 @@ where chain: self.beacon_chain.clone(), db_path: self.db_path.clone(), freezer_db_path: self.freezer_db_path.clone(), + gossipsub_registry: self.gossipsub_registry.take().map(std::sync::Mutex::new), log: log.clone(), }); diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index f4519e05c87..97689622600 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -58,8 +58,6 @@ pub struct Config { /// This is the method used for the 2019 client interop in Canada. pub dummy_eth1_backend: bool, pub sync_eth1_chain: bool, - /// A list of hard-coded forks that will be disabled. - pub disabled_forks: Vec, /// Graffiti to be inserted everytime we create a block. pub graffiti: Graffiti, /// When true, automatically monitor validators using the HTTP API. @@ -98,7 +96,6 @@ impl Default for Config { eth1: <_>::default(), execution_endpoints: None, suggested_fee_recipient: None, - disabled_forks: Vec::new(), graffiti: Graffiti::default(), http_api: <_>::default(), http_metrics: <_>::default(), diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index ea09b1f7c71..c166024c060 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -23,7 +23,7 @@ bytes = "1.1.0" task_executor = { path = "../../common/task_executor" } hex = "0.4.2" eth2_ssz_types = "0.2.2" -lru = "0.6.0" +lru = "0.7.1" exit-future = "0.2.0" tree_hash = "0.4.1" tree_hash_derive = { path = "../../consensus/tree_hash_derive"} diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 96a50ee2e01..c7c60a90062 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -233,8 +233,7 @@ mod test { if request_json != expected_json { panic!( "json mismatch!\n\nobserved: {}\n\nexpected: {}\n\n", - request_json.to_string(), - expected_json.to_string() + request_json, expected_json, ) } self diff --git a/beacon_node/http_api/src/attestation_performance.rs b/beacon_node/http_api/src/attestation_performance.rs new file mode 100644 index 00000000000..5cd9894adef --- /dev/null +++ b/beacon_node/http_api/src/attestation_performance.rs @@ -0,0 +1,216 @@ +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use eth2::lighthouse::{ + AttestationPerformance, AttestationPerformanceQuery, AttestationPerformanceStatistics, +}; +use state_processing::{ + per_epoch_processing::altair::participation_cache::Error as ParticipationCacheError, + per_epoch_processing::EpochProcessingSummary, BlockReplayError, BlockReplayer, +}; +use std::sync::Arc; +use types::{BeaconState, BeaconStateError, EthSpec, Hash256, SignedBeaconBlock}; +use warp_utils::reject::{beacon_chain_error, custom_bad_request, custom_server_error}; + +const MAX_REQUEST_RANGE_EPOCHS: usize = 100; +const BLOCK_ROOT_CHUNK_SIZE: usize = 100; + +#[derive(Debug)] +enum AttestationPerformanceError { + BlockReplay(BlockReplayError), + BeaconState(BeaconStateError), + ParticipationCache(ParticipationCacheError), + UnableToFindValidator(usize), +} + +impl From for AttestationPerformanceError { + fn from(e: BlockReplayError) -> Self { + Self::BlockReplay(e) + } +} + +impl From for AttestationPerformanceError { + fn from(e: BeaconStateError) -> Self { + Self::BeaconState(e) + } +} + +impl From 
for AttestationPerformanceError { + fn from(e: ParticipationCacheError) -> Self { + Self::ParticipationCache(e) + } +} + +pub fn get_attestation_performance( + target: String, + query: AttestationPerformanceQuery, + chain: Arc>, +) -> Result, warp::Rejection> { + let spec = &chain.spec; + // We increment by 2 here so that when we build the state from the `prior_slot` it is + // still 1 epoch ahead of the first epoch we want to analyse. + // This ensures the `.is_previous_epoch_X` functions on `EpochProcessingSummary` return results + // for the correct epoch. + let start_epoch = query.start_epoch + 2; + let start_slot = start_epoch.start_slot(T::EthSpec::slots_per_epoch()); + let prior_slot = start_slot - 1; + + let end_epoch = query.end_epoch + 2; + let end_slot = end_epoch.end_slot(T::EthSpec::slots_per_epoch()); + + // Ensure end_epoch is smaller than the current epoch - 1. + let current_epoch = chain.epoch().map_err(beacon_chain_error)?; + if query.end_epoch >= current_epoch - 1 { + return Err(custom_bad_request(format!( + "end_epoch must be less than the current epoch - 1. current: {}, end: {}", + current_epoch, query.end_epoch + ))); + } + + // Check that the query is valid. + if start_epoch > end_epoch { + return Err(custom_bad_request(format!( + "start_epoch must not be larger than end_epoch. start: {}, end: {}", + query.start_epoch, query.end_epoch + ))); + } + + // The response size can grow exceptionally large, therefore we should check that the + // query is within permitted bounds to prevent potential OOM errors. + if (end_epoch - start_epoch).as_usize() > MAX_REQUEST_RANGE_EPOCHS { + return Err(custom_bad_request(format!( + "end_epoch must not exceed start_epoch by more than 100 epochs. start: {}, end: {}", + query.start_epoch, query.end_epoch + ))); + } + + // Either use the global validator set, or the specified index. + let index_range = if target.to_lowercase() == "global" { + chain + .with_head(|head| Ok((0..head.beacon_state.validators().len() as u64).collect())) + .map_err(beacon_chain_error)? + } else { + vec![target.parse::().map_err(|_| { + custom_bad_request(format!( + "Invalid validator index: {:?}", + target.to_lowercase() + )) + })?] + }; + + // Load block roots. + let mut block_roots: Vec = chain + .forwards_iter_block_roots_until(start_slot, end_slot) + .map_err(beacon_chain_error)? + .map(|res| res.map(|(root, _)| root)) + .collect::, _>>() + .map_err(beacon_chain_error)?; + block_roots.dedup(); + + // Load first block so we can get its parent. + let first_block_root = block_roots.first().ok_or_else(|| { + custom_server_error( + "No block roots could be loaded. Ensure the beacon node is synced.".to_string(), + ) + })?; + let first_block = chain + .get_block(first_block_root) + .and_then(|maybe_block| { + maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*first_block_root)) + }) + .map_err(beacon_chain_error)?; + + // Load the block of the prior slot which will be used to build the starting state. + let prior_block = chain + .get_block(&first_block.parent_root()) + .and_then(|maybe_block| { + maybe_block + .ok_or_else(|| BeaconChainError::MissingBeaconBlock(first_block.parent_root())) + }) + .map_err(beacon_chain_error)?; + + // Load state for block replay.
+ let state_root = prior_block.state_root(); + let state = chain + .get_state(&state_root, Some(prior_slot)) + .and_then(|maybe_state| maybe_state.ok_or(BeaconChainError::MissingBeaconState(state_root))) + .map_err(beacon_chain_error)?; + + // Allocate an AttestationPerformance vector for each validator in the range. + let mut perfs: Vec = + AttestationPerformance::initialize(index_range.clone()); + + let post_slot_hook = |state: &mut BeaconState, + summary: Option>, + _is_skip_slot: bool| + -> Result<(), AttestationPerformanceError> { + // If a `summary` was not output then an epoch boundary was not crossed + // so we move onto the next slot. + if let Some(summary) = summary { + for (position, i) in index_range.iter().enumerate() { + let index = *i as usize; + + let val = perfs + .get_mut(position) + .ok_or(AttestationPerformanceError::UnableToFindValidator(index))?; + + // We are two epochs ahead since the summary is generated for + // `state.previous_epoch()` then `summary.is_previous_epoch_X` functions return + // data for the epoch before that. + let epoch = state.previous_epoch().as_u64() - 1; + + let is_active = summary.is_active_unslashed_in_previous_epoch(index); + + let received_source_reward = summary.is_previous_epoch_source_attester(index)?; + + let received_head_reward = summary.is_previous_epoch_head_attester(index)?; + + let received_target_reward = summary.is_previous_epoch_target_attester(index)?; + + let inclusion_delay = summary + .previous_epoch_inclusion_info(index) + .map(|info| info.delay); + + let perf = AttestationPerformanceStatistics { + active: is_active, + head: received_head_reward, + target: received_target_reward, + source: received_source_reward, + delay: inclusion_delay, + }; + + val.epochs.insert(epoch, perf); + } + } + Ok(()) + }; + + // Initialize block replayer + let mut replayer = BlockReplayer::new(state, spec) + .no_state_root_iter() + .no_signature_verification() + .minimal_block_root_verification() + .post_slot_hook(Box::new(post_slot_hook)); + + // Iterate through block roots in chunks to reduce load on memory. + for block_root_chunks in block_roots.chunks(BLOCK_ROOT_CHUNK_SIZE) { + // Load blocks from the block root chunks. 
+ let blocks = block_root_chunks + .iter() + .map(|root| { + chain + .get_block(root) + .and_then(|maybe_block| { + maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*root)) + }) + .map_err(beacon_chain_error) + }) + .collect::>, _>>()?; + + replayer = replayer + .apply_blocks(blocks, None) + .map_err(|e| custom_server_error(format!("{:?}", e)))?; + } + + drop(replayer); + + Ok(perfs) +} diff --git a/beacon_node/http_api/src/block_rewards.rs b/beacon_node/http_api/src/block_rewards.rs new file mode 100644 index 00000000000..154773aa95c --- /dev/null +++ b/beacon_node/http_api/src/block_rewards.rs @@ -0,0 +1,80 @@ +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; +use eth2::lighthouse::{BlockReward, BlockRewardsQuery}; +use slog::{warn, Logger}; +use state_processing::BlockReplayer; +use std::sync::Arc; +use warp_utils::reject::{beacon_chain_error, beacon_state_error, custom_bad_request}; + +pub fn get_block_rewards( + query: BlockRewardsQuery, + chain: Arc>, + log: Logger, +) -> Result, warp::Rejection> { + let start_slot = query.start_slot; + let end_slot = query.end_slot; + let prior_slot = start_slot - 1; + + if start_slot > end_slot || start_slot == 0 { + return Err(custom_bad_request(format!( + "invalid start and end: {}, {}", + start_slot, end_slot + ))); + } + + let end_block_root = chain + .block_root_at_slot(end_slot, WhenSlotSkipped::Prev) + .map_err(beacon_chain_error)? + .ok_or_else(|| custom_bad_request(format!("block at end slot {} unknown", end_slot)))?; + + let blocks = chain + .store + .load_blocks_to_replay(start_slot, end_slot, end_block_root) + .map_err(|e| beacon_chain_error(e.into()))?; + + let state_root = chain + .state_root_at_slot(prior_slot) + .map_err(beacon_chain_error)? + .ok_or_else(|| custom_bad_request(format!("prior state at slot {} unknown", prior_slot)))?; + + let mut state = chain + .get_state(&state_root, Some(prior_slot)) + .and_then(|maybe_state| maybe_state.ok_or(BeaconChainError::MissingBeaconState(state_root))) + .map_err(beacon_chain_error)?; + + state + .build_all_caches(&chain.spec) + .map_err(beacon_state_error)?; + + let mut block_rewards = Vec::with_capacity(blocks.len()); + + let block_replayer = BlockReplayer::new(state, &chain.spec) + .pre_block_hook(Box::new(|state, block| { + // Compute block reward. + let block_reward = + chain.compute_block_reward(block.message(), block.canonical_root(), state)?; + block_rewards.push(block_reward); + Ok(()) + })) + .state_root_iter( + chain + .forwards_iter_state_roots_until(prior_slot, end_slot) + .map_err(beacon_chain_error)?, + ) + .no_signature_verification() + .minimal_block_root_verification() + .apply_blocks(blocks, None) + .map_err(beacon_chain_error)?; + + if block_replayer.state_root_miss() { + warn!( + log, + "Block reward state root miss"; + "start_slot" => start_slot, + "end_slot" => end_slot, + ); + } + + drop(block_replayer); + + Ok(block_rewards) +} diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 4df5c940b9e..b37638f60fc 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -5,8 +5,10 @@ //! There are also some additional, non-standard endpoints behind the `/lighthouse/` path which are //! used for development. 
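Both new endpoints above are built on the same `BlockReplayer` pattern: load one historical `BeaconState`, then stream the relevant blocks through it while a hook harvests data as each block (or slot) is processed. The hook mutably borrows the output vector for as long as the replayer lives, which is why both endpoints `drop` the replayer before returning their results. A rough sketch of the shape, using only calls that appear above (`Datum` is a hypothetical per-block result type; error handling elided):

```rust
// Sketch: harvest one datum per replayed block via a pre-block hook.
let mut data: Vec<Datum> = Vec::with_capacity(blocks.len());

let replayer = BlockReplayer::new(state, &chain.spec)
    // These checks are redundant for blocks we already hold in the database.
    .no_signature_verification()
    .minimal_block_root_verification()
    // The closure mutably borrows `data` for the replayer's lifetime.
    .pre_block_hook(Box::new(|state, block| {
        data.push(Datum::compute(state, block)?);
        Ok(())
    }))
    .apply_blocks(blocks, None)?;

// Release the borrow on `data` so it can be returned.
drop(replayer);
```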
+mod attestation_performance; mod attester_duties; mod block_id; +mod block_rewards; mod database; mod metrics; mod proposer_duties; @@ -55,7 +57,10 @@ use warp::http::StatusCode; use warp::sse::Event; use warp::Reply; use warp::{http::Response, Filter}; -use warp_utils::task::{blocking_json_task, blocking_task}; +use warp_utils::{ + query::multi_key_query, + task::{blocking_json_task, blocking_task}, +}; const API_PREFIX: &str = "eth"; @@ -505,12 +510,13 @@ pub fn serve( .clone() .and(warp::path("validator_balances")) .and(warp::path::end()) - .and(warp::query::()) + .and(multi_key_query::()) .and_then( |state_id: StateId, chain: Arc>, - query: api_types::ValidatorBalancesQuery| { + query_res: Result| { blocking_json_task(move || { + let query = query_res?; state_id .map_state(&chain, |state| { Ok(state @@ -521,7 +527,7 @@ pub fn serve( // filter by validator id(s) if provided .filter(|(index, (validator, _))| { query.id.as_ref().map_or(true, |ids| { - ids.0.iter().any(|id| match id { + ids.iter().any(|id| match id { ValidatorId::PublicKey(pubkey) => { &validator.pubkey == pubkey } @@ -548,11 +554,14 @@ pub fn serve( let get_beacon_state_validators = beacon_states_path .clone() .and(warp::path("validators")) - .and(warp::query::()) .and(warp::path::end()) + .and(multi_key_query::()) .and_then( - |state_id: StateId, chain: Arc>, query: api_types::ValidatorsQuery| { + |state_id: StateId, + chain: Arc>, + query_res: Result| { blocking_json_task(move || { + let query = query_res?; state_id .map_state(&chain, |state| { let epoch = state.current_epoch(); @@ -566,7 +575,7 @@ pub fn serve( // filter by validator id(s) if provided .filter(|(index, (validator, _))| { query.id.as_ref().map_or(true, |ids| { - ids.0.iter().any(|id| match id { + ids.iter().any(|id| match id { ValidatorId::PublicKey(pubkey) => { &validator.pubkey == pubkey } @@ -586,8 +595,8 @@ pub fn serve( let status_matches = query.status.as_ref().map_or(true, |statuses| { - statuses.0.contains(&status) - || statuses.0.contains(&status.superstatus()) + statuses.contains(&status) + || statuses.contains(&status.superstatus()) }); if status_matches { @@ -1721,11 +1730,13 @@ pub fn serve( .and(warp::path("node")) .and(warp::path("peers")) .and(warp::path::end()) - .and(warp::query::()) + .and(multi_key_query::()) .and(network_globals.clone()) .and_then( - |query: api_types::PeersQuery, network_globals: Arc>| { + |query_res: Result, + network_globals: Arc>| { blocking_json_task(move || { + let query = query_res?; let mut peers: Vec = Vec::new(); network_globals .peers @@ -1755,11 +1766,11 @@ pub fn serve( ); let state_matches = query.state.as_ref().map_or(true, |states| { - states.0.iter().any(|state_param| *state_param == state) + states.iter().any(|state_param| *state_param == state) }); let direction_matches = query.direction.as_ref().map_or(true, |directions| { - directions.0.iter().any(|dir_param| *dir_param == direction) + directions.iter().any(|dir_param| *dir_param == direction) }); if state_matches && direction_matches { @@ -2275,6 +2286,22 @@ pub fn serve( }) }); + // GET lighthouse/nat + let get_lighthouse_nat = warp::path("lighthouse") + .and(warp::path("nat")) + .and(warp::path::end()) + .and_then(|| { + blocking_json_task(move || { + Ok(api_types::GenericResponse::from( + lighthouse_network::metrics::NAT_OPEN + .as_ref() + .map(|v| v.get()) + .unwrap_or(0) + != 0, + )) + }) + }); + // GET lighthouse/peers let get_lighthouse_peers = warp::path("lighthouse") .and(warp::path("peers")) @@ -2515,19 +2542,45 @@ pub fn serve( }, ); 
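The switch from `warp::query` to `warp_utils::query::multi_key_query` in these routes changes how list parameters are parsed: instead of a newtype wrapper around a comma-separated string (hence the dropped `.0` accessors), repeated keys such as `?id=1&id=2` are gathered before deserialization, and a malformed query now surfaces as a `Result` inside the handler rather than as a filter mismatch. Conceptually, the gathering step looks something like this (a sketch only; the real implementation lives in `warp_utils` and is not shown in this diff):

```rust
use std::collections::HashMap;

/// Group repeated query keys before typed deserialization, e.g.
/// "id=1&id=2&status=active" -> {"id": ["1", "2"], "status": ["active"]}.
fn collect_multi_key(raw_query: &str) -> HashMap<&str, Vec<&str>> {
    let mut map: HashMap<&str, Vec<&str>> = HashMap::new();
    for pair in raw_query.split('&').filter(|p| !p.is_empty()) {
        let (key, value) = pair.split_once('=').unwrap_or((pair, ""));
        map.entry(key).or_default().push(value);
    }
    map
}

fn main() {
    let parsed = collect_multi_key("id=1&id=2&status=active");
    assert_eq!(parsed["id"], vec!["1", "2"]);
    assert_eq!(parsed["status"], vec!["active"]);
}
```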
+ let get_lighthouse_block_rewards = warp::path("lighthouse") + .and(warp::path("block_rewards")) + .and(warp::query::()) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and(log_filter.clone()) + .and_then(|query, chain, log| { + blocking_json_task(move || block_rewards::get_block_rewards(query, chain, log)) + }); + + // GET lighthouse/analysis/attestation_performance/{index} + let get_lighthouse_attestation_performance = warp::path("lighthouse") + .and(warp::path("analysis")) + .and(warp::path("attestation_performance")) + .and(warp::path::param::()) + .and(warp::query::()) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then(|target, query, chain: Arc>| { + blocking_json_task(move || { + attestation_performance::get_attestation_performance(target, query, chain) + }) + }); + let get_events = eth1_v1 .and(warp::path("events")) .and(warp::path::end()) - .and(warp::query::()) + .and(multi_key_query::()) .and(chain_filter) .and_then( - |topics: api_types::EventQuery, chain: Arc>| { + |topics_res: Result, + chain: Arc>| { blocking_task(move || { + let topics = topics_res?; // for each topic subscribed spawn a new subscription - let mut receivers = Vec::with_capacity(topics.topics.0.len()); + let mut receivers = Vec::with_capacity(topics.topics.len()); if let Some(event_handler) = chain.event_handler.as_ref() { - for topic in topics.topics.0.clone() { + for topic in topics.topics { let receiver = match topic { api_types::EventTopic::Head => event_handler.subscribe_head(), api_types::EventTopic::Block => event_handler.subscribe_block(), @@ -2549,6 +2602,9 @@ pub fn serve( api_types::EventTopic::LateHead => { event_handler.subscribe_late_head() } + api_types::EventTopic::BlockReward => { + event_handler.subscribe_block_reward() + } }; receivers.push(BroadcastStream::new(receiver).map(|msg| { @@ -2590,8 +2646,8 @@ pub fn serve( .or(get_beacon_state_fork.boxed()) .or(get_beacon_state_finality_checkpoints.boxed()) .or(get_beacon_state_validator_balances.boxed()) - .or(get_beacon_state_validators.boxed()) .or(get_beacon_state_validators_id.boxed()) + .or(get_beacon_state_validators.boxed()) .or(get_beacon_state_committees.boxed()) .or(get_beacon_state_sync_committees.boxed()) .or(get_beacon_headers.boxed()) @@ -2622,6 +2678,7 @@ pub fn serve( .or(get_validator_sync_committee_contribution.boxed()) .or(get_lighthouse_health.boxed()) .or(get_lighthouse_syncing.boxed()) + .or(get_lighthouse_nat.boxed()) .or(get_lighthouse_peers.boxed()) .or(get_lighthouse_peers_connected.boxed()) .or(get_lighthouse_proto_array.boxed()) @@ -2633,6 +2690,8 @@ pub fn serve( .or(get_lighthouse_beacon_states_ssz.boxed()) .or(get_lighthouse_staking.boxed()) .or(get_lighthouse_database_info.boxed()) + .or(get_lighthouse_block_rewards.boxed()) + .or(get_lighthouse_attestation_performance.boxed()) .or(get_events.boxed()), ) .or(warp::post().and( diff --git a/beacon_node/http_metrics/src/lib.rs b/beacon_node/http_metrics/src/lib.rs index 66c7a6a6f69..89e6a8e2d10 100644 --- a/beacon_node/http_metrics/src/lib.rs +++ b/beacon_node/http_metrics/src/lib.rs @@ -4,6 +4,7 @@ mod metrics; use beacon_chain::{BeaconChain, BeaconChainTypes}; +use lighthouse_network::open_metrics_client::registry::Registry; use lighthouse_version::version_with_platform; use serde::{Deserialize, Serialize}; use slog::{crit, info, Logger}; @@ -39,6 +40,7 @@ pub struct Context { pub chain: Option>>, pub db_path: Option, pub freezer_db_path: Option, + pub gossipsub_registry: Option>, pub log: Logger, } diff --git 
a/beacon_node/http_metrics/src/metrics.rs b/beacon_node/http_metrics/src/metrics.rs index c86211f3135..66c961956c8 100644 --- a/beacon_node/http_metrics/src/metrics.rs +++ b/beacon_node/http_metrics/src/metrics.rs @@ -1,6 +1,7 @@ use crate::Context; use beacon_chain::BeaconChainTypes; use lighthouse_metrics::{Encoder, TextEncoder}; +use lighthouse_network::open_metrics_client::encoding::text::encode; use malloc_utils::scrape_allocator_metrics; pub use lighthouse_metrics::*; @@ -51,6 +52,12 @@ pub fn gather_prometheus_metrics( encoder .encode(&lighthouse_metrics::gather(), &mut buffer) .unwrap(); + // encode gossipsub metrics also if they exist + if let Some(registry) = ctx.gossipsub_registry.as_ref() { + if let Ok(registry_locked) = registry.lock() { + let _ = encode(&mut buffer, ®istry_locked); + } + } String::from_utf8(buffer).map_err(|e| format!("Failed to encode prometheus info: {:?}", e)) } diff --git a/beacon_node/http_metrics/tests/tests.rs b/beacon_node/http_metrics/tests/tests.rs index 633b81115f3..fd8733cfe50 100644 --- a/beacon_node/http_metrics/tests/tests.rs +++ b/beacon_node/http_metrics/tests/tests.rs @@ -25,6 +25,7 @@ async fn returns_200_ok() { chain: None, db_path: None, freezer_db_path: None, + gossipsub_registry: None, log, }); diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index 7dcccd8ca2b..31dfab271e6 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Sigma Prime "] edition = "2018" [dependencies] -discv5 = { version = "0.1.0-beta.11", features = ["libp2p"] } +discv5 = { version = "0.1.0-beta.13", features = ["libp2p"] } unsigned-varint = { version = "0.6.0", features = ["codec"] } types = { path = "../../consensus/types" } hashset_delay = { path = "../../common/hashset_delay" } @@ -25,7 +25,7 @@ lazy_static = "1.4.0" lighthouse_metrics = { path = "../../common/lighthouse_metrics" } smallvec = "1.6.1" tokio-io-timeout = "1.1.1" -lru = "0.6.0" +lru = "0.7.1" parking_lot = "0.11.0" sha2 = "0.9.1" snap = "1.0.1" @@ -37,19 +37,22 @@ rand = "0.7.3" directory = { path = "../../common/directory" } regex = "1.3.9" strum = { version = "0.21.0", features = ["derive"] } -superstruct = "0.3.0" +superstruct = "0.4.0" +open-metrics-client = "0.13.0" [dependencies.libp2p] -version = "0.41.0" +# version = "0.41.0" default-features = false -features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns-tokio", "tcp-tokio"] +git = "https://github.com/libp2p/rust-libp2p" +# Latest libp2p master +rev = "17861d9cac121f7e448585a7f052d5eab4618826" +features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns-tokio", "tcp-tokio", "plaintext"] [dev-dependencies] slog-term = "2.6.0" slog-async = "2.5.0" tempfile = "3.1.0" exit-future = "0.2.0" -libp2p = { version = "0.41.0", default-features = false, features = ["plaintext"] } void = "1" [features] diff --git a/beacon_node/lighthouse_network/src/behaviour/mod.rs b/beacon_node/lighthouse_network/src/behaviour/mod.rs index 51699d236f7..2a799610947 100644 --- a/beacon_node/lighthouse_network/src/behaviour/mod.rs +++ b/beacon_node/lighthouse_network/src/behaviour/mod.rs @@ -2,24 +2,25 @@ use crate::behaviour::gossipsub_scoring_parameters::{ lighthouse_gossip_thresholds, PeerScoreSettings, }; use crate::config::gossipsub_config; -use crate::discovery::{subnet_predicate, Discovery, DiscoveryEvent, TARGET_SUBNET_PEERS}; +use crate::discovery::{subnet_predicate, 
Discovery, DiscoveryEvent}; use crate::peer_manager::{ config::Config as PeerManagerCfg, peerdb::score::PeerAction, peerdb::score::ReportSource, ConnectionDirection, PeerManager, PeerManagerEvent, }; use crate::rpc::*; -use crate::service::METADATA_FILENAME; +use crate::service::{Context as ServiceContext, METADATA_FILENAME}; use crate::types::{ subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, SnappyTransform, Subnet, SubnetDiscovery, }; use crate::Eth2Enr; -use crate::{error, metrics, Enr, NetworkConfig, NetworkGlobals, PubsubMessage, TopicHash}; +use crate::{error, metrics, Enr, NetworkGlobals, PubsubMessage, TopicHash}; use libp2p::{ core::{ connection::ConnectionId, identity::Keypair, multiaddr::Protocol as MProtocol, Multiaddr, }, gossipsub::{ + metrics::Config as GossipsubMetricsConfig, subscription_filter::{MaxCountSubscriptionFilter, WhitelistSubscriptionFilter}, Gossipsub as BaseGossipsub, GossipsubEvent, IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId, @@ -45,12 +46,15 @@ use std::{ task::{Context, Poll}, }; use types::{ - consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, ChainSpec, EnrForkId, EthSpec, ForkContext, + consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, EnrForkId, EthSpec, ForkContext, SignedBeaconBlock, Slot, SubnetId, SyncSubnetId, }; pub mod gossipsub_scoring_parameters; +/// The number of peers we target per subnet for discovery queries. +pub const TARGET_SUBNET_PEERS: usize = 6; + const MAX_IDENTIFY_ADDRESSES: usize = 10; /// Identifier of requests sent by a peer. @@ -182,14 +186,14 @@ pub struct Behaviour { impl Behaviour { pub async fn new( local_key: &Keypair, - mut config: NetworkConfig, + ctx: ServiceContext<'_>, network_globals: Arc>, log: &slog::Logger, - fork_context: Arc, - chain_spec: &ChainSpec, ) -> error::Result { let behaviour_log = log.new(o!()); + let mut config = ctx.config.clone(); + // Set up the Identify Behaviour let identify_config = if config.private { IdentifyConfig::new( @@ -215,25 +219,29 @@ impl Behaviour { .eth2() .expect("Local ENR must have a fork id"); - let possible_fork_digests = fork_context.all_fork_digests(); + let possible_fork_digests = ctx.fork_context.all_fork_digests(); let filter = MaxCountSubscriptionFilter { filter: Self::create_whitelist_filter( possible_fork_digests, - chain_spec.attestation_subnet_count, + ctx.chain_spec.attestation_subnet_count, SYNC_COMMITTEE_SUBNET_COUNT, ), max_subscribed_topics: 200, max_subscriptions_per_request: 150, // 148 in theory = (64 attestation + 4 sync committee + 6 core topics) * 2 }; - config.gs_config = gossipsub_config(fork_context.clone()); + config.gs_config = gossipsub_config(config.network_load, ctx.fork_context.clone()); + + // If metrics are enabled for gossipsub build the configuration + let gossipsub_metrics = ctx + .gossipsub_registry + .map(|registry| (registry, GossipsubMetricsConfig::default())); - // Build and configure the Gossipsub behaviour let snappy_transform = SnappyTransform::new(config.gs_config.max_transmit_size()); let mut gossipsub = Gossipsub::new_with_subscription_filter_and_transform( MessageAuthenticity::Anonymous, config.gs_config.clone(), - None, // No metrics for the time being + gossipsub_metrics, filter, snappy_transform, ) @@ -246,7 +254,7 @@ impl Behaviour { let thresholds = lighthouse_gossip_thresholds(); - let score_settings = PeerScoreSettings::new(chain_spec, &config.gs_config); + let score_settings = PeerScoreSettings::new(ctx.chain_spec, &config.gs_config); // Prepare scoring parameters let params = 
score_settings.get_peer_score_params( @@ -267,6 +275,7 @@ impl Behaviour { let peer_manager_cfg = PeerManagerCfg { discovery_enabled: !config.disable_discovery, + metrics_enabled: config.metrics_enabled, target_peer_count: config.target_peers, ..Default::default() }; @@ -274,7 +283,7 @@ impl Behaviour { Ok(Behaviour { // Sub-behaviours gossipsub, - eth2_rpc: RPC::new(fork_context.clone(), log.clone()), + eth2_rpc: RPC::new(ctx.fork_context.clone(), log.clone()), discovery, identify: Identify::new(identify_config), // Auxiliary fields @@ -287,7 +296,7 @@ impl Behaviour { network_dir: config.network_dir.clone(), log: behaviour_log, score_settings, - fork_context, + fork_context: ctx.fork_context, update_gossipsub_scores, }) } @@ -393,14 +402,15 @@ impl Behaviour { .remove(&topic); // unsubscribe from the topic - let topic: Topic = topic.into(); + let libp2p_topic: Topic = topic.clone().into(); - match self.gossipsub.unsubscribe(&topic) { + match self.gossipsub.unsubscribe(&libp2p_topic) { Err(_) => { - warn!(self.log, "Failed to unsubscribe from topic"; "topic" => %topic); + warn!(self.log, "Failed to unsubscribe from topic"; "topic" => %libp2p_topic); false } Ok(v) => { + // Inform the network debug!(self.log, "Unsubscribed to topic"; "topic" => %topic); v } @@ -732,6 +742,18 @@ impl Behaviour { /// Convenience function to propagate a request. fn propagate_request(&mut self, id: PeerRequestId, peer_id: PeerId, request: Request) { + // Increment metrics + match &request { + Request::Status(_) => { + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["status"]) + } + Request::BlocksByRange { .. } => { + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_range"]) + } + Request::BlocksByRoot { .. } => { + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_root"]) + } + } self.add_event(BehaviourEvent::RequestReceived { peer_id, id, @@ -868,6 +890,7 @@ impl NetworkBehaviourEventProcess for Behaviour< PeerAction::LowToleranceError, ReportSource::Gossipsub, Some(GoodbyeReason::Unknown), + "does_not_support_gossipsub", ); } } diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 789242e8d49..4cafcf62b1f 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -20,8 +20,6 @@ use types::{ForkContext, ForkName}; const GOSSIP_MAX_SIZE: usize = 1_048_576; // 1M /// The maximum transmit size of gossip messages in bytes post-merge. const GOSSIP_MAX_SIZE_POST_MERGE: usize = 10 * 1_048_576; // 10M -/// This is a constant to be used in discovery. The lower bound of the gossipsub mesh. -pub const MESH_N_LOW: usize = 6; /// The cache time is set to accommodate the circulation time of an attestation. /// @@ -116,6 +114,10 @@ pub struct Config { /// runtime. pub import_all_attestations: bool, + /// A setting specifying a range of values that tune the network parameters of lighthouse. The + /// lower the value the less bandwidth used, but the slower messages will be received. + pub network_load: u8, + /// Indicates if the user has set the network to be in private mode. Currently this /// prevents sending client identifying information over identify. 
pub private: bool, @@ -197,6 +199,7 @@ impl Default for Config { client_version: lighthouse_version::version_with_platform(), disable_discovery: false, upnp_enabled: true, + network_load: 3, private: false, subscribe_all_subnets: false, import_all_attestations: false, @@ -207,8 +210,72 @@ impl Default for Config { } } +/// Controls sizes of gossipsub meshes to tune a Lighthouse node's bandwidth/performance. +pub struct NetworkLoad { + pub name: &'static str, + pub mesh_n_low: usize, + pub outbound_min: usize, + pub mesh_n: usize, + pub mesh_n_high: usize, + pub gossip_lazy: usize, + pub history_gossip: usize, +} + +impl From for NetworkLoad { + fn from(load: u8) -> NetworkLoad { + match load { + 1 => NetworkLoad { + name: "Low", + mesh_n_low: 1, + outbound_min: 1, + mesh_n: 3, + mesh_n_high: 4, + gossip_lazy: 3, + history_gossip: 12, + }, + 2 => NetworkLoad { + name: "Low", + mesh_n_low: 2, + outbound_min: 2, + mesh_n: 4, + mesh_n_high: 8, + gossip_lazy: 3, + history_gossip: 12, + }, + 3 => NetworkLoad { + name: "Average", + mesh_n_low: 3, + outbound_min: 2, + mesh_n: 5, + mesh_n_high: 10, + gossip_lazy: 3, + history_gossip: 12, + }, + 4 => NetworkLoad { + name: "Average", + mesh_n_low: 4, + outbound_min: 3, + mesh_n: 8, + mesh_n_high: 12, + gossip_lazy: 3, + history_gossip: 12, + }, + // 5 and above + _ => NetworkLoad { + name: "High", + mesh_n_low: 5, + outbound_min: 3, + mesh_n: 10, + mesh_n_high: 15, + gossip_lazy: 5, + history_gossip: 12, + }, + } + } +} + /// Return a Lighthouse specific `GossipsubConfig` where the `message_id_fn` depends on the current fork. -pub fn gossipsub_config(fork_context: Arc) -> GossipsubConfig { +pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> GossipsubConfig { // The function used to generate a gossipsub message id // We use the first 8 bytes of SHA256(data) for content addressing let fast_gossip_message_id = @@ -250,17 +317,21 @@ pub fn gossipsub_config(fork_context: Arc) -> GossipsubConfig { )[..20], ) }; + + let load = NetworkLoad::from(network_load); + GossipsubConfigBuilder::default() .max_transmit_size(gossip_max_size(is_merge_enabled)) .heartbeat_interval(Duration::from_millis(700)) - .mesh_n(8) - .mesh_n_low(MESH_N_LOW) - .mesh_n_high(12) - .gossip_lazy(6) + .mesh_n(load.mesh_n) + .mesh_n_low(load.mesh_n_low) + .mesh_outbound_min(load.outbound_min) + .mesh_n_high(load.mesh_n_high) + .gossip_lazy(load.gossip_lazy) .fanout_ttl(Duration::from_secs(60)) .history_length(12) .max_messages_per_rpc(Some(500)) // Responses to IWANT can be quite large - .history_gossip(3) + .history_gossip(load.history_gossip) .validate_messages() // require validation before propagation .validation_mode(ValidationMode::Anonymous) .duplicate_cache_time(DUPLICATE_CACHE_TIME) diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index 3f2ae759b79..1d542a7f393 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -127,7 +127,7 @@ pub fn use_or_load_enr( pub fn build_or_load_enr( local_key: Keypair, config: &NetworkConfig, - enr_fork_id: EnrForkId, + enr_fork_id: &EnrForkId, log: &slog::Logger, ) -> Result { // Build the local ENR. 
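The `NetworkLoad` profiles above replace a single hard-coded mesh configuration (the old values, `mesh_n = 8` and `mesh_n_high = 12`, sit closest to today's load 4). One subtlety worth noting: `history_length` is raised to 12 in the same builder because gossipsub requires `history_gossip <= history_length`, and every profile now gossips over 12 heartbeats. A sketch of how a profile feeds the builder, abridged (the real `gossipsub_config` sets many more options, and `NetworkLoad` is the struct defined above):

```rust
use libp2p::gossipsub::GossipsubConfigBuilder;

fn main() {
    // The default value of 3 selects the "Average" profile.
    let load = NetworkLoad::from(3u8);
    assert_eq!((load.name, load.mesh_n, load.mesh_n_high), ("Average", 5, 10));

    let _gs_config = GossipsubConfigBuilder::default()
        .mesh_n(load.mesh_n)
        .mesh_n_low(load.mesh_n_low)
        .mesh_outbound_min(load.outbound_min)
        .mesh_n_high(load.mesh_n_high)
        .gossip_lazy(load.gossip_lazy)
        // Must stay >= history_gossip or the builder rejects the config.
        .history_length(12)
        .history_gossip(load.history_gossip)
        .build()
        .expect("load profiles satisfy gossipsub's mesh invariants");
}
```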
@@ -163,7 +163,7 @@ pub fn create_enr_builder_from_config( pub fn build_enr( enr_key: &CombinedKey, config: &NetworkConfig, - enr_fork_id: EnrForkId, + enr_fork_id: &EnrForkId, ) -> Result { let mut builder = create_enr_builder_from_config(config, true); diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index ae7335b5caa..34c29a44d17 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -7,7 +7,8 @@ pub(crate) mod enr; pub mod enr_ext; // Allow external use of the lighthouse ENR builder -use crate::{config, metrics}; +use crate::behaviour::TARGET_SUBNET_PEERS; +use crate::metrics; use crate::{error, Enr, NetworkConfig, NetworkGlobals, Subnet, SubnetDiscovery}; use discv5::{enr::NodeId, Discv5, Discv5Event}; pub use enr::{ @@ -47,8 +48,6 @@ pub use subnet_predicate::subnet_predicate; /// Local ENR storage filename. pub const ENR_FILENAME: &str = "enr.dat"; -/// Target number of peers we'd like to have connected to a given long-lived subnet. -pub const TARGET_SUBNET_PEERS: usize = config::MESH_N_LOW; /// Target number of peers to search for given a grouped subnet query. const TARGET_PEERS_FOR_GROUPED_QUERY: usize = 6; /// Number of times to attempt a discovery request. @@ -692,7 +691,7 @@ impl Discovery { return false; } - let target_peers = TARGET_SUBNET_PEERS - peers_on_subnet; + let target_peers = TARGET_SUBNET_PEERS.saturating_sub(peers_on_subnet); trace!(self.log, "Discovery query started for subnet"; "subnet_query" => ?subnet_query, "connected_peers_on_subnet" => peers_on_subnet, @@ -1039,6 +1038,7 @@ impl NetworkBehaviour for Discovery { Discv5Event::SocketUpdated(socket) => { info!(self.log, "Address updated"; "ip" => %socket.ip(), "udp_port" => %socket.port()); metrics::inc_counter(&metrics::ADDRESS_UPDATE_COUNT); + metrics::check_nat(); // Discv5 will have updated our local ENR. We save the updated version // to disk. 
let enr = self.discv5.local_enr(); @@ -1096,7 +1096,7 @@ mod tests { ..Default::default() }; let enr_key: CombinedKey = CombinedKey::from_libp2p(&keypair).unwrap(); - let enr: Enr = build_enr::(&enr_key, &config, EnrForkId::default()).unwrap(); + let enr: Enr = build_enr::(&enr_key, &config, &EnrForkId::default()).unwrap(); let log = build_log(slog::Level::Debug, false); let globals = NetworkGlobals::new( enr, diff --git a/beacon_node/lighthouse_network/src/lib.rs b/beacon_node/lighthouse_network/src/lib.rs index 058b38ceb56..0460a42c8a9 100644 --- a/beacon_node/lighthouse_network/src/lib.rs +++ b/beacon_node/lighthouse_network/src/lib.rs @@ -10,7 +10,7 @@ mod config; #[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy pub mod discovery; -mod metrics; +pub mod metrics; pub mod peer_manager; pub mod rpc; mod service; @@ -66,13 +66,16 @@ pub use crate::types::{ error, Enr, EnrSyncCommitteeBitfield, GossipTopic, NetworkGlobals, PubsubMessage, Subnet, SubnetDiscovery, }; + +pub use open_metrics_client; + pub use behaviour::{BehaviourEvent, Gossipsub, PeerRequestId, Request, Response}; pub use config::Config as NetworkConfig; pub use discovery::{CombinedKeyExt, EnrExt, Eth2Enr}; pub use discv5; pub use libp2p; pub use libp2p::bandwidth::BandwidthSinks; -pub use libp2p::gossipsub::{MessageAcceptance, MessageId, Topic, TopicHash}; +pub use libp2p::gossipsub::{IdentTopic, MessageAcceptance, MessageId, Topic, TopicHash}; pub use libp2p::{core::ConnectedPoint, PeerId, Swarm}; pub use libp2p::{multiaddr, Multiaddr}; pub use metrics::scrape_discovery_metrics; @@ -82,4 +85,4 @@ pub use peer_manager::{ peerdb::PeerDB, ConnectionDirection, PeerConnectionStatus, PeerInfo, PeerManager, SyncInfo, SyncStatus, }; -pub use service::{load_private_key, Libp2pEvent, Service, NETWORK_KEY_FILENAME}; +pub use service::{load_private_key, Context, Libp2pEvent, Service, NETWORK_KEY_FILENAME}; diff --git a/beacon_node/lighthouse_network/src/metrics.rs b/beacon_node/lighthouse_network/src/metrics.rs index 4767f287f4c..1dfe0448b7a 100644 --- a/beacon_node/lighthouse_network/src/metrics.rs +++ b/beacon_node/lighthouse_network/src/metrics.rs @@ -1,16 +1,19 @@ pub use lighthouse_metrics::*; lazy_static! { + pub static ref NAT_OPEN: Result = try_create_int_counter( + "nat_open", + "An estimate indicating if the local node is exposed to the internet." + ); pub static ref ADDRESS_UPDATE_COUNT: Result = try_create_int_counter( "libp2p_address_update_total", "Count of libp2p socked updated events (when our view of our IP address has changed)" ); pub static ref PEERS_CONNECTED: Result = try_create_int_gauge( - "libp2p_peer_connected_peers_total", + "libp2p_peers", "Count of libp2p peers currently connected" ); - pub static ref PEERS_CONNECTED_INTEROP: Result = - try_create_int_gauge("libp2p_peers", "Count of libp2p peers currently connected"); + pub static ref PEER_CONNECT_EVENT_COUNT: Result = try_create_int_counter( "libp2p_peer_connect_event_total", "Count of libp2p peer connect events (not the current number of connected peers)" @@ -19,6 +22,14 @@ lazy_static! 
{ "libp2p_peer_disconnect_event_total", "Count of libp2p peer disconnect events" ); + pub static ref DISCOVERY_SENT_BYTES: Result = try_create_int_gauge( + "discovery_sent_bytes", + "The number of bytes sent in discovery" + ); + pub static ref DISCOVERY_RECV_BYTES: Result = try_create_int_gauge( + "discovery_recv_bytes", + "The number of bytes received in discovery" + ); pub static ref DISCOVERY_QUEUE: Result = try_create_int_gauge( "discovery_queue_size", "The number of discovery queries awaiting execution" @@ -31,11 +42,7 @@ lazy_static! { "discovery_sessions", "The number of active discovery sessions with peers" ); - pub static ref DISCOVERY_REQS_IP: Result = try_create_float_gauge_vec( - "discovery_reqs_per_ip", - "Unsolicited discovery requests per ip per second", - &["Addresses"] - ); + pub static ref PEERS_PER_CLIENT: Result = try_create_int_gauge_vec( "libp2p_peers_per_client", "The connected peers via client implementation", &["client"] @@ -57,6 +64,11 @@ lazy_static! { "RPC errors per client", &["client", "rpc_error", "direction"] ); + pub static ref TOTAL_RPC_REQUESTS: Result = try_create_int_counter_vec( + "libp2p_rpc_requests_total", + "RPC requests total", + &["type"] + ); pub static ref PEER_ACTION_EVENTS_PER_CLIENT: Result = try_create_int_counter_vec( "libp2p_peer_actions_per_client", @@ -69,26 +81,66 @@ lazy_static! { "Gossipsub messages that we did not accept, per client", &["client", "validation_result"] ); + + pub static ref PEER_SCORE_DISTRIBUTION: Result = + try_create_int_gauge_vec( + "peer_score_distribution", + "The distribution of connected peer scores", + &["position"] + ); + + pub static ref PEER_SCORE_PER_CLIENT: Result = + try_create_float_gauge_vec( + "peer_score_per_client", + "Average score per client", + &["client"] + ); + + /* + * Inbound/Outbound peers + */ + /// The number of peers that dialed us. + pub static ref NETWORK_INBOUND_PEERS: Result = + try_create_int_gauge("network_inbound_peers","The number of peers that are currently connected that have dialed us."); + + /// The number of peers that we dialed. + pub static ref NETWORK_OUTBOUND_PEERS: Result = + try_create_int_gauge("network_outbound_peers","The number of peers that are currently connected that we dialed."); + + /* + * Peer Reporting + */ + pub static ref REPORT_PEER_MSGS: Result = try_create_int_counter_vec( + "libp2p_report_peer_msgs_total", + "Number of peer reports per msg", + &["msg"] + ); +} + +/// Checks if we consider the NAT open. +/// +/// Conditions for an open NAT: +/// 1. We have 1 or more SOCKET_UPDATED messages. This occurs when discovery has a majority of +/// users reporting an external port and our ENR gets updated. +/// 2. We have 0 SOCKET_UPDATED messages (can be true if the port was correct on boot), then we +/// rely on whether we have any inbound messages. If we have no socket update messages, but +/// manage to get at least one inbound peer, we are exposed correctly. +pub fn check_nat() { + // NAT is already deemed open.
+ if NAT_OPEN.as_ref().map(|v| v.get()).unwrap_or(0) != 0 { + return; + } + if ADDRESS_UPDATE_COUNT.as_ref().map(|v| v.get()).unwrap_or(0) != 0 + || NETWORK_INBOUND_PEERS.as_ref().map(|v| v.get()).unwrap_or(0) != 0_i64 + { + inc_counter(&NAT_OPEN); + } } pub fn scrape_discovery_metrics() { let metrics = discv5::metrics::Metrics::from(discv5::Discv5::raw_metrics()); - set_float_gauge(&DISCOVERY_REQS, metrics.unsolicited_requests_per_second); - set_gauge(&DISCOVERY_SESSIONS, metrics.active_sessions as i64); - - let process_gauge_vec = |gauge: &Result, metrics: discv5::metrics::Metrics| { - if let Ok(gauge_vec) = gauge { - gauge_vec.reset(); - for (ip, value) in metrics.requests_per_ip_per_second.iter() { - if let Ok(metric) = gauge_vec.get_metric_with_label_values(&[&format!("{:?}", ip)]) - { - metric.set(*value); - } - } - } - }; - - process_gauge_vec(&DISCOVERY_REQS_IP, metrics); + set_gauge(&DISCOVERY_SENT_BYTES, metrics.bytes_sent as i64); + set_gauge(&DISCOVERY_RECV_BYTES, metrics.bytes_recv as i64); } diff --git a/beacon_node/lighthouse_network/src/peer_manager/config.rs b/beacon_node/lighthouse_network/src/peer_manager/config.rs index aef8f96504c..6c5523de454 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/config.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/config.rs @@ -16,6 +16,8 @@ pub struct Config { /* Peer count related configurations */ /// Whether discovery is enabled. pub discovery_enabled: bool, + /// Whether metrics are enabled. + pub metrics_enabled: bool, /// Target number of peers to connect to. pub target_peer_count: usize, @@ -34,6 +36,7 @@ impl Default for Config { fn default() -> Self { Config { discovery_enabled: true, + metrics_enabled: false, target_peer_count: DEFAULT_TARGET_PEERS, status_interval: DEFAULT_STATUS_INTERVAL, ping_interval_inbound: DEFAULT_PING_INTERVAL_INBOUND, diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 8695d149696..6b8f6fff608 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -1,6 +1,6 @@ //! Implementation of Lighthouse's peer management system. -use crate::discovery::TARGET_SUBNET_PEERS; +use crate::behaviour::TARGET_SUBNET_PEERS; use crate::rpc::{GoodbyeReason, MetaData, Protocol, RPCError, RPCResponseErrorCode}; use crate::{error, metrics, Gossipsub}; use crate::{NetworkGlobals, PeerId}; @@ -8,13 +8,14 @@ use crate::{Subnet, SubnetDiscovery}; use discv5::Enr; use hashset_delay::HashSetDelay; use libp2p::identify::IdentifyInfo; -use peerdb::{BanOperation, BanResult, ScoreUpdateResult}; +use peerdb::{client::ClientKind, BanOperation, BanResult, ScoreUpdateResult}; use slog::{debug, error, warn}; use smallvec::SmallVec; use std::{ sync::Arc, time::{Duration, Instant}, }; +use strum::IntoEnumIterator; use types::{EthSpec, SyncSubnetId}; pub use libp2p::core::{identity::Keypair, Multiaddr}; @@ -71,6 +72,8 @@ pub struct PeerManager { heartbeat: tokio::time::Interval, /// Keeps track of whether the discovery service is enabled or not. discovery_enabled: bool, + /// Keeps track of whether the current instance is reporting metrics or not. + metrics_enabled: bool, /// The logger associated with the `PeerManager`.
log: slog::Logger, } @@ -111,6 +114,7 @@ impl PeerManager { ) -> error::Result { let config::Config { discovery_enabled, + metrics_enabled, target_peer_count, status_interval, ping_interval_inbound, @@ -130,6 +134,7 @@ impl PeerManager { sync_committee_subnets: Default::default(), heartbeat, discovery_enabled, + metrics_enabled, log: log.clone(), }) } @@ -150,7 +155,13 @@ impl PeerManager { } } - self.report_peer(peer_id, PeerAction::Fatal, source, Some(reason)); + self.report_peer( + peer_id, + PeerAction::Fatal, + source, + Some(reason), + "goodbye_peer", + ); } /// Reports a peer for some action. @@ -162,12 +173,13 @@ impl PeerManager { action: PeerAction, source: ReportSource, reason: Option, + msg: &'static str, ) { let action = self .network_globals .peers .write() - .report_peer(peer_id, action, source); + .report_peer(peer_id, action, source, msg); self.handle_score_action(peer_id, action, reason); } @@ -378,19 +390,21 @@ impl PeerManager { "protocols" => ?info.protocols ); - // update the peer client kind metric - if let Some(v) = metrics::get_int_gauge( - &metrics::PEERS_PER_CLIENT, - &[&peer_info.client().kind.to_string()], - ) { - v.inc() - }; - if let Some(v) = metrics::get_int_gauge( - &metrics::PEERS_PER_CLIENT, - &[&previous_kind.to_string()], + // update the peer client kind metric if the peer is connected + if matches!( + peer_info.connection_status(), + PeerConnectionStatus::Connected { .. } + | PeerConnectionStatus::Disconnecting { .. } ) { - v.dec() - }; + metrics::inc_gauge_vec( + &metrics::PEERS_PER_CLIENT, + &[&peer_info.client().kind.to_string()], + ); + metrics::dec_gauge_vec( + &metrics::PEERS_PER_CLIENT, + &[&previous_kind.to_string()], + ); + } } } else { error!(self.log, "Received an Identify response from an unknown peer"; "peer_id" => peer_id.to_string()); @@ -504,7 +518,13 @@ impl PeerManager { RPCError::Disconnected => return, // No penalty for a graceful disconnection }; - self.report_peer(peer_id, peer_action, ReportSource::RPC, None); + self.report_peer( + peer_id, + peer_action, + ReportSource::RPC, + None, + "handle_rpc_error", + ); } /// A ping request has been received. @@ -606,6 +626,46 @@ impl PeerManager { } } + // This function updates metrics for all connected peers. + fn update_connected_peer_metrics(&self) { + // Do nothing if we don't have metrics enabled. + if !self.metrics_enabled { + return; + } + + let mut connected_peer_count = 0; + let mut inbound_connected_peers = 0; + let mut outbound_connected_peers = 0; + let mut clients_per_peer = HashMap::new(); + + for (_peer, peer_info) in self.network_globals.peers.read().connected_peers() { + connected_peer_count += 1; + if let PeerConnectionStatus::Connected { n_in, .. 
} = peer_info.connection_status() { + if *n_in > 0 { + inbound_connected_peers += 1; + } else { + outbound_connected_peers += 1; + } + } + *clients_per_peer + .entry(peer_info.client().kind.to_string()) + .or_default() += 1; + } + + metrics::set_gauge(&metrics::PEERS_CONNECTED, connected_peer_count); + metrics::set_gauge(&metrics::NETWORK_INBOUND_PEERS, inbound_connected_peers); + metrics::set_gauge(&metrics::NETWORK_OUTBOUND_PEERS, outbound_connected_peers); + + for client_kind in ClientKind::iter() { + let value = clients_per_peer.get(&client_kind.to_string()).unwrap_or(&0); + metrics::set_gauge_vec( + &metrics::PEERS_PER_CLIENT, + &[&client_kind.to_string()], + *value as i64, + ); + } + } + /* Internal functions */ /// Sets a peer as connected as long as their reputation allows it @@ -705,22 +765,6 @@ impl PeerManager { // increment prometheus metrics metrics::inc_counter(&metrics::PEER_CONNECT_EVENT_COUNT); metrics::set_gauge(&metrics::PEERS_CONNECTED, connected_peers); - metrics::set_gauge(&metrics::PEERS_CONNECTED_INTEROP, connected_peers); - - // Increment the PEERS_PER_CLIENT metric - if let Some(kind) = self - .network_globals - .peers - .read() - .peer_info(peer_id) - .map(|peer_info| peer_info.client().kind.clone()) - { - if let Some(v) = - metrics::get_int_gauge(&metrics::PEERS_PER_CLIENT, &[&kind.to_string()]) - { - v.inc() - }; - } true } @@ -802,6 +846,9 @@ impl PeerManager { self.handle_score_action(&peer_id, action, None); } + // Update peer score metrics; + self.update_peer_score_metrics(); + // Maintain minimum count for sync committee peers. self.maintain_sync_committee_peers(); @@ -840,6 +887,75 @@ impl PeerManager { self.disconnect_peer(peer_id, GoodbyeReason::TooManyPeers); } } + + // Update metrics related to peer scoring. + fn update_peer_score_metrics(&self) { + if !self.metrics_enabled { + return; + } + // reset the gauges + let _ = metrics::PEER_SCORE_DISTRIBUTION + .as_ref() + .map(|gauge| gauge.reset()); + let _ = metrics::PEER_SCORE_PER_CLIENT + .as_ref() + .map(|gauge| gauge.reset()); + + let mut avg_score_per_client: HashMap = HashMap::with_capacity(5); + { + let peers_db_read_lock = self.network_globals.peers.read(); + let connected_peers = peers_db_read_lock.best_peers_by_status(PeerInfo::is_connected); + let total_peers = connected_peers.len(); + for (id, (_peer, peer_info)) in connected_peers.into_iter().enumerate() { + // First quartile + if id == 0 { + metrics::set_gauge_vec( + &metrics::PEER_SCORE_DISTRIBUTION, + &["1st"], + peer_info.score().score() as i64, + ); + } else if id == (total_peers * 3 / 4).saturating_sub(1) { + metrics::set_gauge_vec( + &metrics::PEER_SCORE_DISTRIBUTION, + &["3/4"], + peer_info.score().score() as i64, + ); + } else if id == (total_peers / 2).saturating_sub(1) { + metrics::set_gauge_vec( + &metrics::PEER_SCORE_DISTRIBUTION, + &["1/2"], + peer_info.score().score() as i64, + ); + } else if id == (total_peers / 4).saturating_sub(1) { + metrics::set_gauge_vec( + &metrics::PEER_SCORE_DISTRIBUTION, + &["1/4"], + peer_info.score().score() as i64, + ); + } else if id == total_peers.saturating_sub(1) { + metrics::set_gauge_vec( + &metrics::PEER_SCORE_DISTRIBUTION, + &["last"], + peer_info.score().score() as i64, + ); + } + + let mut score_peers: &mut (f64, usize) = avg_score_per_client + .entry(peer_info.client().kind.to_string()) + .or_default(); + score_peers.0 += peer_info.score().score(); + score_peers.1 += 1; + } + } // read lock ended + + for (client, (score, peers)) in avg_score_per_client { + metrics::set_float_gauge_vec( 
+ &metrics::PEER_SCORE_PER_CLIENT, + &[&client.to_string()], + score / (peers as f64), + ); + } + } } enum ConnectingType { diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index a11f3739ea7..d194deffd4f 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -111,8 +111,11 @@ impl NetworkBehaviour for PeerManager { endpoint: &ConnectedPoint, _failed_addresses: Option<&Vec>, ) { - // Log the connection debug!(self.log, "Connection established"; "peer_id" => %peer_id, "connection" => ?endpoint.to_endpoint()); + // Check NAT if metrics are enabled + if self.network_globals.local_enr.read().udp().is_some() { + metrics::check_nat(); + } // Check to make sure the peer is not supposed to be banned match self.ban_status(peer_id) { @@ -150,10 +153,8 @@ impl NetworkBehaviour for PeerManager { return; } - // Register the newly connected peer (regardless if we are about to disconnect them). // NOTE: We don't register peers that we are disconnecting immediately. The network service // does not need to know about these peers. - // let enr match endpoint { ConnectedPoint::Listener { send_back_addr, .. } => { self.inject_connect_ingoing(peer_id, send_back_addr.clone(), None); @@ -167,12 +168,9 @@ impl NetworkBehaviour for PeerManager { } } - let connected_peers = self.network_globals.connected_peers() as i64; - // increment prometheus metrics + self.update_connected_peer_metrics(); metrics::inc_counter(&metrics::PEER_CONNECT_EVENT_COUNT); - metrics::set_gauge(&metrics::PEERS_CONNECTED, connected_peers); - metrics::set_gauge(&metrics::PEERS_CONNECTED_INTEROP, connected_peers); } fn inject_disconnected(&mut self, peer_id: &PeerId) { @@ -190,21 +188,6 @@ impl NetworkBehaviour for PeerManager { self.events .push(PeerManagerEvent::PeerDisconnected(*peer_id)); debug!(self.log, "Peer disconnected"; "peer_id" => %peer_id); - - // Decrement the PEERS_PER_CLIENT metric - if let Some(kind) = self - .network_globals - .peers - .read() - .peer_info(peer_id) - .map(|info| info.client().kind.clone()) - { - if let Some(v) = - metrics::get_int_gauge(&metrics::PEERS_PER_CLIENT, &[&kind.to_string()]) - { - v.dec() - }; - } } // NOTE: It may be the case that a rejected node, due to too many peers is disconnected @@ -212,12 +195,9 @@ impl NetworkBehaviour for PeerManager { // reference so that peer manager can track this peer. 
self.inject_disconnect(peer_id); - let connected_peers = self.network_globals.connected_peers() as i64; - // Update the prometheus metrics + self.update_connected_peer_metrics(); metrics::inc_counter(&metrics::PEER_DISCONNECT_EVENT_COUNT); - metrics::set_gauge(&metrics::PEERS_CONNECTED, connected_peers); - metrics::set_gauge(&metrics::PEERS_CONNECTED_INTEROP, connected_peers); } fn inject_address_change( diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index 81c03eaf751..cddff1218cd 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -490,7 +490,10 @@ impl PeerDB { peer_id: &PeerId, action: PeerAction, source: ReportSource, + msg: &'static str, ) -> ScoreUpdateResult { + metrics::inc_counter_vec(&metrics::REPORT_PEER_MSGS, &[msg]); + match self.peers.get_mut(peer_id) { Some(info) => { let previous_state = info.score_state(); @@ -502,7 +505,13 @@ impl PeerDB { let result = Self::handle_score_transition(previous_state, peer_id, info, &self.log); if previous_state == info.score_state() { - debug!(self.log, "Peer score adjusted"; "peer_id" => %peer_id, "score" => %info.score()); + debug!( + self.log, + "Peer score adjusted"; + "msg" => %msg, + "peer_id" => %peer_id, + "score" => %info.score() + ); } match result { ScoreTransitionResult::Banned => { @@ -522,13 +531,23 @@ impl PeerDB { } ScoreTransitionResult::NoAction => ScoreUpdateResult::NoAction, ScoreTransitionResult::Unbanned => { - error!(self.log, "Report peer action lead to an unbanning"; "peer_id" => %peer_id); + error!( + self.log, + "Report peer action led to an unbanning"; + "msg" => %msg, + "peer_id" => %peer_id + ); ScoreUpdateResult::NoAction } } } None => { - debug!(self.log, "Reporting a peer that doesn't exist"; "peer_id" =>%peer_id); + debug!( + self.log, + "Reporting a peer that doesn't exist"; + "msg" => %msg, + "peer_id" => %peer_id + ); ScoreUpdateResult::NoAction } } @@ -590,28 +609,8 @@ impl PeerDB { /// A peer is being dialed. // VISIBILITY: Only the peer manager can adjust the connection state - // TODO: Remove the internal logic in favour of using the update_connection_state() function. - // This will be compatible once the ENR parameter is removed in the imminent behaviour tests PR. pub(super) fn dialing_peer(&mut self, peer_id: &PeerId, enr: Option) { - let info = self.peers.entry(*peer_id).or_default(); - if let Some(enr) = enr { - info.set_enr(enr); - } - - if let Err(e) = info.set_dialing_peer() { - error!(self.log, "{}", e; "peer_id" => %peer_id); - } - - // If the peer was banned, remove the banned peer and addresses. - if info.is_banned() { - self.banned_peers_count - .remove_banned_peer(info.seen_ip_addresses()); - } - - // If the peer was disconnected, reduce the disconnected peer count. - if info.is_disconnected() { - self.disconnected_peers = self.disconnected_peers().count().saturating_sub(1); - } + self.update_connection_state(peer_id, NewConnectionState::Dialing { enr }); } /// Sets a peer as connected with an ingoing connection. @@ -667,7 +666,11 @@ impl PeerDB { // connection state for an unknown peer. if !matches!( new_state, - NewConnectionState::Connected { .. } | NewConnectionState::Disconnecting { .. } + NewConnectionState::Connected { .. } // We have established a new connection (peer may not have been seen before) + | NewConnectionState::Disconnecting { ..
}// We are disconnecting from a peer that may not have been registered before + | NewConnectionState::Dialing { .. } // We are dialing a potentially new peer + | NewConnectionState::Disconnected { .. } // Dialing a peer that responds by a different ID can be immediately + // disconnected without having being stored in the db before ) { warn!(log_ref, "Updating state of unknown peer"; "peer_id" => %peer_id, "new_state" => ?new_state); @@ -689,7 +692,11 @@ impl PeerDB { // Handle all the possible state changes match (info.connection_status().clone(), new_state) { - /* Handle the transition to a connected state */ + /* CONNECTED + * + * + * Handles the transition to a connected state + */ ( current_state, NewConnectionState::Connected { @@ -746,7 +753,47 @@ impl PeerDB { } } - /* Handle the transition to the disconnected state */ + /* DIALING + * + * + * Handles the transition to a dialing state + */ + (old_state, NewConnectionState::Dialing { enr }) => { + match old_state { + PeerConnectionStatus::Banned { .. } => { + warn!(self.log, "Dialing a banned peer"; "peer_id" => %peer_id); + self.banned_peers_count + .remove_banned_peer(info.seen_ip_addresses()); + } + PeerConnectionStatus::Disconnected { .. } => { + self.disconnected_peers = self.disconnected_peers.saturating_sub(1); + } + PeerConnectionStatus::Connected { .. } => { + warn!(self.log, "Dialing an already connected peer"; "peer_id" => %peer_id) + } + PeerConnectionStatus::Dialing { .. } => { + warn!(self.log, "Dialing an already dialing peer"; "peer_id" => %peer_id) + } + PeerConnectionStatus::Disconnecting { .. } => { + warn!(self.log, "Dialing a disconnecting peer"; "peer_id" => %peer_id) + } + PeerConnectionStatus::Unknown => {} // default behaviour + } + // Update the ENR if one is known. + if let Some(enr) = enr { + info.set_enr(enr); + } + + if let Err(e) = info.set_dialing_peer() { + error!(self.log, "{}", e; "peer_id" => %peer_id); + } + } + + /* DISCONNECTED + * + * + * Handle the transition to the disconnected state + */ (old_state, NewConnectionState::Disconnected) => { // Remove all subnets for disconnected peers. info.clear_subnets(); @@ -780,7 +827,11 @@ impl PeerDB { } } - /* Handle the transition to the disconnecting state */ + /* DISCONNECTING + * + * + * Handles the transition to a disconnecting state + */ (PeerConnectionStatus::Banned { .. }, NewConnectionState::Disconnecting { to_ban }) => { error!(self.log, "Disconnecting from a banned peer"; "peer_id" => %peer_id); info.set_connection_status(PeerConnectionStatus::Disconnecting { to_ban }); @@ -802,7 +853,11 @@ impl PeerDB { info.set_connection_status(PeerConnectionStatus::Disconnecting { to_ban }); } - /* Handle transitioning to the banned state */ + /* BANNED + * + * + * Handles the transition to a banned state + */ (PeerConnectionStatus::Disconnected { .. }, NewConnectionState::Banned) => { // It is possible to ban a peer that is currently disconnected. This can occur when // there are many events that score it poorly and are processed after it has disconnected. 
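The `dialing_peer` rewrite above completes a broader refactor: every transition, including dialing, now funnels through `update_connection_state`, so the `disconnected_peers` and `banned_peers_count` bookkeeping lives in exactly one match statement instead of being duplicated per entry point. Reduced to one counter and three states, the shape of that state machine is roughly as follows (a sketch, not the real five-state `PeerDB`):

```rust
#[derive(Debug, PartialEq)]
enum Status {
    Disconnected,
    Dialing,
    Connected,
}

struct PeerDb {
    status: Status,
    disconnected_peers: usize,
}

impl PeerDb {
    /// The single choke point: counter updates sit beside the transition
    /// they belong to, so no caller can forget them.
    fn update_connection_state(&mut self, new_state: Status) {
        match (&self.status, &new_state) {
            // Dialing a disconnected peer removes it from the disconnected pool.
            (Status::Disconnected, Status::Dialing) => {
                self.disconnected_peers = self.disconnected_peers.saturating_sub(1);
            }
            // Any transition into `Disconnected` grows the pool.
            (_, Status::Disconnected) => {
                self.disconnected_peers = self.disconnected_peers.saturating_add(1);
            }
            _ => {}
        }
        self.status = new_state;
    }
}

fn main() {
    let mut db = PeerDb { status: Status::Disconnected, disconnected_peers: 1 };
    db.update_connection_state(Status::Dialing);
    db.update_connection_state(Status::Connected);
    db.update_connection_state(Status::Disconnected);
    assert_eq!(db.disconnected_peers, 1);
}
```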
@@ -860,7 +915,11 @@ impl PeerDB { return Some(BanOperation::ReadyToBan(banned_ips)); } - /* Handle the connection state of unbanning a peer */ + /* UNBANNED + * + * + * Handles the transition to an unbanned state + */ (old_state, NewConnectionState::Unbanned) => { if matches!(info.score_state(), ScoreState::Banned) { error!(self.log, "Unbanning a banned peer"; "peer_id" => %peer_id); @@ -880,8 +939,7 @@ impl PeerDB { // Increment the disconnected count and reduce the banned count self.banned_peers_count .remove_banned_peer(info.seen_ip_addresses()); - self.disconnected_peers = - self.disconnected_peers().count().saturating_add(1); + self.disconnected_peers = self.disconnected_peers.saturating_add(1); } } } @@ -1040,6 +1098,11 @@ enum NewConnectionState { /// Whether the peer should be banned after the disconnect occurs. to_ban: bool, }, + /// We are dialing this peer. + Dialing { + /// An optional known ENR for the peer we are dialing. + enr: Option, + }, /// The peer has been disconnected from our local node. Disconnected, /// The peer has been banned and actions to shift the peer to the banned state should be @@ -1357,7 +1420,7 @@ mod tests { assert_eq!(pdb.banned_peers_count.banned_peers(), 0); for p in pdb.connected_peer_ids().cloned().collect::>() { - let _ = pdb.report_peer(&p, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer(&p, PeerAction::Fatal, ReportSource::PeerManager, ""); pdb.inject_disconnect(&p); } @@ -1426,9 +1489,19 @@ mod tests { pdb.inject_disconnect(&random_peer); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); - let _ = pdb.report_peer(&random_peer, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer( + &random_peer, + PeerAction::Fatal, + ReportSource::PeerManager, + "", + ); pdb.inject_disconnect(&random_peer); - let _ = pdb.report_peer(&random_peer, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer( + &random_peer, + PeerAction::Fatal, + ReportSource::PeerManager, + "", + ); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); pdb.inject_disconnect(&random_peer); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); @@ -1481,7 +1554,12 @@ mod tests { pdb.disconnected_peers, pdb.banned_peers_count.banned_peers ); // Disconnect and ban peer 2 - let _ = pdb.report_peer(&random_peer2, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer( + &random_peer2, + PeerAction::Fatal, + ReportSource::PeerManager, + "", + ); // Should be 1 disconnected peer and one peer in the process of being disconnected println!( "3:{},{}", @@ -1495,7 +1573,12 @@ mod tests { pdb.disconnected_peers, pdb.banned_peers_count.banned_peers ); // Now that the peer is disconnected, register the ban. - let _ = pdb.report_peer(&random_peer2, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer( + &random_peer2, + PeerAction::Fatal, + ReportSource::PeerManager, + "", + ); // There should be 1 disconnected peer and one banned peer. println!( "5:{},{}", @@ -1509,7 +1592,12 @@ mod tests { pdb.banned_peers().count() ); // Now ban peer 1. - let _ = pdb.report_peer(&random_peer1, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer( + &random_peer1, + PeerAction::Fatal, + ReportSource::PeerManager, + "", + ); // There should be no disconnected peers and 2 banned peers println!( "6:{},{}", @@ -1523,7 +1611,12 @@ mod tests { pdb.disconnected_peers, pdb.banned_peers_count.banned_peers ); // Same thing here. 
- let _ = pdb.report_peer(&random_peer1, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer( + &random_peer1, + PeerAction::Fatal, + ReportSource::PeerManager, + "", + ); println!( "8:{},{}", pdb.disconnected_peers, pdb.banned_peers_count.banned_peers @@ -1559,7 +1652,12 @@ mod tests { ); // Ban peer 3 - let _ = pdb.report_peer(&random_peer3, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer( + &random_peer3, + PeerAction::Fatal, + ReportSource::PeerManager, + "", + ); pdb.inject_disconnect(&random_peer3); // This should add a new banned peer, there should be 0 disconnected and 2 banned @@ -1576,7 +1674,12 @@ mod tests { ); // Ban peer 3 - let _ = pdb.report_peer(&random_peer3, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer( + &random_peer3, + PeerAction::Fatal, + ReportSource::PeerManager, + "", + ); pdb.inject_disconnect(&random_peer3); // Should still have 2 banned peers @@ -1606,7 +1709,12 @@ mod tests { ); // Ban peer 3 - let _ = pdb.report_peer(&random_peer3, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer( + &random_peer3, + PeerAction::Fatal, + ReportSource::PeerManager, + "", + ); pdb.inject_disconnect(&random_peer3); // Should have 1 disconnect (peer 2) and one banned (peer 3) @@ -1657,7 +1765,12 @@ mod tests { ); // Ban peer 0 - let _ = pdb.report_peer(&random_peer, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer( + &random_peer, + PeerAction::Fatal, + ReportSource::PeerManager, + "", + ); pdb.inject_disconnect(&random_peer); // Should have 1 disconnect ( peer 2) and two banned (peer0, peer 3) @@ -1709,7 +1822,7 @@ mod tests { let p5 = connect_peer_with_ips(&mut pdb, vec![ip5]); for p in &peers[..BANNED_PEERS_PER_IP_THRESHOLD + 1] { - let _ = pdb.report_peer(p, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer(p, PeerAction::Fatal, ReportSource::PeerManager, ""); pdb.inject_disconnect(p); } @@ -1725,6 +1838,7 @@ mod tests { &peers[BANNED_PEERS_PER_IP_THRESHOLD + 1], PeerAction::Fatal, ReportSource::PeerManager, + "", ); pdb.inject_disconnect(&peers[BANNED_PEERS_PER_IP_THRESHOLD + 1]); @@ -1777,7 +1891,7 @@ mod tests { // ban all peers for p in &peers { - let _ = pdb.report_peer(p, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer(p, PeerAction::Fatal, ReportSource::PeerManager, ""); pdb.inject_disconnect(p); } @@ -1806,7 +1920,7 @@ mod tests { socker_addr.push(Protocol::Tcp(8080)); for p in &peers { pdb.connect_ingoing(p, socker_addr.clone(), None); - let _ = pdb.report_peer(p, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer(p, PeerAction::Fatal, ReportSource::PeerManager, ""); pdb.inject_disconnect(p); } @@ -1823,7 +1937,7 @@ mod tests { // reban every peer except one for p in &peers[1..] 
{ - let _ = pdb.report_peer(p, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer(p, PeerAction::Fatal, ReportSource::PeerManager, ""); pdb.inject_disconnect(p); } @@ -1832,7 +1946,7 @@ mod tests { assert!(!pdb.ban_status(&p2).is_banned()); // reban last peer - let _ = pdb.report_peer(&peers[0], PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer(&peers[0], PeerAction::Fatal, ReportSource::PeerManager, ""); pdb.inject_disconnect(&peers[0]); //Ip's are banned again diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs index 8f1738ac688..7cc84516a07 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs @@ -4,7 +4,7 @@ use libp2p::identify::IdentifyInfo; use serde::Serialize; -use strum::{AsRefStr, AsStaticStr}; +use strum::{AsRefStr, AsStaticStr, EnumIter}; /// Various client and protocol information related to a node. #[derive(Clone, Debug, Serialize)] @@ -21,7 +21,7 @@ pub struct Client { pub agent_string: Option, } -#[derive(Clone, Debug, Serialize, PartialEq, AsRefStr, AsStaticStr)] +#[derive(Clone, Debug, Serialize, PartialEq, AsRefStr, AsStaticStr, EnumIter)] pub enum ClientKind { /// A lighthouse node (the best kind). Lighthouse, diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs index 3ff5dc04acf..941ca7e6c93 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs @@ -19,8 +19,6 @@ use PeerConnectionStatus::*; #[derive(Clone, Debug, Serialize)] #[serde(bound = "T: EthSpec")] pub struct PeerInfo { - /// The connection status of the peer - _status: PeerStatus, /// The peers reputation score: Score, /// Client managing this peer @@ -57,7 +55,6 @@ pub struct PeerInfo { impl Default for PeerInfo { fn default() -> PeerInfo { PeerInfo { - _status: Default::default(), score: Score::default(), client: Client::default(), connection_status: Default::default(), @@ -387,21 +384,6 @@ impl PeerInfo { } } -#[derive(Clone, Debug, Serialize)] -/// The current health status of the peer. -pub enum PeerStatus { - /// The peer is healthy. - Healthy, - /// The peer is clogged. It has not been responding to requests on time. - _Clogged, -} - -impl Default for PeerStatus { - fn default() -> Self { - PeerStatus::Healthy - } -} - /// Connection Direction of connection. 
#[derive(Debug, Clone, Serialize, AsRefStr)] #[strum(serialize_all = "snake_case")] diff --git a/beacon_node/lighthouse_network/src/service.rs b/beacon_node/lighthouse_network/src/service.rs index 60252385d99..0ccdd28fdff 100644 --- a/beacon_node/lighthouse_network/src/service.rs +++ b/beacon_node/lighthouse_network/src/service.rs @@ -1,6 +1,7 @@ use crate::behaviour::{ save_metadata_to_disk, Behaviour, BehaviourEvent, PeerRequestId, Request, Response, }; +use crate::config::NetworkLoad; use crate::discovery::enr; use crate::multiaddr::Protocol; use crate::rpc::{ @@ -20,6 +21,7 @@ use libp2p::{ swarm::{SwarmBuilder, SwarmEvent}, PeerId, Swarm, Transport, }; +use open_metrics_client::registry::Registry; use slog::{crit, debug, info, o, trace, warn, Logger}; use ssz::Decode; use std::fs::File; @@ -62,27 +64,34 @@ pub struct Service { pub log: Logger, } +pub struct Context<'a> { + pub config: &'a NetworkConfig, + pub enr_fork_id: EnrForkId, + pub fork_context: Arc, + pub chain_spec: &'a ChainSpec, + pub gossipsub_registry: Option<&'a mut Registry>, +} + impl Service { pub async fn new( executor: task_executor::TaskExecutor, - config: &NetworkConfig, - enr_fork_id: EnrForkId, + ctx: Context<'_>, log: &Logger, - fork_context: Arc, - chain_spec: &ChainSpec, ) -> error::Result<(Arc>, Self)> { let log = log.new(o!("service"=> "libp2p")); trace!(log, "Libp2p Service starting"); + let config = ctx.config; // initialise the node's ID let local_keypair = load_private_key(config, &log); // Create an ENR or load from disk if appropriate let enr = - enr::build_or_load_enr::(local_keypair.clone(), config, enr_fork_id, &log)?; + enr::build_or_load_enr::(local_keypair.clone(), config, &ctx.enr_fork_id, &log)?; let local_peer_id = enr.peer_id(); + // Construct the metadata let meta_data = load_or_build_metadata(&config.network_dir, &log); // set up a collection of variables accessible outside of the network crate @@ -99,7 +108,7 @@ impl Service { &log, )); - info!(log, "Libp2p Service"; "peer_id" => %enr.peer_id()); + info!(log, "Libp2p Starting"; "peer_id" => %enr.peer_id(), "bandwidth_config" => format!("{}-{}", config.network_load, NetworkLoad::from(config.network_load).name)); let discovery_string = if config.disable_discovery { "None".into() } else { @@ -113,15 +122,8 @@ impl Service { .map_err(|e| format!("Failed to build transport: {:?}", e))?; // Lighthouse network behaviour - let behaviour = Behaviour::new( - &local_keypair, - config.clone(), - network_globals.clone(), - &log, - fork_context, - chain_spec, - ) - .await?; + let behaviour = + Behaviour::new(&local_keypair, ctx, network_globals.clone(), &log).await?; // use the executor for libp2p struct Executor(task_executor::TaskExecutor); @@ -279,11 +281,17 @@ impl Service { } /// Report a peer's action. - pub fn report_peer(&mut self, peer_id: &PeerId, action: PeerAction, source: ReportSource) { + pub fn report_peer( + &mut self, + peer_id: &PeerId, + action: PeerAction, + source: ReportSource, + msg: &'static str, + ) { self.swarm .behaviour_mut() .peer_manager_mut() - .report_peer(peer_id, action, source, None); + .report_peer(peer_id, action, source, None, msg); } /// Disconnect and ban a peer, providing a reason. 
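The new `Context` struct above replaces `Service::new`'s several loosely related parameters with one borrowed bundle, which keeps the constructor signature stable as optional dependencies (such as the gossipsub metrics `Registry`) are added. A reduced sketch of the pattern, with simplified stand-in types rather than the real `NetworkConfig` and `open_metrics_client` registry:

struct NetworkConfig {
    network_load: u8,
}

struct Registry; // stand-in for open_metrics_client::registry::Registry

struct Context<'a> {
    config: &'a NetworkConfig,
    gossipsub_registry: Option<&'a mut Registry>,
}

fn service_new(ctx: Context<'_>) -> Result<(), String> {
    // One binding recovers the old ergonomics inside the constructor.
    let config = ctx.config;
    if config.network_load > 5 {
        return Err("unknown network load".into());
    }
    // Option<&mut Registry> lets metrics registration be a no-op when the
    // caller did not enable a registry, as in the test helper in this diff.
    if let Some(_registry) = ctx.gossipsub_registry {
        // gossipsub metrics would be registered here
    }
    Ok(())
}

fn main() {
    let config = NetworkConfig { network_load: 3 };
    let ctx = Context { config: &config, gossipsub_registry: None };
    assert!(service_new(ctx).is_ok());
}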
diff --git a/beacon_node/lighthouse_network/tests/common/mod.rs b/beacon_node/lighthouse_network/tests/common/mod.rs index 520921e87b9..7deb2108b07 100644 --- a/beacon_node/lighthouse_network/tests/common/mod.rs +++ b/beacon_node/lighthouse_network/tests/common/mod.rs @@ -29,7 +29,7 @@ pub fn fork_context() -> ForkContext { // Set fork_epoch to `Some` to ensure that the `ForkContext` object // includes altair in the list of forks chain_spec.altair_fork_epoch = Some(types::Epoch::new(42)); - chain_spec.merge_fork_epoch = Some(types::Epoch::new(84)); + chain_spec.bellatrix_fork_epoch = Some(types::Epoch::new(84)); ForkContext::new::(types::Slot::new(0), Hash256::zero(), &chain_spec) } @@ -128,19 +128,18 @@ pub async fn build_libp2p_instance( let (signal, exit) = exit_future::signal(); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let executor = task_executor::TaskExecutor::new(rt, exit, log.clone(), shutdown_tx); - let fork_context = Arc::new(fork_context()); + let libp2p_context = lighthouse_network::Context { + config: &config, + enr_fork_id: EnrForkId::default(), + fork_context: Arc::new(fork_context()), + chain_spec: &ChainSpec::minimal(), + gossipsub_registry: None, + }; Libp2pInstance( - LibP2PService::new( - executor, - &config, - EnrForkId::default(), - &log, - fork_context, - &ChainSpec::minimal(), - ) - .await - .expect("should build libp2p instance") - .1, + LibP2PService::new(executor, libp2p_context, &log) + .await + .expect("should build libp2p instance") + .1, signal, ) } diff --git a/beacon_node/lighthouse_network/tests/pm_tests.rs b/beacon_node/lighthouse_network/tests/pm_tests.rs index 96f91797ad3..9b26e4939fa 100644 --- a/beacon_node/lighthouse_network/tests/pm_tests.rs +++ b/beacon_node/lighthouse_network/tests/pm_tests.rs @@ -167,7 +167,8 @@ async fn banned_peers_consistency() { &peer_id, PeerAction::Fatal, ReportSource::Processor, - None + None, + "" ); }, _ => {} diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index d18c96c0a73..9ece18d02cf 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -2,9 +2,9 @@ use crate::{metrics, service::NetworkMessage, sync::SyncMessage}; use beacon_chain::store::Error; use beacon_chain::{ - attestation_verification::{Error as AttnError, VerifiedAttestation}, + attestation_verification::{self, Error as AttnError, VerifiedAttestation}, observed_operations::ObservationOutcome, - sync_committee_verification::Error as SyncCommitteeError, + sync_committee_verification::{self, Error as SyncCommitteeError}, validator_monitor::get_block_delay_ms, BeaconChainError, BeaconChainTypes, BlockError, ExecutionPayloadError, ForkChoiceError, GossipVerifiedBlock, @@ -19,7 +19,7 @@ use tokio::sync::mpsc; use types::{ Attestation, AttesterSlashing, EthSpec, Hash256, IndexedAttestation, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, - SubnetId, SyncCommitteeMessage, SyncSubnetId, + Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; use super::{ @@ -100,12 +100,7 @@ enum FailedAtt { impl FailedAtt { pub fn beacon_block_root(&self) -> &Hash256 { - match self { - FailedAtt::Unaggregate { attestation, .. } => &attestation.data.beacon_block_root, - FailedAtt::Aggregate { attestation, .. 
} => { - &attestation.message.aggregate.data.beacon_block_root - } - } + &self.attestation().data.beacon_block_root } pub fn kind(&self) -> &'static str { @@ -114,6 +109,13 @@ impl FailedAtt { FailedAtt::Aggregate { .. } => "aggregated", } } + + pub fn attestation(&self) -> &Attestation { + match self { + FailedAtt::Unaggregate { attestation, .. } => attestation, + FailedAtt::Aggregate { attestation, .. } => &attestation.message.aggregate, + } + } } /// Items required to verify a batch of unaggregated gossip attestations. @@ -178,11 +180,12 @@ impl Worker { /* Auxiliary functions */ /// Penalizes a peer for misbehaviour. - fn gossip_penalize_peer(&self, peer_id: PeerId, action: PeerAction) { + fn gossip_penalize_peer(&self, peer_id: PeerId, action: PeerAction, msg: &'static str) { self.send_network_message(NetworkMessage::ReportPeer { peer_id, action, source: ReportSource::Gossipsub, + msg, }) } @@ -410,6 +413,7 @@ impl Worker { }, reprocess_tx, error, + seen_timestamp, ); } } @@ -608,6 +612,7 @@ impl Worker { }, reprocess_tx, error, + seen_timestamp, ); } } @@ -734,16 +739,24 @@ impl Worker { self.send_sync_message(SyncMessage::UnknownBlock(peer_id, block)); return None; } + Err(e @ BlockError::BeaconChainError(_)) => { + debug!( + self.log, + "Gossip block beacon chain error"; + "error" => ?e, + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + return None; + } Err(e @ BlockError::FutureSlot { .. }) | Err(e @ BlockError::WouldRevertFinalizedSlot { .. }) | Err(e @ BlockError::BlockIsAlreadyKnown) | Err(e @ BlockError::RepeatProposal { .. }) - | Err(e @ BlockError::NotFinalizedDescendant { .. }) - | Err(e @ BlockError::BeaconChainError(_)) => { + | Err(e @ BlockError::NotFinalizedDescendant { .. }) => { debug!(self.log, "Could not verify block for gossip, ignoring the block"; "error" => %e); // Prevent recurring behaviour by penalizing the peer slightly. - self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError, "gossip_block_high"); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); return None; } @@ -776,7 +789,7 @@ impl Worker { warn!(self.log, "Could not verify block for gossip, rejecting the block"; "error" => %e); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError, "gossip_block_low"); return None; } }; @@ -927,7 +940,11 @@ impl Worker { "block root" => ?block.canonical_root(), "block slot" => block.slot() ); - self.gossip_penalize_peer(peer_id, PeerAction::MidToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::MidToleranceError, + "bad_gossip_block_ssz", + ); trace!( self.log, "Invalid gossip beacon block ssz"; @@ -969,7 +986,11 @@ impl Worker { // the fault on the peer. self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); // We still penalize a peer slightly to prevent overuse of invalids. - self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "invalid_gossip_exit", + ); return; } }; @@ -1028,7 +1049,11 @@ impl Worker { self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); // Penalize peer slightly for invalids. 
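Because the new `msg` argument to `gossip_penalize_peer` is a `&'static str`, every penalty reason must be a compile-time literal, which keeps the label cardinality of the per-reason counter bounded. A minimal, self-contained analogue of that bookkeeping (a plain map standing in for the Prometheus counter vec):

use std::collections::HashMap;

#[derive(Default)]
struct PenaltyCounters {
    by_reason: HashMap<&'static str, u64>,
}

impl PenaltyCounters {
    // Analogous to metrics::inc_counter_vec(&REPORT_PEER_MSGS, &[msg]).
    fn penalize(&mut self, reason: &'static str) {
        *self.by_reason.entry(reason).or_insert(0) += 1;
    }
}

fn main() {
    let mut counters = PenaltyCounters::default();
    counters.penalize("attn_past_slot");
    counters.penalize("attn_past_slot");
    counters.penalize("gossip_block_low");
    assert_eq!(counters.by_reason["attn_past_slot"], 2);
}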
- self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "invalid_gossip_proposer_slashing", + ); return; } }; @@ -1079,7 +1104,11 @@ impl Worker { ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); // Penalize peer slightly for invalids. - self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "invalid_gossip_attester_slashing", + ); return; } }; @@ -1117,6 +1146,7 @@ impl Worker { subnet_id: SyncSubnetId, seen_timestamp: Duration, ) { + let message_slot = sync_signature.slot; let sync_signature = match self .chain .verify_sync_committee_message_for_gossip(sync_signature, subnet_id) @@ -1128,6 +1158,8 @@ impl Worker { message_id, "sync_signature", e, + message_slot, + seen_timestamp, ); return; } @@ -1177,6 +1209,7 @@ impl Worker { sync_contribution: SignedContributionAndProof, seen_timestamp: Duration, ) { + let contribution_slot = sync_contribution.message.contribution.slot; let sync_contribution = match self .chain .verify_sync_contribution_for_gossip(sync_contribution) @@ -1189,6 +1222,8 @@ impl Worker { message_id, "sync_contribution", e, + contribution_slot, + seen_timestamp, ); return; } @@ -1232,15 +1267,13 @@ impl Worker { failed_att: FailedAtt, reprocess_tx: Option>>, error: AttnError, + seen_timestamp: Duration, ) { let beacon_block_root = failed_att.beacon_block_root(); let attestation_type = failed_att.kind(); metrics::register_attestation_error(&error); match &error { - AttnError::FutureEpoch { .. } - | AttnError::PastEpoch { .. } - | AttnError::FutureSlot { .. } - | AttnError::PastSlot { .. } => { + AttnError::FutureSlot { .. } => { /* * These errors can be triggered by a mismatch between our slot and the peer. * @@ -1257,11 +1290,37 @@ impl Worker { // Peers that are slow or not to spec can spam us with these messages draining our // bandwidth. We therefore penalize these peers when they do this. - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_future_slot", + ); // Do not propagate these messages. self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); } + AttnError::PastSlot { .. } => { + // Produce a slot clock frozen at the time we received the message from the + // network. + let seen_clock = &self.chain.slot_clock.freeze_at(seen_timestamp); + let hindsight_verification = + attestation_verification::verify_propagation_slot_range( + seen_clock, + failed_att.attestation(), + ); + + // Only penalize the peer if it would have been invalid at the moment we received + // it. + if hindsight_verification.is_err() { + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_past_slot", + ); + } + + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + } AttnError::InvalidSelectionProof { .. } | AttnError::InvalidSignature => { /* * These errors are caused by invalid signatures. @@ -1269,7 +1328,11 @@ impl Worker { * The peer has published an invalid consensus message. 
*/ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_selection_proof", + ); } AttnError::EmptyAggregationBitfield => { /* @@ -1279,7 +1342,11 @@ impl Worker { * */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_empty_agg_bitfield", + ); } AttnError::AggregatorPubkeyUnknown(_) => { /* @@ -1296,7 +1363,11 @@ impl Worker { * The peer has published an invalid consensus message. */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_agg_pubkey", + ); } AttnError::AggregatorNotInCommittee { .. } => { /* @@ -1313,7 +1384,11 @@ impl Worker { * The peer has published an invalid consensus message. */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_agg_not_in_committee", + ); } AttnError::AttestationAlreadyKnown { .. } => { /* @@ -1389,7 +1464,11 @@ impl Worker { "type" => ?attestation_type, ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_val_index_too_high", + ); } AttnError::UnknownHeadBlock { beacon_block_root } => { trace!( @@ -1453,8 +1532,9 @@ impl Worker { } } else { // We shouldn't make any further attempts to process this attestation. - // Downscore the peer. - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + // + // Don't downscore the peer since it's not clear if we requested this head + // block from them or not. self.propagate_validation_result( message_id, peer_id, @@ -1482,7 +1562,11 @@ impl Worker { * The peer has published an invalid consensus message. */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_unknown_target", + ); } AttnError::BadTargetEpoch => { /* @@ -1492,7 +1576,11 @@ impl Worker { * The peer has published an invalid consensus message. */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_bad_target", + ); } AttnError::NoCommitteeForSlotAndIndex { .. } => { /* @@ -1501,7 +1589,11 @@ impl Worker { * The peer has published an invalid consensus message. */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_no_committee", + ); } AttnError::NotExactlyOneAggregationBitSet(_) => { /* @@ -1510,7 +1602,11 @@ impl Worker { * The peer has published an invalid consensus message. 
*/ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_too_many_agg_bits", + ); } AttnError::AttestsToFutureBlock { .. } => { /* @@ -1519,7 +1615,11 @@ impl Worker { * The peer has published an invalid consensus message. */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_future_block", + ); } AttnError::InvalidSubnetId { received, expected } => { /* @@ -1532,7 +1632,11 @@ impl Worker { "received" => ?received, ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_invalid_subnet_id", + ); } AttnError::Invalid(_) => { /* @@ -1541,7 +1645,11 @@ impl Worker { * The peer has published an invalid consensus message. */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_invalid_state_processing", + ); } AttnError::InvalidTargetEpoch { .. } => { /* @@ -1550,7 +1658,11 @@ impl Worker { * The peer has published an invalid consensus message. */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_invalid_target_epoch", + ); } AttnError::InvalidTargetRoot { .. } => { /* @@ -1559,7 +1671,11 @@ impl Worker { * The peer has published an invalid consensus message. */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_invalid_target_root", + ); } AttnError::TooManySkippedSlots { head_block_slot, @@ -1579,7 +1695,11 @@ impl Worker { // In this case we wish to penalize gossipsub peers that do this to avoid future // attestations that have too many skip slots. self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::MidToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::MidToleranceError, + "attn_too_many_skipped_slots", + ); } AttnError::BeaconChainError(BeaconChainError::DBError(Error::HotColdDBError( HotColdDBError::AttestationStateIsFinalized { .. }, @@ -1602,8 +1722,6 @@ impl Worker { "error" => ?e, ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); - // Penalize the peer slightly - self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); } } @@ -1625,6 +1743,8 @@ impl Worker { message_id: MessageId, message_type: &str, error: SyncCommitteeError, + sync_committee_message_slot: Slot, + seen_timestamp: Duration, ) { metrics::register_sync_committee_error(&error); @@ -1645,15 +1765,16 @@ impl Worker { // Unlike attestations, we have a zero slot buffer in case of sync committee messages, // so we don't penalize heavily. 
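The `freeze_at(seen_timestamp)` logic above re-runs the slot-range check against a clock frozen at the moment the message arrived, so a peer is only penalized if the message was already late on arrival, not merely delayed by our own processing queue. An illustrative, self-contained reduction of that idea (fixed 12-second slots and a plain duration-since-genesis stand in for the real slot clock):

use std::time::Duration;

const SLOT_SECS: u64 = 12; // illustrative constant, not a chain spec value

fn slot_at(time_since_genesis: Duration) -> u64 {
    time_since_genesis.as_secs() / SLOT_SECS
}

/// Returns true only if the message was already too old when it arrived.
fn invalid_in_hindsight(message_slot: u64, seen_since_genesis: Duration) -> bool {
    let seen_slot = slot_at(seen_since_genesis); // the "frozen" clock
    seen_slot > message_slot + 1 // tolerate one slot of propagation delay
}

fn main() {
    // Received during slot 10 for a slot-10 message: valid on arrival,
    // even if we only get around to processing it much later.
    assert!(!invalid_in_hindsight(10, Duration::from_secs(10 * SLOT_SECS + 3)));
    // Received during slot 12 for a slot-10 message: late on arrival.
    assert!(invalid_in_hindsight(10, Duration::from_secs(12 * SLOT_SECS)));
}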
- self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "sync_future_slot", + ); // Do not propagate these messages. self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); } - SyncCommitteeError::PastSlot { - message_slot, - earliest_permissible_slot, - } => { + SyncCommitteeError::PastSlot { .. } => { /* * This error can be triggered by a mismatch between our slot and the peer. * @@ -1667,12 +1788,38 @@ impl Worker { "type" => ?message_type, ); - // We tolerate messages that were just one slot late. - if *message_slot + 1 < *earliest_permissible_slot { - self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + // Compute the slot when we received the message. + let received_slot = self + .chain + .slot_clock + .slot_of(seen_timestamp) + .unwrap_or_else(|| self.chain.slot_clock.genesis_slot()); + + // The message is "excessively" late if it was more than one slot late. + let excessively_late = received_slot > sync_committee_message_slot + 1; + + // This closure will lazily produce a slot clock frozen at the time we received the + // message from the network and return a bool indicating if the message was invalid + // at the time of receipt too. + let invalid_in_hindsight = || { + let seen_clock = &self.chain.slot_clock.freeze_at(seen_timestamp); + let hindsight_verification = + sync_committee_verification::verify_propagation_slot_range( + seen_clock, + &sync_committee_message_slot, + ); + hindsight_verification.is_err() + }; + + // Penalize the peer if the message was more than one slot late + if excessively_late && invalid_in_hindsight() { + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "sync_past_slot", + ); } - // Do not propagate these messages. self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); } SyncCommitteeError::EmptyAggregationBitfield => { @@ -1683,7 +1830,11 @@ impl Worker { * */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "sync_empty_agg_bitfield", + ); } SyncCommitteeError::InvalidSelectionProof { .. } | SyncCommitteeError::InvalidSignature => { @@ -1693,7 +1844,11 @@ impl Worker { * The peer has published an invalid consensus message. */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "sync_invalid_proof_or_sig", + ); } SyncCommitteeError::AggregatorNotInCommittee { .. } | SyncCommitteeError::AggregatorPubkeyUnknown(_) => { @@ -1704,7 +1859,11 @@ impl Worker { * The peer has published an invalid consensus message. 
*/ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "sync_bad_aggregator", + ); } SyncCommitteeError::SyncContributionAlreadyKnown(_) | SyncCommitteeError::AggregatorAlreadyKnown(_) => { @@ -1737,7 +1896,11 @@ impl Worker { "type" => ?message_type, ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "sync_unknown_validator", + ); } SyncCommitteeError::UnknownValidatorPubkey(_) => { debug!( @@ -1747,7 +1910,11 @@ impl Worker { "type" => ?message_type, ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "sync_unknown_validator_pubkey", + ); } SyncCommitteeError::InvalidSubnetId { received, expected } => { /* @@ -1760,7 +1927,11 @@ impl Worker { "received" => ?received, ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "sync_invalid_subnet_id", + ); } SyncCommitteeError::Invalid(_) => { /* @@ -1769,7 +1940,11 @@ impl Worker { * The peer has published an invalid consensus message. */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "sync_invalid_state_processing", + ); } SyncCommitteeError::PriorSyncCommitteeMessageKnown { .. } => { /* @@ -1785,7 +1960,11 @@ impl Worker { ); // We still penalize the peer slightly. We don't want this to be a recurring // behaviour. 
- self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "sync_prior_known", + ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); @@ -1806,8 +1985,6 @@ impl Worker { "error" => ?e, ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); - // Penalize the peer slightly - self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); } SyncCommitteeError::BeaconStateError(e) => { /* @@ -1825,7 +2002,11 @@ impl Worker { ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); // Penalize the peer slightly - self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "sync_beacon_state_error", + ); } SyncCommitteeError::ContributionError(e) => { error!( @@ -1836,7 +2017,11 @@ impl Worker { ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); // Penalize the peer slightly - self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "sync_contribution_error", + ); } SyncCommitteeError::SyncCommitteeError(e) => { error!( @@ -1847,7 +2032,11 @@ impl Worker { ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); // Penalize the peer slightly - self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "sync_committee_error", + ); } SyncCommitteeError::ArithError(e) => { /* @@ -1860,7 +2049,11 @@ impl Worker { "error" => ?e, ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "sync_arith_error", + ); } SyncCommitteeError::InvalidSubcommittee { .. } => { /* @@ -1868,7 +2061,11 @@ impl Worker { an invalid message. 
*/ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "sync_invalid_subcommittee", + ); } } debug!( diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index f3d49c2b425..f79a655745f 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -129,7 +129,7 @@ impl Worker { ) { let mut send_block_count = 0; for root in request.block_roots.iter() { - if let Ok(Some(block)) = self.chain.store.get_block(root) { + if let Ok(Some(block)) = self.chain.get_block_checking_early_attester_cache(root) { self.send_response( peer_id, Response::BlocksByRoot(Some(Box::new(block))), diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index 6a75c2990a3..27e0a6711d0 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -7,7 +7,7 @@ use crate::sync::{BatchProcessResult, ChainId}; use beacon_chain::{ BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError, }; -use lighthouse_network::PeerId; +use lighthouse_network::{PeerAction, PeerId}; use slog::{crit, debug, error, info, trace, warn}; use tokio::sync::mpsc; use types::{Epoch, Hash256, SignedBeaconBlock}; @@ -23,6 +23,14 @@ pub enum ProcessId { ParentLookup(PeerId, Hash256), } +/// Returned when a chain segment import fails. +struct ChainSegmentFailed { + /// To be displayed in logs. + message: String, + /// Used to penalize peers. + peer_action: Option, +} + impl Worker { /// Attempt to process a block received from a direct RPC request, returning the processing /// result on the `result_tx` channel. 
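The new `ChainSegmentFailed` shape carries the log message and the optional peer penalty together, so the caller no longer has to infer from a bare `String` whether the peer misbehaved. A minimal sketch of how such a structured error is produced and consumed (stand-in types, not the actual `Worker` plumbing):

#[derive(Debug, Clone, Copy)]
enum PeerAction {
    LowToleranceError,
}

#[derive(Debug)]
struct ChainSegmentFailed {
    message: String,
    // None means the fault was on our side; Some(_) means the peer misbehaved.
    peer_action: Option<PeerAction>,
}

fn process_batch(blocks_ok: bool, internal_ok: bool) -> Result<usize, ChainSegmentFailed> {
    if !internal_ok {
        return Err(ChainSegmentFailed {
            message: "pubkey_cache_timeout".into(),
            peer_action: None, // internal error, don't punish the peer
        });
    }
    if !blocks_ok {
        return Err(ChainSegmentFailed {
            message: "invalid_signature".into(),
            peer_action: Some(PeerAction::LowToleranceError),
        });
    }
    Ok(0)
}

fn main() {
    match process_batch(false, true) {
        Err(e) => {
            println!("batch failed: {}", e.message);
            if let Some(action) = e.peer_action {
                println!("penalizing peer: {:?}", action);
            }
        }
        Ok(n) => println!("imported {} blocks", n),
    }
}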
@@ -123,9 +131,13 @@ impl Worker { "chain" => chain_id, "last_block_slot" => end_slot, "imported_blocks" => imported_blocks, - "error" => e, + "error" => %e.message, "service" => "sync"); - BatchProcessResult::Failed(imported_blocks > 0) + + BatchProcessResult::Failed { + imported_blocks: imported_blocks > 0, + peer_action: e.peer_action, + } } }; @@ -154,9 +166,12 @@ impl Worker { "batch_epoch" => epoch, "first_block_slot" => start_slot, "last_block_slot" => end_slot, - "error" => e, + "error" => %e.message, "service" => "sync"); - BatchProcessResult::Failed(false) + BatchProcessResult::Failed { + imported_blocks: false, + peer_action: e.peer_action, + } } }; @@ -175,7 +190,7 @@ impl Worker { // reverse match self.process_blocks(downloaded_blocks.iter().rev()) { (_, Err(e)) => { - debug!(self.log, "Parent lookup failed"; "last_peer_id" => %peer_id, "error" => e); + debug!(self.log, "Parent lookup failed"; "last_peer_id" => %peer_id, "error" => %e.message); self.send_sync_message(SyncMessage::ParentLookupFailed { peer_id, chain_head, @@ -193,7 +208,7 @@ impl Worker { fn process_blocks<'a>( &self, downloaded_blocks: impl Iterator>, - ) -> (usize, Result<(), String>) { + ) -> (usize, Result<(), ChainSegmentFailed>) { let blocks = downloaded_blocks.cloned().collect::>(); match self.chain.process_chain_segment(blocks) { ChainSegmentResult::Successful { imported_blocks } => { @@ -223,7 +238,7 @@ impl Worker { fn process_backfill_blocks( &self, blocks: &[SignedBeaconBlock], - ) -> (usize, Result<(), String>) { + ) -> (usize, Result<(), ChainSegmentFailed>) { match self.chain.import_historical_block_batch(blocks) { Ok(imported_blocks) => { metrics::inc_counter( @@ -250,7 +265,12 @@ impl Worker { "block_root" => ?block_root, "expected_root" => ?expected_block_root ); - String::from("mismatched_block_root") + + ChainSegmentFailed { + message: String::from("mismatched_block_root"), + // The peer is faulty if they send blocks with bad roots. + peer_action: Some(PeerAction::LowToleranceError), + } } HistoricalBlockError::InvalidSignature | HistoricalBlockError::SignatureSet(_) => { @@ -259,7 +279,12 @@ impl Worker { warn!( self.log, "Backfill batch processing error"; "error" => ?e ); - "invalid_signature".into() + + ChainSegmentFailed { + message: "invalid_signature".into(), + // The peer is faulty if they send bad signatures. + peer_action: Some(PeerAction::LowToleranceError), + } } HistoricalBlockError::ValidatorPubkeyCacheTimeout => { warn!( @@ -267,25 +292,55 @@ impl Worker { "Backfill batch processing error"; "error" => "pubkey_cache_timeout" ); - "pubkey_cache_timeout".into() + + ChainSegmentFailed { + message: "pubkey_cache_timeout".into(), + // This is an internal error, do not penalize the peer. + peer_action: None, + } } HistoricalBlockError::NoAnchorInfo => { warn!(self.log, "Backfill not required"); - String::from("no_anchor_info") + + ChainSegmentFailed { + message: String::from("no_anchor_info"), + // There is no need to do a historical sync, this is not a fault of + // the peer. + peer_action: None, + } } - HistoricalBlockError::IndexOutOfBounds - | HistoricalBlockError::BlockOutOfRange { .. } => { + HistoricalBlockError::IndexOutOfBounds => { error!( self.log, - "Backfill batch processing error"; + "Backfill batch OOB error"; "error" => ?e, ); - String::from("logic_error") + ChainSegmentFailed { + message: String::from("logic_error"), + // This should never occur, don't penalize the peer. + peer_action: None, + } + } + HistoricalBlockError::BlockOutOfRange { ..
} => { + error!( + self.log, + "Backfill batch error"; + "error" => ?e, + ); + ChainSegmentFailed { + message: String::from("unexpected_error"), + // This should never occur, don't penalize the peer. + peer_action: None, + } } }, other => { warn!(self.log, "Backfill batch processing error"; "error" => ?other); - format!("{:?}", other) + ChainSegmentFailed { + message: format!("{:?}", other), + // This is an internal error, don't penalize the peer. + peer_action: None, + } } }; (0, Err(err)) @@ -312,15 +367,18 @@ impl Worker { } /// Helper function to handle a `BlockError` from `process_chain_segment` - fn handle_failed_chain_segment(&self, error: BlockError) -> Result<(), String> { + fn handle_failed_chain_segment( + &self, + error: BlockError, + ) -> Result<(), ChainSegmentFailed> { match error { BlockError::ParentUnknown(block) => { // blocks should be sequential and all parents should exist - - Err(format!( - "Block has an unknown parent: {}", - block.parent_root() - )) + Err(ChainSegmentFailed { + message: format!("Block has an unknown parent: {}", block.parent_root()), + // Peers are faulty if they send non-sequential blocks. + peer_action: Some(PeerAction::LowToleranceError), + }) } BlockError::BlockIsAlreadyKnown => { // This can happen for many reasons. Head sync's can download multiples and parent @@ -350,10 +408,14 @@ impl Worker { ); } - Err(format!( - "Block with slot {} is higher than the current slot {}", - block_slot, present_slot - )) + Err(ChainSegmentFailed { + message: format!( + "Block with slot {} is higher than the current slot {}", + block_slot, present_slot + ), + // Peers are faulty if they send blocks from the future. + peer_action: Some(PeerAction::LowToleranceError), + }) } BlockError::WouldRevertFinalizedSlot { .. } => { debug!(self.log, "Finalized or earlier block processed";); @@ -370,7 +432,11 @@ impl Worker { "outcome" => ?e, ); - Err(format!("Internal error whilst processing block: {:?}", e)) + Err(ChainSegmentFailed { + message: format!("Internal error whilst processing block: {:?}", e), + // Do not penalize peers for internal errors. + peer_action: None, + }) } other => { debug!( @@ -379,7 +445,11 @@ impl Worker { "outcome" => %other, ); - Err(format!("Peer sent invalid block. Reason: {:?}", other)) + Err(ChainSegmentFailed { + message: format!("Peer sent invalid block. Reason: {:?}", other), + // Do not penalize peers for internal errors. + peer_action: None, + }) } } } diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index 35c5b4dce14..a10d238764b 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -4,216 +4,42 @@ use beacon_chain::{ }; use fnv::FnvHashMap; pub use lighthouse_metrics::*; -use lighthouse_network::PubsubMessage; use lighthouse_network::{ - types::GossipKind, BandwidthSinks, GossipTopic, Gossipsub, NetworkGlobals, TopicHash, + types::GossipKind, BandwidthSinks, GossipTopic, Gossipsub, NetworkGlobals, }; -use std::{collections::HashMap, sync::Arc}; +use std::sync::Arc; use strum::AsStaticRef; -use types::{ - consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, subnet_id::subnet_id_to_string, - sync_subnet_id::sync_subnet_id_to_string, EthSpec, -}; +use types::EthSpec; lazy_static! 
{ - /* - * Gossip subnets and scoring - */ - pub static ref PEERS_PER_PROTOCOL: Result = try_create_int_gauge_vec( - "gossipsub_peers_per_protocol", - "Peers via supported protocol", - &["protocol"] - ); - - pub static ref GOSSIPSUB_SUBSCRIBED_ATTESTATION_SUBNET_TOPIC: Result = try_create_int_gauge_vec( - "gossipsub_subscribed_attestation_subnets", - "Attestation subnets currently subscribed to", - &["subnet"] - ); - - pub static ref GOSSIPSUB_SUBSCRIBED_SYNC_SUBNET_TOPIC: Result = try_create_int_gauge_vec( - "gossipsub_subscribed_sync_subnets", - "Sync subnets currently subscribed to", - &["subnet"] - ); - - pub static ref GOSSIPSUB_SUBSCRIBED_PEERS_ATTESTATION_SUBNET_TOPIC: Result = try_create_int_gauge_vec( - "gossipsub_peers_per_attestation_subnet_topic_count", - "Peers subscribed per attestation subnet topic", - &["subnet"] - ); - - pub static ref GOSSIPSUB_SUBSCRIBED_PEERS_SYNC_SUBNET_TOPIC: Result = try_create_int_gauge_vec( - "gossipsub_peers_per_sync_subnet_topic_count", - "Peers subscribed per sync subnet topic", - &["subnet"] - ); - - pub static ref MESH_PEERS_PER_MAIN_TOPIC: Result = try_create_int_gauge_vec( - "gossipsub_mesh_peers_per_main_topic", - "Mesh peers per main topic", - &["topic_hash"] - ); - - pub static ref MESH_PEERS_PER_ATTESTATION_SUBNET_TOPIC: Result = try_create_int_gauge_vec( - "gossipsub_mesh_peers_per_subnet_topic", - "Mesh peers per subnet topic", - &["subnet"] - ); - - pub static ref MESH_PEERS_PER_SYNC_SUBNET_TOPIC: Result = try_create_int_gauge_vec( - "gossipsub_mesh_peers_per_subnet_topic", - "Mesh peers per subnet topic", - &["subnet"] - ); - - pub static ref AVG_GOSSIPSUB_PEER_SCORE_PER_MAIN_TOPIC: Result = try_create_float_gauge_vec( - "gossipsub_avg_peer_score_per_topic", - "Average peer's score per topic", - &["topic_hash"] - ); - - pub static ref AVG_GOSSIPSUB_PEER_SCORE_PER_ATTESTATION_SUBNET_TOPIC: Result = try_create_float_gauge_vec( - "gossipsub_avg_peer_score_per_attestation_subnet_topic", - "Average peer's score per attestation subnet topic", - &["subnet"] - ); - - pub static ref AVG_GOSSIPSUB_PEER_SCORE_PER_SYNC_SUBNET_TOPIC: Result = try_create_float_gauge_vec( - "gossipsub_avg_peer_score_per_sync_subnet_topic", - "Average peer's score per sync committee subnet topic", - &["subnet"] - ); - - pub static ref ATTESTATIONS_PUBLISHED_PER_SUBNET_PER_SLOT: Result = try_create_int_counter_vec( - "gossipsub_attestations_published_per_subnet_per_slot", - "Failed attestation publishes per subnet", - &["subnet"] - ); - - pub static ref SCORES_BELOW_ZERO_PER_CLIENT: Result = try_create_float_gauge_vec( - "gossipsub_scores_below_zero_per_client", - "Relative number of scores below zero per client", - &["Client"] - ); - pub static ref SCORES_BELOW_GOSSIP_THRESHOLD_PER_CLIENT: Result = try_create_float_gauge_vec( - "gossipsub_scores_below_gossip_threshold_per_client", - "Relative number of scores below gossip threshold per client", - &["Client"] - ); - pub static ref SCORES_BELOW_PUBLISH_THRESHOLD_PER_CLIENT: Result = try_create_float_gauge_vec( - "gossipsub_scores_below_publish_threshold_per_client", - "Relative number of scores below publish threshold per client", - &["Client"] - ); - pub static ref SCORES_BELOW_GREYLIST_THRESHOLD_PER_CLIENT: Result = try_create_float_gauge_vec( - "gossipsub_scores_below_greylist_threshold_per_client", - "Relative number of scores below greylist threshold per client", + pub static ref BEACON_BLOCK_MESH_PEERS_PER_CLIENT: Result = + try_create_int_gauge_vec( + "block_mesh_peers_per_client", + "Number of mesh peers for 
BeaconBlock topic per client", &["Client"] ); - pub static ref MIN_SCORES_PER_CLIENT: Result = try_create_float_gauge_vec( - "gossipsub_min_scores_per_client", - "Minimum scores per client", - &["Client"] - ); - pub static ref MEDIAN_SCORES_PER_CLIENT: Result = try_create_float_gauge_vec( - "gossipsub_median_scores_per_client", - "Median scores per client", - &["Client"] - ); - pub static ref MEAN_SCORES_PER_CLIENT: Result = try_create_float_gauge_vec( - "gossipsub_mean_scores_per_client", - "Mean scores per client", - &["Client"] - ); - pub static ref MAX_SCORES_PER_CLIENT: Result = try_create_float_gauge_vec( - "gossipsub_max_scores_per_client", - "Max scores per client", - &["Client"] - ); - pub static ref BEACON_BLOCK_MESH_PEERS_PER_CLIENT: Result = - try_create_int_gauge_vec( - "block_mesh_peers_per_client", - "Number of mesh peers for BeaconBlock topic per client", - &["Client"] - ); pub static ref BEACON_AGGREGATE_AND_PROOF_MESH_PEERS_PER_CLIENT: Result = try_create_int_gauge_vec( "beacon_aggregate_and_proof_mesh_peers_per_client", "Number of mesh peers for BeaconAggregateAndProof topic per client", &["Client"] ); -} - -lazy_static! { - /* - * Gossip Rx - */ - pub static ref GOSSIP_BLOCKS_RX: Result = try_create_int_counter( - "gossipsub_blocks_rx_total", - "Count of gossip blocks received" - ); - pub static ref GOSSIP_UNAGGREGATED_ATTESTATIONS_RX: Result = try_create_int_counter( - "gossipsub_unaggregated_attestations_rx_total", - "Count of gossip unaggregated attestations received" - ); - pub static ref GOSSIP_AGGREGATED_ATTESTATIONS_RX: Result = try_create_int_counter( - "gossipsub_aggregated_attestations_rx_total", - "Count of gossip aggregated attestations received" - ); - pub static ref GOSSIP_SYNC_COMMITTEE_MESSAGE_RX: Result = try_create_int_counter( - "gossipsub_sync_committee_message_rx_total", - "Count of gossip sync committee messages received" - ); - pub static ref GOSSIP_SYNC_COMMITTEE_CONTRIBUTION_RX: Result = try_create_int_counter( - "gossipsub_sync_committee_contribution_received_total", - "Count of gossip sync committee contributions received" - ); - - - /* - * Gossip Tx - */ - pub static ref GOSSIP_BLOCKS_TX: Result = try_create_int_counter( - "gossipsub_blocks_tx_total", - "Count of gossip blocks transmitted" - ); - pub static ref GOSSIP_UNAGGREGATED_ATTESTATIONS_TX: Result = try_create_int_counter( - "gossipsub_unaggregated_attestations_tx_total", - "Count of gossip unaggregated attestations transmitted" - ); - pub static ref GOSSIP_AGGREGATED_ATTESTATIONS_TX: Result = try_create_int_counter( - "gossipsub_aggregated_attestations_tx_total", - "Count of gossip aggregated attestations transmitted" - ); - pub static ref GOSSIP_SYNC_COMMITTEE_MESSAGE_TX: Result = try_create_int_counter( - "gossipsub_sync_committee_message_tx_total", - "Count of gossip sync committee messages transmitted" - ); - pub static ref GOSSIP_SYNC_COMMITTEE_CONTRIBUTION_TX: Result = try_create_int_counter( - "gossipsub_sync_committee_contribution_tx_total", - "Count of gossip sync committee contributions transmitted" - ); /* * Attestation subnet subscriptions */ pub static ref SUBNET_SUBSCRIPTION_REQUESTS: Result = try_create_int_counter( - "gossipsub_attestation_subnet_subscriptions_total", + "validator_attestation_subnet_subscriptions_total", "Count of validator attestation subscription requests." 
); pub static ref SUBNET_SUBSCRIPTION_AGGREGATOR_REQUESTS: Result = try_create_int_counter( - "gossipsub_subnet_subscriptions_aggregator_total", + "validator_subnet_subscriptions_aggregator_total", "Count of validator subscription requests where the subscriber is an aggregator." ); - - /* - * Sync committee subnet subscriptions - */ - pub static ref SYNC_COMMITTEE_SUBSCRIPTION_REQUESTS: Result = try_create_int_counter( - "gossipsub_sync_committee_subnet_subscriptions_total", + pub static ref SYNC_COMMITTEE_SUBSCRIPTION_REQUESTS: Result = try_create_int_counter( + "validator_sync_committee_subnet_subscriptions_total", "Count of validator sync committee subscription requests." ); @@ -406,14 +232,13 @@ lazy_static! { "beacon_processor_sync_contribution_verified_total", "Total number of sync committee contributions verified for gossip." ); + pub static ref BEACON_PROCESSOR_SYNC_CONTRIBUTION_IMPORTED_TOTAL: Result = try_create_int_counter( "beacon_processor_sync_contribution_imported_total", "Total number of sync committee contributions imported to fork choice, etc." ); -} - -lazy_static! { + /// Errors and Debugging Stats pub static ref GOSSIP_ATTESTATION_ERRORS_PER_TYPE: Result = try_create_int_counter_vec( "gossipsub_attestation_errors_per_type", "Gossipsub attestation errors per error type", &["type"] ); @@ -426,8 +251,16 @@ lazy_static! { "Gossipsub sync_committee errors per error type", &["type"] ); +} + +lazy_static! { + + /* + * Bandwidth metrics + */ pub static ref INBOUND_LIBP2P_BYTES: Result = try_create_int_gauge("libp2p_inbound_bytes", "The inbound bandwidth over libp2p"); + pub static ref OUTBOUND_LIBP2P_BYTES: Result = try_create_int_gauge( "libp2p_outbound_bytes", "The outbound bandwidth over libp2p" @@ -436,18 +269,8 @@ lazy_static! { "libp2p_total_bandwidth", "The total inbound/outbound bandwidth over libp2p" ); -} -pub fn update_bandwidth_metrics(bandwidth: Arc) { - set_gauge(&INBOUND_LIBP2P_BYTES, bandwidth.total_inbound() as i64); - set_gauge(&OUTBOUND_LIBP2P_BYTES, bandwidth.total_outbound() as i64); - set_gauge( - &TOTAL_LIBP2P_BANDWIDTH, - (bandwidth.total_inbound() + bandwidth.total_outbound()) as i64, - ); -} -lazy_static! { /* * Sync related metrics */ @@ -489,11 +312,21 @@ lazy_static! { ); pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_EXPIRED_ATTESTATIONS: Result = try_create_int_counter( "beacon_processor_reprocessing_queue_expired_attestations", - "Number of queued attestations which have expired before a matching block has been found" + "Number of queued attestations which have expired before a matching block has been found." ); pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_ATTESTATIONS: Result = try_create_int_counter( "beacon_processor_reprocessing_queue_matched_attestations", - "Number of queued attestations where as matching block has been imported" + "Number of queued attestations where a matching block has been imported." + ); + +} + +pub fn update_bandwidth_metrics(bandwidth: Arc) { + set_gauge(&INBOUND_LIBP2P_BYTES, bandwidth.total_inbound() as i64); + set_gauge(&OUTBOUND_LIBP2P_BYTES, bandwidth.total_outbound() as i64); + set_gauge( + &TOTAL_LIBP2P_BANDWIDTH, + (bandwidth.total_inbound() + bandwidth.total_outbound()) as i64, ); } @@ -505,402 +338,51 @@ pub fn register_sync_committee_error(error: &SyncCommitteeError) { inc_counter_vec(&GOSSIP_SYNC_COMMITTEE_ERRORS_PER_TYPE, &[error.as_ref()]); } -/// Inspects the `messages` that were being sent to the network and updates Prometheus metrics.
-pub fn expose_publish_metrics(messages: &[PubsubMessage]) { - for message in messages { - match message { - PubsubMessage::BeaconBlock(_) => inc_counter(&GOSSIP_BLOCKS_TX), - PubsubMessage::Attestation(subnet_id) => { - inc_counter_vec( - &ATTESTATIONS_PUBLISHED_PER_SUBNET_PER_SLOT, - &[subnet_id.0.as_ref()], - ); - inc_counter(&GOSSIP_UNAGGREGATED_ATTESTATIONS_TX) - } - PubsubMessage::AggregateAndProofAttestation(_) => { - inc_counter(&GOSSIP_AGGREGATED_ATTESTATIONS_TX) - } - PubsubMessage::SyncCommitteeMessage(_) => { - inc_counter(&GOSSIP_SYNC_COMMITTEE_MESSAGE_TX) - } - PubsubMessage::SignedContributionAndProof(_) => { - inc_counter(&GOSSIP_SYNC_COMMITTEE_CONTRIBUTION_TX) - } - _ => {} - } - } -} - -/// Inspects a `message` received from the network and updates Prometheus metrics. -pub fn expose_receive_metrics(message: &PubsubMessage) { - match message { - PubsubMessage::BeaconBlock(_) => inc_counter(&GOSSIP_BLOCKS_RX), - PubsubMessage::Attestation(_) => inc_counter(&GOSSIP_UNAGGREGATED_ATTESTATIONS_RX), - PubsubMessage::AggregateAndProofAttestation(_) => { - inc_counter(&GOSSIP_AGGREGATED_ATTESTATIONS_RX) - } - PubsubMessage::SyncCommitteeMessage(_) => inc_counter(&GOSSIP_SYNC_COMMITTEE_MESSAGE_RX), - PubsubMessage::SignedContributionAndProof(_) => { - inc_counter(&GOSSIP_SYNC_COMMITTEE_CONTRIBUTION_RX) - } - _ => {} - } -} - pub fn update_gossip_metrics( gossipsub: &Gossipsub, network_globals: &Arc>, ) { - // Clear the metrics - let _ = PEERS_PER_PROTOCOL.as_ref().map(|gauge| gauge.reset()); - let _ = PEERS_PER_PROTOCOL.as_ref().map(|gauge| gauge.reset()); - let _ = MESH_PEERS_PER_MAIN_TOPIC - .as_ref() - .map(|gauge| gauge.reset()); - let _ = AVG_GOSSIPSUB_PEER_SCORE_PER_MAIN_TOPIC - .as_ref() - .map(|gauge| gauge.reset()); - let _ = AVG_GOSSIPSUB_PEER_SCORE_PER_ATTESTATION_SUBNET_TOPIC - .as_ref() - .map(|gauge| gauge.reset()); - let _ = AVG_GOSSIPSUB_PEER_SCORE_PER_SYNC_SUBNET_TOPIC - .as_ref() - .map(|gauge| gauge.reset()); - - let _ = SCORES_BELOW_ZERO_PER_CLIENT - .as_ref() - .map(|gauge| gauge.reset()); - let _ = SCORES_BELOW_GOSSIP_THRESHOLD_PER_CLIENT - .as_ref() - .map(|gauge| gauge.reset()); - let _ = SCORES_BELOW_PUBLISH_THRESHOLD_PER_CLIENT - .as_ref() - .map(|gauge| gauge.reset()); - let _ = SCORES_BELOW_GREYLIST_THRESHOLD_PER_CLIENT - .as_ref() - .map(|gauge| gauge.reset()); - let _ = MIN_SCORES_PER_CLIENT.as_ref().map(|gauge| gauge.reset()); - let _ = MEDIAN_SCORES_PER_CLIENT.as_ref().map(|gauge| gauge.reset()); - let _ = MEAN_SCORES_PER_CLIENT.as_ref().map(|gauge| gauge.reset()); - let _ = MAX_SCORES_PER_CLIENT.as_ref().map(|gauge| gauge.reset()); - - let _ = BEACON_BLOCK_MESH_PEERS_PER_CLIENT - .as_ref() - .map(|gauge| gauge.reset()); - let _ = BEACON_AGGREGATE_AND_PROOF_MESH_PEERS_PER_CLIENT - .as_ref() - .map(|gauge| gauge.reset()); - - // reset the mesh peers, showing all subnets - for subnet_id in 0..T::default_spec().attestation_subnet_count { - let _ = get_int_gauge( - &MESH_PEERS_PER_ATTESTATION_SUBNET_TOPIC, - &[subnet_id_to_string(subnet_id)], - ) - .map(|v| v.set(0)); - - let _ = get_int_gauge( - &GOSSIPSUB_SUBSCRIBED_ATTESTATION_SUBNET_TOPIC, - &[subnet_id_to_string(subnet_id)], - ) - .map(|v| v.set(0)); - - let _ = get_int_gauge( - &GOSSIPSUB_SUBSCRIBED_PEERS_ATTESTATION_SUBNET_TOPIC, - &[subnet_id_to_string(subnet_id)], - ) - .map(|v| v.set(0)); - } - - for subnet_id in 0..SYNC_COMMITTEE_SUBNET_COUNT { - let _ = get_int_gauge( - &MESH_PEERS_PER_SYNC_SUBNET_TOPIC, - &[sync_subnet_id_to_string(subnet_id)], - ) - .map(|v| v.set(0)); - - let _ = 
get_int_gauge( - &GOSSIPSUB_SUBSCRIBED_SYNC_SUBNET_TOPIC, - &[sync_subnet_id_to_string(subnet_id)], - ) - .map(|v| v.set(0)); - - let _ = get_int_gauge( - &GOSSIPSUB_SUBSCRIBED_PEERS_SYNC_SUBNET_TOPIC, - &[sync_subnet_id_to_string(subnet_id)], - ) - .map(|v| v.set(0)); - } - - // Subnet topics subscribed to + // Mesh peers per client for topic_hash in gossipsub.topics() { if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) { - if let GossipKind::Attestation(subnet_id) = topic.kind() { - let _ = get_int_gauge( - &GOSSIPSUB_SUBSCRIBED_ATTESTATION_SUBNET_TOPIC, - &[subnet_id_to_string(subnet_id.into())], - ) - .map(|v| v.set(1)); - } - } - } - - // Peers per subscribed subnet - let mut peers_per_topic: HashMap = HashMap::new(); - for (peer_id, topics) in gossipsub.all_peers() { - for topic_hash in topics { - *peers_per_topic.entry(topic_hash.clone()).or_default() += 1; - - if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) { - match topic.kind() { - GossipKind::Attestation(subnet_id) => { - if let Some(v) = get_int_gauge( - &GOSSIPSUB_SUBSCRIBED_PEERS_ATTESTATION_SUBNET_TOPIC, - &[subnet_id_to_string(subnet_id.into())], - ) { + match topic.kind() { + GossipKind::Attestation(_subnet_id) => {} + GossipKind::BeaconBlock => { + for peer_id in gossipsub.mesh_peers(topic_hash) { + let client = network_globals + .peers + .read() + .peer_info(peer_id) + .map(|peer_info| peer_info.client().kind.as_static()) + .unwrap_or_else(|| "Unknown"); + if let Some(v) = + get_int_gauge(&BEACON_BLOCK_MESH_PEERS_PER_CLIENT, &[client]) + { v.inc() }; - - // average peer scores - if let Some(score) = gossipsub.peer_score(peer_id) { - if let Some(v) = get_gauge( - &AVG_GOSSIPSUB_PEER_SCORE_PER_ATTESTATION_SUBNET_TOPIC, - &[subnet_id_to_string(subnet_id.into())], - ) { - v.add(score) - }; - } } - GossipKind::SyncCommitteeMessage(subnet_id) => { + } + GossipKind::BeaconAggregateAndProof => { + for peer_id in gossipsub.mesh_peers(topic_hash) { + let client = network_globals + .peers + .read() + .peer_info(peer_id) + .map(|peer_info| peer_info.client().kind.as_static()) + .unwrap_or_else(|| "Unknown"); if let Some(v) = get_int_gauge( - &GOSSIPSUB_SUBSCRIBED_PEERS_SYNC_SUBNET_TOPIC, - &[sync_subnet_id_to_string(subnet_id.into())], + &BEACON_AGGREGATE_AND_PROOF_MESH_PEERS_PER_CLIENT, + &[client], ) { v.inc() }; - - // average peer scores - if let Some(score) = gossipsub.peer_score(peer_id) { - if let Some(v) = get_gauge( - &AVG_GOSSIPSUB_PEER_SCORE_PER_SYNC_SUBNET_TOPIC, - &[sync_subnet_id_to_string(subnet_id.into())], - ) { - v.add(score) - }; - } - } - kind => { - // main topics - if let Some(score) = gossipsub.peer_score(peer_id) { - if let Some(v) = get_gauge( - &AVG_GOSSIPSUB_PEER_SCORE_PER_MAIN_TOPIC, - &[kind.as_ref()], - ) { - v.add(score) - }; - } } } + GossipKind::SyncCommitteeMessage(_subnet_id) => {} + _kind => {} } } } - // adjust to average scores by dividing by number of peers - for (topic_hash, peers) in peers_per_topic.iter() { - if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) { - match topic.kind() { - GossipKind::Attestation(subnet_id) => { - // average peer scores - if let Some(v) = get_gauge( - &AVG_GOSSIPSUB_PEER_SCORE_PER_ATTESTATION_SUBNET_TOPIC, - &[subnet_id_to_string(subnet_id.into())], - ) { - v.set(v.get() / (*peers as f64)) - }; - } - GossipKind::SyncCommitteeMessage(subnet_id) => { - // average peer scores - if let Some(v) = get_gauge( - &AVG_GOSSIPSUB_PEER_SCORE_PER_SYNC_SUBNET_TOPIC, - &[sync_subnet_id_to_string(subnet_id.into())], - ) { - v.set(v.get() / (*peers 
as f64)) - }; - } - kind => { - // main topics - if let Some(v) = - get_gauge(&AVG_GOSSIPSUB_PEER_SCORE_PER_MAIN_TOPIC, &[kind.as_ref()]) - { - v.set(v.get() / (*peers as f64)) - }; - } - } - } - } - - // mesh peers - for topic_hash in gossipsub.topics() { - let peers = gossipsub.mesh_peers(topic_hash).count(); - if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) { - match topic.kind() { - GossipKind::Attestation(subnet_id) => { - if let Some(v) = get_int_gauge( - &MESH_PEERS_PER_ATTESTATION_SUBNET_TOPIC, - &[subnet_id_to_string(subnet_id.into())], - ) { - v.set(peers as i64) - }; - } - GossipKind::SyncCommitteeMessage(subnet_id) => { - if let Some(v) = get_int_gauge( - &MESH_PEERS_PER_SYNC_SUBNET_TOPIC, - &[sync_subnet_id_to_string(subnet_id.into())], - ) { - v.set(peers as i64) - }; - } - kind => { - // main topics - if let Some(v) = get_int_gauge(&MESH_PEERS_PER_MAIN_TOPIC, &[kind.as_ref()]) { - v.set(peers as i64) - }; - } - } - } - } - - // protocol peers - let mut peers_per_protocol: HashMap<&'static str, i64> = HashMap::new(); - for (_peer, protocol) in gossipsub.peer_protocol() { - *peers_per_protocol - .entry(protocol.as_static_ref()) - .or_default() += 1; - } - - for (protocol, peers) in peers_per_protocol.iter() { - if let Some(v) = get_int_gauge(&PEERS_PER_PROTOCOL, &[protocol]) { - v.set(*peers) - }; - } - - let mut peer_to_client = HashMap::new(); - let mut scores_per_client: HashMap<&'static str, Vec> = HashMap::new(); - { - let peers = network_globals.peers.read(); - for (peer_id, _) in gossipsub.all_peers() { - let client = peers - .peer_info(peer_id) - .map(|peer_info| peer_info.client().kind.as_static()) - .unwrap_or_else(|| "Unknown"); - - peer_to_client.insert(peer_id, client); - let score = gossipsub.peer_score(peer_id).unwrap_or(0.0); - scores_per_client.entry(client).or_default().push(score); - } - } - - // mesh peers per client - for topic_hash in gossipsub.topics() { - if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) { - match topic.kind() { - GossipKind::BeaconBlock => { - for peer in gossipsub.mesh_peers(topic_hash) { - if let Some(client) = peer_to_client.get(peer) { - if let Some(v) = - get_int_gauge(&BEACON_BLOCK_MESH_PEERS_PER_CLIENT, &[client]) - { - v.inc() - }; - } - } - } - GossipKind::BeaconAggregateAndProof => { - for peer in gossipsub.mesh_peers(topic_hash) { - if let Some(client) = peer_to_client.get(peer) { - if let Some(v) = get_int_gauge( - &BEACON_AGGREGATE_AND_PROOF_MESH_PEERS_PER_CLIENT, - &[client], - ) { - v.inc() - }; - } - } - } - _ => (), - } - } - } - - for (client, scores) in scores_per_client.into_iter() { - let c = &[client]; - let len = scores.len(); - if len > 0 { - let mut below0 = 0; - let mut below_gossip_threshold = 0; - let mut below_publish_threshold = 0; - let mut below_greylist_threshold = 0; - let mut min = f64::INFINITY; - let mut sum = 0.0; - let mut max = f64::NEG_INFINITY; - - let count = scores.len() as f64; - - for &score in &scores { - if score < 0.0 { - below0 += 1; - } - if score < -4000.0 { - //TODO not hardcode - below_gossip_threshold += 1; - } - if score < -8000.0 { - //TODO not hardcode - below_publish_threshold += 1; - } - if score < -16000.0 { - //TODO not hardcode - below_greylist_threshold += 1; - } - if score < min { - min = score; - } - if score > max { - max = score; - } - sum += score; - } - - let median = if len == 0 { - 0.0 - } else if len % 2 == 0 { - (scores[len / 2 - 1] + scores[len / 2]) / 2.0 - } else { - scores[len / 2] - }; - - set_gauge_entry(&SCORES_BELOW_ZERO_PER_CLIENT, 
c, below0 as f64 / count); - set_gauge_entry( - &SCORES_BELOW_GOSSIP_THRESHOLD_PER_CLIENT, - c, - below_gossip_threshold as f64 / count, - ); - set_gauge_entry( - &SCORES_BELOW_PUBLISH_THRESHOLD_PER_CLIENT, - c, - below_publish_threshold as f64 / count, - ); - set_gauge_entry( - &SCORES_BELOW_GREYLIST_THRESHOLD_PER_CLIENT, - c, - below_greylist_threshold as f64 / count, - ); - - set_gauge_entry(&MIN_SCORES_PER_CLIENT, c, min); - set_gauge_entry(&MEDIAN_SCORES_PER_CLIENT, c, median); - set_gauge_entry(&MEAN_SCORES_PER_CLIENT, c, sum / count); - set_gauge_entry(&MAX_SCORES_PER_CLIENT, c, max); - } - } } pub fn update_sync_metrics(network_globals: &Arc>) { diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index ce8aca47250..c6f68d5faa6 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -7,17 +7,21 @@ use crate::{ NetworkConfig, }; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use futures::channel::mpsc::Sender; use futures::future::OptionFuture; use futures::prelude::*; +use lighthouse_network::{ + open_metrics_client::registry::Registry, MessageAcceptance, Service as LibP2PService, +}; use lighthouse_network::{ rpc::{GoodbyeReason, RPCResponseErrorCode, RequestId}, - Libp2pEvent, PeerAction, PeerRequestId, PubsubMessage, ReportSource, Request, Response, Subnet, + Context, Libp2pEvent, PeerAction, PeerRequestId, PubsubMessage, ReportSource, Request, + Response, Subnet, }; use lighthouse_network::{ types::{GossipEncoding, GossipTopic}, BehaviourEvent, MessageId, NetworkGlobals, PeerId, }; -use lighthouse_network::{MessageAcceptance, Service as LibP2PService}; use slog::{crit, debug, error, info, o, trace, warn}; use std::{net::SocketAddr, pin::Pin, sync::Arc, time::Duration}; use store::HotColdDB; @@ -32,7 +36,7 @@ use types::{ mod tests; /// The interval (in seconds) at which various network metrics are updated. -const METRIC_UPDATE_INTERVAL: u64 = 1; +const METRIC_UPDATE_INTERVAL: u64 = 5; /// Number of slots before the fork when we should subscribe to the new fork topics. const SUBSCRIBE_DELAY_SLOTS: u64 = 2; /// Delay after a fork where we unsubscribe from pre-fork topics. @@ -93,6 +97,7 @@ pub enum NetworkMessage { peer_id: PeerId, action: PeerAction, source: ReportSource, + msg: &'static str, }, /// Disconnect and ban a peer, providing a reason. GoodbyePeer { @@ -154,6 +159,7 @@ impl NetworkService { beacon_chain: Arc>, config: &NetworkConfig, executor: task_executor::TaskExecutor, + gossipsub_registry: Option<&'_ mut Registry>, ) -> error::Result<( Arc>, mpsc::UnboundedSender>, @@ -199,16 +205,18 @@ impl NetworkService { debug!(network_log, "Current fork"; "fork_name" => ?fork_context.current_fork()); - // launch libp2p service - let (network_globals, mut libp2p) = LibP2PService::new( - executor.clone(), + // construct the libp2p service context + let service_context = Context { config, enr_fork_id, - &network_log, - fork_context.clone(), - &beacon_chain.spec, - ) - .await?; + fork_context: fork_context.clone(), + chain_spec: &beacon_chain.spec, + gossipsub_registry, + }; + + // launch libp2p service + let (network_globals, mut libp2p) = + LibP2PService::new(executor.clone(), service_context, &network_log).await?; // Repopulate the DHT with stored ENR's if discovery is not disabled.
if !config.disable_discovery { @@ -272,7 +280,7 @@ impl NetworkService { log: network_log, }; - spawn_service(executor, network_service); + network_service.spawn_service(executor); Ok((network_globals, network_send)) } @@ -313,440 +321,531 @@ impl NetworkService { result } -} -fn spawn_service( - executor: task_executor::TaskExecutor, - mut service: NetworkService, -) { - let mut shutdown_sender = executor.shutdown_sender(); - - // spawn on the current executor - executor.spawn(async move { - - let mut metric_update_counter = 0; - loop { - // build the futures to check simultaneously - tokio::select! { - _ = service.metrics_update.tick(), if service.metrics_enabled => { - // update various network metrics - metric_update_counter +=1; - if metric_update_counter % T::EthSpec::default_spec().seconds_per_slot == 0 { - // if a slot has occurred, reset the metrics - let _ = metrics::ATTESTATIONS_PUBLISHED_PER_SUBNET_PER_SLOT - .as_ref() - .map(|gauge| gauge.reset()); + fn send_to_router(&mut self, msg: RouterMessage) { + if let Err(mpsc::error::SendError(msg)) = self.router_send.send(msg) { + debug!(self.log, "Failed to send msg to router"; "msg" => ?msg); + } + } + + fn spawn_service(mut self, executor: task_executor::TaskExecutor) { + let mut shutdown_sender = executor.shutdown_sender(); + + // spawn on the current executor + let service_fut = async move { + loop { + tokio::select! { + _ = self.metrics_update.tick(), if self.metrics_enabled => { + // update various network metrics + metrics::update_gossip_metrics::( + self.libp2p.swarm.behaviour().gs(), + &self.network_globals, + ); + // update sync metrics + metrics::update_sync_metrics(&self.network_globals); } - metrics::update_gossip_metrics::( - service.libp2p.swarm.behaviour_mut().gs(), - &service.network_globals, - ); - // update sync metrics - metrics::update_sync_metrics(&service.network_globals); - } - _ = service.gossipsub_parameter_update.tick() => { - if let Ok(slot) = service.beacon_chain.slot() { - if let Some(active_validators) = service.beacon_chain.with_head(|head| { - Ok::<_, BeaconChainError>( - head - .beacon_state - .get_cached_active_validator_indices(RelativeEpoch::Current) - .map(|indices| indices.len()) - .ok() - .or_else(|| { - // if active validator cached was not build we count the - // active validators - service - .beacon_chain - .epoch() - .ok() - .map(|current_epoch| { - head - .beacon_state - .validators() - .iter() - .filter(|validator| - validator.is_active_at(current_epoch) - ) - .count() - }) - }) - ) - }).unwrap_or(None) { - if service.libp2p.swarm.behaviour_mut().update_gossipsub_parameters(active_validators, slot).is_err() { - error!( - service.log, - "Failed to update gossipsub parameters"; - "active_validators" => active_validators - ); - } + _ = self.gossipsub_parameter_update.tick() => self.update_gossipsub_parameters(), + + // handle a message sent to the network + Some(msg) = self.network_recv.recv() => self.on_network_msg(msg, &mut shutdown_sender).await, + + // process any attestation service events + Some(msg) = self.attestation_service.next() => self.on_attestation_service_msg(msg), + + // process any sync committee service events + Some(msg) = self.sync_committee_service.next() => self.on_sync_committee_service_message(msg), + + event = self.libp2p.next_event() => self.on_libp2p_event(event, &mut shutdown_sender).await, + + Some(_) = &mut self.next_fork_update => self.update_next_fork(), + + Some(_) = &mut self.next_unsubscribe => { + let new_enr_fork_id = self.beacon_chain.enr_fork_id(); 
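The effect of this refactor is easier to see in miniature: the old free function `spawn_service` held one monolithic `tokio::select!` with inline bodies, while the new version gives `NetworkService` ownership of the loop and dispatches each branch to a named handler. A stripped-down sketch of the shape, using hypothetical `Service`/`Msg` types rather than Lighthouse's:

```rust
use tokio::sync::mpsc;

enum Msg {
    Ping,
}

struct Service {
    rx: mpsc::UnboundedReceiver<Msg>,
    metrics_tick: tokio::time::Interval,
}

impl Service {
    // Consumes `self`, exactly like the new `spawn_service(mut self, ...)` above.
    fn spawn(mut self) {
        tokio::spawn(async move {
            loop {
                tokio::select! {
                    _ = self.metrics_tick.tick() => self.on_metrics_tick(),
                    Some(msg) = self.rx.recv() => self.on_msg(msg),
                    else => break, // every branch disabled: shut down
                }
            }
        });
    }

    fn on_metrics_tick(&mut self) { /* update gauges */ }

    fn on_msg(&mut self, _msg: Msg) { /* route the message */ }
}
```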
self.libp2p.swarm.behaviour_mut().unsubscribe_from_fork_topics_except(new_enr_fork_id.fork_digest); + info!(self.log, "Unsubscribed from old fork topics"); + self.next_unsubscribe = Box::pin(None.into()); + } + + Some(_) = &mut self.next_fork_subscriptions => { + if let Some((fork_name, _)) = self.beacon_chain.duration_to_next_fork() { + let fork_version = self.beacon_chain.spec.fork_version_for_name(fork_name); + let fork_digest = ChainSpec::compute_fork_digest(fork_version, self.beacon_chain.genesis_validators_root); + info!(self.log, "Subscribing to new fork topics"); + self.libp2p.swarm.behaviour_mut().subscribe_new_fork_topics(fork_digest); + self.next_fork_subscriptions = Box::pin(None.into()); + } + else { + error!(self.log, "Fork subscription scheduled but no fork scheduled"); } } } - // handle a message sent to the network - Some(message) = service.network_recv.recv() => { + metrics::update_bandwidth_metrics(self.libp2p.bandwidth.clone()); + } + }; + executor.spawn(service_fut, "network"); + } + + /// Handle an event received from the network. + async fn on_libp2p_event( + &mut self, + ev: Libp2pEvent, + shutdown_sender: &mut Sender, + ) { + match ev { + Libp2pEvent::Behaviour(event) => match event { + BehaviourEvent::PeerConnectedOutgoing(peer_id) => { + self.send_to_router(RouterMessage::PeerDialed(peer_id)); + } + BehaviourEvent::PeerConnectedIncoming(_) + | BehaviourEvent::PeerBanned(_) + | BehaviourEvent::PeerUnbanned(_) => { + // No action required for these events. + } + BehaviourEvent::PeerDisconnected(peer_id) => { + self.send_to_router(RouterMessage::PeerDisconnected(peer_id)); + } + BehaviourEvent::RequestReceived { + peer_id, + id, + request, + } => { + self.send_to_router(RouterMessage::RPCRequestReceived { + peer_id, + id, + request, + }); + } + BehaviourEvent::ResponseReceived { + peer_id, + id, + response, + } => { + self.send_to_router(RouterMessage::RPCResponseReceived { + peer_id, + request_id: id, + response, + }); + } + BehaviourEvent::RPCFailed { id, peer_id } => { + self.send_to_router(RouterMessage::RPCFailed { + peer_id, + request_id: id, + }); + } + BehaviourEvent::StatusPeer(peer_id) => { + self.send_to_router(RouterMessage::StatusPeer(peer_id)); + } + BehaviourEvent::PubsubMessage { + id, + source, + message, + .. + } => { match message { - NetworkMessage::SendRequest{ peer_id, request, request_id } => { - service.libp2p.send_request(peer_id, request_id, request); - } - NetworkMessage::SendResponse{ peer_id, response, id } => { - service.libp2p.send_response(peer_id, id, response); - } - NetworkMessage::SendErrorResponse{ peer_id, error, id, reason } => { - service.libp2p.respond_with_error(peer_id, id, error, reason); - } - NetworkMessage::UPnPMappingEstablished { tcp_socket, udp_socket} => { - service.upnp_mappings = (tcp_socket.map(|s| s.port()), udp_socket.map(|s| s.port())); - // If there is an external TCP port update, modify our local ENR. 
- if let Some(tcp_socket) = tcp_socket { - if let Err(e) = service.libp2p.swarm.behaviour_mut().discovery_mut().update_enr_tcp_port(tcp_socket.port()) { - warn!(service.log, "Failed to update ENR"; "error" => e); - } - } - // if the discovery service is not auto-updating, update it with the - // UPnP mappings - if !service.discovery_auto_update { - if let Some(udp_socket) = udp_socket { - if let Err(e) = service.libp2p.swarm.behaviour_mut().discovery_mut().update_enr_udp_socket(udp_socket) { - warn!(service.log, "Failed to update ENR"; "error" => e); - } - } - } - }, - NetworkMessage::ValidationResult { - propagation_source, - message_id, - validation_result, - } => { - trace!(service.log, "Propagating gossipsub message"; - "propagation_peer" => ?propagation_source, - "message_id" => %message_id, - "validation_result" => ?validation_result - ); - service - .libp2p - .swarm - .behaviour_mut() - .report_message_validation_result( - &propagation_source, message_id, validation_result - ); - } - NetworkMessage::Publish { messages } => { - let mut topic_kinds = Vec::new(); - for message in &messages { - if !topic_kinds.contains(&message.kind()) { - topic_kinds.push(message.kind()); - } - } - debug!( - service.log, - "Sending pubsub messages"; - "count" => messages.len(), - "topics" => ?topic_kinds - ); - metrics::expose_publish_metrics(&messages); - service.libp2p.swarm.behaviour_mut().publish(messages); - } - NetworkMessage::ReportPeer { peer_id, action, source } => service.libp2p.report_peer(&peer_id, action, source), - NetworkMessage::GoodbyePeer { peer_id, reason, source } => service.libp2p.goodbye_peer(&peer_id, reason, source), - NetworkMessage::AttestationSubscribe { subscriptions } => { - if let Err(e) = service + // attestation information gets processed in the attestation service + PubsubMessage::Attestation(ref subnet_and_attestation) => { + let subnet = subnet_and_attestation.0; + let attestation = &subnet_and_attestation.1; + // checks if we have an aggregator for the slot. If so, we should process + // the attestation, else we just propagate the attestation. + let should_process = self .attestation_service - .validator_subscriptions(subscriptions) { - warn!(service.log, "Attestation validator subscription failed"; "error" => e); - } + .should_process_attestation(subnet, attestation); + self.send_to_router(RouterMessage::PubsubMessage( + id, + source, + message, + should_process, + )); } - NetworkMessage::SyncCommitteeSubscribe { subscriptions } => { - if let Err(e) = service - .sync_committee_service - .validator_subscriptions(subscriptions) { - warn!(service.log, "Sync committee calidator subscription failed"; "error" => e); - } + _ => { + // all else is sent to the router + self.send_to_router(RouterMessage::PubsubMessage( + id, source, message, true, + )); } - NetworkMessage::SubscribeCoreTopics => { - if service.shutdown_after_sync { - let _ = shutdown_sender - .send(ShutdownReason::Success( - "Beacon node completed sync. 
Shutting down as --shutdown-after-sync flag is enabled")) - .await - .map_err(|e| warn!( - service.log, - "failed to send a shutdown signal"; - "error" => %e - )); - return; - } - let mut subscribed_topics: Vec = vec![]; - for topic_kind in lighthouse_network::types::CORE_TOPICS.iter() { - for fork_digest in service.required_gossip_fork_digests() { - let topic = GossipTopic::new(topic_kind.clone(), GossipEncoding::default(), fork_digest); - if service.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) { - subscribed_topics.push(topic); - } else { - warn!(service.log, "Could not subscribe to topic"; "topic" => %topic); - } - } - } - - // If we are to subscribe to all subnets we do it here - if service.subscribe_all_subnets { - for subnet_id in 0..<::EthSpec as EthSpec>::SubnetBitfieldLength::to_u64() { - let subnet = Subnet::Attestation(SubnetId::new(subnet_id)); - // Update the ENR bitfield - service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, true); - for fork_digest in service.required_gossip_fork_digests() { - let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - if service.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) { - subscribed_topics.push(topic); - } else { - warn!(service.log, "Could not subscribe to topic"; "topic" => %topic); - } - } - } - for subnet_id in 0..<::EthSpec as EthSpec>::SyncCommitteeSubnetCount::to_u64() { - let subnet = Subnet::SyncCommittee(SyncSubnetId::new(subnet_id)); - // Update the ENR bitfield - service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, true); - for fork_digest in service.required_gossip_fork_digests() { - let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - if service.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) { - subscribed_topics.push(topic); - } else { - warn!(service.log, "Could not subscribe to topic"; "topic" => %topic); - } - } - } - } + } + } + }, + Libp2pEvent::NewListenAddr(multiaddr) => { + self.network_globals + .listen_multiaddrs + .write() + .push(multiaddr); + } + Libp2pEvent::ZeroListeners => { + let _ = shutdown_sender + .send(ShutdownReason::Failure( + "All listeners are closed. Unable to listen", + )) + .await + .map_err(|e| { + warn!( + self.log, + "failed to send a shutdown signal"; + "error" => %e + ) + }); + } + } + } - if !subscribed_topics.is_empty() { - info!( - service.log, - "Subscribed to topics"; - "topics" => ?subscribed_topics.into_iter().map(|topic| format!("{}", topic)).collect::>() - ); - } - } + /// Handle a message sent to the network service. + async fn on_network_msg( + &mut self, + msg: NetworkMessage, + shutdown_sender: &mut Sender, + ) { + match msg { + NetworkMessage::SendRequest { + peer_id, + request, + request_id, + } => { + self.libp2p.send_request(peer_id, request_id, request); + } + NetworkMessage::SendResponse { + peer_id, + response, + id, + } => { + self.libp2p.send_response(peer_id, id, response); + } + NetworkMessage::SendErrorResponse { + peer_id, + error, + id, + reason, + } => { + self.libp2p.respond_with_error(peer_id, id, error, reason); + } + NetworkMessage::UPnPMappingEstablished { + tcp_socket, + udp_socket, + } => { + self.upnp_mappings = (tcp_socket.map(|s| s.port()), udp_socket.map(|s| s.port())); + // If there is an external TCP port update, modify our local ENR. 
+ if let Some(tcp_socket) = tcp_socket { + if let Err(e) = self + .libp2p + .swarm + .behaviour_mut() + .discovery_mut() + .update_enr_tcp_port(tcp_socket.port()) + { + warn!(self.log, "Failed to update ENR"; "error" => e); } } - // process any attestation service events - Some(attestation_service_message) = service.attestation_service.next() => { - match attestation_service_message { - SubnetServiceMessage::Subscribe(subnet) => { - for fork_digest in service.required_gossip_fork_digests() { - let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - service.libp2p.swarm.behaviour_mut().subscribe(topic); - } - } - SubnetServiceMessage::Unsubscribe(subnet) => { - for fork_digest in service.required_gossip_fork_digests() { - let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - service.libp2p.swarm.behaviour_mut().unsubscribe(topic); - } - } - SubnetServiceMessage::EnrAdd(subnet) => { - service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, true); - } - SubnetServiceMessage::EnrRemove(subnet) => { - service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, false); - } - SubnetServiceMessage::DiscoverPeers(subnets_to_discover) => { - service.libp2p.swarm.behaviour_mut().discover_subnet_peers(subnets_to_discover); + // if the discovery service is not auto-updating, update it with the + // UPnP mappings + if !self.discovery_auto_update { + if let Some(udp_socket) = udp_socket { + if let Err(e) = self + .libp2p + .swarm + .behaviour_mut() + .discovery_mut() + .update_enr_udp_socket(udp_socket) + { + warn!(self.log, "Failed to update ENR"; "error" => e); } } } - // process any sync committee service events - Some(sync_committee_service_message) = service.sync_committee_service.next() => { - match sync_committee_service_message { - SubnetServiceMessage::Subscribe(subnet) => { - for fork_digest in service.required_gossip_fork_digests() { - let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - service.libp2p.swarm.behaviour_mut().subscribe(topic); - } - } - SubnetServiceMessage::Unsubscribe(subnet) => { - for fork_digest in service.required_gossip_fork_digests() { - let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - service.libp2p.swarm.behaviour_mut().unsubscribe(topic); - } - } - SubnetServiceMessage::EnrAdd(subnet) => { - service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, true); - } - SubnetServiceMessage::EnrRemove(subnet) => { - service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, false); - } - SubnetServiceMessage::DiscoverPeers(subnets_to_discover) => { - service.libp2p.swarm.behaviour_mut().discover_subnet_peers(subnets_to_discover); + } + NetworkMessage::ValidationResult { + propagation_source, + message_id, + validation_result, + } => { + trace!(self.log, "Propagating gossipsub message"; + "propagation_peer" => ?propagation_source, + "message_id" => %message_id, + "validation_result" => ?validation_result + ); + self.libp2p + .swarm + .behaviour_mut() + .report_message_validation_result( + &propagation_source, + message_id, + validation_result, + ); + } + NetworkMessage::Publish { messages } => { + let mut topic_kinds = Vec::new(); + for message in &messages { + if !topic_kinds.contains(&message.kind()) { + topic_kinds.push(message.kind()); + } + } + debug!( + self.log, + "Sending pubsub messages"; + "count" => messages.len(), + "topics" => ?topic_kinds + ); + self.libp2p.swarm.behaviour_mut().publish(messages); + } + 
NetworkMessage::ReportPeer { + peer_id, + action, + source, + msg, + } => self.libp2p.report_peer(&peer_id, action, source, msg), + NetworkMessage::GoodbyePeer { + peer_id, + reason, + source, + } => self.libp2p.goodbye_peer(&peer_id, reason, source), + NetworkMessage::AttestationSubscribe { subscriptions } => { + if let Err(e) = self + .attestation_service + .validator_subscriptions(subscriptions) + { + warn!(self.log, "Attestation validator subscription failed"; "error" => e); + } + } + NetworkMessage::SyncCommitteeSubscribe { subscriptions } => { + if let Err(e) = self + .sync_committee_service + .validator_subscriptions(subscriptions) + { + warn!(self.log, "Sync committee validator subscription failed"; "error" => e); + } + } + NetworkMessage::SubscribeCoreTopics => { + if self.shutdown_after_sync { + if let Err(e) = shutdown_sender + .send(ShutdownReason::Success( + "Beacon node completed sync. \ + Shutting down as --shutdown-after-sync flag is enabled", + )) + .await + { + warn!( + self.log, + "failed to send a shutdown signal"; + "error" => %e + ) + } + return; + } + let mut subscribed_topics: Vec = vec![]; + for topic_kind in lighthouse_network::types::CORE_TOPICS.iter() { + for fork_digest in self.required_gossip_fork_digests() { + let topic = GossipTopic::new( + topic_kind.clone(), + GossipEncoding::default(), + fork_digest, + ); + if self.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) { + subscribed_topics.push(topic); + } else { + warn!(self.log, "Could not subscribe to topic"; "topic" => %topic); } } }
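`SubscribeCoreTopics` subscribes each core topic once per required fork digest, so around a fork boundary every topic is live under two digests at once. A rough illustration of that cross-product using plain strings in the eth2 gossip topic format `/eth2/{fork_digest}/{name}/{encoding}` (the digest values below are made up, and Lighthouse's real `GossipTopic` type is replaced by `String`):

```rust
fn core_topic_strings(fork_digests: &[&str], names: &[&str]) -> Vec<String> {
    let mut topics = Vec::new();
    for name in names {
        for digest in fork_digests {
            topics.push(format!("/eth2/{}/{}/ssz_snappy", digest, name));
        }
    }
    topics
}

fn main() {
    // Two digests are active around a fork, so each topic appears twice.
    let topics = core_topic_strings(&["b5303f2a", "afcaaba0"], &["beacon_block"]);
    assert_eq!(
        topics,
        vec![
            "/eth2/b5303f2a/beacon_block/ssz_snappy",
            "/eth2/afcaaba0/beacon_block/ssz_snappy",
        ]
    );
}
```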
- metrics::expose_receive_metrics(&message); - - match message { - // attestation information gets processed in the attestation service - PubsubMessage::Attestation(ref subnet_and_attestation) => { - let subnet = subnet_and_attestation.0; - let attestation = &subnet_and_attestation.1; - // checks if we have an aggregator for the slot. If so, we should process - // the attestation, else we just just propagate the Attestation. - let should_process = service.attestation_service.should_process_attestation( - subnet, - attestation, - ); - let _ = service - .router_send - .send(RouterMessage::PubsubMessage(id, source, message, should_process)) - .map_err(|_| { - debug!(service.log, "Failed to send pubsub message to router"); - }); - } - _ => { - // all else is sent to the router - let _ = service - .router_send - .send(RouterMessage::PubsubMessage(id, source, message, true)) - .map_err(|_| { - debug!(service.log, "Failed to send pubsub message to router"); - }); - } - } + // If we are to subscribe to all subnets we do it here + if self.subscribe_all_subnets { + for subnet_id in 0..<::EthSpec as EthSpec>::SubnetBitfieldLength::to_u64() { + let subnet = Subnet::Attestation(SubnetId::new(subnet_id)); + // Update the ENR bitfield + self.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, true); + for fork_digest in self.required_gossip_fork_digests() { + let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); + if self.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) { + subscribed_topics.push(topic); + } else { + warn!(self.log, "Could not subscribe to topic"; "topic" => %topic); } } - Libp2pEvent::NewListenAddr(multiaddr) => { - service.network_globals.listen_multiaddrs.write().push(multiaddr); - } - Libp2pEvent::ZeroListeners => { - let _ = shutdown_sender - .send(ShutdownReason::Failure("All listeners are closed. Unable to listen")) - .await - .map_err(|e| warn!( - service.log, - "failed to send a shutdown signal"; - "error" => %e - )); - } } - } - Some(_) = &mut service.next_fork_update => { - let new_enr_fork_id = service.beacon_chain.enr_fork_id(); - - let fork_context = &service.fork_context; - if let Some(new_fork_name) = fork_context.from_context_bytes(new_enr_fork_id.fork_digest) { - info!( - service.log, - "Transitioned to new fork"; - "old_fork" => ?fork_context.current_fork(), - "new_fork" => ?new_fork_name, - ); - fork_context.update_current_fork(*new_fork_name); - - service - .libp2p + let subnet_max = <::EthSpec as EthSpec>::SyncCommitteeSubnetCount::to_u64(); + for subnet_id in 0..subnet_max { + let subnet = Subnet::SyncCommittee(SyncSubnetId::new(subnet_id)); + // Update the ENR bitfield + self.libp2p .swarm .behaviour_mut() - .update_fork_version(new_enr_fork_id.clone()); - // Reinitialize the next_fork_update - service.next_fork_update = Box::pin(next_fork_delay(&service.beacon_chain).into()); - - // Set the next_unsubscribe delay. - let epoch_duration = service.beacon_chain.spec.seconds_per_slot * T::EthSpec::slots_per_epoch(); - let unsubscribe_delay = Duration::from_secs(UNSUBSCRIBE_DELAY_EPOCHS * epoch_duration); - - // Update the `next_fork_subscriptions` timer if the next fork is known. 
- service.next_fork_subscriptions = Box::pin(next_fork_subscriptions_delay(&service.beacon_chain).into()); - service.next_unsubscribe = Box::pin(Some(tokio::time::sleep(unsubscribe_delay)).into()); - info!(service.log, "Network will unsubscribe from old fork gossip topics in a few epochs"; "remaining_epochs" => UNSUBSCRIBE_DELAY_EPOCHS); - } else { - crit!(service.log, "Unknown new enr fork id"; "new_fork_id" => ?new_enr_fork_id); + .update_enr_subnet(subnet, true); + for fork_digest in self.required_gossip_fork_digests() { + let topic = GossipTopic::new( + subnet.into(), + GossipEncoding::default(), + fork_digest, + ); + if self.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) { + subscribed_topics.push(topic); + } else { + warn!(self.log, "Could not subscribe to topic"; "topic" => %topic); + } + } + } + } + if !subscribed_topics.is_empty() { + info!( + self.log, + "Subscribed to topics"; + "topics" => ?subscribed_topics.into_iter().map(|topic| format!("{}", topic)).collect::>() + ); + } + } + } + } + + fn update_gossipsub_parameters(&mut self) { + if let Ok(slot) = self.beacon_chain.slot() { + if let Some(active_validators) = self + .beacon_chain + .with_head(|head| { + Ok::<_, BeaconChainError>( + head.beacon_state + .get_cached_active_validator_indices(RelativeEpoch::Current) + .map(|indices| indices.len()) + .ok() + .or_else(|| { + // if the active validator cache was not built, we count the + // active validators + self.beacon_chain.epoch().ok().map(|current_epoch| { + head.beacon_state + .validators() + .iter() + .filter(|validator| validator.is_active_at(current_epoch)) + .count() + }) + }), + ) + }) + .unwrap_or(None) + { + if self + .libp2p + .swarm + .behaviour_mut() + .update_gossipsub_parameters(active_validators, slot) + .is_err() + { + error!( + self.log, + "Failed to update gossipsub parameters"; + "active_validators" => active_validators + ); + } + } + } + } + + fn on_attestation_service_msg(&mut self, msg: SubnetServiceMessage) { + match msg { + SubnetServiceMessage::Subscribe(subnet) => { + for fork_digest in self.required_gossip_fork_digests() { + let topic = + GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); + self.libp2p.swarm.behaviour_mut().subscribe(topic); + } + } + SubnetServiceMessage::Unsubscribe(subnet) => { + for fork_digest in self.required_gossip_fork_digests() { + let topic = + GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); + self.libp2p.swarm.behaviour_mut().unsubscribe(topic); + } + } + SubnetServiceMessage::EnrAdd(subnet) => { + self.libp2p + .swarm + .behaviour_mut() 
.update_enr_subnet(subnet, true); + } + SubnetServiceMessage::EnrRemove(subnet) => { + self.libp2p + .swarm + .behaviour_mut() + .update_enr_subnet(subnet, false); + } + SubnetServiceMessage::DiscoverPeers(subnets_to_discover) => { + self.libp2p + .swarm + .behaviour_mut() + .discover_subnet_peers(subnets_to_discover); + } + } + } + + fn on_sync_committee_service_message(&mut self, msg: SubnetServiceMessage) { + match msg { + SubnetServiceMessage::Subscribe(subnet) => { + for fork_digest in self.required_gossip_fork_digests() { + let topic = + GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); + self.libp2p.swarm.behaviour_mut().subscribe(topic); } } - metrics::update_bandwidth_metrics(service.libp2p.bandwidth.clone()); + SubnetServiceMessage::Unsubscribe(subnet) => { + for fork_digest in self.required_gossip_fork_digests() { + let topic = + GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); + self.libp2p.swarm.behaviour_mut().unsubscribe(topic); + } + } + SubnetServiceMessage::EnrAdd(subnet) => { + self.libp2p + .swarm + .behaviour_mut() + .update_enr_subnet(subnet, true); + } + SubnetServiceMessage::EnrRemove(subnet) => { + self.libp2p + .swarm + .behaviour_mut() + .update_enr_subnet(subnet, false); + } + SubnetServiceMessage::DiscoverPeers(subnets_to_discover) => { + self.libp2p + .swarm + .behaviour_mut() + .discover_subnet_peers(subnets_to_discover); + } } - }, "network"); + } + + fn update_next_fork(&mut self) { + let new_enr_fork_id = self.beacon_chain.enr_fork_id(); + + let fork_context = &self.fork_context; + if let Some(new_fork_name) = fork_context.from_context_bytes(new_enr_fork_id.fork_digest) { + info!( + self.log, + "Transitioned to new fork"; + "old_fork" => ?fork_context.current_fork(), + "new_fork" => ?new_fork_name, + ); + fork_context.update_current_fork(*new_fork_name); + + self.libp2p + .swarm + .behaviour_mut() + .update_fork_version(new_enr_fork_id); + // Reinitialize the next_fork_update + self.next_fork_update = Box::pin(next_fork_delay(&self.beacon_chain).into()); + + // Set the next_unsubscribe delay. + let epoch_duration = + self.beacon_chain.spec.seconds_per_slot * T::EthSpec::slots_per_epoch(); + let unsubscribe_delay = Duration::from_secs(UNSUBSCRIBE_DELAY_EPOCHS * epoch_duration); + + // Update the `next_fork_subscriptions` timer if the next fork is known. + self.next_fork_subscriptions = + Box::pin(next_fork_subscriptions_delay(&self.beacon_chain).into()); + self.next_unsubscribe = Box::pin(Some(tokio::time::sleep(unsubscribe_delay)).into()); + info!(self.log, "Network will unsubscribe from old fork gossip topics in a few epochs"; "remaining_epochs" => UNSUBSCRIBE_DELAY_EPOCHS); + } else { + crit!(self.log, "Unknown new enr fork id"; "new_fork_id" => ?new_enr_fork_id); + } + } } /// Returns a `Sleep` that triggers after the next change in the beacon chain fork version. diff --git a/beacon_node/network/src/service/tests.rs b/beacon_node/network/src/service/tests.rs index 33b190e4808..d78b1fe4f80 100644 --- a/beacon_node/network/src/service/tests.rs +++ b/beacon_node/network/src/service/tests.rs @@ -67,9 +67,10 @@ mod tests { // Create a new network service which implicitly gets dropped at the // end of the block. 
- let _network_service = NetworkService::start(beacon_chain.clone(), &config, executor) - .await - .unwrap(); + let _network_service = + NetworkService::start(beacon_chain.clone(), &config, executor, None) + .await + .unwrap(); drop(signal); }); diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index b9016b9fdcd..0c34eef274f 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -54,6 +54,13 @@ impl BatchConfig for BackFillBatchConfig { fn max_batch_processing_attempts() -> u8 { MAX_BATCH_PROCESSING_ATTEMPTS } + fn batch_attempt_hash(blocks: &[SignedBeaconBlock]) -> u64 { + use std::collections::hash_map::DefaultHasher; + use std::hash::{Hash, Hasher}; + let mut hasher = DefaultHasher::new(); + blocks.hash(&mut hasher); + hasher.finish() + } } /// Return type when attempting to start the backfill sync process. @@ -119,7 +126,7 @@ pub struct BackFillSync { /// Batches validated by this chain. validated_batches: u64, - /// We keep track of peer that are participating in the backfill sync. Unlike RangeSync, + /// We keep track of peers that are participating in the backfill sync. Unlike RangeSync, /// BackFillSync uses all synced peers to download the chain from. If BackFillSync fails, we don't /// want to penalize all our synced peers, so we use this variable to keep track of peers that /// have participated and only penalize these peers if backfill sync fails. @@ -539,9 +546,17 @@ impl BackFillSync { "error" => %e, "batch" => self.processing_target); // This is unlikely to happen but it would stall syncing since the batch now has no // blocks to continue, and the chain is expecting a processing result that won't - // arrive. To mitigate this, (fake) fail this processing so that the batch is + // arrive. To mitigate this, (fake) fail this processing so that the batch is // re-downloaded. - self.on_batch_process_result(network, batch_id, &BatchProcessResult::Failed(false)) + self.on_batch_process_result( + network, + batch_id, + &BatchProcessResult::Failed { + imported_blocks: false, + // The beacon processor queue is full, no need to penalize the peer. + peer_action: None, + }, + ) } else { Ok(ProcessResult::Successful) } @@ -621,7 +636,10 @@ impl BackFillSync { self.process_completed_batches(network) } } - BatchProcessResult::Failed(imported_blocks) => { + BatchProcessResult::Failed { + imported_blocks, + peer_action, + } => { let batch = match self.batches.get_mut(&batch_id) { Some(v) => v, None => { @@ -659,12 +677,20 @@ impl BackFillSync { // that it is likely all peers are sending invalid batches // repeatedly and are either malicious or faulty. We stop the backfill sync and // report all synced peers that have participated. - let action = PeerAction::LowToleranceError; - warn!(self.log, "Backfill batch failed to download. Penalizing peers"; - "score_adjustment" => %action, - "batch_epoch"=> batch_id); - for peer in self.participating_peers.drain() { - network.report_peer(peer, action); + warn!( + self.log, + "Backfill batch failed to download. 
Penalizing peers"; + "score_adjustment" => %peer_action + .as_ref() + .map(ToString::to_string) + .unwrap_or_else(|| "None".into()), + "batch_epoch"=> batch_id + ); + + if let Some(peer_action) = peer_action { + for peer in self.participating_peers.drain() { + network.report_peer(peer, *peer_action, "backfill_batch_failed"); + } } self.fail_sync(BackFillError::BatchProcessingFailed(batch_id)) .map(|_| ProcessResult::Successful) @@ -776,7 +802,7 @@ impl BackFillSync { for attempt in batch.attempts() { // The validated batch has been re-processed if attempt.hash != processed_attempt.hash { - // The re-downloaded version was different + // The re-downloaded version was different. if processed_attempt.peer_id != attempt.peer_id { // A different peer sent the correct batch, the previous peer did not. // We negatively score the original peer. @@ -785,7 +811,11 @@ impl BackFillSync { "batch_epoch" => id, "score_adjustment" => %action, "original_peer" => %attempt.peer_id, "new_peer" => %processed_attempt.peer_id ); - network.report_peer(attempt.peer_id, action); + network.report_peer( + attempt.peer_id, + action, + "backfill_reprocessed_original_peer", + ); } else { // The same peer corrected its previous mistake. There was an error, so we // negatively score the original peer. @@ -794,7 +824,11 @@ impl BackFillSync { "batch_epoch" => id, "score_adjustment" => %action, "original_peer" => %attempt.peer_id, "new_peer" => %processed_attempt.peer_id ); - network.report_peer(attempt.peer_id, action); + network.report_peer( + attempt.peer_id, + action, + "backfill_reprocessed_same_peer", + ); } } } diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index f0726ca947b..960dd12afce 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -131,13 +131,16 @@ pub enum SyncRequestType { RangeSync(Epoch, ChainId), } -/// The result of processing a multiple blocks (a chain segment). +/// The result of processing multiple blocks (a chain segment). #[derive(Debug)] pub enum BatchProcessResult { /// The batch was completed successfully. It carries whether the sent batch contained blocks. Success(bool), /// The batch processing failed. It carries whether the processing imported any block. - Failed(bool), + Failed { + imported_blocks: bool, + peer_action: Option, + }, } /// Maintains a sequential list of parents to lookup and the lookup's current state. @@ -366,8 +369,11 @@ impl SyncManager { } else { crit!(self.log, "Parent chain has no blocks"); } - self.network - .report_peer(peer_id, PeerAction::MidToleranceError); + self.network.report_peer( + peer_id, + PeerAction::MidToleranceError, + "bbroot_failed_chains", + ); return; } // add the block to response @@ -385,8 +391,11 @@ impl SyncManager { // tolerate this behaviour. if !single_block_request.block_returned { warn!(self.log, "Peer didn't respond with a block it referenced"; "referenced_block_hash" => %single_block_request.hash, "peer_id" => %peer_id); - self.network - .report_peer(peer_id, PeerAction::MidToleranceError); + self.network.report_peer( + peer_id, + PeerAction::MidToleranceError, + "bbroot_no_block", + ); } return; } @@ -509,8 +518,11 @@ impl SyncManager { warn!(self.log, "Single block lookup failed"; "outcome" => ?outcome); // This could be a range of errors. But we couldn't process the block. // For now we consider this a mid tolerance error. 
- self.network - .report_peer(peer_id, PeerAction::MidToleranceError); + self.network.report_peer( + peer_id, + PeerAction::MidToleranceError, + "single_block_lookup_failed", + ); } } } @@ -833,8 +845,11 @@ impl SyncManager { self.request_parent(parent_request); // We do not tolerate these kinds of errors. We will accept a few but these are signs // of a faulty peer. - self.network - .report_peer(peer, PeerAction::LowToleranceError); + self.network.report_peer( + peer, + PeerAction::LowToleranceError, + "parent_request_bad_hash", + ); } else { // The last block in the queue is the only one for which processing has not yet been attempted. // @@ -904,6 +919,7 @@ impl SyncManager { self.network.report_peer( parent_request.last_submitted_peer, PeerAction::MidToleranceError, + "parent_request_err", ); } } @@ -942,6 +958,7 @@ impl SyncManager { self.network.report_peer( parent_request.last_submitted_peer, PeerAction::LowToleranceError, + "request_parent_import_failed", ); return; // drop the request } @@ -1109,8 +1126,11 @@ impl SyncManager { // A peer sent an object (block or attestation) that referenced a parent. // The processing of this chain failed. self.failed_chains.insert(chain_head); - self.network - .report_peer(peer_id, PeerAction::MidToleranceError); + self.network.report_peer( + peer_id, + PeerAction::MidToleranceError, + "parent_lookup_failed", + ); } } } diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index e991e86e059..9415f210026 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -170,13 +170,14 @@ impl SyncNetworkContext { } /// Reports to the scoring algorithm the behaviour of a peer. - pub fn report_peer(&mut self, peer_id: PeerId, action: PeerAction) { + pub fn report_peer(&mut self, peer_id: PeerId, action: PeerAction, msg: &'static str) { debug!(self.log, "Sync reporting peer"; "peer_id" => %peer_id, "action" => %action); self.network_send .send(NetworkMessage::ReportPeer { peer_id, action, source: ReportSource::SyncService, + msg, }) .unwrap_or_else(|e| { warn!(self.log, "Could not report peer, channel failed"; "error"=> %e); diff --git a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index 70e27b5a0ac..7239081ad13 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -1,7 +1,6 @@ use crate::sync::RequestId; use lighthouse_network::rpc::methods::BlocksByRangeRequest; use lighthouse_network::PeerId; -use ssz::Encode; use std::collections::HashSet; use std::hash::{Hash, Hasher}; use std::ops::Sub; @@ -20,6 +19,34 @@ pub trait BatchConfig { fn max_batch_download_attempts() -> u8; /// The max batch processing attempts. fn max_batch_processing_attempts() -> u8; + /// Hashing function of a batch's attempt. Used for scoring purposes. + /// + /// When a batch fails processing, it is possible that the batch is wrong (faulty or + /// incomplete) or that a previous one is wrong. For this reason we need to re-download and + /// re-process the batches awaiting validation and the current one. Consider this scenario: + /// + /// ```ignore + /// BatchA BatchB BatchC BatchD + /// -----X Empty Empty Y----- + /// ``` + /// + /// BatchA declares that its last block is X, but BatchD declares that its first block is Y. There is no + /// way to know if BatchD is faulty/incomplete or if batches B and/or C are missing blocks. 
It is + /// also possible that BatchA belongs to a different chain from the rest, starting at some block + /// midway through the batch's range. For this reason, the four batches would need to be re-downloaded + /// and re-processed. + /// + /// If BatchD was actually good, it will still register two processing attempts for the same set of + /// blocks. In this case, we don't want to penalize the peer that provided the first version, since + /// it's equal to the successfully processed one. + /// + /// The function `batch_attempt_hash` provides a way to compare two batch attempts without + /// storing the full set of blocks. + /// + /// Note that simpler hashing functions considered in the past (hash of first block, hash of last + /// block, number of received blocks) are not good enough to differentiate attempts. For this + /// reason, we hash the complete set of blocks both in RangeSync and BackFillSync. + fn batch_attempt_hash(blocks: &[SignedBeaconBlock]) -> u64; } pub struct RangeSyncBatchConfig {} @@ -31,6 +58,11 @@ impl BatchConfig for RangeSyncBatchConfig { fn max_batch_processing_attempts() -> u8 { MAX_BATCH_PROCESSING_ATTEMPTS } + fn batch_attempt_hash(blocks: &[SignedBeaconBlock]) -> u64 { + let mut hasher = std::collections::hash_map::DefaultHasher::new(); + blocks.hash(&mut hasher); + hasher.finish() + } } /// Error type of a batch in a wrong state. @@ -301,7 +333,7 @@ impl BatchInfo { pub fn start_processing(&mut self) -> Result>, WrongState> { match self.state.poison() { BatchState::AwaitingProcessing(peer, blocks) => { - self.state = BatchState::Processing(Attempt::new(peer, &blocks)); + self.state = BatchState::Processing(Attempt::new::(peer, &blocks)); Ok(blocks) } BatchState::Poisoned => unreachable!("Poisoned batch"), @@ -387,11 +419,8 @@ pub struct Attempt { } impl Attempt { - #[allow(clippy::ptr_arg)] - fn new(peer_id: PeerId, blocks: &Vec>) -> Self { - let mut hasher = std::collections::hash_map::DefaultHasher::new(); - blocks.as_ssz_bytes().hash(&mut hasher); - let hash = hasher.finish(); + fn new(peer_id: PeerId, blocks: &[SignedBeaconBlock]) -> Self { + let hash = B::batch_attempt_hash(blocks); Attempt { peer_id, hash } } } diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index a1acac614ea..4474f1cc34e 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -313,7 +314,14 @@ impl SyncingChain { // blocks to continue, and the chain is expecting a processing result that won't // arrive. To mitigate this, (fake) fail this processing so that the batch is // re-downloaded. - self.on_batch_process_result(network, batch_id, &BatchProcessResult::Failed(false)) + self.on_batch_process_result( + network, + batch_id, + &BatchProcessResult::Failed { + imported_blocks: false, + peer_action: None, + }, + ) } else { Ok(KeepChain) } @@ -488,7 +495,10 @@ impl SyncingChain { self.process_completed_batches(network) } } - BatchProcessResult::Failed(imported_blocks) => { + BatchProcessResult::Failed { + imported_blocks, + peer_action, + } => { let batch = self.batches.get_mut(&batch_id).ok_or_else(|| { RemoveChain::WrongChainState(format!( "Batch not found for current processing target {}", @@ -511,12 +521,20 @@ impl SyncingChain { // report all peers. // There are some edge cases with forks that could land us in this situation. // This should be unlikely, so we tolerate these errors, but not often. 
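The `batch_attempt_hash` implementations added here (for both `RangeSyncBatchConfig` and the earlier `BackFillBatchConfig`) reduce to hashing the whole block slice with the standard library's `DefaultHasher`. The property that matters for the re-download logic can be demonstrated over plain integers standing in for `SignedBeaconBlock`s:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Same shape as the trait method above, generic over any hashable "block".
fn batch_attempt_hash<T: Hash>(blocks: &[T]) -> u64 {
    let mut hasher = DefaultHasher::new();
    blocks.hash(&mut hasher);
    hasher.finish()
}

fn main() {
    // A re-download of the same (correct) batch produces the same hash, so the
    // peer that sent the first copy is not penalized for a duplicate attempt...
    assert_eq!(batch_attempt_hash(&[1, 2, 3]), batch_attempt_hash(&[1, 2, 3]));
    // ...while any difference in the block set is detected.
    assert_ne!(batch_attempt_hash(&[1, 2, 3]), batch_attempt_hash(&[1, 2]));
}
```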
- let action = PeerAction::LowToleranceError; - warn!(self.log, "Batch failed to download. Dropping chain scoring peers"; - "score_adjustment" => %action, - "batch_epoch"=> batch_id); - for (peer, _) in self.peers.drain() { - network.report_peer(peer, action); + warn!( + self.log, + "Batch failed to download. Dropping chain scoring peers"; + "score_adjustment" => %peer_action + .as_ref() + .map(ToString::to_string) + .unwrap_or_else(|| "None".into()), + "batch_epoch"=> batch_id + ); + + if let Some(peer_action) = peer_action { + for (peer, _) in self.peers.drain() { + network.report_peer(peer, *peer_action, "batch_failed"); + } } Err(RemoveChain::ChainFailed(batch_id)) } else { @@ -606,7 +624,11 @@ impl SyncingChain { "batch_epoch" => id, "score_adjustment" => %action, "original_peer" => %attempt.peer_id, "new_peer" => %processed_attempt.peer_id ); - network.report_peer(attempt.peer_id, action); + network.report_peer( + attempt.peer_id, + action, + "batch_reprocessed_original_peer", + ); } else { // The same peer corrected its previous mistake. There was an error, so we // negatively score the original peer. @@ -615,7 +637,11 @@ impl SyncingChain { "batch_epoch" => id, "score_adjustment" => %action, "original_peer" => %attempt.peer_id, "new_peer" => %processed_attempt.peer_id ); - network.report_peer(attempt.peer_id, action); + network.report_peer( + attempt.peer_id, + action, + "batch_reprocessed_same_peer", + ); } } } diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 2cc3ffaf6be..c9b252ca116 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -6,15 +6,16 @@ mod metrics; mod persistence; mod sync_aggregate_id; +pub use attestation::AttMaxCover; +pub use max_cover::MaxCover; pub use persistence::{ PersistedOperationPool, PersistedOperationPoolAltair, PersistedOperationPoolBase, }; use crate::sync_aggregate_id::SyncAggregateId; -use attestation::AttMaxCover; use attestation_id::AttestationId; use attester_slashing::AttesterSlashingMaxCover; -use max_cover::{maximum_cover, MaxCover}; +use max_cover::maximum_cover; use parking_lot::RwLock; use state_processing::per_block_processing::errors::AttestationValidationError; use state_processing::per_block_processing::{ diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 0b2cda91ef4..4c2960c9d6a 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -104,6 +104,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .help("One or more comma-delimited base64-encoded ENR's to bootstrap the p2p network. Multiaddr is also supported.") .takes_value(true), ) + .arg( + Arg::with_name("network-load") + .long("network-load") + .value_name("INTEGER") + .help("Lighthouse's network can be tuned for bandwidth/performance. Setting this to a high value will increase the bandwidth lighthouse uses, increasing the likelihood of redundant information in exchange for faster communication. This can marginally increase validator profit by receiving messages faster on the network. Lower values decrease bandwidth usage, but make communication slower, which can lead to reduced validator performance. 
Values are in the range [1,5].") + .default_value("3") + .set(clap::ArgSettings::Hidden) + .takes_value(true), + ) .arg( Arg::with_name("disable-upnp") .long("disable-upnp") @@ -402,11 +411,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("fee-recipient") .long("fee-recipient") + .value_name("FEE-RECIPIENT") .help("Once the merge has happened, this address will receive transaction fees \ collected from any blocks produced by this node. Defaults to a junk \ address whilst the merge is in development stages. THE DEFAULT VALUE \ WILL BE REMOVED BEFORE THE MERGE ENTERS PRODUCTION") .requires("merge") + .takes_value(true) ) /* diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index ce2f65e70b4..20408229311 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -469,7 +469,7 @@ pub fn get_config( } client_config.chain.max_network_size = - lighthouse_network::gossip_max_size(spec.merge_fork_epoch.is_some()); + lighthouse_network::gossip_max_size(spec.bellatrix_fork_epoch.is_some()); if cli_args.is_present("slasher") { let slasher_dir = if let Some(slasher_dir) = cli_args.value_of("slasher-dir") { @@ -626,6 +626,13 @@ pub fn set_network_config( config.discovery_port = port; } + if let Some(value) = cli_args.value_of("network-load") { + let network_load = value + .parse::<u8>() + .map_err(|_| format!("Invalid integer: {}", value))?; + config.network_load = network_load; + } + if let Some(boot_enr_str) = cli_args.value_of("boot-nodes") { let mut enrs: Vec<Enr> = vec![]; let mut multiaddrs: Vec<Multiaddr> = vec![]; @@ -714,7 +721,7 @@ pub fn set_network_config( None } }) { - addr.push_str(&format!(":{}", enr_udp_port.to_string())); + addr.push_str(&format!(":{}", enr_udp_port)); } else { return Err( "enr-udp-port must be set for node to be discoverable with dns address" diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index 121e22fc659..66a6cf5d28c 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -22,6 +22,6 @@ serde = "1.0.116" serde_derive = "1.0.116" lazy_static = "1.4.0" lighthouse_metrics = { path = "../../common/lighthouse_metrics" } -lru = "0.6.0" +lru = "0.7.1" sloggers = { version = "2.1.1", features = ["json"] } directory = { path = "../../common/directory" } diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 93cec12401e..7552d42306c 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -10,6 +10,7 @@ * [Build from Source](./installation-source.md) * [Raspberry Pi 4](./pi.md) * [Cross-Compiling](./cross-compiling.md) + * [Homebrew](./homebrew.md) * [Key Management](./key-management.md) * [Create a wallet](./wallet-create.md) * [Create a validator](./validator-create.md) diff --git a/book/src/advanced-pre-releases.md b/book/src/advanced-pre-releases.md index 3d1b14d1b18..b90bd631d41 100644 --- a/book/src/advanced-pre-releases.md +++ b/book/src/advanced-pre-releases.md @@ -1,40 +1,4 @@ # Pre-Releases -[sigp/lighthouse]: https://github.com/sigp/lighthouse -[sigp/lighthouse/releases/latest]: https://github.com/sigp/lighthouse/releases/latest -[sigp/lighthouse/releases]: https://github.com/sigp/lighthouse/releases -[`v1.4.0-rc.0`]: https://github.com/sigp/lighthouse/releases/tag/v1.4.0-rc.0 -[`v1.4.0`]: https://github.com/sigp/lighthouse/releases/tag/v1.4.0 - -From time-to-time, Lighthouse *pre-releases* will be published on the [sigp/lighthouse] repository. 
-These releases have passed the usual automated testing, however the developers would like to see it -running "in the wild" in a variety of configurations before declaring it an official, stable -release. Pre-releases are also used by developers to get feedback from users regarding the -ergonomics of new features or changes. - -Github will clearly show such releases as a "Pre-release" and they *will not* show up on -[sigp/lighthouse/releases/latest]. However, pre-releases *will* show up on the -[sigp/lighthouse/releases] page, so **please pay attention to avoid the pre-releases when you're -looking for stable Lighthouse**. - -### Examples - -[`v1.4.0-rc.0`] has `rc` (release candidate) in the version string and is therefore a pre-release. This -release is *not* stable and is *not* intended for critical tasks on mainnet (e.g., staking). - -However, [`v1.4.0`] is considered stable since it is not marked as a pre-release and does not -contain `rc` in the version string. This release is intended for use on mainnet. - -## When to use a pre-release - -Users may wish to try a pre-release for the following reasons: - -- To preview new features before they are officially released. -- To help detect bugs and regressions before they reach production. -- To provide feedback on annoyances before they make it into a release and become harder to change or revert. - -## When *not* to use a pre-release - -It is not recommended to use pre-releases for any critical tasks on mainnet (e.g., staking). To test -critical features, try one of the testnets (e.g., Prater). - +Pre-releases are now referred to as [Release Candidates](./advanced-release-candidates.md). The terms may +be used interchangeably. diff --git a/book/src/advanced-release-candidates.md b/book/src/advanced-release-candidates.md new file mode 100644 index 00000000000..842bc484041 --- /dev/null +++ b/book/src/advanced-release-candidates.md @@ -0,0 +1,43 @@ +# Release Candidates + +[sigp/lighthouse]: https://github.com/sigp/lighthouse +[sigp/lighthouse/releases/latest]: https://github.com/sigp/lighthouse/releases/latest +[sigp/lighthouse/releases]: https://github.com/sigp/lighthouse/releases +[`v1.4.0-rc.0`]: https://github.com/sigp/lighthouse/releases/tag/v1.4.0-rc.0 +[`v1.4.0`]: https://github.com/sigp/lighthouse/releases/tag/v1.4.0 + +From time-to-time, Lighthouse *release candidates* will be published on the [sigp/lighthouse] +repository. These releases have passed the usual automated testing; however, the developers would +like to see them running "in the wild" in a variety of configurations before declaring them official, +stable releases. Release candidates are also used by developers to get feedback from users regarding the +ergonomics of new features or changes. + +GitHub will clearly show such releases as a "Pre-release" and they *will not* show up on +[sigp/lighthouse/releases/latest]. However, release candidates *will* show up on the +[sigp/lighthouse/releases] page, so **please pay attention to avoid the release candidates when +you're looking for stable Lighthouse**. + +From time to time, Lighthouse may use the terms "release candidate" and "pre-release" +interchangeably. A pre-release is identical to a release candidate. + +### Examples + +[`v1.4.0-rc.0`] has `rc` in the version string and is therefore a release candidate. This release is +*not* stable and is *not* intended for critical tasks on mainnet (e.g., staking). 
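Since the distinction is carried entirely by the version string, it can also be checked mechanically. An illustrative (unofficial) sketch:

```rust
/// Illustrative only: a release candidate carries an `rc` pre-release
/// component in its version tag (e.g. `v1.4.0-rc.0`), while a stable
/// release such as `v1.4.0` does not.
fn is_release_candidate(version: &str) -> bool {
    version.contains("-rc.")
}

fn main() {
    assert!(is_release_candidate("v1.4.0-rc.0"));
    assert!(!is_release_candidate("v1.4.0"));
    println!("version checks passed");
}
```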
+ +However, [`v1.4.0`] is considered stable since it is not marked as a release candidate and does not +contain `rc` in the version string. This release is intended for use on mainnet. + +## When to use a release candidate + +Users may wish to try a release candidate for the following reasons: + +- To preview new features before they are officially released. +- To help detect bugs and regressions before they reach production. +- To provide feedback on annoyances before they make it into a release and become harder to change or revert. + +## When *not* to use a release candidate + +It is not recommended to use release candidates for any critical tasks on mainnet (e.g., staking). +To test critical features, try one of the testnets (e.g., Prater). + diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index 8ea35f7348a..7836ac14a48 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -407,4 +407,44 @@ The endpoint will return immediately. See the beacon node logs for an indication ### `/lighthouse/database/historical_blocks` Manually provide `SignedBeaconBlock`s to backfill the database. This is intended -for use by Lighthouse developers during testing only. \ No newline at end of file +for use by Lighthouse developers during testing only. + +### `/lighthouse/block_rewards` + +Fetch information about the block rewards paid to proposers for a range of consecutive blocks. + +Two query parameters are required: + +* `start_slot` (inclusive): the slot of the first block to compute rewards for. +* `end_slot` (inclusive): the slot of the last block to compute rewards for. + +Example: + +```bash +curl "http://localhost:5052/lighthouse/block_rewards?start_slot=1&end_slot=32" | jq +``` + +```json +[ + { + "block_root": "0x51576c2fcf0ab68d7d93c65e6828e620efbb391730511ffa35584d6c30e51410", + "attestation_rewards": { + "total": 4941156, + }, + .. + }, + .. +] +``` + +Caveats: + +* Presently only attestation rewards are computed. +* The output format is verbose and subject to change. Please see [`BlockReward`][block_reward_src] + in the source. +* For maximum efficiency the `start_slot` should satisfy `start_slot % slots_per_restore_point == 1`. + This is because the state _prior_ to the `start_slot` needs to be loaded from the database, and + loading a state on a boundary is most efficient. + +[block_reward_src]: +https://github.com/sigp/lighthouse/tree/unstable/common/eth2/src/lighthouse/block_reward.rs \ No newline at end of file diff --git a/book/src/faq.md b/book/src/faq.md index ae43aec20e9..419f95dcbd3 100644 --- a/book/src/faq.md +++ b/book/src/faq.md @@ -55,14 +55,10 @@ voting period the validator might have to wait ~3.4 hours for next voting period. In times of very, very severe network issues, the network may even fail to vote in new Eth1 blocks, stopping all new validator deposits! -> Note: you can see the list of validators included in the beacon chain using -> our REST API: [/beacon/validators/all](./http/beacon.md#beaconvalidatorsall) - #### 2. Waiting for a validator to be activated If a validator has provided an invalid public key or signature, they will -_never_ be activated or even show up in -[/beacon/validators/all](./http/beacon.html#beaconvalidatorsall). +_never_ be activated. They will simply be forgotten by the beacon chain! 
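For completeness, here is one way a consumer might call the `/lighthouse/block_rewards` endpoint documented above. This is a sketch only: it assumes the `reqwest` crate with its `blocking` and `json` features plus `serde`, and `BlockRewardSummary` is a hypothetical trimmed mirror of the canonical `BlockReward` type (unknown response fields are ignored by default during deserialization):

```rust
use serde::Deserialize;

/// Hypothetical mirror of the fields shown in the example response above;
/// the canonical type is `BlockReward` in `common/eth2`.
#[derive(Debug, Deserialize)]
struct AttestationRewardsSummary {
    total: u64,
}

#[derive(Debug, Deserialize)]
struct BlockRewardSummary {
    block_root: String,
    attestation_rewards: AttestationRewardsSummary,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Assumes a beacon node serving the HTTP API on the default port.
    let url = "http://localhost:5052/lighthouse/block_rewards?start_slot=1&end_slot=32";
    let rewards: Vec<BlockRewardSummary> = reqwest::blocking::get(url)?.json()?;
    for reward in &rewards {
        println!(
            "{}: {} GWei from attestations",
            reward.block_root, reward.attestation_rewards.total
        );
    }
    Ok(())
}
```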
But, if those parameters were correct, once the Eth1 delays have elapsed and the validator appears in the beacon chain, there's _another_ delay before the validator becomes "active" @@ -143,7 +139,7 @@ See [here](./slashing-protection.md#misplaced-slashing-database). If you are updating to new release binaries, it will be the same process as described [here.](./installation-binaries.md) -If you are updating by rebuilding from source, see [here.](./installation-source.md#updating-lighthouse) +If you are updating by rebuilding from source, see [here.](./installation-source.md#update-lighthouse) If you are running the docker image provided by Sigma Prime on Dockerhub, you can update to specific versions, for example: diff --git a/book/src/homebrew.md b/book/src/homebrew.md new file mode 100644 index 00000000000..317dc0e0fa6 --- /dev/null +++ b/book/src/homebrew.md @@ -0,0 +1,36 @@ +# Homebrew package + +Lighthouse is available on Linux and macOS via the [Homebrew package manager](https://brew.sh). + +Please note that this installation method is maintained by the Homebrew community. +It is not officially supported by the Lighthouse team. + +### Installation + +Install the latest version of the [`lighthouse`][formula] formula with: + +```bash +brew install lighthouse +``` + +### Usage + +If Homebrew is installed to your `PATH` (default), simply run: + +```bash +lighthouse --help +``` + +Alternatively, you can find the `lighthouse` binary at: + +```bash +"$(brew --prefix)/bin/lighthouse" --help +``` + +### Maintenance + +The [formula][] is kept up-to-date by the Homebrew community and a bot that listens for new releases. + +The package source can be found in the [homebrew-core](https://github.com/Homebrew/homebrew-core/blob/master/Formula/lighthouse.rb) repo. + + [formula]: https://formulae.brew.sh/formula/lighthouse diff --git a/book/src/installation.md b/book/src/installation.md index 009bfc00c0f..38fbe6b7808 100644 --- a/book/src/installation.md +++ b/book/src/installation.md @@ -8,6 +8,10 @@ There are three core methods to obtain the Lighthouse application: - [Docker images](./docker.md). - [Building from source](./installation-source.md). +The community maintains additional installation methods (currently only one). + +- [Homebrew package](./homebrew.md). + Additionally, there are two extra guides for specific uses: - [Rapsberry Pi 4 guide](./pi.md). diff --git a/book/src/intro.md b/book/src/intro.md index d3a95c86311..b31deeef884 100644 --- a/book/src/intro.md +++ b/book/src/intro.md @@ -20,7 +20,7 @@ You may read this book from start to finish, or jump to some of these topics: - Follow the [Installation Guide](./installation.md) to install Lighthouse. - Learn about [becoming a mainnet validator](./mainnet-validator.md). - Get hacking with the [Development Environment Guide](./setup.md). -- Utilize the whole stack by starting a [local testnet](./local-testnets.md). +- Utilize the whole stack by starting a [local testnet](./setup.md#local-testnets). - Query the [RESTful HTTP API](./api.md) using `curl`. diff --git a/book/src/pi.md b/book/src/pi.md index 6bc274c9a36..24796d394e3 100644 --- a/book/src/pi.md +++ b/book/src/pi.md @@ -22,7 +22,7 @@ terminal and an Internet connection are necessary. ### 2. Install Packages -Install the [Ubuntu Dependencies](installation.md#dependencies-ubuntu). +Install the [Ubuntu Dependencies](installation-source.md#ubuntu). (I.e., run the `sudo apt install ...` command at that link). 
> Tips: diff --git a/book/src/redundancy.md b/book/src/redundancy.md index a50e3243748..b01a01dd268 100644 --- a/book/src/redundancy.md +++ b/book/src/redundancy.md @@ -86,7 +86,7 @@ now processing, validating, aggregating and forwarding *all* attestations, whereas previously it was likely only doing a fraction of this work. Without these flags, subscription to attestation subnets and aggregation of attestations is only performed for validators which [explicitly request -subscriptions](subscribe-api). +subscriptions][subscribe-api]. There are 64 subnets and each validator will result in a subscription to *at least* one subnet. So, using the two aforementioned flags will result in diff --git a/book/src/validator-create.md b/book/src/validator-create.md index 73fff42dfe3..91af60078a4 100644 --- a/book/src/validator-create.md +++ b/book/src/validator-create.md @@ -75,7 +75,7 @@ The example assumes that the `wally` wallet was generated from the [wallet](./wallet-create.md) example. ```bash -lighthouse --network pyrmont account validator create --name wally --wallet-password wally.pass --count 1 +lighthouse --network pyrmont account validator create --wallet-name wally --wallet-password wally.pass --count 1 ``` This command will: diff --git a/book/src/validator-inclusion.md b/book/src/validator-inclusion.md index 72e2e379c72..67e17fecad9 100644 --- a/book/src/validator-inclusion.md +++ b/book/src/validator-inclusion.md @@ -98,7 +98,7 @@ curl -X GET "http://localhost:5052/lighthouse/validator_inclusion/0/global" -H Returns a per-validator summary of how that validator performed during the current epoch. -The [Global Votes](#consensusglobal_votes) endpoint is the summation of all of these +The [Global Votes](#global) endpoint is the summation of all of these individual values, please see it for definitions of terms like "current_epoch", "previous_epoch" and "target_attester". diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index e423ed1764b..d12fdbc6133 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "boot_node" -version = "2.0.1" +version = "2.1.1" authors = ["Sigma Prime <info@sigmaprime.io>"] edition = "2018" diff --git a/boot_node/src/config.rs b/boot_node/src/config.rs index 1e550e60c44..4df7a5f235e 100644 --- a/boot_node/src/config.rs +++ b/boot_node/src/config.rs @@ -132,13 +132,15 @@ impl BootNodeConfig { /// The set of configuration parameters that can safely be (de)serialized. /// -/// Its fields are a subset of the fields of `BootNodeConfig`. +/// Its fields are a subset of the fields of `BootNodeConfig`; some of them are copied from `Discv5Config`. 
#[derive(Serialize, Deserialize)] pub struct BootNodeConfigSerialization { pub listen_socket: SocketAddr, // TODO: Generalise to multiaddr pub boot_nodes: Vec, pub local_enr: Enr, + pub disable_packet_filter: bool, + pub enable_enr_auto_update: bool, } impl BootNodeConfigSerialization { @@ -150,7 +152,7 @@ impl BootNodeConfigSerialization { boot_nodes, local_enr, local_key: _, - discv5_config: _, + discv5_config, phantom: _, } = config; @@ -158,6 +160,8 @@ impl BootNodeConfigSerialization { listen_socket: *listen_socket, boot_nodes: boot_nodes.clone(), local_enr: local_enr.clone(), + disable_packet_filter: !discv5_config.enable_packet_filter, + enable_enr_auto_update: discv5_config.enr_update, } } } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index bdad6728667..8dc808c2653 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -9,6 +9,7 @@ #[cfg(feature = "lighthouse")] pub mod lighthouse; +#[cfg(feature = "lighthouse")] pub mod lighthouse_vc; pub mod mixin; pub mod types; @@ -245,6 +246,7 @@ impl BeaconNodeHttpClient { } /// Perform a HTTP POST request, returning a JSON response. + #[cfg(feature = "lighthouse")] async fn post_with_response( &self, url: U, @@ -1256,8 +1258,12 @@ impl BeaconNodeHttpClient { .push("attester") .push(&epoch.to_string()); - self.post_with_timeout_and_response(path, &indices, self.timeouts.attester_duties) - .await + self.post_with_timeout_and_response( + path, + &ValidatorIndexDataRef(indices), + self.timeouts.attester_duties, + ) + .await } /// `POST validator/aggregate_and_proofs` @@ -1356,8 +1362,12 @@ impl BeaconNodeHttpClient { .push("sync") .push(&epoch.to_string()); - self.post_with_timeout_and_response(path, &indices, self.timeouts.sync_duties) - .await + self.post_with_timeout_and_response( + path, + &ValidatorIndexDataRef(indices), + self.timeouts.sync_duties, + ) + .await } } diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index a8993a39c5c..adf73d8b923 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -1,5 +1,8 @@ //! This module contains endpoints that are non-standard and only available on Lighthouse servers. 
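The duty requests above now send `ValidatorIndexDataRef`, whose indices are serialized as quoted decimal strings rather than bare JSON numbers. A minimal sketch of that wire format, assuming `serde` and `serde_json`: `quoted_u64_vec` here is a hypothetical stand-in for `eth2_serde_utils::quoted_u64_vec::serialize`, and the struct owns its data for brevity.

```rust
use serde::{Serialize, Serializer};

// Stand-in serializer: each u64 is emitted as a decimal string.
fn quoted_u64_vec<S: Serializer>(values: &[u64], serializer: S) -> Result<S::Ok, S::Error> {
    serializer.collect_seq(values.iter().map(u64::to_string))
}

// Shaped like `ValidatorIndexDataRef`, but owning a `Vec<u64>`.
#[derive(Serialize)]
#[serde(transparent)]
struct ValidatorIndexData(#[serde(serialize_with = "quoted_u64_vec")] Vec<u64>);

fn main() {
    let body = ValidatorIndexData(vec![0, 1, 42]);
    // Prints: ["0","1","42"]
    println!("{}", serde_json::to_string(&body).unwrap());
}
```

Quoting keeps `u64` values intact for JSON consumers that would otherwise lose precision above 2^53.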
+mod attestation_performance; +mod block_rewards; + use crate::{ ok_or_error, types::{BeaconState, ChainSpec, Epoch, EthSpec, GenericResponse, ValidatorId}, @@ -12,6 +15,10 @@ use ssz::four_byte_option_impl; use ssz_derive::{Decode, Encode}; use store::{AnchorInfo, Split}; +pub use attestation_performance::{ + AttestationPerformance, AttestationPerformanceQuery, AttestationPerformanceStatistics, +}; +pub use block_rewards::{AttestationRewards, BlockReward, BlockRewardMeta, BlockRewardsQuery}; pub use lighthouse_network::{types::SyncState, PeerInfo}; // Define "legacy" implementations of `Option` which use four bytes for encoding the union diff --git a/common/eth2/src/lighthouse/attestation_performance.rs b/common/eth2/src/lighthouse/attestation_performance.rs new file mode 100644 index 00000000000..5ce1d90a38d --- /dev/null +++ b/common/eth2/src/lighthouse/attestation_performance.rs @@ -0,0 +1,39 @@ +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use types::Epoch; + +#[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)] +pub struct AttestationPerformanceStatistics { + pub active: bool, + pub head: bool, + pub target: bool, + pub source: bool, + #[serde(skip_serializing_if = "Option::is_none")] + pub delay: Option, +} + +#[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)] +pub struct AttestationPerformance { + pub index: u64, + pub epochs: HashMap, +} + +impl AttestationPerformance { + pub fn initialize(indices: Vec) -> Vec { + let mut vec = Vec::with_capacity(indices.len()); + for index in indices { + vec.push(Self { + index, + ..Default::default() + }) + } + vec + } +} + +/// Query parameters for the `/lighthouse/analysis/attestation_performance` endpoint. +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct AttestationPerformanceQuery { + pub start_epoch: Epoch, + pub end_epoch: Epoch, +} diff --git a/common/eth2/src/lighthouse/block_rewards.rs b/common/eth2/src/lighthouse/block_rewards.rs new file mode 100644 index 00000000000..186cbd888cf --- /dev/null +++ b/common/eth2/src/lighthouse/block_rewards.rs @@ -0,0 +1,54 @@ +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use types::{Hash256, Slot}; + +/// Details about the rewards paid to a block proposer for proposing a block. +/// +/// All rewards in GWei. +/// +/// Presently this only counts attestation rewards, but in future should be expanded +/// to include information on slashings and sync committee aggregates too. +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct BlockReward { + /// Sum of all reward components. + pub total: u64, + /// Block root of the block that these rewards are for. + pub block_root: Hash256, + /// Metadata about the block, particularly reward-relevant metadata. + pub meta: BlockRewardMeta, + /// Rewards due to attestations. + pub attestation_rewards: AttestationRewards, + /// Sum of rewards due to sync committee signatures. + pub sync_committee_rewards: u64, +} + +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct BlockRewardMeta { + pub slot: Slot, + pub parent_slot: Slot, + pub proposer_index: u64, + pub graffiti: String, +} + +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct AttestationRewards { + /// Total block reward from attestations included. + pub total: u64, + /// Total rewards from previous epoch attestations. + pub prev_epoch_total: u64, + /// Total rewards from current epoch attestations. 
+ pub curr_epoch_total: u64, + /// Vec of attestation rewards for each attestation included. + /// + /// Each element of the vec is a map from validator index to reward. + pub per_attestation_rewards: Vec>, +} + +/// Query parameters for the `/lighthouse/block_rewards` endpoint. +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct BlockRewardsQuery { + /// Lower slot limit for block rewards returned (inclusive). + pub start_slot: Slot, + /// Upper slot limit for block rewards returned (inclusive). + pub end_slot: Slot, +} diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index be65dd8776c..78567ad83c1 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -10,6 +10,9 @@ use std::str::{from_utf8, FromStr}; use std::time::Duration; pub use types::*; +#[cfg(feature = "lighthouse")] +use crate::lighthouse::BlockReward; + /// An API error serializable to JSON. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(untagged)] @@ -428,10 +431,13 @@ pub struct AttestationPoolQuery { pub committee_index: Option, } -#[derive(Deserialize)] +#[derive(Debug, Deserialize)] +#[serde(deny_unknown_fields)] pub struct ValidatorsQuery { - pub id: Option>, - pub status: Option>, + #[serde(default, deserialize_with = "option_query_vec")] + pub id: Option>, + #[serde(default, deserialize_with = "option_query_vec")] + pub status: Option>, } #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] @@ -520,33 +526,81 @@ pub struct SyncingData { #[derive(Clone, PartialEq, Debug, Deserialize)] #[serde(try_from = "String", bound = "T: FromStr")] -pub struct QueryVec(pub Vec); +pub struct QueryVec { + values: Vec, +} + +fn query_vec<'de, D, T>(deserializer: D) -> Result, D::Error> +where + D: serde::Deserializer<'de>, + T: FromStr, +{ + let vec: Vec> = Deserialize::deserialize(deserializer)?; + Ok(Vec::from(QueryVec::from(vec))) +} + +fn option_query_vec<'de, D, T>(deserializer: D) -> Result>, D::Error> +where + D: serde::Deserializer<'de>, + T: FromStr, +{ + let vec: Vec> = Deserialize::deserialize(deserializer)?; + if vec.is_empty() { + return Ok(None); + } + + Ok(Some(Vec::from(QueryVec::from(vec)))) +} + +impl From>> for QueryVec { + fn from(vecs: Vec>) -> Self { + Self { + values: vecs.into_iter().flat_map(|qv| qv.values).collect(), + } + } +} impl TryFrom for QueryVec { type Error = String; fn try_from(string: String) -> Result { if string.is_empty() { - return Ok(Self(vec![])); + return Ok(Self { values: vec![] }); } - string - .split(',') - .map(|s| s.parse().map_err(|_| "unable to parse".to_string())) - .collect::, String>>() - .map(Self) + Ok(Self { + values: string + .split(',') + .map(|s| s.parse().map_err(|_| "unable to parse query".to_string())) + .collect::, String>>()?, + }) + } +} + +impl From> for Vec { + fn from(vec: QueryVec) -> Vec { + vec.values } } #[derive(Clone, Deserialize)] +#[serde(deny_unknown_fields)] pub struct ValidatorBalancesQuery { - pub id: Option>, + #[serde(default, deserialize_with = "option_query_vec")] + pub id: Option>, } #[derive(Clone, Serialize, Deserialize)] #[serde(transparent)] pub struct ValidatorIndexData(#[serde(with = "eth2_serde_utils::quoted_u64_vec")] pub Vec); +/// Borrowed variant of `ValidatorIndexData`, for serializing/sending. 
+#[derive(Clone, Copy, Serialize)] +#[serde(transparent)] +pub struct ValidatorIndexDataRef<'a>( + #[serde(serialize_with = "eth2_serde_utils::quoted_u64_vec::serialize")] pub &'a [u64], +); + #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct AttesterData { pub pubkey: PublicKeyBytes, @@ -602,9 +656,12 @@ pub struct BeaconCommitteeSubscription { } #[derive(Deserialize)] +#[serde(deny_unknown_fields)] pub struct PeersQuery { - pub state: Option>, - pub direction: Option>, + #[serde(default, deserialize_with = "option_query_vec")] + pub state: Option>, + #[serde(default, deserialize_with = "option_query_vec")] + pub direction: Option>, } #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] @@ -785,6 +842,8 @@ pub enum EventKind { ChainReorg(SseChainReorg), ContributionAndProof(Box>), LateHead(SseLateHead), + #[cfg(feature = "lighthouse")] + BlockReward(BlockReward), } impl EventKind { @@ -798,6 +857,8 @@ impl EventKind { EventKind::ChainReorg(_) => "chain_reorg", EventKind::ContributionAndProof(_) => "contribution_and_proof", EventKind::LateHead(_) => "late_head", + #[cfg(feature = "lighthouse")] + EventKind::BlockReward(_) => "block_reward", } } @@ -850,6 +911,10 @@ impl EventKind { ServerError::InvalidServerSentEvent(format!("Contribution and Proof: {:?}", e)) })?, ))), + #[cfg(feature = "lighthouse")] + "block_reward" => Ok(EventKind::BlockReward(serde_json::from_str(data).map_err( + |e| ServerError::InvalidServerSentEvent(format!("Block Reward: {:?}", e)), + )?)), _ => Err(ServerError::InvalidServerSentEvent( "Could not parse event tag".to_string(), )), @@ -858,8 +923,10 @@ impl EventKind { } #[derive(Clone, Deserialize)] +#[serde(deny_unknown_fields)] pub struct EventQuery { - pub topics: QueryVec, + #[serde(deserialize_with = "query_vec")] + pub topics: Vec, } #[derive(Debug, Clone, Copy, PartialEq, Deserialize)] @@ -873,6 +940,8 @@ pub enum EventTopic { ChainReorg, ContributionAndProof, LateHead, + #[cfg(feature = "lighthouse")] + BlockReward, } impl FromStr for EventTopic { @@ -888,6 +957,8 @@ impl FromStr for EventTopic { "chain_reorg" => Ok(EventTopic::ChainReorg), "contribution_and_proof" => Ok(EventTopic::ContributionAndProof), "late_head" => Ok(EventTopic::LateHead), + #[cfg(feature = "lighthouse")] + "block_reward" => Ok(EventTopic::BlockReward), _ => Err("event topic cannot be parsed.".to_string()), } } @@ -904,6 +975,8 @@ impl fmt::Display for EventTopic { EventTopic::ChainReorg => write!(f, "chain_reorg"), EventTopic::ContributionAndProof => write!(f, "contribution_and_proof"), EventTopic::LateHead => write!(f, "late_head"), + #[cfg(feature = "lighthouse")] + EventTopic::BlockReward => write!(f, "block_reward"), } } } @@ -961,7 +1034,9 @@ mod tests { fn query_vec() { assert_eq!( QueryVec::try_from("0,1,2".to_string()).unwrap(), - QueryVec(vec![0_u64, 1, 2]) + QueryVec { + values: vec![0_u64, 1, 2] + } ); } } diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index 4d17356ced3..b889b828870 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -33,8 +33,8 @@ GENESIS_DELAY: 604800 ALTAIR_FORK_VERSION: 0x01000000 ALTAIR_FORK_EPOCH: 74240 # Merge -MERGE_FORK_VERSION: 0x02000000 -MERGE_FORK_EPOCH: 18446744073709551615 +BELLATRIX_FORK_VERSION: 0x02000000 +BELLATRIX_FORK_EPOCH: 18446744073709551615 # Sharding SHARDING_FORK_VERSION: 
0x03000000 SHARDING_FORK_EPOCH: 18446744073709551615 diff --git a/common/eth2_network_config/built_in_network_configs/prater/config.yaml b/common/eth2_network_config/built_in_network_configs/prater/config.yaml index aa375ab2ea4..72a106f36a0 100644 --- a/common/eth2_network_config/built_in_network_configs/prater/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/prater/config.yaml @@ -33,8 +33,8 @@ GENESIS_DELAY: 1919188 ALTAIR_FORK_VERSION: 0x01001020 ALTAIR_FORK_EPOCH: 36660 # Merge -MERGE_FORK_VERSION: 0x02001020 -MERGE_FORK_EPOCH: 18446744073709551615 +BELLATRIX_FORK_VERSION: 0x02001020 +BELLATRIX_FORK_EPOCH: 18446744073709551615 # Sharding SHARDING_FORK_VERSION: 0x03001020 SHARDING_FORK_EPOCH: 18446744073709551615 diff --git a/common/eth2_network_config/built_in_network_configs/pyrmont/config.yaml b/common/eth2_network_config/built_in_network_configs/pyrmont/config.yaml index b5f8415805f..913671c2bea 100644 --- a/common/eth2_network_config/built_in_network_configs/pyrmont/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/pyrmont/config.yaml @@ -33,8 +33,8 @@ GENESIS_DELAY: 432000 ALTAIR_FORK_VERSION: 0x01002009 ALTAIR_FORK_EPOCH: 61650 # Merge -MERGE_FORK_VERSION: 0x02002009 -MERGE_FORK_EPOCH: 18446744073709551615 +BELLATRIX_FORK_VERSION: 0x02002009 +BELLATRIX_FORK_EPOCH: 18446744073709551615 # Sharding SHARDING_FORK_VERSION: 0x03002009 SHARDING_FORK_EPOCH: 18446744073709551615 diff --git a/common/lighthouse_metrics/src/lib.rs b/common/lighthouse_metrics/src/lib.rs index 4b7160ae05a..98973de1add 100644 --- a/common/lighthouse_metrics/src/lib.rs +++ b/common/lighthouse_metrics/src/lib.rs @@ -307,6 +307,12 @@ pub fn set_float_gauge(gauge: &Result, value: f64) { } } +pub fn set_float_gauge_vec(gauge_vec: &Result, name: &[&str], value: f64) { + if let Some(gauge) = get_gauge(gauge_vec, name) { + gauge.set(value); + } +} + pub fn inc_gauge(gauge: &Result) { if let Ok(gauge) = gauge { gauge.inc(); diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index 5892f59f562..a66ff66e5ce 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -16,7 +16,7 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v2.0.1-", + prefix = "Lighthouse/v2.1.1-", fallback = "unknown" ); diff --git a/common/malloc_utils/src/glibc.rs b/common/malloc_utils/src/glibc.rs index 402cdc27aa8..681849a78ce 100644 --- a/common/malloc_utils/src/glibc.rs +++ b/common/malloc_utils/src/glibc.rs @@ -11,22 +11,20 @@ use std::env; use std::os::raw::c_int; use std::result::Result; -/// The value to be provided to `malloc_mmap_threshold`. +/// The optimal mmap threshold for Lighthouse seems to be around 128KB. /// -/// Value chosen so that values of the validators tree hash cache will *not* be allocated via -/// `mmap`. -/// -/// The size of a single chunk is: -/// -/// NODES_PER_VALIDATOR * VALIDATORS_PER_ARENA * 32 = 15 * 4096 * 32 = 1.875 MiB -const OPTIMAL_MMAP_THRESHOLD: c_int = 2 * 1_024 * 1_024; +/// By default GNU malloc will start with a threshold of 128KB and adjust it upwards, but we've +/// found that the upwards adjustments tend to result in heap fragmentation. Explicitly setting the +/// threshold to 128KB disables the dynamic adjustments and encourages `mmap` usage, which keeps the +/// heap size under control. 
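The mallopt configuration that `OPTIMAL_MMAP_THRESHOLD` feeds into boils down to a single glibc call. A minimal, Linux/glibc-only sketch of the same idea (constants and return convention per `mallopt(3)`):

```rust
use std::os::raw::c_int;

// From glibc's malloc.h: `-3` selects `M_MMAP_THRESHOLD`, the same constant
// the patched code uses.
const M_MMAP_THRESHOLD: c_int = -3;

extern "C" {
    // Provided by glibc; this sketch is Linux/glibc-specific.
    fn mallopt(param: c_int, value: c_int) -> c_int;
}

fn main() {
    // Pin the mmap threshold at 128 KiB. Setting it explicitly also disables
    // glibc's dynamic upward adjustment of the threshold.
    let ret = unsafe { mallopt(M_MMAP_THRESHOLD, 128 * 1_024) };
    // glibc's mallopt returns 1 on success, 0 on failure.
    assert_eq!(ret, 1, "mallopt(M_MMAP_THRESHOLD) failed");
}
```

With the threshold pinned, allocations of 128 KiB and above are served by `mmap` and handed back to the kernel on free, rather than accumulating fragmentation in the main heap.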
+const OPTIMAL_MMAP_THRESHOLD: c_int = 128 * 1_024; /// Constants used to configure malloc internals. /// /// Source: /// /// https://github.com/lattera/glibc/blob/895ef79e04a953cac1493863bcae29ad85657ee1/malloc/malloc.h#L115-L123 -const M_MMAP_THRESHOLD: c_int = -4; +const M_MMAP_THRESHOLD: c_int = -3; /// Environment variables used to configure malloc. /// @@ -134,8 +132,8 @@ fn env_var_present(name: &str) -> bool { /// ## Resources /// /// - https://man7.org/linux/man-pages/man3/mallopt.3.html -fn malloc_mmap_threshold(num_arenas: c_int) -> Result<(), c_int> { - into_result(mallopt(M_MMAP_THRESHOLD, num_arenas)) +fn malloc_mmap_threshold(threshold: c_int) -> Result<(), c_int> { + into_result(mallopt(M_MMAP_THRESHOLD, threshold)) } fn mallopt(param: c_int, val: c_int) -> c_int { diff --git a/common/monitoring_api/src/gather.rs b/common/monitoring_api/src/gather.rs index 16965f43cdf..8699a8cf2c9 100644 --- a/common/monitoring_api/src/gather.rs +++ b/common/monitoring_api/src/gather.rs @@ -67,11 +67,7 @@ const BEACON_PROCESS_METRICS: &[JsonMetric] = &[ "disk_beaconchain_bytes_total", JsonType::Integer, ), - JsonMetric::new( - "libp2p_peer_connected_peers_total", - "network_peers_connected", - JsonType::Integer, - ), + JsonMetric::new("libp2p_peers", "network_peers_connected", JsonType::Integer), JsonMetric::new( "libp2p_outbound_bytes", "network_libp2p_bytes_total_transmit", diff --git a/common/slot_clock/src/lib.rs b/common/slot_clock/src/lib.rs index f50931c6f6a..183f5c9313d 100644 --- a/common/slot_clock/src/lib.rs +++ b/common/slot_clock/src/lib.rs @@ -112,4 +112,18 @@ pub trait SlotClock: Send + Sync + Sized + Clone { Duration::from_secs(duration_into_slot.as_secs() % seconds_per_slot) }) } + + /// Produces a *new* slot clock with the same configuration of `self`, except that clock is + /// "frozen" at the `freeze_at` time. + /// + /// This is useful for observing the slot clock at arbitrary fixed points in time. + fn freeze_at(&self, freeze_at: Duration) -> ManualSlotClock { + let slot_clock = ManualSlotClock::new( + self.genesis_slot(), + self.genesis_duration(), + self.slot_duration(), + ); + slot_clock.set_current_time(freeze_at); + slot_clock + } } diff --git a/common/warp_utils/Cargo.toml b/common/warp_utils/Cargo.toml index f99d7773b9a..09b6f125fce 100644 --- a/common/warp_utils/Cargo.toml +++ b/common/warp_utils/Cargo.toml @@ -18,3 +18,4 @@ tokio = { version = "1.14.0", features = ["sync"] } headers = "0.3.2" lighthouse_metrics = { path = "../lighthouse_metrics" } lazy_static = "1.4.0" +serde_array_query = "0.1.0" diff --git a/common/warp_utils/src/lib.rs b/common/warp_utils/src/lib.rs index 5f37dde87de..346361b18fe 100644 --- a/common/warp_utils/src/lib.rs +++ b/common/warp_utils/src/lib.rs @@ -3,5 +3,6 @@ pub mod cors; pub mod metrics; +pub mod query; pub mod reject; pub mod task; diff --git a/common/warp_utils/src/query.rs b/common/warp_utils/src/query.rs new file mode 100644 index 00000000000..c5ed5c5f128 --- /dev/null +++ b/common/warp_utils/src/query.rs @@ -0,0 +1,22 @@ +use crate::reject::custom_bad_request; +use serde::Deserialize; +use warp::Filter; + +// Custom query filter using `serde_array_query`. +// This allows duplicate keys inside query strings. 
+pub fn multi_key_query<'de, T: Deserialize<'de>>( +) -> impl warp::Filter,), Error = std::convert::Infallible> + Copy +{ + raw_query().then(|query_str: String| async move { + serde_array_query::from_str(&query_str).map_err(|e| custom_bad_request(e.to_string())) + }) +} + +// This ensures that empty query strings are still accepted. +// This is because warp::filters::query::raw() does not allow empty query strings +// but warp::query::() does. +fn raw_query() -> impl Filter + Copy { + warp::filters::query::raw() + .or(warp::any().map(String::default)) + .unify() +} diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 86b32aab1a4..3ab07c6af12 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -589,7 +589,7 @@ where .on_verified_block(block, block_root, state) .map_err(Error::AfterBlockFailed)?; - let execution_status = if let Some(execution_payload) = block.body().execution_payload() { + let execution_status = if let Ok(execution_payload) = block.body().execution_payload() { let block_hash = execution_payload.block_hash; if block_hash == Hash256::zero() { diff --git a/consensus/ssz_types/Cargo.toml b/consensus/ssz_types/Cargo.toml index 4d4b073f4a1..b71de4ccdbe 100644 --- a/consensus/ssz_types/Cargo.toml +++ b/consensus/ssz_types/Cargo.toml @@ -17,6 +17,7 @@ eth2_serde_utils = "0.1.1" eth2_ssz = "0.4.1" typenum = "1.12.0" arbitrary = { version = "1.0", features = ["derive"], optional = true } +derivative = "2.1.1" [dev-dependencies] serde_json = "1.0.58" diff --git a/consensus/ssz_types/src/bitfield.rs b/consensus/ssz_types/src/bitfield.rs index afecd8ce7d8..dfad3aedcb8 100644 --- a/consensus/ssz_types/src/bitfield.rs +++ b/consensus/ssz_types/src/bitfield.rs @@ -1,6 +1,7 @@ use crate::tree_hash::bitfield_bytes_tree_hash_root; use crate::Error; use core::marker::PhantomData; +use derivative::Derivative; use eth2_serde_utils::hex::{encode as hex_encode, PrefixedHexVisitor}; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; @@ -87,7 +88,8 @@ pub type BitVector = Bitfield>; /// The internal representation of the bitfield is the same as that required by SSZ. The lowest /// byte (by `Vec` index) stores the lowest bit-indices and the right-most bit stores the lowest /// bit-index. E.g., `vec![0b0000_0001, 0b0000_0010]` has bits `0, 9` set. 
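The `derivative` crate is pulled in below because the std `#[derive(Hash)]` adds a `T: Hash` bound for every type parameter, even one that appears only inside `PhantomData`; `Hash(bound = "")` drops that requirement. A minimal sketch, assuming `derivative = "2"` as a dependency:

```rust
use derivative::Derivative;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use std::marker::PhantomData;

// A marker type with no `Hash` impl, standing in for a type-level length
// parameter such as the `N` of `BitVector<N>`.
struct NoHashMarker;

#[derive(Derivative)]
#[derivative(Hash(bound = ""))] // std `#[derive(Hash)]` would demand `N: Hash`
struct Bitfield<N> {
    bytes: Vec<u8>,
    len: usize,
    _phantom: PhantomData<N>,
}

fn main() {
    let field = Bitfield::<NoHashMarker> {
        bytes: vec![0b0000_0001, 0b0000_0010],
        len: 16,
        _phantom: PhantomData,
    };
    let mut hasher = DefaultHasher::new();
    field.hash(&mut hasher);
    println!("bitfield hash: {:#018x}", hasher.finish());
}
```

This is what lets the batch-attempt hashing earlier in the diff hash whole `SignedBeaconBlock` sets: the container types only need their element and marker bounds where a `Hash` impl genuinely exists.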
-#[derive(Clone, Debug, PartialEq)] +#[derive(Clone, Debug, Derivative)] +#[derivative(PartialEq, Hash(bound = ""))] pub struct Bitfield { bytes: Vec, len: usize, diff --git a/consensus/ssz_types/src/fixed_vector.rs b/consensus/ssz_types/src/fixed_vector.rs index 8b8d660fb9e..ca5d40f14fa 100644 --- a/consensus/ssz_types/src/fixed_vector.rs +++ b/consensus/ssz_types/src/fixed_vector.rs @@ -1,5 +1,6 @@ use crate::tree_hash::vec_tree_hash_root; use crate::Error; +use derivative::Derivative; use serde_derive::{Deserialize, Serialize}; use std::marker::PhantomData; use std::ops::{Deref, DerefMut, Index, IndexMut}; @@ -44,7 +45,8 @@ pub use typenum; /// let long: FixedVector<_, typenum::U5> = FixedVector::from(base); /// assert_eq!(&long[..], &[1, 2, 3, 4, 0]); /// ``` -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, Derivative)] +#[derivative(PartialEq, Hash(bound = "T: std::hash::Hash"))] #[serde(transparent)] pub struct FixedVector { vec: Vec, diff --git a/consensus/ssz_types/src/variable_list.rs b/consensus/ssz_types/src/variable_list.rs index 242a55b2c94..1414d12c8c3 100644 --- a/consensus/ssz_types/src/variable_list.rs +++ b/consensus/ssz_types/src/variable_list.rs @@ -1,5 +1,6 @@ use crate::tree_hash::vec_tree_hash_root; use crate::Error; +use derivative::Derivative; use serde_derive::{Deserialize, Serialize}; use std::marker::PhantomData; use std::ops::{Deref, DerefMut, Index, IndexMut}; @@ -46,7 +47,8 @@ pub use typenum; /// // Push a value to if it _does_ exceed the maximum. /// assert!(long.push(6).is_err()); /// ``` -#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, Derivative)] +#[derivative(PartialEq, Eq, Hash(bound = "T: std::hash::Hash"))] #[serde(transparent)] pub struct VariableList { vec: Vec, diff --git a/consensus/state_processing/src/genesis.rs b/consensus/state_processing/src/genesis.rs index 1bb88c84d16..fb2c9bfa7d0 100644 --- a/consensus/state_processing/src/genesis.rs +++ b/consensus/state_processing/src/genesis.rs @@ -2,7 +2,7 @@ use super::per_block_processing::{ errors::BlockProcessingError, process_operations::process_deposit, }; use crate::common::DepositDataTree; -use crate::upgrade::{upgrade_to_altair, upgrade_to_merge}; +use crate::upgrade::{upgrade_to_altair, upgrade_to_bellatrix}; use safe_arith::{ArithError, SafeArith}; use tree_hash::TreeHash; use types::DEPOSIT_TREE_DEPTH; @@ -58,13 +58,13 @@ pub fn initialize_beacon_state_from_eth1( // Similarly, perform an upgrade to the merge if configured from genesis. if spec - .merge_fork_epoch + .bellatrix_fork_epoch .map_or(false, |fork_epoch| fork_epoch == T::genesis_epoch()) { - upgrade_to_merge(&mut state, spec)?; + upgrade_to_bellatrix(&mut state, spec)?; // Remove intermediate Altair fork from `state.fork`. - state.fork_mut().previous_version = spec.merge_fork_version; + state.fork_mut().previous_version = spec.bellatrix_fork_version; // Override latest execution payload header. 
// See https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/merge/beacon-chain.md#testing diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index ed7275be080..857c7763325 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -148,10 +148,7 @@ pub fn per_block_processing( // `process_randao` as the former depends on the `randao_mix` computed with the reveal of the // previous block. if is_execution_enabled(state, block.body()) { - let payload = block - .body() - .execution_payload() - .ok_or(BlockProcessingError::IncorrectStateType)?; + let payload = block.body().execution_payload()?; process_execution_payload(state, payload, spec)?; } @@ -159,7 +156,7 @@ pub fn per_block_processing( process_eth1_data(state, block.body().eth1_data())?; process_operations(state, block.body(), proposer_index, verify_signatures, spec)?; - if let Some(sync_aggregate) = block.body().sync_aggregate() { + if let Ok(sync_aggregate) = block.body().sync_aggregate() { process_sync_aggregate( state, sync_aggregate, diff --git a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs index 31386a8fb12..8358003e4b4 100644 --- a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs +++ b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs @@ -42,19 +42,7 @@ pub fn process_sync_aggregate( } // Compute participant and proposer rewards - let total_active_balance = state.get_total_active_balance()?; - let total_active_increments = - total_active_balance.safe_div(spec.effective_balance_increment)?; - let total_base_rewards = get_base_reward_per_increment(total_active_balance, spec)? - .safe_mul(total_active_increments)?; - let max_participant_rewards = total_base_rewards - .safe_mul(SYNC_REWARD_WEIGHT)? - .safe_div(WEIGHT_DENOMINATOR)? - .safe_div(T::slots_per_epoch())?; - let participant_reward = max_participant_rewards.safe_div(T::SyncCommitteeSize::to_u64())?; - let proposer_reward = participant_reward - .safe_mul(PROPOSER_WEIGHT)? - .safe_div(WEIGHT_DENOMINATOR.safe_sub(PROPOSER_WEIGHT)?)?; + let (participant_reward, proposer_reward) = compute_sync_aggregate_rewards(state, spec)?; // Apply participant and proposer rewards let committee_indices = state.get_sync_committee_indices(¤t_sync_committee)?; @@ -73,3 +61,26 @@ pub fn process_sync_aggregate( Ok(()) } + +/// Compute the `(participant_reward, proposer_reward)` for a sync aggregate. +/// +/// The `state` should be the pre-state from the same slot as the block containing the aggregate. +pub fn compute_sync_aggregate_rewards( + state: &BeaconState, + spec: &ChainSpec, +) -> Result<(u64, u64), BlockProcessingError> { + let total_active_balance = state.get_total_active_balance()?; + let total_active_increments = + total_active_balance.safe_div(spec.effective_balance_increment)?; + let total_base_rewards = get_base_reward_per_increment(total_active_balance, spec)? + .safe_mul(total_active_increments)?; + let max_participant_rewards = total_base_rewards + .safe_mul(SYNC_REWARD_WEIGHT)? + .safe_div(WEIGHT_DENOMINATOR)? + .safe_div(T::slots_per_epoch())?; + let participant_reward = max_participant_rewards.safe_div(T::SyncCommitteeSize::to_u64())?; + let proposer_reward = participant_reward + .safe_mul(PROPOSER_WEIGHT)? 
+ .safe_div(WEIGHT_DENOMINATOR.safe_sub(PROPOSER_WEIGHT)?)?; + Ok((participant_reward, proposer_reward)) +} diff --git a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs index 3e7a799341f..28044a462c5 100644 --- a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs +++ b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs @@ -302,7 +302,7 @@ where /// Include the signature of the block's sync aggregate (if it exists) for verification. pub fn include_sync_aggregate(&mut self, block: &'a SignedBeaconBlock) -> Result<()> { - if let Some(sync_aggregate) = block.message().body().sync_aggregate() { + if let Ok(sync_aggregate) = block.message().body().sync_aggregate() { if let Some(signature_set) = sync_aggregate_signature_set( &self.decompressor, sync_aggregate, diff --git a/consensus/state_processing/src/per_slot_processing.rs b/consensus/state_processing/src/per_slot_processing.rs index 454cee5ffb1..9018db65bcd 100644 --- a/consensus/state_processing/src/per_slot_processing.rs +++ b/consensus/state_processing/src/per_slot_processing.rs @@ -1,4 +1,4 @@ -use crate::upgrade::{upgrade_to_altair, upgrade_to_merge}; +use crate::upgrade::{upgrade_to_altair, upgrade_to_bellatrix}; use crate::{per_epoch_processing::EpochProcessingSummary, *}; use safe_arith::{ArithError, SafeArith}; use types::*; @@ -52,8 +52,8 @@ pub fn per_slot_processing( upgrade_to_altair(state, spec)?; } // If the Merge fork epoch is reached, perform an irregular state upgrade. - if spec.merge_fork_epoch == Some(state.current_epoch()) { - upgrade_to_merge(state, spec)?; + if spec.bellatrix_fork_epoch == Some(state.current_epoch()) { + upgrade_to_bellatrix(state, spec)?; } } diff --git a/consensus/state_processing/src/upgrade.rs b/consensus/state_processing/src/upgrade.rs index fda1a714af3..fdf13c82818 100644 --- a/consensus/state_processing/src/upgrade.rs +++ b/consensus/state_processing/src/upgrade.rs @@ -2,4 +2,4 @@ pub mod altair; pub mod merge; pub use altair::upgrade_to_altair; -pub use merge::upgrade_to_merge; +pub use merge::upgrade_to_bellatrix; diff --git a/consensus/state_processing/src/upgrade/merge.rs b/consensus/state_processing/src/upgrade/merge.rs index c41987609e3..2e4ed441a47 100644 --- a/consensus/state_processing/src/upgrade/merge.rs +++ b/consensus/state_processing/src/upgrade/merge.rs @@ -5,7 +5,7 @@ use types::{ }; /// Transform a `Altair` state into an `Merge` state. 
-pub fn upgrade_to_merge( +pub fn upgrade_to_bellatrix( pre_state: &mut BeaconState, spec: &ChainSpec, ) -> Result<(), Error> { @@ -24,7 +24,7 @@ pub fn upgrade_to_merge( slot: pre.slot, fork: Fork { previous_version: pre.fork.current_version, - current_version: spec.merge_fork_version, + current_version: spec.bellatrix_fork_version, epoch, }, // History diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index f62fcf5999f..bc013fe42d3 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -43,7 +43,8 @@ regex = "1.3.9" lazy_static = "1.4.0" parking_lot = "0.11.1" itertools = "0.10.0" -superstruct = "0.3.0" +superstruct = "0.4.0" +serde_json = "1.0.74" [dev-dependencies] criterion = "0.3.3" diff --git a/consensus/types/presets/mainnet/bellatrix.yaml b/consensus/types/presets/mainnet/bellatrix.yaml new file mode 100644 index 00000000000..7ae61b732f1 --- /dev/null +++ b/consensus/types/presets/mainnet/bellatrix.yaml @@ -0,0 +1,21 @@ +# Mainnet preset - Bellatrix + +# Updated penalty values +# --------------------------------------------------------------- +# 2**24 (= 16,777,216) +INACTIVITY_PENALTY_QUOTIENT_BELLATRIX: 16777216 +# 2**5 (= 32) +MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX: 32 +# 3 +PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX: 3 + +# Execution +# --------------------------------------------------------------- +# 2**30 (= 1,073,741,824) +MAX_BYTES_PER_TRANSACTION: 1073741824 +# 2**20 (= 1,048,576) +MAX_TRANSACTIONS_PER_PAYLOAD: 1048576 +# 2**8 (= 256) +BYTES_PER_LOGS_BLOOM: 256 +# 2**5 (= 32) +MAX_EXTRA_DATA_BYTES: 32 diff --git a/consensus/types/presets/minimal/bellatrix.yaml b/consensus/types/presets/minimal/bellatrix.yaml new file mode 100644 index 00000000000..3417985fad1 --- /dev/null +++ b/consensus/types/presets/minimal/bellatrix.yaml @@ -0,0 +1,21 @@ +# Minimal preset - Bellatrix + +# Updated penalty values +# --------------------------------------------------------------- +# 2**24 (= 16,777,216) +INACTIVITY_PENALTY_QUOTIENT_BELLATRIX: 16777216 +# 2**5 (= 32) +MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX: 32 +# 3 +PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX: 3 + +# Execution +# --------------------------------------------------------------- +# 2**30 (= 1,073,741,824) +MAX_BYTES_PER_TRANSACTION: 1073741824 +# 2**20 (= 1,048,576) +MAX_TRANSACTIONS_PER_PAYLOAD: 1048576 +# 2**8 (= 256) +BYTES_PER_LOGS_BLOOM: 256 +# 2**5 (= 32) +MAX_EXTRA_DATA_BYTES: 32 diff --git a/consensus/types/src/attestation.rs b/consensus/types/src/attestation.rs index 66d9e78a85b..1c9ec3bc4da 100644 --- a/consensus/types/src/attestation.rs +++ b/consensus/types/src/attestation.rs @@ -1,3 +1,4 @@ +use derivative::Derivative; use safe_arith::ArithError; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -23,7 +24,10 @@ pub enum Error { /// /// Spec v0.12.1 #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, Derivative, +)] +#[derivative(PartialEq, Hash(bound = "T: EthSpec"))] #[serde(bound = "T: EthSpec")] pub struct Attestation { pub aggregation_bits: BitList, diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index bdd4142b497..0026db0ee7c 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -5,6 +5,7 @@ use crate::beacon_block_body::{ use crate::test_utils::TestRandom; 
use crate::*; use bls::Signature; +use derivative::Derivative; use serde_derive::{Deserialize, Serialize}; use ssz::{Decode, DecodeError}; use ssz_derive::{Decode, Encode}; @@ -19,15 +20,16 @@ use tree_hash_derive::TreeHash; variant_attributes( derive( Debug, - PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, - TestRandom + TestRandom, + Derivative, ), + derivative(PartialEq, Hash(bound = "T: EthSpec")), serde(bound = "T: EthSpec", deny_unknown_fields), cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)), ), @@ -36,7 +38,8 @@ use tree_hash_derive::TreeHash; tree_hash(enum_behaviour = "transparent") ) )] -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, TreeHash)] +#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative)] +#[derivative(PartialEq, Hash(bound = "T: EthSpec"))] #[serde(untagged)] #[serde(bound = "T: EthSpec")] #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] @@ -66,7 +69,7 @@ impl<'a, T: EthSpec> SignedRoot for BeaconBlockRef<'a, T> {} impl BeaconBlock { /// Returns an empty block to be used during genesis. pub fn empty(spec: &ChainSpec) -> Self { - if spec.merge_fork_epoch == Some(T::genesis_epoch()) { + if spec.bellatrix_fork_epoch == Some(T::genesis_epoch()) { Self::Merge(BeaconBlockMerge::empty(spec)) } else if spec.altair_fork_epoch == Some(T::genesis_epoch()) { Self::Altair(BeaconBlockAltair::empty(spec)) @@ -237,13 +240,8 @@ impl<'a, T: EthSpec> BeaconBlockRef<'a, T> { /// Extracts a reference to an execution payload from a block, returning an error if the block /// is pre-merge. - pub fn execution_payload(&self) -> Result<&ExecutionPayload, InconsistentFork> { - self.body() - .execution_payload() - .ok_or_else(|| InconsistentFork { - fork_at_slot: ForkName::Merge, - object_fork: self.body().fork_name(), - }) + pub fn execution_payload(&self) -> Result<&ExecutionPayload, Error> { + self.body().execution_payload() } } diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index 3b417f5d0ba..c4df4f27717 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -1,5 +1,6 @@ use crate::test_utils::TestRandom; use crate::*; +use derivative::Derivative; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use ssz_types::VariableList; @@ -15,22 +16,24 @@ use tree_hash_derive::TreeHash; variant_attributes( derive( Debug, - PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, - TestRandom + TestRandom, + Derivative, ), + derivative(PartialEq, Hash(bound = "T: EthSpec")), serde(bound = "T: EthSpec", deny_unknown_fields), cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)) ), cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") )] -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, Derivative)] +#[derivative(PartialEq, Hash(bound = "T: EthSpec"))] #[serde(untagged)] #[serde(bound = "T: EthSpec")] #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] @@ -50,24 +53,6 @@ pub struct BeaconBlockBody { } impl<'a, T: EthSpec> BeaconBlockBodyRef<'a, T> { - /// Access the sync aggregate from the block's body, if one exists. 
diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs
index 68a5175a91b..f191eb86710 100644
--- a/consensus/types/src/chain_spec.rs
+++ b/consensus/types/src/chain_spec.rs
@@ -132,12 +132,12 @@ pub struct ChainSpec {
     /*
      * Merge hard fork params
      */
-    pub inactivity_penalty_quotient_merge: u64,
-    pub min_slashing_penalty_quotient_merge: u64,
-    pub proportional_slashing_multiplier_merge: u64,
-    pub merge_fork_version: [u8; 4],
+    pub inactivity_penalty_quotient_bellatrix: u64,
+    pub min_slashing_penalty_quotient_bellatrix: u64,
+    pub proportional_slashing_multiplier_bellatrix: u64,
+    pub bellatrix_fork_version: [u8; 4],
     /// The Merge fork epoch is optional, with `None` representing "Merge never happens".
-    pub merge_fork_epoch: Option<Epoch>,
+    pub bellatrix_fork_epoch: Option<Epoch>,
     pub terminal_total_difficulty: Uint256,
     pub terminal_block_hash: Hash256,
     pub terminal_block_hash_activation_epoch: Epoch,
@@ -217,7 +217,7 @@ impl ChainSpec {
     /// Returns the name of the fork which is active at `epoch`.
     pub fn fork_name_at_epoch(&self, epoch: Epoch) -> ForkName {
-        match self.merge_fork_epoch {
+        match self.bellatrix_fork_epoch {
             Some(fork_epoch) if epoch >= fork_epoch => ForkName::Merge,
             _ => match self.altair_fork_epoch {
                 Some(fork_epoch) if epoch >= fork_epoch => ForkName::Altair,
@@ -231,7 +231,7 @@ impl ChainSpec {
         match fork_name {
             ForkName::Base => self.genesis_fork_version,
             ForkName::Altair => self.altair_fork_version,
-            ForkName::Merge => self.merge_fork_version,
+            ForkName::Merge => self.bellatrix_fork_version,
         }
     }
 
@@ -240,7 +240,7 @@ impl ChainSpec {
         match fork_name {
             ForkName::Base => Some(Epoch::new(0)),
             ForkName::Altair => self.altair_fork_epoch,
-            ForkName::Merge => self.merge_fork_epoch,
+            ForkName::Merge => self.bellatrix_fork_epoch,
         }
     }
 
@@ -249,7 +249,7 @@ impl ChainSpec {
         match state {
             BeaconState::Base(_) => self.inactivity_penalty_quotient,
             BeaconState::Altair(_) => self.inactivity_penalty_quotient_altair,
-            BeaconState::Merge(_) => self.inactivity_penalty_quotient_merge,
+            BeaconState::Merge(_) => self.inactivity_penalty_quotient_bellatrix,
         }
     }
 
@@ -261,7 +261,7 @@ impl ChainSpec {
         match state {
             BeaconState::Base(_) => self.proportional_slashing_multiplier,
             BeaconState::Altair(_) => self.proportional_slashing_multiplier_altair,
-            BeaconState::Merge(_) => self.proportional_slashing_multiplier_merge,
+            BeaconState::Merge(_) => self.proportional_slashing_multiplier_bellatrix,
         }
     }
 
@@ -273,7 +273,7 @@ impl ChainSpec {
         match state {
             BeaconState::Base(_) => self.min_slashing_penalty_quotient,
             BeaconState::Altair(_) => self.min_slashing_penalty_quotient_altair,
-            BeaconState::Merge(_) => self.min_slashing_penalty_quotient_merge,
+            BeaconState::Merge(_) => self.min_slashing_penalty_quotient_bellatrix,
         }
     }
 
@@ -526,13 +526,13 @@ impl ChainSpec {
             /*
              * Merge hard fork params
              */
-            inactivity_penalty_quotient_merge: u64::checked_pow(2, 24)
+            inactivity_penalty_quotient_bellatrix: u64::checked_pow(2, 24)
                 .expect("pow does not overflow"),
-            min_slashing_penalty_quotient_merge: u64::checked_pow(2, 5)
+            min_slashing_penalty_quotient_bellatrix: u64::checked_pow(2, 5)
                 .expect("pow does not overflow"),
-            proportional_slashing_multiplier_merge: 3,
-            merge_fork_version: [0x02, 0x00, 0x00, 0x00],
-            merge_fork_epoch: None,
+            proportional_slashing_multiplier_bellatrix: 3,
+            bellatrix_fork_version: [0x02, 0x00, 0x00, 0x00],
+            bellatrix_fork_epoch: None,
             terminal_total_difficulty: Uint256::MAX
                 .checked_sub(Uint256::from(2u64.pow(10)))
                 .expect("subtraction does not overflow")
@@ -583,8 +583,8 @@ impl ChainSpec {
             altair_fork_version: [0x01, 0x00, 0x00, 0x01],
             altair_fork_epoch: None,
             // Merge
-            merge_fork_version: [0x02, 0x00, 0x00, 0x01],
-            merge_fork_epoch: None,
+            bellatrix_fork_version: [0x02, 0x00, 0x00, 0x01],
+            bellatrix_fork_epoch: None,
             // Other
             network_id: 2, // lighthouse testnet network id
             deposit_chain_id: 5,
@@ -611,9 +611,15 @@ pub struct Config {
     #[serde(default)]
     pub preset_base: String,
 
+    // TODO(merge): remove this default
+    #[serde(default = "default_terminal_total_difficulty")]
     #[serde(with = "eth2_serde_utils::quoted_u256")]
     pub terminal_total_difficulty: Uint256,
+    // TODO(merge): remove this default
+    #[serde(default = "default_terminal_block_hash")]
     pub terminal_block_hash: Hash256,
+    // TODO(merge): remove this default
+    #[serde(default = "default_terminal_block_hash_activation_epoch")]
     pub terminal_block_hash_activation_epoch: Epoch,
 
     #[serde(with = "eth2_serde_utils::quoted_u64")]
@@ -631,11 +637,15 @@ pub struct Config {
     #[serde(deserialize_with = "deserialize_fork_epoch")]
     pub altair_fork_epoch: Option<MaybeQuoted<Epoch>>,
 
+    // TODO(merge): remove this default
+    #[serde(default = "default_bellatrix_fork_version")]
     #[serde(with = "eth2_serde_utils::bytes_4_hex")]
-    merge_fork_version: [u8; 4],
+    bellatrix_fork_version: [u8; 4],
+    // TODO(merge): remove this default
+    #[serde(default = "default_bellatrix_fork_epoch")]
     #[serde(serialize_with = "serialize_fork_epoch")]
     #[serde(deserialize_with = "deserialize_fork_epoch")]
-    pub merge_fork_epoch: Option<MaybeQuoted<Epoch>>,
+    pub bellatrix_fork_epoch: Option<MaybeQuoted<Epoch>>,
 
     #[serde(with = "eth2_serde_utils::quoted_u64")]
     seconds_per_slot: u64,
@@ -659,6 +669,7 @@ pub struct Config {
     #[serde(with = "eth2_serde_utils::quoted_u64")]
     churn_limit_quotient: u64,
 
+    #[serde(skip_serializing_if = "Option::is_none")]
     proposer_score_boost: Option<MaybeQuoted<u64>>,
 
     #[serde(with = "eth2_serde_utils::quoted_u64")]
@@ -668,6 +679,29 @@ pub struct Config {
     deposit_contract_address: Address,
 }
 
+fn default_bellatrix_fork_version() -> [u8; 4] {
+    // This value shouldn't be used.
+    [0xff, 0xff, 0xff, 0xff]
+}
+
+fn default_bellatrix_fork_epoch() -> Option<MaybeQuoted<Epoch>> {
+    None
+}
+
+fn default_terminal_total_difficulty() -> Uint256 {
+    "115792089237316195423570985008687907853269984665640564039457584007913129638912"
+        .parse()
+        .unwrap()
+}
+
+fn default_terminal_block_hash() -> Hash256 {
+    Hash256::zero()
+}
+
+fn default_terminal_block_hash_activation_epoch() -> Epoch {
+    Epoch::new(u64::MAX)
+}
+
 impl Default for Config {
     fn default() -> Self {
         let chain_spec = MainnetEthSpec::default_spec();
@@ -733,9 +767,9 @@ impl Config {
             altair_fork_epoch: spec
                 .altair_fork_epoch
                 .map(|epoch| MaybeQuoted { value: epoch }),
-            merge_fork_version: spec.merge_fork_version,
-            merge_fork_epoch: spec
-                .merge_fork_epoch
+            bellatrix_fork_version: spec.bellatrix_fork_version,
+            bellatrix_fork_epoch: spec
+                .bellatrix_fork_epoch
                 .map(|epoch| MaybeQuoted { value: epoch }),
 
             seconds_per_slot: spec.seconds_per_slot,
@@ -778,8 +812,8 @@ impl Config {
             genesis_delay,
             altair_fork_version,
             altair_fork_epoch,
-            merge_fork_epoch,
-            merge_fork_version,
+            bellatrix_fork_epoch,
+            bellatrix_fork_version,
             seconds_per_slot,
             seconds_per_eth1_block,
             min_validator_withdrawability_delay,
@@ -807,8 +841,8 @@ impl Config {
             genesis_delay,
             altair_fork_version,
             altair_fork_epoch: altair_fork_epoch.map(|q| q.value),
-            merge_fork_epoch: merge_fork_epoch.map(|q| q.value),
-            merge_fork_version,
+            bellatrix_fork_epoch: bellatrix_fork_epoch.map(|q| q.value),
+            bellatrix_fork_version,
             seconds_per_slot,
             seconds_per_eth1_block,
             min_validator_withdrawability_delay,
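The `#[serde(default = "...")]` hooks are what keep older `config.yaml` files, which predate the Bellatrix and terminal-PoW fields, loadable: when a key is absent, serde calls the named function instead of failing. A minimal sketch of the mechanism, with hypothetical field names:

use serde::Deserialize; // serde = { version = "1", features = ["derive"] }, serde_yaml = "0.8"

fn default_fork_epoch() -> Option<u64> {
    None // "fork not yet scheduled"
}

#[derive(Debug, Deserialize)]
struct Config {
    preset_base: String,
    // Absent in old files: fall back to `default_fork_epoch` instead of erroring.
    #[serde(default = "default_fork_epoch")]
    bellatrix_fork_epoch: Option<u64>,
}

fn main() {
    // An old config with no BELLATRIX_FORK_EPOCH key still parses.
    let old: Config = serde_yaml::from_str("preset_base: mainnet").unwrap();
    assert_eq!(old.bellatrix_fork_epoch, None);
}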
diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/config_and_preset.rs
index 16d36c850c3..affda1a061d 100644
--- a/consensus/types/src/config_and_preset.rs
+++ b/consensus/types/src/config_and_preset.rs
@@ -1,5 +1,6 @@
-use crate::{AltairPreset, BasePreset, ChainSpec, Config, EthSpec};
+use crate::{AltairPreset, BasePreset, BellatrixPreset, ChainSpec, Config, EthSpec};
 use serde_derive::{Deserialize, Serialize};
+use serde_json::Value;
 use std::collections::HashMap;
 
 /// Fusion of a runtime-config with the compile-time preset values.
@@ -14,10 +15,12 @@ pub struct ConfigAndPreset {
     pub base_preset: BasePreset,
     #[serde(flatten)]
     pub altair_preset: AltairPreset,
-
+    // TODO(merge): re-enable
+    // #[serde(flatten)]
+    // pub bellatrix_preset: BellatrixPreset,
     /// The `extra_fields` map allows us to gracefully decode fields intended for future hard forks.
     #[serde(flatten)]
-    pub extra_fields: HashMap<String, String>,
+    pub extra_fields: HashMap<String, Value>,
 }
 
 impl ConfigAndPreset {
@@ -25,6 +28,8 @@ impl ConfigAndPreset {
         let config = Config::from_chain_spec::<T>(spec);
         let base_preset = BasePreset::from_chain_spec::<T>(spec);
         let altair_preset = AltairPreset::from_chain_spec::<T>(spec);
+        // TODO(merge): re-enable
+        let _bellatrix_preset = BellatrixPreset::from_chain_spec::<T>(spec);
         let extra_fields = HashMap::new();
 
         Self {
@@ -79,7 +84,7 @@ impl ConfigAndPreset {
             ),
         ];
         for (key, value) in fields {
-            self.extra_fields.insert(key.to_uppercase(), value);
+            self.extra_fields.insert(key.to_uppercase(), value.into());
         }
     }
 }
@@ -103,8 +108,13 @@ mod test {
         let mut yamlconfig = ConfigAndPreset::from_chain_spec::<MainnetEthSpec>(&mainnet_spec);
         let (k1, v1) = ("SAMPLE_HARDFORK_KEY1", "123456789");
         let (k2, v2) = ("SAMPLE_HARDFORK_KEY2", "987654321");
+        let (k3, v3) = ("SAMPLE_HARDFORK_KEY3", 32);
+        let (k4, v4) = ("SAMPLE_HARDFORK_KEY4", Value::Null);
         yamlconfig.extra_fields.insert(k1.into(), v1.into());
         yamlconfig.extra_fields.insert(k2.into(), v2.into());
+        yamlconfig.extra_fields.insert(k3.into(), v3.into());
+        yamlconfig.extra_fields.insert(k4.into(), v4);
+
         serde_yaml::to_writer(writer, &yamlconfig).expect("failed to write or serialize");
 
         let reader = OpenOptions::new()
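Switching `extra_fields` from `HashMap<String, String>` to `HashMap<String, Value>` lets the flattened catch-all map hold unknown keys of any JSON/YAML type (numbers, nulls, nested values), not just strings, which is exactly what the new test cases exercise. A small sketch of the catch-all pattern, assuming serde_json:

use serde::Deserialize;
use serde_json::Value;
use std::collections::HashMap;

#[derive(Debug, Deserialize)]
struct ConfigAndPreset {
    preset_base: String,
    // Any key we don't recognise lands here instead of causing an error,
    // whatever its type.
    #[serde(flatten)]
    extra_fields: HashMap<String, Value>,
}

fn main() {
    let json = r#"{"preset_base": "mainnet", "FUTURE_FORK_EPOCH": 123, "FUTURE_FLAG": null}"#;
    let cfg: ConfigAndPreset = serde_json::from_str(json).unwrap();
    assert_eq!(cfg.extra_fields["FUTURE_FORK_EPOCH"], Value::from(123));
    assert!(cfg.extra_fields["FUTURE_FLAG"].is_null());
}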
diff --git a/consensus/types/src/deposit.rs b/consensus/types/src/deposit.rs
index 4b201360abf..a347cf675cf 100644
--- a/consensus/types/src/deposit.rs
+++ b/consensus/types/src/deposit.rs
@@ -12,7 +12,9 @@ pub const DEPOSIT_TREE_DEPTH: usize = 32;
 ///
 /// Spec v0.12.1
 #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))]
-#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)]
+#[derive(
+    Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom,
+)]
 pub struct Deposit {
     pub proof: FixedVector<Hash256, U33>,
     pub data: DepositData,
diff --git a/consensus/types/src/deposit_data.rs b/consensus/types/src/deposit_data.rs
index d984f168f1b..6c5444e110f 100644
--- a/consensus/types/src/deposit_data.rs
+++ b/consensus/types/src/deposit_data.rs
@@ -11,7 +11,9 @@ use tree_hash_derive::TreeHash;
 ///
 /// Spec v0.12.1
 #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))]
-#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)]
+#[derive(
+    Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom,
+)]
 pub struct DepositData {
     pub pubkey: PublicKeyBytes,
     pub withdrawal_credentials: Hash256,
diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs
index 1b29fb34f7b..2fb253f12c1 100644
--- a/consensus/types/src/execution_payload.rs
+++ b/consensus/types/src/execution_payload.rs
@@ -1,4 +1,5 @@
 use crate::{test_utils::TestRandom, *};
+use derivative::Derivative;
 use serde_derive::{Deserialize, Serialize};
 use ssz::Encode;
 use ssz_derive::{Decode, Encode};
@@ -9,8 +10,9 @@ pub type Transaction<T> = VariableList<u8, T>;
 
 #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))]
 #[derive(
-    Default, Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom,
+    Default, Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, Derivative,
 )]
+#[derivative(PartialEq, Hash(bound = "T: EthSpec"))]
 #[serde(bound = "T: EthSpec")]
 pub struct ExecutionPayload<T: EthSpec> {
     pub parent_hash: Hash256,
diff --git a/consensus/types/src/fork_context.rs b/consensus/types/src/fork_context.rs
index 88a2f31264b..52b9294c8ca 100644
--- a/consensus/types/src/fork_context.rs
+++ b/consensus/types/src/fork_context.rs
@@ -36,11 +36,14 @@ impl ForkContext {
         }
 
         // Only add Merge to list of forks if it's enabled
-        // Note: `merge_fork_epoch == None` implies merge hasn't been activated yet on the config.
-        if spec.merge_fork_epoch.is_some() {
+        // Note: `bellatrix_fork_epoch == None` implies merge hasn't been activated yet on the config.
+        if spec.bellatrix_fork_epoch.is_some() {
             fork_to_digest.push((
                 ForkName::Merge,
-                ChainSpec::compute_fork_digest(spec.merge_fork_version, genesis_validators_root),
+                ChainSpec::compute_fork_digest(
+                    spec.bellatrix_fork_version,
+                    genesis_validators_root,
+                ),
             ));
         }
 
diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork_name.rs
index 54cc7a2451e..4a2e7620874 100644
--- a/consensus/types/src/fork_name.rs
+++ b/consensus/types/src/fork_name.rs
@@ -25,17 +25,17 @@ impl ForkName {
         match self {
             ForkName::Base => {
                 spec.altair_fork_epoch = None;
-                spec.merge_fork_epoch = None;
+                spec.bellatrix_fork_epoch = None;
                 spec
             }
             ForkName::Altair => {
                 spec.altair_fork_epoch = Some(Epoch::new(0));
-                spec.merge_fork_epoch = None;
+                spec.bellatrix_fork_epoch = None;
                 spec
             }
             ForkName::Merge => {
                 spec.altair_fork_epoch = Some(Epoch::new(0));
-                spec.merge_fork_epoch = Some(Epoch::new(0));
+                spec.bellatrix_fork_epoch = Some(Epoch::new(0));
                 spec
             }
         }
@@ -112,7 +112,7 @@ impl FromStr for ForkName {
         Ok(match fork_name.to_lowercase().as_ref() {
             "phase0" | "base" => ForkName::Base,
             "altair" => ForkName::Altair,
-            "merge" => ForkName::Merge,
+            "bellatrix" | "merge" => ForkName::Merge,
             _ => return Err(()),
         })
     }
@@ -123,7 +123,7 @@ impl Display for ForkName {
         match self {
             ForkName::Base => "phase0".fmt(f),
             ForkName::Altair => "altair".fmt(f),
-            ForkName::Merge => "merge".fmt(f),
+            ForkName::Merge => "bellatrix".fmt(f),
         }
     }
 }
@@ -181,4 +181,11 @@ mod test {
         assert_eq!(ForkName::from_str("NO_NAME"), Err(()));
         assert_eq!(ForkName::from_str("no_name"), Err(()));
     }
+
+    #[test]
+    fn fork_name_bellatrix_or_merge() {
+        assert_eq!(ForkName::from_str("bellatrix"), Ok(ForkName::Merge));
+        assert_eq!(ForkName::from_str("merge"), Ok(ForkName::Merge));
+        assert_eq!(ForkName::Merge.to_string(), "bellatrix");
+    }
 }
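The parse/display pair above is deliberately asymmetric: `from_str` keeps accepting the legacy "merge" spelling, while `Display` now emits the canonical "bellatrix", so user-facing output follows the spec's naming without breaking existing inputs. The same alias pattern in miniature:

use std::fmt;
use std::str::FromStr;

#[derive(Debug, PartialEq)]
enum Fork { Bellatrix }

impl FromStr for Fork {
    type Err = ();
    fn from_str(s: &str) -> Result<Self, ()> {
        match s.to_lowercase().as_str() {
            // Accept the legacy alias alongside the canonical name.
            "bellatrix" | "merge" => Ok(Fork::Bellatrix),
            _ => Err(()),
        }
    }
}

impl fmt::Display for Fork {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Always print the canonical spelling.
        "bellatrix".fmt(f)
    }
}

fn main() {
    assert_eq!("merge".parse::<Fork>(), Ok(Fork::Bellatrix));
    assert_eq!(Fork::Bellatrix.to_string(), "bellatrix");
}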
diff --git a/consensus/types/src/graffiti.rs b/consensus/types/src/graffiti.rs
index cecd6c2018e..f5f74b601b0 100644
--- a/consensus/types/src/graffiti.rs
+++ b/consensus/types/src/graffiti.rs
@@ -12,7 +12,7 @@ use tree_hash::TreeHash;
 pub const GRAFFITI_BYTES_LEN: usize = 32;
 
 /// The 32-byte `graffiti` field on a beacon block.
-#[derive(Default, Debug, PartialEq, Clone, Copy, Serialize, Deserialize)]
+#[derive(Default, Debug, PartialEq, Hash, Clone, Copy, Serialize, Deserialize)]
 #[serde(transparent)]
 #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))]
 pub struct Graffiti(#[serde(with = "serde_graffiti")] pub [u8; GRAFFITI_BYTES_LEN]);
diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs
index 5b1d3707ae8..5e27b667481 100644
--- a/consensus/types/src/lib.rs
+++ b/consensus/types/src/lib.rs
@@ -125,7 +125,7 @@ pub use crate::indexed_attestation::IndexedAttestation;
 pub use crate::participation_flags::ParticipationFlags;
 pub use crate::participation_list::ParticipationList;
 pub use crate::pending_attestation::PendingAttestation;
-pub use crate::preset::{AltairPreset, BasePreset};
+pub use crate::preset::{AltairPreset, BasePreset, BellatrixPreset};
 pub use crate::proposer_slashing::ProposerSlashing;
 pub use crate::relative_epoch::{Error as RelativeEpochError, RelativeEpoch};
 pub use crate::selection_proof::SelectionProof;
diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs
index 030c1234059..ccda1a06a06 100644
--- a/consensus/types/src/preset.rs
+++ b/consensus/types/src/preset.rs
@@ -150,6 +150,40 @@ impl AltairPreset {
     }
 }
 
+#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "UPPERCASE")]
+pub struct BellatrixPreset {
+    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    pub inactivity_penalty_quotient_bellatrix: u64,
+    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    pub min_slashing_penalty_quotient_bellatrix: u64,
+    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    pub proportional_slashing_multiplier_bellatrix: u64,
+    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    pub max_bytes_per_transaction: u64,
+    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    pub max_transactions_per_payload: u64,
+    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    pub bytes_per_logs_bloom: u64,
+    #[serde(with = "eth2_serde_utils::quoted_u64")]
+    pub max_extra_data_bytes: u64,
+}
+
+impl BellatrixPreset {
+    pub fn from_chain_spec<T: EthSpec>(spec: &ChainSpec) -> Self {
+        Self {
+            inactivity_penalty_quotient_bellatrix: spec.inactivity_penalty_quotient_bellatrix,
+            min_slashing_penalty_quotient_bellatrix: spec.min_slashing_penalty_quotient_bellatrix,
+            proportional_slashing_multiplier_bellatrix: spec
+                .proportional_slashing_multiplier_bellatrix,
+            max_bytes_per_transaction: T::max_bytes_per_transaction() as u64,
+            max_transactions_per_payload: T::max_transactions_per_payload() as u64,
+            bytes_per_logs_bloom: T::bytes_per_logs_bloom() as u64,
+            max_extra_data_bytes: T::max_extra_data_bytes() as u64,
+        }
+    }
+}
+
 #[cfg(test)]
 mod test {
     use super::*;
@@ -182,6 +216,9 @@ mod test {
 
         let altair: AltairPreset = preset_from_file(&preset_name, "altair.yaml");
         assert_eq!(altair, AltairPreset::from_chain_spec::<E>(&spec));
+
+        let bellatrix: BellatrixPreset = preset_from_file(&preset_name, "bellatrix.yaml");
+        assert_eq!(bellatrix, BellatrixPreset::from_chain_spec::<E>(&spec));
     }
 
     #[test]
diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs
index 383805f97fb..8d7df0cb02c 100644
--- a/consensus/types/src/signed_beacon_block.rs
+++ b/consensus/types/src/signed_beacon_block.rs
@@ -1,5 +1,6 @@
 use crate::*;
 use bls::Signature;
+use derivative::Derivative;
 use serde_derive::{Deserialize, Serialize};
 use ssz_derive::{Decode, Encode};
 use std::fmt;
@@ -41,19 +42,21 @@ impl From<SignedBeaconBlockHash> for Hash256 {
     variant_attributes(
         derive(
             Debug,
-            PartialEq,
             Clone,
             Serialize,
             Deserialize,
             Encode,
             Decode,
-            TreeHash
+            TreeHash,
+            Derivative,
         ),
+        derivative(PartialEq, Hash(bound = "E: EthSpec")),
         cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)),
         serde(bound = "E: EthSpec")
     )
 )]
-#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, TreeHash)]
+#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative)]
+#[derivative(PartialEq, Hash(bound = "E: EthSpec"))]
 #[serde(untagged)]
 #[serde(bound = "E: EthSpec")]
 #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))]
diff --git a/consensus/types/src/signed_beacon_block_header.rs b/consensus/types/src/signed_beacon_block_header.rs
index df7888ec25b..dc786beb6e9 100644
--- a/consensus/types/src/signed_beacon_block_header.rs
+++ b/consensus/types/src/signed_beacon_block_header.rs
@@ -2,11 +2,8 @@ use crate::{
     test_utils::TestRandom, BeaconBlockHeader, ChainSpec, Domain, EthSpec, Fork, Hash256,
     PublicKey, Signature, SignedRoot,
 };
-use derivative::Derivative;
 use serde_derive::{Deserialize, Serialize};
-use ssz::Encode;
 use ssz_derive::{Decode, Encode};
-use std::hash::{Hash, Hasher};
 use test_random_derive::TestRandom;
 use tree_hash_derive::TreeHash;
 
@@ -15,26 +12,13 @@ use tree_hash_derive::TreeHash;
 /// Spec v0.12.1
 #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))]
 #[derive(
-    Derivative, Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom,
+    Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom,
 )]
-#[derivative(PartialEq, Eq)]
 pub struct SignedBeaconBlockHeader {
     pub message: BeaconBlockHeader,
     pub signature: Signature,
 }
 
-/// Implementation of non-crypto-secure `Hash`, for use with `HashMap` and `HashSet`.
-///
-/// Guarantees `header1 == header2 -> hash(header1) == hash(header2)`.
-///
-/// Used in the slasher.
-impl Hash for SignedBeaconBlockHeader {
-    fn hash<H: Hasher>(&self, state: &mut H) {
-        self.message.hash(state);
-        self.signature.as_ssz_bytes().hash(state);
-    }
-}
-
 impl SignedBeaconBlockHeader {
     /// Verify that this block header was signed by `pubkey`.
     pub fn verify_signature(
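With `Signature` now implementing `Hash` (see the `crypto/bls` changes below), the hand-written hasher on `SignedBeaconBlockHeader` can become a plain derive. The only property its consumers need is the standard invariant that equal headers hash equally, which the derive preserves. A toy illustration of the consuming side, with simplified stand-in types:

use std::collections::HashSet;

// Simplified stand-ins for BeaconBlockHeader and Signature.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
struct Header { slot: u64, proposer_index: u64 }

#[derive(Debug, Clone, PartialEq, Eq, Hash)]
struct SignedHeader { message: Header, signature: [u8; 4] }

fn main() {
    let h = SignedHeader { message: Header { slot: 1, proposer_index: 9 }, signature: [0; 4] };
    let mut seen = HashSet::new();
    // Deduplicate headers, as the slasher does when scanning for double proposals.
    assert!(seen.insert(h.clone()));
    assert!(!seen.insert(h)); // equal header => equal hash => duplicate detected
}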
diff --git a/consensus/types/src/signed_voluntary_exit.rs b/consensus/types/src/signed_voluntary_exit.rs
index 49a9b534559..69f0e6e2c9f 100644
--- a/consensus/types/src/signed_voluntary_exit.rs
+++ b/consensus/types/src/signed_voluntary_exit.rs
@@ -10,7 +10,9 @@ use tree_hash_derive::TreeHash;
 ///
 /// Spec v0.12.1
 #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))]
-#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)]
+#[derive(
+    Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom,
+)]
 pub struct SignedVoluntaryExit {
     pub message: VoluntaryExit,
     pub signature: Signature,
diff --git a/consensus/types/src/sync_aggregate.rs b/consensus/types/src/sync_aggregate.rs
index 781c67374eb..2292b021118 100644
--- a/consensus/types/src/sync_aggregate.rs
+++ b/consensus/types/src/sync_aggregate.rs
@@ -1,6 +1,7 @@
 use crate::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT;
 use crate::test_utils::TestRandom;
 use crate::{AggregateSignature, BitVector, EthSpec, SyncCommitteeContribution};
+use derivative::Derivative;
 use safe_arith::{ArithError, SafeArith};
 use serde_derive::{Deserialize, Serialize};
 use ssz_derive::{Decode, Encode};
@@ -20,7 +21,10 @@ impl From<ArithError> for Error {
 }
 
 #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))]
-#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)]
+#[derive(
+    Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, Derivative,
+)]
+#[derivative(PartialEq, Hash(bound = "T: EthSpec"))]
 #[serde(bound = "T: EthSpec")]
 pub struct SyncAggregate<T: EthSpec> {
     pub sync_committee_bits: BitVector<T::SyncCommitteeSize>,
diff --git a/consensus/types/src/voluntary_exit.rs b/consensus/types/src/voluntary_exit.rs
index 66d2f009479..cc10632d07c 100644
--- a/consensus/types/src/voluntary_exit.rs
+++ b/consensus/types/src/voluntary_exit.rs
@@ -12,7 +12,9 @@ use tree_hash_derive::TreeHash;
 ///
 /// Spec v0.12.1
 #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))]
-#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)]
+#[derive(
+    Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom,
+)]
 pub struct VoluntaryExit {
     /// Earliest epoch when voluntary exit can be processed.
     pub epoch: Epoch,
diff --git a/crypto/bls/src/generic_aggregate_signature.rs b/crypto/bls/src/generic_aggregate_signature.rs
index 2001de042b4..fdb59626fb2 100644
--- a/crypto/bls/src/generic_aggregate_signature.rs
+++ b/crypto/bls/src/generic_aggregate_signature.rs
@@ -9,6 +9,7 @@ use serde::de::{Deserialize, Deserializer};
 use serde::ser::{Serialize, Serializer};
 use ssz::{Decode, Encode};
 use std::fmt;
+use std::hash::{Hash, Hasher};
 use std::marker::PhantomData;
 use tree_hash::TreeHash;
 
@@ -264,6 +265,18 @@ where
     impl_tree_hash!(SIGNATURE_BYTES_LEN);
 }
 
+/// Hashes the `self.serialize()` bytes.
+#[allow(clippy::derive_hash_xor_eq)]
+impl<Pub, AggPub, Sig, AggSig> Hash for GenericAggregateSignature<Pub, AggPub, Sig, AggSig>
+where
+    Sig: TSignature<Pub>,
+    AggSig: TAggregateSignature<Pub, AggPub, Sig>,
+{
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        self.serialize().hash(state);
+    }
+}
+
 impl<Pub, AggPub, Sig, AggSig> fmt::Display for GenericAggregateSignature<Pub, AggPub, Sig, AggSig>
 where
     Sig: TSignature<Pub>,
diff --git a/crypto/bls/src/generic_signature.rs b/crypto/bls/src/generic_signature.rs
index f3aeeb5598f..10ef75fc680 100644
--- a/crypto/bls/src/generic_signature.rs
+++ b/crypto/bls/src/generic_signature.rs
@@ -7,6 +7,7 @@ use serde::de::{Deserialize, Deserializer};
 use serde::ser::{Serialize, Serializer};
 use ssz::{Decode, Encode};
 use std::fmt;
+use std::hash::{Hash, Hasher};
 use std::marker::PhantomData;
 use tree_hash::TreeHash;
 
@@ -145,6 +146,13 @@ impl<Pub: TPublicKey, Sig: TSignature<Pub>> TreeHash for GenericSignature<Pub, Sig> {
     impl_tree_hash!(SIGNATURE_BYTES_LEN);
 }
 
+/// Hashes the `self.serialize()` bytes.
+#[allow(clippy::derive_hash_xor_eq)]
+impl<Pub: TPublicKey, Sig: TSignature<Pub>> Hash for GenericSignature<Pub, Sig> {
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        self.serialize().hash(state);
+    }
+}
+
 impl<Pub: TPublicKey, Sig: TSignature<Pub>> fmt::Display for GenericSignature<Pub, Sig> {
     impl_display!();
 }
diff --git a/crypto/bls/src/generic_signature_bytes.rs b/crypto/bls/src/generic_signature_bytes.rs
index b5c0284971c..aa33c90d0c3 100644
--- a/crypto/bls/src/generic_signature_bytes.rs
+++ b/crypto/bls/src/generic_signature_bytes.rs
@@ -9,6 +9,7 @@ use serde::ser::{Serialize, Serializer};
 use ssz::{Decode, Encode};
 use std::convert::TryInto;
 use std::fmt;
+use std::hash::{Hash, Hasher};
 use std::marker::PhantomData;
 use tree_hash::TreeHash;
 
@@ -84,6 +85,12 @@ impl<Pub, Sig> PartialEq for GenericSignatureBytes<Pub, Sig> {
     }
 }
 
+impl<Pub, Sig> Hash for GenericSignatureBytes<Pub, Sig> {
+    fn hash<H: Hasher>(&self, hasher: &mut H) {
+        self.bytes.hash(hasher);
+    }
+}
+
 /// Serializes the `GenericSignature` in compressed form, storing the bytes in the newly created `Self`.
 impl<Pub, Sig> From<GenericSignature<Pub, Sig>> for GenericSignatureBytes<Pub, Sig>
 where
diff --git a/crypto/bls/src/impls/fake_crypto.rs b/crypto/bls/src/impls/fake_crypto.rs
index 35582df380e..f2d8b79b986 100644
--- a/crypto/bls/src/impls/fake_crypto.rs
+++ b/crypto/bls/src/impls/fake_crypto.rs
@@ -113,6 +113,14 @@ impl PartialEq for Signature {
     }
 }
 
+impl Eq for Signature {}
+
+impl std::hash::Hash for Signature {
+    fn hash<H: std::hash::Hasher>(&self, hasher: &mut H) {
+        self.0.hash(hasher);
+    }
+}
+
 #[derive(Clone)]
 pub struct AggregateSignature([u8; SIGNATURE_BYTES_LEN]);
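The `#[allow(clippy::derive_hash_xor_eq)]` impls above all follow one pattern: hash the canonical serialization. The `a == b => hash(a) == hash(b)` contract then holds as long as equality is also defined over the same serialized bytes. A standalone sketch of the pattern, with a toy signature type:

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

struct Sig { point: u64 } // toy stand-in for a BLS signature

impl Sig {
    // Canonical (compressed) serialization; equality is defined over this.
    fn serialize(&self) -> [u8; 8] { self.point.to_be_bytes() }
}

impl PartialEq for Sig {
    fn eq(&self, other: &Self) -> bool { self.serialize() == other.serialize() }
}
impl Eq for Sig {}

impl Hash for Sig {
    // Hashing the exact bytes that `eq` compares preserves the invariant
    // `a == b => hash(a) == hash(b)`.
    fn hash<H: Hasher>(&self, state: &mut H) { self.serialize().hash(state); }
}

fn main() {
    let (a, b) = (Sig { point: 7 }, Sig { point: 7 });
    let mut h1 = DefaultHasher::new();
    let mut h2 = DefaultHasher::new();
    a.hash(&mut h1);
    b.hash(&mut h2);
    assert!(a == b && h1.finish() == h2.finish());
}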
diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml
index af58d5e8c4d..2b9541de3f6 100644
--- a/lcli/Cargo.toml
+++ b/lcli/Cargo.toml
@@ -1,7 +1,7 @@
 [package]
 name = "lcli"
 description = "Lighthouse CLI (modeled after zcli)"
-version = "2.0.1"
+version = "2.1.1"
 authors = ["Paul Hauner <paul@paulhauner.com>"]
 edition = "2018"
diff --git a/lcli/Dockerfile b/lcli/Dockerfile
index bddf39a43ae..5a4177ead90 100644
--- a/lcli/Dockerfile
+++ b/lcli/Dockerfile
@@ -1,7 +1,7 @@
 # `lcli` requires the full project to be in scope, so this should be built either:
 # - from the `lighthouse` dir with the command: `docker build -f ./lcli/Dockerfile .`
 # - from the current directory with the command: `docker build -f ./Dockerfile ../`
-FROM rust:1.53.0 AS builder
+FROM rust:1.56.1-bullseye AS builder
 RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake
 COPY . lighthouse
 ARG PORTABLE
diff --git a/lcli/src/etl/block_efficiency.rs b/lcli/src/etl/block_efficiency.rs
index 45452735dc0..87175ace892 100644
--- a/lcli/src/etl/block_efficiency.rs
+++ b/lcli/src/etl/block_efficiency.rs
@@ -274,6 +274,9 @@ pub async fn run<T: EthSpec>(matches: &ArgMatches<'_>) -> Result<(), String> {
             // Add them to the set.
             included_attestations_set.extend(attestations_in_block.clone());
 
+            // Remove expired available attestations.
+            available_attestations_set.retain(|x| x.slot >= (slot.as_u64().saturating_sub(32)));
+
             // Don't write data from the initialization epoch.
             if epoch != initialization_epoch {
                 let included = attestations_in_block.len();
@@ -309,9 +312,6 @@ pub async fn run<T: EthSpec>(matches: &ArgMatches<'_>) -> Result<(), String> {
                 }
             }
         }
-
-        // Remove expired available attestations.
-        available_attestations_set.retain(|x| x.slot >= (slot.as_u64().saturating_sub(32)));
     }
 
     let mut offline = "None".to_string();
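Moving the `retain` call ahead of the accounting prunes attestations that have fallen outside the 32-slot inclusion window before they can be counted as still "available". The sliding-window primitive in isolation, with a hypothetical pared-down attestation type:

const INCLUSION_WINDOW: u64 = 32;

#[derive(Debug, PartialEq, Eq, Clone)]
struct Att { slot: u64 } // hypothetical pared-down attestation

/// Drop everything that fell out of the last `INCLUSION_WINDOW` slots.
fn prune(available: &mut Vec<Att>, current_slot: u64) {
    available.retain(|a| a.slot >= current_slot.saturating_sub(INCLUSION_WINDOW));
}

fn main() {
    let mut set = vec![Att { slot: 1 }, Att { slot: 40 }, Att { slot: 70 }];
    prune(&mut set, 71);
    // Only attestations from slots 39 and later survive.
    assert_eq!(set, vec![Att { slot: 40 }, Att { slot: 70 }]);
}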
diff --git a/lcli/src/generate_bootnode_enr.rs b/lcli/src/generate_bootnode_enr.rs
index cb65bb4380f..6f39392d121 100644
--- a/lcli/src/generate_bootnode_enr.rs
+++ b/lcli/src/generate_bootnode_enr.rs
@@ -39,7 +39,7 @@ pub fn run<T: EthSpec>(matches: &ArgMatches) -> Result<(), String> {
         next_fork_version: genesis_fork_version,
         next_fork_epoch: Epoch::max_value(), // FAR_FUTURE_EPOCH
     };
-    let enr = build_enr::<T>(&enr_key, &config, enr_fork_id)
+    let enr = build_enr::<T>(&enr_key, &config, &enr_fork_id)
         .map_err(|e| format!("Unable to create ENR: {:?}", e))?;
 
     fs::create_dir_all(&output_dir).map_err(|e| format!("Unable to create output-dir: {:?}", e))?;
diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs
index 630d65963a0..83dcc2e7198 100644
--- a/lcli/src/new_testnet.rs
+++ b/lcli/src/new_testnet.rs
@@ -63,7 +63,7 @@ pub fn run<T: EthSpec>(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Result<(), String> {
     }
 
     if let Some(fork_epoch) = parse_optional(matches, "merge-fork-epoch")? {
-        spec.merge_fork_epoch = Some(fork_epoch);
+        spec.bellatrix_fork_epoch = Some(fork_epoch);
     }
 
     let genesis_state_bytes = if matches.is_present("interop-genesis-state") {
diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml
index 9725155e9c3..130322e0e9c 100644
--- a/lighthouse/Cargo.toml
+++ b/lighthouse/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "lighthouse"
-version = "2.0.1"
+version = "2.1.1"
 authors = ["Sigma Prime <info@sigmaprime.io>"]
 edition = "2018"
 autotests = false
diff --git a/lighthouse/environment/tests/testnet_dir/config.yaml b/lighthouse/environment/tests/testnet_dir/config.yaml
index ac5403efdbf..8424a2fdc34 100644
--- a/lighthouse/environment/tests/testnet_dir/config.yaml
+++ b/lighthouse/environment/tests/testnet_dir/config.yaml
@@ -33,8 +33,8 @@ GENESIS_DELAY: 604800
 ALTAIR_FORK_VERSION: 0x01000000
 ALTAIR_FORK_EPOCH: 18446744073709551615
 # Merge
-MERGE_FORK_VERSION: 0x02000000
-MERGE_FORK_EPOCH: 18446744073709551615
+BELLATRIX_FORK_VERSION: 0x02000000
+BELLATRIX_FORK_EPOCH: 18446744073709551615
 # Sharding
 SHARDING_FORK_VERSION: 0x03000000
 SHARDING_FORK_EPOCH: 18446744073709551615
diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs
index 73d5a20657d..6d03cafe10b 100644
--- a/lighthouse/tests/beacon_node.rs
+++ b/lighthouse/tests/beacon_node.rs
@@ -11,7 +11,7 @@ use std::process::Command;
 use std::str::FromStr;
 use std::string::ToString;
 use tempfile::TempDir;
-use types::{Checkpoint, Epoch, Hash256};
+use types::{Address, Checkpoint, Epoch, Hash256};
 
 const DEFAULT_ETH1_ENDPOINT: &str = "http://localhost:8545/";
 
@@ -206,6 +206,24 @@ fn eth1_purge_cache_flag() {
         .with_config(|config| assert!(config.eth1.purge_cache));
 }
 
+// Tests for Merge flags.
+#[test]
+fn merge_fee_recipient_flag() {
+    CommandLineTest::new()
+        .flag("merge", None)
+        .flag(
+            "fee-recipient",
+            Some("0x00000000219ab540356cbb839cbe05303d7705fa"),
+        )
+        .run_with_zero_port()
+        .with_config(|config| {
+            assert_eq!(
+                config.suggested_fee_recipient,
+                Some(Address::from_str("0x00000000219ab540356cbb839cbe05303d7705fa").unwrap())
+            )
+        });
+}
+
 // Tests for Network flags.
 #[test]
 fn network_dir_flag() {
diff --git a/lighthouse/tests/boot_node.rs b/lighthouse/tests/boot_node.rs
index ac23002c376..7b3c3acb3ca 100644
--- a/lighthouse/tests/boot_node.rs
+++ b/lighthouse/tests/boot_node.rs
@@ -139,9 +139,25 @@ fn enr_port_flag() {
         })
 }
 
-// TODO add tests for flags `enable-enr-auto-update` and `disable-packet-filter`.
-//
-// These options end up in `Discv5Config`, which doesn't support serde (de)serialization.
+#[test]
+fn disable_packet_filter_flag() {
+    CommandLineTest::new()
+        .flag("disable-packet-filter", None)
+        .run_with_ip()
+        .with_config(|config| {
+            assert_eq!(config.disable_packet_filter, true);
+        });
+}
+
+#[test]
+fn enable_enr_auto_update_flag() {
+    CommandLineTest::new()
+        .flag("enable-enr-auto-update", None)
+        .run_with_ip()
+        .with_config(|config| {
+            assert_eq!(config.enable_enr_auto_update, true);
+        });
+}
 
 #[test]
 fn network_dir_flag() {
diff --git a/scripts/local_testnet/beacon_node.sh b/scripts/local_testnet/beacon_node.sh
index 883c6660294..8151aac2490 100755
--- a/scripts/local_testnet/beacon_node.sh
+++ b/scripts/local_testnet/beacon_node.sh
@@ -4,6 +4,8 @@
 # Starts a beacon node based upon a genesis state created by `./setup.sh`.
 #
 
+set -Eeuo pipefail
+
 source ./vars.env
 
 SUBSCRIBE_ALL_SUBNETS=
diff --git a/scripts/local_testnet/bootnode.sh b/scripts/local_testnet/bootnode.sh
index bef207a6947..ca02a24140f 100755
--- a/scripts/local_testnet/bootnode.sh
+++ b/scripts/local_testnet/bootnode.sh
@@ -5,6 +5,8 @@
 # Starts a bootnode from the generated enr.
 #
 
+set -Eeuo pipefail
+
 source ./vars.env
 
 echo "Generating bootnode enr"
diff --git a/scripts/local_testnet/clean.sh b/scripts/local_testnet/clean.sh
index bc4db74c619..b01b1a2dffb 100755
--- a/scripts/local_testnet/clean.sh
+++ b/scripts/local_testnet/clean.sh
@@ -4,6 +4,8 @@
 # Deletes all files associated with the local testnet.
 #
 
+set -Eeuo pipefail
+
 source ./vars.env
 
 if [ -d $DATADIR ]; then
diff --git a/scripts/local_testnet/ganache_test_node.sh b/scripts/local_testnet/ganache_test_node.sh
index 762700dbd63..69edc1e7704 100755
--- a/scripts/local_testnet/ganache_test_node.sh
+++ b/scripts/local_testnet/ganache_test_node.sh
@@ -1,5 +1,7 @@
 #!/usr/bin/env bash
 
+set -Eeuo pipefail
+
 source ./vars.env
 
 exec ganache-cli \
diff --git a/scripts/local_testnet/kill_processes.sh b/scripts/local_testnet/kill_processes.sh
index c729a1645ae..4f52a5f256e 100755
--- a/scripts/local_testnet/kill_processes.sh
+++ b/scripts/local_testnet/kill_processes.sh
@@ -1,6 +1,8 @@
 #!/usr/bin/env bash
 # Kill processes
 
+set -Eeuo pipefail
+
 # First parameter is the file with
 # one pid per line.
 if [ -f "$1" ]; then
diff --git a/scripts/local_testnet/print_logs.sh b/scripts/local_testnet/print_logs.sh
new file mode 100755
index 00000000000..2a9e7822a6f
--- /dev/null
+++ b/scripts/local_testnet/print_logs.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+# Print the tail of all log files output by the local testnet
+
+set -Eeuo pipefail
+
+source ./vars.env
+
+for f in "$TESTNET_DIR"/*.log
+do
+    [[ -e "$f" ]] || break # handle the case of no *.log files
+    echo "============================================================================="
+    echo "$f"
+    echo "============================================================================="
+    tail "$f"
+    echo ""
+done
diff --git a/scripts/local_testnet/reset_genesis_time.sh b/scripts/local_testnet/reset_genesis_time.sh
index c7332e327ed..68c8fb6b4cb 100755
--- a/scripts/local_testnet/reset_genesis_time.sh
+++ b/scripts/local_testnet/reset_genesis_time.sh
@@ -4,6 +4,8 @@
 # Resets the beacon state genesis time to now.
 #
 
+set -Eeuo pipefail
+
 source ./vars.env
 
 NOW=$(date +%s)
diff --git a/scripts/local_testnet/start_local_testnet.sh b/scripts/local_testnet/start_local_testnet.sh
index cdae9b2ba2d..7126e4c5dc0 100755
--- a/scripts/local_testnet/start_local_testnet.sh
+++ b/scripts/local_testnet/start_local_testnet.sh
@@ -1,6 +1,8 @@
 #!/usr/bin/env bash
 # Start all processes necessary to create a local testnet
 
+set -Eeuo pipefail
+
 source ./vars.env
 
 # VC_COUNT is defaulted in vars.env
@@ -49,7 +51,7 @@ for (( bn=1; bn<=$BN_COUNT; bn++ )); do
 done
 for (( vc=1; vc<=$VC_COUNT; vc++ )); do
     touch $LOG_DIR/validator_node_$vc.log
-done 
+done
 
 # Sleep with a message
 sleeping() {
@@ -67,7 +69,7 @@ execute_command() {
     EX_NAME=$2
     shift
    shift
-    CMD="$EX_NAME $@ &>> $LOG_DIR/$LOG_NAME"
+    CMD="$EX_NAME $@ >> $LOG_DIR/$LOG_NAME 2>&1"
     echo "executing: $CMD"
     echo "$CMD" > "$LOG_DIR/$LOG_NAME"
     eval "$CMD &"
@@ -89,7 +91,7 @@ execute_command_add_PID() {
 
 # Delay to let ganache-cli get started
 execute_command_add_PID ganache_test_node.log ./ganache_test_node.sh
-sleeping 2
+sleeping 10
 
 # Delay to get data setup
 execute_command setup.log ./setup.sh
diff --git a/scripts/local_testnet/stop_local_testnet.sh b/scripts/local_testnet/stop_local_testnet.sh
index 47f390ba766..b1c3188ee3a 100755
--- a/scripts/local_testnet/stop_local_testnet.sh
+++ b/scripts/local_testnet/stop_local_testnet.sh
@@ -1,6 +1,8 @@
 #!/usr/bin/env bash
 # Stop all processes that were started with start_local_testnet.sh
 
+set -Eeuo pipefail
+
 source ./vars.env
 
 PID_FILE=$TESTNET_DIR/PIDS.pid
diff --git a/scripts/local_testnet/validator_client.sh b/scripts/local_testnet/validator_client.sh
index 6755384be59..5aa75dfe2d0 100755
--- a/scripts/local_testnet/validator_client.sh
+++ b/scripts/local_testnet/validator_client.sh
@@ -6,6 +6,8 @@
 #
 # Usage: ./validator_client.sh
 
+set -Eeuo pipefail
+
 source ./vars.env
 
 DEBUG_LEVEL=${3:-info}
diff --git a/scripts/local_testnet/vars.env b/scripts/local_testnet/vars.env
index f88e9eb716e..208fbb6d856 100644
--- a/scripts/local_testnet/vars.env
+++ b/scripts/local_testnet/vars.env
@@ -43,3 +43,6 @@ SECONDS_PER_SLOT=3
 
 # Seconds per Eth1 block
 SECONDS_PER_ETH1_BLOCK=1
+
+# Command line arguments for validator client
+VC_ARGS=""
diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml
index 01beda7e9c0..c319c2de1a2 100644
--- a/slasher/Cargo.toml
+++ b/slasher/Cargo.toml
@@ -14,7 +14,7 @@ lazy_static = "1.4.0"
 lighthouse_metrics = { path = "../common/lighthouse_metrics" }
 filesystem = { path = "../common/filesystem" }
 mdbx = { package = "libmdbx", version = "0.1.0" }
-lru = "0.6.6"
+lru = "0.7.1"
 parking_lot = "0.11.0"
 rand = "0.7.3"
 safe_arith = { path = "../consensus/safe_arith" }
diff --git a/testing/antithesis/Dockerfile.libvoidstar b/testing/antithesis/Dockerfile.libvoidstar
new file mode 100644
index 00000000000..d9084af3480
--- /dev/null
+++ b/testing/antithesis/Dockerfile.libvoidstar
@@ -0,0 +1,26 @@
+FROM rust:1.56.1-bullseye AS builder
+RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev
+COPY . lighthouse
+
+# build lighthouse directly with a cargo build command, bypassing the makefile
+RUN cd lighthouse && LD_LIBRARY_PATH=/lighthouse/testing/antithesis/libvoidstar/ RUSTFLAGS="-Cpasses=sancov -Cllvm-args=-sanitizer-coverage-level=3 -Cllvm-args=-sanitizer-coverage-trace-pc-guard -Ccodegen-units=1 -Cdebuginfo=2 -L/lighthouse/testing/antithesis/libvoidstar/ -lvoidstar" cargo build --release --manifest-path lighthouse/Cargo.toml --target x86_64-unknown-linux-gnu --features modern --verbose --bin lighthouse
+
+# build lcli binary directly with cargo install command, bypassing the makefile
+RUN cargo install --path /lighthouse/lcli --force --locked
+
+FROM ubuntu:latest
+RUN apt-get update && apt-get -y upgrade && apt-get install -y --no-install-recommends \
+    libssl-dev \
+    ca-certificates \
+  && apt-get clean \
+  && rm -rf /var/lib/apt/lists/*
+
+# create and move the libvoidstar file
+RUN mkdir libvoidstar
+COPY --from=builder /lighthouse/testing/antithesis/libvoidstar/libvoidstar.so /usr/lib/libvoidstar.so
+
+# set the env variable to avoid having to always set it
+ENV LD_LIBRARY_PATH=/usr/lib
+# move the lighthouse binary and lcli binary
+COPY --from=builder /lighthouse/target/x86_64-unknown-linux-gnu/release/lighthouse /usr/local/bin/lighthouse
+COPY --from=builder /lighthouse/target/release/lcli /usr/local/bin/lcli
diff --git a/testing/antithesis/libvoidstar/libvoidstar.so b/testing/antithesis/libvoidstar/libvoidstar.so
new file mode 100644
index 00000000000..0f8a0f23c3f
Binary files /dev/null and b/testing/antithesis/libvoidstar/libvoidstar.so differ
diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile
index 8c2a0f10e35..3cd6d17c0c4 100644
--- a/testing/ef_tests/Makefile
+++ b/testing/ef_tests/Makefile
@@ -1,4 +1,4 @@
-TESTS_TAG := v1.1.6
+TESTS_TAG := v1.1.8
 TESTS = general minimal mainnet
 TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS))
 
diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py
index ce9e1d6b4ed..2eb4ce5407c 100755
--- a/testing/ef_tests/check_all_files_accessed.py
+++ b/testing/ef_tests/check_all_files_accessed.py
@@ -30,18 +30,11 @@
     # LightClientUpdate
     "tests/.*/.*/ssz_static/LightClientUpdate",
     # LightClientSnapshot
-    "tests/minimal/altair/ssz_static/LightClientSnapshot",
-    "tests/mainnet/altair/ssz_static/LightClientSnapshot",
-    "tests/minimal/merge/ssz_static/LightClientSnapshot",
-    "tests/mainnet/merge/ssz_static/LightClientSnapshot",
+    "tests/.*/.*/ssz_static/LightClientSnapshot",
     # Merkle-proof tests for light clients
-    "tests/mainnet/altair/merkle/single_proof",
-    "tests/minimal/altair/merkle/single_proof",
-    "tests/mainnet/merge/merkle/single_proof",
-    "tests/minimal/merge/merkle/single_proof",
-    # FIXME(merge): Merge transition tests are now available but not yet passing
-    "tests/mainnet/merge/transition/",
-    "tests/minimal/merge/transition/",
+    "tests/.*/.*/merkle/single_proof",
+    # One of the EF researchers likes to pack the tarballs on a Mac
+    ".*\.DS_Store.*"
 ]
 
 def normalize_path(path):
diff --git a/testing/ef_tests/src/cases/fork.rs b/testing/ef_tests/src/cases/fork.rs
index 868e4a0c5ae..ae12447abf3 100644
--- a/testing/ef_tests/src/cases/fork.rs
+++ b/testing/ef_tests/src/cases/fork.rs
@@ -3,7 +3,7 @@ use crate::case_result::compare_beacon_state_results_without_caches;
 use crate::cases::common::previous_fork;
 use crate::decode::{ssz_decode_state, yaml_decode_file};
 use serde_derive::Deserialize;
-use state_processing::upgrade::upgrade_to_altair;
+use state_processing::upgrade::{upgrade_to_altair, upgrade_to_bellatrix};
 use types::{BeaconState, ForkName};
 
 #[derive(Debug, Clone, Default, Deserialize)]
@@ -49,10 +49,7 @@ impl<E: EthSpec> Case for ForkTest<E> {
     fn is_enabled_for_fork(fork_name: ForkName) -> bool {
         // Upgrades exist targeting all forks except phase0/base.
         // Fork tests also need BLS.
-        // FIXME(merge): enable merge tests once available
-        cfg!(not(feature = "fake_crypto"))
-            && fork_name != ForkName::Base
-            && fork_name != ForkName::Merge
+        cfg!(not(feature = "fake_crypto")) && fork_name != ForkName::Base
     }
 
     fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> {
@@ -61,8 +58,9 @@ impl<E: EthSpec> Case for ForkTest<E> {
         let spec = &E::default_spec();
 
         let mut result = match fork_name {
+            ForkName::Base => panic!("phase0 not supported"),
             ForkName::Altair => upgrade_to_altair(&mut result_state, spec).map(|_| result_state),
-            _ => panic!("unknown fork: {:?}", fork_name),
+            ForkName::Merge => upgrade_to_bellatrix(&mut result_state, spec).map(|_| result_state),
         };
 
         compare_beacon_state_results_without_caches(&mut result, &mut expected)
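Replacing the catch-all `_ => panic!` with named arms makes the match exhaustive over `ForkName`: when a future fork variant is added, the compiler forces this dispatch to be revisited instead of letting it panic at runtime. A reduced sketch of that design choice, with hypothetical upgrade functions:

#[derive(Debug, Clone, Copy)]
enum ForkName { Base, Altair, Merge }

#[derive(Debug)]
struct State { fork: ForkName }

fn upgrade_to_altair(s: &mut State) -> Result<(), String> {
    s.fork = ForkName::Altair; // hypothetical stand-in for the real upgrade
    Ok(())
}

fn upgrade_to_bellatrix(s: &mut State) -> Result<(), String> {
    s.fork = ForkName::Merge; // hypothetical stand-in for the real upgrade
    Ok(())
}

fn upgrade(state: &mut State, target: ForkName) -> Result<(), String> {
    // Exhaustive: adding a fork variant becomes a compile error here,
    // not a runtime panic in a catch-all arm.
    match target {
        ForkName::Base => panic!("phase0 is not an upgrade target"),
        ForkName::Altair => upgrade_to_altair(state),
        ForkName::Merge => upgrade_to_bellatrix(state),
    }
}

fn main() {
    let mut state = State { fork: ForkName::Base };
    upgrade(&mut state, ForkName::Merge).unwrap();
    println!("{:?}", state);
}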
diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs
index ecdfebc2863..608429a9cb2 100644
--- a/testing/ef_tests/src/cases/fork_choice.rs
+++ b/testing/ef_tests/src/cases/fork_choice.rs
@@ -154,15 +154,10 @@ impl<E: EthSpec> Case for ForkChoiceTest<E> {
     fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> {
         let tester = Tester::new(self, fork_choice_spec::<E>(fork_name))?;
 
-        // TODO(merge): enable these tests before production.
-        // This test will fail until this PR is merged and released:
-        //
-        // https://github.com/ethereum/consensus-specs/pull/2760
-        if self.description == "shorter_chain_but_heavier_weight"
-            // This test is skipped until we can do retrospective confirmations of the terminal
-            // block after an optimistic sync.
-            || self.description == "block_lookup_failed"
-        {
+        // TODO(merge): re-enable this test before production.
+        // This test is skipped until we can do retrospective confirmations of the terminal
+        // block after an optimistic sync.
+        if self.description == "block_lookup_failed" {
             return Err(Error::SkippedKnownFailure);
         };
 
diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs
index d833846e471..195df7f3822 100644
--- a/testing/ef_tests/src/cases/operations.rs
+++ b/testing/ef_tests/src/cases/operations.rs
@@ -239,7 +239,6 @@ impl<E: EthSpec> Operation<E> for ExecutionPayload<E> {
         spec: &ChainSpec,
         extra: &Operations<E, Self>,
     ) -> Result<(), BlockProcessingError> {
-        // FIXME(merge): we may want to plumb the validity bool into state processing
         let valid = extra
             .execution_metadata
             .as_ref()
diff --git a/testing/ef_tests/src/cases/transition.rs b/testing/ef_tests/src/cases/transition.rs
index 8e6ba226731..d2b1bb2c624 100644
--- a/testing/ef_tests/src/cases/transition.rs
+++ b/testing/ef_tests/src/cases/transition.rs
@@ -39,7 +39,8 @@ impl<E: EthSpec> LoadCase for TransitionTest<E> {
                 spec.altair_fork_epoch = Some(metadata.fork_epoch);
             }
             ForkName::Merge => {
-                spec.merge_fork_epoch = Some(metadata.fork_epoch);
+                spec.altair_fork_epoch = Some(Epoch::new(0));
+                spec.bellatrix_fork_epoch = Some(metadata.fork_epoch);
             }
         }
 
@@ -73,10 +74,7 @@ impl<E: EthSpec> Case for TransitionTest<E> {
     fn is_enabled_for_fork(fork_name: ForkName) -> bool {
         // Upgrades exist targeting all forks except phase0/base.
         // Transition tests also need BLS.
-        // FIXME(merge): Merge transition tests are now available but not yet passing
-        cfg!(not(feature = "fake_crypto"))
-            && fork_name != ForkName::Base
-            && fork_name != ForkName::Merge
+        cfg!(not(feature = "fake_crypto")) && fork_name != ForkName::Base
     }
 
     fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> {
diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs
index a1d5b0916df..636119cdbaf 100644
--- a/testing/ef_tests/src/handler.rs
+++ b/testing/ef_tests/src/handler.rs
@@ -2,7 +2,7 @@ use crate::cases::{self, Case, Cases, EpochTransition, LoadCase, Operation};
 use crate::type_name;
 use crate::type_name::TypeName;
 use derivative::Derivative;
-use std::fs;
+use std::fs::{self, DirEntry};
 use std::marker::PhantomData;
 use std::path::PathBuf;
 use types::{BeaconState, EthSpec, ForkName};
@@ -31,30 +31,27 @@ pub trait Handler {
     }
 
     fn run_for_fork(&self, fork_name: ForkName) {
-        let fork_name_str = match fork_name {
-            ForkName::Base => "phase0",
-            ForkName::Altair => "altair",
-            ForkName::Merge => "merge",
-        };
+        let fork_name_str = fork_name.to_string();
 
         let handler_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
             .join("consensus-spec-tests")
             .join("tests")
             .join(Self::config_name())
-            .join(fork_name_str)
+            .join(&fork_name_str)
             .join(Self::runner_name())
             .join(self.handler_name());
 
         // Iterate through test suites
+        let as_directory = |entry: Result<DirEntry, std::io::Error>| -> Option<DirEntry> {
+            entry
+                .ok()
+                .filter(|e| e.file_type().map(|ty| ty.is_dir()).unwrap_or(false))
+        };
         let test_cases = fs::read_dir(&handler_path)
             .expect("handler dir exists")
-            .flat_map(|entry| {
-                entry
-                    .ok()
-                    .filter(|e| e.file_type().map(|ty| ty.is_dir()).unwrap_or(false))
-            })
+            .filter_map(as_directory)
             .flat_map(|suite| fs::read_dir(suite.path()).expect("suite dir exists"))
-            .flat_map(Result::ok)
+            .filter_map(as_directory)
             .map(|test_case_dir| {
                 let path = test_case_dir.path();
                 let case = Self::Case::load_from_dir(&path, fork_name).expect("test should load");
@@ -439,63 +436,21 @@ impl<E: EthSpec + TypeName> Handler for FinalityHandler<E> {
     }
 }
 
-#[derive(Derivative)]
-#[derivative(Default(bound = ""))]
-pub struct ForkChoiceGetHeadHandler<E>(PhantomData<E>);
-
-impl<E: EthSpec + TypeName> Handler for ForkChoiceGetHeadHandler<E> {
-    type Case = cases::ForkChoiceTest<E>;
-
-    fn config_name() -> &'static str {
-        E::name()
-    }
-
-    fn runner_name() -> &'static str {
-        "fork_choice"
-    }
-
-    fn handler_name(&self) -> String {
-        "get_head".into()
-    }
-
-    fn is_enabled_for_fork(&self, _fork_name: ForkName) -> bool {
-        // These tests check block validity (which may include signatures) and there is no need to
-        // run them with fake crypto.
-        cfg!(not(feature = "fake_crypto"))
-    }
+pub struct ForkChoiceHandler<E> {
+    handler_name: String,
+    _phantom: PhantomData<E>,
 }
 
-#[derive(Derivative)]
-#[derivative(Default(bound = ""))]
-pub struct ForkChoiceOnBlockHandler<E>(PhantomData<E>);
-
-impl<E: EthSpec + TypeName> Handler for ForkChoiceOnBlockHandler<E> {
-    type Case = cases::ForkChoiceTest<E>;
-
-    fn config_name() -> &'static str {
-        E::name()
-    }
-
-    fn runner_name() -> &'static str {
-        "fork_choice"
-    }
-
-    fn handler_name(&self) -> String {
-        "on_block".into()
-    }
-
-    fn is_enabled_for_fork(&self, _fork_name: ForkName) -> bool {
-        // These tests check block validity (which may include signatures) and there is no need to
-        // run them with fake crypto.
-        cfg!(not(feature = "fake_crypto"))
+impl<E> ForkChoiceHandler<E> {
+    pub fn new(handler_name: &str) -> Self {
+        Self {
+            handler_name: handler_name.into(),
+            _phantom: PhantomData,
+        }
     }
 }
 
-#[derive(Derivative)]
-#[derivative(Default(bound = ""))]
-pub struct ForkChoiceOnMergeBlockHandler<E>(PhantomData<E>);
-
-impl<E: EthSpec + TypeName> Handler for ForkChoiceOnMergeBlockHandler<E> {
+impl<E: EthSpec + TypeName> Handler for ForkChoiceHandler<E> {
     type Case = cases::ForkChoiceTest<E>;
 
     fn config_name() -> &'static str {
@@ -507,15 +462,20 @@ impl<E: EthSpec + TypeName> Handler for ForkChoiceOnMergeBlockHandler<E> {
     }
 
     fn handler_name(&self) -> String {
-        "on_merge_block".into()
+        self.handler_name.clone()
     }
 
     fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool {
+        // Merge block tests are only enabled for Bellatrix or later.
+        if self.handler_name == "on_merge_block"
+            && (fork_name == ForkName::Base || fork_name == ForkName::Altair)
+        {
+            return false;
+        }
+
         // These tests check block validity (which may include signatures) and there is no need to
         // run them with fake crypto.
         cfg!(not(feature = "fake_crypto"))
-        // These tests only exist for the merge.
-            && fork_name == ForkName::Merge
     }
 }
diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs
index 2201bc5ee86..bdefec0014d 100644
--- a/testing/ef_tests/tests/tests.rs
+++ b/testing/ef_tests/tests/tests.rs
@@ -413,20 +413,26 @@ fn finality() {
 
 #[test]
 fn fork_choice_get_head() {
-    ForkChoiceGetHeadHandler::<MainnetEthSpec>::default().run();
-    ForkChoiceGetHeadHandler::<MinimalEthSpec>::default().run();
+    ForkChoiceHandler::<MainnetEthSpec>::new("get_head").run();
+    ForkChoiceHandler::<MinimalEthSpec>::new("get_head").run();
 }
 
 #[test]
 fn fork_choice_on_block() {
-    ForkChoiceOnBlockHandler::<MainnetEthSpec>::default().run();
-    ForkChoiceOnBlockHandler::<MinimalEthSpec>::default().run();
+    ForkChoiceHandler::<MainnetEthSpec>::new("on_block").run();
+    ForkChoiceHandler::<MinimalEthSpec>::new("on_block").run();
 }
 
 #[test]
 fn fork_choice_on_merge_block() {
-    ForkChoiceOnMergeBlockHandler::<MainnetEthSpec>::default().run();
-    ForkChoiceOnMergeBlockHandler::<MinimalEthSpec>::default().run();
+    ForkChoiceHandler::<MainnetEthSpec>::new("on_merge_block").run();
+    ForkChoiceHandler::<MinimalEthSpec>::new("on_merge_block").run();
+}
+
+#[test]
+fn fork_choice_ex_ante() {
+    ForkChoiceHandler::<MainnetEthSpec>::new("ex_ante").run();
+    ForkChoiceHandler::<MinimalEthSpec>::new("ex_ante").run();
 }
 
 #[test]
diff --git a/testing/simulator/src/checks.rs b/testing/simulator/src/checks.rs
index 42bf61384db..7ff387b9c6a 100644
--- a/testing/simulator/src/checks.rs
+++ b/testing/simulator/src/checks.rs
@@ -193,7 +193,7 @@ pub async fn verify_full_sync_aggregates_up_to<E: EthSpec>(
                 .map(|agg| agg.num_set_bits())
         })
         .map_err(|e| format!("Error while getting beacon block: {:?}", e))?
-        .ok_or(format!("Altair block {} should have sync aggregate", slot))?;
+        .map_err(|_| format!("Altair block {} should have sync aggregate", slot))?;
 
     if sync_aggregate_count != E::sync_committee_size() {
         return Err(format!(
diff --git a/validator_client/slashing_protection/Cargo.toml b/validator_client/slashing_protection/Cargo.toml
index 9cfe0ab4ea3..634e49feea1 100644
--- a/validator_client/slashing_protection/Cargo.toml
+++ b/validator_client/slashing_protection/Cargo.toml
@@ -15,7 +15,11 @@ serde_derive = "1.0.116"
 serde_json = "1.0.58"
 eth2_serde_utils = "0.1.1"
 filesystem = { path = "../../common/filesystem" }
+arbitrary = { version = "1.0", features = ["derive"], optional = true }
 
 [dev-dependencies]
 lazy_static = "1.4.0"
 rayon = "1.4.1"
+
+[features]
+arbitrary-fuzz = ["arbitrary", "types/arbitrary-fuzz"]
diff --git a/validator_client/slashing_protection/Makefile b/validator_client/slashing_protection/Makefile
index 57875902604..e3d935b4c98 100644
--- a/validator_client/slashing_protection/Makefile
+++ b/validator_client/slashing_protection/Makefile
@@ -1,8 +1,8 @@
-TESTS_TAG := v5.2.0
+TESTS_TAG := v5.2.1
 GENERATE_DIR := generated-tests
 OUTPUT_DIR := interchange-tests
 TARBALL := $(OUTPUT_DIR)-$(TESTS_TAG).tar.gz
-ARCHIVE_URL := https://github.com/eth2-clients/slashing-protection-interchange-tests/tarball/$(TESTS_TAG)
+ARCHIVE_URL := https://github.com/eth-clients/slashing-protection-interchange-tests/tarball/$(TESTS_TAG)
 
 ifeq ($(OS),Windows_NT)
 ifeq (, $(shell where rm))
diff --git a/validator_client/slashing_protection/src/bin/test_generator.rs b/validator_client/slashing_protection/src/bin/test_generator.rs
index 2bca9727afc..b96dd8eb796 100644
--- a/validator_client/slashing_protection/src/bin/test_generator.rs
+++ b/validator_client/slashing_protection/src/bin/test_generator.rs
@@ -224,6 +224,19 @@ fn main() {
                 .with_blocks(vec![(0, 20, false)]),
         ],
     ),
+    MultiTestCase::new(
+        "multiple_interchanges_single_validator_multiple_blocks_out_of_order",
+        vec![
+            TestCase::new(interchange(vec![(0, vec![0], vec![])])).with_blocks(vec![
+                (0, 10, true),
+                (0, 20, true),
+                (0, 30, true),
+            ]),
+            TestCase::new(interchange(vec![(0, vec![20], vec![])]))
+                .contains_slashable_data()
+                .with_blocks(vec![(0, 29, false)]),
+        ],
+    ),
     MultiTestCase::new(
         "multiple_interchanges_single_validator_fail_iff_imported",
         vec![
diff --git a/validator_client/slashing_protection/src/interchange.rs b/validator_client/slashing_protection/src/interchange.rs
index a9185e5bb24..3793766b6aa 100644
--- a/validator_client/slashing_protection/src/interchange.rs
+++ b/validator_client/slashing_protection/src/interchange.rs
@@ -7,6 +7,7 @@ use types::{Epoch, Hash256, PublicKeyBytes, Slot};
 
 #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
 #[serde(deny_unknown_fields)]
+#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
 pub struct InterchangeMetadata {
     #[serde(with = "eth2_serde_utils::quoted_u64::require_quotes")]
     pub interchange_format_version: u64,
@@ -15,6 +16,7 @@ pub struct InterchangeMetadata {
 
 #[derive(Debug, Clone, PartialEq, Eq, Hash, Deserialize, Serialize)]
 #[serde(deny_unknown_fields)]
+#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
 pub struct InterchangeData {
     pub pubkey: PublicKeyBytes,
     pub signed_blocks: Vec<SignedBlock>,
@@ -23,6 +25,7 @@ pub struct InterchangeData {
 
 #[derive(Debug, Clone, PartialEq, Eq, Hash, Deserialize, Serialize)]
 #[serde(deny_unknown_fields)]
+#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
 pub struct SignedBlock {
     #[serde(with = "eth2_serde_utils::quoted_u64::require_quotes")]
     pub slot: Slot,
@@ -32,6 +35,7 @@ pub struct SignedBlock {
 
 #[derive(Debug, Clone, PartialEq, Eq, Hash, Deserialize, Serialize)]
 #[serde(deny_unknown_fields)]
+#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
 pub struct SignedAttestation {
     #[serde(with = "eth2_serde_utils::quoted_u64::require_quotes")]
     pub source_epoch: Epoch,
@@ -42,6 +46,7 @@ pub struct SignedAttestation {
 }
 
 #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
+#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
 pub struct Interchange {
     pub metadata: InterchangeMetadata,
     pub data: Vec<InterchangeData>,
diff --git a/validator_client/slashing_protection/src/interchange_test.rs b/validator_client/slashing_protection/src/interchange_test.rs
index 6bd6ce38b3f..dc828773b9c 100644
--- a/validator_client/slashing_protection/src/interchange_test.rs
+++ b/validator_client/slashing_protection/src/interchange_test.rs
@@ -9,6 +9,7 @@ use tempfile::tempdir;
 use types::{Epoch, Hash256, PublicKeyBytes, Slot};
 
 #[derive(Debug, Clone, Deserialize, Serialize)]
+#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
 pub struct MultiTestCase {
     pub name: String,
     pub genesis_validators_root: Hash256,
@@ -16,6 +17,7 @@ pub struct MultiTestCase {
 }
 
 #[derive(Debug, Clone, Deserialize, Serialize)]
+#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
 pub struct TestCase {
     pub should_succeed: bool,
     pub contains_slashable_data: bool,
@@ -25,6 +27,7 @@ pub struct TestCase {
 }
 
 #[derive(Debug, Clone, Deserialize, Serialize)]
+#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
 pub struct TestBlock {
     pub pubkey: PublicKeyBytes,
     pub slot: Slot,
@@ -33,6 +36,7 @@ pub struct TestBlock {
 }
 
 #[derive(Debug, Clone, Deserialize, Serialize)]
+#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
 pub struct TestAttestation {
     pub pubkey: PublicKeyBytes,
     pub source_epoch: Epoch,
@@ -230,7 +234,7 @@ impl TestCase {
     }
 }
 
-fn check_minification_invariants(interchange: &Interchange, minified: &Interchange) {
+pub fn check_minification_invariants(interchange: &Interchange, minified: &Interchange) {
     // Metadata should be unchanged.
     assert_eq!(interchange.metadata, minified.metadata);
 
diff --git a/validator_client/slashing_protection/src/slashing_database.rs b/validator_client/slashing_protection/src/slashing_database.rs
index 725aa6057dd..2b187f46eff 100644
--- a/validator_client/slashing_protection/src/slashing_database.rs
+++ b/validator_client/slashing_protection/src/slashing_database.rs
@@ -648,29 +648,17 @@ impl SlashingDatabase {
         // Summary of minimum and maximum messages pre-import.
         let prev_summary = self.validator_summary(pubkey, txn)?;
 
-        // If the interchange contains a new maximum slot block, import it.
+        // If the interchange contains any blocks, update the database with the new max slot.
         let max_block = record.signed_blocks.iter().max_by_key(|b| b.slot);
         if let Some(max_block) = max_block {
-            // Block is relevant if there are no previous blocks, or new block has slot greater than
-            // previous maximum.
-            if prev_summary
-                .max_block_slot
-                .map_or(true, |max_block_slot| max_block.slot > max_block_slot)
-            {
-                self.insert_block_proposal(
-                    txn,
-                    pubkey,
-                    max_block.slot,
-                    max_block
-                        .signing_root
-                        .map(SigningRoot::from)
-                        .unwrap_or_default(),
-                )?;
-
-                // Prune the database so that it contains *only* the new block.
-                self.prune_signed_blocks(&record.pubkey, max_block.slot, txn)?;
-            }
+            // Store new synthetic block with maximum slot and null signing root. Remove all other
+            // blocks.
+            let new_max_slot = max_or(prev_summary.max_block_slot, max_block.slot);
+            let signing_root = SigningRoot::default();
+
+            self.clear_signed_blocks(pubkey, txn)?;
+            self.insert_block_proposal(txn, pubkey, new_max_slot, signing_root)?;
         }
 
         // Find the attestations with max source and max target. Unless the input contains slashable
@@ -901,6 +889,23 @@ impl SlashingDatabase {
         Ok(())
     }
 
+    /// Remove all blocks signed by a given `public_key`.
+    ///
+    /// Dangerous, should only be used immediately before inserting a new block in the same
+    /// transaction.
+    fn clear_signed_blocks(
+        &self,
+        public_key: &PublicKeyBytes,
+        txn: &Transaction,
+    ) -> Result<(), NotSafe> {
+        let validator_id = self.get_validator_id_in_txn(txn, public_key)?;
+        txn.execute(
+            "DELETE FROM signed_blocks WHERE validator_id = ?1",
+            params![validator_id],
+        )?;
+        Ok(())
+    }
+
     /// Prune the signed attestations table for the given validator keys.
     pub fn prune_all_signed_attestations<'a>(
         &self,
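The import path now always collapses a validator's block history to a single synthetic entry at the highest slot seen so far; `max_or` (not shown in this hunk) presumably folds the previous optional maximum into the incoming slot so the protection floor can never regress. A sketch of that helper under that assumption:

use std::cmp::max;

type Slot = u64;

/// Fold an optional previous maximum into a new candidate value.
fn max_or(opt: Option<Slot>, candidate: Slot) -> Slot {
    opt.map_or(candidate, |prev| max(prev, candidate))
}

fn main() {
    // No blocks previously recorded: the interchange's max slot wins.
    assert_eq!(max_or(None, 20), 20);
    // Previous max was higher: keep it, so the floor never moves backwards.
    assert_eq!(max_or(Some(30), 20), 30);
}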
diff --git a/validator_client/src/beacon_node_fallback.rs b/validator_client/src/beacon_node_fallback.rs
index 487b5744d07..18780c3092c 100644
--- a/validator_client/src/beacon_node_fallback.rs
+++ b/validator_client/src/beacon_node_fallback.rs
@@ -253,22 +253,19 @@ impl<E: EthSpec> CandidateBeaconNode<E> {
                 "our_genesis_fork" => ?spec.genesis_fork_version,
             );
             return Err(CandidateError::Incompatible);
-        } else if *spec != beacon_node_spec {
+        } else if beacon_node_spec.altair_fork_epoch != spec.altair_fork_epoch {
             warn!(
                 log,
-                "Beacon node config does not match exactly";
+                "Beacon node has mismatched Altair fork epoch";
                 "endpoint" => %self.beacon_node,
-                "advice" => "check that the BN is updated and configured for any upcoming forks",
+                "endpoint_altair_fork_epoch" => ?beacon_node_spec.altair_fork_epoch,
             );
-            debug!(
-                log,
-                "Beacon node config";
-                "config" => ?beacon_node_spec,
-            );
-            debug!(
+        } else if beacon_node_spec.bellatrix_fork_epoch != spec.bellatrix_fork_epoch {
+            warn!(
                 log,
-                "Our config";
-                "config" => ?spec,
+                "Beacon node has mismatched Bellatrix fork epoch";
+                "endpoint" => %self.beacon_node,
+                "endpoint_bellatrix_fork_epoch" => ?beacon_node_spec.bellatrix_fork_epoch,
             );
         }
 
diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs
index 57585e2672f..72e651f7d18 100644
--- a/validator_client/src/initialized_validators.rs
+++ b/validator_client/src/initialized_validators.rs
@@ -282,10 +282,7 @@ pub fn load_pem_certificate<P: AsRef<Path>>(pem_path: P) -> Result<Certificate, Error> {
 
 fn signer_url(base_url: &str, voting_public_key: &PublicKey) -> Result<Url, ParseError> {
-    Url::parse(base_url)?.join(&format!(
-        "api/v1/eth2/sign/{}",
-        voting_public_key.to_string()
-    ))
+    Url::parse(base_url)?.join(&format!("api/v1/eth2/sign/{}", voting_public_key))
 }
 
 /// Try to unlock `keystore` at `keystore_path` by prompting the user via `stdin`.
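The simplified `signer_url` leans on two details worth knowing: `format!` already goes through `Display`, so the explicit `.to_string()` was redundant, and `Url::join` resolves its argument against the base URL, so a base without a trailing slash would drop its last path segment. A small demonstration of the join behaviour, assuming the `url` crate (url = "2"):

use url::Url; // url = "2"

fn main() -> Result<(), url::ParseError> {
    let with_slash = Url::parse("http://localhost:9000/")?;
    assert_eq!(
        with_slash.join("api/v1/eth2/sign/0xab")?.as_str(),
        "http://localhost:9000/api/v1/eth2/sign/0xab"
    );

    // Without a trailing slash the last path segment is replaced, a classic
    // `Url::join` gotcha when composing signer endpoints.
    let no_slash = Url::parse("http://localhost:9000/base")?;
    assert_eq!(no_slash.join("api")?.as_str(), "http://localhost:9000/api");
    Ok(())
}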