diff --git a/.buildkite/hooks/post-checkout b/.buildkite/hooks/post-checkout index a36d2574ebbfad..f41f238e0e6414 100644 --- a/.buildkite/hooks/post-checkout +++ b/.buildkite/hooks/post-checkout @@ -38,10 +38,3 @@ source ci/env.sh kill -9 "$victim" || true done ) - -# HACK: These are in our docker images, need to be removed from CARGO_HOME -# because we try to cache downloads across builds with CARGO_HOME -# cargo lacks a facility for "system" tooling, always tries CARGO_HOME first -cargo uninstall cargo-audit &>/dev/null || true -cargo uninstall svgbob_cli &>/dev/null || true -cargo uninstall mdbook &>/dev/null || true diff --git a/.buildkite/hooks/pre-command b/.buildkite/hooks/pre-command index 025b228f8579de..4c798a83d36889 100644 --- a/.buildkite/hooks/pre-command +++ b/.buildkite/hooks/pre-command @@ -28,5 +28,11 @@ fi export SBF_TOOLS_VERSION -SCCACHE_S3_KEY_PREFIX="${rust_stable}_${rust_nightly}_${SBF_TOOLS_VERSION}" +SCCACHE_KEY_PREFIX="${rust_stable}_${rust_nightly}_${SBF_TOOLS_VERSION}" +export SCCACHE_KEY_PREFIX + +SCCACHE_S3_KEY_PREFIX="$SCCACHE_KEY_PREFIX" export SCCACHE_S3_KEY_PREFIX + +SCCACHE_GCS_KEY_PREFIX="$SCCACHE_KEY_PREFIX" +export SCCACHE_GCS_KEY_PREFIX diff --git a/.github/workflows/downstream-project-spl.yml b/.github/workflows/downstream-project-spl.yml index f0ecfb20accb4e..6afd398f43accb 100644 --- a/.github/workflows/downstream-project-spl.yml +++ b/.github/workflows/downstream-project-spl.yml @@ -43,6 +43,8 @@ jobs: .github/scripts/purge-ubuntu-runner.sh - uses: mozilla-actions/sccache-action@v0.0.3 + with: + version: "v0.5.4" - shell: bash run: | @@ -90,6 +92,8 @@ jobs: .github/scripts/purge-ubuntu-runner.sh - uses: mozilla-actions/sccache-action@v0.0.3 + with: + version: "v0.5.4" - shell: bash run: | @@ -139,6 +143,8 @@ jobs: .github/scripts/purge-ubuntu-runner.sh - uses: mozilla-actions/sccache-action@v0.0.3 + with: + version: "v0.5.4" - shell: bash run: | diff --git a/CHANGELOG.md b/CHANGELOG.md index a99a5ffe0045a1..4fe1b4fc2ae902 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ Release channels have their own copy of this changelog: ## [1.18.0] - Unreleased * Changes * Added a github check to support `changelog` label + * The default for `--use-snapshot-archives-at-startup` is now `when-newest` (#33883) * Upgrade Notes ## [1.17.0] diff --git a/Cargo.lock b/Cargo.lock index 2f33fe8a6b8b6f..519388ed1be9eb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -75,9 +75,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd7d5a2cecb58716e47d67d5703a249964b14c7be1ec3cad3affc295b2d1c35d" +checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a" dependencies = [ "cfg-if 1.0.0", "getrandom 0.2.10", @@ -167,6 +167,20 @@ version = "1.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" +[[package]] +name = "aquamarine" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df752953c49ce90719c7bf1fc587bc8227aed04732ea0c0f85e5397d7fdbd1a1" +dependencies = [ + "include_dir", + "itertools", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "arc-swap" version = "1.5.0" @@ -436,7 +450,7 @@ checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" dependencies = [ "proc-macro2", "quote", - "syn 
2.0.38", + "syn 2.0.39", ] [[package]] @@ -590,7 +604,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -616,9 +630,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.3.3" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42" +checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" dependencies = [ "serde", ] @@ -864,9 +878,9 @@ dependencies = [ [[package]] name = "bytecount" -version = "0.6.4" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad152d03a2c813c80bb94fedbf3a3f02b28f793e39e7c214c8a0bcc196343de7" +checksum = "e1e5f035d16fc623ae5f74981db80a439803888314e3a555fd6f04acd51a3205" [[package]] name = "bytemuck" @@ -1464,7 +1478,7 @@ dependencies = [ [[package]] name = "curve25519-dalek" version = "3.2.1" -source = "git+https://github.com/solana-labs/curve25519-dalek.git?rev=c14774464c4d38de553c6ef2f48a10982c1b4801#c14774464c4d38de553c6ef2f48a10982c1b4801" +source = "git+https://github.com/solana-labs/curve25519-dalek.git?rev=b500cdc2a920cd5bff9e2dd974d7b97349d61464#b500cdc2a920cd5bff9e2dd974d7b97349d61464" dependencies = [ "byteorder", "digest 0.9.0", @@ -1495,7 +1509,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -1506,7 +1520,7 @@ checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -1690,7 +1704,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -1699,6 +1713,12 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" +[[package]] +name = "downcast" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" + [[package]] name = "eager" version = "0.1.0" @@ -1790,7 +1810,7 @@ checksum = "eecf8589574ce9b895052fa12d69af7a233f99e6107f5cb8dd1044f2a17bfdcb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -1984,6 +2004,12 @@ dependencies = [ "percent-encoding 2.3.0", ] +[[package]] +name = "fragile" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" + [[package]] name = "fs-err" version = "2.9.0" @@ -2010,9 +2036,9 @@ checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" +checksum = "da0290714b38af9b4a7b094b8a37086d1b4e61f2df9122c3cad2577669145335" dependencies = [ "futures-channel", "futures-core", @@ -2025,9 +2051,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.28" +version = "0.3.29" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" +checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb" dependencies = [ "futures-core", "futures-sink", @@ -2035,15 +2061,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" +checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" [[package]] name = "futures-executor" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" +checksum = "0f4fb8693db0cf099eadcca0efe2a5a22e4550f98ed16aba6c48700da29597bc" dependencies = [ "futures-core", "futures-task", @@ -2053,38 +2079,38 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" +checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa" [[package]] name = "futures-macro" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" +checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] name = "futures-sink" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" +checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817" [[package]] name = "futures-task" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" +checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2" [[package]] name = "futures-util" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" +checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" dependencies = [ "futures 0.1.31", "futures-channel", @@ -2226,7 +2252,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8af59a261bcf42f45d1b261232847b9b850ba0a1419d6100698246fb66e9240" dependencies = [ "arc-swap", - "futures 0.3.28", + "futures 0.3.29", "log", "reqwest", "serde", @@ -2307,7 +2333,7 @@ version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ - "ahash 0.8.5", + "ahash 0.8.6", ] [[package]] @@ -2487,7 +2513,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca815a891b24fdfb243fa3239c86154392b0953ee584aa1a2a1f66d20cbe75cc" dependencies = [ "bytes", - "futures 0.3.28", + "futures 0.3.29", "headers", "http", "hyper", @@ -2599,11 +2625,30 @@ 
dependencies = [ "version_check", ] +[[package]] +name = "include_dir" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18762faeff7122e89e0857b02f7ce6fcc0d101d5e9ad2ad7846cc01d61b7f19e" +dependencies = [ + "include_dir_macros", +] + +[[package]] +name = "include_dir_macros" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b139284b5cf57ecfa712bcc66950bb635b31aff41c188e8a4cfc758eca374a3f" +dependencies = [ + "proc-macro2", + "quote", +] + [[package]] name = "index_list" -version = "0.2.7" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a9d968042a4902e08810946fc7cd5851eb75e80301342305af755ca06cb82ce" +checksum = "70891286cb8e844fdfcf1178b47569699f9e20b5ecc4b45a6240a64771444638" [[package]] name = "indexmap" @@ -2617,9 +2662,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.0.2" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8adf3ddd720272c6ea8bf59463c04e0f93d0bbf7c5439b691bca2987e0270897" +checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" dependencies = [ "equivalent", "hashbrown 0.14.1", @@ -2716,7 +2761,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2b99d4207e2a04fb4581746903c2bb7eb376f88de9c699d0f3e10feeac0cd3a" dependencies = [ "derive_more", - "futures 0.3.28", + "futures 0.3.29", "jsonrpc-core", "jsonrpc-pubsub", "jsonrpc-server-utils", @@ -2734,7 +2779,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14f7f76aef2d054868398427f6c54943cf3d1caa9a7ec7d0c38d69df97a965eb" dependencies = [ - "futures 0.3.28", + "futures 0.3.29", "futures-executor", "futures-util", "log", @@ -2749,7 +2794,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b51da17abecbdab3e3d4f26b01c5ec075e88d3abe3ab3b05dc9aa69392764ec0" dependencies = [ - "futures 0.3.28", + "futures 0.3.29", "jsonrpc-client-transports", ] @@ -2771,7 +2816,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1dea6e07251d9ce6a552abfb5d7ad6bc290a4596c8dcc3d795fae2bbdc1f3ff" dependencies = [ - "futures 0.3.28", + "futures 0.3.29", "hyper", "jsonrpc-core", "jsonrpc-server-utils", @@ -2787,7 +2832,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "382bb0206323ca7cda3dcd7e245cea86d37d02457a02a975e3378fb149a48845" dependencies = [ - "futures 0.3.28", + "futures 0.3.29", "jsonrpc-core", "jsonrpc-server-utils", "log", @@ -2802,7 +2847,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240f87695e6c6f62fb37f05c02c04953cf68d6408b8c1c89de85c7a0125b1011" dependencies = [ - "futures 0.3.28", + "futures 0.3.29", "jsonrpc-core", "lazy_static", "log", @@ -2818,7 +2863,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa4fdea130485b572c39a460d50888beb00afb3e35de23ccd7fad8ff19f0e0d4" dependencies = [ "bytes", - "futures 0.3.28", + "futures 0.3.29", "globset", "jsonrpc-core", "lazy_static", @@ -2955,9 +3000,9 @@ dependencies = [ [[package]] name = "light-poseidon" -version = "0.1.1" +version = "0.1.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "949bdd22e4ed93481d45e9a6badb34b99132bcad0c8a8d4f05c42f7dcc7b90bc" +checksum = "a5b439809cdfc0d86ecc7317f1724df13dfa665df48991b79e90e689411451f7" dependencies = [ "ark-bn254", "ark-ff", @@ -2972,9 +3017,9 @@ checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" [[package]] name = "linux-raw-sys" -version = "0.4.3" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09fc20d2ca12cb9f044c93e3bd6d32d523e6e2ec3db4f7b2939cd99026ecd3f0" +checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" [[package]] name = "lock_api" @@ -3137,6 +3182,33 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "mockall" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c84490118f2ee2d74570d114f3d0493cbf02790df303d2707606c3e14e07c96" +dependencies = [ + "cfg-if 1.0.0", + "downcast", + "fragile", + "lazy_static", + "mockall_derive", + "predicates", + "predicates-tree", +] + +[[package]] +name = "mockall_derive" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb" +dependencies = [ + "cfg-if 1.0.0", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "modular-bitfield" version = "0.11.2" @@ -3212,7 +3284,7 @@ version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.4.1", "cfg-if 1.0.0", "libc", ] @@ -3299,7 +3371,7 @@ checksum = "cfb77679af88f8b125209d354a202862602672222e7f2313fdd6dc349bad4712" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -3375,11 +3447,11 @@ dependencies = [ [[package]] name = "num_enum" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70bf6736f74634d299d00086f02986875b3c2d924781a6a2cb6c201e73da0ceb" +checksum = "683751d591e6d81200c39fb0d1032608b77724f34114db54f571ff1317b337c0" dependencies = [ - "num_enum_derive 0.7.0", + "num_enum_derive 0.7.1", ] [[package]] @@ -3403,19 +3475,19 @@ dependencies = [ "proc-macro-crate 1.1.0", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] name = "num_enum_derive" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56ea360eafe1022f7cc56cd7b869ed57330fb2453d0c7831d99b74c65d2f5597" +checksum = "6c11e44798ad209ccdd91fc192f0526a369a01234f7373e1b141c96d7cee4f0e" dependencies = [ "proc-macro-crate 1.1.0", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -3477,11 +3549,11 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.57" +version = "0.10.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bac25ee399abb46215765b1cb35bc0212377e58a061560d8b29b024fd0430e7c" +checksum = "7a257ad03cd8fb16ad4172fedf8094451e1af1c4b70097636ef2eac9a5f0cc33" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.4.1", "cfg-if 1.0.0", "foreign-types", "libc", @@ -3509,18 +3581,18 @@ checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" 
[[package]] name = "openssl-src" -version = "111.25.0+1.1.1t" +version = "300.1.6+3.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3173cd3626c43e3854b1b727422a276e568d9ec5fe8cec197822cf52cfb743d6" +checksum = "439fac53e092cd7442a3660c85dde4643ab3b5bd39040912388dcdabf6b88085" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.92" +version = "0.9.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db7e971c2c2bba161b2d2fdf37080177eff520b3bc044787c7f1f5f9e78d869b" +checksum = "40a4130519a360279579c2053038317e40eff64d13fd3f004f9e1b72b8a6aaf9" dependencies = [ "cc", "libc", @@ -3583,7 +3655,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9981e32fb75e004cc148f5fb70342f393830e0a4aa62e3cc93b50976218d42b6" dependencies = [ - "futures 0.3.28", + "futures 0.3.29", "libc", "log", "rand 0.7.3", @@ -3922,7 +3994,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" dependencies = [ "proc-macro2", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -3991,7 +4063,7 @@ checksum = "7c003ac8c77cb07bb74f5f198bce836a689bcd5a42574612bf14d17bfd08c20e" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.3.3", + "bitflags 2.4.1", "lazy_static", "num-traits", "rand 0.8.5", @@ -4091,7 +4163,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -4125,7 +4197,7 @@ checksum = "2c78e758510582acc40acb90458401172d41f1016f8c9dde89e49677afb7eec1" dependencies = [ "bytes", "rand 0.8.5", - "ring", + "ring 0.16.20", "rustc-hash", "rustls", "rustls-native-certs", @@ -4311,7 +4383,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" dependencies = [ "pem", - "ring", + "ring 0.16.20", "time", "yasna", ] @@ -4343,6 +4415,15 @@ dependencies = [ "bitflags 1.3.2", ] +[[package]] +name = "redox_syscall" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +dependencies = [ + "bitflags 1.3.2", +] + [[package]] name = "redox_users" version = "0.4.0" @@ -4464,11 +4545,25 @@ dependencies = [ "libc", "once_cell", "spin 0.5.2", - "untrusted", + "untrusted 0.7.1", "web-sys", "winapi 0.3.9", ] +[[package]] +name = "ring" +version = "0.17.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9babe80d5c16becf6594aa32ad2be8fe08498e7ae60b77de8df700e67f191d7e" +dependencies = [ + "cc", + "getrandom 0.2.10", + "libc", + "spin 0.9.2", + "untrusted 0.9.0", + "windows-sys 0.48.0", +] + [[package]] name = "rocksdb" version = "0.21.0" @@ -4550,11 +4645,11 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.3" +version = "0.38.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac5ffa1efe7548069688cd7028f32591853cd7b5b756d41bcffd2353e4fc75b4" +checksum = "2b426b0506e5d50a7d8dafcf2e81471400deb602392c7dd110815afb4eaf02a3" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.4.1", "errno", "libc", "linux-raw-sys", @@ -4563,12 +4658,12 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.7" +version = "0.21.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" +checksum = "446e14c5cda4f3f30fe71863c34ec70f5ac79d6087097ad0bb433e1be5edf04c" dependencies = [ "log", - "ring", + "ring 0.17.3", "rustls-webpki", "sct", ] @@ -4605,12 +4700,12 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.101.4" +version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d93931baf2d282fff8d3a532bbfd7653f734643161b87e3e01e59a04439bf0d" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring", - "untrusted", + "ring 0.17.3", + "untrusted 0.9.0", ] [[package]] @@ -4688,8 +4783,8 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" dependencies = [ - "ring", - "untrusted", + "ring 0.16.20", + "untrusted 0.7.1", ] [[package]] @@ -4753,9 +4848,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.189" +version = "1.0.192" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e422a44e74ad4001bdc8eede9a4570ab52f71190e9c076d14369f38b9200537" +checksum = "bca2a08484b285dcb282d0f67b26cadc0df8b19f8c12502c13d966bf9482f001" dependencies = [ "serde_derive", ] @@ -4771,13 +4866,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.189" +version = "1.0.192" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e48d1f918009ce3145511378cf68d613e3b3d9137d67272562080d68a2b32d5" +checksum = "d6c7207fbec9faa48073f3e3074cbe553af6ea512d7c21ba46e434e70ea9fbc1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -4831,7 +4926,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -4852,7 +4947,7 @@ version = "0.9.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a49e178e4452f45cb61d0cd8cebc1b0fafd3e41929e996cef79aa3aca91f574" dependencies = [ - "indexmap 2.0.2", + "indexmap 2.1.0", "itoa", "ryu", "serde", @@ -4866,7 +4961,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e56dd856803e253c8f298af3f4d7eb0ae5e23a737252cd90bb4f3b435033b2d" dependencies = [ "dashmap", - "futures 0.3.28", + "futures 0.3.29", "lazy_static", "log", "parking_lot 0.12.1", @@ -4881,7 +4976,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -5101,7 +5196,7 @@ checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ "base64 0.13.1", "bytes", - "futures 0.3.28", + "futures 0.3.29", "httparse", "log", "rand 0.8.5", @@ -5207,7 +5302,7 @@ dependencies = [ "num-derive 0.4.1", "num-traits", "num_cpus", - "num_enum 0.7.0", + "num_enum 0.7.1", "ouroboros", "percentage", "qualifier_attr", @@ -5240,6 +5335,7 @@ dependencies = [ "strum_macros", "tar", "tempfile", + "test-case", "thiserror", ] @@ -5302,7 +5398,7 @@ name = "solana-banks-client" version = "1.18.0" dependencies = [ "borsh 0.10.3", - "futures 0.3.28", + "futures 0.3.29", "solana-banks-interface", "solana-banks-server", "solana-program", @@ -5329,7 +5425,7 @@ version = "1.18.0" dependencies = [ "bincode", "crossbeam-channel", - "futures 0.3.28", 
+ "futures 0.3.29", "solana-accounts-db", "solana-banks-interface", "solana-client", @@ -5451,7 +5547,7 @@ dependencies = [ "log", "memmap2", "modular-bitfield", - "num_enum 0.7.0", + "num_enum 0.7.1", "rand 0.8.5", "rayon", "solana-logger", @@ -5670,9 +5766,9 @@ dependencies = [ "bincode", "crossbeam-channel", "dashmap", - "futures 0.3.28", + "futures 0.3.29", "futures-util", - "indexmap 2.0.2", + "indexmap 2.1.0", "indicatif", "log", "quinn", @@ -5753,7 +5849,7 @@ dependencies = [ "bincode", "crossbeam-channel", "futures-util", - "indexmap 2.0.2", + "indexmap 2.1.0", "indicatif", "log", "rand 0.8.5", @@ -5784,14 +5880,14 @@ dependencies = [ "eager", "etcd-client", "fs_extra", - "futures 0.3.28", + "futures 0.3.29", "histogram", "itertools", "lazy_static", "log", "lru", "min-max-heap", - "num_enum 0.7.0", + "num_enum 0.7.1", "prio-graph", "quinn", "rand 0.8.5", @@ -5980,7 +6076,7 @@ dependencies = [ name = "solana-frozen-abi" version = "1.18.0" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.4.1", "block-buffer 0.10.4", "bs58", "bv", @@ -6008,7 +6104,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.0", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -6092,7 +6188,7 @@ dependencies = [ "clap 2.33.3", "crossbeam-channel", "flate2", - "indexmap 2.0.2", + "indexmap 2.1.0", "itertools", "log", "lru", @@ -6191,7 +6287,7 @@ version = "1.18.0" dependencies = [ "assert_matches", "bincode", - "bitflags 2.3.3", + "bitflags 2.4.1", "bs58", "byteorder", "chrono", @@ -6199,14 +6295,15 @@ dependencies = [ "crossbeam-channel", "dashmap", "fs_extra", - "futures 0.3.28", + "futures 0.3.29", "itertools", "lazy_static", "libc", "log", "lru", + "mockall", "num_cpus", - "num_enum 0.7.0", + "num_enum 0.7.1", "prost", "rand 0.8.5", "rand_chacha 0.3.1", @@ -6265,7 +6362,7 @@ dependencies = [ "crossbeam-channel", "csv", "dashmap", - "futures 0.3.28", + "futures 0.3.29", "histogram", "itertools", "log", @@ -6475,7 +6572,7 @@ dependencies = [ name = "solana-perf" version = "1.18.0" dependencies = [ - "ahash 0.8.5", + "ahash 0.8.6", "assert_matches", "bincode", "bv", @@ -6552,7 +6649,7 @@ dependencies = [ "assert_matches", "base64 0.21.5", "bincode", - "bitflags 2.3.3", + "bitflags 2.4.1", "blake3", "borsh 0.10.3", "borsh 0.9.3", @@ -6685,7 +6782,7 @@ dependencies = [ "async-mutex", "async-trait", "crossbeam-channel", - "futures 0.3.28", + "futures 0.3.29", "itertools", "lazy_static", "log", @@ -6802,7 +6899,7 @@ dependencies = [ "bincode", "bs58", "crossbeam-channel", - "futures 0.3.28", + "futures 0.3.29", "indicatif", "jsonrpc-core", "jsonrpc-http-server", @@ -6847,7 +6944,7 @@ version = "1.18.0" dependencies = [ "anyhow", "clap 2.33.3", - "futures 0.3.28", + "futures 0.3.29", "serde_json", "solana-account-decoder", "solana-clap-utils", @@ -6889,6 +6986,7 @@ dependencies = [ name = "solana-runtime" version = "1.18.0" dependencies = [ + "aquamarine", "arrayref", "assert_matches", "base64 0.21.5", @@ -6915,11 +7013,12 @@ dependencies = [ "lz4", "memmap2", "memoffset 0.9.0", + "mockall", "modular-bitfield", "num-derive 0.4.1", "num-traits", "num_cpus", - "num_enum 0.7.0", + "num_enum 0.7.1", "ouroboros", "percentage", "qualifier_attr", @@ -6976,7 +7075,7 @@ dependencies = [ "assert_matches", "base64 0.21.5", "bincode", - "bitflags 2.3.3", + "bitflags 2.4.1", "borsh 0.10.3", "bs58", "bytemuck", @@ -6998,7 +7097,7 @@ dependencies = [ "memmap2", "num-derive 0.4.1", "num-traits", - "num_enum 0.7.0", + "num_enum 0.7.1", "pbkdf2 0.11.0", "qstring", "qualifier_attr", @@ -7034,7 +7133,7 @@ dependencies = 
[ "proc-macro2", "quote", "rustversion", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -7095,7 +7194,7 @@ dependencies = [ "bzip2", "enum-iterator", "flate2", - "futures 0.3.28", + "futures 0.3.29", "goauth", "http", "hyper", @@ -7155,7 +7254,7 @@ dependencies = [ "crossbeam-channel", "futures-util", "histogram", - "indexmap 2.0.2", + "indexmap 2.1.0", "itertools", "libc", "log", @@ -7246,7 +7345,7 @@ dependencies = [ "console", "csv", "ctrlc", - "indexmap 2.0.2", + "indexmap 2.1.0", "indicatif", "pickledb", "serde", @@ -7275,7 +7374,7 @@ dependencies = [ "async-trait", "bincode", "futures-util", - "indexmap 2.0.2", + "indexmap 2.1.0", "indicatif", "log", "rayon", @@ -7348,7 +7447,7 @@ dependencies = [ "bincode", "bytes", "crossbeam-channel", - "futures 0.3.28", + "futures 0.3.29", "itertools", "log", "lru", @@ -7709,7 +7808,7 @@ checksum = "fadbefec4f3c678215ca72bd71862697bb06b41fd77c0088902dd3203354387b" dependencies = [ "quote", "spl-discriminator-syn", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -7721,7 +7820,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.38", + "syn 2.0.39", "thiserror", ] @@ -7779,7 +7878,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -7821,7 +7920,7 @@ dependencies = [ "bytemuck", "num-derive 0.4.1", "num-traits", - "num_enum 0.7.0", + "num_enum 0.7.1", "solana-program", "solana-zk-token-sdk", "spl-memo", @@ -7952,9 +8051,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.38" +version = "2.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b" +checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a" dependencies = [ "proc-macro2", "quote", @@ -8056,7 +8155,7 @@ checksum = "1c38a012bed6fb9681d3bf71ffaa4f88f3b4b9ed3198cda6e4c8462d24d4bb80" dependencies = [ "anyhow", "fnv", - "futures 0.3.28", + "futures 0.3.29", "humantime", "opentelemetry", "pin-project", @@ -8085,13 +8184,13 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.8.0" +version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" +checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" dependencies = [ "cfg-if 1.0.0", "fastrand", - "redox_syscall 0.3.5", + "redox_syscall 0.4.1", "rustix", "windows-sys 0.48.0", ] @@ -8130,7 +8229,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -8142,7 +8241,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", "test-case-core", ] @@ -8178,7 +8277,7 @@ checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -8317,7 +8416,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -8448,7 +8547,7 @@ version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338" dependencies = [ - "indexmap 2.0.2", + "indexmap 2.1.0", "serde", "serde_spanned", "toml_datetime", @@ -8711,6 +8810,12 @@ version = "0.7.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + [[package]] name = "uriparse" version = "0.6.4" @@ -8842,7 +8947,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", "wasm-bindgen-shared", ] @@ -8876,7 +8981,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -9164,22 +9269,22 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.7.11" +version = "0.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c19fae0c8a9efc6a8281f2e623db8af1db9e57852e04cde3e754dd2dc29340f" +checksum = "81ba595b9f2772fbee2312de30eeb80ec773b4cb2f1e8098db024afadda6c06f" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.11" +version = "0.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc56589e9ddd1f1c28d4b4b5c773ce232910a6bb67a70133d61c9e347585efe9" +checksum = "772666c41fb6dceaf520b564b962d738a8e1a83b41bd48945f50837aed78bb1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -9199,7 +9304,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 6a95693e26bc31..778b37f477db1d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -132,8 +132,9 @@ edition = "2021" [workspace.dependencies] Inflector = "0.11.4" +aquamarine = "0.3.2" aes-gcm-siv = "0.10.3" -ahash = "0.8.5" +ahash = "0.8.6" anyhow = "1.0.75" ark-bn254 = "0.4.0" ark-ec = "0.4.0" @@ -157,7 +158,7 @@ borsh = "0.10.3" bs58 = "0.4.0" bv = "0.11.1" byte-unit = "4.0.19" -bytecount = "0.6.4" +bytecount = "0.6.7" bytemuck = "1.14.0" byteorder = "1.5.0" bytes = "1.5" @@ -199,8 +200,8 @@ flate2 = "1.0.28" fnv = "1.0.7" fs-err = "2.9.0" fs_extra = "1.3.0" -futures = "0.3.28" -futures-util = "0.3.28" +futures = "0.3.29" +futures-util = "0.3.29" gag = "1.0.0" generic-array = { version = "0.14.7", default-features = false } gethostname = "0.2.3" @@ -215,8 +216,8 @@ humantime = "2.0.1" hyper = "0.14.27" hyper-proxy = "0.9.1" im = "15.1.0" -index_list = "0.2.7" -indexmap = "2.0.2" +index_list = "0.2.11" +indexmap = "2.1.0" indicatif = "0.17.7" itertools = "0.10.5" jemallocator = { package = "tikv-jemallocator", version = "0.4.1", features = [ @@ -235,7 +236,7 @@ lazy_static = "1.4.0" libc = "0.2.149" libloading = "0.7.4" libsecp256k1 = "0.6.0" -light-poseidon = "0.1.1" +light-poseidon = "0.1.2" log = "0.4.20" lru = "0.7.7" lz4 = "1.24.0" @@ -243,13 +244,14 @@ memmap2 = "0.5.10" memoffset = "0.9" merlin = "3" min-max-heap = "1.3.0" +mockall = "0.11.4" modular-bitfield = "0.11.2" nix = "0.26.4" num-bigint = "0.4.4" num-derive = "0.4" num-traits = "0.2" num_cpus = "1.16.0" -num_enum = "0.7.0" +num_enum = "0.7.1" openssl = "0.10" ouroboros = "0.15.6" parking_lot = "0.12" @@ -283,12 +285,12 @@ reqwest = { version = "0.11.22", default-features = false } rolling-file = "0.2.0" rpassword = "7.2" rustc_version = "0.4" -rustls = { version = "0.21.7", 
default-features = false, features = ["quic"] } +rustls = { version = "0.21.8", default-features = false, features = ["quic"] } rustversion = "1.0.14" scopeguard = "1.2.0" semver = "1.0.20" seqlock = "0.2.0" -serde = "1.0.189" +serde = "1.0.192" serde_bytes = "0.11.12" serde_derive = "1.0.103" serde_json = "1.0.107" @@ -400,7 +402,7 @@ sysctl = "0.4.6" systemstat = "0.2.3" tar = "0.4.40" tarpc = "0.29.0" -tempfile = "3.8.0" +tempfile = "3.8.1" test-case = "3.2.1" thiserror = "1.0.50" tiny-bip39 = "0.8.2" @@ -449,6 +451,13 @@ crossbeam-epoch = { git = "https://github.com/solana-labs/crossbeam", rev = "fd2 # and we end up with two versions of `solana-program` and `solana-zk-token-sdk` and all of their # dependencies in our build tree. # +# If you are developing downstream using non-crates-io solana-program (local or +# forked repo, or from github rev, eg), duplicate the following patch statements +# in your Cargo.toml. If you still hit duplicate-type errors with the patch +# statements in place, run `cargo update -p solana-program` and/or `cargo update +# -p solana-zk-token-sdk` to remove extraneous versions from your Cargo.lock +# file. +# # There is a similar override in `programs/sbf/Cargo.toml`. Please keep both comments and the # overrides in sync. solana-program = { path = "sdk/program" } @@ -515,9 +524,9 @@ rev = "6105d7a5591aefa646a95d12b5e8d3f55a9214ef" # # https://github.com/dalek-cryptography/curve25519-dalek/commit/29e5c29b0e5c6821e4586af58b0d0891dd2ec639 # -# Comparison with `c14774464c4d38de553c6ef2f48a10982c1b4801`: +# Comparison with `b500cdc2a920cd5bff9e2dd974d7b97349d61464`: # -# https://github.com/dalek-cryptography/curve25519-dalek/compare/3.2.1...solana-labs:curve25519-dalek:c14774464c4d38de553c6ef2f48a10982c1b4801 +# https://github.com/dalek-cryptography/curve25519-dalek/compare/3.2.1...solana-labs:curve25519-dalek:b500cdc2a920cd5bff9e2dd974d7b97349d61464 # # Or, using the branch name instead of the hash: # @@ -525,4 +534,4 @@ rev = "6105d7a5591aefa646a95d12b5e8d3f55a9214ef" # [patch.crates-io.curve25519-dalek] git = "https://github.com/solana-labs/curve25519-dalek.git" -rev = "c14774464c4d38de553c6ef2f48a10982c1b4801" +rev = "b500cdc2a920cd5bff9e2dd974d7b97349d61464" diff --git a/account-decoder/src/lib.rs b/account-decoder/src/lib.rs index 9905e15f5323cd..65b60ad5cfc0db 100644 --- a/account-decoder/src/lib.rs +++ b/account-decoder/src/lib.rs @@ -56,6 +56,30 @@ pub enum UiAccountData { Binary(String, UiAccountEncoding), } +impl UiAccountData { + /// Returns decoded account data in binary format if possible + pub fn decode(&self) -> Option<Vec<u8>> { + match self { + UiAccountData::Json(_) => None, + UiAccountData::LegacyBinary(blob) => bs58::decode(blob).into_vec().ok(), + UiAccountData::Binary(blob, encoding) => match encoding { + UiAccountEncoding::Base58 => bs58::decode(blob).into_vec().ok(), + UiAccountEncoding::Base64 => BASE64_STANDARD.decode(blob).ok(), + UiAccountEncoding::Base64Zstd => { + BASE64_STANDARD.decode(blob).ok().and_then(|zstd_data| { + let mut data = vec![]; + zstd::stream::read::Decoder::new(zstd_data.as_slice()) + .and_then(|mut reader| reader.read_to_end(&mut data)) + .map(|_| data) + .ok() + }) + } + UiAccountEncoding::Binary | UiAccountEncoding::JsonParsed => None, + }, + } + } +} + #[derive(Serialize, Deserialize, Clone, Copy, Debug, PartialEq, Eq, Hash)] #[serde(rename_all = "camelCase")] pub enum UiAccountEncoding { @@ -139,24 +163,7 @@ impl UiAccount { } pub fn decode<T: WritableAccount>(&self) ->
Option<T> { - let data = match &self.data { - UiAccountData::Json(_) => None, - UiAccountData::LegacyBinary(blob) => bs58::decode(blob).into_vec().ok(), - UiAccountData::Binary(blob, encoding) => match encoding { - UiAccountEncoding::Base58 => bs58::decode(blob).into_vec().ok(), - UiAccountEncoding::Base64 => BASE64_STANDARD.decode(blob).ok(), - UiAccountEncoding::Base64Zstd => { - BASE64_STANDARD.decode(blob).ok().and_then(|zstd_data| { - let mut data = vec![]; - zstd::stream::read::Decoder::new(zstd_data.as_slice()) - .and_then(|mut reader| reader.read_to_end(&mut data)) - .map(|_| data) - .ok() - }) - } - UiAccountEncoding::Binary | UiAccountEncoding::JsonParsed => None, - }, - }?; + let data = self.data.decode()?; Some(T::create( self.lamports, data, diff --git a/accounts-db/Cargo.toml b/accounts-db/Cargo.toml index b4fcceea000552..6ce4d2f087e72d 100644 --- a/accounts-db/Cargo.toml +++ b/accounts-db/Cargo.toml @@ -79,6 +79,7 @@ solana-accounts-db = { path = ".", features = ["dev-context-only-utils"] } solana-logger = { workspace = true } solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } static_assertions = { workspace = true } +test-case = { workspace = true } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs index 7265626d8927e5..0ac199e6633522 100644 --- a/accounts-db/src/accounts.rs +++ b/accounts-db/src/accounts.rs @@ -24,7 +24,7 @@ use { itertools::Itertools, log::*, solana_program_runtime::{ - compute_budget::{self, ComputeBudget}, + compute_budget_processor::process_compute_budget_instructions, loaded_programs::LoadedProgramsForTxBatch, }, solana_sdk::{ @@ -34,9 +34,8 @@ use { bpf_loader_upgradeable::{self, UpgradeableLoaderState}, clock::{BankId, Slot}, feature_set::{ - self, add_set_tx_loaded_accounts_data_size_instruction, - include_loaded_accounts_data_size_in_fee_calculation, - remove_congestion_multiplier_from_fee_calculation, remove_deprecated_request_unit_ix, + self, include_loaded_accounts_data_size_in_fee_calculation, + remove_congestion_multiplier_from_fee_calculation, simplify_writable_program_account_check, FeatureSet, }, fee::FeeStructure, @@ -246,15 +245,16 @@ impl Accounts { feature_set: &FeatureSet, ) -> Result<Option<NonZeroUsize>> { if feature_set.is_active(&feature_set::cap_transaction_accounts_data_size::id()) { - let mut compute_budget = - ComputeBudget::new(compute_budget::MAX_COMPUTE_UNIT_LIMIT as u64); - let _process_transaction_result = compute_budget.process_instructions( + let compute_budget_limits = process_compute_budget_instructions( tx.message().program_instructions_iter(), - !feature_set.is_active(&remove_deprecated_request_unit_ix::id()), - feature_set.is_active(&add_set_tx_loaded_accounts_data_size_instruction::id()), - ); + feature_set, + ) + .unwrap_or_default(); // sanitize against setting size limit to zero - NonZeroUsize::new(compute_budget.loaded_accounts_data_size_limit).map_or( + NonZeroUsize::new( + usize::try_from(compute_budget_limits.loaded_accounts_bytes).unwrap_or_default(), + ) + .map_or( Err(TransactionError::InvalidLoadedAccountsDataSizeLimit), |v| Ok(Some(v)), ) @@ -721,7 +721,7 @@ impl Accounts { fee_structure.calculate_fee( tx.message(), lamports_per_signature, - &ComputeBudget::fee_budget_limits(tx.message().program_instructions_iter(), feature_set), + &process_compute_budget_instructions(tx.message().program_instructions_iter(), feature_set).unwrap_or_default().into(),
feature_set.is_active(&remove_congestion_multiplier_from_fee_calculation::id()), feature_set.is_active(&include_loaded_accounts_data_size_in_fee_calculation::id()), ) @@ -1470,8 +1470,9 @@ mod tests { transaction_results::{DurableNonceFee, TransactionExecutionDetails}, }, assert_matches::assert_matches, - solana_program_runtime::prioritization_fee::{ - PrioritizationFeeDetails, PrioritizationFeeType, + solana_program_runtime::{ + compute_budget_processor, + prioritization_fee::{PrioritizationFeeDetails, PrioritizationFeeType}, }, solana_sdk::{ account::{AccountSharedData, WritableAccount}, @@ -1747,13 +1748,15 @@ mod tests { ); let mut feature_set = FeatureSet::all_enabled(); - feature_set.deactivate(&remove_deprecated_request_unit_ix::id()); + feature_set.deactivate(&solana_sdk::feature_set::remove_deprecated_request_unit_ix::id()); let message = SanitizedMessage::try_from(tx.message().clone()).unwrap(); let fee = FeeStructure::default().calculate_fee( &message, lamports_per_signature, - &ComputeBudget::fee_budget_limits(message.program_instructions_iter(), &feature_set), + &process_compute_budget_instructions(message.program_instructions_iter(), &feature_set) + .unwrap_or_default() + .into(), true, false, ); @@ -4249,7 +4252,11 @@ mod tests { let result_no_limit = Ok(None); let result_default_limit = Ok(Some( - NonZeroUsize::new(compute_budget::MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES).unwrap(), + NonZeroUsize::new( + usize::try_from(compute_budget_processor::MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES) + .unwrap(), + ) + .unwrap(), )); let result_requested_limit: Result<Option<NonZeroUsize>> = Ok(Some(NonZeroUsize::new(99).unwrap())); @@ -4277,7 +4284,10 @@ mod tests { // if tx doesn't set limit, then default limit (64MiB) // if tx sets limit, then requested limit // if tx sets limit to zero, then TransactionError::InvalidLoadedAccountsDataSizeLimit - feature_set.activate(&add_set_tx_loaded_accounts_data_size_instruction::id(), 0); + feature_set.activate( + &solana_sdk::feature_set::add_set_tx_loaded_accounts_data_size_instruction::id(), + 0, + ); test(tx_not_set_limit, &feature_set, &result_default_limit); test(tx_set_limit_99, &feature_set, &result_requested_limit); test(tx_set_limit_0, &feature_set, &result_invalid_limit); @@ -4312,13 +4322,15 @@ mod tests { ); let mut feature_set = FeatureSet::all_enabled(); - feature_set.deactivate(&remove_deprecated_request_unit_ix::id()); + feature_set.deactivate(&solana_sdk::feature_set::remove_deprecated_request_unit_ix::id()); let message = SanitizedMessage::try_from(tx.message().clone()).unwrap(); let fee = FeeStructure::default().calculate_fee( &message, lamports_per_signature, - &ComputeBudget::fee_budget_limits(message.program_instructions_iter(), &feature_set), + &process_compute_budget_instructions(message.program_instructions_iter(), &feature_set) + .unwrap_or_default() + .into(), true, false, ); diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 469de17c5bcc2d..ed0f3e551c66cf 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -81,7 +81,7 @@ use { serde::{Deserialize, Serialize}, smallvec::SmallVec, solana_measure::{measure::Measure, measure_us}, - solana_nohash_hasher::IntSet, + solana_nohash_hasher::{IntMap, IntSet}, solana_rayon_threadlimit::get_thread_count, solana_sdk::{ account::{Account, AccountSharedData, ReadableAccount, WritableAccount}, @@ -486,6 +486,7 @@ pub const ACCOUNTS_DB_CONFIG_FOR_TESTING: AccountsDbConfig = AccountsDbConfig { exhaustively_verify_refcounts: false, create_ancient_storage:
CreateAncientStorage::Pack, test_partitioned_epoch_rewards: TestPartitionedEpochRewards::CompareResults, + test_skip_rewrites_but_include_in_bank_hash: false, }; pub const ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS: AccountsDbConfig = AccountsDbConfig { index: Some(ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS), @@ -498,6 +499,7 @@ pub const ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS: AccountsDbConfig exhaustively_verify_refcounts: false, create_ancient_storage: CreateAncientStorage::Pack, test_partitioned_epoch_rewards: TestPartitionedEpochRewards::None, + test_skip_rewrites_but_include_in_bank_hash: false, }; pub type BinnedHashData = Vec<Vec<CalculateHashIntermediate>>; @@ -557,6 +559,7 @@ pub struct AccountsDbConfig { /// if None, ancient append vecs are set to ANCIENT_APPEND_VEC_DEFAULT_OFFSET /// Some(offset) means include slots up to (max_slot - (slots_per_epoch - 'offset')) pub ancient_append_vec_offset: Option<i64>, + pub test_skip_rewrites_but_include_in_bank_hash: bool, pub skip_initial_hash_calc: bool, pub exhaustively_verify_refcounts: bool, /// how to create ancient storages @@ -1436,10 +1439,12 @@ pub struct AccountsDb { pub storage: AccountStorage, - #[allow(dead_code)] /// from AccountsDbConfig create_ancient_storage: CreateAncientStorage, + /// true if this client should skip rewrites but still include those rewrites in the bank hash as if rewrites had occurred. + pub test_skip_rewrites_but_include_in_bank_hash: bool, + pub accounts_cache: AccountsCache, write_cache_limit_bytes: Option<u64>, @@ -1573,6 +1578,7 @@ pub struct AccountsStats { delta_hash_scan_time_total_us: AtomicU64, delta_hash_accumulate_time_total_us: AtomicU64, delta_hash_num: AtomicU64, + skipped_rewrites_num: AtomicUsize, last_store_report: AtomicInterval, store_hash_accounts: AtomicU64, @@ -1722,24 +1728,33 @@ impl SplitAncientStorages { /// So a slot remains in the same chunk whenever it is included in the accounts hash. /// When the slot gets deleted or gets consumed in an ancient append vec, it will no longer be in its chunk. /// The results of scanning a chunk of appendvecs can be cached to avoid scanning large amounts of data over and over. - fn new(oldest_non_ancient_slot: Slot, snapshot_storages: &SortedStorages) -> Self { + fn new(oldest_non_ancient_slot: Option<Slot>, snapshot_storages: &SortedStorages) -> Self { let range = snapshot_storages.range(); - // any ancient append vecs should definitely be cached - // We need to break the ranges into: - // 1. individual ancient append vecs (may be empty) - // 2. first unevenly divided chunk starting at 1 epoch old slot (may be empty) - // 3. evenly divided full chunks in the middle - // 4. unevenly divided chunk of most recent slots (may be empty) - let ancient_slots = - Self::get_ancient_slots(oldest_non_ancient_slot, snapshot_storages, |storage| { - storage.capacity() > get_ancient_append_vec_capacity() * 50 / 100 - }); + let (ancient_slots, first_non_ancient_slot) = if let Some(oldest_non_ancient_slot) = + oldest_non_ancient_slot + { + // any ancient append vecs should definitely be cached + // We need to break the ranges into: + // 1. individual ancient append vecs (may be empty) + // 2. first unevenly divided chunk starting at 1 epoch old slot (may be empty) + // 3. evenly divided full chunks in the middle + // 4.
unevenly divided chunk of most recent slots (may be empty) + let ancient_slots = + Self::get_ancient_slots(oldest_non_ancient_slot, snapshot_storages, |storage| { + storage.capacity() > get_ancient_append_vec_capacity() * 50 / 100 + }); + + let first_non_ancient_slot = ancient_slots + .last() + .map(|last_ancient_slot| last_ancient_slot.saturating_add(1)) + .unwrap_or(range.start); + + (ancient_slots, first_non_ancient_slot) + } else { + (vec![], range.start) + }; - let first_non_ancient_slot = ancient_slots - .last() - .map(|last_ancient_slot| last_ancient_slot.saturating_add(1)) - .unwrap_or(range.start); Self::new_with_ancient_info(range, ancient_slots, first_non_ancient_slot) } @@ -2547,6 +2562,7 @@ impl AccountsDb { exhaustively_verify_refcounts: false, partitioned_epoch_rewards_config: PartitionedEpochRewardsConfig::default(), epoch_accounts_hash_manager: EpochAccountsHashManager::new_invalid(), + test_skip_rewrites_but_include_in_bank_hash: false, } } @@ -2622,6 +2638,11 @@ impl AccountsDb { .map(|config| config.test_partitioned_epoch_rewards) .unwrap_or_default(); + let test_skip_rewrites_but_include_in_bank_hash = accounts_db_config + .as_ref() + .map(|config| config.test_skip_rewrites_but_include_in_bank_hash) + .unwrap_or_default(); + let partitioned_epoch_rewards_config: PartitionedEpochRewardsConfig = PartitionedEpochRewardsConfig::new(test_partitioned_epoch_rewards); @@ -2647,6 +2668,7 @@ impl AccountsDb { .and_then(|x| x.write_cache_limit_bytes), partitioned_epoch_rewards_config, exhaustively_verify_refcounts, + test_skip_rewrites_but_include_in_bank_hash, ..Self::default_with_accounts_index( accounts_index, base_working_path, @@ -2912,6 +2934,7 @@ impl AccountsDb { } fn background_hasher(receiver: Receiver<CachedAccount>) { + info!("Background account hasher has started"); loop { let result = receiver.recv(); match result { @@ -2922,11 +2945,13 @@ impl AccountsDb { let _ = (*account).hash(); }; } - Err(_) => { + Err(err) => { + info!("Background account hasher is stopping because: {err}"); break; } } } + info!("Background account hasher has stopped"); } fn start_background_hasher(&mut self) { @@ -4303,7 +4328,7 @@ impl AccountsDb { shrink_slots: &ShrinkCandidates, shrink_ratio: f64, oldest_non_ancient_slot: Option<Slot>, - ) -> (HashMap<Slot, Arc<AccountStorageEntry>>, ShrinkCandidates) { + ) -> (IntMap<Slot, Arc<AccountStorageEntry>>, ShrinkCandidates) { struct StoreUsageInfo { slot: Slot, alive_ratio: f64, @@ -4346,7 +4371,7 @@ impl AccountsDb { // Working from the beginning of store_usage which are the most sparse and see when we can stop // shrinking while still achieving the overall goals.
- let mut shrink_slots = HashMap::new(); + let mut shrink_slots = IntMap::default(); let mut shrink_slots_next_batch = ShrinkCandidates::default(); for usage in &store_usage { let store = &usage.store; @@ -5882,7 +5907,7 @@ impl AccountsDb { self.purge_slot_cache(*remove_slot, slot_cache); remove_cache_elapsed.stop(); remove_cache_elapsed_across_slots += remove_cache_elapsed.as_us(); - // Nobody else shoud have removed the slot cache entry yet + // Nobody else should have removed the slot cache entry yet assert!(self.accounts_cache.remove_slot(*remove_slot).is_some()); } else { self.purge_slot_storage(*remove_slot, purge_stats); } @@ -6944,6 +6969,11 @@ impl AccountsDb { .swap(0, Ordering::Relaxed), i64 ), + ( + "skipped_rewrites_num", + self.stats.skipped_rewrites_num.swap(0, Ordering::Relaxed), + i64 + ), ); } @@ -7137,21 +7167,32 @@ impl AccountsDb { } } - /// if ancient append vecs are enabled, return a slot 'max_slot_inclusive' - (slots_per_epoch - `self.ancient_append_vec_offset`) - /// otherwise, return 0 + /// `oldest_non_ancient_slot` is only applicable when `Append` is used for ancient append vec packing. + /// If `Pack` is used for ancient append vec packing, return None. + /// Otherwise, return a slot 'max_slot_inclusive' - (slots_per_epoch - `self.ancient_append_vec_offset`) + /// If ancient append vecs are not enabled, return 0. fn get_oldest_non_ancient_slot_for_hash_calc_scan( &self, max_slot_inclusive: Slot, config: &CalcAccountsHashConfig<'_>, - ) -> Slot { - if self.ancient_append_vec_offset.is_some() { + ) -> Option<Slot> { + if self.create_ancient_storage == CreateAncientStorage::Pack { + // oldest_non_ancient_slot is only applicable when ancient storages are created with `Append`. When ancient storages are created with `Pack`, ancient storages + // can be created in between non-ancient storages. Return None, because oldest_non_ancient_slot is not applicable here. + None + } else if self.ancient_append_vec_offset.is_some() { // For performance, this is required when ancient appendvecs are enabled - self.get_oldest_non_ancient_slot_from_slot(config.epoch_schedule, max_slot_inclusive) + Some( + self.get_oldest_non_ancient_slot_from_slot( + config.epoch_schedule, + max_slot_inclusive, + ), + ) } else { // This causes the entire range to be chunked together, treating older append vecs just like new ones. // This performs well if there are many old append vecs that haven't been cleaned yet. // 0 will have the effect of causing ALL older append vecs to be chunked together, just like every other append vec.
- 0 + Some(0) } } @@ -7291,7 +7332,11 @@ impl AccountsDb { let mut init_accum = true; // load from cache failed, so create the cache file for this chunk for (slot, storage) in snapshot_storages.iter_range(&range_this_chunk) { - let ancient = slot < oldest_non_ancient_slot; + let ancient = + oldest_non_ancient_slot.is_some_and(|oldest_non_ancient_slot| { + slot < oldest_non_ancient_slot + }); + let (_, scan_us) = measure_us!(if let Some(storage) = storage { if init_accum { let range = bin_range.end - bin_range.start; @@ -7908,7 +7953,6 @@ impl AccountsDb { slot: Slot, ) -> (Vec<(Pubkey, AccountHash)>, u64, Measure) { let mut scan = Measure::start("scan"); - let scan_result: ScanStorageResult<(Pubkey, AccountHash), DashMap<Pubkey, AccountHash>> = self.scan_account_storage( slot, @@ -7928,6 +7972,7 @@ impl AccountsDb { ScanStorageResult::Cached(cached_result) => cached_result, ScanStorageResult::Stored(stored_result) => stored_result.into_iter().collect(), }; + (hashes, scan.as_us(), accumulate) } @@ -7968,12 +8013,12 @@ impl AccountsDb { } } - /// Calculate accounts delta hash for `slot` + /// Wrapper function to calculate accounts delta hash for `slot` (only used for testing and benchmarking.) /// /// As part of calculating the accounts delta hash, get a list of accounts modified this slot /// (aka dirty pubkeys) and add them to `self.uncleaned_pubkeys` for future cleaning. pub fn calculate_accounts_delta_hash(&self, slot: Slot) -> AccountsDeltaHash { - self.calculate_accounts_delta_hash_internal(slot, None) + self.calculate_accounts_delta_hash_internal(slot, None, HashMap::default()) } /// Calculate accounts delta hash for `slot` @@ -7984,9 +8029,20 @@ impl AccountsDb { &self, slot: Slot, ignore: Option<Pubkey>, + mut skipped_rewrites: HashMap<Pubkey, AccountHash>, ) -> AccountsDeltaHash { let (mut hashes, scan_us, mut accumulate) = self.get_pubkey_hash_for_slot(slot); let dirty_keys = hashes.iter().map(|(pubkey, _hash)| *pubkey).collect(); + + hashes.iter().for_each(|(k, _h)| { + skipped_rewrites.remove(k); + }); + + let num_skipped_rewrites = skipped_rewrites.len(); + hashes.extend(skipped_rewrites); + + info!("skipped rewrite hashes {} {}", slot, num_skipped_rewrites); + if let Some(ignore) = ignore { hashes.retain(|k| k.0 != ignore); } @@ -8015,6 +8071,10 @@ impl AccountsDb { .delta_hash_accumulate_time_total_us .fetch_add(accumulate.as_us(), Ordering::Relaxed); self.stats.delta_hash_num.fetch_add(1, Ordering::Relaxed); + self.stats + .skipped_rewrites_num + .fetch_add(num_skipped_rewrites, Ordering::Relaxed); + accounts_delta_hash } @@ -9960,6 +10020,7 @@ pub mod tests { sync::atomic::AtomicBool, thread::{self, Builder, JoinHandle}, }, + test_case::test_case, }; fn linear_ancestors(end_slot: u64) -> Ancestors { @@ -15020,7 +15081,6 @@ pub mod tests { db.store_uncached(1, &[(&account_key1, &account2)]); db.calculate_accounts_delta_hash(0); db.calculate_accounts_delta_hash(1); - db.print_accounts_stats("pre-clean1"); // clean accounts - no accounts should be cleaned, since no rooted slots @@ -15042,7 +15102,6 @@ pub mod tests { db.store_uncached(2, &[(&account_key2, &account3)]); db.store_uncached(2, &[(&account_key1, &account3)]); db.calculate_accounts_delta_hash(2); - db.clean_accounts_for_tests(); db.print_accounts_stats("post-clean2"); @@ -16291,9 +16350,22 @@ pub mod tests { assert_eq!(db.accounts_index.ref_count_from_storage(&pk1), 0); } - #[test] - fn test_get_oldest_non_ancient_slot_for_hash_calc_scan() { + #[test_case(CreateAncientStorage::Append; "append")] + #[test_case(CreateAncientStorage::Pack; "pack")] + fn
test_get_oldest_non_ancient_slot_for_hash_calc_scan( + create_ancient_storage: CreateAncientStorage, + ) { + let expected = |v| { + if create_ancient_storage == CreateAncientStorage::Append { + Some(v) + } else { + None + } + }; + let mut db = AccountsDb::new_single_for_tests(); + db.create_ancient_storage = create_ancient_storage; + let config = CalcAccountsHashConfig::default(); let slot = config.epoch_schedule.slots_per_epoch; let slots_per_epoch = config.epoch_schedule.slots_per_epoch; @@ -16302,23 +16374,23 @@ pub mod tests { // no ancient append vecs, so always 0 assert_eq!( db.get_oldest_non_ancient_slot_for_hash_calc_scan(slots_per_epoch + offset, &config), - 0 + expected(0) ); // ancient append vecs enabled (but at 0 offset), so can be non-zero db.ancient_append_vec_offset = Some(0); // 0..=(slots_per_epoch - 1) are all non-ancient assert_eq!( db.get_oldest_non_ancient_slot_for_hash_calc_scan(slots_per_epoch - 1, &config), - 0 + expected(0) ); // 1..=slots_per_epoch are all non-ancient, so 1 is oldest non ancient assert_eq!( db.get_oldest_non_ancient_slot_for_hash_calc_scan(slots_per_epoch, &config), - 1 + expected(1) ); assert_eq!( db.get_oldest_non_ancient_slot_for_hash_calc_scan(slots_per_epoch + offset, &config), - offset + 1 + expected(offset + 1) ); } @@ -16391,7 +16463,7 @@ pub mod tests { fn test_split_storages_ancient_chunks() { let storages = SortedStorages::empty(); assert_eq!(storages.max_slot_inclusive(), 0); - let result = SplitAncientStorages::new(0, &storages); + let result = SplitAncientStorages::new(Some(0), &storages); assert_eq!(result, SplitAncientStorages::default()); } @@ -16741,7 +16813,7 @@ pub mod tests { // 1 = all storages are non-ancient // 2 = ancient slots: 1 // 3 = ancient slots: 1, 2 - // 4 = ancient slots: 1, 2, 3 (except 2 is large, 3 is not, so treat 3 as non-ancient) + // 4 = ancient slots: 1, 2 (except 2 is large, 3 is not, so treat 3 as non-ancient) // 5 = ... 
for oldest_non_ancient_slot in 0..6 { let ancient_slots = SplitAncientStorages::get_ancient_slots( diff --git a/accounts-db/src/accounts_db/geyser_plugin_utils.rs b/accounts-db/src/accounts_db/geyser_plugin_utils.rs index 0fbc11c07964cf..1efd678873f620 100644 --- a/accounts-db/src/accounts_db/geyser_plugin_utils.rs +++ b/accounts-db/src/accounts_db/geyser_plugin_utils.rs @@ -56,8 +56,7 @@ impl AccountsDb { } let accounts_update_notifier = self.accounts_update_notifier.as_ref().unwrap(); - let notifier = &accounts_update_notifier.read().unwrap(); - notifier.notify_end_of_restore_from_snapshot(); + accounts_update_notifier.notify_end_of_restore_from_snapshot(); notify_stats.report(); } @@ -72,8 +71,7 @@ impl AccountsDb { P: Iterator, { if let Some(accounts_update_notifier) = &self.accounts_update_notifier { - let notifier = &accounts_update_notifier.read().unwrap(); - notifier.notify_account_update( + accounts_update_notifier.notify_account_update( slot, account, txn, @@ -121,13 +119,7 @@ impl AccountsDb { mut accounts_to_stream: HashMap, notify_stats: &mut GeyserPluginNotifyAtSnapshotRestoreStats, ) { - let notifier = self - .accounts_update_notifier - .as_ref() - .unwrap() - .read() - .unwrap(); - + let notifier = self.accounts_update_notifier.as_ref().unwrap(); let mut measure_notify = Measure::start("accountsdb-plugin-notifying-accounts"); let local_write_version = 0; for (_, mut account) in accounts_to_stream.drain() { @@ -177,7 +169,7 @@ pub mod tests { }, std::sync::{ atomic::{AtomicBool, Ordering}, - Arc, RwLock, + Arc, }, }; @@ -246,12 +238,11 @@ pub mod tests { accounts.store_uncached(slot0, &[(&key2, &account2)]); - let notifier = Arc::new(RwLock::new(notifier)); + let notifier = Arc::new(notifier); accounts.set_geyser_plugin_notifer(Some(notifier.clone())); accounts.notify_account_restore_from_snapshot(); - let notifier = notifier.write().unwrap(); assert_eq!(notifier.accounts_notified.get(&key1).unwrap().len(), 1); assert_eq!( notifier.accounts_notified.get(&key1).unwrap()[0] @@ -303,12 +294,11 @@ pub mod tests { AccountSharedData::new(account3_lamports, 1, AccountSharedData::default().owner()); accounts.store_uncached(slot1, &[(&key3, &account3)]); - let notifier = Arc::new(RwLock::new(notifier)); + let notifier = Arc::new(notifier); accounts.set_geyser_plugin_notifer(Some(notifier.clone())); accounts.notify_account_restore_from_snapshot(); - let notifier = notifier.write().unwrap(); assert_eq!(notifier.accounts_notified.get(&key1).unwrap().len(), 1); assert_eq!( notifier.accounts_notified.get(&key1).unwrap()[0] @@ -342,7 +332,7 @@ pub mod tests { let notifier = GeyserTestPlugin::default(); - let notifier = Arc::new(RwLock::new(notifier)); + let notifier = Arc::new(notifier); accounts.set_geyser_plugin_notifer(Some(notifier.clone())); // Account with key1 is updated twice in two different slots -- should only get notified twice. 
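The hunks above, together with the `AccountsUpdateNotifier` alias change further below, drop the `RwLock` wrapper around the geyser notifier entirely: every method on `AccountsUpdateNotifierInterface` takes `&self`, so a plain `Arc<dyn Trait>` can be shared across threads, and any mutability stays inside the implementation. A minimal sketch of the pattern, using a hypothetical `Notifier` trait rather than the real interface:

```rust
use std::sync::{
    atomic::{AtomicU64, Ordering},
    Arc,
};

// Stand-in for the real trait: all methods take &self.
trait Notifier: Send + Sync {
    fn notify(&self);
}

#[derive(Default)]
struct CountingNotifier(AtomicU64); // interior mutability lives inside the impl

impl Notifier for CountingNotifier {
    fn notify(&self) {
        self.0.fetch_add(1, Ordering::Relaxed);
    }
}

fn main() {
    // No RwLock wrapper: clones share the notifier without read()/write() ceremony.
    let notifier: Arc<dyn Notifier> = Arc::new(CountingNotifier::default());
    let clone = Arc::clone(&notifier);
    std::thread::spawn(move || clone.notify()).join().unwrap();
    notifier.notify();
}
```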
@@ -372,7 +362,6 @@ pub mod tests { AccountSharedData::new(account3_lamports, 1, AccountSharedData::default().owner()); accounts.store_cached((slot1, &[(&key3, &account3)][..]), None); - let notifier = notifier.write().unwrap(); assert_eq!(notifier.accounts_notified.get(&key1).unwrap().len(), 2); assert_eq!( notifier.accounts_notified.get(&key1).unwrap()[0] diff --git a/accounts-db/src/accounts_hash.rs b/accounts-db/src/accounts_hash.rs index e9f25b318d5a4b..7631ea694635b8 100644 --- a/accounts-db/src/accounts_hash.rs +++ b/accounts-db/src/accounts_hash.rs @@ -332,13 +332,14 @@ const _: () = assert!( "CalculateHashIntermediate cannot have any padding" ); -#[derive(Default, Debug, PartialEq, Eq)] -pub struct CumulativeOffset { - pub index: Vec, - pub start_offset: usize, +#[derive(Debug, PartialEq, Eq)] +struct CumulativeOffset { + /// Since the source data is at most 2D, two indexes are enough. + index: [usize; 2], + start_offset: usize, } -pub trait ExtractSliceFromRawData<'b, T: 'b> { +trait ExtractSliceFromRawData<'b, T: 'b> { fn extract<'a>(&'b self, offset: &'a CumulativeOffset, start: usize) -> &'b [T]; } @@ -416,7 +417,7 @@ impl CumulativeOffsets { .filter_map(|(i, len)| { if len > 0 { let result = CumulativeOffset { - index: vec![i], + index: [i, i], start_offset: total_count, }; total_count += len; @@ -1242,7 +1243,7 @@ pub enum ZeroLamportAccounts { /// Hash of an account #[repr(transparent)] -#[derive(Debug, Copy, Clone, Eq, PartialEq, Pod, Zeroable)] +#[derive(Debug, Copy, Clone, Eq, PartialEq, Pod, Zeroable, AbiExample)] pub struct AccountHash(pub Hash); // Ensure the newtype wrapper never changes size from the underlying Hash @@ -1373,7 +1374,7 @@ mod tests { cumulative_offsets = Vec::with_capacity(raw.len() * v_outer.len()); } cumulative_offsets.push(CumulativeOffset { - index: vec![i, j], + index: [i, j], start_offset: total_count, }); total_count += len; @@ -1516,18 +1517,18 @@ mod tests { let len = combined.len(); assert_eq!(cumulative.total_count(), len); (0..combined.len()).for_each(|start| { - let mut retreived = Vec::default(); + let mut retrieved = Vec::default(); let mut cumulative_start = start; // read all data - while retreived.len() < (len - start) { + while retrieved.len() < (len - start) { let this_one = cumulative.get_slice(cumulative_start); - retreived.extend(this_one.iter()); + retrieved.extend(this_one.iter()); cumulative_start += this_one.len(); assert_ne!(0, this_one.len()); } assert_eq!( &combined[start..], - &retreived[..], + &retrieved[..], "permutation: {permutation}" ); }); @@ -2126,7 +2127,7 @@ mod tests { fn test_accountsdb_cumulative_find() { let input = CumulativeOffsets { cumulative_offsets: vec![CumulativeOffset { - index: vec![0], + index: [0; 2], start_offset: 0, }], total_count: 0, @@ -2136,11 +2137,11 @@ mod tests { let input = CumulativeOffsets { cumulative_offsets: vec![ CumulativeOffset { - index: vec![0], + index: [0; 2], start_offset: 0, }, CumulativeOffset { - index: vec![1], + index: [1; 2], start_offset: 2, }, ], diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index cd37df61693248..fc389116d09b71 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -3,7 +3,7 @@ use { accounts_index_storage::{AccountsIndexStorage, Startup}, accounts_partition::RentPayingAccountsByPartition, ancestors::Ancestors, - bucket_map_holder::{Age, BucketMapHolder}, + bucket_map_holder::{Age, AtomicAge, BucketMapHolder}, contains::Contains, in_mem_accounts_index::{InMemAccountsIndex, 
InsertNewEntryResults, StartupStats}, inline_spl_token::{self, GenericTokenAccount}, @@ -36,7 +36,7 @@ use { }, path::PathBuf, sync::{ - atomic::{AtomicBool, AtomicU64, AtomicU8, AtomicUsize, Ordering}, + atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, Arc, Mutex, OnceLock, RwLock, RwLockReadGuard, RwLockWriteGuard, }, }, @@ -238,7 +238,7 @@ pub struct AccountMapEntryMeta { /// true if entry in in-mem idx has changes and needs to be written to disk pub dirty: AtomicBool, /// 'age' at which this entry should be purged from the cache (implements lru) - pub age: AtomicU8, + pub age: AtomicAge, } impl AccountMapEntryMeta { @@ -248,7 +248,7 @@ pub fn new_dirty<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>>( storage: &Arc<BucketMapHolder<T, U>>, is_cached: bool, ) -> Self { AccountMapEntryMeta { dirty: AtomicBool::new(true), - age: AtomicU8::new(storage.future_age_to_flush(is_cached)), + age: AtomicAge::new(storage.future_age_to_flush(is_cached)), } } pub fn new_clean<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>>( storage: &Arc<BucketMapHolder<T, U>>, ) -> Self { AccountMapEntryMeta { dirty: AtomicBool::new(false), - age: AtomicU8::new(storage.future_age_to_flush(false)), + age: AtomicAge::new(storage.future_age_to_flush(false)), } } } @@ -2113,7 +2113,7 @@ pub mod tests { let (slot, account_info) = entry.slot_list.read().unwrap()[0]; let meta = AccountMapEntryMeta { dirty: AtomicBool::new(entry.dirty()), - age: AtomicU8::new(entry.age()), + age: AtomicAge::new(entry.age()), }; PreAllocatedAccountMapEntry::Entry(Arc::new(AccountMapEntryInner::new( vec![(slot, account_info)], diff --git a/accounts-db/src/accounts_update_notifier_interface.rs b/accounts-db/src/accounts_update_notifier_interface.rs index ae31cb06d32339..ec86fce8cd6898 100644 --- a/accounts-db/src/accounts_update_notifier_interface.rs +++ b/accounts-db/src/accounts_update_notifier_interface.rs @@ -3,7 +3,7 @@ use { solana_sdk::{ account::AccountSharedData, clock::Slot, pubkey::Pubkey, transaction::SanitizedTransaction, }, - std::sync::{Arc, RwLock}, + std::sync::Arc, }; pub trait AccountsUpdateNotifierInterface: std::fmt::Debug { @@ -25,4 +25,4 @@ fn notify_end_of_restore_from_snapshot(&self); } -pub type AccountsUpdateNotifier = Arc<RwLock<dyn AccountsUpdateNotifierInterface + Sync + Send>>; +pub type AccountsUpdateNotifier = Arc<dyn AccountsUpdateNotifierInterface + Sync + Send>; diff --git a/accounts-db/src/bucket_map_holder.rs b/accounts-db/src/bucket_map_holder.rs index c5fb8e68729b08..fc7bf3ba4131f0 100644 --- a/accounts-db/src/bucket_map_holder.rs +++ b/accounts-db/src/bucket_map_holder.rs @@ -22,6 +22,8 @@ use { }, }; pub type Age = u8; +pub type AtomicAge = AtomicU8; +const _: () = assert!(std::mem::size_of::<Age>() == std::mem::size_of::<AtomicAge>()); const AGE_MS: u64 = DEFAULT_MS_PER_SLOT; // match one age per slot time @@ -37,12 +39,12 @@ pub struct BucketMapHolder<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> /// Instead of accessing the single age and doing math each time, each value is incremented each time the age occurs, which is ~400ms. /// Callers can ask for the precomputed value they already want.
/// rolling 'current' age - pub age: AtomicU8, + pub age: AtomicAge, /// rolling age that is 'ages_to_stay_in_cache' + 'age' - pub future_age_to_flush: AtomicU8, + pub future_age_to_flush: AtomicAge, /// rolling age that is effectively 'age' - 1 /// these items are expected to be flushed from the accounts write cache or otherwise modified before this age occurs - pub future_age_to_flush_cached: AtomicU8, + pub future_age_to_flush_cached: AtomicAge, pub stats: BucketMapHolderStats, @@ -255,11 +257,11 @@ impl<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> BucketMapHolder<T, U> ages_to_stay_in_cache, count_buckets_flushed: AtomicUsize::default(), // age = 0 - age: AtomicU8::default(), + age: AtomicAge::default(), // future age = age (=0) + ages_to_stay_in_cache - future_age_to_flush: AtomicU8::new(ages_to_stay_in_cache), + future_age_to_flush: AtomicAge::new(ages_to_stay_in_cache), // effectively age (0) - 1. So, the oldest possible age from 'now' - future_age_to_flush_cached: AtomicU8::new(0_u8.wrapping_sub(1)), + future_age_to_flush_cached: AtomicAge::new(Age::MAX), stats: BucketMapHolderStats::new(bins), wait_dirty_or_aged: Arc::default(), next_bucket_to_flush: AtomicUsize::new(0), @@ -442,7 +444,7 @@ pub mod tests { let test = BucketMapHolder::<u64, u64>::new(bins, &Some(AccountsIndexConfig::default()), 1); assert_eq!(0, test.current_age()); assert_eq!(test.ages_to_stay_in_cache, test.future_age_to_flush(false)); - assert_eq!(u8::MAX, test.future_age_to_flush(true)); + assert_eq!(Age::MAX, test.future_age_to_flush(true)); (0..bins).for_each(|_| { test.bucket_flushed_at_current_age(false); }); diff --git a/accounts-db/src/bucket_map_holder_stats.rs b/accounts-db/src/bucket_map_holder_stats.rs index 4df611539d16ed..9b5cd20f0cd9b5 100644 --- a/accounts-db/src/bucket_map_holder_stats.rs +++ b/accounts-db/src/bucket_map_holder_stats.rs @@ -1,12 +1,12 @@ use { crate::{ accounts_index::{DiskIndexValue, IndexValue}, - bucket_map_holder::BucketMapHolder, + bucket_map_holder::{Age, AtomicAge, BucketMapHolder}, }, solana_sdk::timing::AtomicInterval, std::{ fmt::Debug, - sync::atomic::{AtomicBool, AtomicU64, AtomicU8, AtomicUsize, Ordering}, + sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, }, }; @@ -52,7 +52,7 @@ pub struct BucketMapHolderStats { pub flush_entries_evicted_from_mem: AtomicU64, pub active_threads: AtomicU64, pub get_range_us: AtomicU64, - last_age: AtomicU8, + last_age: AtomicAge, last_ages_flushed: AtomicU64, pub flush_scan_us: AtomicU64, pub flush_update_us: AtomicU64, @@ -120,7 +120,7 @@ impl BucketMapHolderStats { let mut age_now = age_now as u64; if last_age > age_now { // age wrapped - age_now += u8::MAX as u64 + 1; + age_now += Age::MAX as u64 + 1; } let age_delta = age_now.saturating_sub(last_age); if age_delta > 0 { diff --git a/accounts-db/src/cache_hash_data.rs b/accounts-db/src/cache_hash_data.rs index e136be4f11713c..c839a8338c2fc2 100644 --- a/accounts-db/src/cache_hash_data.rs +++ b/accounts-db/src/cache_hash_data.rs @@ -49,7 +49,7 @@ pub(crate) struct CacheHashDataFile { } impl CacheHashDataFileReference { - /// convert the open file refrence to a mmapped file that can be returned as a slice + /// convert the open file reference to a mmapped file that can be returned as a slice pub(crate) fn map(&self) -> Result { let file_len = self.file_len; let mut m1 = Measure::start("read_file"); diff --git a/accounts-db/src/contains.rs b/accounts-db/src/contains.rs index 622ddee7ab51ba..24ceff8d1ac38a 100644 --- a/accounts-db/src/contains.rs +++ b/accounts-db/src/contains.rs @@ -2,7 +2,7 @@ use std::{ borrow::Borrow, 
cmp::Eq, collections::{HashMap, HashSet}, - hash::Hash, + hash::{BuildHasher, Hash}, }; pub trait Contains<'a, T: Eq + Hash> { @@ -12,24 +12,24 @@ fn contains_iter(&'a self) -> Self::Iter; } -impl<'a, T: 'a + Eq + Hash, U: 'a> Contains<'a, T> for HashMap<T, U> { +impl<'a, T: 'a + Eq + Hash, U: 'a, S: BuildHasher> Contains<'a, T> for HashMap<T, U, S> { type Item = &'a T; type Iter = std::collections::hash_map::Keys<'a, T, U>; fn contains(&self, key: &T) -> bool { - <HashMap<T, U>>::contains_key(self, key) + <HashMap<T, U, S>>::contains_key(self, key) } fn contains_iter(&'a self) -> Self::Iter { self.keys() } } -impl<'a, T: 'a + Eq + Hash> Contains<'a, T> for HashSet<T> { +impl<'a, T: 'a + Eq + Hash, S: BuildHasher> Contains<'a, T> for HashSet<T, S> { type Item = &'a T; type Iter = std::collections::hash_set::Iter<'a, T>; fn contains(&self, key: &T) -> bool { - <HashSet<T>>::contains(self, key) + <HashSet<T, S>>::contains(self, key) } fn contains_iter(&'a self) -> Self::Iter { self.iter() diff --git a/accounts-db/src/in_mem_accounts_index.rs b/accounts-db/src/in_mem_accounts_index.rs index 3d943956cab23d..1e8e8a8fd73822 100644 --- a/accounts-db/src/in_mem_accounts_index.rs +++ b/accounts-db/src/in_mem_accounts_index.rs @@ -4,7 +4,7 @@ use { AccountMapEntry, AccountMapEntryInner, AccountMapEntryMeta, DiskIndexValue, IndexValue, PreAllocatedAccountMapEntry, RefCount, SlotList, UpsertReclaim, ZeroLamport, }, - bucket_map_holder::{Age, BucketMapHolder}, + bucket_map_holder::{Age, AtomicAge, BucketMapHolder}, bucket_map_holder_stats::BucketMapHolderStats, waitable_condvar::WaitableCondvar, }, @@ -17,7 +17,7 @@ use { fmt::Debug, ops::{Bound, RangeBounds, RangeInclusive}, sync::{ - atomic::{AtomicBool, AtomicU64, AtomicU8, Ordering}, + atomic::{AtomicBool, AtomicU64, Ordering}, Arc, Mutex, RwLock, RwLockWriteGuard, }, }, @@ -89,7 +89,7 @@ impl PossibleEvictions { // one instance of this represents one bin of the accounts index. pub struct InMemAccountsIndex<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> { - last_age_flushed: AtomicU8, + last_age_flushed: AtomicAge, // backing store map_internal: RwLock>, @@ -115,7 +115,7 @@ pub struct InMemAccountsIndex<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> /// how many more ages to skip before this bucket is flushed (as opposed to being skipped). /// When this reaches 0, this bucket is flushed. - remaining_ages_to_skip_flushing: AtomicU8, + remaining_ages_to_skip_flushing: AtomicAge, /// an individual bucket will evict its entries and write to disk every 1/NUM_AGES_TO_DISTRIBUTE_FLUSHES ages /// Higher numbers mean we flush less buckets/s @@ -181,12 +181,12 @@ impl + Into> InMemAccountsIndex RollingBitFieldOnesIter<'_> { + RollingBitFieldOnesIter::new(self) + } } #[cfg(test)] diff --git a/accounts-db/src/rolling_bit_field/iterators.rs b/accounts-db/src/rolling_bit_field/iterators.rs new file mode 100644 index 00000000000000..dd075037ee119c --- /dev/null +++ b/accounts-db/src/rolling_bit_field/iterators.rs @@ -0,0 +1,76 @@ +//! 
Iterators for RollingBitField + +use {super::RollingBitField, std::ops::Range}; + +/// Iterate over the 'set' bits of a RollingBitField +#[derive(Debug)] +pub struct RollingBitFieldOnesIter<'a> { + rolling_bit_field: &'a RollingBitField, + excess_iter: std::collections::hash_set::Iter<'a, u64>, + bit_range: Range<u64>, +} + +impl<'a> RollingBitFieldOnesIter<'a> { + #[must_use] + pub fn new(rolling_bit_field: &'a RollingBitField) -> Self { + Self { + rolling_bit_field, + excess_iter: rolling_bit_field.excess.iter(), + bit_range: rolling_bit_field.min..rolling_bit_field.max_exclusive, + } + } +} + +impl Iterator for RollingBitFieldOnesIter<'_> { + type Item = u64; + + fn next(&mut self) -> Option<Self::Item> { + // Iterate over the excess first + if let Some(excess) = self.excess_iter.next() { + return Some(*excess); + } + + // Then iterate over the bit vec + loop { + // If there are no more bits in the range, then we've iterated over everything and are done + let Some(bit) = self.bit_range.next() else { + return None; + }; + + if self.rolling_bit_field.contains_assume_in_range(&bit) { + break Some(bit); + } + } + } +} + +#[cfg(test)] +mod tests { + use {super::*, test_case::test_case}; + + #[test_case(128, vec![]; "empty")] + #[test_case(128, vec![128_007, 128_017, 128_107]; "without excess")] + #[test_case(128, vec![128_007, 128_017, 128_107, 3, 30, 300]; "with excess")] + // Even though these values are within the range, in an absolute sense, + // they will wrap around after multiples of 16. + #[test_case(16, vec![35, 40, 45 ])] + #[test_case(16, vec![ 40, 45, 50 ])] + #[test_case(16, vec![ 45, 50, 55 ])] + #[test_case(16, vec![ 50, 55, 60 ])] + #[test_case(16, vec![ 55, 60, 65 ])] + #[test_case(16, vec![ 60, 65, 70])] + fn test_rolling_bit_field_ones_iter(num_bits: u64, mut expected: Vec<u64>) { + let mut rolling_bit_field = RollingBitField::new(num_bits); + for val in &expected { + rolling_bit_field.insert(*val); + } + + let mut actual: Vec<_> = rolling_bit_field.iter_ones().collect(); + + // Since iteration order of the 'excess' is not deterministic, sort the 'actual' + // and 'expected' vectors to ensure they can compare deterministically. 
+ actual.sort_unstable(); + expected.sort_unstable(); + assert_eq!(actual, expected); + } +} diff --git a/accounts-db/src/storable_accounts.rs b/accounts-db/src/storable_accounts.rs index 7fbd31ee7d3ad7..1527d7c1a84ecc 100644 --- a/accounts-db/src/storable_accounts.rs +++ b/accounts-db/src/storable_accounts.rs @@ -106,7 +106,6 @@ impl<'a, T: ReadableAccount + Sync> StorableAccounts<'a, T> for (Slot, &'a [(&'a } } -#[allow(dead_code)] impl<'a, T: ReadableAccount + Sync> StorableAccounts<'a, T> for (Slot, &'a [&'a (Pubkey, T)]) { fn pubkey(&self, index: usize) -> &Pubkey { &self.1[index].0 @@ -172,7 +171,6 @@ pub struct StorableAccountsBySlot<'a> { } impl<'a> StorableAccountsBySlot<'a> { - #[allow(dead_code)] /// each element of slots_and_accounts is (source slot, accounts moving FROM source slot) pub fn new( target_slot: Slot, diff --git a/accounts-db/src/tiered_storage.rs b/accounts-db/src/tiered_storage.rs index 6a9f0193fd0fee..829b0cb033b4f5 100644 --- a/accounts-db/src/tiered_storage.rs +++ b/accounts-db/src/tiered_storage.rs @@ -19,7 +19,7 @@ use { }, error::TieredStorageError, footer::{AccountBlockFormat, AccountMetaFormat, OwnersBlockFormat}, - index::AccountIndexFormat, + index::IndexBlockFormat, readable::TieredStorageReader, solana_sdk::account::ReadableAccount, std::{ @@ -40,7 +40,7 @@ pub struct TieredStorageFormat { pub meta_entry_size: usize, pub account_meta_format: AccountMetaFormat, pub owners_block_format: OwnersBlockFormat, - pub account_index_format: AccountIndexFormat, + pub index_block_format: IndexBlockFormat, pub account_block_format: AccountBlockFormat, } @@ -236,7 +236,7 @@ mod tests { assert_eq!(tiered_storage_readonly.reader().unwrap().num_accounts(), 0); assert_eq!(footer.account_meta_format, HOT_FORMAT.account_meta_format); assert_eq!(footer.owners_block_format, HOT_FORMAT.owners_block_format); - assert_eq!(footer.account_index_format, HOT_FORMAT.account_index_format); + assert_eq!(footer.index_block_format, HOT_FORMAT.index_block_format); assert_eq!(footer.account_block_format, HOT_FORMAT.account_block_format); assert_eq!( tiered_storage_readonly.file_size().unwrap() as usize, @@ -379,7 +379,7 @@ mod tests { let expected_footer = TieredStorageFooter { account_meta_format: expected_format.account_meta_format, owners_block_format: expected_format.owners_block_format, - account_index_format: expected_format.account_index_format, + index_block_format: expected_format.index_block_format, account_block_format: expected_format.account_block_format, account_entry_count: expected_accounts.len() as u32, // Hash is not yet implemented, so we bypass the check diff --git a/accounts-db/src/tiered_storage/footer.rs b/accounts-db/src/tiered_storage/footer.rs index c88d665c4362e9..7763d8d5622a0a 100644 --- a/accounts-db/src/tiered_storage/footer.rs +++ b/accounts-db/src/tiered_storage/footer.rs @@ -1,6 +1,6 @@ use { crate::tiered_storage::{ - error::TieredStorageError, file::TieredStorageFile, index::AccountIndexFormat, + error::TieredStorageError, file::TieredStorageFile, index::IndexBlockFormat, mmap_utils::get_type, TieredStorageResult as TsResult, }, memmap2::Mmap, @@ -95,7 +95,7 @@ pub struct TieredStorageFooter { /// The format of the owners block. pub owners_block_format: OwnersBlockFormat, /// The format of the account index block. - pub account_index_format: AccountIndexFormat, + pub index_block_format: IndexBlockFormat, /// The format of the account block. 
pub account_block_format: AccountBlockFormat, @@ -120,7 +120,7 @@ pub struct TieredStorageFooter { // Offsets // Note that offset to the account blocks is omitted as it's always 0. /// The offset pointing to the first byte of the account index block. - pub account_index_offset: u64, + pub index_block_offset: u64, /// The offset pointing to the first byte of the owners block. pub owners_offset: u64, @@ -149,14 +149,14 @@ impl Default for TieredStorageFooter { Self { account_meta_format: AccountMetaFormat::default(), owners_block_format: OwnersBlockFormat::default(), - account_index_format: AccountIndexFormat::default(), + index_block_format: IndexBlockFormat::default(), account_block_format: AccountBlockFormat::default(), account_entry_count: 0, account_meta_entry_size: 0, account_block_size: 0, owner_count: 0, owner_entry_size: 0, - account_index_offset: 0, + index_block_offset: 0, owners_offset: 0, hash: Hash::new_unique(), min_account_address: Pubkey::default(), @@ -241,14 +241,14 @@ mod tests { let expected_footer = TieredStorageFooter { account_meta_format: AccountMetaFormat::Hot, owners_block_format: OwnersBlockFormat::LocalIndex, - account_index_format: AccountIndexFormat::AddressAndOffset, + index_block_format: IndexBlockFormat::AddressAndOffset, account_block_format: AccountBlockFormat::AlignedRaw, account_entry_count: 300, account_meta_entry_size: 24, account_block_size: 4096, owner_count: 250, owner_entry_size: 32, - account_index_offset: 1069600, + index_block_offset: 1069600, owners_offset: 1081200, hash: Hash::new_unique(), min_account_address: Pubkey::default(), @@ -275,7 +275,7 @@ fn test_footer_layout() { assert_eq!(offset_of!(TieredStorageFooter, account_meta_format), 0x00); assert_eq!(offset_of!(TieredStorageFooter, owners_block_format), 0x02); - assert_eq!(offset_of!(TieredStorageFooter, account_index_format), 0x04); + assert_eq!(offset_of!(TieredStorageFooter, index_block_format), 0x04); assert_eq!(offset_of!(TieredStorageFooter, account_block_format), 0x06); assert_eq!(offset_of!(TieredStorageFooter, account_entry_count), 0x08); assert_eq!( @@ -285,7 +285,7 @@ assert_eq!(offset_of!(TieredStorageFooter, account_block_size), 0x10); assert_eq!(offset_of!(TieredStorageFooter, owner_count), 0x18); assert_eq!(offset_of!(TieredStorageFooter, owner_entry_size), 0x1C); - assert_eq!(offset_of!(TieredStorageFooter, account_index_offset), 0x20); + assert_eq!(offset_of!(TieredStorageFooter, index_block_offset), 0x20); assert_eq!(offset_of!(TieredStorageFooter, owners_offset), 0x28); assert_eq!(offset_of!(TieredStorageFooter, min_account_address), 0x30); assert_eq!(offset_of!(TieredStorageFooter, max_account_address), 0x50); diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index 78271700686dd2..f2efc1a966ca11 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -9,7 +9,7 @@ use { footer::{ AccountBlockFormat, AccountMetaFormat, OwnersBlockFormat, TieredStorageFooter, }, - index::AccountIndexFormat, + index::{AccountOffset, IndexBlockFormat}, meta::{AccountMetaFlags, AccountMetaOptionalFields, TieredAccountMeta}, mmap_utils::get_type, TieredStorageFormat, TieredStorageResult, @@ -25,7 +25,7 @@ pub const HOT_FORMAT: TieredStorageFormat = TieredStorageFormat { meta_entry_size: std::mem::size_of::<HotAccountMeta>(), account_meta_format: AccountMetaFormat::Hot, owners_block_format: OwnersBlockFormat::LocalIndex, - account_index_format: AccountIndexFormat::AddressAndOffset, + index_block_format: 
IndexBlockFormat::AddressAndOffset, account_block_format: AccountBlockFormat::AlignedRaw, }; @@ -223,8 +223,11 @@ impl HotStorageReader { } /// Returns the account meta located at the specified offset. - fn get_account_meta_from_offset(&self, offset: usize) -> TieredStorageResult<&HotAccountMeta> { - let (meta, _) = get_type::<HotAccountMeta>(&self.mmap, offset)?; + fn get_account_meta_from_offset( + &self, + account_offset: AccountOffset, + ) -> TieredStorageResult<&HotAccountMeta> { + let (meta, _) = get_type::<HotAccountMeta>(&self.mmap, account_offset.block)?; Ok(meta) } } @@ -241,7 +244,7 @@ pub mod tests { FOOTER_SIZE, }, hot::{HotAccountMeta, HotStorageReader}, - index::AccountIndexFormat, + index::{AccountOffset, IndexBlockFormat}, meta::{AccountMetaFlags, AccountMetaOptionalFields, TieredAccountMeta}, }, memoffset::offset_of, @@ -383,14 +386,14 @@ pub mod tests { let expected_footer = TieredStorageFooter { account_meta_format: AccountMetaFormat::Hot, owners_block_format: OwnersBlockFormat::LocalIndex, - account_index_format: AccountIndexFormat::AddressAndOffset, + index_block_format: IndexBlockFormat::AddressAndOffset, account_block_format: AccountBlockFormat::AlignedRaw, account_entry_count: 300, account_meta_entry_size: 16, account_block_size: 4096, owner_count: 250, owner_entry_size: 32, - account_index_offset: 1069600, + index_block_offset: 1069600, owners_offset: 1081200, hash: Hash::new_unique(), min_account_address: Pubkey::default(), @@ -444,7 +447,7 @@ pub mod tests { .map(|meta| { let prev_offset = current_offset; current_offset += file.write_type(meta).unwrap(); - prev_offset + AccountOffset { block: prev_offset } }) .collect(); // while the test only focuses on account metas, writing a footer diff --git a/accounts-db/src/tiered_storage/index.rs b/accounts-db/src/tiered_storage/index.rs index 656343fb78fc8b..cd8b2a33c82529 100644 --- a/accounts-db/src/tiered_storage/index.rs +++ b/accounts-db/src/tiered_storage/index.rs @@ -17,6 +17,21 @@ pub struct AccountIndexWriterEntry<'a> { pub intra_block_offset: u64, } +/// The offset to an account stored inside its accounts block. +/// This struct is used to access the meta and data of an account by looking through +/// its accounts block. +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub struct AccountOffset { + /// The offset to the accounts block that contains the account meta/data. + pub block: usize, +} + +/// The offset to an account/address entry in the accounts index block. +/// This can be used to obtain the AccountOffset and address by looking through +/// the accounts index block. +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub struct IndexOffset(usize); + /// The index format of a tiered accounts file. #[repr(u16)] #[derive( @@ -30,7 +45,7 @@ pub struct AccountIndexWriterEntry<'a> { num_enum::IntoPrimitive, num_enum::TryFromPrimitive, )] -pub enum AccountIndexFormat { +pub enum IndexBlockFormat { /// This format optimizes the storage size by storing only account addresses /// and offsets. It skips storing the size of account data by storing account /// block entries and index block entries in the same order. @@ -38,7 +53,7 @@ AddressAndOffset = 0, } -impl AccountIndexFormat { +impl IndexBlockFormat { /// Persists the specified index_entries to the specified file and returns /// the total number of bytes written. 
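// Illustrative layout for `AddressAndOffset`, inferred from the offset math in `get_account_address`/`get_account_offset` below (assuming `Pubkey` addresses and `u64` block offsets): the index block stores all addresses first, then all account offsets, in the same order as the account block entries: [addr 0 .. addr N-1][offset 0 .. offset N-1].
// Entry i's address therefore lives at `index_block_offset + i * size_of::<Pubkey>()`, and its account offset at `index_block_offset + N * size_of::<Pubkey>() + i * size_of::<u64>()`.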
pub fn write_index_block( @@ -65,32 +80,33 @@ &self, map: &'a Mmap, footer: &TieredStorageFooter, - index: usize, + offset: IndexOffset, ) -> TieredStorageResult<&'a Pubkey> { let offset = match self { Self::AddressAndOffset => { - footer.account_index_offset as usize + std::mem::size_of::<Pubkey>() * index + footer.index_block_offset as usize + std::mem::size_of::<Pubkey>() * offset.0 } }; let (address, _) = get_type::<Pubkey>(map, offset)?; Ok(address) } - /// Returns the offset to the account block that contains the account - /// associated with the specified index to the index block. - pub fn get_account_block_offset( + /// Returns the offset to the account given the specified index. + pub fn get_account_offset( &self, map: &Mmap, footer: &TieredStorageFooter, - index: usize, - ) -> TieredStorageResult<u64> { + offset: IndexOffset, + ) -> TieredStorageResult<AccountOffset> { match self { Self::AddressAndOffset => { - let offset = footer.account_index_offset as usize + let offset = footer.index_block_offset as usize + std::mem::size_of::<Pubkey>() * footer.account_entry_count as usize - + index * std::mem::size_of::<u64>(); + + offset.0 * std::mem::size_of::<u64>(); let (account_block_offset, _) = get_type(map, offset)?; - Ok(*account_block_offset) + Ok(AccountOffset { + block: *account_block_offset, + }) } } } @@ -134,11 +150,11 @@ mod tests { { let file = TieredStorageFile::new_writable(&path).unwrap(); - let indexer = AccountIndexFormat::AddressAndOffset; + let indexer = IndexBlockFormat::AddressAndOffset; indexer.write_index_block(&file, &index_entries).unwrap(); } - let indexer = AccountIndexFormat::AddressAndOffset; + let indexer = IndexBlockFormat::AddressAndOffset; let file = OpenOptions::new() .read(true) .create(false) .open(&path) .unwrap(); let map = unsafe { MmapOptions::new().map(&file).unwrap() }; for (i, index_entry) in index_entries.iter().enumerate() { - assert_eq!( - index_entry.block_offset, - indexer.get_account_block_offset(&map, &footer, i).unwrap() - ); - let address = indexer.get_account_address(&map, &footer, i).unwrap(); + let account_offset = indexer + .get_account_offset(&map, &footer, IndexOffset(i)) + .unwrap(); + assert_eq!(index_entry.block_offset, account_offset.block as u64); + let address = indexer + .get_account_address(&map, &footer, IndexOffset(i)) + .unwrap(); assert_eq!(index_entry.address, address); } } diff --git a/accounts-db/src/tiered_storage/writer.rs b/accounts-db/src/tiered_storage/writer.rs index dece0e42732f49..113d331e4a15c4 100644 --- a/accounts-db/src/tiered_storage/writer.rs +++ b/accounts-db/src/tiered_storage/writer.rs @@ -46,7 +46,7 @@ impl<'format> TieredStorageWriter<'format> { account_meta_format: self.format.account_meta_format, owners_block_format: self.format.owners_block_format, account_block_format: self.format.account_block_format, - account_index_format: self.format.account_index_format, + index_block_format: self.format.index_block_format, account_entry_count: accounts .accounts .len() diff --git a/cargo-registry/src/crate_handler.rs b/cargo-registry/src/crate_handler.rs index e95f51d752552b..d1d013314eed95 100644 --- a/cargo-registry/src/crate_handler.rs +++ b/cargo-registry/src/crate_handler.rs @@ -31,6 +31,8 @@ use { tempfile::{tempdir, TempDir}, }; +const APPEND_CRATE_TO_ELF: bool = true; + pub(crate) type Error = Box; #[derive(Clone, Debug, Deserialize, Serialize)] @@ -99,6 +101,8 @@ pub(crate) struct Program { path: String, id: Pubkey, _tempdir: Arc<TempDir>, + meta: PackageMetaData, + packed_crate: PackedCrate, } impl Program { @@ 
-107,9 +111,17 @@ impl Program { return Err("Signer doesn't match program ID".into()); } - let program_data = read_and_verify_elf(self.path.as_ref()) + let mut program_data = read_and_verify_elf(self.path.as_ref()) .map_err(|e| format!("failed to read the program: {}", e))?; + if APPEND_CRATE_TO_ELF { + let program_id_str = Program::program_id_to_crate_name(self.id); + let crate_tar_gz = + PackedCrate::new_rebased(&self.packed_crate, &self.meta, &program_id_str)?; + let crate_len = u32::to_le_bytes(crate_tar_gz.0.len() as u32); + program_data.extend_from_slice(&crate_tar_gz.0); + program_data.extend_from_slice(&crate_len); + } let command_config = RPCCommandConfig::new(client.as_ref()); process_deploy_program( @@ -128,7 +140,7 @@ impl Program { Ok(()) } - fn dump(&self, client: Arc) -> Result<(), Error> { + fn dump(&mut self, client: Arc) -> Result<(), Error> { info!("Fetching program {:?}", self.id); let command_config = RPCCommandConfig::new(client.as_ref()); @@ -143,14 +155,41 @@ impl Program { format!("Failed to fetch the program: {}", e) })?; + if APPEND_CRATE_TO_ELF { + let Ok(buffer) = fs::read(&self.path) else { + return Err("Failed to read the program file".into()); + }; + + let data = Bytes::from(buffer); + + let data_len = data.len(); + let sizeof_length = size_of::(); + + // The crate length is at the tail of the data buffer, as 4 LE bytes. + let length_le = data.slice(data_len.saturating_sub(sizeof_length)..data_len); + let length = + u32::from_le_bytes(length_le.deref().try_into().expect("Failed to read length")); + + let crate_start = data_len + .saturating_sub(sizeof_length) + .saturating_sub(length as usize); + let crate_end = data_len.saturating_sub(sizeof_length); + + self.packed_crate = PackedCrate(Bytes::copy_from_slice(&data[crate_start..crate_end])); + } Ok(()) } pub(crate) fn crate_name_to_program_id(crate_name: &str) -> Option { - hex::decode(crate_name) + let (_, id_str) = crate_name.split_once('-')?; + hex::decode(id_str) .ok() .and_then(|bytes| Pubkey::try_from(bytes).ok()) } + + fn program_id_to_crate_name(id: Pubkey) -> String { + format!("sol-{}", hex::encode(id.to_bytes())) + } } impl From<&UnpackedCrate> for Program { @@ -159,20 +198,24 @@ impl From<&UnpackedCrate> for Program { path: value.program_path.clone(), id: value.program_id, _tempdir: value.tempdir.clone(), + meta: value.meta.clone(), + packed_crate: value.packed_crate.clone(), } } } -pub(crate) struct CratePackage(pub(crate) Bytes); +/// Contents of a .crate file +#[derive(Clone, Default)] +pub(crate) struct PackedCrate(pub(crate) Bytes); -impl From for Result { - fn from(value: UnpackedCrate) -> Self { +impl PackedCrate { + fn new(value: UnpackedCrate) -> Result { let mut archive = Builder::new(Vec::new()); archive.mode(HeaderMode::Deterministic); - let base_path = UnpackedCrate::make_path(&value.tempdir, &value.meta, "out"); + let base_path = UnpackedCrate::make_path(&value.tempdir, &value.meta, ""); archive.append_dir_all( - format!("{}-{}/out", value.meta.name, value.meta.vers), + format!("{}-{}/", value.meta.name, value.meta.vers), base_path, )?; let data = archive.into_inner()?; @@ -182,7 +225,50 @@ impl From for Result { let mut zipped_data = Vec::new(); encoder.read_to_end(&mut zipped_data)?; - Ok(CratePackage(Bytes::from(zipped_data))) + Ok(PackedCrate(Bytes::from(zipped_data))) + } + + fn new_rebased(&self, meta: &PackageMetaData, target_base: &str) -> Result { + let mut unpacked = UnpackedCrate::decompress(self.clone(), meta.clone())?; + + let name = 
Program::program_id_to_crate_name(unpacked.program_id); + UnpackedCrate::fixup_toml(&unpacked.tempdir, "Cargo.toml.orig", &unpacked.meta, &name)?; + UnpackedCrate::fixup_toml(&unpacked.tempdir, "Cargo.toml", &unpacked.meta, &name)?; + + let source_path = UnpackedCrate::make_path(&unpacked.tempdir, &unpacked.meta, ""); + unpacked.meta.name = target_base.to_string(); + let target_path = UnpackedCrate::make_path(&unpacked.tempdir, &unpacked.meta, ""); + fs::rename(source_path, target_path.clone()) + .map_err(|_| "Failed to rename the crate folder")?; + + Self::new(unpacked) + } + + fn version(&self) -> String { + let decoder = GzDecoder::new(self.0.as_ref()); + let mut archive = Archive::new(decoder); + + if let Some(Ok(entry)) = archive + .entries() + .ok() + .and_then(|mut entries| entries.nth(0)) + { + if let Ok(path) = entry.path() { + if let Some(path_str) = path.to_str() { + if let Some((_, vers)) = path_str.rsplit_once('-') { + let mut version = vers.to_string(); + // Removing trailing '/' + if version.ends_with('/') { + version.pop(); + } + return version; + } + } + } + } + + // Placeholder version. + "0.1.0".to_string() } } @@ -193,19 +279,14 @@ pub(crate) struct UnpackedCrate { program_path: String, program_id: Pubkey, keypair: Option, + packed_crate: PackedCrate, } -impl From for Result { - fn from(value: CratePackage) -> Self { - let bytes = value.0; - let (meta, offset) = PackageMetaData::new(&bytes)?; - - let (_crate_file_length, length_size) = - PackageMetaData::read_u32_length(&bytes.slice(offset..))?; - let crate_bytes = bytes.slice(offset.saturating_add(length_size)..); - let cksum = format!("{:x}", Sha256::digest(&crate_bytes)); +impl UnpackedCrate { + fn decompress(packed_crate: PackedCrate, meta: PackageMetaData) -> Result { + let cksum = format!("{:x}", Sha256::digest(&packed_crate.0)); - let decoder = GzDecoder::new(crate_bytes.as_ref()); + let decoder = GzDecoder::new(packed_crate.0.as_ref()); let mut archive = Archive::new(decoder); let tempdir = tempdir()?; @@ -213,10 +294,6 @@ impl From for Result { let lib_name = UnpackedCrate::program_library_name(&tempdir, &meta)?; - let base_path = UnpackedCrate::make_path(&tempdir, &meta, "out"); - fs::create_dir_all(base_path) - .map_err(|_| "Failed to create the base directory for output")?; - let program_path = UnpackedCrate::make_path(&tempdir, &meta, format!("out/{}.so", lib_name)) .into_os_string() @@ -237,11 +314,19 @@ impl From for Result { program_path, program_id: keypair.pubkey(), keypair: Some(keypair), + packed_crate, }) } -} -impl UnpackedCrate { + pub(crate) fn new(bytes: Bytes) -> Result { + let (meta, offset) = PackageMetaData::new(&bytes)?; + + let (_crate_file_length, length_size) = + PackageMetaData::read_u32_length(&bytes.slice(offset..))?; + let packed_crate = PackedCrate(bytes.slice(offset.saturating_add(length_size)..)); + UnpackedCrate::decompress(packed_crate, meta) + } + pub(crate) fn publish( &self, client: Arc, @@ -262,36 +347,38 @@ impl UnpackedCrate { } pub(crate) fn fetch_index(id: Pubkey, client: Arc) -> Result { - let (_program, unpacked_crate) = Self::fetch_program(id, client)?; - let mut entry: IndexEntry = unpacked_crate.meta.clone().into(); - - let packed_crate: Result = UnpackedCrate::into(unpacked_crate); - let packed_crate = packed_crate?; - + let (packed_crate, meta) = Self::fetch(id, "", client)?; + let mut entry: IndexEntry = meta.into(); entry.cksum = format!("{:x}", Sha256::digest(&packed_crate.0)); Ok(entry) } - pub(crate) fn fetch(id: Pubkey, client: Arc) -> Result { - let 
(_program, unpacked_crate) = Self::fetch_program(id, client)?; - UnpackedCrate::into(unpacked_crate) - } - - fn fetch_program(id: Pubkey, client: Arc) -> Result<(Program, UnpackedCrate), Error> { - let crate_obj = Self::new_empty(id)?; - let program = Program::from(&crate_obj); + pub(crate) fn fetch( + id: Pubkey, + vers: &str, + client: Arc, + ) -> Result<(PackedCrate, PackageMetaData), Error> { + let unpacked = Self::new_empty(id, vers)?; + let mut program = Program::from(&unpacked); program.dump(client)?; // Decompile the program // Generate a Cargo.toml - Ok((program, crate_obj)) + let mut meta = unpacked.meta.clone(); + + if APPEND_CRATE_TO_ELF { + meta.vers = program.packed_crate.version(); + Ok((program.packed_crate, meta)) + } else { + PackedCrate::new(unpacked).map(|file| (file, meta)) + } } - fn new_empty(id: Pubkey) -> Result { + fn new_empty(id: Pubkey, vers: &str) -> Result { let meta = PackageMetaData { - name: hex::encode(id.to_bytes()), - vers: "0.1.0".to_string(), + name: Program::program_id_to_crate_name(id), + vers: vers.to_string(), deps: vec![], features: BTreeMap::new(), authors: vec![], @@ -328,6 +415,7 @@ impl UnpackedCrate { program_path, program_id: id, keypair: None, + packed_crate: PackedCrate::default(), }) } @@ -348,4 +436,22 @@ impl UnpackedCrate { .ok_or("Failed to get module name")?; Ok(library_name.to_string()) } + + fn fixup_toml( + tempdir: &TempDir, + cargo_toml_name: &str, + meta: &PackageMetaData, + name: &str, + ) -> Result<(), Error> { + let toml_orig_path = Self::make_path(tempdir, meta, cargo_toml_name); + let toml_content = fs::read_to_string(&toml_orig_path)?; + let mut toml = toml_content.parse::()?; + toml.get_mut("package") + .and_then(|v| v.get_mut("name")) + .map(|v| *v = toml::Value::String(name.to_string())) + .ok_or("Failed to set package name")?; + + fs::write(toml_orig_path, toml.to_string())?; + Ok(()) + } } diff --git a/cargo-registry/src/main.rs b/cargo-registry/src/main.rs index 288e7fc9e69388..0bfc2c7f3ff004 100644 --- a/cargo-registry/src/main.rs +++ b/cargo-registry/src/main.rs @@ -2,7 +2,7 @@ use { crate::{ client::Client, - crate_handler::{CratePackage, Error, Program, UnpackedCrate}, + crate_handler::{Error, Program, UnpackedCrate}, sparse_index::RegistryIndex, }, hyper::{ @@ -38,14 +38,15 @@ impl CargoRegistryService { match bytes { Ok(data) => { - let Ok(crate_object) = CratePackage(data).into() else { + let Ok(unpacked_crate) = UnpackedCrate::new(data) else { return response_builder::error_response( hyper::StatusCode::INTERNAL_SERVER_ERROR, "Failed to parse the crate information", ); }; let Ok(result) = - tokio::task::spawn_blocking(move || crate_object.publish(client, index)).await + tokio::task::spawn_blocking(move || unpacked_crate.publish(client, index)) + .await else { return response_builder::error_response( hyper::StatusCode::INTERNAL_SERVER_ERROR, @@ -83,7 +84,7 @@ impl CargoRegistryService { _request: &hyper::Request, client: Arc, ) -> hyper::Response { - let Some((path, crate_name, _version)) = Self::get_crate_name_and_version(path) else { + let Some((path, crate_name, version)) = Self::get_crate_name_and_version(path) else { return response_builder::error_in_parsing(); }; @@ -92,10 +93,10 @@ impl CargoRegistryService { } let package = Program::crate_name_to_program_id(crate_name) - .and_then(|id| UnpackedCrate::fetch(id, client).ok()); + .and_then(|id| UnpackedCrate::fetch(id, version, client).ok()); // Return the package to the caller in the response - if let Some(package) = package { + if let Some((package, 
_meta)) = package { response_builder::success_response_bytes(package.0) } else { response_builder::error_response( diff --git a/ci/docker-run.sh b/ci/docker-run.sh index eb9d06836f692a..8e43bcad55ce5f 100755 --- a/ci/docker-run.sh +++ b/ci/docker-run.sh @@ -42,9 +42,6 @@ ARGS=( ) if [[ -n $CI ]]; then - # Share the real ~/.cargo between docker containers in CI for speed - ARGS+=(--volume "$HOME:/home") - if [[ -n $BUILDKITE ]]; then # I hate buildkite-esque echo is leaking into this generic shell wrapper. # but it's easiest to notify to users, and properly guarded under $BUILDKITE_ env @@ -54,28 +51,37 @@ if [[ -n $CI ]]; then # sccache-related bugs echo "--- $0 ... (with sccache being DISABLED due to many (${BUILDKITE_RETRY_COUNT}) retries)" else - echo "--- $0 ... (with sccache enabled with prefix: $SCCACHE_S3_KEY_PREFIX)" + echo "--- $0 ... (with sccache enabled with prefix: $SCCACHE_KEY_PREFIX)" + # sccache ARGS+=( --env "RUSTC_WRAPPER=/usr/local/cargo/bin/sccache" - --env AWS_ACCESS_KEY_ID - --env AWS_SECRET_ACCESS_KEY - --env SCCACHE_BUCKET - --env SCCACHE_REGION - --env SCCACHE_S3_KEY_PREFIX ) + + # s3 + if [ -n "$AWS_ACCESS_KEY_ID" ]; then + ARGS+=( + --env AWS_ACCESS_KEY_ID + --env AWS_SECRET_ACCESS_KEY + --env SCCACHE_BUCKET + --env SCCACHE_REGION + --env SCCACHE_S3_KEY_PREFIX + ) + fi + + # gcs + if [ -n "$SCCACHE_GCS_KEY_PATH" ]; then + ARGS+=( + --env SCCACHE_GCS_KEY_PATH + --volume "$SCCACHE_GCS_KEY_PATH:$SCCACHE_GCS_KEY_PATH" + --env SCCACHE_GCS_BUCKET + --env SCCACHE_GCS_RW_MODE + --env SCCACHE_GCS_KEY_PREFIX + ) + fi fi fi -else - # Avoid sharing ~/.cargo when building locally to avoid a mixed macOS/Linux - # ~/.cargo - ARGS+=(--volume "$PWD:/home") fi -ARGS+=(--env "HOME=/home" --env "CARGO_HOME=/home/.cargo") - -# kcov tries to set the personality of the binary which docker -# doesn't allow by default. 
-ARGS+=(--security-opt "seccomp=unconfined") # Ensure files are created with the current host uid/gid if [[ -z "$SOLANA_DOCKER_RUN_NOSETUID" ]]; then diff --git a/ci/docker-rust-nightly/Dockerfile b/ci/docker-rust-nightly/Dockerfile index a5d933b2a2d79f..baf7e09632bac6 100644 --- a/ci/docker-rust-nightly/Dockerfile +++ b/ci/docker-rust-nightly/Dockerfile @@ -1,17 +1,21 @@ FROM solanalabs/rust:1.73.0 + ARG date +ARG GRCOV_VERSION=v0.8.18 -RUN set -x \ - && rustup install nightly-$date \ - && rustup component add clippy --toolchain=nightly-$date \ - && rustup component add rustfmt --toolchain=nightly-$date \ - && rustup show \ - && rustc --version \ - && cargo --version \ - && cargo install grcov \ - && rustc +nightly-$date --version \ - && cargo +nightly-$date --version \ - # codecov - && curl -Os https://uploader.codecov.io/latest/linux/codecov \ - && chmod +x codecov \ - && mv codecov /usr/bin +RUN \ + rustup install nightly-$date && \ + rustup component add clippy --toolchain=nightly-$date && \ + rustup component add rustfmt --toolchain=nightly-$date && \ + rustup show && \ + rustc --version && \ + cargo --version && \ + # grcov + curl -LOsS "https://github.com/mozilla/grcov/releases/download/$GRCOV_VERSION/grcov-x86_64-unknown-linux-musl.tar.bz2" && \ + tar -xf grcov-x86_64-unknown-linux-musl.tar.bz2 && \ + mv ./grcov $CARGO_HOME/bin && \ + rm grcov-x86_64-unknown-linux-musl.tar.bz2 && \ + # codecov + curl -Os https://uploader.codecov.io/latest/linux/codecov && \ + chmod +x codecov && \ + mv codecov /usr/bin diff --git a/ci/docker-rust/Dockerfile b/ci/docker-rust/Dockerfile index c15b21636f365f..8619d5e68e30a0 100644 --- a/ci/docker-rust/Dockerfile +++ b/ci/docker-rust/Dockerfile @@ -1,55 +1,105 @@ -# Note: when the rust version is changed also modify -# ci/rust-version.sh to pick up the new image tag -FROM rust:1.73.0-bullseye +FROM ubuntu:20.04 -ARG NODE_MAJOR=18 +ARG \ + RUST_VERSION=1.73.0 \ + GOLANG_VERSION=1.21.3 \ + NODE_MAJOR=18 \ + SCCACHE_VERSION=v0.5.4 -RUN set -x \ - && apt update \ - && apt-get install apt-transport-https \ - && echo deb https://apt.buildkite.com/buildkite-agent stable main > /etc/apt/sources.list.d/buildkite-agent.list \ - && apt-key adv --no-tty --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 32A37959C2FA5C3C99EFBC32A79206696452D198 \ - && apt update \ - && apt install -y \ - buildkite-agent \ - clang \ - cmake \ - jq \ - lcov \ - libudev-dev \ - mscgen \ - nodejs \ - net-tools \ - rsync \ - sudo \ - golang \ - unzip \ - lld \ - protobuf-compiler \ - \ - && apt remove -y libcurl4-openssl-dev \ - # node - && sudo apt-get update \ - && sudo apt-get install -y ca-certificates curl gnupg \ - && sudo mkdir -p /etc/apt/keyrings \ - && curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | sudo gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg \ - && echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_$NODE_MAJOR.x nodistro main" | sudo tee /etc/apt/sources.list.d/nodesource.list \ - && sudo apt-get update \ - && sudo apt-get install nodejs -y \ - && node --version \ - && npm --version \ - # rust - && rustup component add rustfmt \ - && rustup component add clippy \ - && rustup target add wasm32-unknown-unknown \ - && cargo install cargo-audit \ - && cargo install cargo-hack \ - && cargo install cargo-sort \ - && cargo install mdbook \ - && cargo install mdbook-linkcheck \ - && cargo install svgbob_cli \ - && cargo install wasm-pack \ - && cargo install sccache \ - && rustc --version \ - && 
cargo --version \ - && rm -rf /var/lib/apt/lists/* +SHELL ["/bin/bash", "-o", "pipefail", "-c"] + +ENV \ + DEBIAN_FRONTEND=noninteractive \ + TZ=UTC + +# golang +ENV PATH="/usr/local/go/bin:$PATH" + +# rust +ENV \ + RUSTUP_HOME=/usr/local/rustup \ + CARGO_HOME=/usr/local/cargo \ + PATH="$PATH:/usr/local/cargo/bin" + +RUN apt-get update && \ + apt-get install --no-install-recommends -y \ + # basic + tzdata \ + apt-transport-https \ + sudo \ + build-essential \ + git \ + vim \ + jq \ + ca-certificates \ + curl \ + gnupg \ + lld \ + cmake \ + # docs + mscgen \ + # solana compiling + libssl-dev \ + libudev-dev \ + pkg-config \ + zlib1g-dev \ + llvm \ + clang \ + cmake \ + make \ + libprotobuf-dev \ + protobuf-compiler \ + && \ + # buildkite + curl -fsSL https://keys.openpgp.org/vks/v1/by-fingerprint/32A37959C2FA5C3C99EFBC32A79206696452D198 | gpg --dearmor -o /usr/share/keyrings/buildkite-agent-archive-keyring.gpg && \ + echo "deb [signed-by=/usr/share/keyrings/buildkite-agent-archive-keyring.gpg] https://apt.buildkite.com/buildkite-agent stable main" | tee /etc/apt/sources.list.d/buildkite-agent.list && \ + apt-get update && \ + apt-get install -y buildkite-agent && \ + # gh + curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg | sudo dd of=/usr/share/keyrings/githubcli-archive-keyring.gpg && \ + sudo chmod go+r /usr/share/keyrings/githubcli-archive-keyring.gpg && \ + echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | tee /etc/apt/sources.list.d/github-cli.list > /dev/null && \ + apt-get update && \ + apt-get install -y gh && \ + # rust + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs/ | sh -s -- --no-modify-path --profile minimal --default-toolchain $RUST_VERSION -y && \ + rustup component add rustfmt && \ + rustup component add clippy && \ + rustup target add wasm32-unknown-unknown && \ + cargo install cargo-audit && \ + cargo install cargo-hack && \ + cargo install cargo-sort && \ + cargo install mdbook && \ + cargo install mdbook-linkcheck && \ + cargo install svgbob_cli && \ + cargo install wasm-pack && \ + cargo install rustfilt && \ + chmod -R a+w $CARGO_HOME $RUSTUP_HOME && \ + rm -rf $CARGO_HOME/registry && \ + # sccache + curl -LOsS "https://github.com/mozilla/sccache/releases/download/$SCCACHE_VERSION/sccache-$SCCACHE_VERSION-x86_64-unknown-linux-musl.tar.gz" && \ + tar -xzf "sccache-$SCCACHE_VERSION-x86_64-unknown-linux-musl.tar.gz" && \ + mv "sccache-$SCCACHE_VERSION-x86_64-unknown-linux-musl"/sccache "$CARGO_HOME/bin/" && \ + rm "sccache-$SCCACHE_VERSION-x86_64-unknown-linux-musl.tar.gz" && \ + rm -rf "sccache-$SCCACHE_VERSION-x86_64-unknown-linux-musl" && \ + # nextest + curl -LsSf https://get.nexte.st/latest/linux | tar zxf - -C "$CARGO_HOME/bin" && \ + # golang + curl -LOsS "https://golang.org/dl/go$GOLANG_VERSION.linux-amd64.tar.gz" && \ + tar -C /usr/local -xzf "go$GOLANG_VERSION.linux-amd64.tar.gz" && \ + rm "go$GOLANG_VERSION.linux-amd64.tar.gz" && \ + # nodejs + sudo mkdir -p /etc/apt/keyrings && \ + curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | sudo gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg && \ + echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_$NODE_MAJOR.x nodistro main" | sudo tee /etc/apt/sources.list.d/nodesource.list && \ + sudo apt-get update && \ + sudo apt-get install -y nodejs && \ + # setup path + mkdir /.cache && \ 
+ chmod -R a+w /.cache && \ + mkdir /.config && \ + chmod -R a+w /.config && \ + mkdir /.npm && \ + chmod -R a+w /.npm && \ + # clean lists + rm -rf /var/lib/apt/lists/* diff --git a/ci/semver_bash/semver_test.sh b/ci/semver_bash/semver_test.sh index a0ff99461ec43b..a4cca97484de4f 100755 --- a/ci/semver_bash/semver_test.sh +++ b/ci/semver_bash/semver_test.sh @@ -21,7 +21,7 @@ echo "$A -> M:$MAJOR m:$MINOR p:$PATCH s:$SPECIAL. Expect M:1 m:3 p:2 s:" semverParseInto $E MAJOR MINOR PATCH SPECIAL echo "$E -> M:$MAJOR m:$MINOR p:$PATCH s:$SPECIAL. Expect M:1 m:3 p:2 s:a" -echo "Equality comparisions" +echo "Equality comparisons" semverEQ $A $A echo "$A == $A -> $?. Expect 0." @@ -32,7 +32,7 @@ semverGT $A $A echo "$A > $A -> $?. Expect 1." -echo "Major number comparisions" +echo "Major number comparisons" semverEQ $A $B echo "$A == $B -> $?. Expect 1." @@ -52,7 +52,7 @@ semverGT $B $A echo "$B > $A -> $?. Expect 0." -echo "Minor number comparisions" +echo "Minor number comparisons" semverEQ $A $C echo "$A == $C -> $?. Expect 1." @@ -71,7 +71,7 @@ echo "$C < $A -> $?. Expect 1." semverGT $C $A echo "$C > $A -> $?. Expect 0." -echo "patch number comparisions" +echo "patch number comparisons" semverEQ $A $D echo "$A == $D -> $?. Expect 1." @@ -90,7 +90,7 @@ echo "$D < $A -> $?. Expect 1." semverGT $D $A echo "$D > $A -> $?. Expect 0." -echo "special section vs no special comparisions" +echo "special section vs no special comparisons" semverEQ $A $E echo "$A == $E -> $?. Expect 1." @@ -109,7 +109,7 @@ echo "$E < $A -> $?. Expect 0." semverGT $E $A echo "$E > $A -> $?. Expect 1." -echo "special section vs special comparisions" +echo "special section vs special comparisons" semverEQ $E $F echo "$E == $F -> $?. Expect 1." diff --git a/ci/test-checks.sh b/ci/test-checks.sh index 85375d6bbeec4b..3a4f15ec23d81f 100755 --- a/ci/test-checks.sh +++ b/ci/test-checks.sh @@ -69,35 +69,7 @@ fi _ ci/order-crates-for-publishing.py -nightly_clippy_allows=(--allow=clippy::redundant_clone) - -# Use nightly clippy, as frozen-abi proc-macro generates a lot of code across -# various crates in this whole monorepo (frozen-abi is enabled only under nightly -# due to the use of unstable rust feature). Likewise, frozen-abi(-macro) crates' -# unit tests are only compiled under nightly. -# Similarly, nightly is desired to run clippy over all of bench files because -# the bench itself isn't stabilized yet... -# ref: https://github.com/rust-lang/rust/issues/66287 -_ scripts/cargo-for-all-lock-files.sh -- "+${rust_nightly}" clippy --workspace --all-targets --features dummy-for-ci-check -- \ - --deny=warnings \ - --deny=clippy::default_trait_access \ - --deny=clippy::arithmetic_side_effects \ - --deny=clippy::manual_let_else \ - --deny=clippy::used_underscore_binding \ - "${nightly_clippy_allows[@]}" - -# temporarily run stable clippy as well to scan the codebase for -# `redundant_clone`s, which is disabled as nightly clippy is buggy: -# https://github.com/solana-labs/solana/issues/31834 -# -# can't use --all-targets: -# error[E0554]: `#![feature]` may not be used on the stable release channel -_ scripts/cargo-for-all-lock-files.sh -- clippy --workspace --tests --bins --examples --features dummy-for-ci-check -- \ - --deny=warnings \ - --deny=clippy::default_trait_access \ - --deny=clippy::arithmetic_side_effects \ - --deny=clippy::manual_let_else \ - --deny=clippy::used_underscore_binding +_ scripts/cargo-clippy.sh if [[ -n $CI ]]; then # exclude from printing "Checking xxx ..." 
diff --git a/client-test/tests/client.rs b/client-test/tests/client.rs index b68e23753c4664..96c0a8cd82c883 100644 --- a/client-test/tests/client.rs +++ b/client-test/tests/client.rs @@ -168,7 +168,7 @@ fn test_account_subscription() { // Transfer 100 lamports from alice to bob let tx = system_transaction::transfer(&alice, &bob.pubkey(), 100, blockhash); bank_forks - .write() + .read() .unwrap() .get(1) .unwrap() @@ -373,7 +373,7 @@ fn test_program_subscription() { // Create new program account at bob's address let tx = system_transaction::create_account(&alice, &bob, blockhash, 100, 0, &program_id); bank_forks - .write() + .read() .unwrap() .get(1) .unwrap() diff --git a/core/src/banking_stage/immutable_deserialized_packet.rs b/core/src/banking_stage/immutable_deserialized_packet.rs index 4617702059b202..8a9d82e32a38c0 100644 --- a/core/src/banking_stage/immutable_deserialized_packet.rs +++ b/core/src/banking_stage/immutable_deserialized_packet.rs @@ -96,6 +96,10 @@ impl ImmutableDeserializedPacket { self.priority_details.compute_unit_limit } + pub fn priority_details(&self) -> TransactionPriorityDetails { + self.priority_details.clone() + } + // This function deserializes packets into transactions, computes the blake3 hash of transaction // messages, and verifies secp256k1 instructions. pub fn build_sanitized_transaction( diff --git a/core/src/banking_stage/transaction_scheduler/mod.rs b/core/src/banking_stage/transaction_scheduler/mod.rs index bf6f761baca88c..0b65dce06a48fc 100644 --- a/core/src/banking_stage/transaction_scheduler/mod.rs +++ b/core/src/banking_stage/transaction_scheduler/mod.rs @@ -12,6 +12,8 @@ mod batch_id_generator; mod in_flight_tracker; #[allow(dead_code)] mod prio_graph_scheduler; +#[allow(dead_code)] +mod scheduler_controller; mod scheduler_error; #[allow(dead_code)] mod transaction_id_generator; diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs new file mode 100644 index 00000000000000..8c1dc4f9172f73 --- /dev/null +++ b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs @@ -0,0 +1,630 @@ +//! Control flow for BankingStage's transaction scheduler. +//! + +use { + super::{ + prio_graph_scheduler::PrioGraphScheduler, scheduler_error::SchedulerError, + transaction_id_generator::TransactionIdGenerator, + transaction_state::SanitizedTransactionTTL, + transaction_state_container::TransactionStateContainer, + }, + crate::banking_stage::{ + decision_maker::{BufferedPacketsDecision, DecisionMaker}, + immutable_deserialized_packet::ImmutableDeserializedPacket, + packet_deserializer::PacketDeserializer, + TOTAL_BUFFERED_PACKETS, + }, + crossbeam_channel::RecvTimeoutError, + solana_runtime::bank_forks::BankForks, + std::{ + sync::{Arc, RwLock}, + time::Duration, + }, +}; + +/// Controls packet and transaction flow into scheduler, and scheduling execution. +pub(crate) struct SchedulerController { + /// Decision maker for determining what should be done with transactions. + decision_maker: DecisionMaker, + /// Packet/Transaction ingress. + packet_receiver: PacketDeserializer, + bank_forks: Arc>, + /// Generates unique IDs for incoming transactions. + transaction_id_generator: TransactionIdGenerator, + /// Container for transaction state. + /// Shared resource between `packet_receiver` and `scheduler`. + container: TransactionStateContainer, + /// State for scheduling and communicating with worker threads. 
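+    /// Work is handed to worker threads when scheduled, and completed work is
+    /// drained back through `receive_completed` in the controller's main loop.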
+    scheduler: PrioGraphScheduler, +} + +impl SchedulerController { + pub fn new( + decision_maker: DecisionMaker, + packet_deserializer: PacketDeserializer, + bank_forks: Arc<RwLock<BankForks>>, + scheduler: PrioGraphScheduler, + ) -> Self { + Self { + decision_maker, + packet_receiver: packet_deserializer, + bank_forks, + transaction_id_generator: TransactionIdGenerator::default(), + container: TransactionStateContainer::with_capacity(TOTAL_BUFFERED_PACKETS), + scheduler, + } + } + + pub fn run(mut self) -> Result<(), SchedulerError> { + loop { + // BufferedPacketsDecision is shared with legacy BankingStage, which will forward + // packets. Initially, not renaming these decision variants but the actions taken + // are different, since new BankingStage will not forward packets. + // For `Forward` and `ForwardAndHold`, we want to receive packets but will not + // forward them to the next leader. In this case, `ForwardAndHold` is + // indistinguishable from `Hold`. + // + // `Forward` will drop packets from the buffer instead of forwarding. + // During receiving, since packets would be dropped from buffer anyway, we can + // bypass sanitization and buffering and immediately drop the packets. + let decision = self.decision_maker.make_consume_or_forward_decision(); + + self.process_transactions(&decision)?; + self.scheduler.receive_completed(&mut self.container)?; + if !self.receive_packets(&decision) { + break; + } + } + + Ok(()) + } + + /// Process packets based on decision. + fn process_transactions( + &mut self, + decision: &BufferedPacketsDecision, + ) -> Result<(), SchedulerError> { + match decision { + BufferedPacketsDecision::Consume(_bank_start) => { + let _num_scheduled = self.scheduler.schedule(&mut self.container)?; + } + BufferedPacketsDecision::Forward => { + self.clear_container(); + } + BufferedPacketsDecision::ForwardAndHold | BufferedPacketsDecision::Hold => {} + } + + Ok(()) + } + + /// Clears the transaction state container. + /// This only clears pending transactions, and does **not** clear in-flight transactions. + fn clear_container(&mut self) { + while let Some(id) = self.container.pop() { + self.container.remove_by_id(&id.id); + } + } + + /// Returns whether the packet receiver is still connected. + fn receive_packets(&mut self, decision: &BufferedPacketsDecision) -> bool { + let remaining_queue_capacity = self.container.remaining_queue_capacity(); + + const MAX_PACKET_RECEIVE_TIME: Duration = Duration::from_millis(100); + let (recv_timeout, should_buffer) = match decision { + BufferedPacketsDecision::Consume(_) => ( + if self.container.is_empty() { + MAX_PACKET_RECEIVE_TIME + } else { + Duration::ZERO + }, + true, + ), + BufferedPacketsDecision::Forward => (MAX_PACKET_RECEIVE_TIME, false), + BufferedPacketsDecision::ForwardAndHold | BufferedPacketsDecision::Hold => { + (MAX_PACKET_RECEIVE_TIME, true) + } + }; + + let received_packet_results = self + .packet_receiver + .receive_packets(recv_timeout, remaining_queue_capacity); + + match (received_packet_results, should_buffer) { + (Ok(receive_packet_results), true) => { + self.buffer_packets(receive_packet_results.deserialized_packets) + } + (Ok(receive_packet_results), false) => drop(receive_packet_results), + (Err(RecvTimeoutError::Timeout), _) => {} + (Err(RecvTimeoutError::Disconnected), _) => return false, + } + + true + } + + fn buffer_packets(&mut self, packets: Vec<ImmutableDeserializedPacket>) { + // Sanitize packets, generate IDs, and insert into the container.
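+        // Packets that fail sanitization (for example against a vote-only
+        // bank) are dropped here rather than buffered.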
+ let bank = self.bank_forks.read().unwrap().working_bank(); + let last_slot_in_epoch = bank.epoch_schedule().get_last_slot_in_epoch(bank.epoch()); + let feature_set = &bank.feature_set; + let vote_only = bank.vote_only_bank(); + for packet in packets { + let Some(transaction) = + packet.build_sanitized_transaction(feature_set, vote_only, bank.as_ref()) + else { + continue; + }; + + let transaction_id = self.transaction_id_generator.next(); + let transaction_ttl = SanitizedTransactionTTL { + transaction, + max_age_slot: last_slot_in_epoch, + }; + let transaction_priority_details = packet.priority_details(); + self.container.insert_new_transaction( + transaction_id, + transaction_ttl, + transaction_priority_details, + ); + } + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + crate::{ + banking_stage::{ + consumer::TARGET_NUM_TRANSACTIONS_PER_BATCH, + scheduler_messages::{ConsumeWork, FinishedConsumeWork, TransactionBatchId}, + tests::create_slow_genesis_config, + }, + banking_trace::BankingPacketBatch, + sigverify::SigverifyTracerPacketStats, + }, + crossbeam_channel::{unbounded, Receiver, Sender}, + itertools::Itertools, + solana_ledger::{ + blockstore::Blockstore, genesis_utils::GenesisConfigInfo, + get_tmp_ledger_path_auto_delete, leader_schedule_cache::LeaderScheduleCache, + }, + solana_perf::packet::{to_packet_batches, PacketBatch, NUM_PACKETS}, + solana_poh::poh_recorder::{PohRecorder, Record, WorkingBankEntry}, + solana_runtime::{bank::Bank, bank_forks::BankForks}, + solana_sdk::{ + compute_budget::ComputeBudgetInstruction, hash::Hash, message::Message, + poh_config::PohConfig, pubkey::Pubkey, signature::Keypair, signer::Signer, + system_instruction, transaction::Transaction, + }, + std::sync::{atomic::AtomicBool, Arc, RwLock}, + tempfile::TempDir, + }; + + const TEST_TIMEOUT: Duration = Duration::from_millis(1000); + + fn create_channels(num: usize) -> (Vec>, Vec>) { + (0..num).map(|_| unbounded()).unzip() + } + + // Helper struct to create tests that hold channels, files, etc. + // such that our tests can be more easily set up and run. + struct TestFrame { + bank: Arc, + _ledger_path: TempDir, + _entry_receiver: Receiver, + _record_receiver: Receiver, + poh_recorder: Arc>, + banking_packet_sender: Sender, Option)>>, + + consume_work_receivers: Vec>, + finished_consume_work_sender: Sender, + } + + fn create_test_frame(num_threads: usize) -> (TestFrame, SchedulerController) { + let GenesisConfigInfo { genesis_config, .. 
} = create_slow_genesis_config(10_000); + let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); + let bank_forks = BankForks::new_rw_arc(bank); + let bank = bank_forks.read().unwrap().working_bank(); + + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()) + .expect("Expected to be able to open database ledger"); + let (poh_recorder, entry_receiver, record_receiver) = PohRecorder::new( + bank.tick_height(), + bank.last_blockhash(), + bank.clone(), + Some((4, 4)), + bank.ticks_per_slot(), + &Pubkey::new_unique(), + Arc::new(blockstore), + &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), + &PohConfig::default(), + Arc::new(AtomicBool::default()), + ); + let poh_recorder = Arc::new(RwLock::new(poh_recorder)); + let decision_maker = DecisionMaker::new(Pubkey::new_unique(), poh_recorder.clone()); + + let (banking_packet_sender, banking_packet_receiver) = unbounded(); + let packet_deserializer = + PacketDeserializer::new(banking_packet_receiver, bank_forks.clone()); + + let (consume_work_senders, consume_work_receivers) = create_channels(num_threads); + let (finished_consume_work_sender, finished_consume_work_receiver) = unbounded(); + + let test_frame = TestFrame { + bank, + _ledger_path: ledger_path, + _entry_receiver: entry_receiver, + _record_receiver: record_receiver, + poh_recorder, + banking_packet_sender, + consume_work_receivers, + finished_consume_work_sender, + }; + let scheduler_controller = SchedulerController::new( + decision_maker, + packet_deserializer, + bank_forks, + PrioGraphScheduler::new(consume_work_senders, finished_consume_work_receiver), + ); + + (test_frame, scheduler_controller) + } + + fn prioritized_tranfer( + from_keypair: &Keypair, + to_pubkey: &Pubkey, + lamports: u64, + priority: u64, + recent_blockhash: Hash, + ) -> Transaction { + let transfer = system_instruction::transfer(&from_keypair.pubkey(), to_pubkey, lamports); + let prioritization = ComputeBudgetInstruction::set_compute_unit_price(priority); + let message = Message::new(&[transfer, prioritization], Some(&from_keypair.pubkey())); + Transaction::new(&vec![from_keypair], message, recent_blockhash) + } + + fn to_banking_packet_batch(txs: &[Transaction]) -> BankingPacketBatch { + let packet_batch = to_packet_batches(txs, NUM_PACKETS); + Arc::new((packet_batch, None)) + } + + #[test] + #[should_panic(expected = "batch id 0 is not being tracked")] + fn test_unexpected_batch_id() { + let (test_frame, central_scheduler_banking_stage) = create_test_frame(1); + let TestFrame { + finished_consume_work_sender, + .. + } = &test_frame; + + finished_consume_work_sender + .send(FinishedConsumeWork { + work: ConsumeWork { + batch_id: TransactionBatchId::new(0), + ids: vec![], + transactions: vec![], + max_age_slots: vec![], + }, + retryable_indexes: vec![], + }) + .unwrap(); + + central_scheduler_banking_stage.run().unwrap(); + } + + #[test] + fn test_schedule_consume_single_threaded_no_conflicts() { + let (test_frame, central_scheduler_banking_stage) = create_test_frame(1); + let TestFrame { + bank, + poh_recorder, + banking_packet_sender, + consume_work_receivers, + .. + } = &test_frame; + + poh_recorder + .write() + .unwrap() + .set_bank_for_test(bank.clone()); + let scheduler_thread = std::thread::spawn(move || central_scheduler_banking_stage.run()); + + // Send packet batch to the scheduler - should do nothing until we become the leader. 
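+        // tx2 carries the higher compute-unit price, so it should be
+        // scheduled ahead of tx1 even though it is sent second.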
+ let tx1 = prioritized_tranfer( + &Keypair::new(), + &Pubkey::new_unique(), + 1, + 1, + bank.last_blockhash(), + ); + let tx2 = prioritized_tranfer( + &Keypair::new(), + &Pubkey::new_unique(), + 1, + 2, + bank.last_blockhash(), + ); + let tx1_hash = tx1.message().hash(); + let tx2_hash = tx2.message().hash(); + + let txs = vec![tx1, tx2]; + banking_packet_sender + .send(to_banking_packet_batch(&txs)) + .unwrap(); + + let consume_work = consume_work_receivers[0] + .recv_timeout(TEST_TIMEOUT) + .unwrap(); + assert_eq!(consume_work.ids.len(), 2); + assert_eq!(consume_work.transactions.len(), 2); + let message_hashes = consume_work + .transactions + .iter() + .map(|tx| tx.message_hash()) + .collect_vec(); + assert_eq!(message_hashes, vec![&tx2_hash, &tx1_hash]); + + drop(test_frame); + let _ = scheduler_thread.join(); + } + + #[test] + fn test_schedule_consume_single_threaded_conflict() { + let (test_frame, central_scheduler_banking_stage) = create_test_frame(1); + let TestFrame { + bank, + poh_recorder, + banking_packet_sender, + consume_work_receivers, + .. + } = &test_frame; + + poh_recorder + .write() + .unwrap() + .set_bank_for_test(bank.clone()); + let scheduler_thread = std::thread::spawn(move || central_scheduler_banking_stage.run()); + + let pk = Pubkey::new_unique(); + let tx1 = prioritized_tranfer(&Keypair::new(), &pk, 1, 1, bank.last_blockhash()); + let tx2 = prioritized_tranfer(&Keypair::new(), &pk, 1, 2, bank.last_blockhash()); + let tx1_hash = tx1.message().hash(); + let tx2_hash = tx2.message().hash(); + + let txs = vec![tx1, tx2]; + banking_packet_sender + .send(to_banking_packet_batch(&txs)) + .unwrap(); + + // We expect 2 batches to be scheduled + let consume_works = (0..2) + .map(|_| { + consume_work_receivers[0] + .recv_timeout(TEST_TIMEOUT) + .unwrap() + }) + .collect_vec(); + + let num_txs_per_batch = consume_works.iter().map(|cw| cw.ids.len()).collect_vec(); + let message_hashes = consume_works + .iter() + .flat_map(|cw| cw.transactions.iter().map(|tx| tx.message_hash())) + .collect_vec(); + assert_eq!(num_txs_per_batch, vec![1; 2]); + assert_eq!(message_hashes, vec![&tx2_hash, &tx1_hash]); + + drop(test_frame); + let _ = scheduler_thread.join(); + } + + #[test] + fn test_schedule_consume_single_threaded_multi_batch() { + let (test_frame, central_scheduler_banking_stage) = create_test_frame(1); + let TestFrame { + bank, + poh_recorder, + banking_packet_sender, + consume_work_receivers, + .. 
+ } = &test_frame; + + let scheduler_thread = std::thread::spawn(move || central_scheduler_banking_stage.run()); + poh_recorder + .write() + .unwrap() + .set_bank_for_test(bank.clone()); + + // Send multiple batches - all get scheduled + let txs1 = (0..2 * TARGET_NUM_TRANSACTIONS_PER_BATCH) + .map(|i| { + prioritized_tranfer( + &Keypair::new(), + &Pubkey::new_unique(), + i as u64, + 1, + bank.last_blockhash(), + ) + }) + .collect_vec(); + let txs2 = (0..2 * TARGET_NUM_TRANSACTIONS_PER_BATCH) + .map(|i| { + prioritized_tranfer( + &Keypair::new(), + &Pubkey::new_unique(), + i as u64, + 2, + bank.last_blockhash(), + ) + }) + .collect_vec(); + + banking_packet_sender + .send(to_banking_packet_batch(&txs1)) + .unwrap(); + banking_packet_sender + .send(to_banking_packet_batch(&txs2)) + .unwrap(); + + // We expect 4 batches to be scheduled + let consume_works = (0..4) + .map(|_| { + consume_work_receivers[0] + .recv_timeout(TEST_TIMEOUT) + .unwrap() + }) + .collect_vec(); + + assert_eq!( + consume_works.iter().map(|cw| cw.ids.len()).collect_vec(), + vec![TARGET_NUM_TRANSACTIONS_PER_BATCH; 4] + ); + + drop(test_frame); + let _ = scheduler_thread.join(); + } + + #[test] + fn test_schedule_consume_simple_thread_selection() { + let (test_frame, central_scheduler_banking_stage) = create_test_frame(2); + let TestFrame { + bank, + poh_recorder, + banking_packet_sender, + consume_work_receivers, + .. + } = &test_frame; + + poh_recorder + .write() + .unwrap() + .set_bank_for_test(bank.clone()); + let scheduler_thread = std::thread::spawn(move || central_scheduler_banking_stage.run()); + + // Send 4 transactions w/o conflicts. 2 should be scheduled on each thread + let txs = (0..4) + .map(|i| { + prioritized_tranfer( + &Keypair::new(), + &Pubkey::new_unique(), + 1, + i, + bank.last_blockhash(), + ) + }) + .collect_vec(); + banking_packet_sender + .send(to_banking_packet_batch(&txs)) + .unwrap(); + + // Priority Expectation: + // Thread 0: [3, 1] + // Thread 1: [2, 0] + let t0_expected = [3, 1] + .into_iter() + .map(|i| txs[i].message().hash()) + .collect_vec(); + let t1_expected = [2, 0] + .into_iter() + .map(|i| txs[i].message().hash()) + .collect_vec(); + let t0_actual = consume_work_receivers[0] + .recv_timeout(TEST_TIMEOUT) + .unwrap() + .transactions + .iter() + .map(|tx| *tx.message_hash()) + .collect_vec(); + let t1_actual = consume_work_receivers[1] + .recv_timeout(TEST_TIMEOUT) + .unwrap() + .transactions + .iter() + .map(|tx| *tx.message_hash()) + .collect_vec(); + + assert_eq!(t0_actual, t0_expected); + assert_eq!(t1_actual, t1_expected); + + drop(test_frame); + let _ = scheduler_thread.join(); + } + + #[test] + fn test_schedule_consume_retryable() { + let (test_frame, central_scheduler_banking_stage) = create_test_frame(1); + let TestFrame { + bank, + poh_recorder, + banking_packet_sender, + consume_work_receivers, + finished_consume_work_sender, + .. + } = &test_frame; + + poh_recorder + .write() + .unwrap() + .set_bank_for_test(bank.clone()); + let scheduler_thread = std::thread::spawn(move || central_scheduler_banking_stage.run()); + + // Send packet batch to the scheduler - should do nothing until we become the leader. 
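+        // As in the no-conflict test, tx2 (higher priority) should come out
+        // first; tx1, at batch index 1, is then reported retryable and must
+        // be rescheduled.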
+ let tx1 = prioritized_tranfer( + &Keypair::new(), + &Pubkey::new_unique(), + 1, + 1, + bank.last_blockhash(), + ); + let tx2 = prioritized_tranfer( + &Keypair::new(), + &Pubkey::new_unique(), + 1, + 2, + bank.last_blockhash(), + ); + let tx1_hash = tx1.message().hash(); + let tx2_hash = tx2.message().hash(); + + let txs = vec![tx1, tx2]; + banking_packet_sender + .send(to_banking_packet_batch(&txs)) + .unwrap(); + + let consume_work = consume_work_receivers[0] + .recv_timeout(TEST_TIMEOUT) + .unwrap(); + assert_eq!(consume_work.ids.len(), 2); + assert_eq!(consume_work.transactions.len(), 2); + let message_hashes = consume_work + .transactions + .iter() + .map(|tx| tx.message_hash()) + .collect_vec(); + assert_eq!(message_hashes, vec![&tx2_hash, &tx1_hash]); + + // Complete the batch - marking the second transaction as retryable + finished_consume_work_sender + .send(FinishedConsumeWork { + work: consume_work, + retryable_indexes: vec![1], + }) + .unwrap(); + + // Transaction should be rescheduled + let consume_work = consume_work_receivers[0] + .recv_timeout(TEST_TIMEOUT) + .unwrap(); + assert_eq!(consume_work.ids.len(), 1); + assert_eq!(consume_work.transactions.len(), 1); + let message_hashes = consume_work + .transactions + .iter() + .map(|tx| tx.message_hash()) + .collect_vec(); + assert_eq!(message_hashes, vec![&tx1_hash]); + + drop(test_frame); + let _ = scheduler_thread.join(); + } +} diff --git a/core/src/commitment_service.rs b/core/src/commitment_service.rs index b15c6d2fec5fe0..84242b44c6433a 100644 --- a/core/src/commitment_service.rs +++ b/core/src/commitment_service.rs @@ -510,12 +510,11 @@ mod tests { let bank0 = Bank::new_for_tests(&genesis_config); let bank_forks = BankForks::new_rw_arc(bank0); - let mut bank_forks = bank_forks.write().unwrap(); // Fill bank_forks with banks with votes landing in the next slot // Create enough banks such that vote account will root slots 0 and 1 for x in 0..33 { - let previous_bank = bank_forks.get(x).unwrap(); + let previous_bank = bank_forks.read().unwrap().get(x).unwrap(); let bank = Bank::new_from_parent(previous_bank.clone(), &Pubkey::default(), x + 1); let vote = vote_transaction::new_vote_transaction( vec![x], @@ -527,20 +526,23 @@ mod tests { None, ); bank.process_transaction(&vote).unwrap(); - bank_forks.insert(bank); + bank_forks.write().unwrap().insert(bank); } - let working_bank = bank_forks.working_bank(); + let working_bank = bank_forks.read().unwrap().working_bank(); let root = get_vote_account_root_slot( validator_vote_keypairs.vote_keypair.pubkey(), &working_bank, ); for x in 0..root { - bank_forks.set_root(x, &AbsRequestSender::default(), None); + bank_forks + .write() + .unwrap() + .set_root(x, &AbsRequestSender::default(), None); } // Add an additional bank/vote that will root slot 2 - let bank33 = bank_forks.get(33).unwrap(); + let bank33 = bank_forks.read().unwrap().get(33).unwrap(); let bank34 = Bank::new_from_parent(bank33.clone(), &Pubkey::default(), 34); let vote33 = vote_transaction::new_vote_transaction( vec![33], @@ -552,9 +554,9 @@ mod tests { None, ); bank34.process_transaction(&vote33).unwrap(); - bank_forks.insert(bank34); + bank_forks.write().unwrap().insert(bank34); - let working_bank = bank_forks.working_bank(); + let working_bank = bank_forks.read().unwrap().working_bank(); let root = get_vote_account_root_slot( validator_vote_keypairs.vote_keypair.pubkey(), &working_bank, @@ -573,21 +575,22 @@ mod tests { .read() .unwrap() .highest_super_majority_root(); - bank_forks.set_root( + 
bank_forks.write().unwrap().set_root( root, &AbsRequestSender::default(), Some(highest_super_majority_root), ); - let highest_super_majority_root_bank = bank_forks.get(highest_super_majority_root); + let highest_super_majority_root_bank = + bank_forks.read().unwrap().get(highest_super_majority_root); assert!(highest_super_majority_root_bank.is_some()); // Add a forked bank. Because the vote for bank 33 landed in the non-ancestor, the vote // account's root (and thus the highest_super_majority_root) rolls back to slot 1 - let bank33 = bank_forks.get(33).unwrap(); + let bank33 = bank_forks.read().unwrap().get(33).unwrap(); let bank35 = Bank::new_from_parent(bank33, &Pubkey::default(), 35); - bank_forks.insert(bank35); + bank_forks.write().unwrap().insert(bank35); - let working_bank = bank_forks.working_bank(); + let working_bank = bank_forks.read().unwrap().working_bank(); let ancestors = working_bank.status_cache_ancestors(); let _ = AggregateCommitmentService::update_commitment_cache( &block_commitment_cache, @@ -602,13 +605,14 @@ mod tests { .read() .unwrap() .highest_super_majority_root(); - let highest_super_majority_root_bank = bank_forks.get(highest_super_majority_root); + let highest_super_majority_root_bank = + bank_forks.read().unwrap().get(highest_super_majority_root); assert!(highest_super_majority_root_bank.is_some()); // Add additional banks beyond lockout built on the new fork to ensure that behavior // continues normally for x in 35..=37 { - let previous_bank = bank_forks.get(x).unwrap(); + let previous_bank = bank_forks.read().unwrap().get(x).unwrap(); let bank = Bank::new_from_parent(previous_bank.clone(), &Pubkey::default(), x + 1); let vote = vote_transaction::new_vote_transaction( vec![x], @@ -620,10 +624,10 @@ mod tests { None, ); bank.process_transaction(&vote).unwrap(); - bank_forks.insert(bank); + bank_forks.write().unwrap().insert(bank); } - let working_bank = bank_forks.working_bank(); + let working_bank = bank_forks.read().unwrap().working_bank(); let root = get_vote_account_root_slot( validator_vote_keypairs.vote_keypair.pubkey(), &working_bank, @@ -642,12 +646,13 @@ mod tests { .read() .unwrap() .highest_super_majority_root(); - bank_forks.set_root( + bank_forks.write().unwrap().set_root( root, &AbsRequestSender::default(), Some(highest_super_majority_root), ); - let highest_super_majority_root_bank = bank_forks.get(highest_super_majority_root); + let highest_super_majority_root_bank = + bank_forks.read().unwrap().get(highest_super_majority_root); assert!(highest_super_majority_root_bank.is_some()); } } diff --git a/core/src/lib.rs b/core/src/lib.rs index 99ac98b5d422cc..44e7a8ab89aa4f 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -22,8 +22,6 @@ pub mod cost_update_service; pub mod drop_bank_service; pub mod fetch_stage; pub mod gen_keys; -pub mod ledger_cleanup_service; -pub mod ledger_metric_report_service; pub mod next_leader; pub mod optimistic_confirmation_verifier; pub mod poh_timing_report_service; diff --git a/core/src/repair/quic_endpoint.rs b/core/src/repair/quic_endpoint.rs index 031de37f94b5a0..c6f2e00df53a26 100644 --- a/core/src/repair/quic_endpoint.rs +++ b/core/src/repair/quic_endpoint.rs @@ -6,8 +6,8 @@ use { log::error, quinn::{ ClientConfig, ConnectError, Connecting, Connection, ConnectionError, Endpoint, - EndpointConfig, ReadToEndError, RecvStream, SendStream, ServerConfig, TokioRuntime, - TransportConfig, VarInt, WriteError, + EndpointConfig, IdleTimeout, ReadError, ReadToEndError, RecvStream, SendStream, + ServerConfig, 
TokioRuntime, TransportConfig, VarInt, WriteError, }, rcgen::RcgenError, rustls::{Certificate, PrivateKey}, @@ -24,7 +24,7 @@ use { io::{Cursor, Error as IoError}, net::{IpAddr, SocketAddr, UdpSocket}, sync::{ - atomic::{AtomicBool, Ordering}, + atomic::{AtomicBool, AtomicU64, Ordering}, Arc, RwLock, }, time::Duration, @@ -46,7 +46,13 @@ const CONNECT_SERVER_NAME: &str = "solana-repair"; const CLIENT_CHANNEL_BUFFER: usize = 1 << 14; const ROUTER_CHANNEL_BUFFER: usize = 64; const CONNECTION_CACHE_CAPACITY: usize = 3072; + +// Transport config. +// Repair randomly samples peers, uses bi-directional streams and generally has +// low to moderate load and so is configured separately from other protocols. +const KEEP_ALIVE_INTERVAL: Duration = Duration::from_secs(4); const MAX_CONCURRENT_BIDI_STREAMS: VarInt = VarInt::from_u32(512); +const MAX_IDLE_TIMEOUT: Duration = Duration::from_secs(10); const CONNECTION_CLOSE_ERROR_CODE_SHUTDOWN: VarInt = VarInt::from_u32(1); const CONNECTION_CLOSE_ERROR_CODE_DROPPED: VarInt = VarInt::from_u32(2); @@ -82,16 +88,14 @@ pub struct RemoteRequest { #[derive(Error, Debug)] #[allow(clippy::enum_variant_names)] pub(crate) enum Error { - #[error(transparent)] - BincodeError(#[from] bincode::Error), #[error(transparent)] CertificateError(#[from] RcgenError), + #[error("Channel Send Error")] + ChannelSendError, #[error(transparent)] ConnectError(#[from] ConnectError), #[error(transparent)] ConnectionError(#[from] ConnectionError), - #[error("Channel Send Error")] - ChannelSendError, #[error("Invalid Identity: {0:?}")] InvalidIdentity(SocketAddr), #[error(transparent)] @@ -103,9 +107,15 @@ pub(crate) enum Error { #[error("read_to_end Timeout")] ReadToEndTimeout, #[error(transparent)] - WriteError(#[from] WriteError), - #[error(transparent)] TlsError(#[from] rustls::Error), + #[error(transparent)] + WriteError(#[from] WriteError), +} + +macro_rules! add_metric { + ($metric: expr) => {{ + $metric.fetch_add(1, Ordering::Relaxed); + }}; } #[allow(clippy::type_complexity)] @@ -191,11 +201,15 @@ fn new_client_config(cert: Certificate, key: PrivateKey) -> Result TransportConfig { + let max_idle_timeout = IdleTimeout::try_from(MAX_IDLE_TIMEOUT).unwrap(); let mut config = TransportConfig::default(); + // Disable datagrams and uni streams. 
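+    // Keep-alive pings every 4s sit comfortably inside the 10s idle timeout,
+    // so cached connections stay warm between repair requests.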
config + .datagram_receive_buffer_size(None) + .keep_alive_interval(Some(KEEP_ALIVE_INTERVAL)) .max_concurrent_bidi_streams(MAX_CONCURRENT_BIDI_STREAMS) .max_concurrent_uni_streams(VarInt::from(0u8)) - .datagram_receive_buffer_size(None); + .max_idle_timeout(Some(max_idle_timeout)); config } @@ -207,8 +221,11 @@ async fn run_server( router: Arc>>>, cache: Arc>>, ) { + let stats = Arc::::default(); + let report_metrics_task = + tokio::task::spawn(report_metrics_task("repair_quic_server", stats.clone())); while let Some(connecting) = endpoint.accept().await { - tokio::task::spawn(handle_connecting_error( + tokio::task::spawn(handle_connecting_task( endpoint.clone(), connecting, remote_request_sender.clone(), @@ -216,8 +233,10 @@ async fn run_server( prune_cache_pending.clone(), router.clone(), cache.clone(), + stats.clone(), )); } + report_metrics_task.abort(); } async fn run_client( @@ -229,14 +248,17 @@ async fn run_client( router: Arc>>>, cache: Arc>>, ) { + let stats = Arc::::default(); + let report_metrics_task = + tokio::task::spawn(report_metrics_task("repair_quic_client", stats.clone())); while let Some(request) = receiver.recv().await { - let Some(request) = try_route_request(request, &*router.read().await) else { + let Some(request) = try_route_request(request, &*router.read().await, &stats) else { continue; }; let remote_address = request.remote_address; let receiver = { let mut router = router.write().await; - let Some(request) = try_route_request(request, &router) else { + let Some(request) = try_route_request(request, &router, &stats) else { continue; }; let (sender, receiver) = tokio::sync::mpsc::channel(ROUTER_CHANNEL_BUFFER); @@ -253,11 +275,13 @@ async fn run_client( prune_cache_pending.clone(), router.clone(), cache.clone(), + stats.clone(), )); } close_quic_endpoint(&endpoint); // Drop sender channels to unblock threads waiting on the receiving end. router.write().await.clear(); + report_metrics_task.abort(); } // Routes the local request to respective channel. 
Drops the request if the @@ -266,13 +290,15 @@ async fn run_client( fn try_route_request( request: LocalRequest, router: &HashMap>, + stats: &RepairQuicStats, ) -> Option { match router.get(&request.remote_address) { None => Some(request), Some(sender) => match sender.try_send(request) { Ok(()) => None, Err(TrySendError::Full(request)) => { - error!("TrySendError::Full {}", request.remote_address); + debug!("TrySendError::Full {}", request.remote_address); + add_metric!(stats.router_try_send_error_full); None } Err(TrySendError::Closed(request)) => Some(request), @@ -280,7 +306,7 @@ fn try_route_request( } } -async fn handle_connecting_error( +async fn handle_connecting_task( endpoint: Endpoint, connecting: Connecting, remote_request_sender: Sender, @@ -288,6 +314,7 @@ async fn handle_connecting_error( prune_cache_pending: Arc, router: Arc>>>, cache: Arc>>, + stats: Arc, ) { if let Err(err) = handle_connecting( endpoint, @@ -297,10 +324,12 @@ async fn handle_connecting_error( prune_cache_pending, router, cache, + stats.clone(), ) .await { - error!("handle_connecting: {err:?}"); + debug!("handle_connecting: {err:?}"); + record_error(&err, &stats); } } @@ -312,6 +341,7 @@ async fn handle_connecting( prune_cache_pending: Arc, router: Arc>>>, cache: Arc>>, + stats: Arc, ) -> Result<(), Error> { let connection = connecting.await?; let remote_address = connection.remote_address(); @@ -332,6 +362,7 @@ async fn handle_connecting( prune_cache_pending, router, cache, + stats, ) .await; Ok(()) @@ -349,6 +380,7 @@ async fn handle_connection( prune_cache_pending: Arc, router: Arc>>>, cache: Arc>>, + stats: Arc, ) { cache_connection( remote_pubkey, @@ -361,8 +393,10 @@ async fn handle_connection( .await; let send_requests_task = tokio::task::spawn(send_requests_task( endpoint.clone(), + remote_address, connection.clone(), receiver, + stats.clone(), )); let recv_requests_task = tokio::task::spawn(recv_requests_task( endpoint, @@ -370,11 +404,13 @@ async fn handle_connection( remote_pubkey, connection.clone(), remote_request_sender, + stats.clone(), )); match futures::future::try_join(send_requests_task, recv_requests_task).await { Err(err) => error!("handle_connection: {remote_pubkey}, {remote_address}, {err:?}"), - Ok(((), Err(ref err))) => { - error!("recv_requests_task: {remote_pubkey}, {remote_address}, {err:?}"); + Ok(((), Err(err))) => { + debug!("recv_requests_task: {remote_pubkey}, {remote_address}, {err:?}"); + record_error(&err, &stats); } Ok(((), Ok(()))) => (), } @@ -392,6 +428,7 @@ async fn recv_requests_task( remote_pubkey: Pubkey, connection: Connection, remote_request_sender: Sender, + stats: Arc, ) -> Result<(), Error> { loop { let (send_stream, recv_stream) = connection.accept_bi().await?; @@ -402,6 +439,7 @@ async fn recv_requests_task( send_stream, recv_stream, remote_request_sender.clone(), + stats.clone(), )); } } @@ -413,6 +451,7 @@ async fn handle_streams_task( send_stream: SendStream, recv_stream: RecvStream, remote_request_sender: Sender, + stats: Arc, ) { if let Err(err) = handle_streams( &endpoint, @@ -424,7 +463,8 @@ async fn handle_streams_task( ) .await { - error!("handle_stream: {remote_address}, {remote_pubkey}, {err:?}"); + debug!("handle_stream: {remote_address}, {remote_pubkey}, {err:?}"); + record_error(&err, &stats); } } @@ -469,21 +509,32 @@ async fn handle_streams( async fn send_requests_task( endpoint: Endpoint, + remote_address: SocketAddr, connection: Connection, mut receiver: AsyncReceiver, + stats: Arc, ) { while let Some(request) = receiver.recv().await { 
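+        // Spawn one task per outgoing request so a slow or stalled stream
+        // cannot block draining of the channel.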
tokio::task::spawn(send_request_task( endpoint.clone(), + remote_address, connection.clone(), request, + stats.clone(), )); } } -async fn send_request_task(endpoint: Endpoint, connection: Connection, request: LocalRequest) { +async fn send_request_task( + endpoint: Endpoint, + remote_address: SocketAddr, + connection: Connection, + request: LocalRequest, + stats: Arc, +) { if let Err(err) = send_request(endpoint, connection, request).await { - error!("send_request: {err:?}") + debug!("send_request: {remote_address}, {err:?}"); + record_error(&err, &stats); } } @@ -542,6 +593,7 @@ async fn make_connection_task( prune_cache_pending: Arc, router: Arc>>>, cache: Arc>>, + stats: Arc, ) { if let Err(err) = make_connection( endpoint, @@ -552,10 +604,12 @@ async fn make_connection_task( prune_cache_pending, router, cache, + stats.clone(), ) .await { - error!("make_connection: {remote_address}, {err:?}"); + debug!("make_connection: {remote_address}, {err:?}"); + record_error(&err, &stats); } } @@ -568,6 +622,7 @@ async fn make_connection( prune_cache_pending: Arc, router: Arc>>>, cache: Arc>>, + stats: Arc, ) -> Result<(), Error> { let connection = endpoint .connect(remote_address, CONNECT_SERVER_NAME)? @@ -583,6 +638,7 @@ async fn make_connection( prune_cache_pending, router, cache, + stats, ) .await; Ok(()) @@ -698,6 +754,250 @@ impl From> for Error { } } +#[derive(Default)] +struct RepairQuicStats { + connect_error_invalid_remote_address: AtomicU64, + connect_error_other: AtomicU64, + connect_error_too_many_connections: AtomicU64, + connection_error_application_closed: AtomicU64, + connection_error_connection_closed: AtomicU64, + connection_error_locally_closed: AtomicU64, + connection_error_reset: AtomicU64, + connection_error_timed_out: AtomicU64, + connection_error_transport_error: AtomicU64, + connection_error_version_mismatch: AtomicU64, + invalid_identity: AtomicU64, + no_response_received: AtomicU64, + read_to_end_error_connection_lost: AtomicU64, + read_to_end_error_illegal_ordered_read: AtomicU64, + read_to_end_error_reset: AtomicU64, + read_to_end_error_too_long: AtomicU64, + read_to_end_error_unknown_stream: AtomicU64, + read_to_end_error_zero_rtt_rejected: AtomicU64, + read_to_end_timeout: AtomicU64, + router_try_send_error_full: AtomicU64, + write_error_connection_lost: AtomicU64, + write_error_stopped: AtomicU64, + write_error_unknown_stream: AtomicU64, + write_error_zero_rtt_rejected: AtomicU64, +} + +async fn report_metrics_task(name: &'static str, stats: Arc) { + const METRICS_SUBMIT_CADENCE: Duration = Duration::from_secs(2); + loop { + tokio::time::sleep(METRICS_SUBMIT_CADENCE).await; + report_metrics(name, &stats); + } +} + +fn record_error(err: &Error, stats: &RepairQuicStats) { + match err { + Error::CertificateError(_) => (), + Error::ChannelSendError => (), + Error::ConnectError(ConnectError::EndpointStopping) => { + add_metric!(stats.connect_error_other) + } + Error::ConnectError(ConnectError::TooManyConnections) => { + add_metric!(stats.connect_error_too_many_connections) + } + Error::ConnectError(ConnectError::InvalidDnsName(_)) => { + add_metric!(stats.connect_error_other) + } + Error::ConnectError(ConnectError::InvalidRemoteAddress(_)) => { + add_metric!(stats.connect_error_invalid_remote_address) + } + Error::ConnectError(ConnectError::NoDefaultClientConfig) => { + add_metric!(stats.connect_error_other) + } + Error::ConnectError(ConnectError::UnsupportedVersion) => { + add_metric!(stats.connect_error_other) + } + 
Error::ConnectionError(ConnectionError::VersionMismatch) => { + add_metric!(stats.connection_error_version_mismatch) + } + Error::ConnectionError(ConnectionError::TransportError(_)) => { + add_metric!(stats.connection_error_transport_error) + } + Error::ConnectionError(ConnectionError::ConnectionClosed(_)) => { + add_metric!(stats.connection_error_connection_closed) + } + Error::ConnectionError(ConnectionError::ApplicationClosed(_)) => { + add_metric!(stats.connection_error_application_closed) + } + Error::ConnectionError(ConnectionError::Reset) => add_metric!(stats.connection_error_reset), + Error::ConnectionError(ConnectionError::TimedOut) => { + add_metric!(stats.connection_error_timed_out) + } + Error::ConnectionError(ConnectionError::LocallyClosed) => { + add_metric!(stats.connection_error_locally_closed) + } + Error::InvalidIdentity(_) => add_metric!(stats.invalid_identity), + Error::IoError(_) => (), + Error::NoResponseReceived => add_metric!(stats.no_response_received), + Error::ReadToEndError(ReadToEndError::Read(ReadError::Reset(_))) => { + add_metric!(stats.read_to_end_error_reset) + } + Error::ReadToEndError(ReadToEndError::Read(ReadError::ConnectionLost(_))) => { + add_metric!(stats.read_to_end_error_connection_lost) + } + Error::ReadToEndError(ReadToEndError::Read(ReadError::UnknownStream)) => { + add_metric!(stats.read_to_end_error_unknown_stream) + } + Error::ReadToEndError(ReadToEndError::Read(ReadError::IllegalOrderedRead)) => { + add_metric!(stats.read_to_end_error_illegal_ordered_read) + } + Error::ReadToEndError(ReadToEndError::Read(ReadError::ZeroRttRejected)) => { + add_metric!(stats.read_to_end_error_zero_rtt_rejected) + } + Error::ReadToEndError(ReadToEndError::TooLong) => { + add_metric!(stats.read_to_end_error_too_long) + } + Error::ReadToEndTimeout => add_metric!(stats.read_to_end_timeout), + Error::TlsError(_) => (), + Error::WriteError(WriteError::Stopped(_)) => add_metric!(stats.write_error_stopped), + Error::WriteError(WriteError::ConnectionLost(_)) => { + add_metric!(stats.write_error_connection_lost) + } + Error::WriteError(WriteError::UnknownStream) => { + add_metric!(stats.write_error_unknown_stream) + } + Error::WriteError(WriteError::ZeroRttRejected) => { + add_metric!(stats.write_error_zero_rtt_rejected) + } + } +} + +fn report_metrics(name: &'static str, stats: &RepairQuicStats) { + macro_rules! 
reset_metric { + ($metric: expr) => { + $metric.swap(0, Ordering::Relaxed) + }; + } + datapoint_info!( + name, + ( + "connect_error_invalid_remote_address", + reset_metric!(stats.connect_error_invalid_remote_address), + i64 + ), + ( + "connect_error_other", + reset_metric!(stats.connect_error_other), + i64 + ), + ( + "connect_error_too_many_connections", + reset_metric!(stats.connect_error_too_many_connections), + i64 + ), + ( + "connection_error_application_closed", + reset_metric!(stats.connection_error_application_closed), + i64 + ), + ( + "connection_error_connection_closed", + reset_metric!(stats.connection_error_connection_closed), + i64 + ), + ( + "connection_error_locally_closed", + reset_metric!(stats.connection_error_locally_closed), + i64 + ), + ( + "connection_error_reset", + reset_metric!(stats.connection_error_reset), + i64 + ), + ( + "connection_error_timed_out", + reset_metric!(stats.connection_error_timed_out), + i64 + ), + ( + "connection_error_transport_error", + reset_metric!(stats.connection_error_transport_error), + i64 + ), + ( + "connection_error_version_mismatch", + reset_metric!(stats.connection_error_version_mismatch), + i64 + ), + ( + "invalid_identity", + reset_metric!(stats.invalid_identity), + i64 + ), + ( + "no_response_received", + reset_metric!(stats.no_response_received), + i64 + ), + ( + "read_to_end_error_connection_lost", + reset_metric!(stats.read_to_end_error_connection_lost), + i64 + ), + ( + "read_to_end_error_illegal_ordered_read", + reset_metric!(stats.read_to_end_error_illegal_ordered_read), + i64 + ), + ( + "read_to_end_error_reset", + reset_metric!(stats.read_to_end_error_reset), + i64 + ), + ( + "read_to_end_error_too_long", + reset_metric!(stats.read_to_end_error_too_long), + i64 + ), + ( + "read_to_end_error_unknown_stream", + reset_metric!(stats.read_to_end_error_unknown_stream), + i64 + ), + ( + "read_to_end_error_zero_rtt_rejected", + reset_metric!(stats.read_to_end_error_zero_rtt_rejected), + i64 + ), + ( + "read_to_end_timeout", + reset_metric!(stats.read_to_end_timeout), + i64 + ), + ( + "router_try_send_error_full", + reset_metric!(stats.router_try_send_error_full), + i64 + ), + ( + "write_error_connection_lost", + reset_metric!(stats.write_error_connection_lost), + i64 + ), + ( + "write_error_stopped", + reset_metric!(stats.write_error_stopped), + i64 + ), + ( + "write_error_unknown_stream", + reset_metric!(stats.write_error_unknown_stream), + i64 + ), + ( + "write_error_zero_rtt_rejected", + reset_metric!(stats.write_error_zero_rtt_rejected), + i64 + ), + ); +} + #[cfg(test)] mod tests { use { diff --git a/core/src/repair/repair_service.rs b/core/src/repair/repair_service.rs index 20d6a01084f212..36ba4978e1c793 100644 --- a/core/src/repair/repair_service.rs +++ b/core/src/repair/repair_service.rs @@ -457,16 +457,17 @@ impl RepairService { let mut batch_send_repairs_elapsed = Measure::start("batch_send_repairs_elapsed"); if !batch.is_empty() { - if let Err(SendPktsError::IoError(err, num_failed)) = - batch_send(repair_socket, &batch) - { - error!( - "{} batch_send failed to send {}/{} packets first error {:?}", - id, - num_failed, - batch.len(), - err - ); + match batch_send(repair_socket, &batch) { + Ok(()) => (), + Err(SendPktsError::IoError(err, num_failed)) => { + error!( + "{} batch_send failed to send {}/{} packets first error {:?}", + id, + num_failed, + batch.len(), + err + ); + } } } batch_send_repairs_elapsed.stop(); diff --git a/core/src/repair/serve_repair.rs b/core/src/repair/serve_repair.rs index 
ebb2d218658153..2662d487f13b0e 100644 --- a/core/src/repair/serve_repair.rs +++ b/core/src/repair/serve_repair.rs @@ -1221,15 +1221,16 @@ impl ServeRepair { } } if !pending_pongs.is_empty() { - if let Err(SendPktsError::IoError(err, num_failed)) = - batch_send(repair_socket, &pending_pongs) - { - warn!( - "batch_send failed to send {}/{} packets. First error: {:?}", - num_failed, - pending_pongs.len(), - err - ); + match batch_send(repair_socket, &pending_pongs) { + Ok(()) => (), + Err(SendPktsError::IoError(err, num_failed)) => { + warn!( + "batch_send failed to send {}/{} packets. First error: {:?}", + num_failed, + pending_pongs.len(), + err + ); + } } } } diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 2c5c0ff9f526bd..3c2c7d39d06610 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -36,13 +36,14 @@ use { lazy_static::lazy_static, rayon::{prelude::*, ThreadPool}, solana_entry::entry::VerifyRecyclers, - solana_geyser_plugin_manager::block_metadata_notifier_interface::BlockMetadataNotifierLock, + solana_geyser_plugin_manager::block_metadata_notifier_interface::BlockMetadataNotifierArc, solana_gossip::cluster_info::ClusterInfo, solana_ledger::{ block_error::BlockError, blockstore::Blockstore, blockstore_processor::{ - self, BlockstoreProcessorError, ConfirmationProgress, TransactionStatusSender, + self, BlockstoreProcessorError, ConfirmationProgress, ExecuteBatchesInternalMetrics, + TransactionStatusSender, }, entry_notifier_service::EntryNotifierSender, leader_schedule_cache::LeaderScheduleCache, @@ -259,8 +260,6 @@ pub struct ReplayTiming { start_leader_elapsed: u64, reset_bank_elapsed: u64, voting_elapsed: u64, - vote_push_us: u64, - vote_send_us: u64, generate_vote_us: u64, update_commitment_cache_us: u64, select_forks_elapsed: u64, @@ -336,8 +335,6 @@ impl ReplayTiming { if elapsed_ms > 1000 { datapoint_info!( "replay-loop-voting-stats", - ("vote_push_us", self.vote_push_us, i64), - ("vote_send_us", self.vote_send_us, i64), ("generate_vote_us", self.generate_vote_us, i64), ( "update_commitment_cache_us", @@ -495,7 +492,7 @@ impl ReplayStage { cost_update_sender: Sender, voting_sender: Sender, drop_bank_sender: Sender>>, - block_metadata_notifier: Option, + block_metadata_notifier: Option, log_messages_bytes_limit: Option, prioritization_fee_cache: Arc, dumped_slots_sender: DumpedSlotsSender, @@ -2763,7 +2760,7 @@ impl ReplayStage { cost_update_sender: &Sender, duplicate_slots_to_repair: &mut DuplicateSlotsToRepair, ancestor_hashes_replay_update_sender: &AncestorHashesReplayUpdateSender, - block_metadata_notifier: Option, + block_metadata_notifier: Option, replay_result_vec: &[ReplaySlotFromBlockstore], purge_repair_slot_counter: &mut PurgeRepairSlotCounter, ) -> bool { @@ -2786,7 +2783,6 @@ impl ReplayStage { match replay_result { Ok(replay_tx_count) => tx_count += replay_tx_count, Err(err) => { - // Error means the slot needs to be marked as dead Self::mark_dead_slot( blockstore, bank, @@ -2802,8 +2798,7 @@ impl ReplayStage { ancestor_hashes_replay_update_sender, purge_repair_slot_counter, ); - // If the bank was corrupted, don't try to run the below logic to check if the - // bank is completed + // don't try to run the below logic to check if the bank is completed continue; } } @@ -2817,6 +2812,40 @@ impl ReplayStage { .expect("Bank fork progress entry missing for completed bank"); let replay_stats = bank_progress.replay_stats.clone(); + + if let Some((result, completed_execute_timings)) = + bank.wait_for_completed_scheduler() + { + let 
metrics = ExecuteBatchesInternalMetrics::new_with_timings_from_all_threads( + completed_execute_timings, + ); + replay_stats + .write() + .unwrap() + .batch_execute + .accumulate(metrics); + + if let Err(err) = result { + Self::mark_dead_slot( + blockstore, + bank, + bank_forks.read().unwrap().root(), + &BlockstoreProcessorError::InvalidTransaction(err), + rpc_subscriptions, + duplicate_slots_tracker, + gossip_duplicate_confirmed_slots, + epoch_slots_frozen_slots, + progress, + heaviest_subtree_fork_choice, + duplicate_slots_to_repair, + ancestor_hashes_replay_update_sender, + purge_repair_slot_counter, + ); + // don't try to run the remaining normal processing for the completed bank + continue; + } + } + let r_replay_stats = replay_stats.read().unwrap(); let replay_progress = bank_progress.replay_progress.clone(); let r_replay_progress = replay_progress.read().unwrap(); @@ -2922,10 +2951,13 @@ impl ReplayStage { } Self::record_rewards(bank, rewards_recorder_sender); if let Some(ref block_metadata_notifier) = block_metadata_notifier { - let block_metadata_notifier = block_metadata_notifier.read().unwrap(); + let parent_blockhash = bank + .parent() + .map(|bank| bank.last_blockhash()) + .unwrap_or_default(); block_metadata_notifier.notify_block_metadata( bank.parent_slot(), - &bank.parent_hash().to_string(), + &parent_blockhash.to_string(), bank.slot(), &bank.last_blockhash().to_string(), &bank.rewards, @@ -2983,7 +3015,7 @@ impl ReplayStage { cost_update_sender: &Sender, duplicate_slots_to_repair: &mut DuplicateSlotsToRepair, ancestor_hashes_replay_update_sender: &AncestorHashesReplayUpdateSender, - block_metadata_notifier: Option, + block_metadata_notifier: Option, replay_timing: &mut ReplayTiming, log_messages_bytes_limit: Option, replay_slots_concurrently: bool, diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 639670479f38df..8e479aa92b792d 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -14,7 +14,6 @@ use { consensus::{tower_storage::TowerStorage, Tower}, cost_update_service::CostUpdateService, drop_bank_service::DropBankService, - ledger_cleanup_service::LedgerCleanupService, repair::{quic_endpoint::LocalRequest, repair_service::RepairInfo}, replay_stage::{ReplayStage, ReplayStageConfig}, rewards_recorder_service::RewardsRecorderSender, @@ -26,14 +25,15 @@ use { bytes::Bytes, crossbeam_channel::{unbounded, Receiver, Sender}, solana_client::connection_cache::ConnectionCache, - solana_geyser_plugin_manager::block_metadata_notifier_interface::BlockMetadataNotifierLock, + solana_geyser_plugin_manager::block_metadata_notifier_interface::BlockMetadataNotifierArc, solana_gossip::{ cluster_info::ClusterInfo, duplicate_shred_handler::DuplicateShredHandler, duplicate_shred_listener::DuplicateShredListener, }, solana_ledger::{ - blockstore::Blockstore, blockstore_processor::TransactionStatusSender, - entry_notifier_service::EntryNotifierSender, leader_schedule_cache::LeaderScheduleCache, + blockstore::Blockstore, blockstore_cleanup_service::BlockstoreCleanupService, + blockstore_processor::TransactionStatusSender, entry_notifier_service::EntryNotifierSender, + leader_schedule_cache::LeaderScheduleCache, }, solana_poh::poh_recorder::PohRecorder, solana_rpc::{ @@ -63,7 +63,7 @@ pub struct Tvu { window_service: WindowService, cluster_slots_service: ClusterSlotsService, replay_stage: ReplayStage, - ledger_cleanup_service: Option, + blockstore_cleanup_service: Option, cost_update_service: CostUpdateService, voting_service: VotingService, warm_quic_cache_service: Option, @@ -128,7 +128,7 @@ 
impl Tvu { gossip_confirmed_slots_receiver: GossipDuplicateConfirmedSlotsReceiver, tvu_config: TvuConfig, max_slots: &Arc, - block_metadata_notifier: Option, + block_metadata_notifier: Option, wait_to_vote_slot: Option, accounts_background_request_sender: AbsRequestSender, log_messages_bytes_limit: Option, @@ -236,14 +236,14 @@ impl Tvu { exit.clone(), ); - let (ledger_cleanup_slot_sender, ledger_cleanup_slot_receiver) = unbounded(); + let (blockstore_cleanup_slot_sender, blockstore_cleanup_slot_receiver) = unbounded(); let replay_stage_config = ReplayStageConfig { vote_account: *vote_account, authorized_voter_keypairs, exit: exit.clone(), rpc_subscriptions: rpc_subscriptions.clone(), leader_schedule_cache: leader_schedule_cache.clone(), - latest_root_senders: vec![ledger_cleanup_slot_sender], + latest_root_senders: vec![blockstore_cleanup_slot_sender], accounts_background_request_sender, block_commitment_cache, transaction_status_sender, @@ -311,9 +311,9 @@ impl Tvu { popular_pruned_forks_receiver, )?; - let ledger_cleanup_service = tvu_config.max_ledger_shreds.map(|max_ledger_shreds| { - LedgerCleanupService::new( - ledger_cleanup_slot_receiver, + let blockstore_cleanup_service = tvu_config.max_ledger_shreds.map(|max_ledger_shreds| { + BlockstoreCleanupService::new( + blockstore_cleanup_slot_receiver, blockstore.clone(), max_ledger_shreds, exit.clone(), @@ -337,7 +337,7 @@ impl Tvu { window_service, cluster_slots_service, replay_stage, - ledger_cleanup_service, + blockstore_cleanup_service, cost_update_service, voting_service, warm_quic_cache_service, @@ -352,8 +352,8 @@ impl Tvu { self.cluster_slots_service.join()?; self.fetch_stage.join()?; self.shred_sigverify.join()?; - if self.ledger_cleanup_service.is_some() { - self.ledger_cleanup_service.unwrap().join()?; + if self.blockstore_cleanup_service.is_some() { + self.blockstore_cleanup_service.unwrap().join()?; } self.replay_stage.join()?; self.cost_update_service.join()?; diff --git a/core/src/validator.rs b/core/src/validator.rs index b67c2f01466d66..241105e28ccdf0 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -14,7 +14,6 @@ use { tower_storage::{NullTowerStorage, TowerStorage}, ExternalRootSource, Tower, }, - ledger_metric_report_service::LedgerMetricReportService, poh_timing_report_service::PohTimingReportService, repair::{self, serve_repair::ServeRepair, serve_repair_service::ServeRepairService}, rewards_recorder_service::{RewardsRecorderSender, RewardsRecorderService}, @@ -56,9 +55,10 @@ use { blockstore::{ Blockstore, BlockstoreError, BlockstoreSignals, CompletedSlotsReceiver, PurgeType, }, + blockstore_metric_report_service::BlockstoreMetricReportService, blockstore_options::{BlockstoreOptions, BlockstoreRecoveryMode, LedgerColumnOptions}, blockstore_processor::{self, TransactionStatusSender}, - entry_notifier_interface::EntryNotifierLock, + entry_notifier_interface::EntryNotifierArc, entry_notifier_service::{EntryNotifierSender, EntryNotifierService}, leader_schedule::FixedSchedule, leader_schedule_cache::LeaderScheduleCache, @@ -83,7 +83,7 @@ use { rpc_pubsub_service::{PubSubConfig, PubSubService}, rpc_service::JsonRpcService, rpc_subscriptions::RpcSubscriptions, - transaction_notifier_interface::TransactionNotifierLock, + transaction_notifier_interface::TransactionNotifierArc, transaction_status_service::TransactionStatusService, }, solana_runtime::{ @@ -465,7 +465,7 @@ pub struct Validator { pub bank_forks: Arc>, pub blockstore: Arc, geyser_plugin_service: Option, - ledger_metric_report_service: 
LedgerMetricReportService, + blockstore_metric_report_service: BlockstoreMetricReportService, accounts_background_service: AccountsBackgroundService, accounts_hash_verifier: AccountsHashVerifier, turbine_quic_endpoint: Endpoint, @@ -812,6 +812,12 @@ impl Validator { config.block_verification_method, config.block_production_method ); + let (replay_vote_sender, replay_vote_receiver) = unbounded(); + + // block min prioritization fee cache should be readable by RPC, and writable by validator + // (by both replay stage and banking stage) + let prioritization_fee_cache = Arc::new(PrioritizationFeeCache::default()); + let leader_schedule_cache = Arc::new(leader_schedule_cache); let entry_notification_sender = entry_notifier_service .as_ref() @@ -926,7 +932,7 @@ impl Validator { &identity_keypair, node.info .tpu(Protocol::UDP) - .expect("Operator must spin up node with valid TPU address") + .map_err(|err| format!("Invalid TPU address: {err:?}"))? .ip(), )), Some((&staked_nodes, &identity_keypair.pubkey())), @@ -939,10 +945,6 @@ impl Validator { )), }; - // block min prioritization fee cache should be readable by RPC, and writable by validator - // (by both replay stage and banking stage) - let prioritization_fee_cache = Arc::new(PrioritizationFeeCache::default()); - let rpc_override_health_check = Arc::new(AtomicBool::new(config.rpc_config.disable_health_check)); let ( @@ -1100,8 +1102,8 @@ impl Validator { ) .map_err(|err| format!("wait_for_supermajority failed: {err:?}"))?; - let ledger_metric_report_service = - LedgerMetricReportService::new(blockstore.clone(), exit.clone()); + let blockstore_metric_report_service = + BlockstoreMetricReportService::new(blockstore.clone(), exit.clone()); let wait_for_vote_to_start_leader = !waited_for_supermajority && !config.no_wait_for_vote_to_start_leader; @@ -1181,7 +1183,7 @@ impl Validator { node.sockets.tvu_quic, node.info .tvu(Protocol::QUIC) - .expect("Operator must spin up node with valid QUIC TVU address") + .map_err(|err| format!("Invalid QUIC TVU address: {err:?}"))? .ip(), turbine_quic_endpoint_sender, bank_forks.clone(), @@ -1206,7 +1208,7 @@ impl Validator { node.sockets.serve_repair_quic, node.info .serve_repair(Protocol::QUIC) - .expect("Operator must spin up node with valid QUIC serve-repair address") + .map_err(|err| format!("Invalid QUIC serve-repair address: {err:?}"))? .ip(), repair_quic_endpoint_sender, bank_forks.clone(), @@ -1229,7 +1231,6 @@ impl Validator { }; let last_vote = tower.last_vote(); - let (replay_vote_sender, replay_vote_receiver) = unbounded(); let tvu = Tvu::new( vote_account, authorized_voter_keypairs, @@ -1377,7 +1378,7 @@ impl Validator { bank_forks, blockstore, geyser_plugin_service, - ledger_metric_report_service, + blockstore_metric_report_service, accounts_background_service, accounts_hash_verifier, turbine_quic_endpoint, @@ -1506,7 +1507,7 @@ impl Validator { self.stats_reporter_service .join() .expect("stats_reporter_service"); - self.ledger_metric_report_service + self.blockstore_metric_report_service .join() .expect("ledger_metric_report_service"); self.accounts_background_service @@ -1688,8 +1689,8 @@ fn load_blockstore( exit: Arc, start_progress: &Arc>, accounts_update_notifier: Option, - transaction_notifier: Option, - entry_notifier: Option, + transaction_notifier: Option, + entry_notifier: Option, poh_timing_point_sender: Option, ) -> Result< ( @@ -1741,7 +1742,8 @@ fn load_blockstore( completed_slots_receiver, .. 
} = Blockstore::open_with_signal(ledger_path, blockstore_options_from_config(config)) - .expect("Failed to open ledger database"); + .map_err(|err| format!("Failed to open Blockstore: {err:?}"))?; + blockstore.shred_timing_point_sender = poh_timing_point_sender; // following boot sequence (esp BankForks) could set root. so stash the original value // of blockstore root away here as soon as possible. @@ -2165,7 +2167,7 @@ fn initialize_rpc_transaction_history_services( exit: Arc, enable_rpc_transaction_history: bool, enable_extended_tx_metadata_storage: bool, - transaction_notifier: Option, + transaction_notifier: Option, ) -> TransactionHistoryServices { let max_complete_transaction_status_slot = Arc::new(AtomicU64::new(blockstore.max_root())); let (transaction_status_sender, transaction_status_receiver) = unbounded(); diff --git a/core/tests/ledger_cleanup.rs b/core/tests/ledger_cleanup.rs deleted file mode 100644 index 1a096c738bf8ff..00000000000000 --- a/core/tests/ledger_cleanup.rs +++ /dev/null @@ -1,613 +0,0 @@ -#![allow(clippy::arithmetic_side_effects)] -// Long-running ledger_cleanup tests - -#[cfg(test)] -mod tests { - use { - crossbeam_channel::unbounded, - log::*, - solana_core::ledger_cleanup_service::LedgerCleanupService, - solana_ledger::{ - blockstore::{make_many_slot_shreds, Blockstore}, - blockstore_options::{ - BlockstoreOptions, BlockstoreRocksFifoOptions, LedgerColumnOptions, - ShredStorageType, - }, - get_tmp_ledger_path, - }, - solana_measure::measure::Measure, - std::{ - collections::VecDeque, - str::FromStr, - sync::{ - atomic::{AtomicBool, AtomicU64, Ordering}, - Arc, Mutex, RwLock, - }, - thread::{self, Builder, JoinHandle}, - time::{Duration, Instant}, - }, - systemstat::{CPULoad, Platform, System}, - }; - - const DEFAULT_BENCHMARK_SLOTS: u64 = 50; - const DEFAULT_BATCH_SIZE_SLOTS: u64 = 1; - const DEFAULT_MAX_LEDGER_SHREDS: u64 = 50; - const DEFAULT_SHREDS_PER_SLOT: u64 = 25; - const DEFAULT_STOP_SIZE_BYTES: u64 = 0; - const DEFAULT_STOP_SIZE_ITERATIONS: u64 = 0; - const DEFAULT_STOP_SIZE_CF_DATA_BYTES: u64 = 0; - const DEFAULT_SHRED_DATA_CF_SIZE_BYTES: u64 = 125 * 1024 * 1024 * 1024; - - #[derive(Debug)] - struct BenchmarkConfig { - benchmark_slots: u64, - batch_size_slots: u64, - max_ledger_shreds: u64, - shreds_per_slot: u64, - stop_size_bytes: u64, - stop_size_iterations: u64, - stop_size_cf_data_bytes: u64, - pre_generate_data: bool, - cleanup_blockstore: bool, - num_writers: u64, - cleanup_service: bool, - fifo_compaction: bool, - shred_data_cf_size: u64, - } - - #[derive(Clone, Copy, Debug)] - struct CpuStatsInner { - cpu_user: f32, - cpu_system: f32, - cpu_idle: f32, - } - - impl From for CpuStatsInner { - fn from(cpu: CPULoad) -> Self { - Self { - cpu_user: cpu.user * 100.0, - cpu_system: cpu.system * 100.0, - cpu_idle: cpu.idle * 100.0, - } - } - } - - impl Default for CpuStatsInner { - fn default() -> Self { - Self { - cpu_user: 0.0, - cpu_system: 0.0, - cpu_idle: 0.0, - } - } - } - - struct CpuStats { - stats: RwLock, - sys: System, - } - - impl Default for CpuStats { - fn default() -> Self { - Self { - stats: RwLock::new(CpuStatsInner::default()), - sys: System::new(), - } - } - } - - impl CpuStats { - fn update(&self) { - if let Ok(cpu) = self.sys.cpu_load_aggregate() { - std::thread::sleep(Duration::from_millis(400)); - let cpu_new = CpuStatsInner::from(cpu.done().unwrap()); - *self.stats.write().unwrap() = cpu_new; - } - } - - fn get_stats(&self) -> CpuStatsInner { - *self.stats.read().unwrap() - } - } - - struct CpuStatsUpdater { - cpu_stats: 
Arc, - t_cleanup: JoinHandle<()>, - } - - impl CpuStatsUpdater { - pub fn new(exit: Arc) -> Self { - let cpu_stats = Arc::new(CpuStats::default()); - let cpu_stats_clone = cpu_stats.clone(); - - let t_cleanup = Builder::new() - .name("cpu_info".to_string()) - .spawn(move || loop { - if exit.load(Ordering::Relaxed) { - break; - } - cpu_stats_clone.update(); - }) - .unwrap(); - - Self { - cpu_stats, - t_cleanup, - } - } - - pub fn get_stats(&self) -> CpuStatsInner { - self.cpu_stats.get_stats() - } - - pub fn join(self) -> std::thread::Result<()> { - self.t_cleanup.join() - } - } - - fn read_env(key: &str, default: T) -> T - where - T: FromStr, - { - match std::env::var(key) { - Ok(val) => val.parse().unwrap_or(default), - Err(_e) => default, - } - } - - /// Obtains the benchmark config from the following environmental arguments: - /// - /// Basic benchmark settings: - /// - `BENCHMARK_SLOTS`: the number of slots in the benchmark. - /// - `BATCH_SIZE`: the number of slots in each write batch. - /// - `SHREDS_PER_SLOT`: the number of shreds in each slot. Together with - /// the `BATCH_SIZE` and `BENCHMARK_SLOTS`, it means: - /// - the number of shreds in one write batch is `BATCH_SIZE` * `SHREDS_PER_SLOT`. - /// - the total number of batches is `BENCHMARK_SLOTS` / `BATCH_SIZE`. - /// - the total number of shreds is `BENCHMARK_SLOTS` * `SHREDS_PER_SLOT`. - /// - `NUM_WRITERS`: controls the number of concurrent threads performing - /// shred insertion. Default: 1. - /// - /// Advanced benchmark settings: - /// - `STOP_SIZE_BYTES`: if specified, the benchmark will count how - /// many times the ledger store size exceeds the specified threshold. - /// - `STOP_SIZE_CF_DATA_BYTES`: if specified, the benchmark will count how - /// many times the storage size of `cf::ShredData` which stores data shred - /// exceeds the specified threshold. - /// - `STOP_SIZE_ITERATIONS`: when any of the stop size is specified, the - /// benchmark will stop immediately when the number of consecutive times - /// where the ledger store size exceeds the configured `STOP_SIZE_BYTES`. - /// These configs are used to make sure the benchmark runs successfully - /// under the storage limitation. - /// - `CLEANUP_BLOCKSTORE`: if true, the ledger store created in the current - /// benchmark run will be deleted. Default: true. - /// - /// Cleanup-service related settings: - /// - `MAX_LEDGER_SHREDS`: when the clean-up service is on, the service will - /// clean up the ledger store when the number of shreds exceeds this value. - /// - `CLEANUP_SERVICE`: whether to enable the background cleanup service. - /// If set to false, the ledger store in the benchmark will be purely relied - /// on RocksDB's compaction. Default: true. - /// - /// Fifo-compaction settings: - /// - `FIFO_COMPACTION`: if true, then RocksDB's Fifo compaction will be - /// used for storing data shreds. Default: false. - /// - `SHRED_DATA_CF_SIZE_BYTES`: the maximum size of the data-shred column family. - /// Default: 125 * 1024 * 1024 * 1024. 
- fn get_benchmark_config() -> BenchmarkConfig { - let benchmark_slots = read_env("BENCHMARK_SLOTS", DEFAULT_BENCHMARK_SLOTS); - let batch_size_slots = read_env("BATCH_SIZE", DEFAULT_BATCH_SIZE_SLOTS); - let max_ledger_shreds = read_env("MAX_LEDGER_SHREDS", DEFAULT_MAX_LEDGER_SHREDS); - let shreds_per_slot = read_env("SHREDS_PER_SLOT", DEFAULT_SHREDS_PER_SLOT); - let stop_size_bytes = read_env("STOP_SIZE_BYTES", DEFAULT_STOP_SIZE_BYTES); - let stop_size_iterations = read_env("STOP_SIZE_ITERATIONS", DEFAULT_STOP_SIZE_ITERATIONS); - let stop_size_cf_data_bytes = - read_env("STOP_SIZE_CF_DATA_BYTES", DEFAULT_STOP_SIZE_CF_DATA_BYTES); - let pre_generate_data = read_env("PRE_GENERATE_DATA", false); - let cleanup_blockstore = read_env("CLEANUP_BLOCKSTORE", true); - let num_writers = read_env("NUM_WRITERS", 1); - // A flag indicating whether to have a background clean-up service. - // If set to false, the ledger store will purely rely on RocksDB's - // compaction to perform the clean-up. - let cleanup_service = read_env("CLEANUP_SERVICE", true); - let fifo_compaction = read_env("FIFO_COMPACTION", false); - let shred_data_cf_size = - read_env("SHRED_DATA_CF_SIZE_BYTES", DEFAULT_SHRED_DATA_CF_SIZE_BYTES); - - BenchmarkConfig { - benchmark_slots, - batch_size_slots, - max_ledger_shreds, - shreds_per_slot, - stop_size_bytes, - stop_size_iterations, - stop_size_cf_data_bytes, - pre_generate_data, - cleanup_blockstore, - num_writers, - cleanup_service, - fifo_compaction, - shred_data_cf_size, - } - } - - fn emit_header() { - println!("TIME_MS,DELTA_MS,START_SLOT,BATCH_SIZE,SHREDS,MAX,SIZE,DELTA_SIZE,DATA_SHRED_SIZE,DATA_SHRED_SIZE_DELTA,CPU_USER,CPU_SYSTEM,CPU_IDLE"); - } - - #[allow(clippy::too_many_arguments)] - fn emit_stats( - time_initial: Instant, - time_previous: &mut Instant, - storage_previous: &mut u64, - data_shred_storage_previous: &mut u64, - start_slot: u64, - batch_size: u64, - num_shreds: u64, - max_shreds: i64, - blockstore: &Blockstore, - cpu: &CpuStatsInner, - ) { - let time_now = Instant::now(); - let storage_now = blockstore.storage_size().unwrap_or(0); - let data_shred_storage_now = blockstore.total_data_shred_storage_size().unwrap(); - let (cpu_user, cpu_system, cpu_idle) = (cpu.cpu_user, cpu.cpu_system, cpu.cpu_idle); - - info!( - "{},{},{},{},{},{},{},{},{},{},{:.2},{:.2},{:.2}", - time_now.duration_since(time_initial).as_millis(), - time_now.duration_since(*time_previous).as_millis(), - start_slot, - batch_size, - num_shreds, - max_shreds, - storage_now, - storage_now as i64 - *storage_previous as i64, - data_shred_storage_now, - data_shred_storage_now - *data_shred_storage_previous as i64, - cpu_user, - cpu_system, - cpu_idle, - ); - - *time_previous = time_now; - *storage_previous = storage_now; - *data_shred_storage_previous = data_shred_storage_now.try_into().unwrap(); - } - - /// Helper function of the benchmark `test_ledger_cleanup_compaction` which - /// returns true if the benchmark fails the size limitation check. 
- fn is_exceeded_stop_size_iterations( - storage_size: u64, - stop_size: u64, - exceeded_iterations: &mut u64, - iteration_limit: u64, - storage_desc: &str, - ) -> bool { - if stop_size > 0 { - if storage_size >= stop_size { - *exceeded_iterations += 1; - warn!( - "{} size {} exceeds the stop size {} for {} times!", - storage_desc, storage_size, stop_size, exceeded_iterations - ); - } else { - *exceeded_iterations = 0; - } - - if *exceeded_iterations >= iteration_limit { - error!( - "{} size exceeds the configured limit {} for {} times", - storage_desc, stop_size, exceeded_iterations, - ); - return true; - } - } - false - } - - /// The ledger cleanup test which can also be used as a benchmark - /// measuring shred insertion performance of the blockstore. - /// - /// The benchmark is controlled by several environmental arguments. - /// Check [`get_benchmark_config`] for the full list of arguments. - /// - /// Example command: - /// BENCHMARK_SLOTS=1000000 BATCH_SIZE=1 SHREDS_PER_SLOT=25 NUM_WRITERS=8 \ - /// PRE_GENERATE_DATA=false cargo test --release tests::test_ledger_cleanup \ - /// -- --exact --nocapture - #[test] - fn test_ledger_cleanup() { - solana_logger::setup_with("error,ledger_cleanup::tests=info"); - - let ledger_path = get_tmp_ledger_path!(); - let config = get_benchmark_config(); - let blockstore = Blockstore::open_with_options( - &ledger_path, - if config.fifo_compaction { - BlockstoreOptions { - column_options: LedgerColumnOptions { - shred_storage_type: ShredStorageType::RocksFifo( - BlockstoreRocksFifoOptions { - shred_data_cf_size: config.shred_data_cf_size, - shred_code_cf_size: config.shred_data_cf_size, - }, - ), - ..LedgerColumnOptions::default() - }, - ..BlockstoreOptions::default() - } - } else { - BlockstoreOptions::default() - }, - ) - .unwrap(); - let blockstore = Arc::new(blockstore); - - info!("Benchmark configuration: {:#?}", config); - info!("Ledger path: {:?}", &ledger_path); - - let benchmark_slots = config.benchmark_slots; - let batch_size_slots = config.batch_size_slots; - let max_ledger_shreds = config.max_ledger_shreds; - let shreds_per_slot = config.shreds_per_slot; - let stop_size_bytes = config.stop_size_bytes; - let stop_size_iterations = config.stop_size_iterations; - let stop_size_cf_data_bytes = config.stop_size_cf_data_bytes; - let pre_generate_data = config.pre_generate_data; - let num_writers = config.num_writers; - let cleanup_service = config.cleanup_service; - - let num_batches = benchmark_slots / batch_size_slots; - let num_shreds_total = benchmark_slots * shreds_per_slot; - - let (sender, receiver) = unbounded(); - let exit = Arc::new(AtomicBool::new(false)); - - let cleaner = if cleanup_service { - Some(LedgerCleanupService::new( - receiver, - blockstore.clone(), - max_ledger_shreds, - exit.clone(), - )) - } else { - None - }; - - let exit_cpu = Arc::new(AtomicBool::new(false)); - let sys = CpuStatsUpdater::new(exit_cpu.clone()); - - let mut shreds = VecDeque::new(); - - if pre_generate_data { - let mut pre_generate_data_timer = Measure::start("Pre-generate data"); - info!("Pre-generate data ... 
this may take a while"); - for i in 0..num_batches { - let start_slot = i * batch_size_slots; - let (new_shreds, _) = - make_many_slot_shreds(start_slot, batch_size_slots, shreds_per_slot); - shreds.push_back(new_shreds); - } - pre_generate_data_timer.stop(); - info!("{}", pre_generate_data_timer); - } - let shreds = Arc::new(Mutex::new(shreds)); - - info!( - "Bench info num_batches: {}, batch size (slots): {}, shreds_per_slot: {}, num_shreds_total: {}", - num_batches, - batch_size_slots, - shreds_per_slot, - num_shreds_total - ); - - let time_initial = Instant::now(); - let mut time_previous = time_initial; - let mut storage_previous = 0; - let mut data_shred_storage_previous = 0; - let mut stop_size_bytes_exceeded_iterations = 0; - let mut stop_size_cf_data_exceeded_iterations = 0; - - emit_header(); - emit_stats( - time_initial, - &mut time_previous, - &mut storage_previous, - &mut data_shred_storage_previous, - 0, - 0, - 0, - 0, - &blockstore, - &sys.get_stats(), - ); - - let mut insert_threads = vec![]; - let insert_exit = Arc::new(AtomicBool::new(false)); - - info!("Begin inserting shreds ..."); - let mut insert_timer = Measure::start("Shred insertion"); - let current_batch_id = Arc::new(AtomicU64::new(0)); - let finished_batch_count = Arc::new(AtomicU64::new(0)); - - for i in 0..num_writers { - let cloned_insert_exit = insert_exit.clone(); - let cloned_blockstore = blockstore.clone(); - let cloned_shreds = shreds.clone(); - let shared_batch_id = current_batch_id.clone(); - let shared_finished_count = finished_batch_count.clone(); - let insert_thread = Builder::new() - .name(format!("insert_shreds-{i}")) - .spawn(move || { - let start = Instant::now(); - let mut now = Instant::now(); - let mut total = 0; - let mut total_batches = 0; - let mut total_inserted_shreds = 0; - let mut num_shreds = 0; - let mut max_speed = 0f32; - let mut min_speed = f32::MAX; - let (first_shreds, _) = make_many_slot_shreds( - 0, batch_size_slots, shreds_per_slot); - loop { - let batch_id = shared_batch_id.fetch_add(1, Ordering::Relaxed); - let start_slot = batch_id * batch_size_slots; - if start_slot >= benchmark_slots { - break; - } - let len = batch_id; - - // No duplicates being generated, so all shreds - // being passed to insert() are getting inserted - let num_shred_inserted = if pre_generate_data { - let mut sl = cloned_shreds.lock().unwrap(); - if let Some(shreds_from_queue) = sl.pop_front() { - let num_shreds = shreds_from_queue.len(); - total += num_shreds; - cloned_blockstore.insert_shreds( - shreds_from_queue, None, false).unwrap(); - num_shreds - } else { - // If the queue is empty, we're done! 
- break; - } - } else { - let slot_id = start_slot; - if slot_id > 0 { - let (shreds_with_parent, _) = make_many_slot_shreds( - slot_id, batch_size_slots, shreds_per_slot); - let num_shreds = shreds_with_parent.len(); - total += num_shreds; - cloned_blockstore.insert_shreds( - shreds_with_parent.clone(), None, false).unwrap(); - num_shreds - } else { - let num_shreds = first_shreds.len(); - total += num_shreds; - cloned_blockstore.insert_shreds( - first_shreds.clone(), None, false).unwrap(); - num_shreds - } - }; - - total_batches += 1; - total_inserted_shreds += num_shred_inserted; - num_shreds += num_shred_inserted; - shared_finished_count.fetch_add(1, Ordering::Relaxed); - - // as_secs() returns whole number of seconds, so this runs every second - if now.elapsed().as_secs() > 0 { - let shreds_per_second = num_shreds as f32 / now.elapsed().as_secs() as f32; - warn!( - "insert-{} tried: {} inserted: {} batches: {} len: {} shreds_per_second: {}", - i, total, total_inserted_shreds, total_batches, len, shreds_per_second, - ); - let average_speed = - total_inserted_shreds as f32 / start.elapsed().as_secs() as f32; - max_speed = max_speed.max(shreds_per_second); - min_speed = min_speed.min(shreds_per_second); - warn!( - "highest: {} lowest: {} avg: {}", - max_speed, min_speed, average_speed - ); - now = Instant::now(); - num_shreds = 0; - } - - if cloned_insert_exit.load(Ordering::Relaxed) { - if max_speed > 0.0 { - info!( - "insert-{} exiting highest shreds/s: {}, lowest shreds/s: {}", - i, max_speed, min_speed - ); - } else { - // Not enough time elapsed to sample - info!( - "insert-{} exiting", - i - ); - } - break; - } - } - }) - .unwrap(); - insert_threads.push(insert_thread); - } - - loop { - let finished_batch = finished_batch_count.load(Ordering::Relaxed); - let finished_slot = (finished_batch + 1) * batch_size_slots - 1; - - if cleanup_service { - sender.send(finished_slot).unwrap(); - } - - emit_stats( - time_initial, - &mut time_previous, - &mut storage_previous, - &mut data_shred_storage_previous, - finished_slot, - batch_size_slots, - shreds_per_slot, - max_ledger_shreds as i64, - &blockstore, - &sys.get_stats(), - ); - - if is_exceeded_stop_size_iterations( - storage_previous, - stop_size_bytes, - &mut stop_size_bytes_exceeded_iterations, - stop_size_iterations, - "Storage", - ) { - break; - } - - if is_exceeded_stop_size_iterations( - data_shred_storage_previous, - stop_size_cf_data_bytes, - &mut stop_size_cf_data_exceeded_iterations, - stop_size_iterations, - "cf::ShredData", - ) { - break; - } - - if finished_batch >= num_batches { - break; - } else { - thread::sleep(Duration::from_millis(500)); - } - } - // Send exit signal to stop all the writer threads. 
- insert_exit.store(true, Ordering::Relaxed); - - while let Some(thread) = insert_threads.pop() { - thread.join().unwrap(); - } - insert_timer.stop(); - - info!( - "Done inserting shreds: {}, {} shreds/s", - insert_timer, - num_shreds_total as f32 / insert_timer.as_s(), - ); - - exit.store(true, Ordering::SeqCst); - if cleanup_service { - cleaner.unwrap().join().unwrap(); - } - - exit_cpu.store(true, Ordering::SeqCst); - sys.join().unwrap(); - - if config.cleanup_blockstore { - drop(blockstore); - Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); - } - } -} diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index a0c1aa90216747..71e46f2b66b2d2 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -221,12 +221,13 @@ fn run_bank_forks_snapshot_n( accounts_package_sender, }; for slot in 1..=last_slot { - let mut bank_forks_w = bank_forks.write().unwrap(); - let mut bank = - Bank::new_from_parent(bank_forks_w[slot - 1].clone(), &Pubkey::default(), slot); + let mut bank = Bank::new_from_parent( + bank_forks.read().unwrap().get(slot - 1).unwrap().clone(), + &Pubkey::default(), + slot, + ); f(&mut bank, mint_keypair); - let bank = bank_forks_w.insert(bank); - drop(bank_forks_w); + let bank = bank_forks.write().unwrap().insert(bank); // Set root to make sure we don't end up with too many account storage entries // and to allow snapshotting of bank and the purging logic on status_cache to // kick in @@ -352,7 +353,7 @@ fn test_concurrent_snapshot_packaging( DISABLED_SNAPSHOT_ARCHIVE_INTERVAL, ); - let mut bank_forks = snapshot_test_config.bank_forks.write().unwrap(); + let bank_forks = snapshot_test_config.bank_forks.clone(); let snapshot_config = &snapshot_test_config.snapshot_config; let bank_snapshots_dir = &snapshot_config.bank_snapshots_dir; let full_snapshot_archives_dir = &snapshot_config.full_snapshot_archives_dir; @@ -361,7 +362,7 @@ fn test_concurrent_snapshot_packaging( let genesis_config = &snapshot_test_config.genesis_config_info.genesis_config; // Take snapshot of zeroth bank - let bank0 = bank_forks.get(0).unwrap(); + let bank0 = bank_forks.read().unwrap().get(0).unwrap(); let storages = bank0.get_snapshot_storages(None); let slot_deltas = bank0.status_cache.read().unwrap().root_slot_deltas(); snapshot_bank_utils::add_bank_snapshot( @@ -394,7 +395,7 @@ fn test_concurrent_snapshot_packaging( for i in 0..MAX_BANK_SNAPSHOTS_TO_RETAIN + 2 { let parent_slot = i as Slot; let bank = Bank::new_from_parent( - bank_forks[parent_slot].clone(), + bank_forks.read().unwrap().get(parent_slot).unwrap().clone(), &Pubkey::default(), parent_slot + 1, ); @@ -438,10 +439,14 @@ fn test_concurrent_snapshot_packaging( ); accounts_package_sender.send(accounts_package).unwrap(); - bank_forks.insert(bank); + bank_forks.write().unwrap().insert(bank); if slot == saved_slot { // Find the relevant snapshot storages - let snapshot_storage_files: HashSet<_> = bank_forks[slot] + let snapshot_storage_files: HashSet<_> = bank_forks + .read() + .unwrap() + .get(slot) + .unwrap() .get_snapshot_storages(None) .into_iter() .map(|s| s.get_path()) diff --git a/cost-model/src/cost_model.rs b/cost-model/src/cost_model.rs index 0e8d6954202351..bb3e296d6dcbe0 100644 --- a/cost-model/src/cost_model.rs +++ b/cost-model/src/cost_model.rs @@ -8,17 +8,17 @@ use { crate::{block_cost_limits::*, transaction_cost::*}, log::*, - solana_program_runtime::compute_budget::{ - ComputeBudget, DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, MAX_COMPUTE_UNIT_LIMIT, + 
solana_program_runtime::{ + compute_budget::DEFAULT_HEAP_COST, + compute_budget_processor::{ + process_compute_budget_instructions, ComputeBudgetLimits, + DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, MAX_COMPUTE_UNIT_LIMIT, + }, }, solana_sdk::{ borsh0_10::try_from_slice_unchecked, compute_budget::{self, ComputeBudgetInstruction}, - feature_set::{ - add_set_tx_loaded_accounts_data_size_instruction, - include_loaded_accounts_data_size_in_fee_calculation, - remove_deprecated_request_unit_ix, FeatureSet, - }, + feature_set::{include_loaded_accounts_data_size_in_fee_calculation, FeatureSet}, fee::FeeStructure, instruction::CompiledInstruction, program_utils::limited_deserialize, @@ -62,10 +62,12 @@ impl CostModel { // to set limit, `compute_budget.loaded_accounts_data_size_limit` is set to default // limit of 64MB; which will convert to (64M/32K)*8CU = 16_000 CUs // - pub fn calculate_loaded_accounts_data_size_cost(compute_budget: &ComputeBudget) -> u64 { + pub fn calculate_loaded_accounts_data_size_cost( + compute_budget_limits: &ComputeBudgetLimits, + ) -> u64 { FeeStructure::calculate_memory_usage_cost( - compute_budget.loaded_accounts_data_size_limit, - compute_budget.heap_cost, + usize::try_from(compute_budget_limits.loaded_accounts_bytes).unwrap(), + DEFAULT_HEAP_COST, ) } @@ -128,32 +130,28 @@ impl CostModel { } // calculate bpf cost based on compute budget instructions - let mut compute_budget = ComputeBudget::default(); - - let result = compute_budget.process_instructions( - transaction.message().program_instructions_iter(), - !feature_set.is_active(&remove_deprecated_request_unit_ix::id()), - feature_set.is_active(&add_set_tx_loaded_accounts_data_size_instruction::id()), - ); // if failed to process compute_budget instructions, the transaction will not be executed // by `bank`, therefore it should be considered as no execution cost by cost model. - match result { - Ok(_) => { + match process_compute_budget_instructions( + transaction.message().program_instructions_iter(), + feature_set, + ) { + Ok(compute_budget_limits) => { // if tx contained user-space instructions and a more accurate estimate available correct it, // where "user-space instructions" must be specifically checked by // 'compute_unit_limit_is_set' flag, because compute_budget does not distinguish // builtin and bpf instructions when calculating default compute-unit-limit. 
(see // compute_budget.rs test `test_process_mixed_instructions_without_compute_budget`) if bpf_costs > 0 && compute_unit_limit_is_set { - bpf_costs = compute_budget.compute_unit_limit + bpf_costs = u64::from(compute_budget_limits.compute_unit_limit); } if feature_set .is_active(&include_loaded_accounts_data_size_in_fee_calculation::id()) { loaded_accounts_data_size_cost = - Self::calculate_loaded_accounts_data_size_cost(&compute_budget); + Self::calculate_loaded_accounts_data_size_cost(&compute_budget_limits); } } Err(_) => { @@ -545,7 +543,8 @@ mod tests { // default loaded_accounts_data_size_limit const DEFAULT_PAGE_COST: u64 = 8; let expected_loaded_accounts_data_size_cost = - solana_program_runtime::compute_budget::MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES as u64 + solana_program_runtime::compute_budget_processor::MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES + as u64 / ACCOUNT_DATA_COST_PAGE_SIZE * DEFAULT_PAGE_COST; @@ -663,36 +662,36 @@ mod tests { #[allow(clippy::field_reassign_with_default)] #[test] fn test_calculate_loaded_accounts_data_size_cost() { - let mut compute_budget = ComputeBudget::default(); + let mut compute_budget_limits = ComputeBudgetLimits::default(); // accounts data size are priced in block of 32K, ... // ... requesting less than 32K should still be charged as one block - compute_budget.loaded_accounts_data_size_limit = 31_usize * 1024; + compute_budget_limits.loaded_accounts_bytes = 31 * 1024; assert_eq!( - compute_budget.heap_cost, - CostModel::calculate_loaded_accounts_data_size_cost(&compute_budget) + DEFAULT_HEAP_COST, + CostModel::calculate_loaded_accounts_data_size_cost(&compute_budget_limits) ); // ... requesting exact 32K should be charged as one block - compute_budget.loaded_accounts_data_size_limit = 32_usize * 1024; + compute_budget_limits.loaded_accounts_bytes = 32 * 1024; assert_eq!( - compute_budget.heap_cost, - CostModel::calculate_loaded_accounts_data_size_cost(&compute_budget) + DEFAULT_HEAP_COST, + CostModel::calculate_loaded_accounts_data_size_cost(&compute_budget_limits) ); // ... requesting slightly above 32K should be charged as 2 block - compute_budget.loaded_accounts_data_size_limit = 33_usize * 1024; + compute_budget_limits.loaded_accounts_bytes = 33 * 1024; assert_eq!( - compute_budget.heap_cost * 2, - CostModel::calculate_loaded_accounts_data_size_cost(&compute_budget) + DEFAULT_HEAP_COST * 2, + CostModel::calculate_loaded_accounts_data_size_cost(&compute_budget_limits) ); // ... 
requesting exact 64K should be charged as 2 block - compute_budget.loaded_accounts_data_size_limit = 64_usize * 1024; + compute_budget_limits.loaded_accounts_bytes = 64 * 1024; assert_eq!( - compute_budget.heap_cost * 2, - CostModel::calculate_loaded_accounts_data_size_cost(&compute_budget) + DEFAULT_HEAP_COST * 2, + CostModel::calculate_loaded_accounts_data_size_cost(&compute_budget_limits) ); } diff --git a/cost-model/src/cost_tracker.rs b/cost-model/src/cost_tracker.rs index e4f1b917d74b26..efdd86512d2039 100644 --- a/cost-model/src/cost_tracker.rs +++ b/cost-model/src/cost_tracker.rs @@ -713,7 +713,7 @@ mod tests { } // case 3: add tx writes to [acct1, acct2], acct2 exceeds limit, should failed atomically, - // we shoudl still have: + // we should still have: // | acct1 | $cost | // | acct2 | $cost * 2 | // | acct3 | $cost | diff --git a/docs/src/developing/on-chain-programs/developing-rust.md b/docs/src/developing/on-chain-programs/developing-rust.md index 3e21799222077d..d1f8423ecdbe6f 100644 --- a/docs/src/developing/on-chain-programs/developing-rust.md +++ b/docs/src/developing/on-chain-programs/developing-rust.md @@ -386,5 +386,10 @@ $ cargo build-bpf --dump ## Examples The [Solana Program Library -github](https://github.com/solana-labs/solana-program-library/tree/master/examples/rust) +GitHub](https://github.com/solana-labs/solana-program-library/tree/master/examples/rust) repo contains a collection of Rust examples. + +The [Solana Developers +Program Examples GitHub](https://github.com/solana-developers/program-examples) +repo also contains a collection of beginner to intermediate Rust program +examples. \ No newline at end of file diff --git a/docs/src/developing/versioned-transactions.md b/docs/src/developing/versioned-transactions.md index 95c2073b2115b6..8d942814d7310d 100644 --- a/docs/src/developing/versioned-transactions.md +++ b/docs/src/developing/versioned-transactions.md @@ -93,7 +93,7 @@ let blockhash = await connection Create an `array` of all the `instructions` you desire to send in your transaction. In this example below, we are creating a simple SOL transfer instruction: ```js -// create an array with your desires `instructions` +// create an array with your desired `instructions` const instructions = [ web3.SystemProgram.transfer({ fromPubkey: payer.publicKey, diff --git a/docs/src/running-validator/validator-stake.md b/docs/src/running-validator/validator-stake.md index ae430ae9675c8f..71c9cd213016e8 100644 --- a/docs/src/running-validator/validator-stake.md +++ b/docs/src/running-validator/validator-stake.md @@ -3,7 +3,7 @@ title: Staking --- **By default your validator will have no stake.** This means it will be -ineligible to become leader. +ineligible to become leader, and unable to land votes. ## Monitoring Catch Up @@ -55,8 +55,25 @@ but only one re-delegation is permitted per epoch: solana delegate-stake ~/validator-stake-keypair.json ~/some-other-vote-account-keypair.json ``` -Assuming the node is voting, now you're up and running and generating validator -rewards. Rewards are paid automatically on epoch boundaries. +## Validator Stake Warm-up + +To combat various attacks on consensus, new stake delegations are subject to +a [warm-up](/staking/stake-accounts#delegation-warmup-and-cooldown) +period. 
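While a delegation warms up, only part of it is effective; the remainder is reported as activating stake. As a rough illustration of what `solana stake-account` shows for a freshly delegated account (a hedged sketch: the figures and exact field names are illustrative and vary across CLI versions):

```
$ solana stake-account ~/validator-stake-keypair.json
Balance: 100.0 SOL
Rent Exempt Reserve: 0.00228288 SOL
Delegated Stake: 99.99 SOL
Active Stake: 0 SOL
Activating Stake: 99.99 SOL
Delegated Vote Account Address: <your vote account pubkey>
```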
+
+Monitor a validator's stake during warm-up by:
+
+- View your vote account: `solana vote-account ~/vote-account-keypair.json`. This displays the current state of all the votes the validator has submitted to the network.
+- View your stake account, the delegation preference and details of your stake: `solana stake-account ~/validator-stake-keypair.json`
+- `solana validators` displays the current active stake of all validators, including yours
+- `solana stake-history` shows the history of stake warming up and cooling down over recent epochs
+- Look for log messages on your validator indicating your next leader slot: `[2019-09-27T20:16:00.319721164Z INFO solana_core::replay_stage] voted and reset PoH at tick height ####. My next leader slot is ####`
+- Once your stake is warmed up, you will see a stake balance listed for your validator by running `solana validators`
+
+## Validator Rewards
+
+Once your stake is warmed up, and assuming the node is voting, you will be
+generating validator rewards. Rewards are paid automatically on epoch boundaries.
 
 The rewards lamports earned are split between your stake account and the vote
 account according to the commission rate set in the vote account. Rewards can
@@ -76,21 +93,6 @@ before submitting a transaction. Learn more about
 [transaction fees here](../implemented-proposals/transaction-fees.md).
 
-## Validator Stake Warm-up
-
-To combat various attacks on consensus, new stake delegations are subject to
-a [warm-up](/staking/stake-accounts#delegation-warmup-and-cooldown)
-period.
-
-Monitor a validator's stake during warmup by:
-
-- View your vote account:`solana vote-account ~/vote-account-keypair.json` This displays the current state of all the votes the validator has submitted to the network.
-- View your stake account, the delegation preference and details of your stake:`solana stake-account ~/validator-stake-keypair.json`
-- `solana validators` displays the current active stake of all validators, including yours
-- `solana stake-history` shows the history of stake warming up and cooling down over recent epochs
-- Look for log messages on your validator indicating your next leader slot: `[2019-09-27T20:16:00.319721164Z INFO solana_core::replay_stage] voted and reset PoH at tick height ####. My next leader slot is ####`
-- Once your stake is warmed up, you will see a stake balance listed for your validator by running `solana validators`
-
 ## Monitor Your Staked Validator
 
 Confirm your validator becomes a [leader](../terminology.md#leader)
diff --git a/docs/src/running-validator/validator-start.md b/docs/src/running-validator/validator-start.md
index cfa3aa152f15a6..ccd012aa79997c 100644
--- a/docs/src/running-validator/validator-start.md
+++ b/docs/src/running-validator/validator-start.md
@@ -255,6 +255,12 @@ Remember to move your authorized withdrawer keypair into a very secure location
 
 Read more about [creating and managing a vote account](vote-accounts.md).
 
+## Stake your validator
+
+Until your validator is staked, it will be unable to vote, propose leader blocks, or collect rewards.
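As a minimal sketch of what staking involves (the keypair paths and amount are placeholders; the page linked below is the authoritative walkthrough), you fund a stake account and delegate it to your vote account:

```
solana-keygen new -o ~/validator-stake-keypair.json
solana create-stake-account ~/validator-stake-keypair.json 1 # amount in SOL
solana delegate-stake ~/validator-stake-keypair.json ~/vote-account-keypair.json
```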
+ +Follow the instructions to [stake your validator](validator-stake.md) + ## Known validators If you know and respect other validator operators, you can specify this on the command line with the `--known-validator ` diff --git a/docs/src/validator/get-started/setup-a-validator.md b/docs/src/validator/get-started/setup-a-validator.md index 6598400bda5a37..8379b6f1d1c4d1 100644 --- a/docs/src/validator/get-started/setup-a-validator.md +++ b/docs/src/validator/get-started/setup-a-validator.md @@ -131,7 +131,7 @@ Make sure you have the latest and greatest package versions on your server ``` sudo apt update -sudo apt install +sudo apt upgrade ``` ## Sol User diff --git a/docs/static/katex/katex.js b/docs/static/katex/katex.js index 37fb0fa89676a4..e5d316691883bf 100644 --- a/docs/static/katex/katex.js +++ b/docs/static/katex/katex.js @@ -3674,7 +3674,7 @@ function assertSpan(group) { // '\expandafter\show\the\scriptscriptfont2' \ // '\stop' // -// The metrics themselves were retreived using the following commands: +// The metrics themselves were retrieved using the following commands: // // tftopl cmsy10 // tftopl cmsy7 diff --git a/docs/static/katex/katex.mjs b/docs/static/katex/katex.mjs index 76938229fff30c..488d2101c21232 100644 --- a/docs/static/katex/katex.mjs +++ b/docs/static/katex/katex.mjs @@ -3698,7 +3698,7 @@ var metricMap = { // '\expandafter\show\the\scriptscriptfont2' \ // '\stop' // -// The metrics themselves were retreived using the following commands: +// The metrics themselves were retrieved using the following commands: // // tftopl cmsy10 // tftopl cmsy7 diff --git a/geyser-plugin-manager/src/block_metadata_notifier_interface.rs b/geyser-plugin-manager/src/block_metadata_notifier_interface.rs index f48df55d8d0ce5..465f700efe3275 100644 --- a/geyser-plugin-manager/src/block_metadata_notifier_interface.rs +++ b/geyser-plugin-manager/src/block_metadata_notifier_interface.rs @@ -22,4 +22,4 @@ pub trait BlockMetadataNotifier { ); } -pub type BlockMetadataNotifierLock = Arc>; +pub type BlockMetadataNotifierArc = Arc; diff --git a/geyser-plugin-manager/src/geyser_plugin_manager.rs b/geyser-plugin-manager/src/geyser_plugin_manager.rs index 0698cf1a656363..20729146767c0a 100644 --- a/geyser-plugin-manager/src/geyser_plugin_manager.rs +++ b/geyser-plugin-manager/src/geyser_plugin_manager.rs @@ -177,6 +177,22 @@ impl GeyserPluginManager { data: None, })?; + // Then see if a plugin with this name already exists. If so, abort + if self + .plugins + .iter() + .any(|plugin| plugin.name().eq(new_plugin.name())) + { + return Err(jsonrpc_core::Error { + code: ErrorCode::InvalidRequest, + message: format!( + "There already exists a plugin named {} loaded, while reloading {name}. 
Did not load requested plugin", + new_plugin.name() + ), + data: None, + }); + } + // Attempt to on_load with new plugin match new_plugin.on_load(new_parsed_config_file) { // On success, push plugin and library @@ -244,13 +260,13 @@ pub enum GeyserPluginManagerError { #[error("Invalid plugin path")] InvalidPluginPath, - #[error("Cannot load plugin shared library")] + #[error("Cannot load plugin shared library (error: {0})")] PluginLoadError(String), #[error("The geyser plugin {0} is already loaded shared library")] PluginAlreadyLoaded(String), - #[error("The GeyserPlugin on_load method failed")] + #[error("The GeyserPlugin on_load method failed (error: {0})")] PluginStartError(String), } diff --git a/geyser-plugin-manager/src/geyser_plugin_service.rs b/geyser-plugin-manager/src/geyser_plugin_service.rs index b8f9db49102dc7..ff3e050dc4b391 100644 --- a/geyser-plugin-manager/src/geyser_plugin_service.rs +++ b/geyser-plugin-manager/src/geyser_plugin_service.rs @@ -2,7 +2,7 @@ use { crate::{ accounts_update_notifier::AccountsUpdateNotifierImpl, block_metadata_notifier::BlockMetadataNotifierImpl, - block_metadata_notifier_interface::BlockMetadataNotifierLock, + block_metadata_notifier_interface::BlockMetadataNotifierArc, entry_notifier::EntryNotifierImpl, geyser_plugin_manager::{GeyserPluginManager, GeyserPluginManagerRequest}, slot_status_notifier::SlotStatusNotifierImpl, @@ -12,10 +12,10 @@ use { crossbeam_channel::Receiver, log::*, solana_accounts_db::accounts_update_notifier_interface::AccountsUpdateNotifier, - solana_ledger::entry_notifier_interface::EntryNotifierLock, + solana_ledger::entry_notifier_interface::EntryNotifierArc, solana_rpc::{ optimistically_confirmed_bank_tracker::SlotNotification, - transaction_notifier_interface::TransactionNotifierLock, + transaction_notifier_interface::TransactionNotifierArc, }, std::{ path::{Path, PathBuf}, @@ -34,9 +34,9 @@ pub struct GeyserPluginService { slot_status_observer: Option, plugin_manager: Arc>, accounts_update_notifier: Option, - transaction_notifier: Option, - entry_notifier: Option, - block_metadata_notifier: Option, + transaction_notifier: Option, + entry_notifier: Option, + block_metadata_notifier: Option, } impl GeyserPluginService { @@ -87,29 +87,29 @@ impl GeyserPluginService { if account_data_notifications_enabled { let accounts_update_notifier = AccountsUpdateNotifierImpl::new(plugin_manager.clone()); - Some(Arc::new(RwLock::new(accounts_update_notifier))) + Some(Arc::new(accounts_update_notifier)) } else { None }; - let transaction_notifier: Option = + let transaction_notifier: Option = if transaction_notifications_enabled { let transaction_notifier = TransactionNotifierImpl::new(plugin_manager.clone()); - Some(Arc::new(RwLock::new(transaction_notifier))) + Some(Arc::new(transaction_notifier)) } else { None }; - let entry_notifier: Option = if entry_notifications_enabled { + let entry_notifier: Option = if entry_notifications_enabled { let entry_notifier = EntryNotifierImpl::new(plugin_manager.clone()); - Some(Arc::new(RwLock::new(entry_notifier))) + Some(Arc::new(entry_notifier)) } else { None }; let (slot_status_observer, block_metadata_notifier): ( Option, - Option, + Option, ) = if account_data_notifications_enabled || transaction_notifications_enabled || entry_notifications_enabled @@ -121,9 +121,9 @@ impl GeyserPluginService { confirmed_bank_receiver, slot_status_notifier, )), - Some(Arc::new(RwLock::new(BlockMetadataNotifierImpl::new( + Some(Arc::new(BlockMetadataNotifierImpl::new( plugin_manager.clone(), - )))), + ))), 
) } else { (None, None) @@ -160,15 +160,15 @@ impl GeyserPluginService { self.accounts_update_notifier.clone() } - pub fn get_transaction_notifier(&self) -> Option { + pub fn get_transaction_notifier(&self) -> Option { self.transaction_notifier.clone() } - pub fn get_entry_notifier(&self) -> Option { + pub fn get_entry_notifier(&self) -> Option { self.entry_notifier.clone() } - pub fn get_block_metadata_notifier(&self) -> Option { + pub fn get_block_metadata_notifier(&self) -> Option { self.block_metadata_notifier.clone() } diff --git a/gossip/tests/gossip.rs b/gossip/tests/gossip.rs index 569f7c480dfa1e..7759679bdfd36c 100644 --- a/gossip/tests/gossip.rs +++ b/gossip/tests/gossip.rs @@ -139,14 +139,16 @@ fn retransmit_to( .filter(|addr| socket_addr_space.check(addr)) .collect() }; - if let Err(SendPktsError::IoError(ioerr, num_failed)) = multi_target_send(socket, data, &dests) - { - error!( - "retransmit_to multi_target_send error: {:?}, {}/{} packets failed", - ioerr, - num_failed, - dests.len(), - ); + match multi_target_send(socket, data, &dests) { + Ok(()) => (), + Err(SendPktsError::IoError(ioerr, num_failed)) => { + error!( + "retransmit_to multi_target_send error: {:?}, {}/{} packets failed", + ioerr, + num_failed, + dests.len(), + ); + } } } diff --git a/install/src/command.rs b/install/src/command.rs index ac53f5fe2b5fd5..ed8d37ff0f3b8e 100644 --- a/install/src/command.rs +++ b/install/src/command.rs @@ -968,58 +968,62 @@ pub fn update(config_file: &str, check_only: bool) -> Result { pub fn init_or_update(config_file: &str, is_init: bool, check_only: bool) -> Result { let mut config = Config::load(config_file)?; - let semver_update_type = if is_init { - SemverUpdateType::Fixed - } else { - SemverUpdateType::Patch - }; - let (updated_version, download_url_and_sha256, release_dir) = if let Some(explicit_release) = &config.explicit_release { match explicit_release { ExplicitRelease::Semver(current_release_semver) => { - let progress_bar = new_spinner_progress_bar(); - progress_bar.set_message(format!("{LOOKING_GLASS}Checking for updates...")); - - let github_release = check_for_newer_github_release( - current_release_semver, - semver_update_type, - is_init, - )?; - - progress_bar.finish_and_clear(); + let release_dir = config.release_dir(current_release_semver); + if is_init && release_dir.exists() { + (current_release_semver.to_owned(), None, release_dir) + } else { + let progress_bar = new_spinner_progress_bar(); + progress_bar.set_message(format!("{LOOKING_GLASS}Checking for updates...")); - match github_release { - None => { - return Err(format!("Unknown release: {current_release_semver}")); - } - Some(release_semver) => { - if release_semver == *current_release_semver { - if let Ok(active_release_version) = load_release_version( - &config.active_release_dir().join("version.yml"), - ) { - if format!("v{current_release_semver}") - == active_release_version.channel - { - println!( + let semver_update_type = if is_init { + SemverUpdateType::Fixed + } else { + SemverUpdateType::Patch + }; + let github_release = check_for_newer_github_release( + current_release_semver, + semver_update_type, + is_init, + )?; + + progress_bar.finish_and_clear(); + + match github_release { + None => { + return Err(format!("Unknown release: {current_release_semver}")); + } + Some(release_semver) => { + if release_semver == *current_release_semver { + if let Ok(active_release_version) = load_release_version( + &config.active_release_dir().join("version.yml"), + ) { + if 
format!("v{current_release_semver}") + == active_release_version.channel + { + println!( "Install is up to date. {release_semver} is the latest compatible release" ); - return Ok(false); + return Ok(false); + } } } + config.explicit_release = + Some(ExplicitRelease::Semver(release_semver.clone())); + + let release_dir = config.release_dir(&release_semver); + let download_url_and_sha256 = if release_dir.exists() { + // Release already present in the cache + None + } else { + Some((github_release_download_url(&release_semver), None)) + }; + (release_semver, download_url_and_sha256, release_dir) } - config.explicit_release = - Some(ExplicitRelease::Semver(release_semver.clone())); - - let release_dir = config.release_dir(&release_semver); - let download_url_and_sha256 = if release_dir.exists() { - // Release already present in the cache - None - } else { - Some((github_release_download_url(&release_semver), None)) - }; - (release_semver, download_url_and_sha256, release_dir) } } } diff --git a/ledger-tool/src/args.rs b/ledger-tool/src/args.rs index c11954a56780ab..0bb28e4a2779ca 100644 --- a/ledger-tool/src/args.rs +++ b/ledger-tool/src/args.rs @@ -83,6 +83,8 @@ pub fn get_accounts_db_config( exhaustively_verify_refcounts: arg_matches.is_present("accounts_db_verify_refcounts"), skip_initial_hash_calc: arg_matches.is_present("accounts_db_skip_initial_hash_calculation"), test_partitioned_epoch_rewards, + test_skip_rewrites_but_include_in_bank_hash: arg_matches + .is_present("accounts_db_test_skip_rewrites"), ..AccountsDbConfig::default() } } diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index 697199981b26f8..33031e9d14a0a5 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -1128,6 +1128,12 @@ fn main() { "Debug option to scan all AppendVecs and verify account index refcounts prior to clean", ) .hidden(hidden_unless_forced()); + let accounts_db_test_skip_rewrites_but_include_in_bank_hash = Arg::with_name("accounts_db_test_skip_rewrites") + .long("accounts-db-test-skip-rewrites") + .help( + "Debug option to skip rewrites for rent-exempt accounts but still add them in bank delta hash calculation", + ) + .hidden(hidden_unless_forced()); let accounts_filler_count = Arg::with_name("accounts_filler_count") .long("accounts-filler-count") .value_name("COUNT") @@ -1556,6 +1562,7 @@ fn main() { .arg(&disable_disk_index) .arg(&accountsdb_verify_refcounts) .arg(&accounts_db_skip_initial_hash_calc_arg) + .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) ) .subcommand( SubCommand::with_name("shred-meta") @@ -1573,6 +1580,7 @@ fn main() { .arg(&disable_disk_index) .arg(&accountsdb_verify_refcounts) .arg(&accounts_db_skip_initial_hash_calc_arg) + .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) ) .subcommand( SubCommand::with_name("bounds") @@ -1608,6 +1616,7 @@ fn main() { .arg(&disable_disk_index) .arg(&accountsdb_skip_shrink) .arg(&accountsdb_verify_refcounts) + .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) .arg(&accounts_filler_count) .arg(&accounts_filler_size) .arg(&verify_index_arg) @@ -1688,6 +1697,7 @@ fn main() { .arg(&accounts_index_limit) .arg(&disable_disk_index) .arg(&accountsdb_verify_refcounts) + .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) .arg(&accounts_db_skip_initial_hash_calc_arg) .arg(&halt_at_slot_arg) .arg(&hard_forks_arg) @@ -1724,6 +1734,7 @@ fn main() { .arg(&accounts_index_limit) .arg(&disable_disk_index) .arg(&accountsdb_verify_refcounts) + 
.arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) .arg(&accounts_db_skip_initial_hash_calc_arg) .arg(&accountsdb_skip_shrink) .arg(&ancient_append_vecs) @@ -1918,6 +1929,7 @@ fn main() { .arg(&accounts_index_limit) .arg(&disable_disk_index) .arg(&accountsdb_verify_refcounts) + .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) .arg(&accounts_db_skip_initial_hash_calc_arg) .arg(&halt_at_slot_arg) .arg(&hard_forks_arg) @@ -1952,6 +1964,7 @@ fn main() { .arg(&accounts_index_limit) .arg(&disable_disk_index) .arg(&accountsdb_verify_refcounts) + .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) .arg(&accounts_db_skip_initial_hash_calc_arg) .arg(&halt_at_slot_arg) .arg(&hard_forks_arg) diff --git a/ledger-tool/src/program.rs b/ledger-tool/src/program.rs index 7420a1f7a10b4b..c1a65170a239fa 100644 --- a/ledger-tool/src/program.rs +++ b/ledger-tool/src/program.rs @@ -552,7 +552,7 @@ pub fn program(ledger_path: &Path, matches: &ArgMatches<'_>) { .clone(), ); for key in cached_account_keys { - loaded_programs.replenish(key, bank.load_program(&key, false)); + loaded_programs.replenish(key, bank.load_program(&key, false, None)); debug!("Loaded program {}", key); } invoke_context.programs_loaded_for_tx_batch = &loaded_programs; diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml index b3fb1ac5f9b97d..87ba0c39235a12 100644 --- a/ledger/Cargo.toml +++ b/ledger/Cargo.toml @@ -25,6 +25,7 @@ lazy_static = { workspace = true } libc = { workspace = true } log = { workspace = true } lru = { workspace = true } +mockall = { workspace = true } num_cpus = { workspace = true } num_enum = { workspace = true } prost = { workspace = true } diff --git a/ledger/src/bigtable_upload.rs b/ledger/src/bigtable_upload.rs index 3db5f8eebbe863..be28ee8a0703d8 100644 --- a/ledger/src/bigtable_upload.rs +++ b/ledger/src/bigtable_upload.rs @@ -138,7 +138,7 @@ pub async fn upload_confirmed_blocks( "No blocks between {} and {} need to be uploaded to bigtable", starting_slot, ending_slot ); - return Ok(last_blockstore_slot); + return Ok(ending_slot); } let last_slot = *blocks_to_upload.last().unwrap(); info!( diff --git a/ledger/src/bigtable_upload_service.rs b/ledger/src/bigtable_upload_service.rs index 3149eb96a32d8e..0ffb02aac2475c 100644 --- a/ledger/src/bigtable_upload_service.rs +++ b/ledger/src/bigtable_upload_service.rs @@ -117,7 +117,7 @@ impl BigTableUploadService { )); match result { - Ok(last_slot_uploaded) => start_slot = last_slot_uploaded, + Ok(last_slot_uploaded) => start_slot = last_slot_uploaded.saturating_add(1), Err(err) => { warn!("bigtable: upload_confirmed_blocks: {}", err); std::thread::sleep(std::time::Duration::from_secs(2)); diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index ce9336e1132192..3ea9525fcb194f 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -1962,12 +1962,27 @@ impl Blockstore { } } - pub fn get_block_time(&self, slot: Slot) -> Result> { - datapoint_info!("blockstore-rpc-api", ("method", "get_block_time", String)); + fn get_block_time(&self, slot: Slot) -> Result> { let _lock = self.check_lowest_cleanup_slot(slot)?; self.blocktime_cf.get(slot) } + pub fn get_rooted_block_time(&self, slot: Slot) -> Result { + datapoint_info!( + "blockstore-rpc-api", + ("method", "get_rooted_block_time", String) + ); + let _lock = self.check_lowest_cleanup_slot(slot)?; + + if self.is_root(slot) { + return self + .blocktime_cf + .get(slot)? 
+ .ok_or(BlockstoreError::SlotUnavailable); + } + Err(BlockstoreError::SlotNotRooted) + } + pub fn cache_block_time(&self, slot: Slot, timestamp: UnixTimestamp) -> Result<()> { self.blocktime_cf.put(slot, ×tamp) } @@ -7806,7 +7821,7 @@ pub mod tests { assert_eq!(counter, 1); } - fn do_test_lowest_cleanup_slot_and_special_cfs(simulate_ledger_cleanup_service: bool) { + fn do_test_lowest_cleanup_slot_and_special_cfs(simulate_blockstore_cleanup_service: bool) { solana_logger::setup(); let ledger_path = get_tmp_ledger_path_auto_delete!(); @@ -7914,13 +7929,13 @@ pub mod tests { assert_eq!(are_missing, (false, false)); assert_existing_always(); - if simulate_ledger_cleanup_service { + if simulate_blockstore_cleanup_service { *blockstore.lowest_cleanup_slot.write().unwrap() = lowest_cleanup_slot; blockstore.purge_slots(0, lowest_cleanup_slot, PurgeType::CompactionFilter); } let are_missing = check_for_missing(); - if simulate_ledger_cleanup_service { + if simulate_blockstore_cleanup_service { // ... when either simulation (or both) is effective, we should observe to be missing // consistently assert_eq!(are_missing, (true, true)); @@ -7932,12 +7947,12 @@ pub mod tests { } #[test] - fn test_lowest_cleanup_slot_and_special_cfs_with_ledger_cleanup_service_simulation() { + fn test_lowest_cleanup_slot_and_special_cfs_with_blockstore_cleanup_service_simulation() { do_test_lowest_cleanup_slot_and_special_cfs(true); } #[test] - fn test_lowest_cleanup_slot_and_special_cfs_without_ledger_cleanup_service_simulation() { + fn test_lowest_cleanup_slot_and_special_cfs_without_blockstore_cleanup_service_simulation() { do_test_lowest_cleanup_slot_and_special_cfs(false); } diff --git a/core/src/ledger_cleanup_service.rs b/ledger/src/blockstore_cleanup_service.rs similarity index 93% rename from core/src/ledger_cleanup_service.rs rename to ledger/src/blockstore_cleanup_service.rs index 80924bf7628564..dbd8e64e612186 100644 --- a/core/src/ledger_cleanup_service.rs +++ b/ledger/src/blockstore_cleanup_service.rs @@ -1,15 +1,15 @@ -//! The `ledger_cleanup_service` drops older ledger data to limit disk space usage. +//! The `blockstore_cleanup_service` drops older ledger data to limit disk space usage. //! The service works by counting the number of live data shreds in the ledger; this //! can be done quickly and should have a fairly stable correlation to actual bytes. //! Once the shred count (and thus roughly the byte count) reaches a threshold, //! the services begins removing data in FIFO order. use { - crossbeam_channel::{Receiver, RecvTimeoutError}, - solana_ledger::{ + crate::{ blockstore::{Blockstore, PurgeType}, blockstore_db::{Result as BlockstoreResult, DATA_SHRED_CF}, }, + crossbeam_channel::{Receiver, RecvTimeoutError}, solana_measure::measure::Measure, solana_sdk::clock::Slot, std::{ @@ -40,11 +40,11 @@ pub const DEFAULT_MIN_MAX_LEDGER_SHREDS: u64 = 50_000_000; // and starve other blockstore users. pub const DEFAULT_PURGE_SLOT_INTERVAL: u64 = 512; -pub struct LedgerCleanupService { +pub struct BlockstoreCleanupService { t_cleanup: JoinHandle<()>, } -impl LedgerCleanupService { +impl BlockstoreCleanupService { pub fn new( new_root_receiver: Receiver, blockstore: Arc, @@ -54,12 +54,12 @@ impl LedgerCleanupService { let mut last_purge_slot = 0; info!( - "LedgerCleanupService active. max ledger shreds={}", + "BlockstoreCleanupService active. 
max ledger shreds={}", max_ledger_shreds ); let t_cleanup = Builder::new() - .name("solLedgerClean".to_string()) + .name("solBstoreClean".to_string()) .spawn(move || loop { if exit.load(Ordering::Relaxed) { break; @@ -296,8 +296,8 @@ impl LedgerCleanupService { mod tests { use { super::*, + crate::{blockstore::make_many_slot_entries, get_tmp_ledger_path_auto_delete}, crossbeam_channel::unbounded, - solana_ledger::{blockstore::make_many_slot_entries, get_tmp_ledger_path_auto_delete}, }; fn flush_blockstore_contents_to_disk(blockstore: Blockstore) -> Blockstore { @@ -314,7 +314,7 @@ mod tests { #[test] fn test_find_slots_to_clean() { - // LedgerCleanupService::find_slots_to_clean() does not modify the + // BlockstoreCleanupService::find_slots_to_clean() does not modify the // Blockstore, so we can make repeated calls on the same slots solana_logger::setup(); let ledger_path = get_tmp_ledger_path_auto_delete!(); @@ -334,22 +334,31 @@ mod tests { // Ensure no cleaning of slots > last_root let last_root = 0; let max_ledger_shreds = 0; - let (should_clean, lowest_purged, _) = - LedgerCleanupService::find_slots_to_clean(&blockstore, last_root, max_ledger_shreds); + let (should_clean, lowest_purged, _) = BlockstoreCleanupService::find_slots_to_clean( + &blockstore, + last_root, + max_ledger_shreds, + ); // Slot 0 will exist in blockstore with zero shreds since it is slot // 1's parent. Thus, slot 0 will be identified for clean. assert!(should_clean && lowest_purged == 0); // Now, set max_ledger_shreds to 1, slot 0 still eligible for clean let max_ledger_shreds = 1; - let (should_clean, lowest_purged, _) = - LedgerCleanupService::find_slots_to_clean(&blockstore, last_root, max_ledger_shreds); + let (should_clean, lowest_purged, _) = BlockstoreCleanupService::find_slots_to_clean( + &blockstore, + last_root, + max_ledger_shreds, + ); assert!(should_clean && lowest_purged == 0); // Ensure no cleaning if blockstore contains fewer than max_ledger_shreds let last_root = num_slots; let max_ledger_shreds = (shreds_per_slot * num_slots) + 1; - let (should_clean, lowest_purged, _) = - LedgerCleanupService::find_slots_to_clean(&blockstore, last_root, max_ledger_shreds); + let (should_clean, lowest_purged, _) = BlockstoreCleanupService::find_slots_to_clean( + &blockstore, + last_root, + max_ledger_shreds, + ); assert!(!should_clean && lowest_purged == 0); for slot in 1..=num_slots { @@ -357,7 +366,7 @@ mod tests { let last_root = slot; // Set max_ledger_shreds to 0 so that all eligible slots are cleaned let max_ledger_shreds = 0; - let (should_clean, lowest_purged, _) = LedgerCleanupService::find_slots_to_clean( + let (should_clean, lowest_purged, _) = BlockstoreCleanupService::find_slots_to_clean( &blockstore, last_root, max_ledger_shreds, @@ -369,7 +378,7 @@ mod tests { // Set max_ledger_shreds to the number of shreds in slots > slot. 
// This will make it so that slots [1, slot] are cleaned let max_ledger_shreds = shreds_per_slot * (num_slots - slot); - let (should_clean, lowest_purged, _) = LedgerCleanupService::find_slots_to_clean( + let (should_clean, lowest_purged, _) = BlockstoreCleanupService::find_slots_to_clean( &blockstore, last_root, max_ledger_shreds, @@ -393,8 +402,14 @@ mod tests { //send a signal to kill all but 5 shreds, which will be in the newest slots let mut last_purge_slot = 0; sender.send(50).unwrap(); - LedgerCleanupService::cleanup_ledger(&receiver, &blockstore, 5, &mut last_purge_slot, 10) - .unwrap(); + BlockstoreCleanupService::cleanup_ledger( + &receiver, + &blockstore, + 5, + &mut last_purge_slot, + 10, + ) + .unwrap(); assert_eq!(last_purge_slot, 50); //check that 0-40 don't exist @@ -437,7 +452,7 @@ mod tests { let mut time = Measure::start("purge time"); sender.send(slot + num_slots).unwrap(); - LedgerCleanupService::cleanup_ledger( + BlockstoreCleanupService::cleanup_ledger( &receiver, &blockstore, initial_slots, diff --git a/core/src/ledger_metric_report_service.rs b/ledger/src/blockstore_metric_report_service.rs similarity index 75% rename from core/src/ledger_metric_report_service.rs rename to ledger/src/blockstore_metric_report_service.rs index 2e91013eb991b8..393442a3e25aca 100644 --- a/core/src/ledger_metric_report_service.rs +++ b/ledger/src/blockstore_metric_report_service.rs @@ -1,7 +1,7 @@ -//! The `ledger_metric_report_service` periodically reports ledger store metrics. +//! The `blockstore_metric_report_service` periodically reports ledger store metrics. use { - solana_ledger::blockstore::Blockstore, + crate::blockstore::Blockstore, std::{ string::ToString, sync::{ @@ -14,15 +14,15 @@ use { }; // Determines how often we report blockstore metrics under -// LedgerMetricReportService. Note that there're other blockstore -// metrics that are reported outside LedgerMetricReportService. +// BlockstoreMetricReportService. Note that there are other blockstore +// metrics that are reported outside BlockstoreMetricReportService. const BLOCKSTORE_METRICS_REPORT_PERIOD_MILLIS: u64 = 10000; -pub struct LedgerMetricReportService { +pub struct BlockstoreMetricReportService { t_cf_metric: JoinHandle<()>, } -impl LedgerMetricReportService { +impl BlockstoreMetricReportService { pub fn new(blockstore: Arc, exit: Arc) -> Self { let t_cf_metric = Builder::new() .name("solRocksCfMtrcs".to_string()) diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index bf8fa02249da15..5218b55c4b9050 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -216,12 +216,27 @@ fn execute_batch( } #[derive(Default)] -struct ExecuteBatchesInternalMetrics { +pub struct ExecuteBatchesInternalMetrics { execution_timings_per_thread: HashMap, total_batches_len: u64, execute_batches_us: u64, } +impl ExecuteBatchesInternalMetrics { + pub fn new_with_timings_from_all_threads(execute_timings: ExecuteTimings) -> Self { + const DUMMY_THREAD_INDEX: usize = 999; + let mut new = Self::default(); + new.execution_timings_per_thread.insert( + DUMMY_THREAD_INDEX, + ThreadExecuteTimings { + execute_timings, + ..ThreadExecuteTimings::default() + }, + ); + new + } +} + fn execute_batches_internal( bank: &Arc, batches: &[TransactionBatchWithIndexes], @@ -294,6 +309,70 @@ fn execute_batches_internal( }) } +// This fn diverts the code-path into two variants. Both must provide exactly the same set of +// validations. 
For this reason, this fn is deliberately inserted into the code path to be called +// inside process_entries(), so that Bank::prepare_sanitized_batch() has been called on all of the +// batches already, while minimizing code duplication (and thus divergent behavior risk) at the cost of +// the acceptable overhead of meaningless buffering of batches for the scheduler variant. +// +// Also note that the scheduler variant can't implement the batch-level sanitization naively, due +// to the nature of individual tx processing. That's another reason for this particular placement of the +// divergence point in the code-path (i.e. not one layer up with its own prepare_sanitized_batch() +// invocation). +fn process_batches( + bank: &BankWithScheduler, + batches: &[TransactionBatchWithIndexes], + transaction_status_sender: Option<&TransactionStatusSender>, + replay_vote_sender: Option<&ReplayVoteSender>, + batch_execution_timing: &mut BatchExecutionTiming, + log_messages_bytes_limit: Option<usize>, + prioritization_fee_cache: &PrioritizationFeeCache, +) -> Result<()> { + if bank.has_installed_scheduler() { + debug!( + "process_batches()/schedule_batches_for_execution({} batches)", + batches.len() + ); + // Scheduling always succeeds here without being blocked on actual transaction executions. + // Any transaction execution errors will be collected via the blocking fn + // BankWithScheduler::wait_for_completed_scheduler(). + schedule_batches_for_execution(bank, batches); + Ok(()) + } else { + debug!( + "process_batches()/rebatch_and_execute_batches({} batches)", + batches.len() + ); + rebatch_and_execute_batches( + bank, + batches, + transaction_status_sender, + replay_vote_sender, + batch_execution_timing, + log_messages_bytes_limit, + prioritization_fee_cache, + ) + } +} + +fn schedule_batches_for_execution( + bank: &BankWithScheduler, + batches: &[TransactionBatchWithIndexes], +) { + for TransactionBatchWithIndexes { + batch, + transaction_indexes, + } in batches + { + bank.schedule_transaction_executions( + batch + .sanitized_transactions() + .iter() + .zip(transaction_indexes.iter()), + ); + } +} + fn rebatch_transactions<'a>( lock_results: &'a [Result<()>], bank: &'a Arc<Bank>, @@ -314,7 +393,7 @@ } } -fn execute_batches( +fn rebatch_and_execute_batches( bank: &Arc<Bank>, batches: &[TransactionBatchWithIndexes], transaction_status_sender: Option<&TransactionStatusSender>, @@ -488,7 +567,7 @@ fn process_entries( if bank.is_block_boundary(bank.tick_height() + tick_hashes.len() as u64) { // If it's a tick that will cause a new blockhash to be created, // execute the group and register the tick - execute_batches( + process_batches( bank, &batches, transaction_status_sender, @@ -541,7 +620,7 @@ } else { // else we have an entry that conflicts with a prior entry // execute the current queue and try to process this entry again - execute_batches( + process_batches( bank, &batches, transaction_status_sender, @@ -556,7 +635,7 @@ } } } - execute_batches( + process_batches( bank, &batches, transaction_status_sender, @@ -1004,7 +1083,7 @@ pub struct BatchExecutionTiming { } impl BatchExecutionTiming { - fn accumulate(&mut self, new_batch: ExecuteBatchesInternalMetrics) { + pub fn accumulate(&mut self, new_batch: ExecuteBatchesInternalMetrics) { let Self { totals, wall_clock_us, @@ -1318,6 +1397,9 @@ fn process_bank_0( &mut ExecuteTimings::default(), ) .expect("Failed to process bank 0 from ledger.
Did you forget to provide a snapshot?"); + if let Some((result, _timings)) = bank0.wait_for_completed_scheduler() { + result.unwrap(); + } bank0.freeze(); if blockstore.is_primary_access() { blockstore.insert_bank_hash(bank0.slot(), bank0.hash(), false); @@ -1720,6 +1802,9 @@ fn process_single_slot( err })?; + if let Some((result, _timings)) = bank.wait_for_completed_scheduler() { + result? + } bank.freeze(); // all banks handled by this routine are created from complete slots if blockstore.is_primary_access() { blockstore.insert_bank_hash(bank.slot(), bank.hash(), false); @@ -1856,8 +1941,11 @@ pub mod tests { rand::{thread_rng, Rng}, solana_entry::entry::{create_ticks, next_entry, next_entry_mut}, solana_program_runtime::declare_process_instruction, - solana_runtime::genesis_utils::{ - self, create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs, + solana_runtime::{ + genesis_utils::{ + self, create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs, + }, + installed_scheduler_pool::{MockInstalledScheduler, SchedulingContext, WaitReason}, }, solana_sdk::{ account::{AccountSharedData, WritableAccount}, @@ -4245,6 +4333,38 @@ pub mod tests { ) } + fn create_test_transactions( + mint_keypair: &Keypair, + genesis_hash: &Hash, + ) -> Vec { + let pubkey = solana_sdk::pubkey::new_rand(); + let keypair2 = Keypair::new(); + let pubkey2 = solana_sdk::pubkey::new_rand(); + let keypair3 = Keypair::new(); + let pubkey3 = solana_sdk::pubkey::new_rand(); + + vec![ + SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( + mint_keypair, + &pubkey, + 1, + *genesis_hash, + )), + SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( + &keypair2, + &pubkey2, + 1, + *genesis_hash, + )), + SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( + &keypair3, + &pubkey3, + 1, + *genesis_hash, + )), + ] + } + #[test] fn test_confirm_slot_entries_progress_num_txs_indexes() { let GenesisConfigInfo { @@ -4368,34 +4488,7 @@ pub mod tests { .. } = create_genesis_config_with_leader(500, &dummy_leader_pubkey, 100); let bank = Arc::new(Bank::new_for_tests(&genesis_config)); - - let pubkey = solana_sdk::pubkey::new_rand(); - let keypair2 = Keypair::new(); - let pubkey2 = solana_sdk::pubkey::new_rand(); - let keypair3 = Keypair::new(); - let pubkey3 = solana_sdk::pubkey::new_rand(); - - let txs = vec![ - SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( - &mint_keypair, - &pubkey, - 1, - genesis_config.hash(), - )), - SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( - &keypair2, - &pubkey2, - 1, - genesis_config.hash(), - )), - SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( - &keypair3, - &pubkey3, - 1, - genesis_config.hash(), - )), - ]; - + let txs = create_test_transactions(&mint_keypair, &genesis_config.hash()); let batch = bank.prepare_sanitized_batch(&txs); assert!(batch.needs_unlock()); let transaction_indexes = vec![42, 43, 44]; @@ -4424,6 +4517,64 @@ pub mod tests { assert_eq!(batch3.transaction_indexes, vec![43, 44]); } + #[test] + fn test_schedule_batches_for_execution() { + solana_logger::setup(); + let dummy_leader_pubkey = solana_sdk::pubkey::new_rand(); + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. 
+ } = create_genesis_config_with_leader(500, &dummy_leader_pubkey, 100); + let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let context = SchedulingContext::new(bank.clone()); + + let txs = create_test_transactions(&mint_keypair, &genesis_config.hash()); + + let mut mocked_scheduler = MockInstalledScheduler::new(); + let mut seq = mockall::Sequence::new(); + mocked_scheduler + .expect_context() + .times(1) + .in_sequence(&mut seq) + .return_const(context); + mocked_scheduler + .expect_schedule_execution() + .times(txs.len()) + .returning(|_| ()); + mocked_scheduler + .expect_wait_for_termination() + .with(mockall::predicate::eq(WaitReason::DroppedFromBankForks)) + .times(1) + .in_sequence(&mut seq) + .returning(|_| None); + mocked_scheduler + .expect_return_to_pool() + .times(1) + .in_sequence(&mut seq) + .returning(|| ()); + let bank = BankWithScheduler::new(bank, Some(Box::new(mocked_scheduler))); + + let batch = bank.prepare_sanitized_batch(&txs); + let batch_with_indexes = TransactionBatchWithIndexes { + batch, + transaction_indexes: (0..txs.len()).collect(), + }; + + let mut batch_execution_timing = BatchExecutionTiming::default(); + let ignored_prioritization_fee_cache = PrioritizationFeeCache::new(0u64); + assert!(process_batches( + &bank, + &[batch_with_indexes], + None, + None, + &mut batch_execution_timing, + None, + &ignored_prioritization_fee_cache + ) + .is_ok()); + } + #[test] fn test_confirm_slot_entries_with_fix() { const HASHES_PER_TICK: u64 = 10; diff --git a/ledger/src/entry_notifier_interface.rs b/ledger/src/entry_notifier_interface.rs index de523fc979ab01..174be9e1b7f1f4 100644 --- a/ledger/src/entry_notifier_interface.rs +++ b/ledger/src/entry_notifier_interface.rs @@ -1,11 +1,7 @@ -use { - solana_entry::entry::EntrySummary, - solana_sdk::clock::Slot, - std::sync::{Arc, RwLock}, -}; +use {solana_entry::entry::EntrySummary, solana_sdk::clock::Slot, std::sync::Arc}; pub trait EntryNotifier { fn notify_entry(&self, slot: Slot, index: usize, entry: &EntrySummary); } -pub type EntryNotifierLock = Arc>; +pub type EntryNotifierArc = Arc; diff --git a/ledger/src/entry_notifier_service.rs b/ledger/src/entry_notifier_service.rs index 5e108c94e80578..ec7eae0bc75723 100644 --- a/ledger/src/entry_notifier_service.rs +++ b/ledger/src/entry_notifier_service.rs @@ -1,5 +1,5 @@ use { - crate::entry_notifier_interface::EntryNotifierLock, + crate::entry_notifier_interface::EntryNotifierArc, crossbeam_channel::{unbounded, Receiver, RecvTimeoutError, Sender}, solana_entry::entry::EntrySummary, solana_sdk::clock::Slot, @@ -28,7 +28,7 @@ pub struct EntryNotifierService { } impl EntryNotifierService { - pub fn new(entry_notifier: EntryNotifierLock, exit: Arc) -> Self { + pub fn new(entry_notifier: EntryNotifierArc, exit: Arc) -> Self { let (entry_notification_sender, entry_notification_receiver) = unbounded(); let thread_hdl = Builder::new() .name("solEntryNotif".to_string()) @@ -52,14 +52,11 @@ impl EntryNotifierService { fn notify_entry( entry_notification_receiver: &EntryNotifierReceiver, - entry_notifier: EntryNotifierLock, + entry_notifier: EntryNotifierArc, ) -> Result<(), RecvTimeoutError> { let EntryNotification { slot, index, entry } = entry_notification_receiver.recv_timeout(Duration::from_secs(1))?; - entry_notifier - .write() - .unwrap() - .notify_entry(slot, index, &entry); + entry_notifier.notify_entry(slot, index, &entry); Ok(()) } diff --git a/ledger/src/lib.rs b/ledger/src/lib.rs index 0f311ca1216ec4..10dd5182717841 100644 --- a/ledger/src/lib.rs +++ 
b/ledger/src/lib.rs @@ -9,8 +9,10 @@ pub mod block_error; #[macro_use] pub mod blockstore; pub mod ancestor_iterator; +pub mod blockstore_cleanup_service; pub mod blockstore_db; pub mod blockstore_meta; +pub mod blockstore_metric_report_service; pub mod blockstore_metrics; pub mod blockstore_options; pub mod blockstore_processor; diff --git a/ledger/src/use_snapshot_archives_at_startup.rs b/ledger/src/use_snapshot_archives_at_startup.rs index e34abfb777967f..b173ed1564e5fa 100644 --- a/ledger/src/use_snapshot_archives_at_startup.rs +++ b/ledger/src/use_snapshot_archives_at_startup.rs @@ -8,7 +8,6 @@ use strum::{Display, EnumString, EnumVariantNames, IntoStaticStr, VariantNames}; pub enum UseSnapshotArchivesAtStartup { /// If snapshot archives are used, they will be extracted and overwrite any existing state /// already on disk. This will incur the associated runtime costs for extracting. - #[default] Always, /// If snapshot archives are not used, then the local snapshot state already on disk is /// used instead. If there is no local state on disk, startup will fail. @@ -18,6 +17,7 @@ pub enum UseSnapshotArchivesAtStartup { /// restarting. At startup, the snapshot archive would be the newest and loaded from. /// Note, this also implies that snapshot archives will be used if there is no local snapshot /// state on disk. + #[default] WhenNewest, } diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index 658fdf0de3b04e..f6791307dd9453 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -765,12 +765,16 @@ fn test_incremental_snapshot_download_with_crossing_full_snapshot_interval_at_st accounts_hash_interval, num_account_paths, ); - let validator_snapshot_test_config = SnapshotValidatorConfig::new( + let mut validator_snapshot_test_config = SnapshotValidatorConfig::new( full_snapshot_interval, incremental_snapshot_interval, accounts_hash_interval, num_account_paths, ); + // The test has asserts that require the validator always boots from snapshot archives + validator_snapshot_test_config + .validator_config + .use_snapshot_archives_at_startup = UseSnapshotArchivesAtStartup::Always; let stake = DEFAULT_NODE_STAKE; let mut config = ClusterConfig { node_stakes: vec![stake], @@ -4270,49 +4274,6 @@ fn test_leader_failure_4() { ); } -#[test] -#[serial] -fn test_ledger_cleanup_service() { - solana_logger::setup_with_default(RUST_LOG_FILTER); - error!("test_ledger_cleanup_service"); - let num_nodes = 3; - let validator_config = ValidatorConfig { - max_ledger_shreds: Some(100), - ..ValidatorConfig::default_for_test() - }; - let mut config = ClusterConfig { - cluster_lamports: DEFAULT_CLUSTER_LAMPORTS, - poh_config: PohConfig::new_sleep(Duration::from_millis(50)), - node_stakes: vec![DEFAULT_NODE_STAKE; num_nodes], - validator_configs: make_identical_validator_configs(&validator_config, num_nodes), - ..ClusterConfig::default() - }; - let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified); - // 200ms/per * 100 = 20 seconds, so sleep a little longer than that. 
- sleep(Duration::from_secs(60)); - - cluster_tests::spend_and_verify_all_nodes( - &cluster.entry_point_info, - &cluster.funding_keypair, - num_nodes, - HashSet::new(), - SocketAddrSpace::Unspecified, - &cluster.connection_cache, - ); - cluster.close_preserve_ledgers(); - //check everyone's ledgers and make sure only ~100 slots are stored - for info in cluster.validators.values() { - let mut slots = 0; - let blockstore = Blockstore::open(&info.info.ledger_path).unwrap(); - blockstore - .slot_meta_iterator(0) - .unwrap() - .for_each(|_| slots += 1); - // with 3 nodes up to 3 slots can be in progress and not complete so max slots in blockstore should be up to 103 - assert!(slots <= 103, "got {slots}"); - } -} - // This test verifies that even if votes from a validator end up taking too long to land, and thus // some of the referenced slots are slots are no longer present in the slot hashes sysvar, // consensus can still be attained. diff --git a/multinode-demo/bootstrap-validator.sh b/multinode-demo/bootstrap-validator.sh index f69c05d1ed3d7f..5afc543b2f0032 100755 --- a/multinode-demo/bootstrap-validator.sh +++ b/multinode-demo/bootstrap-validator.sh @@ -106,6 +106,9 @@ while [[ -n $1 ]]; do elif [[ $1 = --log-messages-bytes-limit ]]; then args+=("$1" "$2") shift 2 + elif [[ $1 == --block-production-method ]]; then + args+=("$1" "$2") + shift 2 else echo "Unknown argument: $1" $program --help diff --git a/multinode-demo/validator.sh b/multinode-demo/validator.sh index 9090055b908b10..487154101ac979 100755 --- a/multinode-demo/validator.sh +++ b/multinode-demo/validator.sh @@ -182,6 +182,9 @@ while [[ -n $1 ]]; do elif [[ $1 == --skip-require-tower ]]; then maybeRequireTower=false shift + elif [[ $1 == --block-production-method ]]; then + args+=("$1" "$2") + shift 2 elif [[ $1 = -h ]]; then usage "$@" else diff --git a/program-runtime/src/compute_budget.rs b/program-runtime/src/compute_budget.rs index f9239224b488a0..a568162c139c37 100644 --- a/program-runtime/src/compute_budget.rs +++ b/program-runtime/src/compute_budget.rs @@ -1,28 +1,11 @@ use { - crate::prioritization_fee::{PrioritizationFeeDetails, PrioritizationFeeType}, + crate::compute_budget_processor::{self, process_compute_budget_instructions}, solana_sdk::{ - borsh0_10::try_from_slice_unchecked, - compute_budget::{self, ComputeBudgetInstruction}, - entrypoint::HEAP_LENGTH as MIN_HEAP_FRAME_BYTES, - feature_set::{ - add_set_tx_loaded_accounts_data_size_instruction, remove_deprecated_request_unit_ix, - FeatureSet, - }, - fee::FeeBudgetLimits, - instruction::{CompiledInstruction, InstructionError}, - pubkey::Pubkey, - transaction::TransactionError, + feature_set::FeatureSet, instruction::CompiledInstruction, pubkey::Pubkey, + transaction::Result, }, }; -/// The total accounts data a transaction can load is limited to 64MiB to not break -/// anyone in Mainnet-beta today. 
It can be set by set_loaded_accounts_data_size_limit instruction -pub const MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES: usize = 64 * 1024 * 1024; - -pub const DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT: u32 = 200_000; -pub const MAX_COMPUTE_UNIT_LIMIT: u32 = 1_400_000; -const MAX_HEAP_FRAME_BYTES: u32 = 256 * 1024; - #[cfg(RUSTC_WITH_SPECIALIZATION)] impl ::solana_frozen_abi::abi_example::AbiExample for ComputeBudget { fn example() -> Self { @@ -31,6 +14,10 @@ impl ::solana_frozen_abi::abi_example::AbiExample for ComputeBudget { } } +/// Roughly 0.5us/page, where page is 32K; given roughly 15CU/us, the +/// default heap page cost = 0.5 * 15 ~= 8CU/page +pub const DEFAULT_HEAP_COST: u64 = 8; + #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct ComputeBudget { /// Number of compute units that a transaction or individual instruction is @@ -118,9 +105,6 @@ pub struct ComputeBudget { pub alt_bn128_pairing_one_pair_cost_other: u64, /// Big integer modular exponentiation cost pub big_modular_exponentiation_cost: u64, - /// Maximum accounts data size, in bytes, that a transaction is allowed to load; The - /// value is capped by MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES to prevent overuse of memory. - pub loaded_accounts_data_size_limit: usize, /// Coefficient `a` of the quadratic function which determines the number /// of compute units consumed to call poseidon syscall for a given number /// of inputs. @@ -143,7 +127,7 @@ pub struct ComputeBudget { impl Default for ComputeBudget { fn default() -> Self { - Self::new(MAX_COMPUTE_UNIT_LIMIT as u64) + Self::new(compute_budget_processor::MAX_COMPUTE_UNIT_LIMIT as u64) } } @@ -180,14 +164,13 @@ impl ComputeBudget { curve25519_ristretto_msm_base_cost: 2303, curve25519_ristretto_msm_incremental_cost: 788, heap_size: u32::try_from(solana_sdk::entrypoint::HEAP_LENGTH).unwrap(), - heap_cost: 8, + heap_cost: DEFAULT_HEAP_COST, mem_op_base_cost: 10, alt_bn128_addition_cost: 334, alt_bn128_multiplication_cost: 3_840, alt_bn128_pairing_one_pair_cost_first: 36_364, alt_bn128_pairing_one_pair_cost_other: 12_121, big_modular_exponentiation_cost: 33, - loaded_accounts_data_size_limit: MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, poseidon_cost_coefficient_a: 61, poseidon_cost_coefficient_c: 542, get_remaining_compute_units_cost: 100, @@ -198,127 +181,16 @@ impl ComputeBudget { } } - pub fn process_instructions<'a>( - &mut self, - instructions: impl Iterator, - support_request_units_deprecated: bool, - support_set_loaded_accounts_data_size_limit_ix: bool, - ) -> Result { - let mut num_non_compute_budget_instructions: u32 = 0; - let mut updated_compute_unit_limit = None; - let mut requested_heap_size = None; - let mut prioritization_fee = None; - let mut updated_loaded_accounts_data_size_limit = None; - - for (i, (program_id, instruction)) in instructions.enumerate() { - if compute_budget::check_id(program_id) { - let invalid_instruction_data_error = TransactionError::InstructionError( - i as u8, - InstructionError::InvalidInstructionData, - ); - let duplicate_instruction_error = TransactionError::DuplicateInstruction(i as u8); - - match try_from_slice_unchecked(&instruction.data) { - Ok(ComputeBudgetInstruction::RequestUnitsDeprecated { - units: compute_unit_limit, - additional_fee, - }) if support_request_units_deprecated => { - if updated_compute_unit_limit.is_some() { - return Err(duplicate_instruction_error); - } - if prioritization_fee.is_some() { - return Err(duplicate_instruction_error); - } - updated_compute_unit_limit = Some(compute_unit_limit); - prioritization_fee = - 
Some(PrioritizationFeeType::Deprecated(additional_fee as u64)); - } - Ok(ComputeBudgetInstruction::RequestHeapFrame(bytes)) => { - if requested_heap_size.is_some() { - return Err(duplicate_instruction_error); - } - requested_heap_size = Some((bytes, i as u8)); - } - Ok(ComputeBudgetInstruction::SetComputeUnitLimit(compute_unit_limit)) => { - if updated_compute_unit_limit.is_some() { - return Err(duplicate_instruction_error); - } - updated_compute_unit_limit = Some(compute_unit_limit); - } - Ok(ComputeBudgetInstruction::SetComputeUnitPrice(micro_lamports)) => { - if prioritization_fee.is_some() { - return Err(duplicate_instruction_error); - } - prioritization_fee = - Some(PrioritizationFeeType::ComputeUnitPrice(micro_lamports)); - } - Ok(ComputeBudgetInstruction::SetLoadedAccountsDataSizeLimit(bytes)) - if support_set_loaded_accounts_data_size_limit_ix => - { - if updated_loaded_accounts_data_size_limit.is_some() { - return Err(duplicate_instruction_error); - } - updated_loaded_accounts_data_size_limit = Some(bytes as usize); - } - _ => return Err(invalid_instruction_data_error), - } - } else { - // only include non-request instructions in default max calc - num_non_compute_budget_instructions = - num_non_compute_budget_instructions.saturating_add(1); - } - } - - if let Some((bytes, i)) = requested_heap_size { - if bytes > MAX_HEAP_FRAME_BYTES - || bytes < MIN_HEAP_FRAME_BYTES as u32 - || bytes % 1024 != 0 - { - return Err(TransactionError::InstructionError( - i, - InstructionError::InvalidInstructionData, - )); - } - self.heap_size = bytes; - } - - let compute_unit_limit = updated_compute_unit_limit - .unwrap_or_else(|| { - num_non_compute_budget_instructions - .saturating_mul(DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT) - }) - .min(MAX_COMPUTE_UNIT_LIMIT); - self.compute_unit_limit = u64::from(compute_unit_limit); - - self.loaded_accounts_data_size_limit = updated_loaded_accounts_data_size_limit - .unwrap_or(MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES) - .min(MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES); - - Ok(prioritization_fee - .map(|fee_type| PrioritizationFeeDetails::new(fee_type, self.compute_unit_limit)) - .unwrap_or_default()) - } - - pub fn fee_budget_limits<'a>( + pub fn try_from_instructions<'a>( instructions: impl Iterator, feature_set: &FeatureSet, - ) -> FeeBudgetLimits { - let mut compute_budget = Self::default(); - - let prioritization_fee_details = compute_budget - .process_instructions( - instructions, - !feature_set.is_active(&remove_deprecated_request_unit_ix::id()), - feature_set.is_active(&add_set_tx_loaded_accounts_data_size_instruction::id()), - ) - .unwrap_or_default(); - - FeeBudgetLimits { - loaded_accounts_data_size_limit: compute_budget.loaded_accounts_data_size_limit, - heap_cost: compute_budget.heap_cost, - compute_unit_limit: compute_budget.compute_unit_limit, - prioritization_fee: prioritization_fee_details.get_fee(), - } + ) -> Result { + let compute_budget_limits = process_compute_budget_instructions(instructions, feature_set)?; + Ok(ComputeBudget { + compute_unit_limit: u64::from(compute_budget_limits.compute_unit_limit), + heap_size: compute_budget_limits.updated_heap_bytes, + ..ComputeBudget::default() + }) } /// Returns cost of the Poseidon hash function for the given number of @@ -350,489 +222,3 @@ impl ComputeBudget { Some(final_result) } } - -#[cfg(test)] -mod tests { - use { - super::*, - solana_sdk::{ - hash::Hash, - instruction::Instruction, - message::Message, - pubkey::Pubkey, - signature::Keypair, - signer::Signer, - system_instruction::{self}, - 
transaction::{SanitizedTransaction, Transaction}, - }, - }; - - macro_rules! test { - ( $instructions: expr, $expected_result: expr, $expected_budget: expr, $support_set_loaded_accounts_data_size_limit_ix: expr ) => { - let payer_keypair = Keypair::new(); - let tx = SanitizedTransaction::from_transaction_for_tests(Transaction::new( - &[&payer_keypair], - Message::new($instructions, Some(&payer_keypair.pubkey())), - Hash::default(), - )); - let mut compute_budget = ComputeBudget::default(); - let result = compute_budget.process_instructions( - tx.message().program_instructions_iter(), - false, /*not support request_units_deprecated*/ - $support_set_loaded_accounts_data_size_limit_ix, - ); - assert_eq!($expected_result, result); - assert_eq!(compute_budget, $expected_budget); - }; - ( $instructions: expr, $expected_result: expr, $expected_budget: expr) => { - test!($instructions, $expected_result, $expected_budget, false); - }; - } - - #[test] - fn test_process_instructions() { - // Units - test!( - &[], - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: 0, - ..ComputeBudget::default() - } - ); - test!( - &[ - ComputeBudgetInstruction::set_compute_unit_limit(1), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: 1, - ..ComputeBudget::default() - } - ); - test!( - &[ - ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT + 1), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: MAX_COMPUTE_UNIT_LIMIT as u64, - ..ComputeBudget::default() - } - ); - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT), - ], - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: MAX_COMPUTE_UNIT_LIMIT as u64, - ..ComputeBudget::default() - } - ); - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::set_compute_unit_limit(1), - ], - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: 1, - ..ComputeBudget::default() - } - ); - - test!( - &[ - ComputeBudgetInstruction::set_compute_unit_limit(1), - ComputeBudgetInstruction::set_compute_unit_price(42) - ], - Ok(PrioritizationFeeDetails::new( - PrioritizationFeeType::ComputeUnitPrice(42), - 1 - )), - ComputeBudget { - compute_unit_limit: 1, - ..ComputeBudget::default() - } - ); - - // HeapFrame - test!( - &[], - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: 0, - ..ComputeBudget::default() - } - ); - test!( - &[ - ComputeBudgetInstruction::request_heap_frame(40 * 1024), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, - heap_size: 40 * 1024, - ..ComputeBudget::default() - } - ); - test!( - &[ - ComputeBudgetInstruction::request_heap_frame(40 * 1024 + 1), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - Err(TransactionError::InstructionError( - 0, - InstructionError::InvalidInstructionData, - )), - ComputeBudget::default() - ); - test!( - &[ - 
ComputeBudgetInstruction::request_heap_frame(31 * 1024), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - Err(TransactionError::InstructionError( - 0, - InstructionError::InvalidInstructionData, - )), - ComputeBudget::default() - ); - test!( - &[ - ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES + 1), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - Err(TransactionError::InstructionError( - 0, - InstructionError::InvalidInstructionData, - )), - ComputeBudget::default() - ); - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES), - ], - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, - heap_size: MAX_HEAP_FRAME_BYTES, - ..ComputeBudget::default() - } - ); - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::request_heap_frame(1), - ], - Err(TransactionError::InstructionError( - 3, - InstructionError::InvalidInstructionData, - )), - ComputeBudget::default() - ); - - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64 * 7, - ..ComputeBudget::default() - } - ); - - // Combined - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES), - ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT), - ComputeBudgetInstruction::set_compute_unit_price(u64::MAX), - ], - Ok(PrioritizationFeeDetails::new( - PrioritizationFeeType::ComputeUnitPrice(u64::MAX), - MAX_COMPUTE_UNIT_LIMIT as u64, - )), - ComputeBudget { - compute_unit_limit: MAX_COMPUTE_UNIT_LIMIT as u64, - heap_size: MAX_HEAP_FRAME_BYTES, - ..ComputeBudget::default() - } - ); - - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::set_compute_unit_limit(1), - ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES), - ComputeBudgetInstruction::set_compute_unit_price(u64::MAX), - ], - Ok(PrioritizationFeeDetails::new( - PrioritizationFeeType::ComputeUnitPrice(u64::MAX), - 1 - )), - ComputeBudget { - compute_unit_limit: 1, - heap_size: MAX_HEAP_FRAME_BYTES, - ..ComputeBudget::default() - } - ); - - // Duplicates - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT), - ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT - 1), - ], - Err(TransactionError::DuplicateInstruction(2)), - ComputeBudget::default() - ); - - test!( - &[ - 
Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::request_heap_frame(MIN_HEAP_FRAME_BYTES as u32), - ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES), - ], - Err(TransactionError::DuplicateInstruction(2)), - ComputeBudget::default() - ); - - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::set_compute_unit_price(0), - ComputeBudgetInstruction::set_compute_unit_price(u64::MAX), - ], - Err(TransactionError::DuplicateInstruction(2)), - ComputeBudget::default() - ); - - // deprecated - test!( - &[Instruction::new_with_borsh( - compute_budget::id(), - &compute_budget::ComputeBudgetInstruction::RequestUnitsDeprecated { - units: 1_000, - additional_fee: 10 - }, - vec![] - )], - Err(TransactionError::InstructionError( - 0, - InstructionError::InvalidInstructionData, - )), - ComputeBudget::default() - ); - } - - #[test] - fn test_process_loaded_accounts_data_size_limit_instruction() { - // Assert for empty instructions, change value of support_set_loaded_accounts_data_size_limit_ix - // will not change results, which should all be default - for support_set_loaded_accounts_data_size_limit_ix in [true, false] { - test!( - &[], - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: 0, - ..ComputeBudget::default() - }, - support_set_loaded_accounts_data_size_limit_ix - ); - } - - // Assert when set_loaded_accounts_data_size_limit presents, - // if support_set_loaded_accounts_data_size_limit_ix then - // budget is set with data_size - // else - // return InstructionError - let data_size: usize = 1; - for support_set_loaded_accounts_data_size_limit_ix in [true, false] { - let (expected_result, expected_budget) = - if support_set_loaded_accounts_data_size_limit_ix { - ( - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, - loaded_accounts_data_size_limit: data_size, - ..ComputeBudget::default() - }, - ) - } else { - ( - Err(TransactionError::InstructionError( - 0, - InstructionError::InvalidInstructionData, - )), - ComputeBudget::default(), - ) - }; - - test!( - &[ - ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size as u32), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - expected_result, - expected_budget, - support_set_loaded_accounts_data_size_limit_ix - ); - } - - // Assert when set_loaded_accounts_data_size_limit presents, with greater than max value - // if support_set_loaded_accounts_data_size_limit_ix then - // budget is set to max data size - // else - // return InstructionError - let data_size: usize = MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES + 1; - for support_set_loaded_accounts_data_size_limit_ix in [true, false] { - let (expected_result, expected_budget) = - if support_set_loaded_accounts_data_size_limit_ix { - ( - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, - loaded_accounts_data_size_limit: MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, - ..ComputeBudget::default() - }, - ) - } else { - ( - Err(TransactionError::InstructionError( - 0, - InstructionError::InvalidInstructionData, - )), - ComputeBudget::default(), - ) - }; - - test!( - &[ - ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size as u32), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ], - expected_result, - expected_budget, - 
support_set_loaded_accounts_data_size_limit_ix - ); - } - - // Assert when set_loaded_accounts_data_size_limit is not presented - // if support_set_loaded_accounts_data_size_limit_ix then - // budget is set to default data size - // else - // return - for support_set_loaded_accounts_data_size_limit_ix in [true, false] { - let (expected_result, expected_budget) = ( - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, - loaded_accounts_data_size_limit: MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, - ..ComputeBudget::default() - }, - ); - - test!( - &[Instruction::new_with_bincode( - Pubkey::new_unique(), - &0_u8, - vec![] - ),], - expected_result, - expected_budget, - support_set_loaded_accounts_data_size_limit_ix - ); - } - - // Assert when set_loaded_accounts_data_size_limit presents more than once, - // if support_set_loaded_accounts_data_size_limit_ix then - // return DuplicateInstruction - // else - // return InstructionError - let data_size: usize = MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES; - for support_set_loaded_accounts_data_size_limit_ix in [true, false] { - let (expected_result, expected_budget) = - if support_set_loaded_accounts_data_size_limit_ix { - ( - Err(TransactionError::DuplicateInstruction(2)), - ComputeBudget::default(), - ) - } else { - ( - Err(TransactionError::InstructionError( - 1, - InstructionError::InvalidInstructionData, - )), - ComputeBudget::default(), - ) - }; - - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size as u32), - ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size as u32), - ], - expected_result, - expected_budget, - support_set_loaded_accounts_data_size_limit_ix - ); - } - } - - #[test] - fn test_process_mixed_instructions_without_compute_budget() { - let payer_keypair = Keypair::new(); - - let transaction = - SanitizedTransaction::from_transaction_for_tests(Transaction::new_signed_with_payer( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - system_instruction::transfer(&payer_keypair.pubkey(), &Pubkey::new_unique(), 2), - ], - Some(&payer_keypair.pubkey()), - &[&payer_keypair], - Hash::default(), - )); - - let mut compute_budget = ComputeBudget::default(); - let result = compute_budget.process_instructions( - transaction.message().program_instructions_iter(), - false, //not support request_units_deprecated - true, //support_set_loaded_accounts_data_size_limit_ix, - ); - - // assert process_instructions will be successful with default, - assert_eq!(Ok(PrioritizationFeeDetails::default()), result); - // assert the default compute_unit_limit is 2 times default: one for bpf ix, one for - // builtin ix. - assert_eq!( - compute_budget, - ComputeBudget { - compute_unit_limit: 2 * DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, - ..ComputeBudget::default() - } - ); - } -} diff --git a/program-runtime/src/compute_budget_processor.rs b/program-runtime/src/compute_budget_processor.rs new file mode 100644 index 00000000000000..b2c3a892493d41 --- /dev/null +++ b/program-runtime/src/compute_budget_processor.rs @@ -0,0 +1,704 @@ +//! Process compute_budget instructions to extract and sanitize limits. 
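+//! +//! A minimal usage sketch (illustrative only: `message` stands in for whatever sanitized message +//! the caller already holds, and the feature-set plumbing is the caller's responsibility): +//! ```ignore +//! let limits = process_compute_budget_instructions( +//!     message.program_instructions_iter(), +//!     &feature_set, +//! )?; +//! // The sanitized limits convert into fee budget limits for fee calculation. +//! let fee_budget_limits = FeeBudgetLimits::from(limits); +//! ```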
+use { + crate::{ + compute_budget::DEFAULT_HEAP_COST, + prioritization_fee::{PrioritizationFeeDetails, PrioritizationFeeType}, + }, + solana_sdk::{ + borsh0_10::try_from_slice_unchecked, + compute_budget::{self, ComputeBudgetInstruction}, + entrypoint::HEAP_LENGTH as MIN_HEAP_FRAME_BYTES, + feature_set::{ + add_set_tx_loaded_accounts_data_size_instruction, remove_deprecated_request_unit_ix, + FeatureSet, + }, + fee::FeeBudgetLimits, + instruction::{CompiledInstruction, InstructionError}, + pubkey::Pubkey, + transaction::TransactionError, + }, +}; + +const MAX_HEAP_FRAME_BYTES: u32 = 256 * 1024; +pub const DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT: u32 = 200_000; +pub const MAX_COMPUTE_UNIT_LIMIT: u32 = 1_400_000; + +/// The total accounts data a transaction can load is limited to 64MiB to not break +/// anyone in Mainnet-beta today. It can be set by the set_loaded_accounts_data_size_limit instruction. +pub const MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES: u32 = 64 * 1024 * 1024; + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ComputeBudgetLimits { + pub updated_heap_bytes: u32, + pub compute_unit_limit: u32, + pub compute_unit_price: u64, + pub loaded_accounts_bytes: u32, + pub deprecated_additional_fee: Option<u64>, +} + +impl Default for ComputeBudgetLimits { + fn default() -> Self { + ComputeBudgetLimits { + updated_heap_bytes: u32::try_from(MIN_HEAP_FRAME_BYTES).unwrap(), + compute_unit_limit: MAX_COMPUTE_UNIT_LIMIT, + compute_unit_price: 0, + loaded_accounts_bytes: MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, + deprecated_additional_fee: None, + } + } +} + +impl From<ComputeBudgetLimits> for FeeBudgetLimits { + fn from(val: ComputeBudgetLimits) -> Self { + let prioritization_fee = + if let Some(deprecated_additional_fee) = val.deprecated_additional_fee { + deprecated_additional_fee + } else { + let prioritization_fee_details = PrioritizationFeeDetails::new( + PrioritizationFeeType::ComputeUnitPrice(val.compute_unit_price), + u64::from(val.compute_unit_limit), + ); + prioritization_fee_details.get_fee() + }; + + FeeBudgetLimits { + // NOTE - usize::try_from(u32).unwrap() may fail if the target is 16-bit and + // `loaded_accounts_bytes` is greater than u16::MAX. In that case, panicking is proper. + loaded_accounts_data_size_limit: usize::try_from(val.loaded_accounts_bytes).unwrap(), + heap_cost: DEFAULT_HEAP_COST, + compute_unit_limit: u64::from(val.compute_unit_limit), + prioritization_fee, + } + } +} + +/// Processing compute_budget instructions could be part of tx sanitizing; a transaction whose +/// compute_budget instructions fail to process will eventually be dropped without execution, +/// so we may as well fail it early.
+/// If processing succeeds, the transaction's specific limits/requests (which could be the +/// defaults) are retrieved and returned. +pub fn process_compute_budget_instructions<'a>( + instructions: impl Iterator<Item = (&'a Pubkey, &'a CompiledInstruction)>, + feature_set: &FeatureSet, +) -> Result<ComputeBudgetLimits, TransactionError> { + let support_request_units_deprecated = + !feature_set.is_active(&remove_deprecated_request_unit_ix::id()); + let support_set_loaded_accounts_data_size_limit_ix = + feature_set.is_active(&add_set_tx_loaded_accounts_data_size_instruction::id()); + + let mut num_non_compute_budget_instructions: u32 = 0; + let mut updated_compute_unit_limit = None; + let mut updated_compute_unit_price = None; + let mut requested_heap_size = None; + let mut updated_loaded_accounts_data_size_limit = None; + let mut deprecated_additional_fee = None; + + for (i, (program_id, instruction)) in instructions.enumerate() { + if compute_budget::check_id(program_id) { + let invalid_instruction_data_error = TransactionError::InstructionError( + i as u8, + InstructionError::InvalidInstructionData, + ); + let duplicate_instruction_error = TransactionError::DuplicateInstruction(i as u8); + + match try_from_slice_unchecked(&instruction.data) { + Ok(ComputeBudgetInstruction::RequestUnitsDeprecated { + units: compute_unit_limit, + additional_fee, + }) if support_request_units_deprecated => { + if updated_compute_unit_limit.is_some() { + return Err(duplicate_instruction_error); + } + if updated_compute_unit_price.is_some() { + return Err(duplicate_instruction_error); + } + updated_compute_unit_limit = Some(compute_unit_limit); + updated_compute_unit_price = + support_deprecated_requested_units(additional_fee, compute_unit_limit); + deprecated_additional_fee = Some(u64::from(additional_fee)); + } + Ok(ComputeBudgetInstruction::RequestHeapFrame(bytes)) => { + if requested_heap_size.is_some() { + return Err(duplicate_instruction_error); + } + if sanitize_requested_heap_size(bytes) { + requested_heap_size = Some(bytes); + } else { + return Err(invalid_instruction_data_error); + } + } + Ok(ComputeBudgetInstruction::SetComputeUnitLimit(compute_unit_limit)) => { + if updated_compute_unit_limit.is_some() { + return Err(duplicate_instruction_error); + } + updated_compute_unit_limit = Some(compute_unit_limit); + } + Ok(ComputeBudgetInstruction::SetComputeUnitPrice(micro_lamports)) => { + if updated_compute_unit_price.is_some() { + return Err(duplicate_instruction_error); + } + updated_compute_unit_price = Some(micro_lamports); + } + Ok(ComputeBudgetInstruction::SetLoadedAccountsDataSizeLimit(bytes)) + if support_set_loaded_accounts_data_size_limit_ix => + { + if updated_loaded_accounts_data_size_limit.is_some() { + return Err(duplicate_instruction_error); + } + updated_loaded_accounts_data_size_limit = Some(bytes); + } + _ => return Err(invalid_instruction_data_error), + } + } else { + // only include non-request instructions in default max calc + num_non_compute_budget_instructions = + num_non_compute_budget_instructions.saturating_add(1); + } + } + + // sanitize limits + let updated_heap_bytes = requested_heap_size + .unwrap_or(u32::try_from(MIN_HEAP_FRAME_BYTES).unwrap()) // loader's default heap_size + .min(MAX_HEAP_FRAME_BYTES); + + let compute_unit_limit = updated_compute_unit_limit + .unwrap_or_else(|| { + num_non_compute_budget_instructions + .saturating_mul(DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT) + }) + .min(MAX_COMPUTE_UNIT_LIMIT); + + let compute_unit_price = updated_compute_unit_price.unwrap_or(0); + + let loaded_accounts_bytes = updated_loaded_accounts_data_size_limit
.unwrap_or(MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES) + .min(MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES); + + Ok(ComputeBudgetLimits { + updated_heap_bytes, + compute_unit_limit, + compute_unit_price, + loaded_accounts_bytes, + deprecated_additional_fee, + }) +} + +fn sanitize_requested_heap_size(bytes: u32) -> bool { + (u32::try_from(MIN_HEAP_FRAME_BYTES).unwrap()..=MAX_HEAP_FRAME_BYTES).contains(&bytes) + && bytes % 1024 == 0 +} + +// Supports request_units_deprecated ix, returns compute_unit_price from deprecated requested +// units. +fn support_deprecated_requested_units(additional_fee: u32, compute_unit_limit: u32) -> Option { + // TODO: remove support of 'Deprecated' after feature remove_deprecated_request_unit_ix::id() is activated + let prioritization_fee_details = PrioritizationFeeDetails::new( + PrioritizationFeeType::Deprecated(u64::from(additional_fee)), + u64::from(compute_unit_limit), + ); + Some(prioritization_fee_details.get_priority()) +} + +#[cfg(test)] +mod tests { + use { + super::*, + solana_sdk::{ + hash::Hash, + instruction::Instruction, + message::Message, + pubkey::Pubkey, + signature::Keypair, + signer::Signer, + system_instruction::{self}, + transaction::{SanitizedTransaction, Transaction}, + }, + }; + + macro_rules! test { + ( $instructions: expr, $expected_result: expr, $support_set_loaded_accounts_data_size_limit_ix: expr ) => { + let payer_keypair = Keypair::new(); + let tx = SanitizedTransaction::from_transaction_for_tests(Transaction::new( + &[&payer_keypair], + Message::new($instructions, Some(&payer_keypair.pubkey())), + Hash::default(), + )); + let mut feature_set = FeatureSet::default(); + feature_set.activate(&remove_deprecated_request_unit_ix::id(), 0); + if $support_set_loaded_accounts_data_size_limit_ix { + feature_set.activate(&add_set_tx_loaded_accounts_data_size_instruction::id(), 0); + } + let result = process_compute_budget_instructions( + tx.message().program_instructions_iter(), + &feature_set, + ); + assert_eq!($expected_result, result); + }; + ( $instructions: expr, $expected_result: expr ) => { + test!($instructions, $expected_result, false); + }; + } + + #[test] + fn test_process_instructions() { + // Units + test!( + &[], + Ok(ComputeBudgetLimits { + compute_unit_limit: 0, + ..ComputeBudgetLimits::default() + }) + ); + test!( + &[ + ComputeBudgetInstruction::set_compute_unit_limit(1), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ], + Ok(ComputeBudgetLimits { + compute_unit_limit: 1, + ..ComputeBudgetLimits::default() + }) + ); + test!( + &[ + ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT + 1), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ], + Ok(ComputeBudgetLimits { + compute_unit_limit: MAX_COMPUTE_UNIT_LIMIT, + ..ComputeBudgetLimits::default() + }) + ); + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT), + ], + Ok(ComputeBudgetLimits { + compute_unit_limit: MAX_COMPUTE_UNIT_LIMIT, + ..ComputeBudgetLimits::default() + }) + ); + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ComputeBudgetInstruction::set_compute_unit_limit(1), + ], + Ok(ComputeBudgetLimits { + compute_unit_limit: 1, + ..ComputeBudgetLimits::default() + }) + ); + test!( + &[ + 
ComputeBudgetInstruction::set_compute_unit_limit(1), + ComputeBudgetInstruction::set_compute_unit_price(42) + ], + Ok(ComputeBudgetLimits { + compute_unit_limit: 1, + compute_unit_price: 42, + ..ComputeBudgetLimits::default() + }) + ); + + // HeapFrame + test!( + &[], + Ok(ComputeBudgetLimits { + compute_unit_limit: 0, + ..ComputeBudgetLimits::default() + }) + ); + test!( + &[ + ComputeBudgetInstruction::request_heap_frame(40 * 1024), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ], + Ok(ComputeBudgetLimits { + compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, + updated_heap_bytes: 40 * 1024, + ..ComputeBudgetLimits::default() + }) + ); + test!( + &[ + ComputeBudgetInstruction::request_heap_frame(40 * 1024 + 1), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ], + Err(TransactionError::InstructionError( + 0, + InstructionError::InvalidInstructionData, + )) + ); + test!( + &[ + ComputeBudgetInstruction::request_heap_frame(31 * 1024), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ], + Err(TransactionError::InstructionError( + 0, + InstructionError::InvalidInstructionData, + )) + ); + test!( + &[ + ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES + 1), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ], + Err(TransactionError::InstructionError( + 0, + InstructionError::InvalidInstructionData, + )) + ); + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES), + ], + Ok(ComputeBudgetLimits { + compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, + updated_heap_bytes: MAX_HEAP_FRAME_BYTES, + ..ComputeBudgetLimits::default() + }) + ); + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ComputeBudgetInstruction::request_heap_frame(1), + ], + Err(TransactionError::InstructionError( + 3, + InstructionError::InvalidInstructionData, + )) + ); + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ], + Ok(ComputeBudgetLimits { + compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT * 7, + ..ComputeBudgetLimits::default() + }) + ); + + // Combined + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES), + ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT), + ComputeBudgetInstruction::set_compute_unit_price(u64::MAX), + ], + Ok(ComputeBudgetLimits { + compute_unit_price: u64::MAX, + compute_unit_limit: MAX_COMPUTE_UNIT_LIMIT, + updated_heap_bytes: MAX_HEAP_FRAME_BYTES, + ..ComputeBudgetLimits::default() + }) + ); + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ComputeBudgetInstruction::set_compute_unit_limit(1), + 
ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES), + ComputeBudgetInstruction::set_compute_unit_price(u64::MAX), + ], + Ok(ComputeBudgetLimits { + compute_unit_price: u64::MAX, + compute_unit_limit: 1, + updated_heap_bytes: MAX_HEAP_FRAME_BYTES, + ..ComputeBudgetLimits::default() + }) + ); + + // Duplicates + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT), + ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT - 1), + ], + Err(TransactionError::DuplicateInstruction(2)) + ); + + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ComputeBudgetInstruction::request_heap_frame(MIN_HEAP_FRAME_BYTES as u32), + ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES), + ], + Err(TransactionError::DuplicateInstruction(2)) + ); + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ComputeBudgetInstruction::set_compute_unit_price(0), + ComputeBudgetInstruction::set_compute_unit_price(u64::MAX), + ], + Err(TransactionError::DuplicateInstruction(2)) + ); + + // deprecated + test!( + &[Instruction::new_with_borsh( + compute_budget::id(), + &compute_budget::ComputeBudgetInstruction::RequestUnitsDeprecated { + units: 1_000, + additional_fee: 10 + }, + vec![] + )], + Err(TransactionError::InstructionError( + 0, + InstructionError::InvalidInstructionData, + )) + ); + } + + #[test] + fn test_process_loaded_accounts_data_size_limit_instruction() { + // Assert for empty instructions, change value of support_set_loaded_accounts_data_size_limit_ix + // will not change results, which should all be default + for support_set_loaded_accounts_data_size_limit_ix in [true, false] { + test!( + &[], + Ok(ComputeBudgetLimits { + compute_unit_limit: 0, + ..ComputeBudgetLimits::default() + }), + support_set_loaded_accounts_data_size_limit_ix + ); + } + + // Assert when set_loaded_accounts_data_size_limit presents, + // if support_set_loaded_accounts_data_size_limit_ix then + // budget is set with data_size + // else + // return InstructionError + let data_size = 1; + for support_set_loaded_accounts_data_size_limit_ix in [true, false] { + let expected_result = if support_set_loaded_accounts_data_size_limit_ix { + Ok(ComputeBudgetLimits { + compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, + loaded_accounts_bytes: data_size, + ..ComputeBudgetLimits::default() + }) + } else { + Err(TransactionError::InstructionError( + 0, + InstructionError::InvalidInstructionData, + )) + }; + + test!( + &[ + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ], + expected_result, + support_set_loaded_accounts_data_size_limit_ix + ); + } + + // Assert when set_loaded_accounts_data_size_limit presents, with greater than max value + // if support_set_loaded_accounts_data_size_limit_ix then + // budget is set to max data size + // else + // return InstructionError + let data_size = MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES + 1; + for support_set_loaded_accounts_data_size_limit_ix in [true, false] { + let expected_result = if support_set_loaded_accounts_data_size_limit_ix { + Ok(ComputeBudgetLimits { + compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, + loaded_accounts_bytes: MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, + ..ComputeBudgetLimits::default() + }) + } else { + Err(TransactionError::InstructionError( + 0, + 
InstructionError::InvalidInstructionData, + )) + }; + + test!( + &[ + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size), + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ], + expected_result, + support_set_loaded_accounts_data_size_limit_ix + ); + } + + // Assert when set_loaded_accounts_data_size_limit is not presented + // if support_set_loaded_accounts_data_size_limit_ix then + // budget is set to default data size + // else + // return + for support_set_loaded_accounts_data_size_limit_ix in [true, false] { + let expected_result = Ok(ComputeBudgetLimits { + compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, + loaded_accounts_bytes: MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, + ..ComputeBudgetLimits::default() + }); + + test!( + &[Instruction::new_with_bincode( + Pubkey::new_unique(), + &0_u8, + vec![] + ),], + expected_result, + support_set_loaded_accounts_data_size_limit_ix + ); + } + + // Assert when set_loaded_accounts_data_size_limit presents more than once, + // if support_set_loaded_accounts_data_size_limit_ix then + // return DuplicateInstruction + // else + // return InstructionError + let data_size = MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES; + for support_set_loaded_accounts_data_size_limit_ix in [true, false] { + let expected_result = if support_set_loaded_accounts_data_size_limit_ix { + Err(TransactionError::DuplicateInstruction(2)) + } else { + Err(TransactionError::InstructionError( + 1, + InstructionError::InvalidInstructionData, + )) + }; + + test!( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(data_size), + ], + expected_result, + support_set_loaded_accounts_data_size_limit_ix + ); + } + } + + #[test] + fn test_process_mixed_instructions_without_compute_budget() { + let payer_keypair = Keypair::new(); + + let transaction = + SanitizedTransaction::from_transaction_for_tests(Transaction::new_signed_with_payer( + &[ + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + system_instruction::transfer(&payer_keypair.pubkey(), &Pubkey::new_unique(), 2), + ], + Some(&payer_keypair.pubkey()), + &[&payer_keypair], + Hash::default(), + )); + + let mut feature_set = FeatureSet::default(); + feature_set.activate(&remove_deprecated_request_unit_ix::id(), 0); + feature_set.activate(&add_set_tx_loaded_accounts_data_size_instruction::id(), 0); + + let result = process_compute_budget_instructions( + transaction.message().program_instructions_iter(), + &feature_set, + ); + + // assert process_instructions will be successful with default, + // and the default compute_unit_limit is 2 times default: one for bpf ix, one for + // builtin ix. 
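+        // (i.e. 2 * DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT = 2 * 200_000 = 400_000 compute units)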
+ assert_eq!( + result, + Ok(ComputeBudgetLimits { + compute_unit_limit: 2 * DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, + ..ComputeBudgetLimits::default() + }) + ); + } + + fn try_prioritization_fee_from_deprecated_requested_units( + additional_fee: u32, + compute_unit_limit: u32, + ) { + let payer_keypair = Keypair::new(); + let tx = SanitizedTransaction::from_transaction_for_tests(Transaction::new( + &[&payer_keypair], + Message::new( + &[Instruction::new_with_borsh( + compute_budget::id(), + &compute_budget::ComputeBudgetInstruction::RequestUnitsDeprecated { + units: compute_unit_limit, + additional_fee, + }, + vec![], + )], + Some(&payer_keypair.pubkey()), + ), + Hash::default(), + )); + + // sucessfully process deprecated instruction + let compute_budget_limits = process_compute_budget_instructions( + tx.message().program_instructions_iter(), + &FeatureSet::default(), + ) + .unwrap(); + + // assert compute_budget_limit + let expected_compute_unit_price = (additional_fee as u128) + .saturating_mul(1_000_000) + .checked_div(compute_unit_limit as u128) + .map(|cu_price| u64::try_from(cu_price).unwrap_or(u64::MAX)) + .unwrap(); + let expected_compute_unit_limit = compute_unit_limit.min(MAX_COMPUTE_UNIT_LIMIT); + assert_eq!( + compute_budget_limits.compute_unit_price, + expected_compute_unit_price + ); + assert_eq!( + compute_budget_limits.compute_unit_limit, + expected_compute_unit_limit + ); + + // assert fee_budget_limits + let fee_budget_limits = FeeBudgetLimits::from(compute_budget_limits); + assert_eq!( + fee_budget_limits.prioritization_fee, + u64::from(additional_fee) + ); + assert_eq!( + fee_budget_limits.compute_unit_limit, + u64::from(expected_compute_unit_limit) + ); + } + + #[test] + fn test_support_deprecated_requested_units() { + // a normal case + try_prioritization_fee_from_deprecated_requested_units(647, 6002); + + // requesting cu limit more than MAX, div result will be round down + try_prioritization_fee_from_deprecated_requested_units( + 640, + MAX_COMPUTE_UNIT_LIMIT + 606_002, + ); + + // requesting cu limit more than MAX, div result will round up + try_prioritization_fee_from_deprecated_requested_units( + 764, + MAX_COMPUTE_UNIT_LIMIT + 606_004, + ); + } +} diff --git a/program-runtime/src/invoke_context.rs b/program-runtime/src/invoke_context.rs index be95fca637ccdb..6ee87fefa7ccdc 100644 --- a/program-runtime/src/invoke_context.rs +++ b/program-runtime/src/invoke_context.rs @@ -769,7 +769,7 @@ pub fn mock_process_instruction BlockRelation; + + /// Returns the epoch of the given slot + fn slot_epoch(&self, _slot: Slot) -> Option { + Some(0) + } } /// Provides information about current working slot, and its ancestors @@ -454,6 +459,14 @@ pub struct LoadedPrograms { pub latest_root_epoch: Epoch, /// Environments of the current epoch pub environments: ProgramRuntimeEnvironments, + /// Anticipated replacement for `environments` at the next epoch + /// + /// This is `None` during most of an epoch, and only `Some` around the boundaries (at the end and beginning of an epoch). + /// More precisely, it starts with the recompilation phase a few hundred slots before the epoch boundary, + /// and it ends with the first rerooting after the epoch boundary. + pub upcoming_environments: Option, + /// List of loaded programs which should be recompiled before the next epoch (but don't have to). 
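+    /// This list is cleared at the first rerooting after the epoch boundary, at which point any +    /// entries still compiled for the outdated environment are pruned from the cache.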
+ pub programs_to_recompile: Vec<(Pubkey, Arc)>, pub stats: Stats, pub fork_graph: Option>>, } @@ -476,6 +489,8 @@ impl Default for LoadedPrograms { latest_root_slot: 0, latest_root_epoch: 0, environments: ProgramRuntimeEnvironments::default(), + upcoming_environments: None, + programs_to_recompile: Vec::default(), stats: Stats::default(), fork_graph: None, } @@ -562,7 +577,12 @@ impl LoadedPrograms { } /// Returns the current environments depending on the given epoch - pub fn get_environments_for_epoch(&self, _epoch: Epoch) -> &ProgramRuntimeEnvironments { + pub fn get_environments_for_epoch(&self, epoch: Epoch) -> &ProgramRuntimeEnvironments { + if epoch != self.latest_root_epoch { + if let Some(upcoming_environments) = self.upcoming_environments.as_ref() { + return upcoming_environments; + } + } &self.environments } @@ -625,22 +645,6 @@ impl LoadedPrograms { entry } - /// On the epoch boundary this removes all programs of the outdated feature set - pub fn prune_feature_set_transition(&mut self) { - for second_level in self.entries.values_mut() { - second_level.retain(|entry| { - if Self::matches_environment(entry, &self.environments) { - return true; - } - self.stats - .prunes_environment - .fetch_add(1, Ordering::Relaxed); - false - }); - } - self.remove_programs_with_no_entries(); - } - pub fn prune_by_deployment_slot(&mut self, slot: Slot) { self.entries.retain(|_key, second_level| { *second_level = second_level @@ -663,6 +667,15 @@ impl LoadedPrograms { error!("Failed to lock fork graph for reading."); return; }; + let mut recompilation_phase_ends = false; + if self.latest_root_epoch != new_root_epoch { + self.latest_root_epoch = new_root_epoch; + if let Some(upcoming_environments) = self.upcoming_environments.take() { + recompilation_phase_ends = true; + self.environments = upcoming_environments; + self.programs_to_recompile.clear(); + } + } for second_level in self.entries.values_mut() { // Remove entries un/re/deployed on orphan forks let mut first_ancestor_found = false; @@ -692,6 +705,15 @@ impl LoadedPrograms { return false; } } + // Remove outdated environment of previous feature set + if recompilation_phase_ends + && !Self::matches_environment(entry, &self.environments) + { + self.stats + .prunes_environment + .fetch_add(1, Ordering::Relaxed); + return false; + } true }) .cloned() @@ -701,9 +723,6 @@ impl LoadedPrograms { self.remove_programs_with_no_entries(); debug_assert!(self.latest_root_slot <= new_root_slot); self.latest_root_slot = new_root_slot; - if self.latest_root_epoch < new_root_epoch { - self.latest_root_epoch = new_root_epoch; - } } fn matches_environment( @@ -759,14 +778,29 @@ impl LoadedPrograms { let environments = self.get_environments_for_epoch(working_slot.current_epoch()); let mut missing = Vec::new(); let mut unloaded = Vec::new(); + let current_slot = working_slot.current_slot(); let found = keys .filter_map(|(key, (match_criteria, count))| { if let Some(second_level) = self.entries.get(&key) { for entry in second_level.iter().rev() { - let current_slot = working_slot.current_slot(); + let is_ancestor = if let Some(fork_graph) = &self.fork_graph { + fork_graph + .read() + .map(|fork_graph_r| { + matches!( + fork_graph_r + .relationship(entry.deployment_slot, current_slot), + BlockRelation::Ancestor + ) + }) + .unwrap_or(false) + } else { + working_slot.is_ancestor(entry.deployment_slot) + }; + if entry.deployment_slot <= self.latest_root_slot || entry.deployment_slot == current_slot - || working_slot.is_ancestor(entry.deployment_slot) + || is_ancestor 
{ if current_slot >= entry.effective_slot { if !Self::is_entry_usable(entry, current_slot, &match_criteria) { @@ -818,7 +852,7 @@ impl LoadedPrograms { ExtractedPrograms { loaded: LoadedProgramsForTxBatch { entries: found, - slot: working_slot.current_slot(), + slot: current_slot, environments: environments.clone(), }, missing, @@ -1488,52 +1522,19 @@ mod tests { } } - struct TestWorkingSlot { - slot: Slot, - fork: Vec, - slot_pos: usize, - } - - impl TestWorkingSlot { - fn new(slot: Slot, fork: &[Slot]) -> Self { - let mut fork = fork.to_vec(); - fork.sort(); - let slot_pos = fork - .iter() - .position(|current| *current == slot) - .expect("The fork didn't have the slot in it"); - TestWorkingSlot { - slot, - fork, - slot_pos, - } - } - - fn update_slot(&mut self, slot: Slot) { - self.slot = slot; - self.slot_pos = self - .fork - .iter() - .position(|current| *current == slot) - .expect("The fork didn't have the slot in it"); - } - } + struct TestWorkingSlot(pub Slot); impl WorkingSlot for TestWorkingSlot { fn current_slot(&self) -> Slot { - self.slot + self.0 } fn current_epoch(&self) -> Epoch { 0 } - fn is_ancestor(&self, other: Slot) -> bool { - self.fork - .iter() - .position(|current| *current == other) - .map(|other_pos| other_pos < self.slot_pos) - .unwrap_or(false) + fn is_ancestor(&self, _other: Slot) -> bool { + false } } @@ -1571,7 +1572,7 @@ mod tests { let mut fork_graph = TestForkGraphSpecific::default(); fork_graph.insert_fork(&[0, 10, 20, 22]); - fork_graph.insert_fork(&[0, 5, 11, 15, 16, 19, 21, 23]); + fork_graph.insert_fork(&[0, 5, 11, 15, 16, 18, 19, 21, 23]); fork_graph.insert_fork(&[0, 5, 11, 25, 27]); let fork_graph = Arc::new(RwLock::new(fork_graph)); @@ -1628,13 +1629,12 @@ mod tests { // 23 // Testing fork 0 - 10 - 12 - 22 with current slot at 22 - let working_slot = TestWorkingSlot::new(22, &[0, 10, 20, 22]); let ExtractedPrograms { loaded: found, missing, unloaded, } = cache.extract( - &working_slot, + &TestWorkingSlot(22), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program2, (LoadedProgramMatchCriteria::NoCriteria, 2)), @@ -1651,14 +1651,13 @@ mod tests { assert!(missing.contains(&(program3, 3))); assert!(unloaded.is_empty()); - // Testing fork 0 - 5 - 11 - 15 - 16 with current slot at 16 - let mut working_slot = TestWorkingSlot::new(15, &[0, 5, 11, 15, 16, 18, 19, 23]); + // Testing fork 0 - 5 - 11 - 15 - 16 with current slot at 15 let ExtractedPrograms { loaded: found, missing, unloaded, } = cache.extract( - &working_slot, + &TestWorkingSlot(15), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -1681,13 +1680,12 @@ mod tests { assert!(unloaded.is_empty()); // Testing the same fork above, but current slot is now 18 (equal to effective slot of program4). - working_slot.update_slot(18); let ExtractedPrograms { loaded: found, missing, unloaded, } = cache.extract( - &working_slot, + &TestWorkingSlot(18), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -1707,13 +1705,12 @@ mod tests { assert!(unloaded.is_empty()); // Testing the same fork above, but current slot is now 23 (future slot than effective slot of program4). 
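Before the remaining fork cases, it may help to restate the per-entry check that `extract()` now performs, combining the fork-membership test above with the effective-slot gate. This is a simplified sketch (it ignores the match-criteria and delay-visibility branches); `Slot` is a bare `u64` and the fork-graph lookup is abstracted to a closure, so the names are illustrative rather than the crate's API.

```rust
// Sketch of the visibility test extract() applies to each cached entry.
type Slot = u64;

fn entry_visible(
    deployment_slot: Slot,
    effective_slot: Slot,
    latest_root_slot: Slot,
    current_slot: Slot,
    is_ancestor: impl Fn(Slot) -> bool, // fork-graph relationship() in the real code
) -> bool {
    // An entry is on the working fork if it was deployed at or before the
    // latest root, at the current slot itself, or on an ancestor of the
    // current slot...
    let on_fork = deployment_slot <= latest_root_slot
        || deployment_slot == current_slot
        || is_ancestor(deployment_slot);
    // ...and it only becomes usable once the current slot reaches its
    // effective slot.
    on_fork && current_slot >= effective_slot
}

fn main() {
    // Entry deployed at slot 11, effective at 12, root at 5, current slot 15,
    // with slot 11 an ancestor of slot 15 on this fork: visible.
    assert!(entry_visible(11, 12, 5, 15, |slot| slot == 11));
    // Same entry viewed from slot 11 itself: not yet effective.
    assert!(!entry_visible(11, 12, 5, 11, |_| false));
}
```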
- working_slot.update_slot(23); let ExtractedPrograms { loaded: found, missing, unloaded, } = cache.extract( - &working_slot, + &TestWorkingSlot(23), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -1733,13 +1730,12 @@ mod tests { assert!(unloaded.is_empty()); // Testing fork 0 - 5 - 11 - 15 - 16 with current slot at 11 - let working_slot = TestWorkingSlot::new(11, &[0, 5, 11, 15, 16]); let ExtractedPrograms { loaded: found, missing, unloaded, } = cache.extract( - &working_slot, + &TestWorkingSlot(11), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -1772,13 +1768,12 @@ mod tests { assert!(!cache.replenish(program4, test_program).0); // Testing fork 0 - 5 - 11 - 15 - 16 - 19 - 21 - 23 with current slot at 19 - let working_slot = TestWorkingSlot::new(19, &[0, 5, 11, 15, 16, 18, 19, 21, 23]); let ExtractedPrograms { loaded: found, missing, unloaded, } = cache.extract( - &working_slot, + &TestWorkingSlot(19), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -1798,13 +1793,12 @@ mod tests { // Testing fork 0 - 5 - 11 - 15 - 16 - 19 - 21 - 23 with current slot at 21 // This would cause program4 deployed at slot 19 to be expired. - let working_slot = TestWorkingSlot::new(21, &[0, 5, 11, 15, 16, 18, 19, 21, 23]); let ExtractedPrograms { loaded: found, missing, unloaded, } = cache.extract( - &working_slot, + &TestWorkingSlot(21), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -1843,14 +1837,13 @@ mod tests { // | // 23 - // Testing fork 11 - 15 - 16- 19 - 22 with root at 5 and current slot at 22 - let working_slot = TestWorkingSlot::new(22, &[5, 11, 15, 16, 19, 22, 23]); + // Testing fork 11 - 15 - 16- 19 - 22 with root at 5 and current slot at 21 let ExtractedPrograms { loaded: found, missing, unloaded, } = cache.extract( - &working_slot, + &TestWorkingSlot(21), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -1861,21 +1854,20 @@ mod tests { ); // Since the fork was pruned, we should not find the entry deployed at slot 20. 
- assert!(match_slot(&found, &program1, 0, 22)); - assert!(match_slot(&found, &program2, 11, 22)); - assert!(match_slot(&found, &program4, 15, 22)); + assert!(match_slot(&found, &program1, 0, 21)); + assert!(match_slot(&found, &program2, 11, 21)); + assert!(match_slot(&found, &program4, 15, 21)); assert!(missing.contains(&(program3, 1))); assert!(unloaded.is_empty()); // Testing fork 0 - 5 - 11 - 25 - 27 with current slot at 27 - let working_slot = TestWorkingSlot::new(27, &[11, 25, 27]); let ExtractedPrograms { loaded: found, missing: _, unloaded, } = cache.extract( - &working_slot, + &TestWorkingSlot(27), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -1909,13 +1901,12 @@ mod tests { // 23 // Testing fork 16, 19, 23, with root at 15, current slot at 23 - let working_slot = TestWorkingSlot::new(23, &[16, 19, 23]); let ExtractedPrograms { loaded: found, missing, unloaded, } = cache.extract( - &working_slot, + &TestWorkingSlot(23), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -1955,7 +1946,7 @@ mod tests { let mut fork_graph = TestForkGraphSpecific::default(); fork_graph.insert_fork(&[0, 10, 20, 22]); - fork_graph.insert_fork(&[0, 5, 11, 15, 16, 19, 21, 23]); + fork_graph.insert_fork(&[0, 5, 11, 12, 15, 16, 18, 19, 21, 23]); fork_graph.insert_fork(&[0, 5, 11, 25, 27]); let fork_graph = Arc::new(RwLock::new(fork_graph)); @@ -1973,13 +1964,12 @@ mod tests { assert!(!cache.replenish(program3, new_test_loaded_program(25, 26)).0); // Testing fork 0 - 5 - 11 - 15 - 16 - 19 - 21 - 23 with current slot at 19 - let working_slot = TestWorkingSlot::new(12, &[0, 5, 11, 12, 15, 16, 18, 19, 21, 23]); let ExtractedPrograms { loaded: found, missing, unloaded, } = cache.extract( - &working_slot, + &TestWorkingSlot(12), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -2000,7 +1990,7 @@ mod tests { missing, unloaded, } = cache.extract( - &working_slot, + &TestWorkingSlot(12), vec![ ( program1, @@ -2078,13 +2068,12 @@ mod tests { ); // Testing fork 0 - 5 - 11 - 15 - 16 - 19 - 21 - 23 with current slot at 19 - let working_slot = TestWorkingSlot::new(19, &[0, 5, 11, 12, 15, 16, 18, 19, 21, 23]); let ExtractedPrograms { loaded: found, missing, unloaded, } = cache.extract( - &working_slot, + &TestWorkingSlot(19), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -2100,13 +2089,12 @@ mod tests { assert!(unloaded.is_empty()); // Testing fork 0 - 5 - 11 - 25 - 27 with current slot at 27 - let working_slot = TestWorkingSlot::new(27, &[0, 5, 11, 25, 27]); let ExtractedPrograms { loaded: found, missing, unloaded, } = cache.extract( - &working_slot, + &TestWorkingSlot(27), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -2122,13 +2110,12 @@ mod tests { assert!(missing.is_empty()); // Testing fork 0 - 10 - 20 - 22 with current slot at 22 - let working_slot = TestWorkingSlot::new(22, &[0, 10, 20, 22]); let ExtractedPrograms { loaded: found, missing, unloaded, } = cache.extract( - &working_slot, + &TestWorkingSlot(22), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -2164,7 +2151,7 @@ mod tests { let mut fork_graph = TestForkGraphSpecific::default(); fork_graph.insert_fork(&[0, 
10, 20, 22]); - fork_graph.insert_fork(&[0, 5, 11, 15, 16, 19, 21, 23]); + fork_graph.insert_fork(&[0, 5, 11, 12, 15, 16, 18, 19, 21, 23]); fork_graph.insert_fork(&[0, 5, 11, 25, 27]); let fork_graph = Arc::new(RwLock::new(fork_graph)); cache.set_fork_graph(fork_graph); @@ -2193,13 +2180,12 @@ mod tests { assert!(!cache.replenish(program1, test_program).0); // Testing fork 0 - 5 - 11 - 15 - 16 - 19 - 21 - 23 with current slot at 19 - let working_slot = TestWorkingSlot::new(12, &[0, 5, 11, 12, 15, 16, 18, 19, 21, 23]); let ExtractedPrograms { loaded: found, missing, unloaded, } = cache.extract( - &working_slot, + &TestWorkingSlot(12), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -2217,13 +2203,12 @@ mod tests { // Testing fork 0 - 5 - 11 - 12 - 15 - 16 - 19 - 21 - 23 with current slot at 15 // This would cause program4 deployed at slot 15 to be expired. - let working_slot = TestWorkingSlot::new(15, &[0, 5, 11, 15, 16, 18, 19, 21, 23]); let ExtractedPrograms { loaded: found, missing, unloaded, } = cache.extract( - &working_slot, + &TestWorkingSlot(15), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -2290,13 +2275,12 @@ mod tests { cache.prune(10, 0); - let working_slot = TestWorkingSlot::new(20, &[0, 10, 20]); let ExtractedPrograms { loaded: found, missing: _, unloaded, } = cache.extract( - &working_slot, + &TestWorkingSlot(20), vec![(program1, (LoadedProgramMatchCriteria::NoCriteria, 1))].into_iter(), ); assert!(unloaded.is_empty()); @@ -2328,7 +2312,7 @@ mod tests { // deployed at slot 0. let mut fork_graph = TestForkGraphSpecific::default(); fork_graph.insert_fork(&[0, 10, 20]); - fork_graph.insert_fork(&[0, 5]); + fork_graph.insert_fork(&[0, 5, 6]); let fork_graph = Arc::new(RwLock::new(fork_graph)); cache.set_fork_graph(fork_graph); @@ -2339,13 +2323,12 @@ mod tests { let program2 = Pubkey::new_unique(); assert!(!cache.replenish(program2, new_test_loaded_program(10, 11)).0); - let working_slot = TestWorkingSlot::new(20, &[0, 10, 20]); let ExtractedPrograms { loaded: found, missing: _, unloaded: _, } = cache.extract( - &working_slot, + &TestWorkingSlot(20), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -2356,13 +2339,12 @@ mod tests { assert!(match_slot(&found, &program1, 0, 20)); assert!(match_slot(&found, &program2, 10, 20)); - let working_slot = TestWorkingSlot::new(6, &[0, 5, 6]); let ExtractedPrograms { loaded: found, missing, unloaded: _, } = cache.extract( - &working_slot, + &TestWorkingSlot(6), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -2377,13 +2359,12 @@ mod tests { // On fork chaining from slot 5, the entry deployed at slot 0 will become visible. 
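The behavior these assertions rely on, dropping exactly the entries deployed at the pruned slot so that older deployments become visible again, can be sketched as follows. `Entry` is a stand-in for the cache's per-program entry type, not the crate's definition.

```rust
// Sketch of prune_by_deployment_slot's effect on one program's entry list.
type Slot = u64;

#[derive(Debug, PartialEq)]
struct Entry {
    deployment_slot: Slot,
}

fn prune_by_deployment_slot(entries: &mut Vec<Entry>, slot: Slot) {
    // Keep every entry except those deployed at the pruned slot.
    entries.retain(|entry| entry.deployment_slot != slot);
}

fn main() {
    let mut entries = vec![Entry { deployment_slot: 0 }, Entry { deployment_slot: 5 }];
    prune_by_deployment_slot(&mut entries, 5);
    // Only the slot-0 deployment remains, so lookups on forks chaining from
    // slot 5 now resolve to it; a program with no remaining entries would be
    // reported as missing by extract().
    assert_eq!(entries, vec![Entry { deployment_slot: 0 }]);
}
```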
cache.prune_by_deployment_slot(5); - let working_slot = TestWorkingSlot::new(20, &[0, 10, 20]); let ExtractedPrograms { loaded: found, missing: _, unloaded: _, } = cache.extract( - &working_slot, + &TestWorkingSlot(20), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -2394,13 +2375,12 @@ mod tests { assert!(match_slot(&found, &program1, 0, 20)); assert!(match_slot(&found, &program2, 10, 20)); - let working_slot = TestWorkingSlot::new(6, &[0, 5, 6]); let ExtractedPrograms { loaded: found, missing, unloaded: _, } = cache.extract( - &working_slot, + &TestWorkingSlot(6), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -2415,13 +2395,12 @@ mod tests { // As there is no other entry for program2, extract() will return it as missing. cache.prune_by_deployment_slot(10); - let working_slot = TestWorkingSlot::new(20, &[0, 10, 20]); let ExtractedPrograms { loaded: found, missing: _, unloaded: _, } = cache.extract( - &working_slot, + &TestWorkingSlot(20), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs index 5192319aecaae3..37e848471a8b3a 100644 --- a/program-test/src/lib.rs +++ b/program-test/src/lib.rs @@ -888,7 +888,7 @@ impl ProgramTest { .read() .unwrap() .working_bank() - .register_recent_blockhash(&Hash::new_unique()); + .register_unique_recent_blockhash_for_test(); } }); @@ -1040,7 +1040,7 @@ impl ProgramTestContext { .read() .unwrap() .working_bank() - .register_recent_blockhash(&Hash::new_unique()); + .register_unique_recent_blockhash_for_test(); } }), ); diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 1b789fb7f72299..fe1623388c4188 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -76,9 +76,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd7d5a2cecb58716e47d67d5703a249964b14c7be1ec3cad3affc295b2d1c35d" +checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a" dependencies = [ "cfg-if 1.0.0", "getrandom 0.2.10", @@ -156,6 +156,20 @@ version = "1.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" +[[package]] +name = "aquamarine" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df752953c49ce90719c7bf1fc587bc8227aed04732ea0c0f85e5397d7fdbd1a1" +dependencies = [ + "include_dir", + "itertools", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "arc-swap" version = "1.5.0" @@ -411,7 +425,7 @@ checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -565,7 +579,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -576,9 +590,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.3.3" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42" +checksum = 
"327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" dependencies = [ "serde", ] @@ -1207,7 +1221,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -1218,7 +1232,7 @@ checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -1306,6 +1320,12 @@ dependencies = [ "zeroize", ] +[[package]] +name = "difflib" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8" + [[package]] name = "digest" version = "0.8.1" @@ -1396,9 +1416,15 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] +[[package]] +name = "downcast" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" + [[package]] name = "eager" version = "0.1.0" @@ -1499,7 +1525,7 @@ checksum = "eecf8589574ce9b895052fa12d69af7a233f99e6107f5cb8dd1044f2a17bfdcb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -1643,6 +1669,15 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "float-cmp" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98de4bbd547a563b716d8dfa9aad1cb19bfab00f4fa09a6a4ed21dbcf44ce9c4" +dependencies = [ + "num-traits", +] + [[package]] name = "fnv" version = "1.0.7" @@ -1673,6 +1708,12 @@ dependencies = [ "percent-encoding 2.3.0", ] +[[package]] +name = "fragile" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" + [[package]] name = "fs-err" version = "2.9.0" @@ -1693,9 +1734,9 @@ checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" +checksum = "da0290714b38af9b4a7b094b8a37086d1b4e61f2df9122c3cad2577669145335" dependencies = [ "futures-channel", "futures-core", @@ -1708,9 +1749,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" +checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb" dependencies = [ "futures-core", "futures-sink", @@ -1718,15 +1759,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" +checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" [[package]] name = "futures-executor" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" +checksum = "0f4fb8693db0cf099eadcca0efe2a5a22e4550f98ed16aba6c48700da29597bc" dependencies 
= [ "futures-core", "futures-task", @@ -1736,38 +1777,38 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" +checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa" [[package]] name = "futures-macro" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" +checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] name = "futures-sink" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" +checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817" [[package]] name = "futures-task" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" +checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2" [[package]] name = "futures-util" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" +checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" dependencies = [ "futures 0.1.31", "futures-channel", @@ -1869,7 +1910,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8af59a261bcf42f45d1b261232847b9b850ba0a1419d6100698246fb66e9240" dependencies = [ "arc-swap", - "futures 0.3.28", + "futures 0.3.29", "log", "reqwest", "serde", @@ -1944,7 +1985,7 @@ version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ - "ahash 0.8.5", + "ahash 0.8.6", ] [[package]] @@ -2106,7 +2147,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca815a891b24fdfb243fa3239c86154392b0953ee584aa1a2a1f66d20cbe75cc" dependencies = [ "bytes", - "futures 0.3.28", + "futures 0.3.29", "headers", "http", "hyper", @@ -2218,11 +2259,30 @@ dependencies = [ "version_check", ] +[[package]] +name = "include_dir" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18762faeff7122e89e0857b02f7ce6fcc0d101d5e9ad2ad7846cc01d61b7f19e" +dependencies = [ + "include_dir_macros", +] + +[[package]] +name = "include_dir_macros" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b139284b5cf57ecfa712bcc66950bb635b31aff41c188e8a4cfc758eca374a3f" +dependencies = [ + "proc-macro2", + "quote", +] + [[package]] name = "index_list" -version = "0.2.7" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a9d968042a4902e08810946fc7cd5851eb75e80301342305af755ca06cb82ce" +checksum = "70891286cb8e844fdfcf1178b47569699f9e20b5ecc4b45a6240a64771444638" [[package]] name = "indexmap" @@ -2236,9 +2296,9 @@ dependencies = [ [[package]] 
name = "indexmap" -version = "2.0.2" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8adf3ddd720272c6ea8bf59463c04e0f93d0bbf7c5439b691bca2987e0270897" +checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" dependencies = [ "equivalent", "hashbrown 0.14.1", @@ -2324,7 +2384,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2b99d4207e2a04fb4581746903c2bb7eb376f88de9c699d0f3e10feeac0cd3a" dependencies = [ "derive_more", - "futures 0.3.28", + "futures 0.3.29", "jsonrpc-core", "jsonrpc-pubsub", "jsonrpc-server-utils", @@ -2342,7 +2402,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14f7f76aef2d054868398427f6c54943cf3d1caa9a7ec7d0c38d69df97a965eb" dependencies = [ - "futures 0.3.28", + "futures 0.3.29", "futures-executor", "futures-util", "log", @@ -2357,7 +2417,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b51da17abecbdab3e3d4f26b01c5ec075e88d3abe3ab3b05dc9aa69392764ec0" dependencies = [ - "futures 0.3.28", + "futures 0.3.29", "jsonrpc-client-transports", ] @@ -2379,7 +2439,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1dea6e07251d9ce6a552abfb5d7ad6bc290a4596c8dcc3d795fae2bbdc1f3ff" dependencies = [ - "futures 0.3.28", + "futures 0.3.29", "hyper", "jsonrpc-core", "jsonrpc-server-utils", @@ -2395,7 +2455,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "382bb0206323ca7cda3dcd7e245cea86d37d02457a02a975e3378fb149a48845" dependencies = [ - "futures 0.3.28", + "futures 0.3.29", "jsonrpc-core", "jsonrpc-server-utils", "log", @@ -2410,7 +2470,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240f87695e6c6f62fb37f05c02c04953cf68d6408b8c1c89de85c7a0125b1011" dependencies = [ - "futures 0.3.28", + "futures 0.3.29", "jsonrpc-core", "lazy_static", "log", @@ -2426,7 +2486,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa4fdea130485b572c39a460d50888beb00afb3e35de23ccd7fad8ff19f0e0d4" dependencies = [ "bytes", - "futures 0.3.28", + "futures 0.3.29", "globset", "jsonrpc-core", "lazy_static", @@ -2608,9 +2668,9 @@ dependencies = [ [[package]] name = "light-poseidon" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "949bdd22e4ed93481d45e9a6badb34b99132bcad0c8a8d4f05c42f7dcc7b90bc" +checksum = "a5b439809cdfc0d86ecc7317f1724df13dfa665df48991b79e90e689411451f7" dependencies = [ "ark-bn254", "ark-ff", @@ -2619,9 +2679,9 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.4.3" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09fc20d2ca12cb9f044c93e3bd6d32d523e6e2ec3db4f7b2939cd99026ecd3f0" +checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" [[package]] name = "lock_api" @@ -2787,6 +2847,33 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "mockall" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c84490118f2ee2d74570d114f3d0493cbf02790df303d2707606c3e14e07c96" +dependencies = [ + "cfg-if 1.0.0", + "downcast", + "fragile", + "lazy_static", + 
"mockall_derive", + "predicates", + "predicates-tree", +] + +[[package]] +name = "mockall_derive" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb" +dependencies = [ + "cfg-if 1.0.0", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "modular-bitfield" version = "0.11.2" @@ -2866,6 +2953,12 @@ dependencies = [ "minimal-lexical", ] +[[package]] +name = "normalize-line-endings" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" + [[package]] name = "num" version = "0.2.1" @@ -2931,7 +3024,7 @@ checksum = "cfb77679af88f8b125209d354a202862602672222e7f2313fdd6dc349bad4712" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -2997,11 +3090,11 @@ dependencies = [ [[package]] name = "num_enum" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70bf6736f74634d299d00086f02986875b3c2d924781a6a2cb6c201e73da0ceb" +checksum = "683751d591e6d81200c39fb0d1032608b77724f34114db54f571ff1317b337c0" dependencies = [ - "num_enum_derive 0.7.0", + "num_enum_derive 0.7.1", ] [[package]] @@ -3013,19 +3106,19 @@ dependencies = [ "proc-macro-crate 1.1.3", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] name = "num_enum_derive" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56ea360eafe1022f7cc56cd7b869ed57330fb2453d0c7831d99b74c65d2f5597" +checksum = "6c11e44798ad209ccdd91fc192f0526a369a01234f7373e1b141c96d7cee4f0e" dependencies = [ "proc-macro-crate 1.1.3", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -3081,11 +3174,11 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.57" +version = "0.10.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bac25ee399abb46215765b1cb35bc0212377e58a061560d8b29b024fd0430e7c" +checksum = "7a257ad03cd8fb16ad4172fedf8094451e1af1c4b70097636ef2eac9a5f0cc33" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.4.1", "cfg-if 1.0.0", "foreign-types", "libc", @@ -3113,18 +3206,18 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "111.25.0+1.1.1t" +version = "300.1.6+3.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3173cd3626c43e3854b1b727422a276e568d9ec5fe8cec197822cf52cfb743d6" +checksum = "439fac53e092cd7442a3660c85dde4643ab3b5bd39040912388dcdabf6b88085" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.92" +version = "0.9.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db7e971c2c2bba161b2d2fdf37080177eff520b3bc044787c7f1f5f9e78d869b" +checksum = "40a4130519a360279579c2053038317e40eff64d13fd3f004f9e1b72b8a6aaf9" dependencies = [ "cc", "libc", @@ -3190,7 +3283,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9981e32fb75e004cc148f5fb70342f393830e0a4aa62e3cc93b50976218d42b6" dependencies = [ - "futures 0.3.28", + "futures 0.3.29", "libc", "log", "rand 0.7.3", @@ -3438,6 +3531,36 @@ version = "0.2.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea" +[[package]] +name = "predicates" +version = "2.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59230a63c37f3e18569bdb90e4a89cbf5bf8b06fea0b84e65ea10cc4df47addd" +dependencies = [ + "difflib", + "float-cmp", + "itertools", + "normalize-line-endings", + "predicates-core", + "regex", +] + +[[package]] +name = "predicates-core" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b794032607612e7abeb4db69adb4e33590fa6cf1149e95fd7cb00e634b92f174" + +[[package]] +name = "predicates-tree" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "368ba315fb8c5052ab692e68a0eefec6ec57b23a36959c14496f0b0df2c0cecf" +dependencies = [ + "predicates-core", + "termtree", +] + [[package]] name = "pretty-hex" version = "0.3.0" @@ -3461,7 +3584,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" dependencies = [ "proc-macro2", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -3602,7 +3725,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -3630,7 +3753,7 @@ checksum = "2c78e758510582acc40acb90458401172d41f1016f8c9dde89e49677afb7eec1" dependencies = [ "bytes", "rand 0.8.5", - "ring", + "ring 0.16.20", "rustc-hash", "rustls", "rustls-native-certs", @@ -3769,7 +3892,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" dependencies = [ "pem", - "ring", + "ring 0.16.20", "time", "yasna", ] @@ -3798,6 +3921,15 @@ dependencies = [ "bitflags 1.3.2", ] +[[package]] +name = "redox_syscall" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +dependencies = [ + "bitflags 1.3.2", +] + [[package]] name = "redox_users" version = "0.4.0" @@ -3907,11 +4039,25 @@ dependencies = [ "libc", "once_cell", "spin 0.5.2", - "untrusted", + "untrusted 0.7.1", "web-sys", "winapi 0.3.9", ] +[[package]] +name = "ring" +version = "0.17.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9babe80d5c16becf6594aa32ad2be8fe08498e7ae60b77de8df700e67f191d7e" +dependencies = [ + "cc", + "getrandom 0.2.10", + "libc", + "spin 0.9.3", + "untrusted 0.9.0", + "windows-sys 0.48.0", +] + [[package]] name = "rocksdb" version = "0.21.0" @@ -3984,11 +4130,11 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.3" +version = "0.38.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac5ffa1efe7548069688cd7028f32591853cd7b5b756d41bcffd2353e4fc75b4" +checksum = "2b426b0506e5d50a7d8dafcf2e81471400deb602392c7dd110815afb4eaf02a3" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.4.1", "errno", "libc", "linux-raw-sys", @@ -3997,12 +4143,12 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.7" +version = "0.21.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" +checksum = 
"446e14c5cda4f3f30fe71863c34ec70f5ac79d6087097ad0bb433e1be5edf04c" dependencies = [ "log", - "ring", + "ring 0.17.3", "rustls-webpki", "sct", ] @@ -4039,12 +4185,12 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.101.4" +version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d93931baf2d282fff8d3a532bbfd7653f734643161b87e3e01e59a04439bf0d" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring", - "untrusted", + "ring 0.17.3", + "untrusted 0.9.0", ] [[package]] @@ -4110,8 +4256,8 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" dependencies = [ - "ring", - "untrusted", + "ring 0.16.20", + "untrusted 0.7.1", ] [[package]] @@ -4154,9 +4300,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.189" +version = "1.0.192" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e422a44e74ad4001bdc8eede9a4570ab52f71190e9c076d14369f38b9200537" +checksum = "bca2a08484b285dcb282d0f67b26cadc0df8b19f8c12502c13d966bf9482f001" dependencies = [ "serde_derive", ] @@ -4172,13 +4318,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.189" +version = "1.0.192" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e48d1f918009ce3145511378cf68d613e3b3d9137d67272562080d68a2b32d5" +checksum = "d6c7207fbec9faa48073f3e3074cbe553af6ea512d7c21ba46e434e70ea9fbc1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -4223,7 +4369,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -4232,7 +4378,7 @@ version = "0.9.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a49e178e4452f45cb61d0cd8cebc1b0fafd3e41929e996cef79aa3aca91f574" dependencies = [ - "indexmap 2.0.2", + "indexmap 2.1.0", "itoa", "ryu", "serde", @@ -4456,7 +4602,7 @@ checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ "base64 0.13.1", "bytes", - "futures 0.3.28", + "futures 0.3.29", "httparse", "log", "rand 0.8.5", @@ -4512,7 +4658,7 @@ dependencies = [ "num-derive 0.4.1", "num-traits", "num_cpus", - "num_enum 0.7.0", + "num_enum 0.7.1", "ouroboros", "percentage", "qualifier_attr", @@ -4569,7 +4715,7 @@ name = "solana-banks-client" version = "1.18.0" dependencies = [ "borsh 0.10.3", - "futures 0.3.28", + "futures 0.3.29", "solana-banks-interface", "solana-program", "solana-sdk", @@ -4594,7 +4740,7 @@ version = "1.18.0" dependencies = [ "bincode", "crossbeam-channel", - "futures 0.3.28", + "futures 0.3.29", "solana-accounts-db", "solana-banks-interface", "solana-client", @@ -4659,7 +4805,7 @@ dependencies = [ "log", "memmap2", "modular-bitfield", - "num_enum 0.7.0", + "num_enum 0.7.1", "rand 0.8.5", "solana-measure", "solana-sdk", @@ -4727,9 +4873,9 @@ dependencies = [ "async-trait", "bincode", "dashmap", - "futures 0.3.28", + "futures 0.3.29", "futures-util", - "indexmap 2.0.2", + "indexmap 2.1.0", "indicatif", "log", "quinn", @@ -4779,7 +4925,7 @@ dependencies = [ "bincode", "crossbeam-channel", "futures-util", - "indexmap 2.0.2", + "indexmap 2.1.0", "log", "rand 0.8.5", "rayon", @@ -4804,14 +4950,14 @@ dependencies = [ "dashmap", "eager", "etcd-client", - "futures 0.3.28", + "futures 0.3.29", "histogram", "itertools", 
"lazy_static", "log", "lru", "min-max-heap", - "num_enum 0.7.0", + "num_enum 0.7.1", "prio-graph", "quinn", "rand 0.8.5", @@ -4971,7 +5117,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -5030,7 +5176,7 @@ dependencies = [ "clap 2.33.3", "crossbeam-channel", "flate2", - "indexmap 2.0.2", + "indexmap 2.1.0", "itertools", "log", "lru", @@ -5074,21 +5220,22 @@ version = "1.18.0" dependencies = [ "assert_matches", "bincode", - "bitflags 2.3.3", + "bitflags 2.4.1", "byteorder 1.5.0", "chrono", "chrono-humanize", "crossbeam-channel", "dashmap", "fs_extra", - "futures 0.3.28", + "futures 0.3.29", "itertools", "lazy_static", "libc", "log", "lru", + "mockall", "num_cpus", - "num_enum 0.7.0", + "num_enum 0.7.1", "prost", "rand 0.8.5", "rand_chacha 0.3.1", @@ -5211,7 +5358,7 @@ checksum = "8b8a731ed60e89177c8a7ab05fe0f1511cedd3e70e773f288f9de33a9cfdc21e" name = "solana-perf" version = "1.18.0" dependencies = [ - "ahash 0.8.5", + "ahash 0.8.6", "bincode", "bv", "caps", @@ -5260,7 +5407,7 @@ dependencies = [ "ark-serialize", "base64 0.21.5", "bincode", - "bitflags 2.3.3", + "bitflags 2.4.1", "blake3", "borsh 0.10.3", "borsh 0.9.3", @@ -5385,7 +5532,7 @@ version = "1.18.0" dependencies = [ "async-mutex", "async-trait", - "futures 0.3.28", + "futures 0.3.29", "itertools", "lazy_static", "log", @@ -5543,6 +5690,7 @@ dependencies = [ name = "solana-runtime" version = "1.18.0" dependencies = [ + "aquamarine", "arrayref", "base64 0.21.5", "bincode", @@ -5565,11 +5713,12 @@ dependencies = [ "lru", "lz4", "memmap2", + "mockall", "modular-bitfield", "num-derive 0.4.1", "num-traits", "num_cpus", - "num_enum 0.7.0", + "num_enum 0.7.1", "ouroboros", "percentage", "qualifier_attr", @@ -6043,7 +6192,7 @@ dependencies = [ "assert_matches", "base64 0.21.5", "bincode", - "bitflags 2.3.3", + "bitflags 2.4.1", "borsh 0.10.3", "bs58", "bytemuck", @@ -6063,7 +6212,7 @@ dependencies = [ "memmap2", "num-derive 0.4.1", "num-traits", - "num_enum 0.7.0", + "num_enum 0.7.1", "pbkdf2 0.11.0", "qstring", "qualifier_attr", @@ -6096,7 +6245,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -6136,7 +6285,7 @@ dependencies = [ "bzip2", "enum-iterator", "flate2", - "futures 0.3.28", + "futures 0.3.29", "goauth", "http", "hyper", @@ -6182,7 +6331,7 @@ dependencies = [ "crossbeam-channel", "futures-util", "histogram", - "indexmap 2.0.2", + "indexmap 2.1.0", "itertools", "libc", "log", @@ -6265,7 +6414,7 @@ dependencies = [ "async-trait", "bincode", "futures-util", - "indexmap 2.0.2", + "indexmap 2.1.0", "indicatif", "log", "rayon", @@ -6310,7 +6459,7 @@ dependencies = [ "bincode", "bytes", "crossbeam-channel", - "futures 0.3.28", + "futures 0.3.29", "itertools", "log", "lru", @@ -6598,7 +6747,7 @@ checksum = "fadbefec4f3c678215ca72bd71862697bb06b41fd77c0088902dd3203354387b" dependencies = [ "quote", "spl-discriminator-syn", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -6610,7 +6759,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.38", + "syn 2.0.39", "thiserror", ] @@ -6658,7 +6807,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -6700,7 +6849,7 @@ dependencies = [ "bytemuck", "num-derive 0.4.1", "num-traits", - "num_enum 0.7.0", + "num_enum 0.7.1", "solana-program", "solana-zk-token-sdk", "spl-memo", @@ -6831,9 +6980,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.38" +version = "2.0.39" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b" +checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a" dependencies = [ "proc-macro2", "quote", @@ -6921,7 +7070,7 @@ checksum = "1c38a012bed6fb9681d3bf71ffaa4f88f3b4b9ed3198cda6e4c8462d24d4bb80" dependencies = [ "anyhow", "fnv", - "futures 0.3.28", + "futures 0.3.29", "humantime", "opentelemetry", "pin-project", @@ -6950,13 +7099,13 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.8.0" +version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" +checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" dependencies = [ "cfg-if 1.0.0", "fastrand", - "redox_syscall 0.3.5", + "redox_syscall 0.4.1", "rustix", "windows-sys 0.48.0", ] @@ -6970,6 +7119,12 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "termtree" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" + [[package]] name = "test-case" version = "3.2.1" @@ -6989,7 +7144,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -7001,7 +7156,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", "test-case-core", ] @@ -7037,7 +7192,7 @@ checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -7160,7 +7315,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -7515,6 +7670,12 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + [[package]] name = "uriparse" version = "0.6.4" @@ -7637,7 +7798,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", "wasm-bindgen-shared", ] @@ -7671,7 +7832,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -7941,22 +8102,22 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.7.11" +version = "0.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c19fae0c8a9efc6a8281f2e623db8af1db9e57852e04cde3e754dd2dc29340f" +checksum = "81ba595b9f2772fbee2312de30eeb80ec773b4cb2f1e8098db024afadda6c06f" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.11" +version = "0.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc56589e9ddd1f1c28d4b4b5c773ce232910a6bb67a70133d61c9e347585efe9" +checksum = "772666c41fb6dceaf520b564b962d738a8e1a83b41bd48945f50837aed78bb1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -7976,7 +8137,7 @@ checksum = 
"ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] diff --git a/programs/sbf/Cargo.toml b/programs/sbf/Cargo.toml index 6f069a3f5bfd8b..7ab496de8eebd4 100644 --- a/programs/sbf/Cargo.toml +++ b/programs/sbf/Cargo.toml @@ -196,6 +196,13 @@ targets = ["x86_64-unknown-linux-gnu"] # and we end up with two versions of `solana-program` and `solana-zk-token-sdk` and all of their # dependencies in our build tree. # +# If you are developing downstream using non-crates-io solana-program (local or +# forked repo, or from github rev, eg), duplicate the following patch statements +# in your Cargo.toml. If you still hit duplicate-type errors with the patch +# statements in place, run `cargo update -p solana-program` and/or `cargo update +# -p solana-zk-token-sdk` to remove extraneous versions from your Cargo.lock +# file. +# # There is a similar override in `../../Cargo.toml`. Please keep both comments and the # overrides in sync. solana-program = { path = "../../sdk/program" } diff --git a/programs/sbf/tests/programs.rs b/programs/sbf/tests/programs.rs index 2cc8a76875bb32..97d5c2ceb58756 100644 --- a/programs/sbf/tests/programs.rs +++ b/programs/sbf/tests/programs.rs @@ -20,7 +20,10 @@ use { TransactionResults, }, solana_ledger::token_balances::collect_token_balances, - solana_program_runtime::{compute_budget::ComputeBudget, timings::ExecuteTimings}, + solana_program_runtime::{ + compute_budget::ComputeBudget, + compute_budget_processor::process_compute_budget_instructions, timings::ExecuteTimings, + }, solana_rbpf::vm::ContextObject, solana_runtime::{ bank::TransactionBalancesSet, @@ -3835,10 +3838,12 @@ fn test_program_fees() { let expected_normal_fee = fee_structure.calculate_fee( &sanitized_message, congestion_multiplier, - &ComputeBudget::fee_budget_limits( + &process_compute_budget_instructions( sanitized_message.program_instructions_iter(), &feature_set, - ), + ) + .unwrap_or_default() + .into(), true, false, ); @@ -3862,10 +3867,12 @@ fn test_program_fees() { let expected_prioritized_fee = fee_structure.calculate_fee( &sanitized_message, congestion_multiplier, - &ComputeBudget::fee_budget_limits( + &process_compute_budget_instructions( sanitized_message.program_instructions_iter(), &feature_set, - ), + ) + .unwrap_or_default() + .into(), true, false, ); @@ -4006,7 +4013,7 @@ fn test_cpi_account_ownership_writability() { TEST_FORBID_WRITE_AFTER_OWNERSHIP_CHANGE_IN_CALLEE, TEST_FORBID_WRITE_AFTER_OWNERSHIP_CHANGE_IN_CALLER, ] { - bank.register_recent_blockhash(&Hash::new_unique()); + bank.register_unique_recent_blockhash_for_test(); let account = AccountSharedData::new(42, account_size, &invoke_program_id); bank.store_account(&account_keypair.pubkey(), &account); diff --git a/programs/zk-token-proof/src/lib.rs b/programs/zk-token-proof/src/lib.rs index 3e43c564e70cef..0aa75c4ef5cff5 100644 --- a/programs/zk-token-proof/src/lib.rs +++ b/programs/zk-token-proof/src/lib.rs @@ -17,20 +17,20 @@ use { }; pub const CLOSE_CONTEXT_STATE_COMPUTE_UNITS: u64 = 3_300; -pub const VERIFY_ZERO_BALANCE_COMPUTE_UNITS: u64 = 6012; -pub const VERIFY_WITHDRAW_COMPUTE_UNITS: u64 = 112_454; -pub const VERIFY_CIPHERTEXT_CIPHERTEXT_EQUALITY_COMPUTE_UNITS: u64 = 7_943; -pub const VERIFY_TRANSFER_COMPUTE_UNITS: u64 = 219_290; -pub const VERIFY_TRANSFER_WITH_FEE_COMPUTE_UNITS: u64 = 407_121; -pub const VERIFY_PUBKEY_VALIDITY_COMPUTE_UNITS: u64 = 2_619; -pub const VERIFY_RANGE_PROOF_U64_COMPUTE_UNITS: u64 = 105_066; -pub 
const VERIFY_BATCHED_RANGE_PROOF_U64_COMPUTE_UNITS: u64 = 111_478; -pub const VERIFY_BATCHED_RANGE_PROOF_U128_COMPUTE_UNITS: u64 = 204_512; +pub const VERIFY_ZERO_BALANCE_COMPUTE_UNITS: u64 = 6_000; +pub const VERIFY_WITHDRAW_COMPUTE_UNITS: u64 = 110_000; +pub const VERIFY_CIPHERTEXT_CIPHERTEXT_EQUALITY_COMPUTE_UNITS: u64 = 8_000; +pub const VERIFY_TRANSFER_COMPUTE_UNITS: u64 = 219_000; +pub const VERIFY_TRANSFER_WITH_FEE_COMPUTE_UNITS: u64 = 407_000; +pub const VERIFY_PUBKEY_VALIDITY_COMPUTE_UNITS: u64 = 2_600; +pub const VERIFY_RANGE_PROOF_U64_COMPUTE_UNITS: u64 = 105_000; +pub const VERIFY_BATCHED_RANGE_PROOF_U64_COMPUTE_UNITS: u64 = 111_000; +pub const VERIFY_BATCHED_RANGE_PROOF_U128_COMPUTE_UNITS: u64 = 200_000; pub const VERIFY_BATCHED_RANGE_PROOF_U256_COMPUTE_UNITS: u64 = 368_000; -pub const VERIFY_CIPHERTEXT_COMMITMENT_EQUALITY_COMPUTE_UNITS: u64 = 6_424; -pub const VERIFY_GROUPED_CIPHERTEXT_2_HANDLES_VALIDITY_COMPUTE_UNITS: u64 = 6_440; -pub const VERIFY_BATCHED_GROUPED_CIPHERTEXT_2_HANDLES_VALIDITY_COMPUTE_UNITS: u64 = 12_575; -pub const VERIFY_FEE_SIGMA_COMPUTE_UNITS: u64 = 6_547; +pub const VERIFY_CIPHERTEXT_COMMITMENT_EQUALITY_COMPUTE_UNITS: u64 = 6_400; +pub const VERIFY_GROUPED_CIPHERTEXT_2_HANDLES_VALIDITY_COMPUTE_UNITS: u64 = 6_400; +pub const VERIFY_BATCHED_GROUPED_CIPHERTEXT_2_HANDLES_VALIDITY_COMPUTE_UNITS: u64 = 13_000; +pub const VERIFY_FEE_SIGMA_COMPUTE_UNITS: u64 = 6_500; fn process_verify_proof(invoke_context: &mut InvokeContext) -> Result<(), InstructionError> where diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index a36ec712ebfeb8..5e62dff9ce55d3 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -1321,9 +1321,9 @@ impl JsonRpcRequestProcessor { .unwrap() .highest_super_majority_root() { - let result = self.blockstore.get_block_time(slot); + let result = self.blockstore.get_rooted_block_time(slot); self.check_blockstore_root(&result, slot)?; - if result.is_err() || matches!(result, Ok(None)) { + if result.is_err() { if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { let bigtable_result = bigtable_ledger_storage.get_confirmed_block(slot).await; self.check_bigtable_result(&bigtable_result)?; @@ -1333,7 +1333,7 @@ impl JsonRpcRequestProcessor { } } self.check_slot_cleaned_up(&result, slot)?; - Ok(result.ok().unwrap_or(None)) + Ok(result.ok()) } else { let r_bank_forks = self.bank_forks.read().unwrap(); if let Some(bank) = r_bank_forks.get(slot) { diff --git a/rpc/src/rpc_pubsub.rs b/rpc/src/rpc_pubsub.rs index 6bb6fd7854642c..19244415bd61ac 100644 --- a/rpc/src/rpc_pubsub.rs +++ b/rpc/src/rpc_pubsub.rs @@ -659,7 +659,7 @@ mod tests { current_slot: Slot, ) -> transaction::Result<()> { bank_forks - .write() + .read() .unwrap() .get(current_slot) .unwrap() @@ -1166,7 +1166,7 @@ mod tests { let tx = system_transaction::transfer(&alice, &bob.pubkey(), 100, blockhash); bank_forks - .write() + .read() .unwrap() .get(1) .unwrap() @@ -1221,7 +1221,7 @@ mod tests { let tx = system_transaction::transfer(&alice, &bob.pubkey(), 100, blockhash); bank_forks - .write() + .read() .unwrap() .get(1) .unwrap() diff --git a/rpc/src/rpc_subscriptions.rs b/rpc/src/rpc_subscriptions.rs index 7a5a5628d16dfc..5183716e00d27a 100644 --- a/rpc/src/rpc_subscriptions.rs +++ b/rpc/src/rpc_subscriptions.rs @@ -1838,7 +1838,7 @@ pub(crate) mod tests { &stake::program::id(), ); bank_forks - .write() + .read() .unwrap() .get(0) .unwrap() @@ -2436,8 +2436,7 @@ pub(crate) mod tests { } = create_genesis_config(100); let bank = Bank::new_for_tests(&genesis_config); let blockhash = 
bank.last_blockhash(); - let bank_forks_arc = BankForks::new_rw_arc(bank); - let mut bank_forks = bank_forks_arc.write().unwrap(); + let bank_forks = BankForks::new_rw_arc(bank); let alice = Keypair::new(); let past_bank_tx = @@ -2448,24 +2447,28 @@ pub(crate) mod tests { system_transaction::transfer(&mint_keypair, &alice.pubkey(), 3, blockhash); bank_forks + .read() + .unwrap() .get(0) .unwrap() .process_transaction(&past_bank_tx) .unwrap(); let next_bank = Bank::new_from_parent( - bank_forks.get(0).unwrap(), + bank_forks.read().unwrap().get(0).unwrap(), &solana_sdk::pubkey::new_rand(), 1, ); - bank_forks.insert(next_bank); + bank_forks.write().unwrap().insert(next_bank); bank_forks + .read() + .unwrap() .get(1) .unwrap() .process_transaction(&processed_tx) .unwrap(); - let bank1 = bank_forks[1].clone(); + let bank1 = bank_forks.read().unwrap().get(1).unwrap().clone(); let mut cache0 = BlockCommitment::default(); cache0.increase_confirmation_stake(1, 10); @@ -2483,19 +2486,16 @@ pub(crate) mod tests { }, ); - // Drop the write locked bank_forks - drop(bank_forks); - let exit = Arc::new(AtomicBool::new(false)); let optimistically_confirmed_bank = - OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks_arc); + OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); let max_complete_rewards_slot = Arc::new(AtomicU64::default()); let subscriptions = Arc::new(RpcSubscriptions::new_for_tests( exit, max_complete_transaction_status_slot, max_complete_rewards_slot, - bank_forks_arc, + bank_forks, Arc::new(RwLock::new(block_commitment_cache)), optimistically_confirmed_bank, )); @@ -2818,7 +2818,7 @@ pub(crate) mod tests { // Add the same transaction to the unfrozen 2nd bank bank_forks - .write() + .read() .unwrap() .get(2) .unwrap() diff --git a/rpc/src/transaction_notifier_interface.rs b/rpc/src/transaction_notifier_interface.rs index ab765d1207fe27..d09a207b1c6e18 100644 --- a/rpc/src/transaction_notifier_interface.rs +++ b/rpc/src/transaction_notifier_interface.rs @@ -1,7 +1,7 @@ use { solana_sdk::{clock::Slot, signature::Signature, transaction::SanitizedTransaction}, solana_transaction_status::TransactionStatusMeta, - std::sync::{Arc, RwLock}, + std::sync::Arc, }; pub trait TransactionNotifier { @@ -15,4 +15,4 @@ pub trait TransactionNotifier { ); } -pub type TransactionNotifierLock = Arc>; +pub type TransactionNotifierArc = Arc; diff --git a/rpc/src/transaction_status_service.rs b/rpc/src/transaction_status_service.rs index 193efb69fa481f..b98f0831518675 100644 --- a/rpc/src/transaction_status_service.rs +++ b/rpc/src/transaction_status_service.rs @@ -1,5 +1,5 @@ use { - crate::transaction_notifier_interface::TransactionNotifierLock, + crate::transaction_notifier_interface::TransactionNotifierArc, crossbeam_channel::{Receiver, RecvTimeoutError}, itertools::izip, solana_accounts_db::transaction_results::{DurableNonceFee, TransactionExecutionDetails}, @@ -29,7 +29,7 @@ impl TransactionStatusService { write_transaction_status_receiver: Receiver, max_complete_transaction_status_slot: Arc, enable_rpc_transaction_history: bool, - transaction_notifier: Option, + transaction_notifier: Option, blockstore: Arc, enable_extended_tx_metadata_storage: bool, exit: Arc, @@ -60,7 +60,7 @@ impl TransactionStatusService { write_transaction_status_receiver: &Receiver, max_complete_transaction_status_slot: &Arc, enable_rpc_transaction_history: bool, - transaction_notifier: Option, + transaction_notifier: 
Option<TransactionNotifierArc>, blockstore: &Blockstore, enable_extended_tx_metadata_storage: bool, ) -> Result<(), RecvTimeoutError> { @@ -169,7 +169,7 @@ impl TransactionStatusService { }; if let Some(transaction_notifier) = transaction_notifier.as_ref() { - transaction_notifier.write().unwrap().notify_transaction( + transaction_notifier.notify_transaction( slot, transaction_index, transaction.signature(), @@ -255,7 +255,7 @@ pub(crate) mod tests { std::{ sync::{ atomic::{AtomicBool, Ordering}, - Arc, RwLock, + Arc, }, thread::sleep, time::Duration, @@ -432,7 +432,7 @@ pub(crate) mod tests { transaction_indexes: vec![transaction_index], }; - let test_notifier = Arc::new(RwLock::new(TestTransactionNotifier::new())); + let test_notifier = Arc::new(TestTransactionNotifier::new()); let exit = Arc::new(AtomicBool::new(false)); let transaction_status_service = TransactionStatusService::new( @@ -452,16 +452,15 @@ pub(crate) mod tests { exit.store(true, Ordering::Relaxed); transaction_status_service.join().unwrap(); - let notifier = test_notifier.read().unwrap(); - assert_eq!(notifier.notifications.len(), 1); + assert_eq!(test_notifier.notifications.len(), 1); let key = TestNotifierKey { slot, transaction_index, signature, }; - assert!(notifier.notifications.contains_key(&key)); + assert!(test_notifier.notifications.contains_key(&key)); - let result = &*notifier.notifications.get(&key).unwrap(); + let result = test_notifier.notifications.get(&key).unwrap(); assert_eq!( expected_transaction.signature(), result.transaction.signature() diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index 2d15c7acbace71..f0509811497037 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -10,6 +10,7 @@ license = { workspace = true } edition = { workspace = true } [dependencies] +aquamarine = { workspace = true } arrayref = { workspace = true } base64 = { workspace = true } bincode = { workspace = true } @@ -32,6 +33,7 @@ log = { workspace = true } lru = { workspace = true } lz4 = { workspace = true } memmap2 = { workspace = true } +mockall = { workspace = true } modular-bitfield = { workspace = true } num-derive = { workspace = true } num-traits = { workspace = true } diff --git a/runtime/benches/accounts.rs b/runtime/benches/accounts.rs index 43361669244578..993c22d2a04e18 100644 --- a/runtime/benches/accounts.rs +++ b/runtime/benches/accounts.rs @@ -42,7 +42,7 @@ fn deposit_many(bank: &Bank, pubkeys: &mut Vec<Pubkey>, num: usize) -> Result<() AccountSharedData::new((t + 1) as u64, 0, AccountSharedData::default().owner()); pubkeys.push(pubkey); assert!(bank.get_account(&pubkey).is_none()); - bank.deposit(&pubkey, (t + 1) as u64)?; + test_utils::deposit(bank, &pubkey, (t + 1) as u64)?; assert_eq!(bank.get_account(&pubkey).unwrap(), account); } Ok(()) @@ -80,7 +80,7 @@ fn test_accounts_squash(bencher: &mut Bencher) { &Pubkey::default(), slot, )); - next_bank.deposit(&pubkeys[0], 1).unwrap(); + test_utils::deposit(&next_bank, &pubkeys[0], 1).unwrap(); next_bank.squash(); slot += 1; prev_bank = next_bank; diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 66d9a37e5c9c0e..2b8cbe34926100 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -43,7 +43,7 @@ use { builtins::{BuiltinPrototype, BUILTINS}, epoch_rewards_hasher::hash_rewards_into_partitions, epoch_stakes::{EpochStakes, NodeVoteAccounts}, - inline_feature_gate_program, + installed_scheduler_pool::{BankWithScheduler, InstalledSchedulerRwLock}, runtime_config::RuntimeConfig, serde_snapshot::BankIncrementalSnapshotPersistence, snapshot_hash::SnapshotHash, @@ -69,17 
+69,18 @@ use { }, solana_accounts_db::{ account_overrides::AccountOverrides, - account_rent_state::RentState, accounts::{ AccountAddressFilter, Accounts, LoadedTransaction, PubkeyAccountSlot, RewardInterval, TransactionLoadResult, }, accounts_db::{ - AccountShrinkThreshold, AccountStorageEntry, AccountsDbConfig, + AccountShrinkThreshold, AccountStorageEntry, AccountsDb, AccountsDbConfig, CalcAccountsHashDataSource, VerifyAccountsHashAndLamportsConfig, ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS, ACCOUNTS_DB_CONFIG_FOR_TESTING, }, - accounts_hash::{AccountsHash, CalcAccountsHashConfig, HashStats, IncrementalAccountsHash}, + accounts_hash::{ + AccountHash, AccountsHash, CalcAccountsHashConfig, HashStats, IncrementalAccountsHash, + }, accounts_index::{AccountSecondaryIndexes, IndexKey, ScanConfig, ScanResult, ZeroLamport}, accounts_partition::{self, Partition, PartitionIndex}, accounts_update_notifier_interface::AccountsUpdateNotifier, @@ -102,11 +103,13 @@ use { }, solana_bpf_loader_program::syscalls::create_program_runtime_environment_v1, solana_cost_model::cost_tracker::CostTracker, + solana_loader_v4_program::create_program_runtime_environment_v2, solana_measure::{measure, measure::Measure, measure_us}, solana_perf::perf_libs, solana_program_runtime::{ accounts_data_meter::MAX_ACCOUNTS_DATA_LEN, - compute_budget::{self, ComputeBudget}, + compute_budget::ComputeBudget, + compute_budget_processor::process_compute_budget_instructions, invoke_context::BuiltinFunctionWithContext, loaded_programs::{ LoadProgramMetrics, LoadedProgram, LoadedProgramMatchCriteria, LoadedProgramType, @@ -136,10 +139,8 @@ use { epoch_schedule::EpochSchedule, feature, feature_set::{ - self, add_set_tx_loaded_accounts_data_size_instruction, - include_loaded_accounts_data_size_in_fee_calculation, - remove_congestion_multiplier_from_fee_calculation, remove_deprecated_request_unit_ix, - FeatureSet, + self, include_loaded_accounts_data_size_in_fee_calculation, + remove_congestion_multiplier_from_fee_calculation, FeatureSet, }, fee::FeeStructure, fee_calculator::{FeeCalculator, FeeRateGovernor}, @@ -149,7 +150,6 @@ use { incinerator, inflation::Inflation, instruction::InstructionError, - lamports::LamportsError, loader_v4::{self, LoaderV4State, LoaderV4Status}, message::{AccountKeys, SanitizedMessage}, native_loader, @@ -185,7 +185,7 @@ use { borrow::Cow, cell::RefCell, collections::{HashMap, HashSet}, - convert::{TryFrom, TryInto}, + convert::TryFrom, fmt, mem, ops::{AddAssign, RangeInclusive}, path::PathBuf, @@ -196,7 +196,7 @@ use { AtomicBool, AtomicI64, AtomicU64, AtomicUsize, Ordering::{self, AcqRel, Acquire, Relaxed}, }, - Arc, LockResult, RwLock, RwLockReadGuard, RwLockWriteGuard, + Arc, LockResult, Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard, }, thread::Builder, time::{Duration, Instant}, @@ -216,12 +216,12 @@ mod address_lookup_table; pub mod bank_hash_details; mod builtin_programs; pub mod epoch_accounts_hash_utils; +mod fee_distribution; mod metrics; -mod replace_account; mod serde_snapshot; mod sysvar_cache; #[cfg(test)] -mod tests; +pub(crate) mod tests; mod transaction_account_state_info; pub const SECONDS_PER_YEAR: f64 = 365.25 * 24.0 * 60.0 * 60.0; @@ -508,6 +508,7 @@ impl PartialEq for Bank { return true; } let Self { + skipped_rewrites: _, rc: _, status_cache: _, blockhash_queue, @@ -816,6 +817,10 @@ pub struct Bank { /// The change to accounts data size in this Bank, due to off-chain events (i.e. 
rent collection) accounts_data_size_delta_off_chain: AtomicI64, + /// Until the skipped rewrites feature is activated, it is possible to skip rewrites and still include + /// the account hash of the accounts that would have been rewritten, as the bank hash expects. + skipped_rewrites: Mutex<HashMap<Pubkey, AccountHash>>, + /// Transaction fee structure pub fee_structure: FeeStructure, @@ -942,7 +947,6 @@ impl WorkingSlot for Bank { self.ancestors.contains_key(&other) } } - #[derive(Debug, Default)] /// result of calculating the stake rewards at end of epoch struct StakeRewardCalculation { @@ -1014,6 +1018,7 @@ impl Bank { fn default_with_accounts(accounts: Accounts) -> Self { let mut bank = Self { + skipped_rewrites: Mutex::default(), incremental_snapshot_persistence: None, rc: BankRc::new(accounts, Slot::default()), status_cache: Arc::<RwLock<BankStatusCache>>::default(), @@ -1345,6 +1350,7 @@ impl Bank { let accounts_data_size_initial = parent.load_accounts_data_size(); let mut new = Self { + skipped_rewrites: Mutex::default(), incremental_snapshot_persistence: None, rc, status_cache, @@ -1437,11 +1443,10 @@ impl Bank { }); // Following code may touch AccountsDb, requiring proper ancestors - let parent_epoch = parent.epoch(); let (_, update_epoch_time_us) = measure_us!({ - if parent_epoch < new.epoch() { + if parent.epoch() < new.epoch() { new.process_new_epoch( - parent_epoch, + parent.epoch(), parent.slot(), parent.block_height(), reward_calc_tracer, @@ -1456,11 +1461,71 @@ impl Bank { } }); + let (_, recompilation_time_us) = measure_us!({ + // Recompile loaded programs one at a time before the next epoch hits + let (_epoch, slot_index) = new.get_epoch_and_slot_index(new.slot()); + let slots_in_epoch = new.get_slots_in_epoch(new.epoch()); + let slots_in_recompilation_phase = + (solana_program_runtime::loaded_programs::MAX_LOADED_ENTRY_COUNT as u64) + .min(slots_in_epoch) + .checked_div(2) + .unwrap(); + let mut loaded_programs_cache = new.loaded_programs_cache.write().unwrap(); + if loaded_programs_cache.upcoming_environments.is_some() { + if let Some((key, program_to_recompile)) = + loaded_programs_cache.programs_to_recompile.pop() + { + drop(loaded_programs_cache); + let recompiled = new.load_program(&key, false, Some(program_to_recompile)); + let mut loaded_programs_cache = new.loaded_programs_cache.write().unwrap(); + loaded_programs_cache.replenish(key, recompiled); + } + } else if new.epoch() != loaded_programs_cache.latest_root_epoch + || slot_index.saturating_add(slots_in_recompilation_phase) >= slots_in_epoch + { + // Anticipate the upcoming program runtime environment for the next epoch, + // so we can try to recompile loaded programs before the feature transition hits. 
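+ // Note: the cache write lock is released while the upcoming feature set and + // runtime environments are computed below, then re-acquired to install them.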
+ drop(loaded_programs_cache); + let (feature_set, _new_feature_activations) = new.compute_active_feature_set(true); + let mut loaded_programs_cache = new.loaded_programs_cache.write().unwrap(); + let program_runtime_environment_v1 = create_program_runtime_environment_v1( + &feature_set, + &new.runtime_config.compute_budget.unwrap_or_default(), + false, /* deployment */ + false, /* debugging_features */ + ) + .unwrap(); + let program_runtime_environment_v2 = create_program_runtime_environment_v2( + &new.runtime_config.compute_budget.unwrap_or_default(), + false, /* debugging_features */ + ); + let mut upcoming_environments = loaded_programs_cache.environments.clone(); + let changed_program_runtime_v1 = + *upcoming_environments.program_runtime_v1 != program_runtime_environment_v1; + let changed_program_runtime_v2 = + *upcoming_environments.program_runtime_v2 != program_runtime_environment_v2; + if changed_program_runtime_v1 { + upcoming_environments.program_runtime_v1 = + Arc::new(program_runtime_environment_v1); + } + if changed_program_runtime_v2 { + upcoming_environments.program_runtime_v2 = + Arc::new(program_runtime_environment_v2); + } + loaded_programs_cache.upcoming_environments = Some(upcoming_environments); + loaded_programs_cache.programs_to_recompile = loaded_programs_cache + .get_entries_sorted_by_tx_usage( + changed_program_runtime_v1, + changed_program_runtime_v2, + ); + } + }); + // Update sysvars before processing transactions let (_, update_sysvars_time_us) = measure_us!({ new.update_slot_hashes(); - new.update_stake_history(Some(parent_epoch)); - new.update_clock(Some(parent_epoch)); + new.update_stake_history(Some(parent.epoch())); + new.update_clock(Some(parent.epoch())); new.update_fees(); new.update_last_restart_slot() }); @@ -1488,6 +1553,7 @@ impl Bank { feature_set_time_us, ancestors_time_us, update_epoch_time_us, + recompilation_time_us, update_sysvars_time_us, fill_sysvar_cache_time_us, }, @@ -1799,6 +1865,7 @@ impl Bank { ); let stakes_accounts_load_duration = now.elapsed(); let mut bank = Self { + skipped_rewrites: Mutex::default(), incremental_snapshot_persistence: fields.incremental_snapshot_persistence, rc: bank_rc, status_cache: Arc::<RwLock<BankStatusCache>>::default(), @@ -3672,62 +3739,6 @@ impl Bank { stake_weighted_timestamp } - // Distribute collected transaction fees for this slot to collector_id (= current leader). - // - // Each validator is incentivized to process more transactions to earn more transaction fees. - // Transaction fees are rewarded for the computing resource utilization cost, directly - // proportional to their actual processing power. - // - // collector_id is rotated according to stake-weighted leader schedule. So the opportunity of - // earning transaction fees are fairly distributed by stake. And missing the opportunity - // (not producing a block as a leader) earns nothing. So, being online is incentivized as a - // form of transaction fees as well. - // - // On the other hand, rent fees are distributed under slightly different philosophy, while - // still being stake-weighted. 
- // Ref: distribute_rent_to_validators - fn collect_fees(&self) { - let collector_fees = self.collector_fees.load(Relaxed); - - if collector_fees != 0 { - let (deposit, mut burn) = self.fee_rate_governor.burn(collector_fees); - // burn a portion of fees - debug!( - "distributed fee: {} (rounded from: {}, burned: {})", - deposit, collector_fees, burn - ); - - match self.deposit(&self.collector_id, deposit) { - Ok(post_balance) => { - if deposit != 0 { - self.rewards.write().unwrap().push(( - self.collector_id, - RewardInfo { - reward_type: RewardType::Fee, - lamports: deposit as i64, - post_balance, - commission: None, - }, - )); - } - } - Err(_) => { - error!( - "Burning {} fee instead of crediting {}", - deposit, self.collector_id - ); - datapoint_error!( - "bank-burned_fee", - ("slot", self.slot(), i64), - ("num_lamports", deposit, i64) - ); - burn += deposit; - } - } - self.capitalization.fetch_sub(burn, Relaxed); - } - } - pub fn rehash(&self) { let mut hash = self.hash.write().unwrap(); let new = self.hash_internal_state(); @@ -3753,8 +3764,8 @@ impl Bank { if *hash == Hash::default() { // finish up any deferred changes to account state self.collect_rent_eagerly(); - self.collect_fees(); - self.distribute_rent(); + self.distribute_transaction_fees(); + self.distribute_rent_fees(); self.update_slot_history(); self.run_incinerator(); @@ -3857,12 +3868,14 @@ impl Bank { self.accounts_data_size_initial += account.data().len() as u64; } - // highest staked node is the first collector + // Highest staked node is the first collector but if a genesis config + // doesn't define any staked nodes, we assume this genesis config is for + // testing and set the collector id to a unique pubkey. self.collector_id = self .stakes_cache .stakes() .highest_staked_node() - .unwrap_or_default(); + .unwrap_or_else(Pubkey::new_unique); self.blockhash_queue.write().unwrap().genesis_hash( &genesis_config.hash(), @@ -4090,10 +4103,12 @@ impl Bank { self.fee_structure.calculate_fee( message, lamports_per_signature, - &ComputeBudget::fee_budget_limits( + &process_compute_budget_instructions( message.program_instructions_iter(), &self.feature_set, - ), + ) + .unwrap_or_default() + .into(), self.feature_set .is_active(&remove_congestion_multiplier_from_fee_calculation::id()), self.feature_set @@ -4177,7 +4192,11 @@ impl Bank { /// Register a new recent blockhash in the bank's recent blockhash queue. Called when a bank /// reaches its max tick height. Can be called by tests to get new blockhashes for transaction /// processing without advancing to a new bank slot. - pub fn register_recent_blockhash(&self, blockhash: &Hash) { + fn register_recent_blockhash(&self, blockhash: &Hash, scheduler: &InstalledSchedulerRwLock) { + // This is needed because recent_blockhash updates necessitate synchronizations for + // consistent tx check_age handling. + BankWithScheduler::wait_for_paused_scheduler(self, scheduler); + // Only acquire the write lock for the blockhash queue on block boundaries because // readers can starve this write lock acquisition and ticks would be slowed down too // much if the write lock is acquired for each tick. @@ -4186,20 +4205,29 @@ impl Bank { self.update_recent_blockhashes_locked(&w_blockhash_queue); } + // gating this under #[cfg(feature = "dev-context-only-utils")] isn't easy due to + // solana-program-test's usage... 
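+ // This helper simply registers a freshly generated blockhash, with no scheduler installed.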
+ pub fn register_unique_recent_blockhash_for_test(&self) { + self.register_recent_blockhash( + &Hash::new_unique(), + &BankWithScheduler::no_scheduler_available(), + ) + } + /// Tell the bank which Entry IDs exist on the ledger. This function assumes subsequent calls /// correspond to later entries, and will boot the oldest ones once its internal cache is full. /// Once boot, the bank will reject transactions using that `hash`. /// /// This is NOT thread safe because if tick height is updated by two different threads, the /// block boundary condition could be missed. - pub fn register_tick(&self, hash: &Hash) { + pub fn register_tick(&self, hash: &Hash, scheduler: &InstalledSchedulerRwLock) { assert!( !self.freeze_started(), "register_tick() working on a bank that is already frozen or is undergoing freezing!" ); if self.is_block_boundary(self.tick_height.load(Relaxed) + 1) { - self.register_recent_blockhash(hash); + self.register_recent_blockhash(hash, scheduler); } // ReplayStage will start computing the accounts delta hash when it @@ -4212,18 +4240,17 @@ impl Bank { #[cfg(feature = "dev-context-only-utils")] pub fn register_tick_for_test(&self, hash: &Hash) { - // currently meaningless wrapper; upcoming pr will make it an actual helper... - self.register_tick(hash) + self.register_tick(hash, &BankWithScheduler::no_scheduler_available()) } #[cfg(feature = "dev-context-only-utils")] pub fn register_default_tick_for_test(&self) { - self.register_tick(&Hash::default()) + self.register_tick_for_test(&Hash::default()) } #[cfg(feature = "dev-context-only-utils")] pub fn register_unique_tick(&self) { - self.register_tick(&Hash::new_unique()) + self.register_tick_for_test(&Hash::new_unique()) } pub fn is_complete(&self) -> bool { @@ -4676,16 +4703,25 @@ impl Bank { ProgramAccountLoadResult::InvalidAccountData } - pub fn load_program(&self, pubkey: &Pubkey, reload: bool) -> Arc<LoadedProgram> { + pub fn load_program( + &self, + pubkey: &Pubkey, + reload: bool, + recompile: Option<Arc<LoadedProgram>>, + ) -> Arc<LoadedProgram> { let loaded_programs_cache = self.loaded_programs_cache.read().unwrap(); - let environments = loaded_programs_cache.get_environments_for_epoch(self.epoch); - + let effective_epoch = if recompile.is_some() { + loaded_programs_cache.latest_root_epoch.saturating_add(1) + } else { + self.epoch + }; + let environments = loaded_programs_cache.get_environments_for_epoch(effective_epoch); let mut load_program_metrics = LoadProgramMetrics { program_id: pubkey.to_string(), ..LoadProgramMetrics::default() }; - let loaded_program = match self.load_program_accounts(pubkey) { + let mut loaded_program = match self.load_program_accounts(pubkey) { ProgramAccountLoadResult::AccountNotFound => Ok(LoadedProgram::new_tombstone( self.slot, LoadedProgramType::Closed, @@ -4792,6 +4828,16 @@ impl Bank { let mut timings = ExecuteDetailsTimings::default(); load_program_metrics.submit_datapoint(&mut timings); + if let Some(recompile) = recompile { + loaded_program.effective_slot = loaded_program.effective_slot.max( + self.epoch_schedule() + .get_first_slot_in_epoch(effective_epoch), + ); + loaded_program.tx_usage_counter = + AtomicU64::new(recompile.tx_usage_counter.load(Ordering::Relaxed)); + loaded_program.ix_usage_counter = + AtomicU64::new(recompile.ix_usage_counter.load(Ordering::Relaxed)); + } Arc::new(loaded_program) } @@ -5038,7 +5084,7 @@ impl Bank { let missing_programs: Vec<(Pubkey, Arc<LoadedProgram>)> = missing .iter() .map(|(key, count)| { - let program = self.load_program(key, false); + let program = self.load_program(key, false, None); 
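+ // Carry the usage count observed while the program was missing over to the newly loaded entry.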
program.tx_usage_counter.store(*count, Ordering::Relaxed); (*key, program) }) @@ -5048,7 +5094,7 @@ impl Bank { let unloaded_programs: Vec<(Pubkey, Arc<LoadedProgram>)> = unloaded .iter() .map(|(key, count)| { - let program = self.load_program(key, true); + let program = self.load_program(key, true, None); program.tx_usage_counter.store(*count, Ordering::Relaxed); (*key, program) }) @@ -5179,36 +5225,28 @@ impl Bank { .map(|(accs, tx)| match accs { (Err(e), _nonce) => TransactionExecutionResult::NotExecuted(e.clone()), (Ok(loaded_transaction), nonce) => { - let compute_budget = if let Some(compute_budget) = - self.runtime_config.compute_budget - { - compute_budget - } else { - let mut compute_budget = - ComputeBudget::new(compute_budget::MAX_COMPUTE_UNIT_LIMIT as u64); - - let mut compute_budget_process_transaction_time = - Measure::start("compute_budget_process_transaction_time"); - let process_transaction_result = compute_budget.process_instructions( - tx.message().program_instructions_iter(), - !self - .feature_set - .is_active(&remove_deprecated_request_unit_ix::id()), - self.feature_set - .is_active(&add_set_tx_loaded_accounts_data_size_instruction::id()), - ); - compute_budget_process_transaction_time.stop(); - saturating_add_assign!( - timings - .execute_accessories - .compute_budget_process_transaction_us, - compute_budget_process_transaction_time.as_us() - ); - if let Err(err) = process_transaction_result { - return TransactionExecutionResult::NotExecuted(err); - } - compute_budget - }; + let compute_budget = + if let Some(compute_budget) = self.runtime_config.compute_budget { + compute_budget + } else { + let mut compute_budget_process_transaction_time = + Measure::start("compute_budget_process_transaction_time"); + let maybe_compute_budget = ComputeBudget::try_from_instructions( + tx.message().program_instructions_iter(), + &self.feature_set, + ); + compute_budget_process_transaction_time.stop(); + saturating_add_assign!( + timings + .execute_accessories + .compute_budget_process_transaction_us, + compute_budget_process_transaction_time.as_us() + ); + if let Err(err) = maybe_compute_budget { + return TransactionExecutionResult::NotExecuted(err); + } + maybe_compute_budget.unwrap() + }; let result = self.execute_loaded_transaction( tx, @@ -5653,183 +5691,6 @@ impl Bank { } } - // Distribute collected rent fees for this slot to staked validators (excluding stakers) - // according to stake. - // - // The nature of rent fee is the cost of doing business, every validator has to hold (or have - // access to) the same list of accounts, so we pay according to stake, which is a rough proxy for - // value to the network. - // - // Currently, rent distribution doesn't consider given validator's uptime at all (this might - // change). That's because rent should be rewarded for the storage resource utilization cost. - // It's treated differently from transaction fees, which is for the computing resource - // utilization cost. - // - // We can't use collector_id (which is rotated according to stake-weighted leader schedule) - // as an approximation to the ideal rent distribution to simplify and avoid this per-slot - // computation for the distribution (time: N log N, space: N acct. stores; N = # of - // validators). 
- // The reason is that rent fee doesn't need to be incentivized for throughput unlike transaction - // fees - // - // Ref: collect_fees - #[allow(clippy::needless_collect)] - fn distribute_rent_to_validators( - &self, - vote_accounts: &VoteAccountsHashMap, - rent_to_be_distributed: u64, - ) { - let mut total_staked = 0; - - // Collect the stake associated with each validator. - // Note that a validator may be present in this vector multiple times if it happens to have - // more than one staked vote account somehow - let mut validator_stakes = vote_accounts - .iter() - .filter_map(|(_vote_pubkey, (staked, account))| { - if *staked == 0 { - None - } else { - total_staked += *staked; - Some((account.node_pubkey()?, *staked)) - } - }) - .collect::<Vec<(Pubkey, u64)>>(); - - #[cfg(test)] - if validator_stakes.is_empty() { - // some tests bank.freezes() with bad staking state - self.capitalization - .fetch_sub(rent_to_be_distributed, Relaxed); - return; - } - #[cfg(not(test))] - assert!(!validator_stakes.is_empty()); - - // Sort first by stake and then by validator identity pubkey for determinism. - // If two items are still equal, their relative order does not matter since - // both refer to the same validator. - validator_stakes.sort_unstable_by(|(pubkey1, staked1), (pubkey2, staked2)| { - (staked1, pubkey1).cmp(&(staked2, pubkey2)).reverse() - }); - - let enforce_fix = self.no_overflow_rent_distribution_enabled(); - - let mut rent_distributed_in_initial_round = 0; - let validator_rent_shares = validator_stakes - .into_iter() - .map(|(pubkey, staked)| { - let rent_share = if !enforce_fix { - (((staked * rent_to_be_distributed) as f64) / (total_staked as f64)) as u64 - } else { - (((staked as u128) * (rent_to_be_distributed as u128)) / (total_staked as u128)) - .try_into() - .unwrap() - }; - rent_distributed_in_initial_round += rent_share; - (pubkey, rent_share) - }) - .collect::<Vec<(Pubkey, u64)>>(); - - // Leftover lamports after fraction calculation, will be paid to validators starting from highest stake - // holder - let mut leftover_lamports = rent_to_be_distributed - rent_distributed_in_initial_round; - - let mut rewards = vec![]; - validator_rent_shares - .into_iter() - .for_each(|(pubkey, rent_share)| { - let rent_to_be_paid = if leftover_lamports > 0 { - leftover_lamports -= 1; - rent_share + 1 - } else { - rent_share - }; - if !enforce_fix || rent_to_be_paid > 0 { - let mut account = self - .get_account_with_fixed_root(&pubkey) - .unwrap_or_default(); - let rent = self.rent_collector().rent; - let recipient_pre_rent_state = RentState::from_account(&account, &rent); - let distribution = account.checked_add_lamports(rent_to_be_paid); - let recipient_post_rent_state = RentState::from_account(&account, &rent); - let rent_state_transition_allowed = recipient_post_rent_state - .transition_allowed_from(&recipient_pre_rent_state); - if !rent_state_transition_allowed { - warn!( - "Rent distribution of {rent_to_be_paid} to {pubkey} results in \ - invalid RentState: {recipient_post_rent_state:?}" - ); - datapoint_warn!( - "bank-rent_distribution_invalid_state", - ("slot", self.slot(), i64), - ("pubkey", pubkey.to_string(), String), - ("rent_to_be_paid", rent_to_be_paid, i64) - ); - } - if distribution.is_err() - || (self.prevent_rent_paying_rent_recipients() - && !rent_state_transition_allowed) - { - // overflow adding lamports or resulting account is not rent-exempt - self.capitalization.fetch_sub(rent_to_be_paid, Relaxed); - error!( - "Burned {} rent lamports instead of sending to {}", - rent_to_be_paid, pubkey - ); 
datapoint_error!( - "bank-burned_rent", - ("slot", self.slot(), i64), - ("num_lamports", rent_to_be_paid, i64) - ); - } else { - self.store_account(&pubkey, &account); - rewards.push(( - pubkey, - RewardInfo { - reward_type: RewardType::Rent, - lamports: rent_to_be_paid as i64, - post_balance: account.lamports(), - commission: None, - }, - )); - } - } - }); - self.rewards.write().unwrap().append(&mut rewards); - - if enforce_fix { - assert_eq!(leftover_lamports, 0); - } else if leftover_lamports != 0 { - warn!( - "There was leftover from rent distribution: {}", - leftover_lamports - ); - self.capitalization.fetch_sub(leftover_lamports, Relaxed); - } - } - - fn distribute_rent(&self) { - let total_rent_collected = self.collected_rent.load(Relaxed); - - let (burned_portion, rent_to_be_distributed) = self - .rent_collector - .rent - .calculate_burn(total_rent_collected); - - debug!( - "distributed rent: {} (rounded from: {}, burned: {})", - rent_to_be_distributed, total_rent_collected, burned_portion - ); - self.capitalization.fetch_sub(burned_portion, Relaxed); - - if rent_to_be_distributed == 0 { - return; - } - - self.distribute_rent_to_validators(&self.vote_accounts(), rent_to_be_distributed); - } - fn collect_rent( &self, execution_results: &[TransactionExecutionResult], @@ -6012,9 +5873,16 @@ impl Bank { let mut time_collecting_rent_us = 0; let mut time_storing_accounts_us = 0; let can_skip_rewrites = self.bank_hash_skips_rent_rewrites(); + let test_skip_rewrites_but_include_hash_in_bank_hash = !can_skip_rewrites + && self + .rc + .accounts + .accounts_db + .test_skip_rewrites_but_include_in_bank_hash; let set_exempt_rent_epoch_max: bool = self .feature_set .is_active(&solana_sdk::feature_set::set_exempt_rent_epoch_max::id()); + let mut skipped_rewrites = Vec::default(); for (pubkey, account, _loaded_slot) in accounts.iter_mut() { let (rent_collected_info, measure) = measure!(self.rent_collector.collect_from_existing_account( @@ -6030,7 +5898,9 @@ impl Bank { // Also, there's another subtle side-effect from rewrites: this // ensures we verify the whole on-chain state (= all accounts) // via the bank delta hash slowly once per an epoch. - if !can_skip_rewrites || !Self::skip_rewrite(rent_collected_info.rent_amount, account) { + if (!can_skip_rewrites && !test_skip_rewrites_but_include_hash_in_bank_hash) + || !Self::skip_rewrite(rent_collected_info.rent_amount, account) + { if rent_collected_info.rent_amount > 0 { if let Some(rent_paying_pubkeys) = rent_paying_pubkeys { if !rent_paying_pubkeys.contains(pubkey) { @@ -6060,6 +5930,13 @@ impl Bank { } total_rent_collected_info += rent_collected_info; accounts_to_store.push((pubkey, account)); + } else if test_skip_rewrites_but_include_hash_in_bank_hash { + // include rewrites that we skipped in the accounts delta hash. + // This is what consensus requires prior to activation of bank_hash_skips_rent_rewrites. + // This code path exists to allow us to test the long term effects on validators when the skipped rewrites + // feature is enabled. 
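+ // Hash the account exactly as a rewrite would have, and stash the result so it + // can be folded into the accounts delta hash when this bank is frozen.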
+ let hash = AccountsDb::hash_account(account, pubkey); + skipped_rewrites.push((*pubkey, hash)); } rent_debits.insert(pubkey, rent_collected_info.rent_amount, account.lamports()); } @@ -6073,6 +5950,7 @@ impl Bank { } CollectRentFromAccountsInfo { + skipped_rewrites, rent_collected_info: total_rent_collected_info, rent_rewards: rent_debits.into_unordered_rewards_iter().collect(), time_collecting_rent_us, @@ -6175,6 +6053,11 @@ impl Bank { CollectRentInPartitionInfo::reduce, ); + self.skipped_rewrites + .lock() + .unwrap() + .extend(&mut results.skipped_rewrites.into_iter()); + // We cannot assert here that we collected from all expected keys. // Some accounts may have been topped off or may have had all funds removed and gone to 0 lamports. @@ -6718,19 +6601,6 @@ impl Bank { } } - pub fn deposit( - &self, - pubkey: &Pubkey, - lamports: u64, - ) -> std::result::Result<u64, LamportsError> { - // This doesn't collect rents intentionally. - // Rents should only be applied to actual TXes - let mut account = self.get_account_with_fixed_root(pubkey).unwrap_or_default(); - account.checked_add_lamports(lamports)?; - self.store_account(pubkey, &account); - Ok(account.lamports()) - } - pub fn accounts(&self) -> Arc<Accounts> { self.rc.accounts.clone() } @@ -6769,6 +6639,24 @@ impl Bank { } } + let mut loaded_programs_cache = self.loaded_programs_cache.write().unwrap(); + loaded_programs_cache.latest_root_slot = self.slot(); + loaded_programs_cache.latest_root_epoch = self.epoch(); + loaded_programs_cache.environments.program_runtime_v1 = Arc::new( + create_program_runtime_environment_v1( + &self.feature_set, + &self.runtime_config.compute_budget.unwrap_or_default(), + false, /* deployment */ + false, /* debugging_features */ + ) + .unwrap(), + ); + loaded_programs_cache.environments.program_runtime_v2 = + Arc::new(create_program_runtime_environment_v2( + &self.runtime_config.compute_budget.unwrap_or_default(), + false, /* debugging_features */ + )); + if self .feature_set .is_active(&feature_set::cap_accounts_data_len::id()) @@ -7068,7 +6956,11 @@ impl Bank { .rc .accounts .accounts_db - .calculate_accounts_delta_hash_internal(slot, ignore); + .calculate_accounts_delta_hash_internal( + slot, + ignore, + std::mem::take(&mut self.skipped_rewrites.lock().unwrap()), + ); let mut signature_count_buf = [0u8; 8]; LittleEndian::write_u64(&mut signature_count_buf[..], self.signature_count()); @@ -7637,10 +7529,20 @@ impl Bank { }); let (verified_bank, verify_bank_time_us) = measure_us!({ - info!("Verifying bank..."); - let verified = self.verify_hash(); - info!("Verifying bank... Done."); - verified + let should_verify_bank = !self + .rc + .accounts + .accounts_db + .test_skip_rewrites_but_include_in_bank_hash; + if should_verify_bank { + info!("Verifying bank..."); + let verified = self.verify_hash(); + info!("Verifying bank... Done."); + verified + } else { + info!("Verifying bank... 
Skipped."); + true + } }); datapoint_info!( @@ -7933,6 +7835,11 @@ impl Bank { .is_active(&feature_set::prevent_rent_paying_rent_recipients::id()) } + pub fn validate_fee_collector_account(&self) -> bool { + self.feature_set + .is_active(&feature_set::validate_fee_collector_account::id()) + } + pub fn read_cost_tracker(&self) -> LockResult> { self.cost_tracker.read() } @@ -7966,10 +7873,14 @@ impl Bank { } pub fn fill_bank_with_ticks_for_tests(&self) { + self.do_fill_bank_with_ticks_for_tests(&BankWithScheduler::no_scheduler_available()) + } + + pub(crate) fn do_fill_bank_with_ticks_for_tests(&self, scheduler: &InstalledSchedulerRwLock) { if self.tick_height.load(Relaxed) < self.max_tick_height { let last_blockhash = self.last_blockhash(); while self.last_blockhash() == last_blockhash { - self.register_tick(&Hash::new_unique()) + self.register_tick(&Hash::new_unique(), scheduler) } } else { warn!("Bank already reached max tick height, cannot fill it with more ticks"); @@ -8041,24 +7952,6 @@ impl Bank { if new_feature_activations.contains(&feature_set::update_hashes_per_tick6::id()) { self.apply_updated_hashes_per_tick(UPDATED_HASHES_PER_TICK6); } - - if new_feature_activations.contains(&feature_set::programify_feature_gate_program::id()) { - let datapoint_name = "bank-progamify_feature_gate_program"; - if let Err(e) = replace_account::replace_empty_account_with_upgradeable_program( - self, - &feature::id(), - &inline_feature_gate_program::noop_program::id(), - datapoint_name, - ) { - warn!( - "{}: Failed to replace empty account {} with upgradeable program: {}", - datapoint_name, - feature::id(), - e - ); - datapoint_warn!(datapoint_name, ("slot", self.slot(), i64),); - } - } } fn apply_updated_hashes_per_tick(&mut self, hashes_per_tick: u64) { @@ -8129,46 +8022,6 @@ impl Bank { only_apply_transitions_for_new_features: bool, new_feature_activations: &HashSet, ) { - const FEATURES_AFFECTING_RBPF: &[Pubkey] = &[ - feature_set::error_on_syscall_bpf_function_hash_collisions::id(), - feature_set::reject_callx_r10::id(), - feature_set::switch_to_new_elf_parser::id(), - feature_set::bpf_account_data_direct_mapping::id(), - feature_set::enable_alt_bn128_syscall::id(), - feature_set::enable_alt_bn128_compression_syscall::id(), - feature_set::enable_big_mod_exp_syscall::id(), - feature_set::blake3_syscall_enabled::id(), - feature_set::curve25519_syscall_enabled::id(), - feature_set::disable_fees_sysvar::id(), - feature_set::enable_partitioned_epoch_reward::id(), - feature_set::disable_deploy_of_alloc_free_syscall::id(), - feature_set::last_restart_slot_sysvar::id(), - feature_set::remaining_compute_units_syscall_enabled::id(), - ]; - if !only_apply_transitions_for_new_features - || FEATURES_AFFECTING_RBPF - .iter() - .any(|key| new_feature_activations.contains(key)) - { - let program_runtime_environment_v1 = create_program_runtime_environment_v1( - &self.feature_set, - &self.runtime_config.compute_budget.unwrap_or_default(), - false, /* deployment */ - false, /* debugging_features */ - ) - .unwrap(); - let mut loaded_programs_cache = self.loaded_programs_cache.write().unwrap(); - loaded_programs_cache.environments.program_runtime_v1 = - Arc::new(program_runtime_environment_v1); - let program_runtime_environment_v2 = - solana_loader_v4_program::create_program_runtime_environment_v2( - &self.runtime_config.compute_budget.unwrap_or_default(), - false, /* debugging_features */ - ); - loaded_programs_cache.environments.program_runtime_v2 = - Arc::new(program_runtime_environment_v2); - 
loaded_programs_cache.prune_feature_set_transition(); - } for builtin in BUILTINS.iter() { if let Some(feature_id) = builtin.feature_id { let should_apply_action_for_feature_transition = @@ -8200,6 +8053,42 @@ impl Bank { } } + /// Use to replace programs by feature activation + #[allow(dead_code)] + fn replace_program_account( + &mut self, + old_address: &Pubkey, + new_address: &Pubkey, + datapoint_name: &'static str, + ) { + if let Some(old_account) = self.get_account_with_fixed_root(old_address) { + if let Some(new_account) = self.get_account_with_fixed_root(new_address) { + datapoint_info!(datapoint_name, ("slot", self.slot, i64)); + + // Burn lamports in the old account + self.capitalization + .fetch_sub(old_account.lamports(), Relaxed); + + // Transfer new account to old account + self.store_account(old_address, &new_account); + + // Clear new account + self.store_account(new_address, &AccountSharedData::default()); + + // Unload a program from the bank's cache + self.loaded_programs_cache + .write() + .unwrap() + .remove_programs([*old_address].into_iter()); + + self.calculate_and_update_accounts_data_size_delta_off_chain( + old_account.data().len(), + new_account.data().len(), + ); + } + } + } + /// Get all the accounts for this bank and calculate stats pub fn get_total_accounts_stats(&self) -> ScanResult<TotalAccountsStats> { let accounts = self.get_all_accounts()?; @@ -8339,6 +8228,7 @@ enum ApplyFeatureActivationsCaller { /// process later. #[derive(Debug, Default)] struct CollectRentFromAccountsInfo { + skipped_rewrites: Vec<(Pubkey, AccountHash)>, rent_collected_info: CollectedInfo, rent_rewards: Vec<(Pubkey, RewardInfo)>, time_collecting_rent_us: u64, @@ -8350,6 +8240,7 @@ struct CollectRentFromAccountsInfo { /// `collect_rent_in_partition()`—and then perform a reduce on all of them. #[derive(Debug, Default)] struct CollectRentInPartitionInfo { + skipped_rewrites: Vec<(Pubkey, AccountHash)>, rent_collected: u64, accounts_data_size_reclaimed: u64, rent_rewards: Vec<(Pubkey, RewardInfo)>, @@ -8365,6 +8256,7 @@ impl CollectRentInPartitionInfo { #[must_use] fn new(info: CollectRentFromAccountsInfo, time_loading_accounts: Duration) -> Self { Self { + skipped_rewrites: info.skipped_rewrites, rent_collected: info.rent_collected_info.rent_amount, accounts_data_size_reclaimed: info.rent_collected_info.account_data_len_reclaimed, rent_rewards: info.rent_rewards, @@ -8382,6 +8274,7 @@ impl CollectRentInPartitionInfo { #[must_use] fn reduce(lhs: Self, rhs: Self) -> Self { Self { + skipped_rewrites: [lhs.skipped_rewrites, rhs.skipped_rewrites].concat(), rent_collected: lhs.rent_collected.saturating_add(rhs.rent_collected), accounts_data_size_reclaimed: lhs .accounts_data_size_reclaimed @@ -8473,7 +8366,12 @@ pub mod test_utils { use { super::Bank, crate::installed_scheduler_pool::BankWithScheduler, - solana_sdk::{hash::hashv, pubkey::Pubkey}, + solana_sdk::{ + account::{ReadableAccount, WritableAccount}, + hash::hashv, + lamports::LamportsError, + pubkey::Pubkey, + }, solana_vote_program::vote_state::{self, BlockTimestamp, VoteStateVersions}, std::sync::Arc, }; @@ -8505,4 +8403,17 @@ pub mod test_utils { vote_state::to(&versioned, &mut vote_account).unwrap(); bank.store_account(vote_pubkey, &vote_account); } + + pub fn deposit( + bank: &Bank, + pubkey: &Pubkey, + lamports: u64, + ) -> std::result::Result<u64, LamportsError> { + // This doesn't collect rents intentionally. 
+ // Rents should only be applied to actual TXes + let mut account = bank.get_account_with_fixed_root(pubkey).unwrap_or_default(); + account.checked_add_lamports(lamports)?; + bank.store_account(pubkey, &account); + Ok(account.lamports()) + } } diff --git a/runtime/src/bank/fee_distribution.rs b/runtime/src/bank/fee_distribution.rs new file mode 100644 index 00000000000000..e1d251c0bf478c --- /dev/null +++ b/runtime/src/bank/fee_distribution.rs @@ -0,0 +1,908 @@ +use { + super::Bank, + log::{debug, warn}, + solana_accounts_db::{account_rent_state::RentState, stake_rewards::RewardInfo}, + solana_sdk::{ + account::{ReadableAccount, WritableAccount}, + pubkey::Pubkey, + reward_type::RewardType, + system_program, + }, + solana_vote::vote_account::VoteAccountsHashMap, + std::{result::Result, sync::atomic::Ordering::Relaxed}, + thiserror::Error, +}; + +#[derive(Debug)] +struct DepositFeeOptions { + check_account_owner: bool, + check_rent_paying: bool, +} + +#[derive(Error, Debug, PartialEq)] +enum DepositFeeError { + #[error("fee account became rent paying")] + InvalidRentPayingAccount, + #[error("lamport overflow")] + LamportOverflow, + #[error("invalid fee account owner")] + InvalidAccountOwner, +} + +impl Bank { + // Distribute collected transaction fees for this slot to collector_id (= current leader). + // + // Each validator is incentivized to process more transactions to earn more transaction fees. + // Transaction fees are rewarded for the computing resource utilization cost, directly + // proportional to their actual processing power. + // + // collector_id is rotated according to stake-weighted leader schedule. So the opportunity of + // earning transaction fees is fairly distributed by stake. And missing the opportunity + // (not producing a block as a leader) earns nothing. So, being online is incentivized as a + // form of transaction fees as well. + // + // On the other hand, rent fees are distributed under a slightly different philosophy, while + // still being stake-weighted. 
+ // Ref: distribute_rent_to_validators + pub(super) fn distribute_transaction_fees(&self) { + let collector_fees = self.collector_fees.load(Relaxed); + if collector_fees != 0 { + let (deposit, mut burn) = self.fee_rate_governor.burn(collector_fees); + if deposit > 0 { + let validate_fee_collector = self.validate_fee_collector_account(); + match self.deposit_fees( + &self.collector_id, + deposit, + DepositFeeOptions { + check_account_owner: validate_fee_collector, + check_rent_paying: validate_fee_collector, + }, + ) { + Ok(post_balance) => { + self.rewards.write().unwrap().push(( + self.collector_id, + RewardInfo { + reward_type: RewardType::Fee, + lamports: deposit as i64, + post_balance, + commission: None, + }, + )); + } + Err(err) => { + debug!( + "Burned {} lamport tx fee instead of sending to {} due to {}", + deposit, self.collector_id, err + ); + datapoint_warn!( + "bank-burned_fee", + ("slot", self.slot(), i64), + ("num_lamports", deposit, i64), + ("error", err.to_string(), String), + ); + burn += deposit; + } + } + } + self.capitalization.fetch_sub(burn, Relaxed); + } + } + + // Deposits fees into the specified account and, if successful, returns the new balance of that account + fn deposit_fees( + &self, + pubkey: &Pubkey, + fees: u64, + options: DepositFeeOptions, + ) -> Result<u64, DepositFeeError> { + let mut account = self.get_account_with_fixed_root(pubkey).unwrap_or_default(); + + if options.check_account_owner && !system_program::check_id(account.owner()) { + return Err(DepositFeeError::InvalidAccountOwner); + } + + let rent = self.rent_collector().rent; + let recipient_pre_rent_state = RentState::from_account(&account, &rent); + let distribution = account.checked_add_lamports(fees); + if distribution.is_err() { + return Err(DepositFeeError::LamportOverflow); + } + if options.check_rent_paying { + let recipient_post_rent_state = RentState::from_account(&account, &rent); + let rent_state_transition_allowed = + recipient_post_rent_state.transition_allowed_from(&recipient_pre_rent_state); + if !rent_state_transition_allowed { + return Err(DepositFeeError::InvalidRentPayingAccount); + } + } + + self.store_account(pubkey, &account); + Ok(account.lamports()) + } + + // Distribute collected rent fees for this slot to staked validators (excluding stakers) + // according to stake. + // + // The nature of the rent fee is the cost of doing business: every validator has to hold (or have + // access to) the same list of accounts, so we pay according to stake, which is a rough proxy for + // value to the network. + // + // Currently, rent distribution doesn't consider a given validator's uptime at all (this might + // change). That's because rent should be rewarded for the storage resource utilization cost. + // It's treated differently from transaction fees, which is for the computing resource + // utilization cost. + // + // We can't use collector_id (which is rotated according to stake-weighted leader schedule) + // as an approximation to the ideal rent distribution to simplify and avoid this per-slot + // computation for the distribution (time: N log N, space: N acct. stores; N = # of + // validators). + // The reason is that the rent fee doesn't need to be incentivized for throughput, unlike + // transaction fees. + // + // Ref: distribute_transaction_fees + #[allow(clippy::needless_collect)] + fn distribute_rent_to_validators( + &self, + vote_accounts: &VoteAccountsHashMap, + rent_to_be_distributed: u64, + ) { + let mut total_staked = 0; + + // Collect the stake associated with each validator. 
+ // Note that a validator may be present in this vector multiple times if it happens to have + // more than one staked vote account somehow + let mut validator_stakes = vote_accounts + .iter() + .filter_map(|(_vote_pubkey, (staked, account))| { + if *staked == 0 { + None + } else { + total_staked += *staked; + Some((account.node_pubkey()?, *staked)) + } + }) + .collect::<Vec<(Pubkey, u64)>>(); + + #[cfg(test)] + if validator_stakes.is_empty() { + // some tests call bank.freeze() with bad staking state + self.capitalization + .fetch_sub(rent_to_be_distributed, Relaxed); + return; + } + #[cfg(not(test))] + assert!(!validator_stakes.is_empty()); + + // Sort first by stake and then by validator identity pubkey for determinism. + // If two items are still equal, their relative order does not matter since + // both refer to the same validator. + validator_stakes.sort_unstable_by(|(pubkey1, staked1), (pubkey2, staked2)| { + (staked1, pubkey1).cmp(&(staked2, pubkey2)).reverse() + }); + + let enforce_fix = self.no_overflow_rent_distribution_enabled(); + + let mut rent_distributed_in_initial_round = 0; + let validator_rent_shares = validator_stakes + .into_iter() + .map(|(pubkey, staked)| { + let rent_share = if !enforce_fix { + (((staked * rent_to_be_distributed) as f64) / (total_staked as f64)) as u64 + } else { + (((staked as u128) * (rent_to_be_distributed as u128)) / (total_staked as u128)) + .try_into() + .unwrap() + }; + rent_distributed_in_initial_round += rent_share; + (pubkey, rent_share) + }) + .collect::<Vec<(Pubkey, u64)>>(); + + // Leftover lamports after the fraction calculation will be paid to validators, + // starting from the highest stake holder + let mut leftover_lamports = rent_to_be_distributed - rent_distributed_in_initial_round; + + let mut rent_to_burn: u64 = 0; + let mut rewards = vec![]; + validator_rent_shares + .into_iter() + .for_each(|(pubkey, rent_share)| { + let rent_to_be_paid = if leftover_lamports > 0 { + leftover_lamports -= 1; + rent_share + 1 + } else { + rent_share + }; + if !enforce_fix || rent_to_be_paid > 0 { + let check_account_owner = self.validate_fee_collector_account(); + let check_rent_paying = self.prevent_rent_paying_rent_recipients(); + match self.deposit_fees( + &pubkey, + rent_to_be_paid, + DepositFeeOptions { + check_account_owner, + check_rent_paying, + }, + ) { + Ok(post_balance) => { + rewards.push(( + pubkey, + RewardInfo { + reward_type: RewardType::Rent, + lamports: rent_to_be_paid as i64, + post_balance, + commission: None, + }, + )); + } + Err(err) => { + debug!( + "Burned {} lamport rent fee instead of sending to {} due to {}", + rent_to_be_paid, pubkey, err + ); + + // overflow adding lamports or resulting account is invalid + // so burn lamports and track lamports burned per slot + rent_to_burn = rent_to_burn.saturating_add(rent_to_be_paid); + } + } + } + }); + self.rewards.write().unwrap().append(&mut rewards); + + if rent_to_burn > 0 { + self.capitalization.fetch_sub(rent_to_burn, Relaxed); + datapoint_warn!( + "bank-burned_rent", + ("slot", self.slot(), i64), + ("num_lamports", rent_to_burn, i64) + ); + } + + if enforce_fix { + assert_eq!(leftover_lamports, 0); + } else if leftover_lamports != 0 { + warn!( + "There was leftover from rent distribution: {}", + leftover_lamports + ); + self.capitalization.fetch_sub(leftover_lamports, Relaxed); + } + } + + pub(super) fn distribute_rent_fees(&self) { + let total_rent_collected = self.collected_rent.load(Relaxed); + + let (burned_portion, rent_to_be_distributed) = self + .rent_collector + .rent + .calculate_burn(total_rent_collected); 
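+ // calculate_burn splits the collected rent into a portion to burn and a remainder + // to distribute to validators by stake weight.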
+ + debug!( + "distributed rent: {} (rounded from: {}, burned: {})", + rent_to_be_distributed, total_rent_collected, burned_portion + ); + self.capitalization.fetch_sub(burned_portion, Relaxed); + + if rent_to_be_distributed == 0 { + return; + } + + self.distribute_rent_to_validators(&self.vote_accounts(), rent_to_be_distributed); + } +} + +#[cfg(test)] +pub mod tests { + use { + super::*, + crate::genesis_utils::{ + create_genesis_config, create_genesis_config_with_leader, + create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs, + }, + log::info, + solana_sdk::{ + account::AccountSharedData, feature_set, native_token::sol_to_lamports, pubkey, + rent::Rent, signature::Signer, + }, + }; + + #[test] + fn test_distribute_transaction_fees() { + #[derive(PartialEq)] + enum Scenario { + Normal, + InvalidOwner, + RentPaying, + } + + struct TestCase { + scenario: Scenario, + disable_checks: bool, + } + + impl TestCase { + fn new(scenario: Scenario, disable_checks: bool) -> Self { + Self { + scenario, + disable_checks, + } + } + } + + for test_case in [ + TestCase::new(Scenario::Normal, false), + TestCase::new(Scenario::Normal, true), + TestCase::new(Scenario::InvalidOwner, false), + TestCase::new(Scenario::InvalidOwner, true), + TestCase::new(Scenario::RentPaying, false), + TestCase::new(Scenario::RentPaying, true), + ] { + let mut genesis = create_genesis_config(0); + if test_case.disable_checks { + genesis + .genesis_config + .accounts + .remove(&feature_set::validate_fee_collector_account::id()) + .unwrap(); + } + let rent = Rent::default(); + let min_rent_exempt_balance = rent.minimum_balance(0); + genesis.genesis_config.rent = rent; // Ensure rent is non-zero, as genesis_utils sets Rent::free by default + let bank = Bank::new_for_tests(&genesis.genesis_config); + let transaction_fees = 100; + bank.collector_fees.fetch_add(transaction_fees, Relaxed); + assert_eq!(transaction_fees, bank.collector_fees.load(Relaxed)); + let (expected_collected_fees, burn_amount) = + bank.fee_rate_governor.burn(transaction_fees); + assert!(burn_amount > 0); + + if test_case.scenario == Scenario::RentPaying { + // ensure that account balance + collected fees will make it rent-paying + let initial_balance = 100; + let account = AccountSharedData::new(initial_balance, 0, &system_program::id()); + bank.store_account(bank.collector_id(), &account); + assert!(initial_balance + transaction_fees < min_rent_exempt_balance); + } else if test_case.scenario == Scenario::InvalidOwner { + // ensure that account owner is invalid and fee distribution will fail + let account = + AccountSharedData::new(min_rent_exempt_balance, 0, &Pubkey::new_unique()); + bank.store_account(bank.collector_id(), &account); + } else { + let account = + AccountSharedData::new(min_rent_exempt_balance, 0, &system_program::id()); + bank.store_account(bank.collector_id(), &account); + } + + let initial_capitalization = bank.capitalization(); + let initial_collector_id_balance = bank.get_balance(bank.collector_id()); + bank.distribute_transaction_fees(); + let new_collector_id_balance = bank.get_balance(bank.collector_id()); + + if test_case.scenario != Scenario::Normal && !test_case.disable_checks { + assert_eq!(initial_collector_id_balance, new_collector_id_balance); + assert_eq!( + initial_capitalization - transaction_fees, + bank.capitalization() + ); + let locked_rewards = bank.rewards.read().unwrap(); + assert!( + locked_rewards.is_empty(), + "There should be no rewards distributed" + ); + } else { + assert_eq!( + 
initial_collector_id_balance + expected_collected_fees, + new_collector_id_balance + ); + + assert_eq!(initial_capitalization - burn_amount, bank.capitalization()); + + let locked_rewards = bank.rewards.read().unwrap(); + assert_eq!( + locked_rewards.len(), + 1, + "There should be one reward distributed" + ); + + let reward_info = &locked_rewards[0]; + assert_eq!( + reward_info.1.lamports, expected_collected_fees as i64, + "The reward amount should match the expected deposit" + ); + assert_eq!( + reward_info.1.reward_type, + RewardType::Fee, + "The reward type should be Fee" + ); + } + } + } + + #[test] + fn test_distribute_transaction_fees_zero() { + let genesis = create_genesis_config(0); + let bank = Bank::new_for_tests(&genesis.genesis_config); + assert_eq!(bank.collector_fees.load(Relaxed), 0); + + let initial_capitalization = bank.capitalization(); + let initial_collector_id_balance = bank.get_balance(bank.collector_id()); + bank.distribute_transaction_fees(); + let new_collector_id_balance = bank.get_balance(bank.collector_id()); + + assert_eq!(initial_collector_id_balance, new_collector_id_balance); + assert_eq!(initial_capitalization, bank.capitalization()); + let locked_rewards = bank.rewards.read().unwrap(); + assert!( + locked_rewards.is_empty(), + "There should be no rewards distributed" + ); + } + + #[test] + fn test_distribute_transaction_fees_burn_all() { + let mut genesis = create_genesis_config(0); + genesis.genesis_config.fee_rate_governor.burn_percent = 100; + let bank = Bank::new_for_tests(&genesis.genesis_config); + let transaction_fees = 100; + bank.collector_fees.fetch_add(transaction_fees, Relaxed); + assert_eq!(transaction_fees, bank.collector_fees.load(Relaxed)); + + let initial_capitalization = bank.capitalization(); + let initial_collector_id_balance = bank.get_balance(bank.collector_id()); + bank.distribute_transaction_fees(); + let new_collector_id_balance = bank.get_balance(bank.collector_id()); + + assert_eq!(initial_collector_id_balance, new_collector_id_balance); + assert_eq!( + initial_capitalization - transaction_fees, + bank.capitalization() + ); + let locked_rewards = bank.rewards.read().unwrap(); + assert!( + locked_rewards.is_empty(), + "There should be no rewards distributed" + ); + } + + #[test] + fn test_distribute_transaction_fees_overflow_failure() { + let genesis = create_genesis_config(0); + let bank = Bank::new_for_tests(&genesis.genesis_config); + let transaction_fees = 100; + bank.collector_fees.fetch_add(transaction_fees, Relaxed); + assert_eq!(transaction_fees, bank.collector_fees.load(Relaxed)); + + // ensure that account balance will overflow and fee distribution will fail + let account = AccountSharedData::new(u64::MAX, 0, &system_program::id()); + bank.store_account(bank.collector_id(), &account); + + let initial_capitalization = bank.capitalization(); + let initial_collector_id_balance = bank.get_balance(bank.collector_id()); + bank.distribute_transaction_fees(); + let new_collector_id_balance = bank.get_balance(bank.collector_id()); + + assert_eq!(initial_collector_id_balance, new_collector_id_balance); + assert_eq!( + initial_capitalization - transaction_fees, + bank.capitalization() + ); + let locked_rewards = bank.rewards.read().unwrap(); + assert!( + locked_rewards.is_empty(), + "There should be no rewards distributed" + ); + } + + #[test] + fn test_deposit_fees() { + let initial_balance = 1_000_000_000; + let genesis = create_genesis_config(initial_balance); + let bank = Bank::new_for_tests(&genesis.genesis_config); + let 
pubkey = genesis.mint_keypair.pubkey(); + + let deposit_amount = 500; + let options = DepositFeeOptions { + check_account_owner: true, + check_rent_paying: true, + }; + + assert_eq!( + bank.deposit_fees(&pubkey, deposit_amount, options), + Ok(initial_balance + deposit_amount), + "New balance should be the sum of the initial balance and deposit amount" + ); + } + + #[test] + fn test_deposit_fees_with_overflow() { + let initial_balance = u64::MAX; + let genesis = create_genesis_config(initial_balance); + let bank = Bank::new_for_tests(&genesis.genesis_config); + let pubkey = genesis.mint_keypair.pubkey(); + + let deposit_amount = 500; + let options = DepositFeeOptions { + check_account_owner: false, + check_rent_paying: false, + }; + + assert_eq!( + bank.deposit_fees(&pubkey, deposit_amount, options), + Err(DepositFeeError::LamportOverflow), + "Expected an error due to lamport overflow" + ); + } + + #[test] + fn test_deposit_fees_invalid_account_owner() { + let initial_balance = 1000; + let genesis = create_genesis_config_with_leader(0, &pubkey::new_rand(), initial_balance); + let bank = Bank::new_for_tests(&genesis.genesis_config); + let pubkey = genesis.voting_keypair.pubkey(); + + let deposit_amount = 500; + + // enable check_account_owner + { + let options = DepositFeeOptions { + check_account_owner: true, // Intentionally checking for account owner + check_rent_paying: false, + }; + + assert_eq!( + bank.deposit_fees(&pubkey, deposit_amount, options), + Err(DepositFeeError::InvalidAccountOwner), + "Expected an error due to invalid account owner" + ); + } + + // disable check_account_owner + { + let options = DepositFeeOptions { + check_account_owner: false, + check_rent_paying: false, + }; + + assert_eq!( + bank.deposit_fees(&pubkey, deposit_amount, options), + Ok(initial_balance + deposit_amount), + "New balance should be the sum of the initial balance and deposit amount" + ); + } + } + + #[test] + fn test_deposit_fees_invalid_rent_paying() { + let initial_balance = 0; + let genesis = create_genesis_config(initial_balance); + let pubkey = genesis.mint_keypair.pubkey(); + let mut genesis_config = genesis.genesis_config; + let rent = Rent::default(); + genesis_config.rent = rent; // Ensure rent is non-zero, as genesis_utils sets Rent::free by default + let bank = Bank::new_for_tests(&genesis_config); + let min_rent_exempt_balance = rent.minimum_balance(0); + + let deposit_amount = 500; + assert!(initial_balance + deposit_amount < min_rent_exempt_balance); + + // enable check_rent_paying + { + let options = DepositFeeOptions { + check_account_owner: false, + check_rent_paying: true, + }; + + assert_eq!( + bank.deposit_fees(&pubkey, deposit_amount, options), + Err(DepositFeeError::InvalidRentPayingAccount), + "Expected an error due to invalid rent paying account" + ); + } + + // disable check_rent_paying + { + let options = DepositFeeOptions { + check_account_owner: false, + check_rent_paying: false, + }; + + assert_eq!( + bank.deposit_fees(&pubkey, deposit_amount, options), + Ok(initial_balance + deposit_amount), + "New balance should be the sum of the initial balance and deposit amount" + ); + } + } + + #[test] + fn test_distribute_rent_to_validators_overflow() { + solana_logger::setup(); + + // These values are taken from the real cluster (testnet) + const RENT_TO_BE_DISTRIBUTED: u64 = 120_525; + const VALIDATOR_STAKE: u64 = 374_999_998_287_840; + + let validator_pubkey = solana_sdk::pubkey::new_rand(); + let mut genesis_config = + create_genesis_config_with_leader(10, &validator_pubkey, 
VALIDATOR_STAKE) + .genesis_config; + + let bank = Bank::new_for_tests(&genesis_config); + let old_validator_lamports = bank.get_balance(&validator_pubkey); + bank.distribute_rent_to_validators(&bank.vote_accounts(), RENT_TO_BE_DISTRIBUTED); + let new_validator_lamports = bank.get_balance(&validator_pubkey); + assert_eq!( + new_validator_lamports, + old_validator_lamports + RENT_TO_BE_DISTRIBUTED + ); + + genesis_config + .accounts + .remove(&feature_set::no_overflow_rent_distribution::id()) + .unwrap(); + let bank = std::panic::AssertUnwindSafe(Bank::new_for_tests(&genesis_config)); + let old_validator_lamports = bank.get_balance(&validator_pubkey); + let new_validator_lamports = std::panic::catch_unwind(|| { + bank.distribute_rent_to_validators(&bank.vote_accounts(), RENT_TO_BE_DISTRIBUTED); + bank.get_balance(&validator_pubkey) + }); + + if let Ok(new_validator_lamports) = new_validator_lamports { + info!("asserting overflowing incorrect rent distribution"); + assert_ne!( + new_validator_lamports, + old_validator_lamports + RENT_TO_BE_DISTRIBUTED + ); + } else { + info!("NOT-asserting overflowing incorrect rent distribution"); + } + } + + #[test] + fn test_distribute_rent_to_validators_rent_paying() { + solana_logger::setup(); + + const RENT_PER_VALIDATOR: u64 = 55; + const TOTAL_RENT: u64 = RENT_PER_VALIDATOR * 4; + + let empty_validator = ValidatorVoteKeypairs::new_rand(); + let rent_paying_validator = ValidatorVoteKeypairs::new_rand(); + let becomes_rent_exempt_validator = ValidatorVoteKeypairs::new_rand(); + let rent_exempt_validator = ValidatorVoteKeypairs::new_rand(); + let keypairs = vec![ + &empty_validator, + &rent_paying_validator, + &becomes_rent_exempt_validator, + &rent_exempt_validator, + ]; + let genesis_config_info = create_genesis_config_with_vote_accounts( + sol_to_lamports(1000.), + &keypairs, + vec![sol_to_lamports(1000.); 4], + ); + let mut genesis_config = genesis_config_info.genesis_config; + genesis_config.rent = Rent::default(); // Ensure rent is non-zero, as genesis_utils sets Rent::free by default + + for deactivate_feature in [false, true] { + if deactivate_feature { + genesis_config + .accounts + .remove(&feature_set::prevent_rent_paying_rent_recipients::id()) + .unwrap(); + } + let bank = Bank::new_for_tests(&genesis_config); + let rent = bank.rent_collector().rent; + let rent_exempt_minimum = rent.minimum_balance(0); + + // Make one validator have an empty identity account + let mut empty_validator_account = bank + .get_account_with_fixed_root(&empty_validator.node_keypair.pubkey()) + .unwrap(); + empty_validator_account.set_lamports(0); + bank.store_account( + &empty_validator.node_keypair.pubkey(), + &empty_validator_account, + ); + + // Make one validator almost rent-exempt, less RENT_PER_VALIDATOR + let mut becomes_rent_exempt_validator_account = bank + .get_account_with_fixed_root(&becomes_rent_exempt_validator.node_keypair.pubkey()) + .unwrap(); + becomes_rent_exempt_validator_account + .set_lamports(rent_exempt_minimum - RENT_PER_VALIDATOR); + bank.store_account( + &becomes_rent_exempt_validator.node_keypair.pubkey(), + &becomes_rent_exempt_validator_account, + ); + + // Make one validator rent-exempt + let mut rent_exempt_validator_account = bank + .get_account_with_fixed_root(&rent_exempt_validator.node_keypair.pubkey()) + .unwrap(); + rent_exempt_validator_account.set_lamports(rent_exempt_minimum); + bank.store_account( + &rent_exempt_validator.node_keypair.pubkey(), + &rent_exempt_validator_account, + ); + + let get_rent_state = |bank: &Bank, 
address: &Pubkey| -> RentState { + let account = bank + .get_account_with_fixed_root(address) + .unwrap_or_default(); + RentState::from_account(&account, &rent) + }; + + // Assert starting RentStates + assert_eq!( + get_rent_state(&bank, &empty_validator.node_keypair.pubkey()), + RentState::Uninitialized + ); + assert_eq!( + get_rent_state(&bank, &rent_paying_validator.node_keypair.pubkey()), + RentState::RentPaying { + lamports: 42, + data_size: 0, + } + ); + assert_eq!( + get_rent_state(&bank, &becomes_rent_exempt_validator.node_keypair.pubkey()), + RentState::RentPaying { + lamports: rent_exempt_minimum - RENT_PER_VALIDATOR, + data_size: 0, + } + ); + assert_eq!( + get_rent_state(&bank, &rent_exempt_validator.node_keypair.pubkey()), + RentState::RentExempt + ); + + let old_empty_validator_lamports = + bank.get_balance(&empty_validator.node_keypair.pubkey()); + let old_rent_paying_validator_lamports = + bank.get_balance(&rent_paying_validator.node_keypair.pubkey()); + let old_becomes_rent_exempt_validator_lamports = + bank.get_balance(&becomes_rent_exempt_validator.node_keypair.pubkey()); + let old_rent_exempt_validator_lamports = + bank.get_balance(&rent_exempt_validator.node_keypair.pubkey()); + + bank.distribute_rent_to_validators(&bank.vote_accounts(), TOTAL_RENT); + + let new_empty_validator_lamports = + bank.get_balance(&empty_validator.node_keypair.pubkey()); + let new_rent_paying_validator_lamports = + bank.get_balance(&rent_paying_validator.node_keypair.pubkey()); + let new_becomes_rent_exempt_validator_lamports = + bank.get_balance(&becomes_rent_exempt_validator.node_keypair.pubkey()); + let new_rent_exempt_validator_lamports = + bank.get_balance(&rent_exempt_validator.node_keypair.pubkey()); + + // Assert ending balances; rent should be withheld if test is active and ending RentState + // is RentPaying, ie. 
empty_validator and rent_paying_validator + assert_eq!( + if deactivate_feature { + old_empty_validator_lamports + RENT_PER_VALIDATOR + } else { + old_empty_validator_lamports + }, + new_empty_validator_lamports + ); + + assert_eq!( + if deactivate_feature { + old_rent_paying_validator_lamports + RENT_PER_VALIDATOR + } else { + old_rent_paying_validator_lamports + }, + new_rent_paying_validator_lamports + ); + + assert_eq!( + old_becomes_rent_exempt_validator_lamports + RENT_PER_VALIDATOR, + new_becomes_rent_exempt_validator_lamports + ); + + assert_eq!( + old_rent_exempt_validator_lamports + RENT_PER_VALIDATOR, + new_rent_exempt_validator_lamports + ); + + // Assert ending RentStates + assert_eq!( + if deactivate_feature { + RentState::RentPaying { + lamports: RENT_PER_VALIDATOR, + data_size: 0, + } + } else { + RentState::Uninitialized + }, + get_rent_state(&bank, &empty_validator.node_keypair.pubkey()), + ); + assert_eq!( + if deactivate_feature { + RentState::RentPaying { + lamports: old_rent_paying_validator_lamports + RENT_PER_VALIDATOR, + data_size: 0, + } + } else { + RentState::RentPaying { + lamports: old_rent_paying_validator_lamports, + data_size: 0, + } + }, + get_rent_state(&bank, &rent_paying_validator.node_keypair.pubkey()), + ); + assert_eq!( + RentState::RentExempt, + get_rent_state(&bank, &becomes_rent_exempt_validator.node_keypair.pubkey()), + ); + assert_eq!( + RentState::RentExempt, + get_rent_state(&bank, &rent_exempt_validator.node_keypair.pubkey()), + ); + } + } + + #[test] + fn test_distribute_rent_to_validators_invalid_owner() { + struct TestCase { + disable_owner_check: bool, + use_invalid_owner: bool, + } + + impl TestCase { + fn new(disable_owner_check: bool, use_invalid_owner: bool) -> Self { + Self { + disable_owner_check, + use_invalid_owner, + } + } + } + + for test_case in [ + TestCase::new(false, false), + TestCase::new(false, true), + TestCase::new(true, false), + TestCase::new(true, true), + ] { + let genesis_config_info = + create_genesis_config_with_leader(0, &Pubkey::new_unique(), 100); + let mut genesis_config = genesis_config_info.genesis_config; + genesis_config.rent = Rent::default(); // Ensure rent is non-zero, as genesis_utils sets Rent::free by default + + if test_case.disable_owner_check { + genesis_config + .accounts + .remove(&feature_set::validate_fee_collector_account::id()) + .unwrap(); + } + let bank = Bank::new_for_tests(&genesis_config); + + let initial_balance = 1_000_000; + let account_owner = if test_case.use_invalid_owner { + Pubkey::new_unique() + } else { + system_program::id() + }; + let account = AccountSharedData::new(initial_balance, 0, &account_owner); + bank.store_account(bank.collector_id(), &account); + + let initial_capitalization = bank.capitalization(); + let rent_fees = 100; + bank.distribute_rent_to_validators(&bank.vote_accounts(), rent_fees); + let new_capitalization = bank.capitalization(); + let new_balance = bank.get_balance(bank.collector_id()); + + if test_case.use_invalid_owner && !test_case.disable_owner_check { + assert_eq!(initial_balance, new_balance); + assert_eq!(initial_capitalization - rent_fees, new_capitalization); + assert_eq!(bank.rewards.read().unwrap().len(), 0); + } else { + assert_eq!(initial_balance + rent_fees, new_balance); + assert_eq!(initial_capitalization, new_capitalization); + assert_eq!(bank.rewards.read().unwrap().len(), 1); + } + } + } +} diff --git a/runtime/src/bank/metrics.rs b/runtime/src/bank/metrics.rs index 1fa33b2e7f92ee..ccf8c4837761db 100644 --- 
a/runtime/src/bank/metrics.rs +++ b/runtime/src/bank/metrics.rs @@ -39,6 +39,7 @@ pub(crate) struct NewBankTimings { pub(crate) feature_set_time_us: u64, pub(crate) ancestors_time_us: u64, pub(crate) update_epoch_time_us: u64, + pub(crate) recompilation_time_us: u64, pub(crate) update_sysvars_time_us: u64, pub(crate) fill_sysvar_cache_time_us: u64, } @@ -144,6 +145,7 @@ pub(crate) fn report_new_bank_metrics( ("feature_set_us", timings.feature_set_time_us, i64), ("ancestors_us", timings.ancestors_time_us, i64), ("update_epoch_us", timings.update_epoch_time_us, i64), + ("recompilation_time_us", timings.recompilation_time_us, i64), ("update_sysvars_us", timings.update_sysvars_time_us, i64), ( "fill_sysvar_cache_us", diff --git a/runtime/src/bank/replace_account.rs b/runtime/src/bank/replace_account.rs deleted file mode 100644 index 8d650aeebe7e87..00000000000000 --- a/runtime/src/bank/replace_account.rs +++ /dev/null @@ -1,191 +0,0 @@ -use { - super::Bank, - log::*, - solana_accounts_db::accounts_index::ZeroLamport, - solana_sdk::{ - account::{Account, AccountSharedData, ReadableAccount}, - bpf_loader_upgradeable::{self, UpgradeableLoaderState}, - pubkey::Pubkey, - }, - std::sync::atomic::Ordering::Relaxed, - thiserror::Error, -}; - -/// Errors returned by `replace_account` methods -#[derive(Debug, Error)] -pub enum ReplaceAccountError { - /// Account not found - #[error("Account not found: {0:?}")] - AccountNotFound(Pubkey), - /// Account exists - #[error("Account exists: {0:?}")] - AccountExists(Pubkey), - #[error("Bincode Error: {0}")] - BincodeError(#[from] bincode::Error), - /// Not an upgradeable program - #[error("Not an upgradeable program")] - NotAnUpgradeableProgram, -} - -/// Moves one account in place of another -/// `source`: the account to replace with -/// `destination`: the account to be replaced -fn move_account<U, V>( - bank: &Bank, - source_address: &Pubkey, - source_account: &V, - destination_address: &Pubkey, - destination_account: Option<&U>, -) where - U: ReadableAccount + Sync + ZeroLamport, - V: ReadableAccount + Sync + ZeroLamport, -{ - let (destination_lamports, destination_len) = match destination_account { - Some(destination_account) => ( - destination_account.lamports(), - destination_account.data().len(), - ), - None => (0, 0), - }; - - // Burn lamports in the destination account - bank.capitalization.fetch_sub(destination_lamports, Relaxed); - - // Transfer source account to destination account - bank.store_account(destination_address, source_account); - - // Clear source account - bank.store_account(source_address, &AccountSharedData::default()); - - bank.calculate_and_update_accounts_data_size_delta_off_chain( - destination_len, - source_account.data().len(), - ); -} - -/// Use to replace non-upgradeable programs by feature activation -/// `source`: the non-upgradeable program account to replace with -/// `destination`: the non-upgradeable program account to be replaced -#[allow(dead_code)] -pub(crate) fn replace_non_upgradeable_program_account( - bank: &Bank, - source_address: &Pubkey, - destination_address: &Pubkey, - datapoint_name: &'static str, -) -> Result<(), ReplaceAccountError> { - let destination_account = bank - .get_account_with_fixed_root(destination_address) - .ok_or(ReplaceAccountError::AccountNotFound(*destination_address))?; - let source_account = bank - .get_account_with_fixed_root(source_address) - .ok_or(ReplaceAccountError::AccountNotFound(*source_address))?; - - datapoint_info!(datapoint_name, ("slot", bank.slot, i64)); - - move_account(
bank, - source_address, - &source_account, - destination_address, - Some(&destination_account), - ); - - // Unload a program from the bank's cache - bank.loaded_programs_cache - .write() - .unwrap() - .remove_programs([*destination_address].into_iter()); - - Ok(()) -} - -/// Use to replace an empty account with a program by feature activation -/// Note: The upgradeable program should have both: -/// - Program account -/// - Program data account -/// `source`: the upgradeable program account to replace with -/// `destination`: the empty account to be replaced -pub(crate) fn replace_empty_account_with_upgradeable_program( - bank: &Bank, - source_address: &Pubkey, - destination_address: &Pubkey, - datapoint_name: &'static str, -) -> Result<(), ReplaceAccountError> { - // Must be attempting to replace an empty account with a program - // account _and_ data account - let source_account = bank - .get_account_with_fixed_root(source_address) - .ok_or(ReplaceAccountError::AccountNotFound(*source_address))?; - - let (destination_data_address, _) = Pubkey::find_program_address( - &[destination_address.as_ref()], - &bpf_loader_upgradeable::id(), - ); - let (source_data_address, _) = - Pubkey::find_program_address(&[source_address.as_ref()], &bpf_loader_upgradeable::id()); - - // Make sure the data within the source account is the PDA of its - // data account. This also means it has at least the necessary - // lamports for rent. - let source_state = bincode::deserialize::<UpgradeableLoaderState>(source_account.data())?; - if !matches!(source_state, UpgradeableLoaderState::Program { .. }) { - return Err(ReplaceAccountError::NotAnUpgradeableProgram); - } - - let source_data_account = bank - .get_account_with_fixed_root(&source_data_address) - .ok_or(ReplaceAccountError::AccountNotFound(source_data_address))?; - - // Make sure the destination account is empty - // We aren't going to check that there isn't a data account at - // the known program-derived address (ie.
`destination_data_address`), - // because if it exists, it will be overwritten - if bank - .get_account_with_fixed_root(destination_address) - .is_some() - { - return Err(ReplaceAccountError::AccountExists(*destination_address)); - } - let state = UpgradeableLoaderState::Program { - programdata_address: destination_data_address, - }; - let data = bincode::serialize(&state)?; - let lamports = bank.get_minimum_balance_for_rent_exemption(data.len()); - let created_program_account = Account { - lamports, - data, - owner: bpf_loader_upgradeable::id(), - executable: true, - rent_epoch: source_account.rent_epoch(), - }; - - datapoint_info!(datapoint_name, ("slot", bank.slot, i64)); - let change_in_capitalization = source_account.lamports().saturating_sub(lamports); - - // Replace the destination data account with the source one - // If the destination data account does not exist, it will be created - // If it does exist, it will be overwritten - move_account( - bank, - &source_data_address, - &source_data_account, - &destination_data_address, - bank.get_account_with_fixed_root(&destination_data_address) - .as_ref(), - ); - - // Write the source data account's PDA into the destination program account - move_account( - bank, - source_address, - &created_program_account, - destination_address, - None::<&AccountSharedData>, - ); - - // Any remaining lamports in the source program account are burnt - bank.capitalization - .fetch_sub(change_in_capitalization, Relaxed); - - Ok(()) -} diff --git a/runtime/src/bank/serde_snapshot.rs b/runtime/src/bank/serde_snapshot.rs index 17bba5638f2d47..e1746c52b79f75 100644 --- a/runtime/src/bank/serde_snapshot.rs +++ b/runtime/src/bank/serde_snapshot.rs @@ -3,8 +3,8 @@ mod tests { use { crate::{ bank::{ - epoch_accounts_hash_utils, Bank, BankTestConfig, EpochRewardStatus, - StartBlockHeightAndRewards, + epoch_accounts_hash_utils, test_utils as bank_test_utils, Bank, BankTestConfig, + EpochRewardStatus, StartBlockHeightAndRewards, }, genesis_utils::{activate_all_features, activate_feature}, runtime_config::RuntimeConfig, @@ -109,7 +109,7 @@ mod tests { // Create an account on a non-root fork let key1 = Keypair::new(); - bank1.deposit(&key1.pubkey(), 5).unwrap(); + bank_test_utils::deposit(&bank1, &key1.pubkey(), 5).unwrap(); // If setting an initial EAH, then the bank being snapshotted must be in the EAH calculation // window. 
Otherwise `bank_to_stream()` below will *not* include the EAH in the bank snapshot, @@ -123,11 +123,11 @@ mod tests { // Test new account let key2 = Keypair::new(); - bank2.deposit(&key2.pubkey(), 10).unwrap(); + bank_test_utils::deposit(&bank2, &key2.pubkey(), 10).unwrap(); assert_eq!(bank2.get_balance(&key2.pubkey()), 10); let key3 = Keypair::new(); - bank2.deposit(&key3.pubkey(), 0).unwrap(); + bank_test_utils::deposit(&bank2, &key3.pubkey(), 0).unwrap(); bank2.freeze(); bank2.squash(); diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 343e87975b57a5..cddac40fe3761f 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -7,13 +7,7 @@ use { *, }, crate::{ - accounts_background_service::{ - AbsRequestSender, PrunedBanksRequestHandler, SendDroppedBankCallback, - }, - bank::replace_account::{ - replace_empty_account_with_upgradeable_program, - replace_non_upgradeable_program_account, ReplaceAccountError, - }, + accounts_background_service::{PrunedBanksRequestHandler, SendDroppedBankCallback}, bank_client::BankClient, bank_forks::BankForks, epoch_rewards_hasher::hash_rewards_into_partitions, @@ -46,7 +40,8 @@ use { }, solana_logger, solana_program_runtime::{ - compute_budget::{self, ComputeBudget, MAX_COMPUTE_UNIT_LIMIT}, + compute_budget::ComputeBudget, + compute_budget_processor::{self, MAX_COMPUTE_UNIT_LIMIT}, declare_process_instruction, invoke_context::mock_process_instruction, loaded_programs::{LoadedProgram, LoadedProgramType, DELAY_VISIBILITY_SLOT_OFFSET}, @@ -277,7 +272,7 @@ fn test_bank_new() { assert_eq!(rent.lamports_per_byte_year, 5); } -fn create_simple_test_bank(lamports: u64) -> Bank { +pub(crate) fn create_simple_test_bank(lamports: u64) -> Bank { let (genesis_config, _mint_keypair) = create_genesis_config(lamports); Bank::new_for_tests(&genesis_config) } @@ -979,232 +974,6 @@ fn test_rent_distribution() { ); } -#[test] -fn test_distribute_rent_to_validators_overflow() { - solana_logger::setup(); - - // These values are taken from the real cluster (testnet) - const RENT_TO_BE_DISTRIBUTED: u64 = 120_525; - const VALIDATOR_STAKE: u64 = 374_999_998_287_840; - - let validator_pubkey = solana_sdk::pubkey::new_rand(); - let mut genesis_config = - create_genesis_config_with_leader(10, &validator_pubkey, VALIDATOR_STAKE).genesis_config; - - let bank = Bank::new_for_tests(&genesis_config); - let old_validator_lamports = bank.get_balance(&validator_pubkey); - bank.distribute_rent_to_validators(&bank.vote_accounts(), RENT_TO_BE_DISTRIBUTED); - let new_validator_lamports = bank.get_balance(&validator_pubkey); - assert_eq!( - new_validator_lamports, - old_validator_lamports + RENT_TO_BE_DISTRIBUTED - ); - - genesis_config - .accounts - .remove(&feature_set::no_overflow_rent_distribution::id()) - .unwrap(); - let bank = std::panic::AssertUnwindSafe(Bank::new_for_tests(&genesis_config)); - let old_validator_lamports = bank.get_balance(&validator_pubkey); - let new_validator_lamports = std::panic::catch_unwind(|| { - bank.distribute_rent_to_validators(&bank.vote_accounts(), RENT_TO_BE_DISTRIBUTED); - bank.get_balance(&validator_pubkey) - }); - - if let Ok(new_validator_lamports) = new_validator_lamports { - info!("asserting overflowing incorrect rent distribution"); - assert_ne!( - new_validator_lamports, - old_validator_lamports + RENT_TO_BE_DISTRIBUTED - ); - } else { - info!("NOT-asserting overflowing incorrect rent distribution"); - } -} - -#[test] -fn test_distribute_rent_to_validators_rent_paying() { - solana_logger::setup(); - - const 
RENT_PER_VALIDATOR: u64 = 55; - const TOTAL_RENT: u64 = RENT_PER_VALIDATOR * 4; - - let empty_validator = ValidatorVoteKeypairs::new_rand(); - let rent_paying_validator = ValidatorVoteKeypairs::new_rand(); - let becomes_rent_exempt_validator = ValidatorVoteKeypairs::new_rand(); - let rent_exempt_validator = ValidatorVoteKeypairs::new_rand(); - let keypairs = vec![ - &empty_validator, - &rent_paying_validator, - &becomes_rent_exempt_validator, - &rent_exempt_validator, - ]; - let genesis_config_info = create_genesis_config_with_vote_accounts( - sol_to_lamports(1000.), - &keypairs, - vec![sol_to_lamports(1000.); 4], - ); - let mut genesis_config = genesis_config_info.genesis_config; - genesis_config.rent = Rent::default(); // Ensure rent is non-zero, as genesis_utils sets Rent::free by default - - for deactivate_feature in [false, true] { - if deactivate_feature { - genesis_config - .accounts - .remove(&feature_set::prevent_rent_paying_rent_recipients::id()) - .unwrap(); - } - let bank = Bank::new_for_tests(&genesis_config); - let rent = bank.rent_collector().rent; - let rent_exempt_minimum = rent.minimum_balance(0); - - // Make one validator have an empty identity account - let mut empty_validator_account = bank - .get_account_with_fixed_root(&empty_validator.node_keypair.pubkey()) - .unwrap(); - empty_validator_account.set_lamports(0); - bank.store_account( - &empty_validator.node_keypair.pubkey(), - &empty_validator_account, - ); - - // Make one validator almost rent-exempt, less RENT_PER_VALIDATOR - let mut becomes_rent_exempt_validator_account = bank - .get_account_with_fixed_root(&becomes_rent_exempt_validator.node_keypair.pubkey()) - .unwrap(); - becomes_rent_exempt_validator_account - .set_lamports(rent_exempt_minimum - RENT_PER_VALIDATOR); - bank.store_account( - &becomes_rent_exempt_validator.node_keypair.pubkey(), - &becomes_rent_exempt_validator_account, - ); - - // Make one validator rent-exempt - let mut rent_exempt_validator_account = bank - .get_account_with_fixed_root(&rent_exempt_validator.node_keypair.pubkey()) - .unwrap(); - rent_exempt_validator_account.set_lamports(rent_exempt_minimum); - bank.store_account( - &rent_exempt_validator.node_keypair.pubkey(), - &rent_exempt_validator_account, - ); - - let get_rent_state = |bank: &Bank, address: &Pubkey| -> RentState { - let account = bank - .get_account_with_fixed_root(address) - .unwrap_or_default(); - RentState::from_account(&account, &rent) - }; - - // Assert starting RentStates - assert_eq!( - get_rent_state(&bank, &empty_validator.node_keypair.pubkey()), - RentState::Uninitialized - ); - assert_eq!( - get_rent_state(&bank, &rent_paying_validator.node_keypair.pubkey()), - RentState::RentPaying { - lamports: 42, - data_size: 0, - } - ); - assert_eq!( - get_rent_state(&bank, &becomes_rent_exempt_validator.node_keypair.pubkey()), - RentState::RentPaying { - lamports: rent_exempt_minimum - RENT_PER_VALIDATOR, - data_size: 0, - } - ); - assert_eq!( - get_rent_state(&bank, &rent_exempt_validator.node_keypair.pubkey()), - RentState::RentExempt - ); - - let old_empty_validator_lamports = bank.get_balance(&empty_validator.node_keypair.pubkey()); - let old_rent_paying_validator_lamports = - bank.get_balance(&rent_paying_validator.node_keypair.pubkey()); - let old_becomes_rent_exempt_validator_lamports = - bank.get_balance(&becomes_rent_exempt_validator.node_keypair.pubkey()); - let old_rent_exempt_validator_lamports = - bank.get_balance(&rent_exempt_validator.node_keypair.pubkey()); - - 
bank.distribute_rent_to_validators(&bank.vote_accounts(), TOTAL_RENT); - - let new_empty_validator_lamports = bank.get_balance(&empty_validator.node_keypair.pubkey()); - let new_rent_paying_validator_lamports = - bank.get_balance(&rent_paying_validator.node_keypair.pubkey()); - let new_becomes_rent_exempt_validator_lamports = - bank.get_balance(&becomes_rent_exempt_validator.node_keypair.pubkey()); - let new_rent_exempt_validator_lamports = - bank.get_balance(&rent_exempt_validator.node_keypair.pubkey()); - - // Assert ending balances; rent should be withheld if test is active and ending RentState - // is RentPaying, ie. empty_validator and rent_paying_validator - assert_eq!( - if deactivate_feature { - old_empty_validator_lamports + RENT_PER_VALIDATOR - } else { - old_empty_validator_lamports - }, - new_empty_validator_lamports - ); - - assert_eq!( - if deactivate_feature { - old_rent_paying_validator_lamports + RENT_PER_VALIDATOR - } else { - old_rent_paying_validator_lamports - }, - new_rent_paying_validator_lamports - ); - - assert_eq!( - old_becomes_rent_exempt_validator_lamports + RENT_PER_VALIDATOR, - new_becomes_rent_exempt_validator_lamports - ); - - assert_eq!( - old_rent_exempt_validator_lamports + RENT_PER_VALIDATOR, - new_rent_exempt_validator_lamports - ); - - // Assert ending RentStates - assert_eq!( - if deactivate_feature { - RentState::RentPaying { - lamports: RENT_PER_VALIDATOR, - data_size: 0, - } - } else { - RentState::Uninitialized - }, - get_rent_state(&bank, &empty_validator.node_keypair.pubkey()), - ); - assert_eq!( - if deactivate_feature { - RentState::RentPaying { - lamports: old_rent_paying_validator_lamports + RENT_PER_VALIDATOR, - data_size: 0, - } - } else { - RentState::RentPaying { - lamports: old_rent_paying_validator_lamports, - data_size: 0, - } - }, - get_rent_state(&bank, &rent_paying_validator.node_keypair.pubkey()), - ); - assert_eq!( - RentState::RentExempt, - get_rent_state(&bank, &becomes_rent_exempt_validator.node_keypair.pubkey()), - ); - assert_eq!( - RentState::RentExempt, - get_rent_state(&bank, &rent_exempt_validator.node_keypair.pubkey()), - ); - } -} - #[test] fn test_rent_exempt_executable_account() { let (mut genesis_config, mint_keypair) = create_genesis_config(100_000); @@ -2633,22 +2402,6 @@ fn test_transfer_to_sysvar() { assert_eq!(bank.get_balance(&sysvar_pubkey), 1_169_280); } -#[test] -fn test_bank_deposit() { - let bank = create_simple_test_bank(100); - - // Test new account - let key = solana_sdk::pubkey::new_rand(); - let new_balance = bank.deposit(&key, 10).unwrap(); - assert_eq!(new_balance, 10); - assert_eq!(bank.get_balance(&key), 10); - - // Existing account - let new_balance = bank.deposit(&key, 3).unwrap(); - assert_eq!(new_balance, 13); - assert_eq!(bank.get_balance(&key), 13); -} - #[test] fn test_bank_withdraw() { let bank = create_simple_test_bank(100); @@ -2660,7 +2413,7 @@ fn test_bank_withdraw() { Err(TransactionError::AccountNotFound) ); - bank.deposit(&key, 3).unwrap(); + test_utils::deposit(&bank, &key, 3).unwrap(); assert_eq!(bank.get_balance(&key), 3); // Low balance @@ -6679,7 +6432,7 @@ fn test_clean_nonrooted() { // Store some lamports in bank 1 let some_lamports = 123; let bank1 = Arc::new(Bank::new_from_parent(bank0.clone(), &Pubkey::default(), 1)); - bank1.deposit(&pubkey0, some_lamports).unwrap(); + test_utils::deposit(&bank1, &pubkey0, some_lamports).unwrap(); goto_end_of_slot(bank1.clone()); bank1.freeze(); bank1.flush_accounts_cache_slot_for_tests(); @@ -6689,7 +6442,7 @@ fn test_clean_nonrooted() 
{ // Store some lamports for pubkey1 in bank 2, root bank 2 // bank2's parent is bank0 let bank2 = Arc::new(Bank::new_from_parent(bank0, &Pubkey::default(), 2)); - bank2.deposit(&pubkey1, some_lamports).unwrap(); + test_utils::deposit(&bank2, &pubkey1, some_lamports).unwrap(); bank2.store_account(&pubkey0, &account_zero); goto_end_of_slot(bank2.clone()); bank2.freeze(); @@ -6704,7 +6457,7 @@ fn test_clean_nonrooted() { bank2.clean_accounts_for_tests(); let bank3 = Arc::new(Bank::new_from_parent(bank2, &Pubkey::default(), 3)); - bank3.deposit(&pubkey1, some_lamports + 1).unwrap(); + test_utils::deposit(&bank3, &pubkey1, some_lamports + 1).unwrap(); goto_end_of_slot(bank3.clone()); bank3.freeze(); bank3.squash(); @@ -6758,8 +6511,8 @@ fn test_shrink_candidate_slots_cached() { // Store some lamports in bank 1 let some_lamports = 123; let bank1 = Arc::new(new_from_parent(bank0)); - bank1.deposit(&pubkey1, some_lamports).unwrap(); - bank1.deposit(&pubkey2, some_lamports).unwrap(); + test_utils::deposit(&bank1, &pubkey1, some_lamports).unwrap(); + test_utils::deposit(&bank1, &pubkey2, some_lamports).unwrap(); goto_end_of_slot(bank1.clone()); bank1.freeze(); bank1.squash(); @@ -6769,7 +6522,7 @@ fn test_shrink_candidate_slots_cached() { // Store some lamports for pubkey1 in bank 2, root bank 2 let bank2 = Arc::new(new_from_parent(bank1)); - bank2.deposit(&pubkey1, some_lamports).unwrap(); + test_utils::deposit(&bank2, &pubkey1, some_lamports).unwrap(); bank2.store_account(&pubkey0, &account0); goto_end_of_slot(bank2.clone()); bank2.freeze(); @@ -6966,7 +6719,7 @@ fn test_add_builtin_account_inherited_cap_while_replacing() { assert_ne!(bank.capitalization(), bank.calculate_capitalization(true)); continue; } - bank.deposit(&program_id, 10).unwrap(); + test_utils::deposit(&bank, &program_id, 10).unwrap(); if pass == 2 { add_root_and_flush_write_cache(&bank); assert_eq!(bank.capitalization(), bank.calculate_capitalization(true)); @@ -6993,7 +6746,7 @@ fn test_add_builtin_account_squatted_while_not_replacing() { assert_ne!(bank.capitalization(), bank.calculate_capitalization(true)); continue; } - bank.deposit(&program_id, 10).unwrap(); + test_utils::deposit(&bank, &program_id, 10).unwrap(); if pass == 1 { add_root_and_flush_write_cache(&bank); assert_eq!(bank.capitalization(), bank.calculate_capitalization(true)); @@ -7116,7 +6869,7 @@ fn test_add_precompiled_account_inherited_cap_while_replacing() { assert_ne!(bank.capitalization(), bank.calculate_capitalization(true)); continue; } - bank.deposit(&program_id, 10).unwrap(); + test_utils::deposit(&bank, &program_id, 10).unwrap(); if pass == 2 { add_root_and_flush_write_cache(&bank); assert_eq!(bank.capitalization(), bank.calculate_capitalization(true)); @@ -7144,7 +6897,7 @@ fn test_add_precompiled_account_squatted_while_not_replacing() { assert_ne!(bank.capitalization(), bank.calculate_capitalization(true)); continue; } - bank.deposit(&program_id, 10).unwrap(); + test_utils::deposit(&bank, &program_id, 10).unwrap(); if pass == 1 { add_root_and_flush_write_cache(&bank); assert_eq!(bank.capitalization(), bank.calculate_capitalization(true)); @@ -7235,7 +6988,7 @@ fn test_bank_load_program() { programdata_account.set_rent_epoch(1); bank.store_account_and_update_capitalization(&key1, &program_account); bank.store_account_and_update_capitalization(&programdata_key, &programdata_account); - let program = bank.load_program(&key1, false); + let program = bank.load_program(&key1, false, None); assert_matches!(program.program, LoadedProgramType::LegacyV1(_)); 
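// ---------------------------------------------------------------------------------------------
// Editor's note: the `test_utils::deposit(&bank, ..)` calls swapped in throughout these tests
// replace the removed `Bank::deposit` method (see `test_bank_deposit` being deleted above). The
// helper itself is not shown in this diff; a plausible sketch, assuming it lives in the new
// `bank::test_utils` module (hypothetical reconstruction, not the verbatim implementation):
//
//     pub fn deposit(
//         bank: &Bank,
//         pubkey: &Pubkey,
//         lamports: u64,
//     ) -> std::result::Result<u64, LamportsError> {
//         // Credit the account (creating it if absent) and return the new balance.
//         // No rent is collected here; rent should only apply to real transactions.
//         let mut account = bank.get_account(pubkey).unwrap_or_default();
//         account.checked_add_lamports(lamports)?;
//         bank.store_account(pubkey, &account);
//         Ok(account.lamports())
//     }
// ---------------------------------------------------------------------------------------------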
assert_eq!( program.account_size, @@ -7390,7 +7143,7 @@ fn test_bpf_loader_upgradeable_deploy_with_max_len() { assert_eq!(*elf.get(i).unwrap(), *byte); } - let loaded_program = bank.load_program(&program_keypair.pubkey(), false); + let loaded_program = bank.load_program(&program_keypair.pubkey(), false, None); // Invoke deployed program mock_process_instruction( @@ -7980,7 +7733,7 @@ fn test_compute_active_feature_set() { assert!(!feature_set.is_active(&test_feature)); // Depositing into the `test_feature` account should do nothing - bank.deposit(&test_feature, 42).unwrap(); + test_utils::deposit(&bank, &test_feature, 42).unwrap(); let (feature_set, new_activations) = bank.compute_active_feature_set(true); assert!(new_activations.is_empty()); assert!(!feature_set.is_active(&test_feature)); @@ -8013,403 +7766,42 @@ fn test_compute_active_feature_set() { assert!(feature_set.is_active(&test_feature)); } -fn test_program_replace_set_up_account<T: serde::Serialize>( - bank: &Bank, - pubkey: &Pubkey, - lamports: u64, - state: &T, - owner: &Pubkey, - executable: bool, -) -> AccountSharedData { - let data_len = bincode::serialized_size(state).unwrap() as usize; - let mut account = AccountSharedData::from(Account { - lamports, - owner: *owner, - executable, - data: vec![0u8; data_len], - ..Account::default() - }); - account.serialize_data(state).unwrap(); - bank.store_account_and_update_capitalization(pubkey, &account); - assert_eq!(bank.get_balance(pubkey), lamports); - account -} - #[test] -fn test_replace_non_upgradeable_program_account() { - // Non-upgradeable program - // - Destination: [Destination program data] - // - Source: [*Source program data] - // - // Should replace the destination program account with the source program account: - // - Destination: [*Source program data] - let bpf_id = bpf_loader::id(); - let bank = create_simple_test_bank(0); - - let destination = Pubkey::new_unique(); - let destination_state = vec![0u8; 4]; - let destination_lamports = bank.get_minimum_balance_for_rent_exemption(destination_state.len()); - test_program_replace_set_up_account( - &bank, - &destination, - destination_lamports, - &destination_state, - &bpf_id, - true, - ); - - let source = Pubkey::new_unique(); - let source_state = vec![6; 30]; - let source_lamports = bank.get_minimum_balance_for_rent_exemption(source_state.len()); - let check_source_account = test_program_replace_set_up_account( - &bank, - &source, - source_lamports, - &source_state, - &bpf_id, - true, - ); - let check_data_account_data = check_source_account.data().to_vec(); - - let original_capitalization = bank.capitalization(); - - replace_non_upgradeable_program_account( - &bank, - &source, - &destination, - "bank-apply_program_replacement", - ) - .unwrap(); - - // Destination program account balance is now the source program account's balance - assert_eq!(bank.get_balance(&destination), source_lamports); - - // Source program account is now empty - assert_eq!(bank.get_balance(&source), 0); - - // Destination program account now holds the source program data, ie: - // - Destination: [*Source program data] - let destination_account = bank.get_account(&destination).unwrap(); - assert_eq!(destination_account.data(), &check_data_account_data); - - // Ownership & executable match the source program account - assert_eq!(destination_account.owner(), &bpf_id); - assert!(destination_account.executable()); - - // The destination account's original lamports balance was burnt - assert_eq!( - bank.capitalization(), - original_capitalization - destination_lamports
- ); -} - -#[test_case( - Pubkey::new_unique(), - None; - "Empty destination account _without_ corresponding data account" -)] -#[test_case( - Pubkey::new_unique(), - Some(vec![4; 40]); - "Empty destination account _with_ corresponding data account" -)] -#[test_case( - feature::id(), // `Feature11111111` - None; - "Native destination account _without_ corresponding data account" -)] -#[test_case( - feature::id(), // `Feature11111111` - Some(vec![4; 40]); - "Native destination account _with_ corresponding data account" -)] -fn test_replace_empty_account_with_upgradeable_program_success( - destination: Pubkey, - maybe_destination_data_state: Option<Vec<u8>>, // Inner data of the destination program _data_ account -) { - // Ensures a program account and data account are created when replacing an - // empty account, ie: - // - Destination: PDA(DestinationData) - // - DestinationData: [Destination program data] - // - // If the destination data account exists, it will be overwritten - let bpf_upgradeable_id = bpf_loader_upgradeable::id(); - let bank = create_simple_test_bank(0); +fn test_program_replacement() { + let mut bank = create_simple_test_bank(0); - // Create the test source accounts, one for program and one for data - let source = Pubkey::new_unique(); - let (source_data, _) = Pubkey::find_program_address(&[source.as_ref()], &bpf_upgradeable_id); - let source_state = UpgradeableLoaderState::Program { - programdata_address: source_data, - }; - let source_lamports = - bank.get_minimum_balance_for_rent_exemption(UpgradeableLoaderState::size_of_program()); - let source_data_state = vec![6; 30]; - let source_data_lamports = bank.get_minimum_balance_for_rent_exemption(source_data_state.len()); - test_program_replace_set_up_account( - &bank, - &source, - source_lamports, - &source_state, - &bpf_upgradeable_id, - true, + // Setup original program account + let old_address = Pubkey::new_unique(); + let new_address = Pubkey::new_unique(); + bank.store_account_and_update_capitalization( + &old_address, + &AccountSharedData::from(Account { + lamports: 100, + ..Account::default() + }), ); - let check_source_data_account = test_program_replace_set_up_account( - &bank, - &source_data, - source_data_lamports, - &source_data_state, - &bpf_upgradeable_id, - false, - ); - let check_data_account_data = check_source_data_account.data().to_vec(); + assert_eq!(bank.get_balance(&old_address), 100); - // Derive the well-known PDA address for the destination data account - let (destination_data, _) = - Pubkey::find_program_address(&[destination.as_ref()], &bpf_upgradeable_id); - - // Determine the lamports that will be burnt after the replacement - let burnt_after_rent = if let Some(destination_data_state) = maybe_destination_data_state { - // Create the data account if necessary - let destination_data_lamports = - bank.get_minimum_balance_for_rent_exemption(destination_data_state.len()); - test_program_replace_set_up_account( - &bank, - &destination_data, - destination_data_lamports, - &destination_data_state, - &bpf_upgradeable_id, - false, - ); - destination_data_lamports + source_lamports - - bank.get_minimum_balance_for_rent_exemption(UpgradeableLoaderState::size_of_program()) - } else { - source_lamports - - bank.get_minimum_balance_for_rent_exemption(UpgradeableLoaderState::size_of_program()) - }; - - let original_capitalization = bank.capitalization(); - - // Do the replacement - replace_empty_account_with_upgradeable_program( - &bank, - &source, - &destination,
"bank-apply_empty_account_replacement_for_program", - ) - .unwrap(); - - // Destination program account was created and funded to pay for minimum rent - // for the PDA - assert_eq!( - bank.get_balance(&destination), - bank.get_minimum_balance_for_rent_exemption(UpgradeableLoaderState::size_of_program()), - ); - - // Destination data account was created, now holds the source data account's balance - assert_eq!(bank.get_balance(&destination_data), source_data_lamports); - - // Source program accounts are now empty - assert_eq!(bank.get_balance(&source), 0); - assert_eq!(bank.get_balance(&source_data), 0); - - // Destination program account holds the PDA, ie: - // - Destination: PDA(DestinationData) - let destination_account = bank.get_account(&destination).unwrap(); - assert_eq!( - destination_account.data(), - &bincode::serialize(&UpgradeableLoaderState::Program { - programdata_address: destination_data - }) - .unwrap(), - ); - - // Destination data account holds the source data, ie: - // - DestinationData: [*Source program data] - let destination_data_account = bank.get_account(&destination_data).unwrap(); - assert_eq!(destination_data_account.data(), &check_data_account_data); - - // Ownership & executable match the source program accounts - assert_eq!(destination_account.owner(), &bpf_upgradeable_id); - assert!(destination_account.executable()); - assert_eq!(destination_data_account.owner(), &bpf_upgradeable_id); - assert!(!destination_data_account.executable()); - - // The remaining lamports from both program accounts minus the rent-exempt - // minimum were burnt - assert_eq!( - bank.capitalization(), - original_capitalization - burnt_after_rent - ); -} - -#[test_case( - None; - "Existing destination account _without_ corresponding data account" -)] -#[test_case( - Some(vec![4; 40]); - "Existing destination account _with_ corresponding data account" -)] -fn test_replace_empty_account_with_upgradeable_program_fail_when_account_exists( - maybe_destination_data_state: Option>, // Inner data of the destination program _data_ account -) { - // Should not be allowed to execute replacement - let bpf_upgradeable_id = bpf_loader_upgradeable::id(); - let bank = create_simple_test_bank(0); - - // Create the test destination account with some arbitrary data and lamports balance - let destination = Pubkey::new_unique(); - let destination_state = vec![0, 0, 0, 0]; // Arbitrary bytes, doesn't matter - let destination_lamports = bank.get_minimum_balance_for_rent_exemption(destination_state.len()); - let destination_account = test_program_replace_set_up_account( - &bank, - &destination, - destination_lamports, - &destination_state, - &bpf_upgradeable_id, - true, - ); - - // Create the test source accounts, one for program and one for data - let source = Pubkey::new_unique(); - let (source_data, _) = Pubkey::find_program_address(&[source.as_ref()], &bpf_upgradeable_id); - let source_state = UpgradeableLoaderState::Program { - programdata_address: source_data, - }; - let source_lamports = - bank.get_minimum_balance_for_rent_exemption(UpgradeableLoaderState::size_of_program()); - let source_data_state = vec![6; 30]; - let source_data_lamports = bank.get_minimum_balance_for_rent_exemption(source_data_state.len()); - let source_account = test_program_replace_set_up_account( - &bank, - &source, - source_lamports, - &source_state, - &bpf_upgradeable_id, - true, - ); - let source_data_account = test_program_replace_set_up_account( - &bank, - &source_data, - source_data_lamports, - &source_data_state, - 
&bpf_upgradeable_id, - false, - ); - - // Derive the well-known PDA address for the destination data account - let (destination_data, _) = - Pubkey::find_program_address(&[destination.as_ref()], &bpf_upgradeable_id); - - // Create the data account if necessary - let destination_data_account = - if let Some(destination_data_state) = maybe_destination_data_state { - let destination_data_lamports = - bank.get_minimum_balance_for_rent_exemption(destination_data_state.len()); - let destination_data_account = test_program_replace_set_up_account( - &bank, - &destination_data, - destination_data_lamports, - &destination_data_state, - &bpf_upgradeable_id, - false, - ); - Some(destination_data_account) - } else { - None - }; + // Setup new program account + let new_program_account = AccountSharedData::from(Account { + lamports: 123, + ..Account::default() + }); + bank.store_account_and_update_capitalization(&new_address, &new_program_account); + assert_eq!(bank.get_balance(&new_address), 123); let original_capitalization = bank.capitalization(); - // Attempt the replacement - assert_matches!( - replace_empty_account_with_upgradeable_program( - &bank, - &source, - &destination, - "bank-apply_empty_account_replacement_for_program", - ) - .unwrap_err(), - ReplaceAccountError::AccountExists(..) - ); - - // Everything should be unchanged - assert_eq!(bank.get_account(&destination).unwrap(), destination_account); - if let Some(destination_data_account) = destination_data_account { - assert_eq!( - bank.get_account(&destination_data).unwrap(), - destination_data_account - ); - } - assert_eq!(bank.get_account(&source).unwrap(), source_account); - assert_eq!(bank.get_account(&source_data).unwrap(), source_data_account); - assert_eq!(bank.capitalization(), original_capitalization); -} - -#[test] -fn test_replace_empty_account_with_upgradeable_program_fail_when_not_upgradeable_program() { - // Should not be allowed to execute replacement - let bpf_upgradeable_id = bpf_loader_upgradeable::id(); - let bank = create_simple_test_bank(0); + bank.replace_program_account(&old_address, &new_address, "bank-apply_program_replacement"); - // Create the test destination account with some arbitrary data and lamports balance - let destination = Pubkey::new_unique(); - let destination_state = vec![0, 0, 0, 0]; // Arbitrary bytes, doesn't matter - let destination_lamports = bank.get_minimum_balance_for_rent_exemption(destination_state.len()); - let destination_account = test_program_replace_set_up_account( - &bank, - &destination, - destination_lamports, - &destination_state, - &bpf_upgradeable_id, - true, - ); + // New program account is now empty + assert_eq!(bank.get_balance(&new_address), 0); - // Create the test source accounts, one for program and one for data - let source = Pubkey::new_unique(); - let (source_data, _) = Pubkey::find_program_address(&[source.as_ref()], &bpf_upgradeable_id); - let source_state = [0, 0, 0, 0]; // Arbitrary bytes, NOT an upgradeable program - let source_lamports = - bank.get_minimum_balance_for_rent_exemption(UpgradeableLoaderState::size_of_program()); - let source_data_state = vec![6; 30]; - let source_data_lamports = bank.get_minimum_balance_for_rent_exemption(source_data_state.len()); - let source_account = test_program_replace_set_up_account( - &bank, - &source, - source_lamports, - &source_state, - &bpf_upgradeable_id, - true, - ); - let source_data_account = test_program_replace_set_up_account( - &bank, - &source_data, - source_data_lamports, - &source_data_state, - 
&bpf_upgradeable_id, - false, - ); - - let original_capitalization = bank.capitalization(); + // Old program account holds the new program account + assert_eq!(bank.get_account(&old_address), Some(new_program_account)); - // Attempt the replacement - assert_matches!( - replace_empty_account_with_upgradeable_program( - &bank, - &source, - &destination, - "bank-apply_empty_account_replacement_for_program", - ) - .unwrap_err(), - ReplaceAccountError::NotAnUpgradeableProgram - ); - - // Everything should be unchanged - assert_eq!(bank.get_account(&destination).unwrap(), destination_account); - assert_eq!(bank.get_account(&source).unwrap(), source_account); - assert_eq!(bank.get_account(&source_data).unwrap(), source_data_account); - assert_eq!(bank.capitalization(), original_capitalization); + // Lamports in the old token account were burnt + assert_eq!(bank.capitalization(), original_capitalization - 100); } fn min_rent_exempt_balance_for_sysvars(bank: &Bank, sysvar_ids: &[Pubkey]) -> u64 { @@ -10120,7 +9512,9 @@ fn test_compute_budget_program_noop() { assert_eq!( *compute_budget, ComputeBudget { - compute_unit_limit: compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, + compute_unit_limit: u64::from( + compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT + ), heap_size: 48 * 1024, ..ComputeBudget::default() } @@ -10133,7 +9527,7 @@ fn test_compute_budget_program_noop() { let message = Message::new( &[ ComputeBudgetInstruction::set_compute_unit_limit( - compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, + compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, ), ComputeBudgetInstruction::request_heap_frame(48 * 1024), Instruction::new_with_bincode(program_id, &0, vec![]), @@ -10163,7 +9557,9 @@ fn test_compute_request_instruction() { assert_eq!( *compute_budget, ComputeBudget { - compute_unit_limit: compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, + compute_unit_limit: u64::from( + compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT + ), heap_size: 48 * 1024, ..ComputeBudget::default() } @@ -10176,7 +9572,7 @@ fn test_compute_request_instruction() { let message = Message::new( &[ ComputeBudgetInstruction::set_compute_unit_limit( - compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, + compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, ), ComputeBudgetInstruction::request_heap_frame(48 * 1024), Instruction::new_with_bincode(program_id, &0, vec![]), @@ -10213,7 +9609,9 @@ fn test_failed_compute_request_instruction() { assert_eq!( *compute_budget, ComputeBudget { - compute_unit_limit: compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, + compute_unit_limit: u64::from( + compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT + ), heap_size: 48 * 1024, ..ComputeBudget::default() } @@ -10444,14 +9842,19 @@ fn calculate_test_fee( remove_congestion_multiplier: bool, ) -> u64 { let mut feature_set = FeatureSet::all_enabled(); - feature_set.deactivate(&remove_deprecated_request_unit_ix::id()); + feature_set.deactivate(&solana_sdk::feature_set::remove_deprecated_request_unit_ix::id()); if !support_set_accounts_data_size_limit_ix { - feature_set.deactivate(&include_loaded_accounts_data_size_in_fee_calculation::id()); + feature_set.deactivate( + &solana_sdk::feature_set::include_loaded_accounts_data_size_in_fee_calculation::id(), + ); } let budget_limits = - ComputeBudget::fee_budget_limits(message.program_instructions_iter(), &feature_set); + 
process_compute_budget_instructions(message.program_instructions_iter(), &feature_set) + .unwrap_or_default() + .into(); + fee_structure.calculate_fee( message, lamports_per_signature, @@ -11478,7 +10881,9 @@ fn test_rent_state_list_len() { ); let compute_budget = bank.runtime_config.compute_budget.unwrap_or_else(|| { - ComputeBudget::new(compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64) + ComputeBudget::new(u64::from( + compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, + )) }); let transaction_context = TransactionContext::new( loaded_txs[0].0.as_ref().unwrap().accounts.clone(), @@ -12496,7 +11901,7 @@ fn test_is_in_slot_hashes_history() { } #[test] -fn test_runtime_feature_enable_with_program_cache() { +fn test_feature_activation_loaded_programs_recompilation_phase() { solana_logger::setup(); // Bank Setup @@ -12562,20 +11967,8 @@ fn test_runtime_feature_enable_with_program_cache() { &feature::create_account(&Feature { activated_at: None }, feature_account_balance), ); - // Reroot to call LoadedPrograms::prune() and end the current recompilation phase goto_end_of_slot(bank.clone()); - bank_forks - .write() - .unwrap() - .insert(Arc::into_inner(bank).unwrap()); - let bank = bank_forks.read().unwrap().working_bank(); - bank_forks.read().unwrap().prune_program_cache(bank.slot); - bank_forks - .write() - .unwrap() - .set_root(bank.slot, &AbsRequestSender::default(), None); - - // Advance to next epoch, which starts the next recompilation phase + // Advance to next epoch, which starts the recompilation phase let bank = new_from_parent_next_epoch(bank, 1); // Execute after feature is enabled to check it was filtered out and reverified. @@ -13359,7 +12752,7 @@ fn test_program_execution_restricted_for_stake_account_in_reward_period() { // Push a dummy blockhash, so that the latest_blockhash() for the transfer transaction in each // iteration are different. Otherwise, all those transactions will be the same, and will not be // executed by the bank except the first one. 
- bank.register_recent_blockhash(&Hash::new_unique()); + bank.register_unique_recent_blockhash_for_test(); previous_bank = Arc::new(bank); } } diff --git a/runtime/src/bank_forks.rs b/runtime/src/bank_forks.rs index b24f3ed3e8ac0f..dabd90e4c2c835 100644 --- a/runtime/src/bank_forks.rs +++ b/runtime/src/bank_forks.rs @@ -4,13 +4,20 @@ use { crate::{ accounts_background_service::{AbsRequestSender, SnapshotRequest, SnapshotRequestKind}, bank::{epoch_accounts_hash_utils, Bank, SquashTiming}, - installed_scheduler_pool::BankWithScheduler, + installed_scheduler_pool::{ + BankWithScheduler, InstalledSchedulerPoolArc, SchedulingContext, + }, snapshot_config::SnapshotConfig, }, log::*, solana_measure::measure::Measure, - solana_program_runtime::loaded_programs::{BlockRelation, ForkGraph, WorkingSlot}, - solana_sdk::{clock::Slot, feature_set, hash::Hash, timing}, + solana_program_runtime::loaded_programs::{BlockRelation, ForkGraph}, + solana_sdk::{ + clock::{Epoch, Slot}, + feature_set, + hash::Hash, + timing, + }, std::{ collections::{hash_map::Entry, HashMap, HashSet}, ops::Index, @@ -67,6 +74,7 @@ pub struct BankForks { last_accounts_hash_slot: Slot, in_vote_only_mode: Arc<AtomicBool>, highest_slot_at_startup: Slot, + scheduler_pool: Option<InstalledSchedulerPoolArc>, } impl Index<u64> for BankForks { @@ -198,6 +206,7 @@ impl BankForks { last_accounts_hash_slot: root, in_vote_only_mode: Arc::new(AtomicBool::new(false)), highest_slot_at_startup: 0, + scheduler_pool: None, })); for bank in bank_forks.read().unwrap().banks.values() { @@ -210,11 +219,26 @@ impl BankForks { bank_forks } + pub fn install_scheduler_pool(&mut self, pool: InstalledSchedulerPoolArc) { + info!("Installed new scheduler_pool into bank_forks: {:?}", pool); + assert!( + self.scheduler_pool.replace(pool).is_none(), + "Reinstalling scheduler pool isn't supported" + ); + } + pub fn insert(&mut self, mut bank: Bank) -> BankWithScheduler { bank.check_program_modification_slot = self.root.load(Ordering::Relaxed) < self.highest_slot_at_startup; - let bank = BankWithScheduler::new_without_scheduler(Arc::new(bank)); + let bank = Arc::new(bank); + let bank = if let Some(scheduler_pool) = &self.scheduler_pool { + let context = SchedulingContext::new(bank.clone()); + let scheduler = scheduler_pool.take_scheduler(context); + BankWithScheduler::new(bank, Some(scheduler)) + } else { + BankWithScheduler::new_without_scheduler(bank) + }; let prev = self.banks.insert(bank.slot(), bank.clone_with_scheduler()); assert!(prev.is_none()); let slot = bank.slot(); @@ -680,9 +704,11 @@ impl ForkGraph for BankForks { (a == b) .then_some(BlockRelation::Equal) .or_else(|| { - self.banks - .get(&b) - .and_then(|bank| bank.is_ancestor(a).then_some(BlockRelation::Ancestor)) + self.banks.get(&b).and_then(|bank| { + bank.ancestors + .contains_key(&a) + .then_some(BlockRelation::Ancestor) + }) }) .or_else(|| { self.descendants.get(&b).and_then(|slots| { @@ -693,6 +719,10 @@ impl ForkGraph for BankForks { }) .unwrap_or(BlockRelation::Unknown) } + + fn slot_epoch(&self, slot: Slot) -> Option<Epoch> { + self.banks.get(&slot).map(|bank| bank.epoch()) + } } #[cfg(test)] diff --git a/runtime/src/inline_feature_gate_program.rs b/runtime/src/inline_feature_gate_program.rs deleted file mode 100644 index a2c647bbda22a0..00000000000000 --- a/runtime/src/inline_feature_gate_program.rs +++ /dev/null @@ -1,5 +0,0 @@ -//!
Contains replacement program IDs for the feature gate program - -pub(crate) mod noop_program { - solana_sdk::declare_id!("37Yr1mVPdfUuy6oC2yPjWtg8xyyVi33TYYqyNQocsAkT"); -} diff --git a/runtime/src/installed_scheduler_pool.rs b/runtime/src/installed_scheduler_pool.rs index 9fd3a5546097cc..dde82f2a63f890 100644 --- a/runtime/src/installed_scheduler_pool.rs +++ b/runtime/src/installed_scheduler_pool.rs @@ -1,19 +1,198 @@ -//! Currently, there's only one auxiliary type called BankWithScheduler.. This file will be -//! populated by later PRs to align with the filename. +//! Transaction processing glue code, mainly consisting of Object-safe traits +//! +//! [InstalledSchedulerPool] lends one of pooled [InstalledScheduler]s as wrapped in +//! [BankWithScheduler], which can be used by `ReplayStage` and `BankingStage` for transaction +//! execution. After use, the scheduler will be returned to the pool. +//! +//! [InstalledScheduler] can be fed with [SanitizedTransaction]s. Then, it schedules those +//! executions and commits those results into the associated _bank_. +//! +//! It's generally assumed that each [InstalledScheduler] is backed by multiple threads for +//! parallel transaction processing and there are multiple independent schedulers inside a single +//! instance of [InstalledSchedulerPool]. +//! +//! Dynamic dispatch was inevitable due to the desire to piggyback on +//! [BankForks](crate::bank_forks::BankForks)'s pruning for scheduler lifecycle management as the +//! common place both for `ReplayStage` and `BankingStage` and the resultant need of invoking +//! actual implementations provided by the dependent crate (`solana-unified-scheduler-pool`, which +//! in turn depends on `solana-ledger`, which in turn depends on `solana-runtime`), avoiding a +//! cyclic dependency. +//! +//! See [InstalledScheduler] for visualized interaction. -#[cfg(feature = "dev-context-only-utils")] -use qualifier_attr::qualifiers; use { crate::bank::Bank, + log::*, + solana_program_runtime::timings::ExecuteTimings, + solana_sdk::{ + hash::Hash, + slot_history::Slot, + transaction::{Result, SanitizedTransaction}, + }, std::{ fmt::Debug, ops::Deref, sync::{Arc, RwLock}, }, }; +#[cfg(feature = "dev-context-only-utils")] +use {mockall::automock, qualifier_attr::qualifiers}; + +pub trait InstalledSchedulerPool: Send + Sync + Debug { + fn take_scheduler(&self, context: SchedulingContext) -> DefaultInstalledSchedulerBox; +} + +#[cfg_attr(doc, aquamarine::aquamarine)] +/// Schedules, executes, and commits transactions under encapsulated implementation +/// +/// The following chart illustrates the ownership/reference interaction between inter-dependent +/// objects across crates: +/// +/// ```mermaid +/// graph TD +/// Bank["Arc#lt;Bank#gt;"] +/// +/// subgraph solana-runtime +/// BankForks; +/// BankWithScheduler; +/// Bank; +/// LoadExecuteAndCommitTransactions(["load_execute_and_commit_transactions()"]); +/// SchedulingContext; +/// InstalledSchedulerPool{{InstalledSchedulerPool}}; +/// InstalledScheduler{{InstalledScheduler}}; +/// end +/// +/// subgraph solana-unified-scheduler-pool +/// SchedulerPool; +/// PooledScheduler; +/// ScheduleExecution(["schedule_execution()"]); +/// end +/// +/// subgraph solana-ledger +/// ExecuteBatch(["execute_batch()"]); +/// end +/// +/// ScheduleExecution -. calls .-> ExecuteBatch; +/// BankWithScheduler -. dyn-calls .-> ScheduleExecution; +/// ExecuteBatch -. 
+
+#[cfg_attr(doc, aquamarine::aquamarine)]
+/// Schedules, executes, and commits transactions under an encapsulated implementation.
+///
+/// The following chart illustrates the ownership/reference interaction between inter-dependent
+/// objects across crates:
+///
+/// ```mermaid
+/// graph TD
+///     Bank["Arc#lt;Bank#gt;"]
+///
+///     subgraph solana-runtime
+///         BankForks;
+///         BankWithScheduler;
+///         Bank;
+///         LoadExecuteAndCommitTransactions(["load_execute_and_commit_transactions()"]);
+///         SchedulingContext;
+///         InstalledSchedulerPool{{InstalledSchedulerPool}};
+///         InstalledScheduler{{InstalledScheduler}};
+///     end
+///
+///     subgraph solana-unified-scheduler-pool
+///         SchedulerPool;
+///         PooledScheduler;
+///         ScheduleExecution(["schedule_execution()"]);
+///     end
+///
+///     subgraph solana-ledger
+///         ExecuteBatch(["execute_batch()"]);
+///     end
+///
+///     ScheduleExecution -. calls .-> ExecuteBatch;
+///     BankWithScheduler -. dyn-calls .-> ScheduleExecution;
+///     ExecuteBatch -. calls .-> LoadExecuteAndCommitTransactions;
+///     linkStyle 0,1,2 stroke:gray,color:gray;
+///
+///     BankForks -- owns --> BankWithScheduler;
+///     BankForks -- owns --> InstalledSchedulerPool;
+///     BankWithScheduler -- refs --> Bank;
+///     BankWithScheduler -- owns --> InstalledScheduler;
+///     SchedulingContext -- refs --> Bank;
+///     InstalledScheduler -- owns --> SchedulingContext;
+///
+///     SchedulerPool -- owns --> PooledScheduler;
+///     SchedulerPool -. impls .-> InstalledSchedulerPool;
+///     PooledScheduler -. impls .-> InstalledScheduler;
+///     PooledScheduler -- refs --> SchedulerPool;
+/// ```
+#[cfg_attr(feature = "dev-context-only-utils", automock)]
+// suppress false clippy complaints arising from mockall-derive:
+//   warning: `#[must_use]` has no effect when applied to a struct field
+//   warning: the following explicit lifetimes could be elided: 'a
+#[cfg_attr(
+    feature = "dev-context-only-utils",
+    allow(unused_attributes, clippy::needless_lifetimes)
+)]
+pub trait InstalledScheduler: Send + Sync + Debug + 'static {
+    fn id(&self) -> SchedulerId;
+    fn context(&self) -> &SchedulingContext;
 
-// currently dummy type; will be replaced with the introduction of real type by upcoming pr...
-pub type DefaultInstalledSchedulerBox = ();
+    // Calling this is illegal as soon as wait_for_termination() is called.
+    fn schedule_execution<'a>(
+        &'a self,
+        transaction_with_index: &'a (&'a SanitizedTransaction, usize),
+    );
+
+    /// Wait for a scheduler to terminate after it is notified with the given reason.
+    ///
+    /// Firstly, this function blocks the current thread while waiting for the scheduler to
+    /// complete all of the executions for the scheduled transactions. This means the scheduler
+    /// has prepared the finalized `ResultWithTimings` at least internally at the time of exiting
+    /// from this function. If no transaction is scheduled, the result and timing will be `Ok(())`
+    /// and `ExecuteTimings::default()` respectively. This is done in the same way regardless of
+    /// `WaitReason`.
+    ///
+    /// After that, the scheduler may behave differently depending on the reason, regarding the
+    /// final bookkeeping. Specifically, this function is guaranteed to return
+    /// `Some(finalized_result_with_timings)` unless the reason is `PausedForRecentBlockhash`. In
+    /// the case of `PausedForRecentBlockhash`, the scheduler is responsible for retaining the
+    /// finalized `ResultWithTimings` until it's `wait_for_termination()`-ed with one of the other
+    /// two reasons later.
+    #[must_use]
+    fn wait_for_termination(&mut self, reason: &WaitReason) -> Option<ResultWithTimings>;
+
+    fn return_to_pool(self: Box<Self>);
+}
+
+pub type DefaultInstalledSchedulerBox = Box<dyn InstalledScheduler>;
+
+pub type InstalledSchedulerPoolArc = Arc<dyn InstalledSchedulerPool>;
+
+pub type SchedulerId = u64;
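Reviewer note (not part of the patch): to make the `wait_for_termination()` contract above concrete, here is a hedged, do-nothing sketch of an `InstalledScheduler`. The type name `NoopScheduler` is made up; the real pooled implementation lives in `solana-unified-scheduler-pool` and fans work out to worker threads:

```rust
// Illustrative only: executes nothing, but obeys the documented bookkeeping.
#[derive(Debug)]
struct NoopScheduler {
    context: SchedulingContext,
    result_with_timings: Option<ResultWithTimings>,
}

impl InstalledScheduler for NoopScheduler {
    fn id(&self) -> SchedulerId {
        0
    }

    fn context(&self) -> &SchedulingContext {
        &self.context
    }

    fn schedule_execution<'a>(
        &'a self,
        _transaction_with_index: &'a (&'a SanitizedTransaction, usize),
    ) {
        // A real scheduler would enqueue the transaction here and commit the
        // execution results into self.context.bank().
    }

    fn wait_for_termination(&mut self, reason: &WaitReason) -> Option<ResultWithTimings> {
        // Hand out the finalized result unless merely pausing; a paused
        // scheduler must retain it for a later wait_for_termination() call.
        let finalized = self
            .result_with_timings
            .take()
            .or(Some((Ok(()), ExecuteTimings::default())));
        if reason.is_paused() {
            self.result_with_timings = finalized;
            None
        } else {
            finalized
        }
    }

    fn return_to_pool(self: Box<Self>) {
        // A pooled implementation would push `self` back onto its free list.
    }
}
```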
+
+/// A small context to propagate a bank and its scheduling mode to the scheduler subsystem.
+///
+/// Note that this isn't called `SchedulerContext` because the contexts aren't associated with
+/// schedulers one by one. A scheduler will use many SchedulingContexts during its lifetime.
+/// The "scheduling" part of the name refers to an abstract slice of time in which all
+/// transactions for a given bank are scheduled and executed, for block verification or
+/// production. A context is expected to be used by a particular scheduler only for that
+/// duration and to be disposed by the scheduler afterwards. The scheduler may then work on
+/// different banks with new `SchedulingContext`s.
+#[derive(Clone, Debug)]
+pub struct SchedulingContext {
+    // mode: SchedulingMode, // this will be added later.
+    bank: Arc<Bank>,
+}
+
+impl SchedulingContext {
+    pub fn new(bank: Arc<Bank>) -> Self {
+        Self { bank }
+    }
+
+    pub fn bank(&self) -> &Arc<Bank> {
+        &self.bank
+    }
+
+    pub fn slot(&self) -> Slot {
+        self.bank().slot()
+    }
+}
+
+pub type ResultWithTimings = (Result<()>, ExecuteTimings);
+
+/// A hint from the bank about the reason the caller is waiting on its scheduler's termination.
+#[derive(Debug, PartialEq, Eq, Clone, Copy)]
+pub enum WaitReason {
+    // The bank wants its scheduler to terminate after the completion of transaction execution,
+    // in order to freeze itself immediately thereafter. This is by far the most common wait
+    // reason.
+    //
+    // Note that `wait_for_termination(TerminatedToFreeze)` must explicitly be done prior to
+    // Bank::freeze(). This can't be done inside Bank::freeze() implicitly, to keep it
+    // infallible.
+    TerminatedToFreeze,
+    // The bank wants its scheduler to terminate just like `TerminatedToFreeze`, and indicates
+    // that Drop::drop() is the caller.
+    DroppedFromBankForks,
+    // The bank wants its scheduler to pause after the completion of transaction execution,
+    // without being returned to the pool, so that the scheduler's internally-held
+    // `ResultWithTimings` can be collected later.
+    PausedForRecentBlockhash,
+}
+
+impl WaitReason {
+    pub fn is_paused(&self) -> bool {
+        // An exhaustive `match` is preferred here over `matches!()` to force an explicit
+        // decision, should we add new variants like `PausedForFooBar`...
+        match self {
+            WaitReason::PausedForRecentBlockhash => true,
+            WaitReason::TerminatedToFreeze | WaitReason::DroppedFromBankForks => false,
+        }
+    }
+}
 
 /// Very thin wrapper around Arc<Bank>
 ///
@@ -40,7 +219,6 @@ pub struct BankWithScheduler {
 #[derive(Debug)]
 pub struct BankWithSchedulerInner {
     bank: Arc<Bank>,
-    #[allow(dead_code)]
     scheduler: InstalledSchedulerRwLock,
 }
 pub type InstalledSchedulerRwLock = RwLock<Option<DefaultInstalledSchedulerBox>>;
@@ -48,6 +226,13 @@ pub type InstalledSchedulerRwLock = RwLock<Option<DefaultInstalledSchedulerBox>>
 impl BankWithScheduler {
     #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))]
     pub(crate) fn new(bank: Arc<Bank>, scheduler: Option<DefaultInstalledSchedulerBox>) -> Self {
+        if let Some(bank_in_context) = scheduler
+            .as_ref()
+            .map(|scheduler| scheduler.context().bank())
+        {
+            assert!(Arc::ptr_eq(&bank, bank_in_context));
+        }
+
         Self {
             inner: Arc::new(BankWithSchedulerInner {
                 bank,
@@ -70,11 +255,142 @@ impl BankWithScheduler {
         self.inner.bank.clone()
     }
 
+    pub fn register_tick(&self, hash: &Hash) {
+        self.inner.bank.register_tick(hash, &self.inner.scheduler);
+    }
+
+    pub fn fill_bank_with_ticks_for_tests(&self) {
+        self.do_fill_bank_with_ticks_for_tests(&self.inner.scheduler);
+    }
+
+    pub fn has_installed_scheduler(&self) -> bool {
+        self.inner.scheduler.read().unwrap().is_some()
+    }
+
+    // 'a is needed; anonymous_lifetime_in_impl_trait isn't stabilized yet...
+    pub fn schedule_transaction_executions<'a>(
+        &self,
+        transactions_with_indexes: impl ExactSizeIterator<Item = (&'a SanitizedTransaction, &'a usize)>,
+    ) {
+        trace!(
+            "schedule_transaction_executions(): {} txs",
+            transactions_with_indexes.len()
+        );
+
+        let scheduler_guard = self.inner.scheduler.read().unwrap();
+        let scheduler = scheduler_guard.as_ref().unwrap();
+
+        for (sanitized_transaction, &index) in transactions_with_indexes {
+            scheduler.schedule_execution(&(sanitized_transaction, index));
+        }
+    }
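Reviewer note (not part of the patch): from the caller's side the expected sequence is schedule, wait for termination, then freeze. A sketch under those assumptions; the helper `replay_and_freeze` is hypothetical (`ReplayStage` does the real wiring), only the `BankWithScheduler` methods come from this diff:

```rust
// Hypothetical caller's-eye view of the wrapper introduced above.
fn replay_and_freeze(
    bank: &BankWithScheduler,
    transactions: &[SanitizedTransaction],
) -> Option<ResultWithTimings> {
    let indexes: Vec<usize> = (0..transactions.len()).collect();
    bank.schedule_transaction_executions(transactions.iter().zip(indexes.iter()));

    // Per the WaitReason::TerminatedToFreeze note, this must happen explicitly
    // before Bank::freeze(); the scheduler is returned to the pool here.
    let result_with_timings = bank.wait_for_completed_scheduler();
    bank.freeze();
    result_with_timings
}
```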
+
+    // Takes a needless &mut only to communicate its semantic mutability to humans...
+    #[cfg(feature = "dev-context-only-utils")]
+    pub fn drop_scheduler(&mut self) {
+        self.inner.drop_scheduler();
+    }
+
+    pub(crate) fn wait_for_paused_scheduler(bank: &Bank, scheduler: &InstalledSchedulerRwLock) {
+        let maybe_result_with_timings = BankWithSchedulerInner::wait_for_scheduler_termination(
+            bank,
+            scheduler,
+            WaitReason::PausedForRecentBlockhash,
+        );
+        assert!(
+            maybe_result_with_timings.is_none(),
+            "Premature result was returned from scheduler after being paused"
+        );
+    }
+
+    #[must_use]
+    pub fn wait_for_completed_scheduler(&self) -> Option<ResultWithTimings> {
+        BankWithSchedulerInner::wait_for_scheduler_termination(
+            &self.inner.bank,
+            &self.inner.scheduler,
+            WaitReason::TerminatedToFreeze,
+        )
+    }
+
     pub const fn no_scheduler_available() -> InstalledSchedulerRwLock {
         RwLock::new(None)
     }
 }
 
+impl BankWithSchedulerInner {
+    #[must_use]
+    fn wait_for_completed_scheduler_from_drop(&self) -> Option<ResultWithTimings> {
+        Self::wait_for_scheduler_termination(
+            &self.bank,
+            &self.scheduler,
+            WaitReason::DroppedFromBankForks,
+        )
+    }
+
+    #[must_use]
+    fn wait_for_scheduler_termination(
+        bank: &Bank,
+        scheduler: &InstalledSchedulerRwLock,
+        reason: WaitReason,
+    ) -> Option<ResultWithTimings> {
+        debug!(
+            "wait_for_scheduler_termination(slot: {}, reason: {:?}): started...",
+            bank.slot(),
+            reason,
+        );
+
+        let mut scheduler = scheduler.write().unwrap();
+        let result_with_timings = if scheduler.is_some() {
+            let result_with_timings = scheduler
+                .as_mut()
+                .and_then(|scheduler| scheduler.wait_for_termination(&reason));
+            if !reason.is_paused() {
+                let scheduler = scheduler.take().expect("scheduler after waiting");
+                scheduler.return_to_pool();
+            }
+            result_with_timings
+        } else {
+            None
+        };
+        debug!(
+            "wait_for_scheduler_termination(slot: {}, reason: {:?}): finished with: {:?}...",
+            bank.slot(),
+            reason,
+            result_with_timings.as_ref().map(|(result, _)| result),
+        );
+
+        result_with_timings
+    }
+
+    fn drop_scheduler(&self) {
+        if std::thread::panicking() {
+            error!(
+                "BankWithSchedulerInner::drop_scheduler(): slot: {} skipping due to already panicking...",
+                self.bank.slot(),
+            );
+            return;
+        }
+
+        // There's no guarantee that a ResultWithTimings is still available by the time the
+        // scheduler is dropped here.
+        if let Some(Err(err)) = self
+            .wait_for_completed_scheduler_from_drop()
+            .map(|(result, _timings)| result)
+        {
+            warn!(
+                "BankWithSchedulerInner::drop_scheduler(): slot: {} discarding error from scheduler: {:?}",
+                self.bank.slot(),
+                err,
+            );
+        }
+    }
+}
+
+impl Drop for BankWithSchedulerInner {
+    fn drop(&mut self) {
+        self.drop_scheduler();
+    }
+}
+
 impl Deref for BankWithScheduler {
     type Target = Arc<Bank>;
 
@@ -82,3 +398,165 @@ impl Deref for BankWithScheduler {
         &self.inner.bank
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use {
+        super::*,
+        crate::{
+            bank::test_utils::goto_end_of_slot_with_scheduler,
+            genesis_utils::{create_genesis_config, GenesisConfigInfo},
+        },
+        assert_matches::assert_matches,
+        mockall::Sequence,
+        solana_sdk::system_transaction,
+    };
+
+    fn setup_mocked_scheduler_with_extra(
+        bank: Arc<Bank>,
+        wait_reasons: impl Iterator<Item = WaitReason>,
+        f: Option<impl Fn(&mut MockInstalledScheduler)>,
+    ) -> DefaultInstalledSchedulerBox {
+        let mut mock = MockInstalledScheduler::new();
+        let mut seq = Sequence::new();
+
+        mock.expect_context()
+            .times(1)
+            .in_sequence(&mut seq)
+            .return_const(SchedulingContext::new(bank));
+
+        for wait_reason in wait_reasons {
+            mock.expect_wait_for_termination()
+                .with(mockall::predicate::eq(wait_reason))
+                .times(1)
+                .in_sequence(&mut seq)
+                .returning(move |_| {
+                    if wait_reason.is_paused() {
+                        None
+                    } else {
+                        Some((Ok(()), ExecuteTimings::default()))
+                    }
+                });
+        }
+
+        mock.expect_return_to_pool()
+            .times(1)
+            .in_sequence(&mut seq)
+            .returning(|| ());
+        if let Some(f) = f {
+            f(&mut mock);
+        }
+
+        Box::new(mock)
+    }
+
+    fn setup_mocked_scheduler(
+        bank: Arc<Bank>,
+        wait_reasons: impl Iterator<Item = WaitReason>,
+    ) -> DefaultInstalledSchedulerBox {
+        setup_mocked_scheduler_with_extra(
+            bank,
+            wait_reasons,
+            None::<fn(&mut MockInstalledScheduler) -> ()>,
+        )
+    }
+
+    #[test]
+    fn test_scheduler_normal_termination() {
+        solana_logger::setup();
+
+        let bank = Arc::new(Bank::default_for_tests());
+        let bank = BankWithScheduler::new(
+            bank.clone(),
+            Some(setup_mocked_scheduler(
+                bank,
+                [WaitReason::TerminatedToFreeze].into_iter(),
+            )),
+        );
+        assert!(bank.has_installed_scheduler());
+        assert_matches!(bank.wait_for_completed_scheduler(), Some(_));
+
+        // Calling wait_for_completed_scheduler() again is okay; it simply returns no
+        // ResultWithTimings the second time.
+        assert!(!bank.has_installed_scheduler());
+        assert_matches!(bank.wait_for_completed_scheduler(), None);
+    }
+
+    #[test]
+    fn test_no_scheduler_termination() {
+        solana_logger::setup();
+
+        let bank = Arc::new(Bank::default_for_tests());
+        let bank = BankWithScheduler::new_without_scheduler(bank);
+
+        // Calling wait_for_completed_scheduler() is a no-op when no scheduler is installed.
+ assert!(!bank.has_installed_scheduler()); + assert_matches!(bank.wait_for_completed_scheduler(), None); + } + + #[test] + fn test_scheduler_termination_from_drop() { + solana_logger::setup(); + + let bank = Arc::new(Bank::default_for_tests()); + let bank = BankWithScheduler::new( + bank.clone(), + Some(setup_mocked_scheduler( + bank, + [WaitReason::DroppedFromBankForks].into_iter(), + )), + ); + drop(bank); + } + + #[test] + fn test_scheduler_pause() { + solana_logger::setup(); + + let bank = Arc::new(crate::bank::tests::create_simple_test_bank(42)); + let bank = BankWithScheduler::new( + bank.clone(), + Some(setup_mocked_scheduler( + bank, + [ + WaitReason::PausedForRecentBlockhash, + WaitReason::TerminatedToFreeze, + ] + .into_iter(), + )), + ); + goto_end_of_slot_with_scheduler(&bank); + assert_matches!(bank.wait_for_completed_scheduler(), Some(_)); + } + + #[test] + fn test_schedule_executions() { + solana_logger::setup(); + + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. + } = create_genesis_config(10_000); + let tx0 = SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( + &mint_keypair, + &solana_sdk::pubkey::new_rand(), + 2, + genesis_config.hash(), + )); + let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let mocked_scheduler = setup_mocked_scheduler_with_extra( + bank.clone(), + [WaitReason::DroppedFromBankForks].into_iter(), + Some(|mocked: &mut MockInstalledScheduler| { + mocked + .expect_schedule_execution() + .times(1) + .returning(|(_, _)| ()); + }), + ); + + let bank = BankWithScheduler::new(bank, Some(mocked_scheduler)); + bank.schedule_transaction_executions([(&tx0, &0)].into_iter()); + } +} diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 1bbd479848e987..e6ba2b1bd8969b 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -14,7 +14,6 @@ pub mod commitment; mod epoch_rewards_hasher; pub mod epoch_stakes; pub mod genesis_utils; -pub mod inline_feature_gate_program; pub mod inline_spl_associated_token_account; pub mod installed_scheduler_pool; pub mod loader_utils; diff --git a/runtime/src/transaction_priority_details.rs b/runtime/src/transaction_priority_details.rs index 0d0a94df4ed393..d7a1ed590894a1 100644 --- a/runtime/src/transaction_priority_details.rs +++ b/runtime/src/transaction_priority_details.rs @@ -1,13 +1,14 @@ use { - solana_program_runtime::compute_budget::ComputeBudget, + solana_program_runtime::compute_budget_processor::process_compute_budget_instructions, solana_sdk::{ + feature_set::FeatureSet, instruction::CompiledInstruction, pubkey::Pubkey, transaction::{SanitizedTransaction, SanitizedVersionedTransaction}, }, }; -#[derive(Debug, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub struct TransactionPriorityDetails { pub priority: u64, pub compute_unit_limit: u64, @@ -23,18 +24,17 @@ pub trait GetTransactionPriorityDetails { instructions: impl Iterator, _round_compute_unit_price_enabled: bool, ) -> Option { - let mut compute_budget = ComputeBudget::default(); - let prioritization_fee_details = compute_budget - .process_instructions( - instructions, - true, // supports prioritization by request_units_deprecated instruction - true, // enable support set accounts data size instruction - // TODO: round_compute_unit_price_enabled: bool - ) - .ok()?; + let mut feature_set = FeatureSet::default(); + feature_set.activate( + &solana_sdk::feature_set::add_set_tx_loaded_accounts_data_size_instruction::id(), + 0, + ); + + let compute_budget_limits = + 
process_compute_budget_instructions(instructions, &feature_set).ok()?; Some(TransactionPriorityDetails { - priority: prioritization_fee_details.get_priority(), - compute_unit_limit: compute_budget.compute_unit_limit, + priority: compute_budget_limits.compute_unit_price, + compute_unit_limit: u64::from(compute_budget_limits.compute_unit_limit), }) } } @@ -98,8 +98,8 @@ mod tests { Some(TransactionPriorityDetails { priority: 0, compute_unit_limit: - solana_program_runtime::compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT - as u64 + solana_program_runtime::compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT + as u64, }) ); @@ -111,8 +111,8 @@ mod tests { Some(TransactionPriorityDetails { priority: 0, compute_unit_limit: - solana_program_runtime::compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT - as u64 + solana_program_runtime::compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT + as u64, }) ); } @@ -174,8 +174,8 @@ mod tests { Some(TransactionPriorityDetails { priority: requested_price, compute_unit_limit: - solana_program_runtime::compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT - as u64 + solana_program_runtime::compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT + as u64, }) ); @@ -187,8 +187,8 @@ mod tests { Some(TransactionPriorityDetails { priority: requested_price, compute_unit_limit: - solana_program_runtime::compute_budget::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT - as u64 + solana_program_runtime::compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT + as u64, }) ); } diff --git a/scripts/cargo-clippy.sh b/scripts/cargo-clippy.sh new file mode 100755 index 00000000000000..16419cb2cc944d --- /dev/null +++ b/scripts/cargo-clippy.sh @@ -0,0 +1,61 @@ +#!/usr/bin/env bash + +# Runs `cargo clippy` in all individual workspaces in the repository. +# +# We have a number of clippy parameters that we want to enforce across the +# code base. They are defined here. +# +# This script is run by the CI, so if you want to replicate what the CI is +# doing, better run this script, rather than calling `cargo clippy` manually. +# +# TODO It would be nice to provide arguments to narrow clippy checks to a single +# workspace and/or package. To speed up the interactive workflow. + +set -o errexit + +here="$(dirname "$0")" +cargo="$(readlink -f "${here}/../cargo")" + +if [[ -z $cargo ]]; then + >&2 echo "Failed to find cargo. Mac readlink doesn't support -f. Consider switching + to gnu readlink with 'brew install coreutils' and then symlink greadlink as + /usr/local/bin/readlink." + exit 1 +fi + +# shellcheck source=ci/rust-version.sh +source "$here/../ci/rust-version.sh" + +nightly_clippy_allows=(--allow=clippy::redundant_clone) + +# Use nightly clippy, as frozen-abi proc-macro generates a lot of code across +# various crates in this whole monorepo (frozen-abi is enabled only under nightly +# due to the use of unstable rust feature). Likewise, frozen-abi(-macro) crates' +# unit tests are only compiled under nightly. +# Similarly, nightly is desired to run clippy over all of bench files because +# the bench itself isn't stabilized yet... 
+# ref: https://github.com/rust-lang/rust/issues/66287 +"$here/cargo-for-all-lock-files.sh" -- \ + "+${rust_nightly}" clippy \ + --workspace --all-targets --features dummy-for-ci-check -- \ + --deny=warnings \ + --deny=clippy::default_trait_access \ + --deny=clippy::arithmetic_side_effects \ + --deny=clippy::manual_let_else \ + --deny=clippy::used_underscore_binding \ + "${nightly_clippy_allows[@]}" + +# temporarily run stable clippy as well to scan the codebase for +# `redundant_clone`s, which is disabled as nightly clippy is buggy: +# https://github.com/solana-labs/solana/issues/31834 +# +# can't use --all-targets: +# error[E0554]: `#![feature]` may not be used on the stable release channel +"$here/cargo-for-all-lock-files.sh" -- \ + clippy \ + --workspace --tests --bins --examples --features dummy-for-ci-check -- \ + --deny=warnings \ + --deny=clippy::default_trait_access \ + --deny=clippy::arithmetic_side_effects \ + --deny=clippy::manual_let_else \ + --deny=clippy::used_underscore_binding diff --git a/scripts/cargo-install-all.sh b/scripts/cargo-install-all.sh index 583ba6508f917d..4aceef69a4fe73 100755 --- a/scripts/cargo-install-all.sh +++ b/scripts/cargo-install-all.sh @@ -28,22 +28,29 @@ usage() { echo "Error: $*" fi cat <] [--debug] [--validator-only] +usage: $0 [+] [--debug] [--validator-only] [--release-with-debug] EOF exit $exitcode } maybeRustVersion= installDir= -buildVariant=release -maybeReleaseFlag=--release +# buildProfileArg and buildProfile duplicate some information because cargo +# doesn't allow '--profile debug' but we still need to know that the binaries +# will be in target/debug +buildProfileArg='--profile release' +buildProfile='release' validatorOnly= while [[ -n $1 ]]; do if [[ ${1:0:1} = - ]]; then if [[ $1 = --debug ]]; then - maybeReleaseFlag= - buildVariant=debug + buildProfileArg= # the default cargo profile is 'debug' + buildProfile='debug' + shift + elif [[ $1 = --release-with-debug ]]; then + buildProfileArg='--profile release-with-debug' + buildProfile='release-with-debug' shift elif [[ $1 = --validator-only ]]; then validatorOnly=true @@ -68,7 +75,7 @@ fi installDir="$(mkdir -p "$installDir"; cd "$installDir"; pwd)" mkdir -p "$installDir/bin/deps" -echo "Install location: $installDir ($buildVariant)" +echo "Install location: $installDir ($buildProfile)" cd "$(dirname "$0")"/.. 
@@ -138,7 +145,7 @@ mkdir -p "$installDir/bin" ( set -x # shellcheck disable=SC2086 # Don't want to double quote $rust_version - "$cargo" $maybeRustVersion build $maybeReleaseFlag "${binArgs[@]}" + "$cargo" $maybeRustVersion build $buildProfileArg "${binArgs[@]}" # Exclude `spl-token` binary for net.sh builds if [[ -z "$validatorOnly" ]]; then @@ -152,7 +159,7 @@ mkdir -p "$installDir/bin" ) for bin in "${BINS[@]}"; do - cp -fv "target/$buildVariant/$bin" "$installDir"/bin + cp -fv "target/$buildProfile/$bin" "$installDir"/bin done if [[ -d target/perf-libs ]]; then @@ -206,7 +213,7 @@ fi set -x # deps dir can be empty shopt -s nullglob - for dep in target/"$buildVariant"/deps/libsolana*program.*; do + for dep in target/"$buildProfile"/deps/libsolana*program.*; do cp -fv "$dep" "$installDir/bin/deps" done ) diff --git a/sdk/program/src/system_instruction.rs b/sdk/program/src/system_instruction.rs index 0c8ba59e6e9428..74646f7fb7d331 100644 --- a/sdk/program/src/system_instruction.rs +++ b/sdk/program/src/system_instruction.rs @@ -1461,7 +1461,7 @@ pub fn create_nonce_account( /// /// When constructing a transaction that includes an `AdvanceNonceInstruction` /// the [`recent_blockhash`] must be treated differently — instead of -/// setting it to a recent blockhash, the value of the nonce must be retreived +/// setting it to a recent blockhash, the value of the nonce must be retrieved /// and deserialized from the nonce account, and that value specified as the /// "recent blockhash". A nonce account can be deserialized with the /// [`solana_rpc_client_nonce_utils::data_from_account`][dfa] function. diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index 8682836c2ba247..5357811ee2a738 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -283,7 +283,7 @@ pub mod stake_deactivate_delinquent_instruction { } pub mod stake_redelegate_instruction { - solana_sdk::declare_id!("GUrp5BKMyDazsAp9mBoVD6orE5ihXNRPC3jkBRfx6Lq7"); + solana_sdk::declare_id!("2KKG3C6RBnxQo9jVVrbzsoSh41TDXLK7gBc9gduyxSzW"); } pub mod vote_withdraw_authority_may_change_authorized_voter { @@ -700,10 +700,6 @@ pub mod better_error_codes_for_tx_lamport_check { solana_sdk::declare_id!("Ffswd3egL3tccB6Rv3XY6oqfdzn913vUcjCSnpvCKpfx"); } -pub mod programify_feature_gate_program { - solana_sdk::declare_id!("8GdovDzVwWU5edz2G697bbB7GZjrUc6aQZLWyNNAtHdg"); -} - pub mod update_hashes_per_tick2 { solana_sdk::declare_id!("EWme9uFqfy1ikK1jhJs8fM5hxWnK336QJpbscNtizkTU"); } @@ -724,6 +720,10 @@ pub mod update_hashes_per_tick6 { solana_sdk::declare_id!("FKu1qYwLQSiehz644H6Si65U5ZQ2cp9GxsyFUfYcuADv"); } +pub mod validate_fee_collector_account { + solana_sdk::declare_id!("prpFrMtgNmzaNzkPJg9o753fVvbHKqNrNTm76foJ2wm"); +} + lazy_static! { /// Map of feature identifiers to user-visible description pub static ref FEATURE_NAMES: HashMap = [ @@ -894,12 +894,12 @@ lazy_static! 
{ (require_rent_exempt_split_destination::id(), "Require stake split destination account to be rent exempt"), (better_error_codes_for_tx_lamport_check::id(), "better error codes for tx lamport check #33353"), (enable_alt_bn128_compression_syscall::id(), "add alt_bn128 compression syscalls"), - (programify_feature_gate_program::id(), "move feature gate activation logic to an on-chain program #32783"), (update_hashes_per_tick2::id(), "Update desired hashes per tick to 2.8M"), (update_hashes_per_tick3::id(), "Update desired hashes per tick to 4.4M"), (update_hashes_per_tick4::id(), "Update desired hashes per tick to 7.6M"), (update_hashes_per_tick5::id(), "Update desired hashes per tick to 9.2M"), (update_hashes_per_tick6::id(), "Update desired hashes per tick to 10M"), + (validate_fee_collector_account::id(), "validate fee collector account #33888"), /*************** ADD NEW FEATURES HERE ***************/ ] .iter() diff --git a/streamer/src/nonblocking/sendmmsg.rs b/streamer/src/nonblocking/sendmmsg.rs index 106e53d243cf54..15217b906eb5da 100644 --- a/streamer/src/nonblocking/sendmmsg.rs +++ b/streamer/src/nonblocking/sendmmsg.rs @@ -178,16 +178,10 @@ mod tests { let dest_refs: Vec<_> = vec![&ip4, &ip6, &ip4]; let sender = UdpSocket::bind("0.0.0.0:0").await.expect("bind"); - if let Err(SendPktsError::IoError(_, num_failed)) = - batch_send(&sender, &packet_refs[..]).await - { - assert_eq!(num_failed, 1); - } - if let Err(SendPktsError::IoError(_, num_failed)) = - multi_target_send(&sender, &packets[0], &dest_refs).await - { - assert_eq!(num_failed, 1); - } + let res = batch_send(&sender, &packet_refs[..]).await; + assert_matches!(res, Err(SendPktsError::IoError(_, /*num_failed*/ 1))); + let res = multi_target_send(&sender, &packets[0], &dest_refs).await; + assert_matches!(res, Err(SendPktsError::IoError(_, /*num_failed*/ 1))); } #[tokio::test] @@ -205,11 +199,12 @@ mod tests { (&packets[3][..], &ipv4broadcast), (&packets[4][..], &ipv4local), ]; - if let Err(SendPktsError::IoError(ioerror, num_failed)) = - batch_send(&sender, &packet_refs[..]).await - { - assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied); - assert_eq!(num_failed, 2); + match batch_send(&sender, &packet_refs[..]).await { + Ok(()) => panic!(), + Err(SendPktsError::IoError(ioerror, num_failed)) => { + assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied); + assert_eq!(num_failed, 2); + } } // test leading and trailing failures for batch_send @@ -220,11 +215,12 @@ mod tests { (&packets[3][..], &ipv4local), (&packets[4][..], &ipv4broadcast), ]; - if let Err(SendPktsError::IoError(ioerror, num_failed)) = - batch_send(&sender, &packet_refs[..]).await - { - assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied); - assert_eq!(num_failed, 3); + match batch_send(&sender, &packet_refs[..]).await { + Ok(()) => panic!(), + Err(SendPktsError::IoError(ioerror, num_failed)) => { + assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied); + assert_eq!(num_failed, 3); + } } // test consecutive intermediate failures for batch_send @@ -235,11 +231,12 @@ mod tests { (&packets[3][..], &ipv4broadcast), (&packets[4][..], &ipv4local), ]; - if let Err(SendPktsError::IoError(ioerror, num_failed)) = - batch_send(&sender, &packet_refs[..]).await - { - assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied); - assert_eq!(num_failed, 2); + match batch_send(&sender, &packet_refs[..]).await { + Ok(()) => panic!(), + Err(SendPktsError::IoError(ioerror, num_failed)) => { + assert_matches!(ioerror.kind(), 
ErrorKind::PermissionDenied); + assert_eq!(num_failed, 2); + } } // test intermediate failures for multi_target_send @@ -250,11 +247,12 @@ mod tests { &ipv4broadcast, &ipv4local, ]; - if let Err(SendPktsError::IoError(ioerror, num_failed)) = - multi_target_send(&sender, &packets[0], &dest_refs).await - { - assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied); - assert_eq!(num_failed, 2); + match multi_target_send(&sender, &packets[0], &dest_refs).await { + Ok(()) => panic!(), + Err(SendPktsError::IoError(ioerror, num_failed)) => { + assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied); + assert_eq!(num_failed, 2); + } } // test leading and trailing failures for multi_target_send @@ -265,11 +263,12 @@ mod tests { &ipv4local, &ipv4broadcast, ]; - if let Err(SendPktsError::IoError(ioerror, num_failed)) = - multi_target_send(&sender, &packets[0], &dest_refs).await - { - assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied); - assert_eq!(num_failed, 3); + match multi_target_send(&sender, &packets[0], &dest_refs).await { + Ok(()) => panic!(), + Err(SendPktsError::IoError(ioerror, num_failed)) => { + assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied); + assert_eq!(num_failed, 3); + } } } } diff --git a/streamer/src/sendmmsg.rs b/streamer/src/sendmmsg.rs index 3340b10e6fdeda..459d868a2ed0c8 100644 --- a/streamer/src/sendmmsg.rs +++ b/streamer/src/sendmmsg.rs @@ -282,14 +282,10 @@ mod tests { let dest_refs: Vec<_> = vec![&ip4, &ip6, &ip4]; let sender = UdpSocket::bind("0.0.0.0:0").expect("bind"); - if let Err(SendPktsError::IoError(_, num_failed)) = batch_send(&sender, &packet_refs[..]) { - assert_eq!(num_failed, 1); - } - if let Err(SendPktsError::IoError(_, num_failed)) = - multi_target_send(&sender, &packets[0], &dest_refs) - { - assert_eq!(num_failed, 1); - } + let res = batch_send(&sender, &packet_refs[..]); + assert_matches!(res, Err(SendPktsError::IoError(_, /*num_failed*/ 1))); + let res = multi_target_send(&sender, &packets[0], &dest_refs); + assert_matches!(res, Err(SendPktsError::IoError(_, /*num_failed*/ 1))); } #[test] @@ -307,11 +303,12 @@ mod tests { (&packets[3][..], &ipv4broadcast), (&packets[4][..], &ipv4local), ]; - if let Err(SendPktsError::IoError(ioerror, num_failed)) = - batch_send(&sender, &packet_refs[..]) - { - assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied); - assert_eq!(num_failed, 2); + match batch_send(&sender, &packet_refs[..]) { + Ok(()) => panic!(), + Err(SendPktsError::IoError(ioerror, num_failed)) => { + assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied); + assert_eq!(num_failed, 2); + } } // test leading and trailing failures for batch_send @@ -322,11 +319,12 @@ mod tests { (&packets[3][..], &ipv4local), (&packets[4][..], &ipv4broadcast), ]; - if let Err(SendPktsError::IoError(ioerror, num_failed)) = - batch_send(&sender, &packet_refs[..]) - { - assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied); - assert_eq!(num_failed, 3); + match batch_send(&sender, &packet_refs[..]) { + Ok(()) => panic!(), + Err(SendPktsError::IoError(ioerror, num_failed)) => { + assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied); + assert_eq!(num_failed, 3); + } } // test consecutive intermediate failures for batch_send @@ -337,11 +335,12 @@ mod tests { (&packets[3][..], &ipv4broadcast), (&packets[4][..], &ipv4local), ]; - if let Err(SendPktsError::IoError(ioerror, num_failed)) = - batch_send(&sender, &packet_refs[..]) - { - assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied); - assert_eq!(num_failed, 2); + 
match batch_send(&sender, &packet_refs[..]) {
+            Ok(()) => panic!(),
+            Err(SendPktsError::IoError(ioerror, num_failed)) => {
+                assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied);
+                assert_eq!(num_failed, 2);
+            }
         }
 
         // test intermediate failures for multi_target_send
@@ -352,11 +351,12 @@
             &ipv4broadcast,
             &ipv4local,
         ];
-        if let Err(SendPktsError::IoError(ioerror, num_failed)) =
-            multi_target_send(&sender, &packets[0], &dest_refs)
-        {
-            assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied);
-            assert_eq!(num_failed, 2);
+        match multi_target_send(&sender, &packets[0], &dest_refs) {
+            Ok(()) => panic!(),
+            Err(SendPktsError::IoError(ioerror, num_failed)) => {
+                assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied);
+                assert_eq!(num_failed, 2);
+            }
         }
 
         // test leading and trailing failures for multi_target_send
@@ -367,11 +367,12 @@
             &ipv4local,
             &ipv4broadcast,
         ];
-        if let Err(SendPktsError::IoError(ioerror, num_failed)) =
-            multi_target_send(&sender, &packets[0], &dest_refs)
-        {
-            assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied);
-            assert_eq!(num_failed, 3);
+        match multi_target_send(&sender, &packets[0], &dest_refs) {
+            Ok(()) => panic!(),
+            Err(SendPktsError::IoError(ioerror, num_failed)) => {
+                assert_matches!(ioerror.kind(), ErrorKind::PermissionDenied);
+                assert_eq!(num_failed, 3);
+            }
         }
     }
 }
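Reviewer note (not part of the patch): the sendmmsg test churn above follows one pattern. The old `if let Err(..)` form silently passes when the call unexpectedly succeeds, while an exhaustive `match` fails the test loudly. Distilled into a hedged helper (the name is illustrative; `SendPktsError` is the crate's own type, whose only variant is `IoError`):

```rust
// Assumes the streamer crate's error type is in scope, e.g.:
// use crate::sendmmsg::SendPktsError;
fn assert_one_failed(res: Result<(), SendPktsError>) {
    match res {
        // An unexpected success must fail the test rather than be skipped.
        Ok(()) => panic!("expected a partial send failure"),
        Err(SendPktsError::IoError(_, num_failed)) => assert_eq!(num_failed, 1),
    }
}
```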
diff --git a/tokens/src/commands.rs b/tokens/src/commands.rs
index c10ad508d61a1c..8219ffa858ec24 100644
--- a/tokens/src/commands.rs
+++ b/tokens/src/commands.rs
@@ -42,6 +42,7 @@ use {
     std::{
         cmp::{self},
         io,
+        str::FromStr,
         sync::{
             atomic::{AtomicBool, Ordering},
             Arc,
@@ -51,6 +52,7 @@ use {
         },
     },
 };
 
+/// Allocation is a helper (mostly for tests); prefer TypedAllocation when possible.
 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
 pub struct Allocation {
     pub recipient: String,
     pub amount: u64,
     pub lockup_date: String,
 }
 
+/// TypedAllocation is the same as Allocation, but with typed fields.
+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
+pub struct TypedAllocation {
+    pub recipient: Pubkey,
+    pub amount: u64,
+    pub lockup_date: Option<DateTime<Utc>>,
+}
+
 #[derive(Debug, PartialEq, Eq)]
 pub enum FundingSource {
     FeePayer,
@@ -98,8 +108,20 @@ type StakeExtras = Vec<(Keypair, Option<DateTime<Utc>>)>;
 pub enum Error {
     #[error("I/O error")]
     IoError(#[from] io::Error),
+    #[error("CSV file seems to be empty")]
+    CsvIsEmptyError,
     #[error("CSV error")]
     CsvError(#[from] csv::Error),
+    #[error("Bad input data for pubkey: {input}, error: {err}")]
+    BadInputPubkeyError {
+        input: String,
+        err: pubkey::ParsePubkeyError,
+    },
+    #[error("Bad input data for lockup date: {input}, error: {err}")]
+    BadInputLockupDate {
+        input: String,
+        err: chrono::ParseError,
+    },
     #[error("PickleDb error")]
     PickleDbError(#[from] pickledb::error::Error),
     #[error("Transport error")]
@@ -118,15 +140,15 @@ pub enum Error {
     ExitSignal,
 }
 
-fn merge_allocations(allocations: &[Allocation]) -> Vec<Allocation> {
+fn merge_allocations(allocations: &[TypedAllocation]) -> Vec<TypedAllocation> {
     let mut allocation_map = IndexMap::new();
     for allocation in allocations {
         allocation_map
             .entry(&allocation.recipient)
-            .or_insert(Allocation {
-                recipient: allocation.recipient.clone(),
+            .or_insert(TypedAllocation {
+                recipient: allocation.recipient,
                 amount: 0,
-                lockup_date: "".to_string(),
+                lockup_date: None,
             })
             .amount += allocation.amount;
     }
@@ -134,13 +156,13 @@ fn merge_allocations(allocations: &[TypedAllocation]) -> Vec<TypedAllocation> {
 }
 
 /// Return true if the recipient and lockups are the same
-fn has_same_recipient(allocation: &Allocation, transaction_info: &TransactionInfo) -> bool {
-    allocation.recipient == transaction_info.recipient.to_string()
-        && allocation.lockup_date.parse().ok() == transaction_info.lockup_date
+fn has_same_recipient(allocation: &TypedAllocation, transaction_info: &TransactionInfo) -> bool {
+    allocation.recipient == transaction_info.recipient
+        && allocation.lockup_date == transaction_info.lockup_date
 }
 
 fn apply_previous_transactions(
-    allocations: &mut Vec<Allocation>,
+    allocations: &mut Vec<TypedAllocation>,
     transaction_infos: &[TransactionInfo],
 ) {
     for transaction_info in transaction_infos {
@@ -179,7 +201,7 @@ fn transfer(
 }
 
 fn distribution_instructions(
-    allocation: &Allocation,
+    allocation: &TypedAllocation,
     new_stake_account_address: &Pubkey,
     args: &DistributeTokensArgs,
     lockup_date: Option<DateTime<Utc>>,
@@ -193,7 +215,7 @@ fn distribution_instructions(
         // No stake args; a simple token transfer.
         None => {
             let from = args.sender_keypair.pubkey();
-            let to = allocation.recipient.parse().unwrap();
+            let to = allocation.recipient;
             let lamports = allocation.amount;
             let instruction = system_instruction::transfer(&from, &to, lamports);
             vec![instruction]
@@ -203,7 +225,7 @@ fn distribution_instructions(
         Some(stake_args) => {
             let unlocked_sol = stake_args.unlocked_sol;
             let sender_pubkey = args.sender_keypair.pubkey();
-            let recipient = allocation.recipient.parse().unwrap();
+            let recipient = allocation.recipient;
 
             let mut instructions = match &stake_args.sender_stake_args {
                 // No source stake account, so create a recipient stake account directly.
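Reviewer note before the next hunk (not part of the patch): the point of `TypedAllocation` is to move all parsing to the CSV boundary, so downstream code never calls `recipient.parse().unwrap()` again. A condensed sketch of that conversion, mirroring the error handling introduced above; the helper name is an assumption (`read_allocations()` below does this per CSV shape):

```rust
use {
    chrono::{DateTime, Utc},
    solana_sdk::pubkey::Pubkey,
    std::str::FromStr,
};

// Hypothetical helper: turn one stringly-typed CSV row into a TypedAllocation.
fn typed_allocation_from_row(
    recipient: &str,
    amount: u64,
    lockup_date: &str,
) -> Result<TypedAllocation, Error> {
    let recipient = Pubkey::from_str(recipient).map_err(|err| Error::BadInputPubkeyError {
        input: recipient.to_string(),
        err,
    })?;
    // An empty lockup cell means "no lockup"; only a malformed one is an error.
    let lockup_date = if lockup_date.is_empty() {
        None
    } else {
        Some(
            lockup_date
                .parse::<DateTime<Utc>>()
                .map_err(|err| Error::BadInputLockupDate {
                    input: lockup_date.to_string(),
                    err,
                })?,
        )
    };
    Ok(TypedAllocation {
        recipient,
        amount,
        lockup_date,
    })
}
```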
@@ -304,7 +326,7 @@ fn distribution_instructions( fn build_messages( client: &RpcClient, db: &mut PickleDb, - allocations: &[Allocation], + allocations: &[TypedAllocation], args: &DistributeTokensArgs, exit: Arc, messages: &mut Vec, @@ -318,7 +340,7 @@ fn build_messages( let associated_token_addresses = allocation_chunk .iter() .map(|x| { - let wallet_address = x.recipient.parse().unwrap(); + let wallet_address = x.recipient; get_associated_token_address(&wallet_address, &spl_token_args.mint) }) .collect::>(); @@ -333,11 +355,7 @@ fn build_messages( return Err(Error::ExitSignal); } let new_stake_account_keypair = Keypair::new(); - let lockup_date = if allocation.lockup_date.is_empty() { - None - } else { - Some(allocation.lockup_date.parse::>().unwrap()) - }; + let lockup_date = allocation.lockup_date; let do_create_associated_token_account = if let Some(spl_token_args) = &args.spl_token_args { @@ -382,7 +400,7 @@ fn build_messages( fn send_messages( client: &RpcClient, db: &mut PickleDb, - allocations: &[Allocation], + allocations: &[TypedAllocation], args: &DistributeTokensArgs, exit: Arc, messages: Vec, @@ -404,7 +422,7 @@ fn send_messages( signers.push(&*sender_stake_args.stake_authority); signers.push(&*sender_stake_args.withdraw_authority); signers.push(&new_stake_account_keypair); - if !allocation.lockup_date.is_empty() { + if allocation.lockup_date.is_some() { if let Some(lockup_authority) = &sender_stake_args.lockup_authority { signers.push(&**lockup_authority); } else { @@ -435,7 +453,7 @@ fn send_messages( args.stake_args.as_ref().map(|_| &new_stake_account_address); db::set_transaction_info( db, - &allocation.recipient.parse().unwrap(), + &allocation.recipient, allocation.amount, &transaction, new_stake_account_address_option, @@ -455,7 +473,7 @@ fn send_messages( fn distribute_allocations( client: &RpcClient, db: &mut PickleDb, - allocations: &[Allocation], + allocations: &[TypedAllocation], args: &DistributeTokensArgs, exit: Arc, ) -> Result<(), Error> { @@ -490,63 +508,91 @@ fn distribute_allocations( fn read_allocations( input_csv: &str, transfer_amount: Option, - require_lockup_heading: bool, + with_lockup: bool, raw_amount: bool, -) -> io::Result> { +) -> Result, Error> { let mut rdr = ReaderBuilder::new().trim(Trim::All).from_path(input_csv)?; let allocations = if let Some(amount) = transfer_amount { - let recipients: Vec = rdr - .deserialize() - .map(|recipient| recipient.unwrap()) - .collect(); - recipients - .into_iter() - .map(|recipient| Allocation { - recipient, - amount, - lockup_date: "".to_string(), + rdr.deserialize() + .map(|recipient| { + let recipient: String = recipient?; + let recipient = + Pubkey::from_str(&recipient).map_err(|err| Error::BadInputPubkeyError { + input: recipient, + err, + })?; + Ok(TypedAllocation { + recipient, + amount, + lockup_date: None, + }) }) - .collect() - } else if require_lockup_heading { - let recipients: Vec<(String, f64, String)> = rdr - .deserialize() - .map(|recipient| recipient.unwrap()) - .collect(); - recipients - .into_iter() - .map(|(recipient, amount, lockup_date)| Allocation { - recipient, - amount: sol_to_lamports(amount), - lockup_date, + .collect::, Error>>()? + } else if with_lockup { + // We only support SOL token in "require lockup" mode. 
+ rdr.deserialize() + .map(|recipient| { + let (recipient, amount, lockup_date): (String, f64, String) = recipient?; + let recipient = + Pubkey::from_str(&recipient).map_err(|err| Error::BadInputPubkeyError { + input: recipient, + err, + })?; + let lockup_date = if !lockup_date.is_empty() { + let lockup_date = lockup_date.parse::>().map_err(|err| { + Error::BadInputLockupDate { + input: lockup_date, + err, + } + })?; + Some(lockup_date) + } else { + // empty lockup date means no lockup, it's okay to have only some lockups specified + None + }; + Ok(TypedAllocation { + recipient, + amount: sol_to_lamports(amount), + lockup_date, + }) }) - .collect() + .collect::, Error>>()? } else if raw_amount { - let recipients: Vec<(String, u64)> = rdr - .deserialize() - .map(|recipient| recipient.unwrap()) - .collect(); - recipients - .into_iter() - .map(|(recipient, amount)| Allocation { - recipient, - amount, - lockup_date: "".to_string(), + rdr.deserialize() + .map(|recipient| { + let (recipient, amount): (String, u64) = recipient?; + let recipient = + Pubkey::from_str(&recipient).map_err(|err| Error::BadInputPubkeyError { + input: recipient, + err, + })?; + Ok(TypedAllocation { + recipient, + amount, + lockup_date: None, + }) }) - .collect() + .collect::, Error>>()? } else { - let recipients: Vec<(String, f64)> = rdr - .deserialize() - .map(|recipient| recipient.unwrap()) - .collect(); - recipients - .into_iter() - .map(|(recipient, amount)| Allocation { - recipient, - amount: sol_to_lamports(amount), - lockup_date: "".to_string(), + rdr.deserialize() + .map(|recipient| { + let (recipient, amount): (String, f64) = recipient?; + let recipient = + Pubkey::from_str(&recipient).map_err(|err| Error::BadInputPubkeyError { + input: recipient, + err, + })?; + Ok(TypedAllocation { + recipient, + amount: sol_to_lamports(amount), + lockup_date: None, + }) }) - .collect() + .collect::, Error>>()? 
}; + if allocations.is_empty() { + return Err(Error::CsvIsEmptyError); + } Ok(allocations) } @@ -566,11 +612,11 @@ pub fn process_allocations( args: &DistributeTokensArgs, exit: Arc, ) -> Result, Error> { - let require_lockup_heading = args.stake_args.is_some(); - let mut allocations: Vec = read_allocations( + let with_lockup = args.stake_args.is_some(); + let mut allocations: Vec = read_allocations( &args.input_csv, args.transfer_amount, - require_lockup_heading, + with_lockup, args.spl_token_args.is_some(), )?; @@ -773,7 +819,7 @@ pub fn get_fee_estimate_for_messages( fn check_payer_balances( messages: &[Message], - allocations: &[Allocation], + allocations: &[TypedAllocation], client: &RpcClient, args: &DistributeTokensArgs, ) -> Result<(), Error> { @@ -857,7 +903,7 @@ pub fn process_balances( args: &BalancesArgs, exit: Arc, ) -> Result<(), Error> { - let allocations: Vec = + let allocations: Vec = read_allocations(&args.input_csv, None, false, args.spl_token_args.is_some())?; let allocations = merge_allocations(&allocations); @@ -885,7 +931,7 @@ pub fn process_balances( if let Some(spl_token_args) = &args.spl_token_args { print_token_balances(client, allocation, spl_token_args)?; } else { - let address: Pubkey = allocation.recipient.parse().unwrap(); + let address: Pubkey = allocation.recipient; let expected = lamports_to_sol(allocation.amount); let actual = lamports_to_sol(client.get_balance(&address).unwrap()); println!( @@ -909,9 +955,13 @@ pub fn process_transaction_log(args: &TransactionLogArgs) -> Result<(), Error> { use { crate::db::check_output_file, - solana_sdk::{pubkey::Pubkey, signature::Keypair}, + solana_sdk::{ + pubkey::{self, Pubkey}, + signature::Keypair, + }, tempfile::{tempdir, NamedTempFile}, }; + pub fn test_process_distribute_tokens_with_client( client: &RpcClient, sender_keypair: Keypair, @@ -939,7 +989,7 @@ pub fn test_process_distribute_tokens_with_client( } else { sol_to_lamports(1000.0) }; - let alice_pubkey = solana_sdk::pubkey::new_rand(); + let alice_pubkey = pubkey::new_rand(); let allocations_file = NamedTempFile::new().unwrap(); let input_csv = allocations_file.path().to_str().unwrap().to_string(); let mut wtr = csv::WriterBuilder::new().from_writer(allocations_file); @@ -1039,7 +1089,7 @@ pub fn test_process_create_stake_with_client(client: &RpcClient, sender_keypair: .unwrap(); let expected_amount = sol_to_lamports(1000.0); - let alice_pubkey = solana_sdk::pubkey::new_rand(); + let alice_pubkey = pubkey::new_rand(); let file = NamedTempFile::new().unwrap(); let input_csv = file.path().to_str().unwrap().to_string(); let mut wtr = csv::WriterBuilder::new().from_writer(file); @@ -1161,7 +1211,7 @@ pub fn test_process_distribute_stake_with_client(client: &RpcClient, sender_keyp .unwrap(); let expected_amount = sol_to_lamports(1000.0); - let alice_pubkey = solana_sdk::pubkey::new_rand(); + let alice_pubkey = pubkey::new_rand(); let file = NamedTempFile::new().unwrap(); let input_csv = file.path().to_str().unwrap().to_string(); let mut wtr = csv::WriterBuilder::new().from_writer(file); @@ -1328,16 +1378,27 @@ mod tests { #[test] fn test_read_allocations() { - let alice_pubkey = solana_sdk::pubkey::new_rand(); - let allocation = Allocation { - recipient: alice_pubkey.to_string(), + let alice_pubkey = pubkey::new_rand(); + let allocation = TypedAllocation { + recipient: alice_pubkey, amount: 42, - lockup_date: "".to_string(), + lockup_date: None, }; let file = NamedTempFile::new().unwrap(); let input_csv = file.path().to_str().unwrap().to_string(); let mut 
wtr = csv::WriterBuilder::new().from_writer(file); - wtr.serialize(&allocation).unwrap(); + wtr.serialize(( + "recipient".to_string(), + "amount".to_string(), + "require_lockup".to_string(), + )) + .unwrap(); + wtr.serialize(( + allocation.recipient.to_string(), + allocation.amount, + allocation.lockup_date, + )) + .unwrap(); wtr.flush().unwrap(); assert_eq!( @@ -1345,10 +1406,10 @@ mod tests { vec![allocation] ); - let allocation_sol = Allocation { - recipient: alice_pubkey.to_string(), + let allocation_sol = TypedAllocation { + recipient: alice_pubkey, amount: sol_to_lamports(42.0), - lockup_date: "".to_string(), + lockup_date: None, }; assert_eq!( @@ -1367,8 +1428,8 @@ mod tests { #[test] fn test_read_allocations_no_lockup() { - let pubkey0 = solana_sdk::pubkey::new_rand(); - let pubkey1 = solana_sdk::pubkey::new_rand(); + let pubkey0 = pubkey::new_rand(); + let pubkey1 = pubkey::new_rand(); let file = NamedTempFile::new().unwrap(); let input_csv = file.path().to_str().unwrap().to_string(); let mut wtr = csv::WriterBuilder::new().from_writer(file); @@ -1379,15 +1440,15 @@ mod tests { wtr.flush().unwrap(); let expected_allocations = vec![ - Allocation { - recipient: pubkey0.to_string(), + TypedAllocation { + recipient: pubkey0, amount: sol_to_lamports(42.0), - lockup_date: "".to_string(), + lockup_date: None, }, - Allocation { - recipient: pubkey1.to_string(), + TypedAllocation { + recipient: pubkey1, amount: sol_to_lamports(43.0), - lockup_date: "".to_string(), + lockup_date: None, }, ]; assert_eq!( @@ -1397,42 +1458,210 @@ mod tests { } #[test] - #[should_panic] fn test_read_allocations_malformed() { - let pubkey0 = solana_sdk::pubkey::new_rand(); - let pubkey1 = solana_sdk::pubkey::new_rand(); + let pubkey0 = pubkey::new_rand(); + let pubkey1 = pubkey::new_rand(); + + // Empty file. let file = NamedTempFile::new().unwrap(); + let mut wtr = csv::WriterBuilder::new().from_writer(&file); + wtr.flush().unwrap(); let input_csv = file.path().to_str().unwrap().to_string(); - let mut wtr = csv::WriterBuilder::new().from_writer(file); + let got = read_allocations(&input_csv, None, false, false); + assert!(matches!(got, Err(Error::CsvIsEmptyError))); + + // Missing 2nd column. + let file = NamedTempFile::new().unwrap(); + let mut wtr = csv::WriterBuilder::new().from_writer(&file); + wtr.serialize("recipient".to_string()).unwrap(); + wtr.serialize(pubkey0.to_string()).unwrap(); + wtr.serialize(pubkey1.to_string()).unwrap(); + wtr.flush().unwrap(); + let input_csv = file.path().to_str().unwrap().to_string(); + let got = read_allocations(&input_csv, None, false, false); + assert!(matches!(got, Err(Error::CsvError(..)))); + + // Missing 3rd column. 
+ let file = NamedTempFile::new().unwrap(); + let mut wtr = csv::WriterBuilder::new().from_writer(&file); wtr.serialize(("recipient".to_string(), "amount".to_string())) .unwrap(); - wtr.serialize((&pubkey0.to_string(), 42.0)).unwrap(); - wtr.serialize((&pubkey1.to_string(), 43.0)).unwrap(); + wtr.serialize((pubkey0.to_string(), "42.0".to_string())) + .unwrap(); + wtr.serialize((pubkey1.to_string(), "43.0".to_string())) + .unwrap(); wtr.flush().unwrap(); + let input_csv = file.path().to_str().unwrap().to_string(); + let got = read_allocations(&input_csv, None, true, false); + assert!(matches!(got, Err(Error::CsvError(..)))); + + let generate_csv_file = |header: (String, String, String), + data: Vec<(String, String, String)>, + file: &NamedTempFile| { + let mut wtr = csv::WriterBuilder::new().from_writer(file); + wtr.serialize(header).unwrap(); + wtr.serialize(&data[0]).unwrap(); + wtr.serialize(&data[1]).unwrap(); + wtr.flush().unwrap(); + }; - let expected_allocations = vec![ - Allocation { - recipient: pubkey0.to_string(), - amount: sol_to_lamports(42.0), - lockup_date: "".to_string(), - }, - Allocation { - recipient: pubkey1.to_string(), - amount: sol_to_lamports(43.0), - lockup_date: "".to_string(), - }, - ]; - assert_eq!( - read_allocations(&input_csv, None, true, false).unwrap(), - expected_allocations + let default_header = ( + "recipient".to_string(), + "amount".to_string(), + "require_lockup".to_string(), + ); + + // Bad pubkey (default). + let file = NamedTempFile::new().unwrap(); + generate_csv_file( + default_header.clone(), + vec![ + (pubkey0.to_string(), "42.0".to_string(), "".to_string()), + ("bad pubkey".to_string(), "43.0".to_string(), "".to_string()), + ], + &file, + ); + let input_csv = file.path().to_str().unwrap().to_string(); + let got_err = read_allocations(&input_csv, None, false, false).unwrap_err(); + assert!( + matches!(got_err, Error::BadInputPubkeyError { input, .. } if input == *"bad pubkey") + ); + // Bad pubkey (with transfer amount). + let file = NamedTempFile::new().unwrap(); + generate_csv_file( + default_header.clone(), + vec![ + (pubkey0.to_string(), "42.0".to_string(), "".to_string()), + ("bad pubkey".to_string(), "43.0".to_string(), "".to_string()), + ], + &file, + ); + let input_csv = file.path().to_str().unwrap().to_string(); + let got_err = read_allocations(&input_csv, Some(123), false, false).unwrap_err(); + assert!( + matches!(got_err, Error::BadInputPubkeyError { input, .. } if input == *"bad pubkey") + ); + // Bad pubkey (with require lockup). + let file = NamedTempFile::new().unwrap(); + generate_csv_file( + default_header.clone(), + vec![ + ( + pubkey0.to_string(), + "42.0".to_string(), + "2021-02-07T00:00:00Z".to_string(), + ), + ( + "bad pubkey".to_string(), + "43.0".to_string(), + "2021-02-07T00:00:00Z".to_string(), + ), + ], + &file, + ); + let input_csv = file.path().to_str().unwrap().to_string(); + let got_err = read_allocations(&input_csv, None, true, false).unwrap_err(); + assert!( + matches!(got_err, Error::BadInputPubkeyError { input, .. } if input == *"bad pubkey") + ); + // Bad pubkey (with raw amount). 
+ let file = NamedTempFile::new().unwrap(); + generate_csv_file( + default_header.clone(), + vec![ + (pubkey0.to_string(), "42".to_string(), "".to_string()), + ("bad pubkey".to_string(), "43".to_string(), "".to_string()), + ], + &file, + ); + let input_csv = file.path().to_str().unwrap().to_string(); + let got_err = read_allocations(&input_csv, None, false, true).unwrap_err(); + assert!( + matches!(got_err, Error::BadInputPubkeyError { input, .. } if input == *"bad pubkey") + ); + + // Bad value in 2nd column (default). + let file = NamedTempFile::new().unwrap(); + generate_csv_file( + default_header.clone(), + vec![ + ( + pubkey0.to_string(), + "bad amount".to_string(), + "".to_string(), + ), + ( + pubkey1.to_string(), + "43.0".to_string().to_string(), + "".to_string(), + ), + ], + &file, + ); + let input_csv = file.path().to_str().unwrap().to_string(); + let got = read_allocations(&input_csv, None, false, false); + assert!(matches!(got, Err(Error::CsvError(..)))); + // Bad value in 2nd column (with require lockup). + let file = NamedTempFile::new().unwrap(); + generate_csv_file( + default_header.clone(), + vec![ + ( + pubkey0.to_string(), + "bad amount".to_string(), + "".to_string(), + ), + (pubkey1.to_string(), "43.0".to_string(), "".to_string()), + ], + &file, + ); + let input_csv = file.path().to_str().unwrap().to_string(); + let got = read_allocations(&input_csv, None, true, false); + assert!(matches!(got, Err(Error::CsvError(..)))); + // Bad value in 2nd column (with raw amount). + let file = NamedTempFile::new().unwrap(); + generate_csv_file( + default_header.clone(), + vec![ + (pubkey0.to_string(), "42".to_string(), "".to_string()), + (pubkey1.to_string(), "43.0".to_string(), "".to_string()), // bad raw amount + ], + &file, + ); + let input_csv = file.path().to_str().unwrap().to_string(); + let got = read_allocations(&input_csv, None, false, true); + assert!(matches!(got, Err(Error::CsvError(..)))); + + // Bad value in 3rd column. + let file = NamedTempFile::new().unwrap(); + generate_csv_file( + default_header.clone(), + vec![ + ( + pubkey0.to_string(), + "42.0".to_string(), + "2021-01-07T00:00:00Z".to_string(), + ), + ( + pubkey1.to_string(), + "43.0".to_string(), + "bad lockup date".to_string(), + ), + ], + &file, + ); + let input_csv = file.path().to_str().unwrap().to_string(); + let got_err = read_allocations(&input_csv, None, true, false).unwrap_err(); + assert!( + matches!(got_err, Error::BadInputLockupDate { input, .. 
} if input == *"bad lockup date") ); } #[test] fn test_read_allocations_transfer_amount() { - let pubkey0 = solana_sdk::pubkey::new_rand(); - let pubkey1 = solana_sdk::pubkey::new_rand(); - let pubkey2 = solana_sdk::pubkey::new_rand(); + let pubkey0 = pubkey::new_rand(); + let pubkey1 = pubkey::new_rand(); + let pubkey2 = pubkey::new_rand(); let file = NamedTempFile::new().unwrap(); let input_csv = file.path().to_str().unwrap().to_string(); let mut wtr = csv::WriterBuilder::new().from_writer(file); @@ -1445,20 +1674,20 @@ mod tests { let amount = sol_to_lamports(1.5); let expected_allocations = vec![ - Allocation { - recipient: pubkey0.to_string(), + TypedAllocation { + recipient: pubkey0, amount, - lockup_date: "".to_string(), + lockup_date: None, }, - Allocation { - recipient: pubkey1.to_string(), + TypedAllocation { + recipient: pubkey1, amount, - lockup_date: "".to_string(), + lockup_date: None, }, - Allocation { - recipient: pubkey2.to_string(), + TypedAllocation { + recipient: pubkey2, amount, - lockup_date: "".to_string(), + lockup_date: None, }, ]; assert_eq!( @@ -1469,18 +1698,18 @@ mod tests { #[test] fn test_apply_previous_transactions() { - let alice = solana_sdk::pubkey::new_rand(); - let bob = solana_sdk::pubkey::new_rand(); + let alice = pubkey::new_rand(); + let bob = pubkey::new_rand(); let mut allocations = vec![ - Allocation { - recipient: alice.to_string(), + TypedAllocation { + recipient: alice, amount: sol_to_lamports(1.0), - lockup_date: "".to_string(), + lockup_date: None, }, - Allocation { - recipient: bob.to_string(), + TypedAllocation { + recipient: bob, amount: sol_to_lamports(1.0), - lockup_date: "".to_string(), + lockup_date: None, }, ]; let transaction_infos = vec![TransactionInfo { @@ -1493,24 +1722,24 @@ mod tests { // Ensure that we applied the transaction to the allocation with // a matching recipient address (to bob, not alice). 
- assert_eq!(allocations[0].recipient, alice.to_string()); + assert_eq!(allocations[0].recipient, alice); } #[test] fn test_has_same_recipient() { - let alice_pubkey = solana_sdk::pubkey::new_rand(); - let bob_pubkey = solana_sdk::pubkey::new_rand(); + let alice_pubkey = pubkey::new_rand(); + let bob_pubkey = pubkey::new_rand(); let lockup0 = "2021-01-07T00:00:00Z".to_string(); let lockup1 = "9999-12-31T23:59:59Z".to_string(); - let alice_alloc = Allocation { - recipient: alice_pubkey.to_string(), + let alice_alloc = TypedAllocation { + recipient: alice_pubkey, amount: sol_to_lamports(1.0), - lockup_date: "".to_string(), + lockup_date: None, }; - let alice_alloc_lockup0 = Allocation { - recipient: alice_pubkey.to_string(), + let alice_alloc_lockup0 = TypedAllocation { + recipient: alice_pubkey, amount: sol_to_lamports(1.0), - lockup_date: lockup0.clone(), + lockup_date: lockup0.parse().ok(), }; let alice_info = TransactionInfo { recipient: alice_pubkey, @@ -1550,13 +1779,13 @@ mod tests { #[test] fn test_set_split_stake_lockup() { let lockup_date_str = "2021-01-07T00:00:00Z"; - let allocation = Allocation { - recipient: Pubkey::default().to_string(), + let allocation = TypedAllocation { + recipient: Pubkey::default(), amount: sol_to_lamports(1.002_282_880), - lockup_date: lockup_date_str.to_string(), + lockup_date: lockup_date_str.parse().ok(), }; - let stake_account_address = solana_sdk::pubkey::new_rand(); - let new_stake_account_address = solana_sdk::pubkey::new_rand(); + let stake_account_address = pubkey::new_rand(); + let new_stake_account_address = pubkey::new_rand(); let lockup_authority = Keypair::new(); let lockup_authority_address = lockup_authority.pubkey(); let sender_stake_args = SenderStakeArgs { @@ -1613,12 +1842,12 @@ mod tests { sender_keypair_file: &str, fee_payer: &str, stake_args: Option, - ) -> (Vec, DistributeTokensArgs) { - let recipient = solana_sdk::pubkey::new_rand(); - let allocations = vec![Allocation { - recipient: recipient.to_string(), + ) -> (Vec, DistributeTokensArgs) { + let recipient = pubkey::new_rand(); + let allocations = vec![TypedAllocation { + recipient, amount: allocation_amount, - lockup_date: "".to_string(), + lockup_date: None, }]; let args = DistributeTokensArgs { sender_keypair: read_keypair_file(sender_keypair_file).unwrap().into(), @@ -1890,10 +2119,10 @@ mod tests { // Underfunded stake-account let expensive_allocation_amount = 5000.0; - let expensive_allocations = vec![Allocation { - recipient: solana_sdk::pubkey::new_rand().to_string(), + let expensive_allocations = vec![TypedAllocation { + recipient: pubkey::new_rand(), amount: sol_to_lamports(expensive_allocation_amount), - lockup_date: "".to_string(), + lockup_date: None, }]; let err_result = check_payer_balances( &[one_signer_message(&client)], @@ -2108,10 +2337,10 @@ mod tests { spl_token_args: None, transfer_amount: None, }; - let allocation = Allocation { - recipient: recipient.to_string(), + let allocation = TypedAllocation { + recipient, amount: sol_to_lamports(1.0), - lockup_date: "".to_string(), + lockup_date: None, }; let mut messages: Vec = vec![]; @@ -2230,10 +2459,10 @@ mod tests { spl_token_args: None, transfer_amount: None, }; - let allocation = Allocation { - recipient: recipient.to_string(), + let allocation = TypedAllocation { + recipient, amount: sol_to_lamports(1.0), - lockup_date: "".to_string(), + lockup_date: None, }; let message = transaction.message.clone(); @@ -2329,10 +2558,10 @@ mod tests { .to_string(); let mut db = db::open_db(&db_file, false).unwrap(); 
         let recipient = Pubkey::new_unique();
-        let allocation = Allocation {
-            recipient: recipient.to_string(),
+        let allocation = TypedAllocation {
+            recipient,
             amount: sol_to_lamports(1.0),
-            lockup_date: "".to_string(),
+            lockup_date: None,
         };
         // This is just dummy data; Args will not affect messages
         let args = DistributeTokensArgs {
diff --git a/tokens/src/spl_token.rs b/tokens/src/spl_token.rs
index e3d291c5c19dd8..3e998c1a124e8a 100644
--- a/tokens/src/spl_token.rs
+++ b/tokens/src/spl_token.rs
@@ -1,7 +1,7 @@
 use {
     crate::{
         args::{DistributeTokensArgs, SplTokenArgs},
-        commands::{get_fee_estimate_for_messages, Allocation, Error, FundingSource},
+        commands::{get_fee_estimate_for_messages, Error, FundingSource, TypedAllocation},
     },
     console::style,
     solana_account_decoder::parse_token::{real_number_string, real_number_string_trimmed},
@@ -36,12 +36,8 @@ pub fn update_decimals(client: &RpcClient, args: &mut Option<SplTokenArgs>) -> R
     Ok(())
 }

-pub fn spl_token_amount(amount: f64, decimals: u8) -> u64 {
-    (amount * 10_usize.pow(decimals as u32) as f64) as u64
-}
-
-pub fn build_spl_token_instructions(
-    allocation: &Allocation,
+pub(crate) fn build_spl_token_instructions(
+    allocation: &TypedAllocation,
     args: &DistributeTokensArgs,
     do_create_associated_token_account: bool,
 ) -> Vec<Instruction> {
@@ -49,7 +45,7 @@ pub fn build_spl_token_instructions(
         .spl_token_args
         .as_ref()
         .expect("spl_token_args must be some");
-    let wallet_address = allocation.recipient.parse().unwrap();
+    let wallet_address = allocation.recipient;
     let associated_token_address =
         get_associated_token_address(&wallet_address, &spl_token_args.mint);
     let mut instructions = vec![];
@@ -77,9 +73,9 @@ pub fn build_spl_token_instructions(
     instructions
 }

-pub fn check_spl_token_balances(
+pub(crate) fn check_spl_token_balances(
     messages: &[Message],
-    allocations: &[Allocation],
+    allocations: &[TypedAllocation],
     client: &RpcClient,
     args: &DistributeTokensArgs,
     created_accounts: u64,
@@ -114,12 +110,12 @@ pub fn check_spl_token_balances(
     Ok(())
 }

-pub fn print_token_balances(
+pub(crate) fn print_token_balances(
     client: &RpcClient,
-    allocation: &Allocation,
+    allocation: &TypedAllocation,
     spl_token_args: &SplTokenArgs,
 ) -> Result<(), Error> {
-    let address = allocation.recipient.parse().unwrap();
+    let address = allocation.recipient;
     let expected = allocation.amount;
     let associated_token_address = get_associated_token_address(&address, &spl_token_args.mint);
     let recipient_account = client
diff --git a/turbine/src/broadcast_stage.rs b/turbine/src/broadcast_stage.rs
index f866747ad81e67..98566dfa24bc48 100644
--- a/turbine/src/broadcast_stage.rs
+++ b/turbine/src/broadcast_stage.rs
@@ -466,9 +466,12 @@ pub fn broadcast_shreds(
     transmit_stats.shred_select += shred_select.as_us();

     let mut send_mmsg_time = Measure::start("send_mmsg");
-    if let Err(SendPktsError::IoError(ioerr, num_failed)) = batch_send(s, &packets[..]) {
-        transmit_stats.dropped_packets_udp += num_failed;
-        result = Err(Error::Io(ioerr));
+    match batch_send(s, &packets[..]) {
+        Ok(()) => (),
+        Err(SendPktsError::IoError(ioerr, num_failed)) => {
+            transmit_stats.dropped_packets_udp += num_failed;
+            result = Err(Error::Io(ioerr));
+        }
     }
     send_mmsg_time.stop();
     transmit_stats.send_mmsg_elapsed += send_mmsg_time.as_us();
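Note on the tokens/src hunks above: the refactor replaces the stringly typed `Allocation` with `TypedAllocation`, so recipients and lockup dates are parsed once when the CSV is read rather than `.parse().unwrap()`-ed at every use site. A minimal sketch of the before/after shapes, inferred from the conversions shown in the diff (the exact field types are an inference, not copied from the repo):

    use chrono::{DateTime, Utc};
    use solana_sdk::pubkey::Pubkey;

    // Before: parsing deferred to use sites, each of which had to unwrap.
    struct Allocation {
        recipient: String,   // parsed with .parse().unwrap() where needed
        amount: u64,
        lockup_date: String, // "" served as the "no lockup" sentinel
    }

    // After: bad input is rejected up front; downstream reads are infallible.
    struct TypedAllocation {
        recipient: Pubkey,
        amount: u64,
        lockup_date: Option<DateTime<Utc>>, // None replaces the "" sentinel
    }

This is what lets hunks such as `let wallet_address = allocation.recipient;` drop their unwraps.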
diff --git a/turbine/src/broadcast_stage/broadcast_duplicates_run.rs b/turbine/src/broadcast_stage/broadcast_duplicates_run.rs
index 0db4003a079ce8..bae5945aea0e13 100644
--- a/turbine/src/broadcast_stage/broadcast_duplicates_run.rs
+++ b/turbine/src/broadcast_stage/broadcast_duplicates_run.rs
@@ -376,8 +376,11 @@ impl BroadcastRun for BroadcastDuplicatesRun {
             .flatten()
             .collect();

-        if let Err(SendPktsError::IoError(ioerr, _)) = batch_send(sock, &packets) {
-            return Err(Error::Io(ioerr));
+        match batch_send(sock, &packets) {
+            Ok(()) => (),
+            Err(SendPktsError::IoError(ioerr, _)) => {
+                return Err(Error::Io(ioerr));
+            }
         }
         Ok(())
     }
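Note on the two `batch_send` hunks above: an exhaustive `match` with an explicit `Ok(()) => ()` arm, unlike the previous `if let Err(..)`, turns any future `SendPktsError` variant into a compile error at these sites instead of silently skipped error bookkeeping. A standalone sketch of the pattern (the `SendPktsError` shape is modeled on `solana_streamer::sendmmsg`; the stub and counter are hypothetical):

    use std::io;

    enum SendPktsError {
        // (underlying io::Error, number of packets that failed to send)
        IoError(io::Error, usize),
    }

    fn batch_send_stub() -> Result<(), SendPktsError> {
        Err(SendPktsError::IoError(
            io::Error::new(io::ErrorKind::Other, "send failed"),
            3,
        ))
    }

    fn main() {
        let mut dropped_packets_udp = 0;
        match batch_send_stub() {
            Ok(()) => (),
            // A new SendPktsError variant now fails to compile here rather
            // than slipping past an `if let Err(..)` unnoticed.
            Err(SendPktsError::IoError(ioerr, num_failed)) => {
                dropped_packets_udp += num_failed;
                eprintln!("dropped {num_failed} packets: {ioerr}");
            }
        }
        assert_eq!(dropped_packets_udp, 3);
    }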
diff --git a/turbine/src/quic_endpoint.rs b/turbine/src/quic_endpoint.rs
index 326f409ae32405..a947f212296fb7 100644
--- a/turbine/src/quic_endpoint.rs
+++ b/turbine/src/quic_endpoint.rs
@@ -5,7 +5,8 @@ use {
     log::error,
     quinn::{
         ClientConfig, ConnectError, Connecting, Connection, ConnectionError, Endpoint,
-        EndpointConfig, SendDatagramError, ServerConfig, TokioRuntime, TransportConfig, VarInt,
+        EndpointConfig, IdleTimeout, SendDatagramError, ServerConfig, TokioRuntime,
+        TransportConfig, VarInt,
     },
     rcgen::RcgenError,
     rustls::{Certificate, PrivateKey},
@@ -39,10 +40,17 @@ use {
 const CLIENT_CHANNEL_BUFFER: usize = 1 << 14;
 const ROUTER_CHANNEL_BUFFER: usize = 64;
 const CONNECTION_CACHE_CAPACITY: usize = 3072;
-const INITIAL_MAXIMUM_TRANSMISSION_UNIT: u16 = 1280;
 const ALPN_TURBINE_PROTOCOL_ID: &[u8] = b"solana-turbine";
 const CONNECT_SERVER_NAME: &str = "solana-turbine";

+// Transport config.
+const DATAGRAM_RECEIVE_BUFFER_SIZE: usize = 256 * 1024 * 1024;
+const DATAGRAM_SEND_BUFFER_SIZE: usize = 128 * 1024 * 1024;
+const INITIAL_MAXIMUM_TRANSMISSION_UNIT: u16 = MINIMUM_MAXIMUM_TRANSMISSION_UNIT;
+const KEEP_ALIVE_INTERVAL: Duration = Duration::from_secs(4);
+const MAX_IDLE_TIMEOUT: Duration = Duration::from_secs(10);
+const MINIMUM_MAXIMUM_TRANSMISSION_UNIT: u16 = 1280;
+
 const CONNECTION_CLOSE_ERROR_CODE_SHUTDOWN: VarInt = VarInt::from_u32(1);
 const CONNECTION_CLOSE_ERROR_CODE_DROPPED: VarInt = VarInt::from_u32(2);
 const CONNECTION_CLOSE_ERROR_CODE_INVALID_IDENTITY: VarInt = VarInt::from_u32(3);
@@ -173,11 +181,18 @@ fn new_client_config(cert: Certificate, key: PrivateKey) -> Result<ClientConfig
     Ok(config)
 }

 fn new_transport_config() -> TransportConfig {
+    let max_idle_timeout = IdleTimeout::try_from(MAX_IDLE_TIMEOUT).unwrap();
     let mut config = TransportConfig::default();
     config
+        .datagram_receive_buffer_size(Some(DATAGRAM_RECEIVE_BUFFER_SIZE))
+        .datagram_send_buffer_size(DATAGRAM_SEND_BUFFER_SIZE)
+        .initial_mtu(INITIAL_MAXIMUM_TRANSMISSION_UNIT)
+        .keep_alive_interval(Some(KEEP_ALIVE_INTERVAL))
         .max_concurrent_bidi_streams(VarInt::from(0u8))
         .max_concurrent_uni_streams(VarInt::from(0u8))
-        .initial_mtu(INITIAL_MAXIMUM_TRANSMISSION_UNIT);
+        .max_idle_timeout(Some(max_idle_timeout))
+        .min_mtu(MINIMUM_MAXIMUM_TRANSMISSION_UNIT)
+        .mtu_discovery_config(None);
     config
 }
@@ -420,10 +435,21 @@ async fn send_datagram_task(
     connection: Connection,
     mut receiver: AsyncReceiver<Bytes>,
 ) -> Result<(), Error> {
-    while let Some(bytes) = receiver.recv().await {
-        connection.send_datagram(bytes)?;
+    tokio::pin! {
+        let connection_closed = connection.closed();
+    }
+    loop {
+        tokio::select! {
+            biased;
+            bytes = receiver.recv() => {
+                match bytes {
+                    None => return Ok(()),
+                    Some(bytes) => connection.send_datagram(bytes)?,
+                }
+            }
+            err = &mut connection_closed => return Err(Error::from(err)),
+        }
     }
-    Ok(())
 }

 async fn make_connection_task(
diff --git a/validator/src/cli.rs b/validator/src/cli.rs
index bd82c0a4ac2727..9aa1c466f8e336 100644
--- a/validator/src/cli.rs
+++ b/validator/src/cli.rs
@@ -1194,6 +1194,12 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> {
                 .help("Debug option to scan all append vecs and verify account index refcounts prior to clean")
                 .hidden(hidden_unless_forced())
         )
+        .arg(
+            Arg::with_name("accounts_db_test_skip_rewrites")
+                .long("accounts-db-test-skip-rewrites")
+                .help("Debug option to skip rewrites for rent-exempt accounts but still add them in bank delta hash calculation")
+                .hidden(hidden_unless_forced())
+        )
         .arg(
             Arg::with_name("no_skip_initial_accounts_db_clean")
                 .long("no-skip-initial-accounts-db-clean")
diff --git a/validator/src/main.rs b/validator/src/main.rs
index 38bb9813ab3a70..bb8fa537b8ecdb 100644
--- a/validator/src/main.rs
+++ b/validator/src/main.rs
@@ -22,7 +22,6 @@ use {
     solana_core::{
         banking_trace::DISABLED_BAKING_TRACE_DIR,
         consensus::tower_storage,
-        ledger_cleanup_service::{DEFAULT_MAX_LEDGER_SHREDS, DEFAULT_MIN_MAX_LEDGER_SHREDS},
         system_monitor_service::SystemMonitorService,
         tpu::DEFAULT_TPU_COALESCE,
         validator::{
@@ -32,6 +31,7 @@ use {
     },
     solana_gossip::{cluster_info::Node, legacy_contact_info::LegacyContactInfo as ContactInfo},
     solana_ledger::{
+        blockstore_cleanup_service::{DEFAULT_MAX_LEDGER_SHREDS, DEFAULT_MIN_MAX_LEDGER_SHREDS},
         blockstore_options::{
             BlockstoreCompressionType, BlockstoreRecoveryMode, LedgerColumnOptions,
             ShredStorageType,
@@ -1206,6 +1206,8 @@ pub fn main() {
             .then_some(CreateAncientStorage::Pack)
             .unwrap_or_default(),
         test_partitioned_epoch_rewards,
+        test_skip_rewrites_but_include_in_bank_hash: matches
+            .is_present("accounts_db_test_skip_rewrites"),
         ..AccountsDbConfig::default()
     };

diff --git a/zk-token-sdk/src/encryption/discrete_log.rs b/zk-token-sdk/src/encryption/discrete_log.rs
index 7f98918823225a..b3e02a74625b61 100644
--- a/zk-token-sdk/src/encryption/discrete_log.rs
+++ b/zk-token-sdk/src/encryption/discrete_log.rs
@@ -130,7 +130,7 @@ impl DiscreteLog {
         &mut self,
         compression_batch_size: usize,
     ) -> Result<(), DiscreteLogError> {
-        if compression_batch_size >= TWO16 as usize {
+        if compression_batch_size >= TWO16 as usize || compression_batch_size == 0 {
             return Err(DiscreteLogError::DiscreteLogBatchSize);
         }
         self.compression_batch_size = compression_batch_size;
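Note on the discrete_log.rs hunk above: the added `|| compression_batch_size == 0` closes the lower bound of the valid range. A zero batch size would do no useful work and, if the batched decoding chunks slices by this value, would panic (Rust's `slice::chunks` requires a non-zero chunk size). A standalone sketch of the guard's contract (`TWO16` and the error name mirror the hunk; the free-function harness is hypothetical):

    const TWO16: u64 = 65536; // 2^16, matching the bound used in the hunk

    #[derive(Debug, PartialEq)]
    enum DiscreteLogError {
        DiscreteLogBatchSize,
    }

    fn validate_batch_size(compression_batch_size: usize) -> Result<(), DiscreteLogError> {
        // Valid sizes are now exactly 1..TWO16: too large and zero both fail.
        if compression_batch_size >= TWO16 as usize || compression_batch_size == 0 {
            return Err(DiscreteLogError::DiscreteLogBatchSize);
        }
        Ok(())
    }

    fn main() {
        assert_eq!(validate_batch_size(0), Err(DiscreteLogError::DiscreteLogBatchSize));
        assert_eq!(validate_batch_size(1 << 16), Err(DiscreteLogError::DiscreteLogBatchSize));
        assert!(validate_batch_size(32).is_ok());
    }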