diff --git a/.buildkite/scripts/build-bench.sh b/.buildkite/scripts/build-bench.sh index d1ad80389107eb..a19e4291bc1426 100755 --- a/.buildkite/scripts/build-bench.sh +++ b/.buildkite/scripts/build-bench.sh @@ -22,5 +22,5 @@ EOF # shellcheck disable=SC2016 group "bench" \ - "$(build_steps "bench-part-1" "ci/bench/part1.sh")" \ - "$(build_steps "bench-part-2" "ci/bench/part2.sh")" + "$(build_steps "bench-part-1" ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/bench/part1.sh")" \ + "$(build_steps "bench-part-2" ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/bench/part2.sh")" diff --git a/.github/scripts/cargo-clippy-before-script.sh b/.github/scripts/cargo-clippy-before-script.sh new file mode 100755 index 00000000000000..b9426203aa6ffc --- /dev/null +++ b/.github/scripts/cargo-clippy-before-script.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +set -e + +os_name="$1" + +case "$os_name" in +"Windows") + ;; +"macOS") + brew install protobuf + ;; +"Linux") ;; +*) + echo "Unknown Operating System" + ;; +esac diff --git a/.github/workflows/cargo.yml b/.github/workflows/cargo.yml new file mode 100644 index 00000000000000..54566f705cb160 --- /dev/null +++ b/.github/workflows/cargo.yml @@ -0,0 +1,50 @@ +name: Cargo + +on: + push: + branches: + - master + - v[0-9]+.[0-9]+ + pull_request: + branches: + - master + - v[0-9]+.[0-9]+ + paths: + - "**.rs" + - "**/Cargo.toml" + - "**/Cargo.lock" + - ".github/scripts/cargo-clippy-before-script.sh" + - ".github/workflows/cargo.yml" + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +env: + SHELL: /bin/bash + SCCACHE_GHA_ENABLED: "true" + RUSTC_WRAPPER: "sccache" + +jobs: + clippy: + strategy: + matrix: + os: + - macos-latest + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v4 + + - uses: mozilla-actions/sccache-action@v0.0.3 + with: + version: "v0.5.4" + + - shell: bash + run: .github/scripts/cargo-clippy-before-script.sh ${{ runner.os }} + + - shell: bash + run: | + source ci/rust-version.sh all + rustup component add clippy --toolchain "$rust_stable" + rustup component add clippy --toolchain "$rust_nightly" + scripts/cargo-clippy.sh diff --git a/.github/workflows/downstream-project-anchor.yml b/.github/workflows/downstream-project-anchor.yml new file mode 100644 index 00000000000000..c150beef37981b --- /dev/null +++ b/.github/workflows/downstream-project-anchor.yml @@ -0,0 +1,53 @@ +name: Downstream Project - Anchor + +on: + push: + branches: + - master + - v[0-9]+.[0-9]+ + pull_request: + branches: + - master + - v[0-9]+.[0-9]+ + paths: + - "**.rs" + - "Cargo.toml" + - "Cargo.lock" + - "cargo-build-bpf" + - "cargo-test-bpf" + - "cargo-build-sbf" + - "cargo-test-sbf" + - ".github/workflows/downstream-project-anchor.yml" + workflow_call: + inputs: + branch: + required: false + type: string + default: "master" + +env: + SHELL: /bin/bash + SCCACHE_GHA_ENABLED: "true" + RUSTC_WRAPPER: "sccache" + +jobs: + test: + runs-on: ubuntu-latest + strategy: + matrix: + version: ["v0.29.0"] + steps: + - uses: actions/checkout@v3 + + - shell: bash + run: | + .github/scripts/purge-ubuntu-runner.sh + + - uses: mozilla-actions/sccache-action@v0.0.3 + with: + version: "v0.5.4" + + - shell: bash + run: | + source .github/scripts/downstream-project-spl-install-deps.sh + ./scripts/build-downstream-anchor-projects.sh ${{ matrix.version }} diff --git a/.mergify.yml b/.mergify.yml index ab81476816764c..0c1e3f3019b094 100644 --- a/.mergify.yml 
+++ b/.mergify.yml @@ -58,6 +58,22 @@ pull_request_rules: # only require docs checks if docs files changed - -files~=^docs/ - status-success=build & deploy docs + - or: + - -files~=(\.rs|Cargo\.toml|Cargo\.lock|\.github/scripts/cargo-clippy-before-script\.sh|\.github/workflows/cargo\.yml)$ + - check-success=clippy (macos-latest) + - or: + - -files~=(\.rs|Cargo\.toml|Cargo\.lock|cargo-build-bpf|cargo-test-bpf|cargo-build-sbf|cargo-test-sbf|ci/downstream-projects/run-spl\.sh|\.github/workflows/downstream-project-spl\.yml)$ + - and: + - status-success=cargo-test-sbf (token/program) + - status-success=cargo-test-sbf (instruction-padding/program, token/program-2022, token/program-2022-test) + - status-success=cargo-test-sbf (associated-token-account/program, associated-token-account/program-test) + - status-success=cargo-test-sbf (token-upgrade/program) + - status-success=cargo-test-sbf (feature-proposal/program) + - status-success=cargo-test-sbf (governance/addin-mock/program, governance/program) + - status-success=cargo-test-sbf (memo/program) + - status-success=cargo-test-sbf (name-service/program) + - status-success=cargo-test-sbf (stake-pool/program) + - status-success=cargo-test-sbf (single-pool/program) actions: merge: method: squash @@ -98,6 +114,19 @@ pull_request_rules: ignore_conflicts: true branches: - v1.16 + - name: v1.16 backport warning comment + conditions: + - label=v1.16 + actions: + comment: + message: > + Backports to the stable branch are to be avoided unless absolutely + necessary for fixing bugs, security issues, and perf regressions. + Changes intended for backport should be structured such that a + minimum effective diff can be committed separately from any + refactoring, plumbing, cleanup, etc that are not strictly + necessary to achieve the goal. Any of the latter should go only + into master and ride the normal stabilization schedule. - name: v1.17 feature-gate backport conditions: - label=v1.17 @@ -122,6 +151,21 @@ pull_request_rules: ignore_conflicts: true branches: - v1.17 + - name: v1.17 backport warning comment + conditions: + - label=v1.17 + actions: + comment: + message: > + Backports to the beta branch are to be avoided unless absolutely + necessary for fixing bugs, security issues, and perf regressions. + Changes intended for backport should be structured such that a + minimum effective diff can be committed separately from any + refactoring, plumbing, cleanup, etc that are not strictly + necessary to achieve the goal. Any of the latter should go only + into master and ride the normal stabilization schedule. Exceptions + include CI/metrics changes, CLI improvements and documentation + updates on a case by case basis. commands_restrictions: # The author of copied PRs is the Mergify user. 
diff --git a/CHANGELOG.md b/CHANGELOG.md index fb2eb5bf50061f..4e568de857c674 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,10 +16,15 @@ Release channels have their own copy of this changelog: * Changes * Added a github check to support `changelog` label * The default for `--use-snapshot-archives-at-startup` is now `when-newest` (#33883) + * The default for `solana-ledger-tool`, however, remains `always` (#34228) + * Added `central-scheduler` option for `--block-production-method` (#33890) + * Updated to Borsh v1 * Added allow_commission_decrease_at_any_time feature which will allow commission on a vote account to be decreased even in the second half of epochs when the commission_updates_only_allowed_in_first_half_of_epoch feature would have prevented it * Upgrade Notes + * `solana-program` and `solana-sdk` default to support for Borsh v1, with +limited backward compatibility for v0.10 and v0.9. Please upgrade to Borsh v1. ## [1.17.0] * Changes diff --git a/Cargo.lock b/Cargo.lock index 5f4b7160f6b8d0..496820cb230467 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -497,7 +497,7 @@ dependencies = [ "matchit", "memchr", "mime", - "percent-encoding 2.3.0", + "percent-encoding 2.3.1", "pin-project-lite", "rustversion", "serde", @@ -726,6 +726,16 @@ dependencies = [ "hashbrown 0.13.2", ] +[[package]] +name = "borsh" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9897ef0f1bd2362169de6d7e436ea2237dc1085d7d1e4db75f4be34d86f309d1" +dependencies = [ + "borsh-derive 1.2.1", + "cfg_aliases", +] + [[package]] name = "borsh-derive" version = "0.9.3" @@ -752,6 +762,20 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "borsh-derive" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "478b41ff04256c5c8330f3dfdaaae2a5cc976a8e75088bafa4625b0d0208de8c" +dependencies = [ + "once_cell", + "proc-macro-crate 2.0.0", + "proc-macro2", + "quote", + "syn 2.0.39", + "syn_derive", +] + [[package]] name = "borsh-derive-internal" version = "0.9.3" @@ -1029,6 +1053,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" + [[package]] name = "chrono" version = "0.4.31" @@ -1530,7 +1560,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" dependencies = [ "cfg-if 1.0.0", - "hashbrown 0.14.1", + "hashbrown 0.14.3", "lock_api", "once_cell", "parking_lot_core 0.9.8", @@ -1997,11 +2027,11 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" dependencies = [ - "percent-encoding 2.3.0", + "percent-encoding 2.3.1", ] [[package]] @@ -2012,9 +2042,9 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "fs-err" -version = "2.10.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fb5fd9bcbe8b1087cbd395b51498c01bc997cef73e778a80b77a811af5e2d29f" +checksum = "88a41f105fe1d5b6b34b2055e3dc59bb79b46b48b2040b9e6c7b4b5de097aa41" dependencies = [ "autocfg", ] @@ -2341,9 +2371,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.14.1" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dfda62a12f55daeae5015f81b0baea145391cb4520f86c248fc615d72640d12" +checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" [[package]] name = "headers" @@ -2598,9 +2628,9 @@ dependencies = [ [[package]] name = "idna" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -2670,7 +2700,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" dependencies = [ "equivalent", - "hashbrown 0.14.1", + "hashbrown 0.14.3", "rayon", ] @@ -2739,9 +2769,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.65" +version = "0.3.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54c0c35952f67de54bb584e9fd912b3023117cbafc0a77d8f3dee1fb5f572fe8" +checksum = "cee9c64da59eae3b50095c18d3e74f8b73c0b86d2792824ff01bbce68ba229ca" dependencies = [ "wasm-bindgen", ] @@ -3488,7 +3518,7 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c11e44798ad209ccdd91fc192f0526a369a01234f7373e1b141c96d7cee4f0e" dependencies = [ - "proc-macro-crate 1.1.0", + "proc-macro-crate 2.0.0", "proc-macro2", "quote", "syn 2.0.39", @@ -3553,9 +3583,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.59" +version = "0.10.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a257ad03cd8fb16ad4172fedf8094451e1af1c4b70097636ef2eac9a5f0cc33" +checksum = "6b8419dc8cc6d866deb801274bba2e6f8f6108c1bb7fcc10ee5ab864931dbb45" dependencies = [ "bitflags 2.4.1", "cfg-if 1.0.0", @@ -3594,9 +3624,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.95" +version = "0.9.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40a4130519a360279579c2053038317e40eff64d13fd3f004f9e1b72b8a6aaf9" +checksum = "c3eaad34cdd97d81de97964fc7f29e2d104f483840d906ef56daa1912338460b" dependencies = [ "cc", "libc", @@ -3618,7 +3648,7 @@ dependencies = [ "futures-util", "js-sys", "lazy_static", - "percent-encoding 2.3.0", + "percent-encoding 2.3.1", "pin-project", "rand 0.8.5", "thiserror", @@ -3762,9 +3792,9 @@ checksum = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" [[package]] name = "percent-encoding" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "percentage" @@ -4003,9 +4033,9 @@ dependencies = [ [[package]] name = "prio-graph" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78dd2fa9ca0901b4d0dbf51d9862d7e3fb004605e4f4b4132472c3d08e7d901b" +checksum = 
"952091df80157ff6f267c9bcb6ad68e42405e217bd83268f2aedee0aa4f03b5c" [[package]] name = "proc-macro-crate" @@ -4026,6 +4056,15 @@ dependencies = [ "toml 0.5.8", ] +[[package]] +name = "proc-macro-crate" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8" +dependencies = [ + "toml_edit 0.20.7", +] + [[package]] name = "proc-macro-error" version = "1.0.4" @@ -4052,9 +4091,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.69" +version = "1.0.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" +checksum = "39278fbbf5fb4f646ce651690877f89d1c5811a3d4acb27700c1cb3cdb78fd3b" dependencies = [ "unicode-ident", ] @@ -4156,7 +4195,7 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d464fae65fff2680baf48019211ce37aaec0c78e9264c84a3e484717f965104e" dependencies = [ - "percent-encoding 2.3.0", + "percent-encoding 2.3.1", ] [[package]] @@ -4352,9 +4391,9 @@ dependencies = [ [[package]] name = "raptorq" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "655b020bbf5c89791160a30f0d4706d8ec7aa5718d6a198f6df19c400e4f4470" +checksum = "6c9cf9270cc5903afdef387f06ef1cd89fb77f45c357c2a425bae78b839fd866" [[package]] name = "rayon" @@ -4512,7 +4551,7 @@ dependencies = [ "mime", "native-tls", "once_cell", - "percent-encoding 2.3.0", + "percent-encoding 2.3.1", "pin-project-lite", "rustls", "rustls-pemfile 1.0.0", @@ -4525,7 +4564,7 @@ dependencies = [ "tokio-rustls", "tokio-util 0.7.1", "tower-service", - "url 2.4.1", + "url 2.5.0", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -4656,9 +4695,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.8" +version = "0.21.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "446e14c5cda4f3f30fe71863c34ec70f5ac79d6087097ad0bb433e1be5edf04c" +checksum = "629648aced5775d558af50b2b4c7b02983a04b312126d45eeead26e7caa498b9" dependencies = [ "log", "ring 0.17.3", @@ -4846,9 +4885,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.192" +version = "1.0.193" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bca2a08484b285dcb282d0f67b26cadc0df8b19f8c12502c13d966bf9482f001" +checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89" dependencies = [ "serde_derive", ] @@ -4864,9 +4903,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.192" +version = "1.0.193" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6c7207fbec9faa48073f3e3074cbe553af6ea512d7c21ba46e434e70ea9fbc1" +checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" dependencies = [ "proc-macro2", "quote", @@ -5395,7 +5434,7 @@ dependencies = [ name = "solana-banks-client" version = "1.18.0" dependencies = [ - "borsh 0.10.3", + "borsh 1.2.1", "futures 0.3.29", "solana-banks-interface", "solana-banks-server", @@ -5521,6 +5560,7 @@ dependencies = [ "solana-sdk", "solana-zk-token-sdk", "solana_rbpf", + "test-case", "thiserror", ] @@ -5641,7 +5681,7 @@ dependencies = [ "thiserror", "tiny-bip39", "uriparse", - "url 2.4.1", + "url 2.5.0", ] [[package]] @@ -5659,7 +5699,7 @@ dependencies = [ "thiserror", "tiny-bip39", "uriparse", - "url 2.4.1", + "url 2.5.0", ] [[package]] @@ -5727,7 +5767,7 @@ 
dependencies = [ "serde_yaml 0.9.27", "solana-clap-utils", "solana-sdk", - "url 2.4.1", + "url 2.5.0", ] [[package]] @@ -6257,7 +6297,7 @@ dependencies = [ "solana-version", "tar", "tempfile", - "url 2.4.1", + "url 2.5.0", "winapi 0.3.9", "winreg", ] @@ -6547,7 +6587,7 @@ dependencies = [ "solana-sdk", "solana-version", "tokio", - "url 2.4.1", + "url 2.5.0", ] [[package]] @@ -6651,6 +6691,7 @@ dependencies = [ "blake3", "borsh 0.10.3", "borsh 0.9.3", + "borsh 1.2.1", "bs58", "bv", "bytemuck", @@ -6770,7 +6811,7 @@ dependencies = [ "tokio-stream", "tokio-tungstenite", "tungstenite", - "url 2.4.1", + "url 2.5.0", ] [[package]] @@ -7073,6 +7114,7 @@ dependencies = [ "log", "rand 0.8.5", "rustc_version 0.4.0", + "solana-program", "solana-program-runtime", "solana-sdk", "thiserror", @@ -7087,7 +7129,7 @@ dependencies = [ "base64 0.21.5", "bincode", "bitflags 2.4.1", - "borsh 0.10.3", + "borsh 1.2.1", "bs58", "bytemuck", "byteorder", @@ -8071,6 +8113,18 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "syn_derive" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1329189c02ff984e9736652b1631330da25eaa6bc639089ed4915d25446cbe7b" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.39", +] + [[package]] name = "sync_wrapper" version = "0.1.1" @@ -8223,9 +8277,9 @@ checksum = "13a4ec180a2de59b57434704ccfad967f789b12737738798fa08798cd5824c16" [[package]] name = "test-case" -version = "3.2.1" +version = "3.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8f1e820b7f1d95a0cdbf97a5df9de10e1be731983ab943e56703ac1b8e9d425" +checksum = "eb2550dd13afcd286853192af8601920d959b14c401fcece38071d53bf0768a8" dependencies = [ "test-case-macros", ] @@ -8540,7 +8594,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit", + "toml_edit 0.21.0", ] [[package]] @@ -8552,6 +8606,17 @@ dependencies = [ "serde", ] +[[package]] +name = "toml_edit" +version = "0.20.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" +dependencies = [ + "indexmap 2.1.0", + "toml_datetime", + "winnow", +] + [[package]] name = "toml_edit" version = "0.21.0" @@ -8583,7 +8648,7 @@ dependencies = [ "http-body", "hyper", "hyper-timeout", - "percent-encoding 2.3.0", + "percent-encoding 2.3.1", "pin-project", "prost", "rustls-pemfile 1.0.0", @@ -8725,7 +8790,7 @@ dependencies = [ "rustls", "sha1", "thiserror", - "url 2.4.1", + "url 2.5.0", "utf-8", "webpki-roots 0.24.0", ] @@ -8850,13 +8915,13 @@ dependencies = [ [[package]] name = "url" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" +checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" dependencies = [ "form_urlencoded", - "idna 0.4.0", - "percent-encoding 2.3.0", + "idna 0.5.0", + "percent-encoding 2.3.1", ] [[package]] @@ -8939,9 +9004,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.88" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7daec296f25a1bae309c0cd5c29c4b260e510e6d813c286b19eaadf409d40fce" +checksum = "0ed0d4f68a3015cc185aff4db9506a015f4b96f95303897bfa23f846db54064e" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -8949,9 +9014,9 @@ 
dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.88" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e397f4664c0e4e428e8313a469aaa58310d302159845980fd23b0f22a847f217" +checksum = "1b56f625e64f3a1084ded111c4d5f477df9f8c92df113852fa5a374dbda78826" dependencies = [ "bumpalo", "log", @@ -8976,9 +9041,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.88" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5961017b3b08ad5f3fe39f1e79877f8ee7c23c5e5fd5eb80de95abc41f1f16b2" +checksum = "0162dbf37223cd2afce98f3d0785506dcb8d266223983e4b5b525859e6e182b2" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -8986,9 +9051,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.88" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5353b8dab669f5e10f5bd76df26a9360c748f054f862ff5f3f8aae0c7fb3907" +checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" dependencies = [ "proc-macro2", "quote", @@ -8999,9 +9064,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.88" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d046c5d029ba91a1ed14da14dca44b68bf2f124cfbaf741c54151fdb3e0750b" +checksum = "7ab9b36309365056cd639da3134bf87fa8f3d86008abf99e612384a6eecd459f" [[package]] name = "web-sys" diff --git a/Cargo.toml b/Cargo.toml index d8f3fe61dc54c2..631173aa56de59 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -155,7 +155,7 @@ bincode = "1.3.3" bitflags = { version = "2.3.3", features = ["serde"] } blake3 = "1.5.0" block-buffer = "0.10.4" -borsh = "0.10.3" +borsh = { version = "1.2.1", features = ["derive", "unstable__schema"] } bs58 = "0.4.0" bv = "0.11.1" byte-unit = "4.0.19" @@ -199,7 +199,7 @@ fast-math = "0.1" fd-lock = "3.0.13" flate2 = "1.0.28" fnv = "1.0.7" -fs-err = "2.10.0" +fs-err = "2.11.0" fs_extra = "1.3.0" futures = "0.3.29" futures-util = "0.3.29" @@ -224,7 +224,7 @@ itertools = "0.10.5" jemallocator = { package = "tikv-jemallocator", version = "0.4.1", features = [ "unprefixed_malloc_on_supported_platforms", ] } -js-sys = "0.3.65" +js-sys = "0.3.66" json5 = "0.4.1" jsonrpc-core = "18.0.0" jsonrpc-core-client = "18.0.0" @@ -263,8 +263,8 @@ pickledb = { version = "0.5.1", default-features = false } pkcs8 = "0.8.0" predicates = "2.1" pretty-hex = "0.3.0" -prio-graph = "0.1.0" -proc-macro2 = "1.0.69" +prio-graph = "0.2.0" +proc-macro2 = "1.0.70" proptest = "1.4" prost = "0.11.9" prost-build = "0.11.9" @@ -277,7 +277,7 @@ quinn-proto = "0.10.6" quote = "1.0" rand = "0.8.5" rand_chacha = "0.3.1" -raptorq = "1.7.0" +raptorq = "1.8.0" rayon = "1.8.0" rcgen = "0.10.0" reed-solomon-erasure = "6.0.0" @@ -286,12 +286,12 @@ reqwest = { version = "0.11.22", default-features = false } rolling-file = "0.2.0" rpassword = "7.3" rustc_version = "0.4" -rustls = { version = "0.21.8", default-features = false, features = ["quic"] } +rustls = { version = "0.21.9", default-features = false, features = ["quic"] } rustversion = "1.0.14" scopeguard = "1.2.0" semver = "1.0.20" seqlock = "0.2.0" -serde = "1.0.192" +serde = "1.0.193" serde_bytes = "0.11.12" serde_derive = "1.0.103" serde_json = "1.0.108" @@ -405,7 +405,7 @@ systemstat = "0.2.3" tar = "0.4.40" tarpc = "0.29.0" tempfile = "3.8.1" -test-case = "3.2.1" +test-case = "3.3.1" thiserror = "1.0.50" tiny-bip39 = "0.8.2" tokio = 
"1.29.1" @@ -419,7 +419,7 @@ tonic-build = "0.9.2" trees = "0.4.2" tungstenite = "0.20.1" uriparse = "0.6.4" -url = "2.4.1" +url = "2.5.0" wasm-bindgen = "0.2" winapi = "0.3.8" winreg = "0.50" @@ -442,16 +442,18 @@ crossbeam-epoch = { git = "https://github.com/solana-labs/crossbeam", rev = "fd2 # * spl-token-2022 # * spl-token-metadata-interface # -# They, in turn, depend on a number of crates that we also include directly using `path` -# specifications. For example, `spl-token` depends on `solana-program`. And we explicitly specify -# `solana-program` above as a local path dependency: +# They, in turn, depend on a number of crates that we also include directly +# using `path` specifications. For example, `spl-token` depends on +# `solana-program`. And we explicitly specify `solana-program` above as a local +# path dependency: # # solana-program = { path = "../../sdk/program", version = "=1.16.0" } # -# Unfortunately, Cargo will try to resolve the `spl-token` `solana-program` dependency only using -# what is available on crates.io. Crates.io normally contains a previous version of these crates, -# and we end up with two versions of `solana-program` and `solana-zk-token-sdk` and all of their -# dependencies in our build tree. +# Unfortunately, Cargo will try to resolve the `spl-token` `solana-program` +# dependency only using what is available on crates.io. Crates.io normally +# contains a previous version of these crates, and we end up with two versions +# of `solana-program` and `solana-zk-token-sdk` and all of their dependencies in +# our build tree. # # If you are developing downstream using non-crates-io solana-program (local or # forked repo, or from github rev, eg), duplicate the following patch statements @@ -460,8 +462,8 @@ crossbeam-epoch = { git = "https://github.com/solana-labs/crossbeam", rev = "fd2 # -p solana-zk-token-sdk` to remove extraneous versions from your Cargo.lock # file. # -# There is a similar override in `programs/sbf/Cargo.toml`. Please keep both comments and the -# overrides in sync. +# There is a similar override in `programs/sbf/Cargo.toml`. Please keep both +# comments and the overrides in sync. solana-program = { path = "sdk/program" } solana-zk-token-sdk = { path = "zk-token-sdk" } # @@ -481,9 +483,8 @@ solana-zk-token-sdk = { path = "zk-token-sdk" } # newer versions, but we have not updated yet. As we update, we need to remove # these patch requests. # -# When our dependencies are upgraded, we can remove this patches. Before that -# we might need to maintain these patches in sync with our full dependency -# tree. +# When our dependencies are upgraded, we can remove these patches. Before that +# we might need to maintain these patches in sync with our full dependency tree. # Our dependency tree has `aes-gcm-siv` v0.10.3 and the `zeroize` restriction # was removed in the next commit just after the release. So it seems safe to @@ -506,17 +507,17 @@ git = "https://github.com/RustCrypto/AEADs" rev = "6105d7a5591aefa646a95d12b5e8d3f55a9214ef" # Our dependency tree has `curve25519-dalek` v3.2.1. They have removed the -# constrain in the next major release. Commit that removes `zeroize` constrain -# was added to multiple release branches. Bot not to the 3.2 branch. +# constraint in the next major release. The commit that removes the `zeroize` +# constraint was added to multiple release branches, but not to the 3.2 branch. 
# # `curve25519-dalek` maintainers are saying they do not want to invest any more # time in the 3.2 release: # # https://github.com/dalek-cryptography/curve25519-dalek/issues/452#issuecomment-1749809428 # -# So we have to fork and create our own release, based on v3.2.1. Commit that -# removed `zeroize` constrain on the `main` branch cherry picked on top of the -# v3.2.1 release. +# So we have to fork and create our own release, based on v3.2.1, with the +# commit that removed `zeroize` constraint on the `main` branch cherry-picked on +# top. # # `curve25519-dalek` v3.2.1 release: # diff --git a/accounts-cluster-bench/Cargo.toml b/accounts-cluster-bench/Cargo.toml index 8807020d2f17a7..54a455753831fd 100644 --- a/accounts-cluster-bench/Cargo.toml +++ b/accounts-cluster-bench/Cargo.toml @@ -34,6 +34,7 @@ spl-token = { workspace = true, features = ["no-entrypoint"] } [dev-dependencies] solana-core = { workspace = true } solana-local-cluster = { workspace = true } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } solana-test-validator = { workspace = true } [package.metadata.docs.rs] diff --git a/accounts-db/src/account_storage.rs b/accounts-db/src/account_storage.rs index 7178d62dd6c1c5..e7a33b711d23ca 100644 --- a/accounts-db/src/account_storage.rs +++ b/accounts-db/src/account_storage.rs @@ -75,7 +75,10 @@ impl AccountStorage { /// return the append vec for 'slot' if it exists /// This is only ever called when shrink is not possibly running and there is a max of 1 append vec per slot. pub fn get_slot_storage_entry(&self, slot: Slot) -> Option> { - assert!(self.no_shrink_in_progress()); + assert!( + self.no_shrink_in_progress(), + "self.no_shrink_in_progress(): {slot}" + ); self.get_slot_storage_entry_shrinking_in_progress_ok(slot) } @@ -95,7 +98,10 @@ impl AccountStorage { /// returns true if there is no entry for 'slot' #[cfg(test)] pub(crate) fn is_empty_entry(&self, slot: Slot) -> bool { - assert!(self.no_shrink_in_progress()); + assert!( + self.no_shrink_in_progress(), + "self.no_shrink_in_progress(): {slot}" + ); self.map.get(&slot).is_none() } @@ -124,7 +130,10 @@ impl AccountStorage { } pub(crate) fn insert(&self, slot: Slot, store: Arc) { - assert!(self.no_shrink_in_progress()); + assert!( + self.no_shrink_in_progress(), + "self.no_shrink_in_progress(): {slot}" + ); assert!(self .map .insert( diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs index 0e76c7a716a023..145fe52b19883a 100644 --- a/accounts-db/src/accounts.rs +++ b/accounts-db/src/accounts.rs @@ -57,14 +57,6 @@ pub struct AccountLocks { readonly_locks: HashMap, } -#[derive(Debug, PartialEq, Eq, Copy, Clone)] -pub enum RewardInterval { - /// the slot within the epoch is INSIDE the reward distribution interval - InsideInterval, - /// the slot within the epoch is OUTSIDE the reward distribution interval - OutsideInterval, -} - impl AccountLocks { fn is_locked_readonly(&self, key: &Pubkey) -> bool { self.readonly_locks diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index f22d5a4e4320a0..60e3827e4af46a 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -31,7 +31,9 @@ use { AccountStorage, AccountStorageStatus, ShrinkInProgress, }, accounts_cache::{AccountsCache, CachedAccount, SlotCache}, - accounts_file::{AccountsFile, AccountsFileError}, + accounts_file::{ + AccountsFile, AccountsFileError, MatchAccountOwnerError, ALIGN_BOUNDARY_OFFSET, + }, accounts_hash::{ AccountHash, AccountsDeltaHash, AccountsHash, 
AccountsHashKind, AccountsHasher, CalcAccountsHashConfig, CalculateHashIntermediate, HashStats, IncrementalAccountsHash, @@ -54,8 +56,7 @@ use { get_ancient_append_vec_capacity, is_ancient, AccountsToStore, StorageSelector, }, append_vec::{ - aligned_stored_size, AppendVec, MatchAccountOwnerError, APPEND_VEC_MMAPPED_FILES_OPEN, - STORE_META_OVERHEAD, + aligned_stored_size, AppendVec, APPEND_VEC_MMAPPED_FILES_OPEN, STORE_META_OVERHEAD, }, cache_hash_data::{CacheHashData, CacheHashDataFileReference}, contains::Contains, @@ -67,6 +68,7 @@ use { rent_collector::RentCollector, sorted_storages::SortedStorages, storable_accounts::StorableAccounts, + u64_align, verify_accounts_hash_in_background::VerifyAccountsHashInBackground, }, blake3::traits::digest::Digest, @@ -75,6 +77,7 @@ use { log::*, rand::{thread_rng, Rng}, rayon::{prelude::*, ThreadPool}, + seqlock::SeqLock, serde::{Deserialize, Serialize}, smallvec::SmallVec, solana_measure::{measure::Measure, measure_us}, @@ -87,7 +90,6 @@ use { genesis_config::{ClusterType, GenesisConfig}, hash::Hash, pubkey::Pubkey, - rent::Rent, saturating_add_assign, timing::AtomicInterval, transaction::SanitizedTransaction, @@ -100,7 +102,6 @@ use { io::Result as IoResult, ops::{Range, RangeBounds}, path::{Path, PathBuf}, - str::FromStr, sync::{ atomic::{AtomicBool, AtomicU32, AtomicU64, AtomicUsize, Ordering}, Arc, Condvar, Mutex, RwLock, @@ -337,13 +338,17 @@ impl CurrentAncientAppendVec { } } + /// Create ancient append vec for a slot + /// min_bytes: the new append vec needs to have at least this capacity #[must_use] fn create_ancient_append_vec<'a>( &mut self, slot: Slot, db: &'a AccountsDb, + min_bytes: usize, ) -> ShrinkInProgress<'a> { - let shrink_in_progress = db.get_store_for_shrink(slot, get_ancient_append_vec_capacity()); + let size = get_ancient_append_vec_capacity().max(min_bytes as u64); + let shrink_in_progress = db.get_store_for_shrink(slot, size); *self = Self::new(slot, Arc::clone(shrink_in_progress.new_storage())); shrink_in_progress } @@ -352,9 +357,10 @@ impl CurrentAncientAppendVec { &mut self, slot: Slot, db: &'a AccountsDb, + min_bytes: usize, ) -> Option> { if self.slot_and_append_vec.is_none() { - Some(self.create_ancient_append_vec(slot, db)) + Some(self.create_ancient_append_vec(slot, db, min_bytes)) } else { None } @@ -371,21 +377,31 @@ impl CurrentAncientAppendVec { } /// helper function to cleanup call to 'store_accounts_frozen' + /// return timing and bytes written fn store_ancient_accounts( &self, db: &AccountsDb, accounts_to_store: &AccountsToStore, storage_selector: StorageSelector, - ) -> StoreAccountsTiming { + ) -> (StoreAccountsTiming, u64) { let accounts = accounts_to_store.get(storage_selector); - db.store_accounts_frozen( - (self.slot(), accounts, accounts_to_store.slot), + let previous_available = self.append_vec().accounts.remaining_bytes(); + let timing = db.store_accounts_frozen( + (self.slot(), accounts, accounts_to_store.slot()), None::>, self.append_vec(), None, StoreReclaims::Ignore, - ) + ); + let bytes_written = + previous_available.saturating_sub(self.append_vec().accounts.remaining_bytes()); + assert_eq!( + bytes_written, + u64_align!(accounts_to_store.get_bytes(storage_selector)) as u64 + ); + + (timing, bytes_written) } } @@ -475,7 +491,6 @@ pub const ACCOUNTS_DB_CONFIG_FOR_TESTING: AccountsDbConfig = AccountsDbConfig { index: Some(ACCOUNTS_INDEX_CONFIG_FOR_TESTING), base_working_path: None, accounts_hash_cache_path: None, - filler_accounts_config: FillerAccountsConfig::const_default(), 
write_cache_limit_bytes: None, ancient_append_vec_offset: None, skip_initial_hash_calc: false, @@ -488,7 +503,6 @@ pub const ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS: AccountsDbConfig = AccountsDbConfig index: Some(ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS), base_working_path: None, accounts_hash_cache_path: None, - filler_accounts_config: FillerAccountsConfig::const_default(), write_cache_limit_bytes: None, ancient_append_vec_offset: None, skip_initial_hash_calc: false, @@ -522,26 +536,6 @@ pub struct AccountsAddRootTiming { pub store_us: u64, } -#[derive(Debug, Clone, Copy)] -pub struct FillerAccountsConfig { - /// Number of filler accounts - pub count: usize, - /// Data size per account, in bytes - pub size: usize, -} - -impl FillerAccountsConfig { - pub const fn const_default() -> Self { - Self { count: 0, size: 0 } - } -} - -impl Default for FillerAccountsConfig { - fn default() -> Self { - Self::const_default() - } -} - const ANCIENT_APPEND_VEC_DEFAULT_OFFSET: Option = Some(-10_000); #[derive(Debug, Default, Clone)] @@ -550,7 +544,6 @@ pub struct AccountsDbConfig { /// Base directory for various necessary files pub base_working_path: Option, pub accounts_hash_cache_path: Option, - pub filler_accounts_config: FillerAccountsConfig, pub write_cache_limit_bytes: Option, /// if None, ancient append vecs are set to ANCIENT_APPEND_VEC_DEFAULT_OFFSET /// Some(offset) means include slots up to (max_slot - (slots_per_epoch - 'offset')) @@ -1020,7 +1013,7 @@ pub struct AccountStorageEntry { /// any accounts in it /// status corresponding to the storage, lets us know that /// the append_vec, once maxed out, then emptied, can be reclaimed - count_and_status: RwLock<(usize, AccountStorageStatus)>, + count_and_status: SeqLock<(usize, AccountStorageStatus)>, /// This is the total number of accounts stored ever since initialized to keep /// track of lifetime count of all store operations. 
And this differs from @@ -1043,7 +1036,7 @@ impl AccountStorageEntry { id: AtomicAppendVecId::new(id), slot: AtomicU64::new(slot), accounts, - count_and_status: RwLock::new((0, AccountStorageStatus::Available)), + count_and_status: SeqLock::new((0, AccountStorageStatus::Available)), approx_store_count: AtomicUsize::new(0), alive_bytes: AtomicUsize::new(0), } @@ -1059,14 +1052,14 @@ impl AccountStorageEntry { id: AtomicAppendVecId::new(id), slot: AtomicU64::new(slot), accounts, - count_and_status: RwLock::new((0, AccountStorageStatus::Available)), + count_and_status: SeqLock::new((0, AccountStorageStatus::Available)), approx_store_count: AtomicUsize::new(num_accounts), alive_bytes: AtomicUsize::new(0), } } pub fn set_status(&self, mut status: AccountStorageStatus) { - let mut count_and_status = self.count_and_status.write().unwrap(); + let mut count_and_status = self.count_and_status.lock_write(); let count = count_and_status.0; @@ -1087,7 +1080,7 @@ impl AccountStorageEntry { } pub fn recycle(&self, slot: Slot, id: AppendVecId) { - let mut count_and_status = self.count_and_status.write().unwrap(); + let mut count_and_status = self.count_and_status.lock_write(); self.accounts.reset(); *count_and_status = (0, AccountStorageStatus::Available); self.slot.store(slot, Ordering::Release); @@ -1097,11 +1090,11 @@ impl AccountStorageEntry { } pub fn status(&self) -> AccountStorageStatus { - self.count_and_status.read().unwrap().1 + self.count_and_status.read().1 } pub fn count(&self) -> usize { - self.count_and_status.read().unwrap().0 + self.count_and_status.read().0 } pub fn approx_stored_count(&self) -> usize { @@ -1141,14 +1134,14 @@ impl AccountStorageEntry { } fn add_account(&self, num_bytes: usize) { - let mut count_and_status = self.count_and_status.write().unwrap(); + let mut count_and_status = self.count_and_status.lock_write(); *count_and_status = (count_and_status.0 + 1, count_and_status.1); self.approx_store_count.fetch_add(1, Ordering::Relaxed); self.alive_bytes.fetch_add(num_bytes, Ordering::SeqCst); } fn try_available(&self) -> bool { - let mut count_and_status = self.count_and_status.write().unwrap(); + let mut count_and_status = self.count_and_status.lock_write(); let (count, status) = *count_and_status; if status == AccountStorageStatus::Available { @@ -1164,7 +1157,7 @@ impl AccountStorageEntry { } fn remove_account(&self, num_bytes: usize, reset_accounts: bool) -> usize { - let mut count_and_status = self.count_and_status.write().unwrap(); + let mut count_and_status = self.count_and_status.lock_write(); let (mut count, mut status) = *count_and_status; if count == 1 && status == AccountStorageStatus::Full && reset_accounts { @@ -1537,17 +1530,8 @@ pub struct AccountsDb { /// GeyserPlugin accounts update notifier accounts_update_notifier: Option, - filler_accounts_config: FillerAccountsConfig, - pub filler_account_suffix: Option, - pub(crate) active_stats: ActiveStats, - /// number of filler accounts to add for each slot - pub filler_accounts_per_slot: AtomicU64, - - /// number of slots remaining where filler accounts should be added - pub filler_account_slots_remaining: AtomicU64, - pub verify_accounts_hash_in_bg: VerifyAccountsHashInBackground, /// Used to disable logging dead slots during removal. 
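The count_and_status change above swaps RwLock<(usize, AccountStorageStatus)> for seqlock::SeqLock, so readers take a lock-free, consistent snapshot of the pair instead of contending on a reader-writer lock; only lock_write() serializes, which matches how status(), count(), add_account(), and set_status() are rewritten in this hunk. A minimal sketch of that access pattern, assuming the seqlock 0.2 API exactly as used in the diff; the (usize, u8) tuple is a stand-in for (usize, AccountStorageStatus), since SeqLock reads require a Copy payload:

    use seqlock::SeqLock;

    // Stand-in for (count, AccountStorageStatus); SeqLock::read returns a copy.
    type CountAndStatus = (usize, u8);

    fn main() {
        let count_and_status: SeqLock<CountAndStatus> = SeqLock::new((0, 0));

        // Writer path: exclusive guard, mirrors add_account()/set_status() above.
        {
            let mut guard = count_and_status.lock_write();
            *guard = (guard.0 + 1, guard.1);
        }

        // Reader path: returns a consistent copy without taking a blocking lock,
        // mirroring status()/count() above.
        let (count, status) = count_and_status.read();
        assert_eq!((count, status), (1, 0));
    }

This is a sketch of the locking pattern only, not the surrounding AccountStorageEntry code; the workspace already pins seqlock = "0.2.0" in Cargo.toml, which is what makes the drop-in swap in this hunk possible.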
@@ -2385,7 +2369,6 @@ struct ScanState<'a> { bin_range: &'a Range, config: &'a CalcAccountsHashConfig<'a>, mismatch_found: Arc, - filler_account_suffix: Option<&'a Pubkey>, range: usize, sort_time: Arc, pubkey_to_bin_index: usize, @@ -2415,9 +2398,7 @@ impl<'a> AppendVecScan for ScanState<'a> { let mut loaded_hash = loaded_account.loaded_hash(); let hash_is_missing = loaded_hash == AccountHash(Hash::default()); - if (self.config.check_hash || hash_is_missing) - && !AccountsDb::is_filler_account_helper(pubkey, self.filler_account_suffix) - { + if self.config.check_hash || hash_is_missing { let computed_hash = loaded_account.compute_hash(pubkey); if hash_is_missing { loaded_hash = computed_hash; @@ -2498,8 +2479,6 @@ impl AccountsDb { AccountsDb { create_ancient_storage: CreateAncientStorage::Pack, verify_accounts_hash_in_bg: VerifyAccountsHashInBackground::default(), - filler_accounts_per_slot: AtomicU64::default(), - filler_account_slots_remaining: AtomicU64::default(), active_stats: ActiveStats::default(), skip_initial_hash_calc: false, ancient_append_vec_offset: None, @@ -2552,8 +2531,6 @@ impl AccountsDb { dirty_stores: DashMap::default(), zero_lamport_accounts_to_purge_after_full_snapshot: DashSet::default(), accounts_update_notifier: None, - filler_accounts_config: FillerAccountsConfig::default(), - filler_account_suffix: None, log_dead_slots: AtomicBool::new(true), exhaustively_verify_refcounts: false, partitioned_epoch_rewards_config: PartitionedEpochRewardsConfig::default(), @@ -2605,10 +2582,6 @@ impl AccountsDb { let accounts_hash_cache_path = accounts_db_config .as_ref() .and_then(|config| config.accounts_hash_cache_path.clone()); - let filler_accounts_config = accounts_db_config - .as_ref() - .map(|config| config.filler_accounts_config) - .unwrap_or_default(); let skip_initial_hash_calc = accounts_db_config .as_ref() .map(|config| config.skip_initial_hash_calc) @@ -2642,11 +2615,6 @@ impl AccountsDb { let partitioned_epoch_rewards_config: PartitionedEpochRewardsConfig = PartitionedEpochRewardsConfig::new(test_partitioned_epoch_rewards); - let filler_account_suffix = if filler_accounts_config.count > 0 { - Some(solana_sdk::pubkey::new_rand()) - } else { - None - }; let paths_is_empty = paths.is_empty(); let mut new = Self { paths, @@ -2656,8 +2624,6 @@ impl AccountsDb { account_indexes, shrink_ratio, accounts_update_notifier, - filler_accounts_config, - filler_account_suffix, create_ancient_storage, write_cache_limit_bytes: accounts_db_config .as_ref() @@ -2689,20 +2655,6 @@ impl AccountsDb { new } - /// Gradual means filler accounts will be added over the course of an epoch, during cache flush. - /// This is in contrast to adding all the filler accounts immediately before the validator starts. - fn init_gradual_filler_accounts(&self, slots_per_epoch: Slot) { - let count = self.filler_accounts_config.count; - if count > 0 { - // filler accounts are a debug only feature. 
integer division is fine here - let accounts_per_slot = (count as u64) / slots_per_epoch; - self.filler_accounts_per_slot - .store(accounts_per_slot, Ordering::Release); - self.filler_account_slots_remaining - .store(slots_per_epoch, Ordering::Release); - } - } - pub fn set_shrink_paths(&self, paths: Vec) { assert!(!paths.is_empty()); let mut shrink_paths = self.shrink_paths.write().unwrap(); @@ -4422,15 +4374,6 @@ impl AccountsDb { .get_all_less_than(slot) } - fn get_prior_root(&self, slot: Slot) -> Option { - self.accounts_index - .roots_tracker - .read() - .unwrap() - .alive_roots - .get_prior(slot) - } - /// return all slots that are more than one epoch old and thus could already be an ancient append vec /// or which could need to be combined into a new or existing ancient append vec /// offset is used to combine newer slots than we normally would. This is designed to be used for testing. @@ -4663,8 +4606,10 @@ impl AccountsDb { } let mut stats_sub = ShrinkStatsSub::default(); - let (mut shrink_in_progress, create_and_insert_store_elapsed_us) = - measure_us!(current_ancient.create_if_necessary(slot, self)); + let mut bytes_remaining_to_write = shrink_collect.alive_total_bytes; + let (mut shrink_in_progress, create_and_insert_store_elapsed_us) = measure_us!( + current_ancient.create_if_necessary(slot, self, shrink_collect.alive_total_bytes) + ); stats_sub.create_and_insert_store_elapsed_us = create_and_insert_store_elapsed_us; let available_bytes = current_ancient.append_vec().accounts.remaining_bytes(); // split accounts in 'slot' into: @@ -4686,8 +4631,10 @@ impl AccountsDb { let mut rewrite_elapsed = Measure::start("rewrite_elapsed"); // write what we can to the current ancient storage - stats_sub.store_accounts_timing = + let (store_accounts_timing, bytes_written) = current_ancient.store_ancient_accounts(self, &to_store, StorageSelector::Primary); + stats_sub.store_accounts_timing = store_accounts_timing; + bytes_remaining_to_write = bytes_remaining_to_write.saturating_sub(bytes_written as usize); // handle accounts from 'slot' which did not fit into the current ancient append vec if to_store.has_overflow() { @@ -4695,8 +4642,14 @@ impl AccountsDb { // Assert: it cannot be the case that we already had an ancient append vec at this slot and // yet that ancient append vec does not have room for the accounts stored at this slot currently assert_ne!(slot, current_ancient.slot()); - let (shrink_in_progress_overflow, time_us) = - measure_us!(current_ancient.create_ancient_append_vec(slot, self)); + + // Now we create an ancient append vec at `slot` to store the overflows. 
+ let (shrink_in_progress_overflow, time_us) = measure_us!(current_ancient + .create_ancient_append_vec( + slot, + self, + to_store.get_bytes(StorageSelector::Overflow) + )); stats_sub.create_and_insert_store_elapsed_us += time_us; // We cannot possibly be shrinking the original slot that created an ancient append vec // AND not have enough room in the ancient append vec at that slot @@ -4708,10 +4661,16 @@ impl AccountsDb { shrink_in_progress = Some(shrink_in_progress_overflow); // write the overflow accounts to the next ancient storage - let timing = + let (store_accounts_timing, bytes_written) = current_ancient.store_ancient_accounts(self, &to_store, StorageSelector::Overflow); - stats_sub.store_accounts_timing.accumulate(&timing); + bytes_remaining_to_write = + bytes_remaining_to_write.saturating_sub(bytes_written as usize); + + stats_sub + .store_accounts_timing + .accumulate(&store_accounts_timing); } + assert_eq!(bytes_remaining_to_write, 0); rewrite_elapsed.stop(); stats_sub.rewrite_elapsed_us = rewrite_elapsed.as_us(); @@ -5752,7 +5711,7 @@ impl AccountsDb { fn has_space_available(&self, slot: Slot, size: u64) -> bool { let store = self.storage.get_slot_storage_entry(slot).unwrap(); if store.status() == AccountStorageStatus::Available - && (store.accounts.capacity() - store.accounts.len() as u64) > size + && store.accounts.remaining_bytes() >= size { return true; } @@ -6310,6 +6269,14 @@ impl AccountsDb { .unwrap_or_default(); let data_len = (data_len + STORE_META_OVERHEAD) as u64; if !self.has_space_available(slot, data_len) { + info!( + "write_accounts_to_storage, no space: {}, {}, {}, {}, {}", + storage.accounts.capacity(), + storage.accounts.remaining_bytes(), + data_len, + infos.len(), + accounts_and_meta_to_store.len() + ); let special_store_size = std::cmp::max(data_len * 2, self.file_size); if self .try_recycle_and_insert_store(slot, special_store_size, std::u64::MAX) @@ -6568,30 +6535,6 @@ impl AccountsDb { } } - let mut filler_accounts = 0; - if self.filler_accounts_enabled() { - let slots_remaining = self.filler_account_slots_remaining.load(Ordering::Acquire); - if slots_remaining > 0 { - // figure out - let pr = self.get_prior_root(slot); - - if let Some(prior_root) = pr { - let filler_account_slots = - std::cmp::min(slot.saturating_sub(prior_root), slots_remaining); - self.filler_account_slots_remaining - .fetch_sub(filler_account_slots, Ordering::Release); - let filler_accounts_per_slot = - self.filler_accounts_per_slot.load(Ordering::Acquire); - filler_accounts = filler_account_slots * filler_accounts_per_slot; - - // keep space for filler accounts - let addl_size = filler_accounts - * (aligned_stored_size(self.filler_accounts_config.size) as u64); - total_size += addl_size; - } - } - } - let (accounts, hashes): (Vec<(&Pubkey, &AccountSharedData)>, Vec) = iter_items .iter() .filter_map(|iter_item| { @@ -6641,25 +6584,6 @@ impl AccountsDb { StoreReclaims::Default, ); - if filler_accounts > 0 { - // add extra filler accounts at the end of the append vec - let (account, hash) = self.get_filler_account(&Rent::default()); - let mut accounts = Vec::with_capacity(filler_accounts as usize); - let mut hashes = Vec::with_capacity(filler_accounts as usize); - let pubkeys = self.get_filler_account_pubkeys(filler_accounts as usize); - pubkeys.iter().for_each(|key| { - accounts.push((key, &account)); - hashes.push(hash); - }); - self.store_accounts_frozen( - (slot, &accounts[..]), - Some(hashes), - &flushed_store, - None, - StoreReclaims::Ignore, - ); - } - // If the above sizing 
function is correct, just one AppendVec is enough to hold // all the data for the slot assert!(self.storage.get_slot_storage_entry(slot).is_some()); @@ -6989,7 +6913,6 @@ impl AccountsDb { max_slot: Slot, config: &CalcAccountsHashConfig<'_>, ) -> Result<(AccountsHash, u64), AccountsHashVerificationError> { - use AccountsHashVerificationError::*; let mut collect = Measure::start("collect"); let keys: Vec<_> = self .accounts_index @@ -7017,9 +6940,6 @@ impl AccountsDb { let result: Vec = pubkeys .iter() .filter_map(|pubkey| { - if self.is_filler_account(pubkey) { - return None; - } if let AccountIndexGetResult::Found(lock, index) = self.accounts_index.get(pubkey, config.ancestors, Some(max_slot)) { @@ -7045,7 +6965,7 @@ impl AccountsDb { let mut loaded_hash = loaded_account.loaded_hash(); let balance = loaded_account.lamports(); let hash_is_missing = loaded_hash == AccountHash(Hash::default()); - if (config.check_hash || hash_is_missing) && !self.is_filler_account(pubkey) { + if config.check_hash || hash_is_missing { let computed_hash = loaded_account.compute_hash(pubkey); if hash_is_missing { @@ -7088,7 +7008,7 @@ impl AccountsDb { "{} mismatched account hash(es) found", mismatch_found.load(Ordering::Relaxed) ); - return Err(MismatchedAccountsHash); + return Err(AccountsHashVerificationError::MismatchedAccountsHash); } scan.stop(); @@ -7636,12 +7556,13 @@ impl AccountsDb { bins: usize, bin_range: &Range, config: &CalcAccountsHashConfig<'_>, - filler_account_suffix: Option<&Pubkey>, ) -> Result, AccountsHashVerificationError> { + assert!(bin_range.start < bins); + assert!(bin_range.end <= bins); + assert!(bin_range.start < bin_range.end); let _guard = self.active_stats.activate(ActiveStatItem::HashScan); let bin_calculator = PubkeyBinCalculator24::new(bins); - assert!(bin_range.start < bins && bin_range.end <= bins && bin_range.start < bin_range.end); let mut time = Measure::start("scan all accounts"); stats.num_snapshot_storage = storages.storage_count(); stats.num_slots = storages.slot_count(); @@ -7655,7 +7576,6 @@ impl AccountsDb { bin_calculator: &bin_calculator, config, mismatch_found: mismatch_found.clone(), - filler_account_suffix, range, bin_range, sort_time: sort_time.clone(), @@ -7798,11 +7718,6 @@ impl AccountsDb { }; let accounts_hasher = AccountsHasher { - filler_account_suffix: if self.filler_accounts_config.count > 0 { - self.filler_account_suffix - } else { - None - }, zero_lamport_accounts: kind.zero_lamport_accounts(), dir_for_temp_cache_files: transient_accounts_hash_cache_path, active_stats: &self.active_stats, @@ -7816,7 +7731,6 @@ impl AccountsDb { PUBKEY_BINS_FOR_CALCULATING_HASHES, &bounds, config, - accounts_hasher.filler_account_suffix.as_ref(), )?; let cache_hash_data_files = cache_hash_data_file_references @@ -7874,7 +7788,6 @@ impl AccountsDb { base: Option<(Slot, /*capitalization*/ u64)>, config: VerifyAccountsHashAndLamportsConfig, ) -> Result<(), AccountsHashVerificationError> { - use AccountsHashVerificationError::*; let calc_config = CalcAccountsHashConfig { use_bg_thread_pool: config.use_bg_thread_pool, check_hash: false, @@ -7898,14 +7811,14 @@ impl AccountsDb { )?; let found_incremental_accounts_hash = self .get_incremental_accounts_hash(slot) - .ok_or(MissingAccountsHash)?; + .ok_or(AccountsHashVerificationError::MissingAccountsHash)?; if calculated_incremental_accounts_hash != found_incremental_accounts_hash { warn!( "mismatched incremental accounts hash for slot {slot}: \ {calculated_incremental_accounts_hash:?} (calculated) != 
{found_incremental_accounts_hash:?} (expected)" ); if hash_mismatch_is_error { - return Err(MismatchedAccountsHash); + return Err(AccountsHashVerificationError::MismatchedAccountsHash); } } } else { @@ -7923,18 +7836,22 @@ impl AccountsDb { "Mismatched total lamports: {} calculated: {}", total_lamports, calculated_lamports ); - return Err(MismatchedTotalLamports(calculated_lamports, total_lamports)); + return Err(AccountsHashVerificationError::MismatchedTotalLamports( + calculated_lamports, + total_lamports, + )); } - let (found_accounts_hash, _) = - self.get_accounts_hash(slot).ok_or(MissingAccountsHash)?; + let (found_accounts_hash, _) = self + .get_accounts_hash(slot) + .ok_or(AccountsHashVerificationError::MissingAccountsHash)?; if calculated_accounts_hash != found_accounts_hash { warn!( "Mismatched accounts hash for slot {slot}: \ {calculated_accounts_hash:?} (calculated) != {found_accounts_hash:?} (expected)" ); if hash_mismatch_is_error { - return Err(MismatchedAccountsHash); + return Err(AccountsHashVerificationError::MismatchedAccountsHash); } } } @@ -8045,11 +7962,6 @@ impl AccountsDb { hashes.retain(|k| k.0 != ignore); } - if self.filler_accounts_enabled() { - // filler accounts must be added to 'dirty_keys' above but cannot be used to calculate hash - hashes.retain(|(pubkey, _hash)| !self.is_filler_account(pubkey)); - } - let accounts_delta_hash = AccountsDeltaHash(AccountsHasher::accumulate_account_hashes(hashes)); accumulate.stop(); @@ -9101,91 +9013,6 @@ impl AccountsDb { } } - fn filler_unique_id_bytes() -> usize { - std::mem::size_of::() - } - - fn filler_rent_partition_prefix_bytes() -> usize { - std::mem::size_of::() - } - - fn filler_prefix_bytes() -> usize { - Self::filler_unique_id_bytes() + Self::filler_rent_partition_prefix_bytes() - } - - pub fn is_filler_account_helper( - pubkey: &Pubkey, - filler_account_suffix: Option<&Pubkey>, - ) -> bool { - let offset = Self::filler_prefix_bytes(); - filler_account_suffix - .as_ref() - .map(|filler_account_suffix| { - pubkey.as_ref()[offset..] == filler_account_suffix.as_ref()[offset..] - }) - .unwrap_or_default() - } - - /// true if 'pubkey' is a filler account - pub fn is_filler_account(&self, pubkey: &Pubkey) -> bool { - Self::is_filler_account_helper(pubkey, self.filler_account_suffix.as_ref()) - } - - /// true if it is possible that there are filler accounts present - pub fn filler_accounts_enabled(&self) -> bool { - self.filler_account_suffix.is_some() - } - - /// return 'AccountSharedData' and a hash for a filler account - fn get_filler_account(&self, rent: &Rent) -> (AccountSharedData, AccountHash) { - let string = "FiLLERACCoUNTooooooooooooooooooooooooooooooo"; - let hash = AccountHash(Hash::from_str(string).unwrap()); - let owner = Pubkey::from_str(string).unwrap(); - let space = self.filler_accounts_config.size; - let rent_exempt_reserve = rent.minimum_balance(space); - let lamports = rent_exempt_reserve; - let mut account = AccountSharedData::new(lamports, space, &owner); - // just non-zero rent epoch. 
filler accounts are rent-exempt - let dummy_rent_epoch = 2; - account.set_rent_epoch(dummy_rent_epoch); - (account, hash) - } - - fn get_filler_account_pubkeys(&self, count: usize) -> Vec { - (0..count) - .map(|_| { - let subrange = solana_sdk::pubkey::new_rand(); - self.get_filler_account_pubkey(&subrange) - }) - .collect() - } - - fn get_filler_account_pubkey(&self, subrange: &Pubkey) -> Pubkey { - // pubkey begins life as entire filler 'suffix' pubkey - let mut key = self.filler_account_suffix.unwrap(); - let rent_prefix_bytes = Self::filler_rent_partition_prefix_bytes(); - // first bytes are replaced with rent partition range: filler_rent_partition_prefix_bytes - key.as_mut()[0..rent_prefix_bytes] - .copy_from_slice(&subrange.as_ref()[0..rent_prefix_bytes]); - key - } - - /// filler accounts are space-holding accounts which are ignored by hash calculations and rent. - /// They are designed to allow a validator to run against a network successfully while simulating having many more accounts present. - /// All filler accounts share a common pubkey suffix. The suffix is randomly generated per validator on startup. - /// The filler accounts are added to each slot in the snapshot after index generation. - /// The accounts added in a slot are setup to have pubkeys such that rent will be collected from them before (or when?) their slot becomes an epoch old. - /// Thus, the filler accounts are rewritten by rent and the old slot can be thrown away successfully. - pub fn maybe_add_filler_accounts(&self, epoch_schedule: &EpochSchedule, slot: Slot) { - if self.filler_accounts_config.count == 0 { - return; - } - - self.init_gradual_filler_accounts( - epoch_schedule.get_slots_in_epoch(epoch_schedule.get_epoch(slot)), - ); - } - pub fn generate_index( &self, limit_load_slot_count_from_snapshot: Option, @@ -9199,17 +9026,17 @@ impl AccountsDb { slots.truncate(limit); // get rid of the newer slots and keep just the older } let max_slot = slots.last().cloned().unwrap_or_default(); - let schedule = genesis_config.epoch_schedule; + let schedule = &genesis_config.epoch_schedule; let rent_collector = RentCollector::new( schedule.get_epoch(max_slot), - schedule, + schedule.clone(), genesis_config.slots_per_year(), - genesis_config.rent, + genesis_config.rent.clone(), ); let accounts_data_len = AtomicU64::new(0); let rent_paying_accounts_by_partition = - Mutex::new(RentPayingAccountsByPartition::new(&schedule)); + Mutex::new(RentPayingAccountsByPartition::new(schedule)); // pass == 0 always runs and generates the index // pass == 1 only runs if verify == true. 
@@ -9597,7 +9424,7 @@ impl AccountsDb { store.count(), ); { - let mut count_and_status = store.count_and_status.write().unwrap(); + let mut count_and_status = store.count_and_status.lock_write(); assert_eq!(count_and_status.0, 0); count_and_status.0 = entry.count; } @@ -9610,7 +9437,7 @@ impl AccountsDb { ); } else { trace!("id: {} clearing count", id); - store.count_and_status.write().unwrap().0 = 0; + store.count_and_status.lock_write().0 = 0; } } storage_size_storages_time.stop(); @@ -9627,7 +9454,7 @@ impl AccountsDb { " slot: {} id: {} count_and_status: {:?} approx_store_count: {} len: {} capacity: {} (recycled: {:?})", entry.slot(), entry.append_vec_id(), - *entry.count_and_status.read().unwrap(), + entry.count_and_status.read(), entry.approx_store_count.load(Ordering::Relaxed), entry.accounts.len(), entry.accounts.capacity(), @@ -9665,7 +9492,7 @@ impl AccountsDb { " slot: {} id: {} count_and_status: {:?} approx_store_count: {} len: {} capacity: {}", slot, entry.append_vec_id(), - *entry.count_and_status.read().unwrap(), + entry.count_and_status.read(), entry.approx_store_count.load(Ordering::Relaxed), entry.accounts.len(), entry.accounts.capacity(), @@ -10024,6 +9851,7 @@ pub mod tests { accounts_index::{ tests::*, AccountSecondaryIndexesIncludeExclude, ReadAccountMapEntry, RefCount, }, + ancient_append_vecs, append_vec::{test_utils::TempFile, AppendVecStoredAccountMeta}, cache_hash_data::CacheHashDataFile, inline_spl_token, @@ -10080,7 +9908,6 @@ pub mod tests { check_hash, ..CalcAccountsHashConfig::default() }, - None, ) .map(|references| { references @@ -10201,6 +10028,47 @@ pub mod tests { } } + #[test] + fn test_create_ancient_append_vec() { + let ancient_append_vec_size = ancient_append_vecs::get_ancient_append_vec_capacity(); + let db = AccountsDb::new_single_for_tests(); + + { + // create an ancient appendvec from a small appendvec, the size of + // the ancient appendvec should be the size of the ideal ancient + // appendvec size. + let mut current_ancient = CurrentAncientAppendVec::default(); + let slot0 = 0; + + // there has to be an existing append vec at this slot for a new current ancient at the slot to make sense + let _existing_append_vec = db.create_and_insert_store(slot0, 1000, "test"); + let _ = current_ancient.create_ancient_append_vec(slot0, &db, 0); + assert_eq!( + current_ancient.append_vec().capacity(), + ancient_append_vec_size + ); + } + + { + // create an ancient appendvec from a large appendvec (bigger than + // current ancient_append_vec_size), the ancient appendvec should be + // the size of the bigger ancient appendvec size. 
+ let mut current_ancient = CurrentAncientAppendVec::default(); + let slot1 = 1; + // there has to be an existing append vec at this slot for a new current ancient at the slot to make sense + let _existing_append_vec = db.create_and_insert_store(slot1, 1000, "test"); + let _ = current_ancient.create_ancient_append_vec( + slot1, + &db, + 2 * ancient_append_vec_size as usize, + ); + assert_eq!( + current_ancient.append_vec().capacity(), + 2 * ancient_append_vec_size + ); + } + } + #[test] fn test_maybe_unref_accounts_already_in_ancient() { let db = AccountsDb::new_single_for_tests(); @@ -10247,7 +10115,7 @@ pub mod tests { // there has to be an existing append vec at this slot for a new current ancient at the slot to make sense let _existing_append_vec = db.create_and_insert_store(slot0, 1000, "test"); { - let _shrink_in_progress = current_ancient.create_ancient_append_vec(slot0, &db); + let _shrink_in_progress = current_ancient.create_ancient_append_vec(slot0, &db, 0); } let mut ancient_slot_pubkeys = AncientSlotPubkeys::default(); assert!(ancient_slot_pubkeys.inner.is_none()); @@ -10262,7 +10130,7 @@ pub mod tests { // different slot than current_ancient, so update 'ancient_slot_pubkeys' // there has to be an existing append vec at this slot for a new current ancient at the slot to make sense let _existing_append_vec = db.create_and_insert_store(slot1, 1000, "test"); - let _shrink_in_progress = current_ancient.create_ancient_append_vec(slot1, &db); + let _shrink_in_progress = current_ancient.create_ancient_append_vec(slot1, &db, 0); let slot2 = 2; ancient_slot_pubkeys.maybe_unref_accounts_already_in_ancient( slot2, @@ -10431,9 +10299,7 @@ pub mod tests { } #[test] - #[should_panic( - expected = "bin_range.start < bins && bin_range.end <= bins &&\\n bin_range.start < bin_range.end" - )] + #[should_panic(expected = "bin_range.start < bins")] fn test_accountsdb_scan_snapshot_stores_illegal_range_start() { let mut stats = HashStats::default(); let bounds = Range { start: 2, end: 2 }; @@ -10444,9 +10310,7 @@ pub mod tests { .unwrap(); } #[test] - #[should_panic( - expected = "bin_range.start < bins && bin_range.end <= bins &&\\n bin_range.start < bin_range.end" - )] + #[should_panic(expected = "bin_range.end <= bins")] fn test_accountsdb_scan_snapshot_stores_illegal_range_end() { let mut stats = HashStats::default(); let bounds = Range { start: 1, end: 3 }; @@ -10458,9 +10322,7 @@ pub mod tests { } #[test] - #[should_panic( - expected = "bin_range.start < bins && bin_range.end <= bins &&\\n bin_range.start < bin_range.end" - )] + #[should_panic(expected = "bin_range.start < bin_range.end")] fn test_accountsdb_scan_snapshot_stores_illegal_range_inverse() { let mut stats = HashStats::default(); let bounds = Range { start: 1, end: 0 }; @@ -11262,7 +11124,7 @@ pub mod tests { sample_storage_with_entries_id(tf, write_version, slot, pubkey, 0, mark_alive, None) } - fn sample_storage_with_entries_id( + fn sample_storage_with_entries_id_fill_percentage( tf: &TempFile, write_version: StoredMetaWriteVersion, slot: Slot, @@ -11270,11 +11132,17 @@ pub mod tests { id: AppendVecId, mark_alive: bool, account_data_size: Option, + fill_percentage: u64, ) -> Arc { let (_temp_dirs, paths) = get_temp_accounts_paths(1).unwrap(); - let size: usize = aligned_stored_size(account_data_size.unwrap_or(123) as usize); - let mut data = AccountStorageEntry::new(&paths[0], slot, id, size as u64); - let av = AccountsFile::AppendVec(AppendVec::new(&tf.path, true, (1024 * 1024).max(size))); + let file_size = 
account_data_size.unwrap_or(123) * 100 / fill_percentage; + let size_aligned: usize = aligned_stored_size(file_size as usize); + let mut data = AccountStorageEntry::new(&paths[0], slot, id, size_aligned as u64); + let av = AccountsFile::AppendVec(AppendVec::new( + &tf.path, + true, + (1024 * 1024).max(size_aligned), + )); data.accounts = av; let arc = Arc::new(data); @@ -11282,6 +11150,27 @@ pub mod tests { arc } + fn sample_storage_with_entries_id( + tf: &TempFile, + write_version: StoredMetaWriteVersion, + slot: Slot, + pubkey: &Pubkey, + id: AppendVecId, + mark_alive: bool, + account_data_size: Option, + ) -> Arc { + sample_storage_with_entries_id_fill_percentage( + tf, + write_version, + slot, + pubkey, + id, + mark_alive, + account_data_size, + 100, + ) + } + #[test] fn test_accountsdb_scan_multiple_account_storage_no_bank_one_slot() { solana_logger::setup(); @@ -12831,7 +12720,6 @@ pub mod tests { #[test] fn test_verify_accounts_hash() { - use AccountsHashVerificationError::*; solana_logger::setup(); let db = AccountsDb::new(Vec::new(), &ClusterType::Development); @@ -12863,7 +12751,7 @@ pub mod tests { assert_matches!( db.verify_accounts_hash_and_lamports(some_slot, 1, None, config.clone()), - Err(MissingAccountsHash) + Err(AccountsHashVerificationError::MissingAccountsHash) ); db.set_accounts_hash( @@ -12873,14 +12761,13 @@ pub mod tests { assert_matches!( db.verify_accounts_hash_and_lamports(some_slot, 1, None, config), - Err(MismatchedAccountsHash) + Err(AccountsHashVerificationError::MismatchedAccountsHash) ); } #[test] fn test_verify_bank_capitalization() { for pass in 0..2 { - use AccountsHashVerificationError::*; solana_logger::setup(); let db = AccountsDb::new(Vec::new(), &ClusterType::Development); @@ -12927,7 +12814,7 @@ pub mod tests { assert_matches!( db.verify_accounts_hash_and_lamports(some_slot, 10, None, config), - Err(MismatchedTotalLamports(expected, actual)) if expected == 2 && actual == 10 + Err(AccountsHashVerificationError::MismatchedTotalLamports(expected, actual)) if expected == 2 && actual == 10 ); } } @@ -12959,7 +12846,6 @@ pub mod tests { #[test] fn test_verify_accounts_hash_bad_account_hash() { - use AccountsHashVerificationError::*; solana_logger::setup(); let db = AccountsDb::new(Vec::new(), &ClusterType::Development); @@ -12994,7 +12880,7 @@ pub mod tests { assert_matches!( db.verify_accounts_hash_and_lamports(some_slot, 1, None, config), - Err(MismatchedAccountsHash) + Err(AccountsHashVerificationError::MismatchedAccountsHash) ); } @@ -15911,7 +15797,7 @@ pub mod tests { // fake out the store count to avoid the assert for (_, store) in accounts.storage.iter() { store.alive_bytes.store(0, Ordering::Release); - let mut count_and_status = store.count_and_status.write().unwrap(); + let mut count_and_status = store.count_and_status.lock_write(); count_and_status.0 = 0; } @@ -15930,14 +15816,14 @@ pub mod tests { ); for (_, store) in accounts.storage.iter() { - assert_eq!(store.count_and_status.read().unwrap().0, 0); + assert_eq!(store.count_and_status.read().0, 0); assert_eq!(store.alive_bytes.load(Ordering::Acquire), 0); } accounts.set_storage_count_and_alive_bytes(dashmap, &mut GenerateIndexTimings::default()); assert_eq!(accounts.storage.len(), 1); for (_, store) in accounts.storage.iter() { assert_eq!(store.append_vec_id(), 0); - assert_eq!(store.count_and_status.read().unwrap().0, count); + assert_eq!(store.count_and_status.read().0, count); assert_eq!(store.alive_bytes.load(Ordering::Acquire), 2); } } @@ -17092,7 +16978,7 @@ pub mod tests { 
append_vec.append_vec_id() ); - let _shrink_in_progress = current_ancient.create_if_necessary(slot2, &db); + let _shrink_in_progress = current_ancient.create_if_necessary(slot2, &db, 0); assert_eq!(current_ancient.slot(), slot); assert_eq!(current_ancient.append_vec_id(), append_vec.append_vec_id()); } @@ -17104,13 +16990,13 @@ pub mod tests { let _existing_append_vec = db.create_and_insert_store(slot2, 1000, "test"); let mut current_ancient = CurrentAncientAppendVec::default(); - let mut _shrink_in_progress = current_ancient.create_if_necessary(slot2, &db); + let mut _shrink_in_progress = current_ancient.create_if_necessary(slot2, &db, 0); let id = current_ancient.append_vec_id(); assert_eq!(current_ancient.slot(), slot2); assert!(is_ancient(¤t_ancient.append_vec().accounts)); let slot3 = 3; // should do nothing - let _shrink_in_progress = current_ancient.create_if_necessary(slot3, &db); + let _shrink_in_progress = current_ancient.create_if_necessary(slot3, &db, 0); assert_eq!(current_ancient.slot(), slot2); assert_eq!(current_ancient.append_vec_id(), id); assert!(is_ancient(¤t_ancient.append_vec().accounts)); @@ -17124,7 +17010,7 @@ pub mod tests { let _existing_append_vec = db.create_and_insert_store(slot2, 1000, "test"); { - let _shrink_in_progress = current_ancient.create_ancient_append_vec(slot2, &db); + let _shrink_in_progress = current_ancient.create_ancient_append_vec(slot2, &db, 0); } let id = current_ancient.append_vec_id(); assert_eq!(current_ancient.slot(), slot2); @@ -17133,7 +17019,7 @@ pub mod tests { // there has to be an existing append vec at this slot for a new current ancient at the slot to make sense let _existing_append_vec = db.create_and_insert_store(slot3, 1000, "test"); - let mut _shrink_in_progress = current_ancient.create_ancient_append_vec(slot3, &db); + let mut _shrink_in_progress = current_ancient.create_ancient_append_vec(slot3, &db, 0); assert_eq!(current_ancient.slot(), slot3); assert!(is_ancient(¤t_ancient.append_vec().accounts)); assert_ne!(current_ancient.append_vec_id(), id); @@ -17474,6 +17360,152 @@ pub mod tests { ); } + #[test] + fn test_shrink_ancient_overflow_with_min_size() { + solana_logger::setup(); + + let ideal_av_size = ancient_append_vecs::get_ancient_append_vec_capacity(); + let num_normal_slots = 2; + + // build an ancient append vec at slot 'ancient_slot' with one `fat` + // account that's larger than the ideal size of ancient append vec to + // simulate the *oversized* append vec for shrinking. + let account_size = (1.5 * ideal_av_size as f64) as u64; + let (db, ancient_slot) = get_one_ancient_append_vec_and_others_with_account_size( + true, + num_normal_slots, + Some(account_size), + ); + + let max_slot_inclusive = ancient_slot + (num_normal_slots as Slot); + let initial_accounts = get_all_accounts(&db, ancient_slot..(max_slot_inclusive + 1)); + + let ancient = db.storage.get_slot_storage_entry(ancient_slot).unwrap(); + + // assert that the min_size, which about 1.5 * ideal_av_size, kicked in + // and result that the ancient append vec capacity exceeds the ideal_av_size + assert!(ancient.capacity() > ideal_av_size); + + // combine 1 normal append vec into existing oversize ancient append vec. 
+ db.combine_ancient_slots( + (ancient_slot..max_slot_inclusive).collect(), + CAN_RANDOMLY_SHRINK_FALSE, + ); + + compare_all_accounts( + &initial_accounts, + &get_all_accounts(&db, ancient_slot..max_slot_inclusive), + ); + + // the append vec at max_slot_inclusive-1 should NOT have been removed + // since the append vec is already oversized and we created an ancient + // append vec there. + let ancient2 = db + .storage + .get_slot_storage_entry(max_slot_inclusive - 1) + .unwrap(); + assert!(is_ancient(&ancient2.accounts)); + assert!(ancient2.capacity() > ideal_av_size); // min_size kicked in, which cause the appendvec to be larger than the ideal_av_size + + // Combine normal append vec(s) into existing ancient append vec this + // will overflow the original ancient append vec because of the oversized + // ancient append vec is full. + db.combine_ancient_slots( + (ancient_slot..=max_slot_inclusive).collect(), + CAN_RANDOMLY_SHRINK_FALSE, + ); + + compare_all_accounts( + &initial_accounts, + &get_all_accounts(&db, ancient_slot..(max_slot_inclusive + 1)), + ); + + // Nothing should be combined because the append vec are oversized. + // min_size kicked in, which cause the appendvecs to be larger than the ideal_av_size. + let ancient = db.storage.get_slot_storage_entry(ancient_slot).unwrap(); + assert!(is_ancient(&ancient.accounts)); + assert!(ancient.capacity() > ideal_av_size); + + let ancient2 = db + .storage + .get_slot_storage_entry(max_slot_inclusive - 1) + .unwrap(); + assert!(is_ancient(&ancient2.accounts)); + assert!(ancient2.capacity() > ideal_av_size); + + let ancient3 = db + .storage + .get_slot_storage_entry(max_slot_inclusive) + .unwrap(); + assert!(is_ancient(&ancient3.accounts)); + assert!(ancient3.capacity() > ideal_av_size); + } + + #[test] + fn test_shink_overflow_too_much() { + let num_normal_slots = 2; + let ideal_av_size = ancient_append_vecs::get_ancient_append_vec_capacity(); + let fat_account_size = (1.5 * ideal_av_size as f64) as u64; + + // Prepare 4 appendvec to combine [small, big, small, small] + let account_data_sizes = vec![100, fat_account_size, 100, 100]; + let (db, slot1) = create_db_with_storages_and_index_with_customized_account_size_per_slot( + true, + num_normal_slots + 1, + account_data_sizes, + ); + let storage = db.get_storage_for_slot(slot1).unwrap(); + let created_accounts = db.get_unique_accounts_from_storage(&storage); + + // Adjust alive_ratio for slot2 to test it is shrinkable and is a + // candidate for squashing into the previous ancient append vec. + // However, due to the fact that this appendvec is `oversized`, it can't + // be squashed into the ancient append vec at previous slot (exceeds the + // size limit). Therefore, a new "oversized" ancient append vec are + // created at slot2 as the overflow. This is where the "min_bytes" in + // `fn create_ancient_append_vec` used for. + let slot2 = slot1 + 1; + let storage2 = db.storage.get_slot_storage_entry(slot2).unwrap(); + let original_cap_slot2 = storage2.accounts.capacity(); + storage2 + .accounts + .set_current_len_for_tests(original_cap_slot2 as usize); + + // Combine appendvec into ancient append vec. + let slots_to_combine: Vec = (slot1..slot1 + (num_normal_slots + 1) as Slot).collect(); + db.combine_ancient_slots(slots_to_combine, CAN_RANDOMLY_SHRINK_FALSE); + + // slot2 is too big to fit into ideal ancient append vec at slot1. So slot2 won't be merged into slot1. + // slot1 will have its own ancient append vec. 
+ assert!(db.storage.get_slot_storage_entry(slot1).is_some()); + let ancient = db.get_storage_for_slot(slot1).unwrap(); + assert!(is_ancient(&ancient.accounts)); + assert_eq!(ancient.capacity(), ideal_av_size); + + let after_store = db.get_storage_for_slot(slot1).unwrap(); + let GetUniqueAccountsResult { + stored_accounts: after_stored_accounts, + capacity: after_capacity, + } = db.get_unique_accounts_from_storage(&after_store); + assert!(created_accounts.capacity <= after_capacity); + assert_eq!(created_accounts.stored_accounts.len(), 1); + assert_eq!(after_stored_accounts.len(), 1); + + // slot2, even after shrinking, it is still oversized. Therefore, there + // exists as an ancient append vec at slot2. + let storage2_after = db.storage.get_slot_storage_entry(slot2).unwrap(); + assert!(is_ancient(&storage2_after.accounts)); + assert!(storage2_after.capacity() > ideal_av_size); + let after_store = db.get_storage_for_slot(slot2).unwrap(); + let GetUniqueAccountsResult { + stored_accounts: after_stored_accounts, + capacity: after_capacity, + } = db.get_unique_accounts_from_storage(&after_store); + assert!(created_accounts.capacity <= after_capacity); + assert_eq!(created_accounts.stored_accounts.len(), 1); + assert_eq!(after_stored_accounts.len(), 1); + } + #[test] fn test_shrink_ancient_overflow() { solana_logger::setup(); @@ -17756,6 +17788,55 @@ pub mod tests { storage.remove_account(num_bytes, reset_accounts); } + pub(crate) fn create_storages_and_update_index_with_customized_account_size_per_slot( + db: &AccountsDb, + tf: Option<&TempFile>, + starting_slot: Slot, + num_slots: usize, + alive: bool, + account_data_sizes: Vec, + ) { + if num_slots == 0 { + return; + } + assert!(account_data_sizes.len() == num_slots + 1); + let local_tf = (tf.is_none()).then(|| { + crate::append_vec::test_utils::get_append_vec_path("create_storages_and_update_index") + }); + let tf = tf.unwrap_or_else(|| local_tf.as_ref().unwrap()); + + let write_version1 = 0; + let starting_id = db + .storage + .iter() + .map(|storage| storage.1.append_vec_id()) + .max() + .unwrap_or(999); + for (i, account_data_size) in account_data_sizes.iter().enumerate().take(num_slots) { + let id = starting_id + (i as AppendVecId); + let pubkey1 = solana_sdk::pubkey::new_rand(); + let storage = sample_storage_with_entries_id_fill_percentage( + tf, + write_version1, + starting_slot + (i as Slot), + &pubkey1, + id, + alive, + Some(*account_data_size), + 50, + ); + insert_store(db, Arc::clone(&storage)); + } + + let storage = db.get_storage_for_slot(starting_slot).unwrap(); + let created_accounts = db.get_unique_accounts_from_storage(&storage); + assert_eq!(created_accounts.stored_accounts.len(), 1); + + if alive { + populate_index(db, starting_slot..(starting_slot + (num_slots as Slot) + 1)); + } + } + pub(crate) fn create_storages_and_update_index( db: &AccountsDb, tf: Option<&TempFile>, @@ -17825,11 +17906,41 @@ pub mod tests { (db, slot1) } - fn get_one_ancient_append_vec_and_others( + pub(crate) fn create_db_with_storages_and_index_with_customized_account_size_per_slot( + alive: bool, + num_slots: usize, + account_data_size: Vec, + ) -> (AccountsDb, Slot) { + solana_logger::setup(); + + let db = AccountsDb::new_single_for_tests(); + + // create a single append vec with a single account in a slot + // add the pubkey to index if alive + // call combine_ancient_slots with the slot + // verify we create an ancient appendvec that has alive accounts and does not have dead accounts + + let slot1 = 1; + 
create_storages_and_update_index_with_customized_account_size_per_slot( + &db, + None, + slot1, + num_slots, + alive, + account_data_size, + ); + + let slot1 = slot1 as Slot; + (db, slot1) + } + + fn get_one_ancient_append_vec_and_others_with_account_size( alive: bool, num_normal_slots: usize, + account_data_size: Option, ) -> (AccountsDb, Slot) { - let (db, slot1) = create_db_with_storages_and_index(alive, num_normal_slots + 1, None); + let (db, slot1) = + create_db_with_storages_and_index(alive, num_normal_slots + 1, account_data_size); let storage = db.get_storage_for_slot(slot1).unwrap(); let created_accounts = db.get_unique_accounts_from_storage(&storage); @@ -17843,7 +17954,7 @@ pub mod tests { capacity: after_capacity, } = db.get_unique_accounts_from_storage(&after_store); if alive { - assert_ne!(created_accounts.capacity, after_capacity); + assert!(created_accounts.capacity <= after_capacity); } else { assert_eq!(created_accounts.capacity, after_capacity); } @@ -17854,6 +17965,13 @@ pub mod tests { (db, slot1) } + fn get_one_ancient_append_vec_and_others( + alive: bool, + num_normal_slots: usize, + ) -> (AccountsDb, Slot) { + get_one_ancient_append_vec_and_others_with_account_size(alive, num_normal_slots, None) + } + #[test] fn test_handle_dropped_roots_for_ancient() { solana_logger::setup(); diff --git a/accounts-db/src/accounts_file.rs b/accounts-db/src/accounts_file.rs index 77f1717a9ca259..97c761616e7ce3 100644 --- a/accounts-db/src/accounts_file.rs +++ b/accounts-db/src/accounts_file.rs @@ -4,7 +4,7 @@ use { StorableAccountsWithHashesAndWriteVersions, StoredAccountInfo, StoredAccountMeta, }, accounts_hash::AccountHash, - append_vec::{AppendVec, AppendVecError, MatchAccountOwnerError}, + append_vec::{AppendVec, AppendVecError}, storable_accounts::StorableAccounts, tiered_storage::error::TieredStorageError, }, @@ -40,6 +40,14 @@ pub enum AccountsFileError { TieredStorageError(#[from] TieredStorageError), } +#[derive(Error, Debug, PartialEq, Eq)] +pub enum MatchAccountOwnerError { + #[error("The account owner does not match with the provided list")] + NoMatch, + #[error("Unable to load the account")] + UnableToLoad, +} + pub type Result = std::result::Result; #[derive(Debug)] diff --git a/accounts-db/src/accounts_hash.rs b/accounts-db/src/accounts_hash.rs index 7631ea694635b8..72f74be6f130d1 100644 --- a/accounts-db/src/accounts_hash.rs +++ b/accounts-db/src/accounts_hash.rs @@ -469,7 +469,6 @@ impl CumulativeOffsets { #[derive(Debug)] pub struct AccountsHasher<'a> { - pub filler_account_suffix: Option, pub zero_lamport_accounts: ZeroLamportAccounts, /// The directory where temporary cache files are put pub dir_for_temp_cache_files: PathBuf, @@ -495,11 +494,6 @@ struct ItemLocation<'a> { } impl<'a> AccountsHasher<'a> { - /// true if it is possible that there are filler accounts present - pub fn filler_accounts_enabled(&self) -> bool { - self.filler_account_suffix.is_some() - } - pub fn calculate_hash(hashes: Vec>) -> (Hash, usize) { let cumulative_offsets = CumulativeOffsets::from_raw(&hashes); @@ -1151,7 +1145,6 @@ impl<'a> AccountsHasher<'a> { }; let mut overall_sum = 0; - let filler_accounts_enabled = self.filler_accounts_enabled(); while let Some(pointer) = working_set.pop() { let key = &sorted_data_by_pubkey[pointer.slot_group_index][pointer.offset].pubkey; @@ -1166,13 +1159,10 @@ impl<'a> AccountsHasher<'a> { // add lamports and get hash if item.lamports != 0 { - // do not include filler accounts in the hash - if !(filler_accounts_enabled && 
self.is_filler_account(&item.pubkey)) { - overall_sum = Self::checked_cast_for_capitalization( - item.lamports as u128 + overall_sum as u128, - ); - hashes.write(&item.hash.0); - } + overall_sum = Self::checked_cast_for_capitalization( + item.lamports as u128 + overall_sum as u128, + ); + hashes.write(&item.hash.0); } else { // if lamports == 0, check if they should be included if self.zero_lamport_accounts == ZeroLamportAccounts::Included { @@ -1196,13 +1186,6 @@ impl<'a> AccountsHasher<'a> { (hashes, overall_sum) } - fn is_filler_account(&self, pubkey: &Pubkey) -> bool { - crate::accounts_db::AccountsDb::is_filler_account_helper( - pubkey, - self.filler_account_suffix.as_ref(), - ) - } - /// input: /// vec: group of slot data, ordered by Slot (low to high) /// vec: [..] - items found in that slot range Sorted by: Pubkey, higher Slot, higher Write version (if pubkey =) @@ -1343,7 +1326,6 @@ mod tests { impl<'a> AccountsHasher<'a> { fn new(dir_for_temp_cache_files: PathBuf) -> Self { Self { - filler_account_suffix: None, zero_lamport_accounts: ZeroLamportAccounts::Excluded, dir_for_temp_cache_files, active_stats: &ACTIVE_STATS, diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index 78951c5e06f762..0b567da483f3d3 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -867,7 +867,11 @@ pub struct AccountsToStore<'a> { /// if 'accounts' contains more items than can be contained in the primary storage, then we have to split these accounts. /// 'index_first_item_overflow' specifies the index of the first item in 'accounts' that will go into the overflow storage index_first_item_overflow: usize, - pub slot: Slot, + slot: Slot, + /// bytes required to store primary accounts + bytes_primary: usize, + /// bytes required to store overflow accounts + bytes_overflow: usize, } impl<'a> AccountsToStore<'a> { @@ -880,8 +884,11 @@ impl<'a> AccountsToStore<'a> { slot: Slot, ) -> Self { let num_accounts = accounts.len(); + let mut bytes_primary = alive_total_bytes; + let mut bytes_overflow = 0; // index of the first account that doesn't fit in the current append vec let mut index_first_item_overflow = num_accounts; // assume all fit + let initial_available_bytes = available_bytes as usize; if alive_total_bytes > available_bytes as usize { // not all the alive bytes fit, so we have to find how many accounts fit within available_bytes for (i, account) in accounts.iter().enumerate() { @@ -891,6 +898,9 @@ impl<'a> AccountsToStore<'a> { } else if index_first_item_overflow == num_accounts { // the # of accounts we have so far seen is the most that will fit in the current ancient append vec index_first_item_overflow = i; + bytes_primary = + initial_available_bytes.saturating_sub(available_bytes as usize); + bytes_overflow = alive_total_bytes.saturating_sub(bytes_primary); break; } } @@ -899,6 +909,8 @@ impl<'a> AccountsToStore<'a> { accounts, index_first_item_overflow, slot, + bytes_primary, + bytes_overflow, } } @@ -907,6 +919,14 @@ impl<'a> AccountsToStore<'a> { self.index_first_item_overflow < self.accounts.len() } + /// return # required bytes for the given selector + pub fn get_bytes(&self, selector: StorageSelector) -> usize { + match selector { + StorageSelector::Primary => self.bytes_primary, + StorageSelector::Overflow => self.bytes_overflow, + } + } + /// get the accounts to store in the given 'storage' pub fn get(&self, storage: StorageSelector) -> &[&'a StoredAccountMeta<'a>] { let range = match storage { @@ -915,6 
+935,10 @@ impl<'a> AccountsToStore<'a> { }; &self.accounts[range] } + + pub fn slot(&self) -> Slot { + self.slot + } } /// capacity of an ancient append vec @@ -2040,6 +2064,9 @@ pub mod tests { accounts_to_store.has_overflow() ); assert!(accounts.is_empty()); + + assert_eq!(accounts_to_store.get_bytes(selector), account_size); + assert_eq!(accounts_to_store.get_bytes(get_opposite(&selector)), 0); } } fn get_opposite(selector: &StorageSelector) -> StorageSelector { diff --git a/accounts-db/src/append_vec.rs b/accounts-db/src/append_vec.rs index bd789aa3092ad9..c7100ebac24c4c 100644 --- a/accounts-db/src/append_vec.rs +++ b/accounts-db/src/append_vec.rs @@ -10,7 +10,7 @@ use { AccountMeta, StorableAccountsWithHashesAndWriteVersions, StoredAccountInfo, StoredAccountMeta, StoredMeta, StoredMetaWriteVersion, }, - accounts_file::{AccountsFileError, Result, ALIGN_BOUNDARY_OFFSET}, + accounts_file::{AccountsFileError, MatchAccountOwnerError, Result, ALIGN_BOUNDARY_OFFSET}, accounts_hash::AccountHash, storable_accounts::StorableAccounts, u64_align, @@ -96,14 +96,6 @@ impl<'append_vec> Iterator for AppendVecAccountsIter<'append_vec> { } } -#[derive(Error, Debug, PartialEq, Eq)] -pub enum MatchAccountOwnerError { - #[error("The account owner does not match with the provided list")] - NoMatch, - #[error("Unable to load the account")] - UnableToLoad, -} - /// References to account data stored elsewhere. Getting an `Account` requires cloning /// (see `StoredAccountMeta::clone_account()`). #[derive(PartialEq, Eq, Debug)] @@ -327,7 +319,8 @@ impl AppendVec { /// how many more bytes can be stored in this append vec pub fn remaining_bytes(&self) -> u64 { - (self.capacity()).saturating_sub(self.len() as u64) + self.capacity() + .saturating_sub(u64_align!(self.len()) as u64) } pub fn len(&self) -> usize { @@ -1002,10 +995,36 @@ pub mod tests { let av = AppendVec::new(&path.path, true, sz); assert_eq!(av.capacity(), sz64); assert_eq!(av.remaining_bytes(), sz64); + + // append first account, an u64 aligned account (136 bytes) + let mut av_len = 0; let account = create_test_account(0); av.append_account_test(&account).unwrap(); + av_len += STORE_META_OVERHEAD; assert_eq!(av.capacity(), sz64); assert_eq!(av.remaining_bytes(), sz64 - (STORE_META_OVERHEAD as u64)); + assert_eq!(av.len(), av_len); + + // append second account, a *not* u64 aligned account (137 bytes) + let account = create_test_account(1); + let account_storage_len = STORE_META_OVERHEAD + 1; + av_len += account_storage_len; + av.append_account_test(&account).unwrap(); + assert_eq!(av.capacity(), sz64); + assert_eq!(av.len(), av_len); + let alignment_bytes = u64_align!(av_len) - av_len; // bytes used for alignment (7 bytes) + assert_eq!(alignment_bytes, 7); + assert_eq!(av.remaining_bytes(), sz64 - u64_align!(av_len) as u64); + + // append third account, a *not* u64 aligned account (137 bytes) + let account = create_test_account(1); + av.append_account_test(&account).unwrap(); + let account_storage_len = STORE_META_OVERHEAD + 1; + av_len += alignment_bytes; // bytes used for alignment at the end of previous account + av_len += account_storage_len; + assert_eq!(av.capacity(), sz64); + assert_eq!(av.len(), av_len); + assert_eq!(av.remaining_bytes(), sz64 - u64_align!(av_len) as u64); } #[test] diff --git a/accounts-db/src/partitioned_rewards.rs b/accounts-db/src/partitioned_rewards.rs index 9d012a71a4a018..652a907d3a7bb6 100644 --- a/accounts-db/src/partitioned_rewards.rs +++ b/accounts-db/src/partitioned_rewards.rs @@ -2,7 +2,6 @@ //! 
use solana_sdk::clock::Slot; -#[allow(dead_code)] #[derive(Debug)] /// Configuration options for partitioned epoch rewards. /// This struct allows various forms of testing, especially prior to feature activation. @@ -53,7 +52,6 @@ pub enum TestPartitionedEpochRewards { }, } -#[allow(dead_code)] impl PartitionedEpochRewardsConfig { pub fn new(test: TestPartitionedEpochRewards) -> Self { match test { diff --git a/accounts-db/src/rent_collector.rs b/accounts-db/src/rent_collector.rs index cea0a07c9883b3..1a72cac88308b3 100644 --- a/accounts-db/src/rent_collector.rs +++ b/accounts-db/src/rent_collector.rs @@ -111,10 +111,9 @@ impl RentCollector { &self, address: &Pubkey, account: &mut AccountSharedData, - filler_account_suffix: Option<&Pubkey>, set_exempt_rent_epoch_max: bool, ) -> CollectedInfo { - match self.calculate_rent_result(address, account, filler_account_suffix) { + match self.calculate_rent_result(address, account) { RentResult::Exempt => { if set_exempt_rent_epoch_max { account.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); @@ -151,19 +150,13 @@ impl RentCollector { &self, address: &Pubkey, account: &impl ReadableAccount, - filler_account_suffix: Option<&Pubkey>, ) -> RentResult { if account.rent_epoch() == RENT_EXEMPT_RENT_EPOCH || account.rent_epoch() > self.epoch { // potentially rent paying account (or known and already marked exempt) // Maybe collect rent later, leave account alone for now. return RentResult::NoRentCollectionNow; } - if !self.should_collect_rent(address, account) - || crate::accounts_db::AccountsDb::is_filler_account_helper( - address, - filler_account_suffix, - ) - { + if !self.should_collect_rent(address, account) { // easy to determine this account should not consider having rent collected from it return RentResult::Exempt; } @@ -230,12 +223,7 @@ mod tests { ) -> CollectedInfo { // initialize rent_epoch as created at this epoch account.set_rent_epoch(self.epoch); - self.collect_from_existing_account( - address, - account, - /*filler_account_suffix:*/ None, - set_exempt_rent_epoch_max, - ) + self.collect_from_existing_account(address, account, set_exempt_rent_epoch_max) } } @@ -246,7 +234,7 @@ mod tests { let mut account = AccountSharedData::default(); assert_matches!( - rent_collector.calculate_rent_result(&Pubkey::default(), &account, None), + rent_collector.calculate_rent_result(&Pubkey::default(), &account), RentResult::NoRentCollectionNow ); { @@ -255,7 +243,6 @@ mod tests { rent_collector.collect_from_existing_account( &Pubkey::default(), &mut account_clone, - None, set_exempt_rent_epoch_max ), CollectedInfo::default() @@ -265,7 +252,7 @@ mod tests { account.set_executable(true); assert_matches!( - rent_collector.calculate_rent_result(&Pubkey::default(), &account, None), + rent_collector.calculate_rent_result(&Pubkey::default(), &account), RentResult::Exempt ); { @@ -278,7 +265,6 @@ mod tests { rent_collector.collect_from_existing_account( &Pubkey::default(), &mut account_clone, - None, set_exempt_rent_epoch_max ), CollectedInfo::default() @@ -288,7 +274,7 @@ mod tests { account.set_executable(false); assert_matches!( - rent_collector.calculate_rent_result(&incinerator::id(), &account, None), + rent_collector.calculate_rent_result(&incinerator::id(), &account), RentResult::Exempt ); { @@ -301,7 +287,6 @@ mod tests { rent_collector.collect_from_existing_account( &incinerator::id(), &mut account_clone, - None, set_exempt_rent_epoch_max ), CollectedInfo::default() @@ -309,49 +294,44 @@ mod tests { assert_eq!(account_clone, account_expected); } - // try a few 
combinations of rent collector rent epoch and collecting rent with and without filler accounts specified (but we aren't a filler) - let filler_account = solana_sdk::pubkey::new_rand(); - - for filler_accounts in [None, Some(&filler_account)] { - for (rent_epoch, rent_due_expected) in [(2, 2), (3, 5)] { - rent_collector.epoch = rent_epoch; - account.set_lamports(10); - account.set_rent_epoch(1); - let new_rent_epoch_expected = rent_collector.epoch + 1; - assert!( - matches!( - rent_collector.calculate_rent_result(&Pubkey::default(), &account, filler_accounts), - RentResult::CollectRent{ new_rent_epoch, rent_due} if new_rent_epoch == new_rent_epoch_expected && rent_due == rent_due_expected, + // try a few combinations of rent collector rent epoch and collecting rent + for (rent_epoch, rent_due_expected) in [(2, 2), (3, 5)] { + rent_collector.epoch = rent_epoch; + account.set_lamports(10); + account.set_rent_epoch(1); + let new_rent_epoch_expected = rent_collector.epoch + 1; + assert!( + matches!( + rent_collector.calculate_rent_result(&Pubkey::default(), &account), + RentResult::CollectRent{ new_rent_epoch, rent_due} if new_rent_epoch == new_rent_epoch_expected && rent_due == rent_due_expected, + ), + "{:?}", + rent_collector.calculate_rent_result(&Pubkey::default(), &account) + ); + + { + let mut account_clone = account.clone(); + assert_eq!( + rent_collector.collect_from_existing_account( + &Pubkey::default(), + &mut account_clone, + set_exempt_rent_epoch_max ), - "{:?}", - rent_collector.calculate_rent_result(&Pubkey::default(), &account, None,) + CollectedInfo { + rent_amount: rent_due_expected, + account_data_len_reclaimed: 0 + } ); - - { - let mut account_clone = account.clone(); - assert_eq!( - rent_collector.collect_from_existing_account( - &Pubkey::default(), - &mut account_clone, - filler_accounts, - set_exempt_rent_epoch_max - ), - CollectedInfo { - rent_amount: rent_due_expected, - account_data_len_reclaimed: 0 - } - ); - let mut account_expected = account.clone(); - account_expected.set_lamports(account.lamports() - rent_due_expected); - account_expected.set_rent_epoch(new_rent_epoch_expected); - assert_eq!(account_clone, account_expected); - } + let mut account_expected = account.clone(); + account_expected.set_lamports(account.lamports() - rent_due_expected); + account_expected.set_rent_epoch(new_rent_epoch_expected); + assert_eq!(account_clone, account_expected); } } // enough lamports to make us exempt account.set_lamports(1_000_000); - let result = rent_collector.calculate_rent_result(&Pubkey::default(), &account, None); + let result = rent_collector.calculate_rent_result(&Pubkey::default(), &account); assert!( matches!(result, RentResult::Exempt), "{result:?}, set_exempt_rent_epoch_max: {set_exempt_rent_epoch_max}", @@ -366,7 +346,6 @@ mod tests { rent_collector.collect_from_existing_account( &Pubkey::default(), &mut account_clone, - None, set_exempt_rent_epoch_max ), CollectedInfo::default() @@ -379,7 +358,7 @@ mod tests { // We don't calculate rent amount vs data if the rent_epoch is already in the future. 
account.set_rent_epoch(1_000_000); assert_matches!( - rent_collector.calculate_rent_result(&Pubkey::default(), &account, None), + rent_collector.calculate_rent_result(&Pubkey::default(), &account), RentResult::NoRentCollectionNow ); { @@ -388,42 +367,12 @@ mod tests { rent_collector.collect_from_existing_account( &Pubkey::default(), &mut account_clone, - None, set_exempt_rent_epoch_max ), CollectedInfo::default() ); assert_eq!(account_clone, account); } - - // filler accounts are exempt - account.set_rent_epoch(1); - account.set_lamports(10); - assert_matches!( - rent_collector.calculate_rent_result( - &filler_account, - &account, - Some(&filler_account), - ), - RentResult::Exempt - ); - { - let mut account_clone = account.clone(); - let mut account_expected = account.clone(); - if set_exempt_rent_epoch_max { - account_expected.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); - } - assert_eq!( - rent_collector.collect_from_existing_account( - &filler_account, - &mut account_clone, - Some(&filler_account), - set_exempt_rent_epoch_max - ), - CollectedInfo::default() - ); - assert_eq!(account_clone, account_expected); - } } } @@ -464,7 +413,6 @@ mod tests { let collected = rent_collector.collect_from_existing_account( &solana_sdk::pubkey::new_rand(), &mut existing_account, - None, // filler_account_suffix set_exempt_rent_epoch_max, ); assert!(existing_account.lamports() < old_lamports); @@ -502,7 +450,6 @@ mod tests { let collected = rent_collector.collect_from_existing_account( &pubkey, &mut account, - None, // filler_account_suffix set_exempt_rent_epoch_max, ); assert_eq!(account.lamports(), huge_lamports); @@ -519,7 +466,6 @@ mod tests { let collected = rent_collector.collect_from_existing_account( &pubkey, &mut account, - None, // filler_account_suffix set_exempt_rent_epoch_max, ); assert_eq!(account.lamports(), tiny_lamports - collected.rent_amount); @@ -546,7 +492,6 @@ mod tests { let collected = rent_collector.collect_from_existing_account( &pubkey, &mut account, - None, // filler_account_suffix set_exempt_rent_epoch_max, ); assert_eq!(account.lamports(), 0); @@ -573,7 +518,6 @@ mod tests { let collected = rent_collector.collect_from_existing_account( &Pubkey::new_unique(), &mut account, - None, // filler_account_suffix set_exempt_rent_epoch_max, ); diff --git a/accounts-db/src/tiered_storage/error.rs b/accounts-db/src/tiered_storage/error.rs index 822b8bcde4810b..9b9d07d977e804 100644 --- a/accounts-db/src/tiered_storage/error.rs +++ b/accounts-db/src/tiered_storage/error.rs @@ -1,4 +1,4 @@ -use {std::path::PathBuf, thiserror::Error}; +use {super::footer::SanitizeFooterError, std::path::PathBuf, thiserror::Error}; #[derive(Error, Debug)] pub enum TieredStorageError { @@ -16,4 +16,13 @@ pub enum TieredStorageError { #[error("Unsupported: the feature is not yet supported")] Unsupported(), + + #[error("invalid footer size: {0}, expected: {1}")] + InvalidFooterSize(u64, u64), + + #[error("invalid footer version: {0}")] + InvalidFooterVersion(u64), + + #[error("footer is unsanitary: {0}")] + SanitizeFooter(#[from] SanitizeFooterError), } diff --git a/accounts-db/src/tiered_storage/file.rs b/accounts-db/src/tiered_storage/file.rs index 6d5397f812b784..07faa3e811362e 100644 --- a/accounts-db/src/tiered_storage/file.rs +++ b/accounts-db/src/tiered_storage/file.rs @@ -15,11 +15,10 @@ impl TieredStorageFile { .read(true) .create(false) .open(&file_path) - .unwrap_or_else(|e| { + .unwrap_or_else(|err| { panic!( - "[TieredStorageError] Unable to open {:?} as read-only: {:?}", + "[TieredStorageError] 
Unable to open {} as read-only: {err}", file_path.as_ref().display(), - e ); }), ) @@ -36,18 +35,14 @@ impl TieredStorageFile { pub fn write_type(&self, value: &T) -> IoResult { let ptr = value as *const _ as *const u8; - let slice = unsafe { std::slice::from_raw_parts(ptr, mem::size_of::()) }; - (&self.0).write_all(slice)?; - - Ok(std::mem::size_of::()) + let bytes = unsafe { std::slice::from_raw_parts(ptr, mem::size_of::()) }; + self.write_bytes(bytes) } pub fn read_type(&self, value: &mut T) -> IoResult<()> { let ptr = value as *mut _ as *mut u8; - let slice = unsafe { std::slice::from_raw_parts_mut(ptr, mem::size_of::()) }; - (&self.0).read_exact(slice)?; - - Ok(()) + let bytes = unsafe { std::slice::from_raw_parts_mut(ptr, mem::size_of::()) }; + self.read_bytes(bytes) } pub fn seek(&self, offset: u64) -> IoResult { @@ -65,8 +60,6 @@ impl TieredStorageFile { } pub fn read_bytes(&self, buffer: &mut [u8]) -> IoResult<()> { - (&self.0).read_exact(buffer)?; - - Ok(()) + (&self.0).read_exact(buffer) } } diff --git a/accounts-db/src/tiered_storage/footer.rs b/accounts-db/src/tiered_storage/footer.rs index 0cbf39ba8f8ce6..8bf9b5f228c3e3 100644 --- a/accounts-db/src/tiered_storage/footer.rs +++ b/accounts-db/src/tiered_storage/footer.rs @@ -3,9 +3,12 @@ use { error::TieredStorageError, file::TieredStorageFile, index::IndexBlockFormat, mmap_utils::get_type, TieredStorageResult, }, + bytemuck::{Pod, Zeroable}, memmap2::Mmap, + num_enum::TryFromPrimitiveError, solana_sdk::{hash::Hash, pubkey::Pubkey}, std::{mem, path::Path}, + thiserror::Error, }; pub const FOOTER_FORMAT_VERSION: u64 = 1; @@ -22,7 +25,7 @@ pub const FOOTER_TAIL_SIZE: usize = 24; /// The ending 8 bytes of a valid tiered account storage file. pub const FOOTER_MAGIC_NUMBER: u64 = 0x502A2AB5; // SOLALABS -> SOLANA LABS -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Clone, Copy, Pod, Zeroable)] #[repr(C)] pub struct TieredStorageMagicNumber(pub u64); @@ -86,7 +89,7 @@ pub enum OwnersBlockFormat { LocalIndex = 0, } -#[derive(Debug, PartialEq, Eq, Clone)] +#[derive(Debug, PartialEq, Eq, Clone, Copy)] #[repr(C)] pub struct TieredStorageFooter { // formats @@ -133,17 +136,41 @@ pub struct TieredStorageFooter { /// A hash that represents a tiered accounts file for consistency check. pub hash: Hash, + /// The format version of the tiered accounts file. + pub format_version: u64, // The below fields belong to footer tail. // The sum of their sizes should match FOOTER_TAIL_SIZE. /// The size of the footer including the magic number. pub footer_size: u64, - /// The format version of the tiered accounts file. - pub format_version: u64, // This field is persisted in the storage but not in this struct. // The number should match FOOTER_MAGIC_NUMBER. // pub magic_number: u64, } +// It is undefined behavior to read/write uninitialized bytes. +// The `Pod` marker trait indicates there are no uninitialized bytes. +// In order to safely guarantee a type is POD, it cannot have any padding. 
+const _: () = assert!( + std::mem::size_of::() + == std::mem::size_of::() + + std::mem::size_of::() + + std::mem::size_of::() + + std::mem::size_of::() + + std::mem::size_of::() // account_entry_count + + std::mem::size_of::() // account_meta_entry_size + + std::mem::size_of::() // account_block_size + + std::mem::size_of::() // owner_count + + std::mem::size_of::() // owner_entry_size + + std::mem::size_of::() // index_block_offset + + std::mem::size_of::() // owners_block_offset + + std::mem::size_of::() // min_account_address + + std::mem::size_of::() // max_account_address + + std::mem::size_of::() // hash + + std::mem::size_of::() // format_version + + std::mem::size_of::(), // footer_size + "TieredStorageFooter cannot have any padding" +); + impl Default for TieredStorageFooter { fn default() -> Self { Self { @@ -161,8 +188,8 @@ impl Default for TieredStorageFooter { hash: Hash::new_unique(), min_account_address: Pubkey::default(), max_account_address: Pubkey::default(), - footer_size: FOOTER_SIZE as u64, format_version: FOOTER_FORMAT_VERSION, + footer_size: FOOTER_SIZE as u64, } } } @@ -181,15 +208,25 @@ impl TieredStorageFooter { } pub fn new_from_footer_block(file: &TieredStorageFile) -> TieredStorageResult { - let mut footer_size: u64 = 0; + file.seek_from_end(-(FOOTER_TAIL_SIZE as i64))?; + let mut footer_version: u64 = 0; - let mut magic_number = TieredStorageMagicNumber(0); + file.read_type(&mut footer_version)?; + if footer_version != FOOTER_FORMAT_VERSION { + return Err(TieredStorageError::InvalidFooterVersion(footer_version)); + } - file.seek_from_end(-(FOOTER_TAIL_SIZE as i64))?; + let mut footer_size: u64 = 0; file.read_type(&mut footer_size)?; - file.read_type(&mut footer_version)?; - file.read_type(&mut magic_number)?; + if footer_size != FOOTER_SIZE as u64 { + return Err(TieredStorageError::InvalidFooterSize( + footer_size, + FOOTER_SIZE as u64, + )); + } + let mut magic_number = TieredStorageMagicNumber::zeroed(); + file.read_type(&mut magic_number)?; if magic_number != TieredStorageMagicNumber::default() { return Err(TieredStorageError::MagicNumberMismatch( TieredStorageMagicNumber::default().0, @@ -200,16 +237,28 @@ impl TieredStorageFooter { let mut footer = Self::default(); file.seek_from_end(-(footer_size as i64))?; file.read_type(&mut footer)?; + Self::sanitize(&footer)?; Ok(footer) } pub fn new_from_mmap(mmap: &Mmap) -> TieredStorageResult<&TieredStorageFooter> { let offset = mmap.len().saturating_sub(FOOTER_TAIL_SIZE); - let (footer_size, offset) = get_type::(mmap, offset)?; - let (_footer_version, offset) = get_type::(mmap, offset)?; - let (magic_number, _offset) = get_type::(mmap, offset)?; + let (footer_version, offset) = get_type::(mmap, offset)?; + if *footer_version != FOOTER_FORMAT_VERSION { + return Err(TieredStorageError::InvalidFooterVersion(*footer_version)); + } + + let (&footer_size, offset) = get_type::(mmap, offset)?; + if footer_size != FOOTER_SIZE as u64 { + return Err(TieredStorageError::InvalidFooterSize( + footer_size, + FOOTER_SIZE as u64, + )); + } + + let (magic_number, _offset) = get_type::(mmap, offset)?; if *magic_number != TieredStorageMagicNumber::default() { return Err(TieredStorageError::MagicNumberMismatch( TieredStorageMagicNumber::default().0, @@ -217,13 +266,64 @@ impl TieredStorageFooter { )); } - let (footer, _offset) = get_type::( - mmap, - mmap.len().saturating_sub(*footer_size as usize), - )?; + let footer_offset = mmap.len().saturating_sub(footer_size as usize); + let (footer, _offset) = get_type::(mmap, 
footer_offset)?; + Self::sanitize(footer)?; Ok(footer) } + + /// Sanitizes the footer + /// + /// Since the various formats only have specific valid values, they must be sanitized + /// prior to use. This ensures the formats are valid to interpret as (rust) enums. + fn sanitize(footer: &Self) -> Result<(), SanitizeFooterError> { + let account_meta_format_u16 = + unsafe { &*(&footer.account_meta_format as *const _ as *const u16) }; + let owners_block_format_u16 = + unsafe { &*(&footer.owners_block_format as *const _ as *const u16) }; + let index_block_format_u16 = + unsafe { &*(&footer.index_block_format as *const _ as *const u16) }; + let account_block_format_u16 = + unsafe { &*(&footer.account_block_format as *const _ as *const u16) }; + + _ = AccountMetaFormat::try_from(*account_meta_format_u16) + .map_err(SanitizeFooterError::InvalidAccountMetaFormat)?; + _ = OwnersBlockFormat::try_from(*owners_block_format_u16) + .map_err(SanitizeFooterError::InvalidOwnersBlockFormat)?; + _ = IndexBlockFormat::try_from(*index_block_format_u16) + .map_err(SanitizeFooterError::InvalidIndexBlockFormat)?; + _ = AccountBlockFormat::try_from(*account_block_format_u16) + .map_err(SanitizeFooterError::InvalidAccountBlockFormat)?; + + // Since we just sanitized the formats within the footer, + // it is now safe to read them as (rust) enums. + // + // from https://doc.rust-lang.org/reference/items/enumerations.html#casting: + // > If an enumeration is unit-only (with no tuple and struct variants), + // > then its discriminant can be directly accessed with a numeric cast; + // + // from https://doc.rust-lang.org/reference/items/enumerations.html#pointer-casting: + // > If the enumeration specifies a primitive representation, + // > then the discriminant may be reliably accessed via unsafe pointer casting + Ok(()) + } +} + +/// Errors that can happen while sanitizing the footer +#[derive(Error, Debug)] +pub enum SanitizeFooterError { + #[error("invalid account meta format: {0}")] + InvalidAccountMetaFormat(#[from] TryFromPrimitiveError), + + #[error("invalid owners block format: {0}")] + InvalidOwnersBlockFormat(#[from] TryFromPrimitiveError), + + #[error("invalid index block format: {0}")] + InvalidIndexBlockFormat(#[from] TryFromPrimitiveError), + + #[error("invalid account block format: {0}")] + InvalidAccountBlockFormat(#[from] TryFromPrimitiveError), } #[cfg(test)] @@ -243,7 +343,7 @@ mod tests { let expected_footer = TieredStorageFooter { account_meta_format: AccountMetaFormat::Hot, owners_block_format: OwnersBlockFormat::LocalIndex, - index_block_format: IndexBlockFormat::AddressAndOffset, + index_block_format: IndexBlockFormat::AddressAndBlockOffsetOnly, account_block_format: AccountBlockFormat::AlignedRaw, account_entry_count: 300, account_meta_entry_size: 24, @@ -255,8 +355,8 @@ mod tests { hash: Hash::new_unique(), min_account_address: Pubkey::default(), max_account_address: Pubkey::new_unique(), - footer_size: FOOTER_SIZE as u64, format_version: FOOTER_FORMAT_VERSION, + footer_size: FOOTER_SIZE as u64, }; // Persist the expected footer. 
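The compile-time padding assertion added for TieredStorageFooter above follows a general pattern: if the struct's size ever exceeds the sum of its field sizes, the build fails, so the type can be treated as plain bytes without uninitialized padding. A minimal sketch of that pattern on a hypothetical struct (names here are illustrative, not from the diff):

#[repr(C)]
struct ExampleFooter {
    lamports: u64,
    index: u32,
    flags: u32,
}

// If a field change ever introduces hidden padding, size_of::<ExampleFooter>()
// exceeds the sum of the field sizes and this constant fails to evaluate,
// stopping compilation before the type is ever round-tripped through raw bytes.
const _: () = assert!(
    std::mem::size_of::<ExampleFooter>()
        == std::mem::size_of::<u64>() + std::mem::size_of::<u32>() + std::mem::size_of::<u32>(),
    "ExampleFooter cannot have any padding"
);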
@@ -292,7 +392,78 @@ mod tests { assert_eq!(offset_of!(TieredStorageFooter, min_account_address), 0x30); assert_eq!(offset_of!(TieredStorageFooter, max_account_address), 0x50); assert_eq!(offset_of!(TieredStorageFooter, hash), 0x70); - assert_eq!(offset_of!(TieredStorageFooter, footer_size), 0x90); - assert_eq!(offset_of!(TieredStorageFooter, format_version), 0x98); + assert_eq!(offset_of!(TieredStorageFooter, format_version), 0x90); + assert_eq!(offset_of!(TieredStorageFooter, footer_size), 0x98); + } + + #[test] + fn test_sanitize() { + // test: all good + { + let footer = TieredStorageFooter::default(); + let result = TieredStorageFooter::sanitize(&footer); + assert!(result.is_ok()); + } + + // test: bad account meta format + { + let mut footer = TieredStorageFooter::default(); + unsafe { + std::ptr::write( + &mut footer.account_meta_format as *mut _ as *mut u16, + 0xBAD0, + ); + } + let result = TieredStorageFooter::sanitize(&footer); + assert!(matches!( + result, + Err(SanitizeFooterError::InvalidAccountMetaFormat(_)) + )); + } + + // test: bad owners block format + { + let mut footer = TieredStorageFooter::default(); + unsafe { + std::ptr::write( + &mut footer.owners_block_format as *mut _ as *mut u16, + 0xBAD0, + ); + } + let result = TieredStorageFooter::sanitize(&footer); + assert!(matches!( + result, + Err(SanitizeFooterError::InvalidOwnersBlockFormat(_)) + )); + } + + // test: bad index block format + { + let mut footer = TieredStorageFooter::default(); + unsafe { + std::ptr::write(&mut footer.index_block_format as *mut _ as *mut u16, 0xBAD0); + } + let result = TieredStorageFooter::sanitize(&footer); + assert!(matches!( + result, + Err(SanitizeFooterError::InvalidIndexBlockFormat(_)) + )); + } + + // test: bad account block format + { + let mut footer = TieredStorageFooter::default(); + unsafe { + std::ptr::write( + &mut footer.account_block_format as *mut _ as *mut u16, + 0xBAD0, + ); + } + let result = TieredStorageFooter::sanitize(&footer); + assert!(matches!( + result, + Err(SanitizeFooterError::InvalidAccountBlockFormat(_)) + )); + } } } diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index a72ab83c5c5414..28f09a9e664358 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -1,4 +1,3 @@ -#![allow(dead_code)] //! The account meta and related structs for hot accounts. use { @@ -26,7 +25,7 @@ pub const HOT_FORMAT: TieredStorageFormat = TieredStorageFormat { meta_entry_size: std::mem::size_of::(), account_meta_format: AccountMetaFormat::Hot, owners_block_format: OwnersBlockFormat::LocalIndex, - index_block_format: IndexBlockFormat::AddressAndOffset, + index_block_format: IndexBlockFormat::AddressAndBlockOffsetOnly, account_block_format: AccountBlockFormat::AlignedRaw, }; @@ -34,7 +33,11 @@ pub const HOT_FORMAT: TieredStorageFormat = TieredStorageFormat { const MAX_HOT_PADDING: u8 = 7; /// The maximum allowed value for the owner index of a hot account. -const MAX_HOT_OWNER_INDEX: u32 = (1 << 29) - 1; +const MAX_HOT_OWNER_OFFSET: OwnerOffset = OwnerOffset((1 << 29) - 1); + +/// The multiplier for converting AccountOffset to the internal hot account +/// offset. This increases the maximum size of a hot accounts file. +const HOT_ACCOUNT_OFFSET_MULTIPLIER: usize = 8; #[bitfield(bits = 32)] #[repr(C)] @@ -51,7 +54,7 @@ struct HotMetaPackedFields { /// in its hot account entry. padding: B3, /// The index to the owner of a hot account inside an AccountsFile. 
- owner_index: B29, + owner_offset: B29, } /// The storage and in-memory representation of the metadata entry for a @@ -94,11 +97,11 @@ impl TieredAccountMeta for HotAccountMeta { } /// A builder function that initializes the owner's index. - fn with_owner_index(mut self, owner_index: u32) -> Self { - if owner_index > MAX_HOT_OWNER_INDEX { - panic!("owner_index exceeds MAX_HOT_OWNER_INDEX"); + fn with_owner_offset(mut self, owner_offset: OwnerOffset) -> Self { + if owner_offset > MAX_HOT_OWNER_OFFSET { + panic!("owner_offset exceeds MAX_HOT_OWNER_OFFSET"); } - self.packed_fields.set_owner_index(owner_index); + self.packed_fields.set_owner_offset(owner_offset.0); self } @@ -127,8 +130,8 @@ impl TieredAccountMeta for HotAccountMeta { } /// Returns the index to the accounts' owner in the current AccountsFile. - fn owner_index(&self) -> u32 { - self.packed_fields.owner_index() + fn owner_offset(&self) -> OwnerOffset { + OwnerOffset(self.packed_fields.owner_offset()) } /// Returns the AccountMetaFlags of the current meta. @@ -203,11 +206,11 @@ impl HotStorageReader { pub fn new_from_path(path: impl AsRef) -> TieredStorageResult { let file = OpenOptions::new().read(true).open(path)?; let mmap = unsafe { MmapOptions::new().map(&file)? }; - // Here we are cloning the footer as accessing any data in a + // Here we are copying the footer, as accessing any data in a // TieredStorage instance requires accessing its Footer. // This can help improve cache locality and reduce the overhead // of indirection associated with memory-mapped accesses. - let footer = TieredStorageFooter::new_from_mmap(&mmap)?.clone(); + let footer = *TieredStorageFooter::new_from_mmap(&mmap)?; Ok(Self { mmap, footer }) } @@ -228,7 +231,9 @@ impl HotStorageReader { &self, account_offset: AccountOffset, ) -> TieredStorageResult<&HotAccountMeta> { - let (meta, _) = get_type::(&self.mmap, account_offset.block)?; + let internal_account_offset = account_offset.block as usize * HOT_ACCOUNT_OFFSET_MULTIPLIER; + + let (meta, _) = get_type::(&self.mmap, internal_account_offset)?; Ok(meta) } @@ -285,31 +290,31 @@ pub mod tests { #[test] fn test_packed_fields() { const TEST_PADDING: u8 = 7; - const TEST_OWNER_INDEX: u32 = 0x1fff_ef98; + const TEST_OWNER_OFFSET: u32 = 0x1fff_ef98; let mut packed_fields = HotMetaPackedFields::default(); packed_fields.set_padding(TEST_PADDING); - packed_fields.set_owner_index(TEST_OWNER_INDEX); + packed_fields.set_owner_offset(TEST_OWNER_OFFSET); assert_eq!(packed_fields.padding(), TEST_PADDING); - assert_eq!(packed_fields.owner_index(), TEST_OWNER_INDEX); + assert_eq!(packed_fields.owner_offset(), TEST_OWNER_OFFSET); } #[test] fn test_packed_fields_max_values() { let mut packed_fields = HotMetaPackedFields::default(); packed_fields.set_padding(MAX_HOT_PADDING); - packed_fields.set_owner_index(MAX_HOT_OWNER_INDEX); + packed_fields.set_owner_offset(MAX_HOT_OWNER_OFFSET.0); assert_eq!(packed_fields.padding(), MAX_HOT_PADDING); - assert_eq!(packed_fields.owner_index(), MAX_HOT_OWNER_INDEX); + assert_eq!(packed_fields.owner_offset(), MAX_HOT_OWNER_OFFSET.0); } #[test] fn test_hot_meta_max_values() { let meta = HotAccountMeta::new() .with_account_data_padding(MAX_HOT_PADDING) - .with_owner_index(MAX_HOT_OWNER_INDEX); + .with_owner_offset(MAX_HOT_OWNER_OFFSET); assert_eq!(meta.account_data_padding(), MAX_HOT_PADDING); - assert_eq!(meta.owner_index(), MAX_HOT_OWNER_INDEX); + assert_eq!(meta.owner_offset(), MAX_HOT_OWNER_OFFSET); } #[test] @@ -319,16 +324,16 @@ pub mod tests { } #[test] - #[should_panic(expected = 
"owner_index exceeds MAX_HOT_OWNER_INDEX")] - fn test_hot_meta_owner_index_exceeds_limit() { - HotAccountMeta::new().with_owner_index(MAX_HOT_OWNER_INDEX + 1); + #[should_panic(expected = "owner_offset exceeds MAX_HOT_OWNER_OFFSET")] + fn test_hot_meta_owner_offset_exceeds_limit() { + HotAccountMeta::new().with_owner_offset(OwnerOffset(MAX_HOT_OWNER_OFFSET.0 + 1)); } #[test] fn test_hot_account_meta() { const TEST_LAMPORTS: u64 = 2314232137; const TEST_PADDING: u8 = 5; - const TEST_OWNER_INDEX: u32 = 0x1fef_1234; + const TEST_OWNER_OFFSET: OwnerOffset = OwnerOffset(0x1fef_1234); const TEST_RENT_EPOCH: Epoch = 7; let optional_fields = AccountMetaOptionalFields { @@ -340,12 +345,12 @@ pub mod tests { let meta = HotAccountMeta::new() .with_lamports(TEST_LAMPORTS) .with_account_data_padding(TEST_PADDING) - .with_owner_index(TEST_OWNER_INDEX) + .with_owner_offset(TEST_OWNER_OFFSET) .with_flags(&flags); assert_eq!(meta.lamports(), TEST_LAMPORTS); assert_eq!(meta.account_data_padding(), TEST_PADDING); - assert_eq!(meta.owner_index(), TEST_OWNER_INDEX); + assert_eq!(meta.owner_offset(), TEST_OWNER_OFFSET); assert_eq!(*meta.flags(), flags); } @@ -355,7 +360,7 @@ pub mod tests { let padding = [0u8; 5]; const TEST_LAMPORT: u64 = 2314232137; - const OWNER_INDEX: u32 = 0x1fef_1234; + const OWNER_OFFSET: u32 = 0x1fef_1234; const TEST_RENT_EPOCH: Epoch = 7; let optional_fields = AccountMetaOptionalFields { @@ -367,7 +372,7 @@ pub mod tests { let expected_meta = HotAccountMeta::new() .with_lamports(TEST_LAMPORT) .with_account_data_padding(padding.len().try_into().unwrap()) - .with_owner_index(OWNER_INDEX) + .with_owner_offset(OwnerOffset(OWNER_OFFSET)) .with_flags(&flags); let mut writer = ByteBlockWriter::new(AccountBlockFormat::AlignedRaw); @@ -407,7 +412,7 @@ pub mod tests { let expected_footer = TieredStorageFooter { account_meta_format: AccountMetaFormat::Hot, owners_block_format: OwnersBlockFormat::LocalIndex, - index_block_format: IndexBlockFormat::AddressAndOffset, + index_block_format: IndexBlockFormat::AddressAndBlockOffsetOnly, account_block_format: AccountBlockFormat::AlignedRaw, account_entry_count: 300, account_meta_entry_size: 16, @@ -449,7 +454,7 @@ pub mod tests { .map(|_| { HotAccountMeta::new() .with_lamports(rng.gen_range(0..u64::MAX)) - .with_owner_index(rng.gen_range(0..NUM_ACCOUNTS)) + .with_owner_offset(OwnerOffset(rng.gen_range(0..NUM_ACCOUNTS))) }) .collect(); @@ -467,8 +472,11 @@ pub mod tests { .iter() .map(|meta| { let prev_offset = current_offset; - current_offset += file.write_type(meta).unwrap(); - AccountOffset { block: prev_offset } + current_offset += file.write_type(meta).unwrap() as u32; + assert_eq!(prev_offset % HOT_ACCOUNT_OFFSET_MULTIPLIER as u32, 0); + AccountOffset { + block: prev_offset / HOT_ACCOUNT_OFFSET_MULTIPLIER as u32, + } }) .collect(); // while the test only focuses on account metas, writing a footer @@ -503,7 +511,7 @@ pub mod tests { .iter() .map(|address| AccountIndexWriterEntry { address, - block_offset: rng.gen_range(0..u64::MAX), + block_offset: rng.gen_range(0..u32::MAX), intra_block_offset: rng.gen_range(0..4096), }) .collect(); @@ -531,10 +539,14 @@ pub mod tests { let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); for (i, index_writer_entry) in index_writer_entries.iter().enumerate() { - let account_offset = hot_storage.get_account_offset(IndexOffset(i)).unwrap(); - assert_eq!(account_offset.block as u64, index_writer_entry.block_offset); + let account_offset = hot_storage + .get_account_offset(IndexOffset(i as u32)) + 
.unwrap(); + assert_eq!(account_offset.block, index_writer_entry.block_offset); - let account_address = hot_storage.get_account_address(IndexOffset(i)).unwrap(); + let account_address = hot_storage + .get_account_address(IndexOffset(i as u32)) + .unwrap(); assert_eq!(account_address, index_writer_entry.address); } } diff --git a/accounts-db/src/tiered_storage/index.rs b/accounts-db/src/tiered_storage/index.rs index acd406d1c02b23..778eb3237e304e 100644 --- a/accounts-db/src/tiered_storage/index.rs +++ b/accounts-db/src/tiered_storage/index.rs @@ -13,8 +13,8 @@ use { #[derive(Debug)] pub struct AccountIndexWriterEntry<'a> { pub address: &'a Pubkey, - pub block_offset: u64, - pub intra_block_offset: u64, + pub block_offset: u32, + pub intra_block_offset: u32, } /// The offset to an account stored inside its accounts block. @@ -23,14 +23,14 @@ pub struct AccountIndexWriterEntry<'a> { #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct AccountOffset { /// The offset to the accounts block that contains the account meta/data. - pub block: usize, + pub block: u32, } /// The offset to an account/address entry in the accounts index block. /// This can be used to obtain the AccountOffset and address by looking through /// the accounts index block. #[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub struct IndexOffset(pub usize); +pub struct IndexOffset(pub u32); /// The index format of a tiered accounts file. #[repr(u16)] @@ -47,10 +47,10 @@ pub struct IndexOffset(pub usize); )] pub enum IndexBlockFormat { /// This format optimizes the storage size by storing only account addresses - /// and offsets. It skips storing the size of account data by storing account - /// block entries and index block entries in the same order. + /// and block offsets. It skips storing the size of account data by storing + /// account block entries and index block entries in the same order. #[default] - AddressAndOffset = 0, + AddressAndBlockOffsetOnly = 0, } impl IndexBlockFormat { @@ -62,7 +62,7 @@ impl IndexBlockFormat { index_entries: &[AccountIndexWriterEntry], ) -> TieredStorageResult { match self { - Self::AddressAndOffset => { + Self::AddressAndBlockOffsetOnly => { let mut bytes_written = 0; for index_entry in index_entries { bytes_written += file.write_type(index_entry.address)?; @@ -83,8 +83,9 @@ impl IndexBlockFormat { index_offset: IndexOffset, ) -> TieredStorageResult<&'a Pubkey> { let account_offset = match self { - Self::AddressAndOffset => { - footer.index_block_offset as usize + std::mem::size_of::() * index_offset.0 + Self::AddressAndBlockOffsetOnly => { + footer.index_block_offset as usize + + std::mem::size_of::() * (index_offset.0 as usize) } }; let (address, _) = get_type::(mmap, account_offset)?; @@ -99,13 +100,14 @@ impl IndexBlockFormat { index_offset: IndexOffset, ) -> TieredStorageResult { match self { - Self::AddressAndOffset => { + Self::AddressAndBlockOffsetOnly => { let account_offset = footer.index_block_offset as usize + std::mem::size_of::() * footer.account_entry_count as usize - + index_offset.0 * std::mem::size_of::(); - let (account_block_offset, _) = get_type(mmap, account_offset)?; + + std::mem::size_of::() * index_offset.0 as usize; + let (block_offset, _) = get_type(mmap, account_offset)?; + Ok(AccountOffset { - block: *account_block_offset, + block: *block_offset, }) } } @@ -114,7 +116,9 @@ impl IndexBlockFormat { /// Returns the size of one index entry. 
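    // A minimal sketch of the AddressAndBlockOffsetOnly layout assumed by the
    // accessors above (the stripped generic arguments are taken to be Pubkey and
    // u32, matching AccountIndexWriterEntry): the index block stores all account
    // addresses first, then all block offsets, in the same order:
    //
    //     [ addr_0 | addr_1 | ... | addr_{n-1} | off_0 | off_1 | ... | off_{n-1} ]
    //        32 B     32 B           32 B         4 B     4 B            4 B
    //
    // so entry i's address lives at index_block_offset + 32 * i, its block offset
    // at index_block_offset + 32 * n + 4 * i, and entry_size() below reports 36.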
pub fn entry_size(&self) -> usize { match self { - Self::AddressAndOffset => std::mem::size_of::() + std::mem::size_of::(), + Self::AddressAndBlockOffsetOnly => { + std::mem::size_of::() + std::mem::size_of::() + } } } } @@ -150,11 +154,11 @@ mod tests { { let file = TieredStorageFile::new_writable(&path).unwrap(); - let indexer = IndexBlockFormat::AddressAndOffset; + let indexer = IndexBlockFormat::AddressAndBlockOffsetOnly; indexer.write_index_block(&file, &index_entries).unwrap(); } - let indexer = IndexBlockFormat::AddressAndOffset; + let indexer = IndexBlockFormat::AddressAndBlockOffsetOnly; let file = OpenOptions::new() .read(true) .create(false) @@ -163,11 +167,11 @@ mod tests { let mmap = unsafe { MmapOptions::new().map(&file).unwrap() }; for (i, index_entry) in index_entries.iter().enumerate() { let account_offset = indexer - .get_account_offset(&mmap, &footer, IndexOffset(i)) + .get_account_offset(&mmap, &footer, IndexOffset(i as u32)) .unwrap(); - assert_eq!(index_entry.block_offset, account_offset.block as u64); + assert_eq!(index_entry.block_offset, account_offset.block); let address = indexer - .get_account_address(&mmap, &footer, IndexOffset(i)) + .get_account_address(&mmap, &footer, IndexOffset(i as u32)) .unwrap(); assert_eq!(index_entry.address, address); } diff --git a/accounts-db/src/tiered_storage/meta.rs b/accounts-db/src/tiered_storage/meta.rs index 668c6ab93d8310..0111902fb8369d 100644 --- a/accounts-db/src/tiered_storage/meta.rs +++ b/accounts-db/src/tiered_storage/meta.rs @@ -1,7 +1,8 @@ -#![allow(dead_code)] //! The account meta and related structs for the tiered storage. + use { - crate::accounts_hash::AccountHash, modular_bitfield::prelude::*, + crate::{accounts_hash::AccountHash, tiered_storage::owners::OwnerOffset}, + modular_bitfield::prelude::*, solana_sdk::stake_history::Epoch, }; @@ -31,8 +32,8 @@ pub trait TieredAccountMeta: Sized { /// for the account data associated with the current meta. fn with_account_data_padding(self, padding: u8) -> Self; - /// A builder function that initializes the owner's index. - fn with_owner_index(self, index: u32) -> Self; + /// A builder function that initializes the owner offset. + fn with_owner_offset(self, owner_offset: OwnerOffset) -> Self; /// A builder function that initializes the account data size. /// The size here represents the logical data size without compression. @@ -48,8 +49,8 @@ pub trait TieredAccountMeta: Sized { /// Returns the number of padding bytes for the associated account data fn account_data_padding(&self) -> u8; - /// Returns the index to the accounts' owner in the current AccountsFile. - fn owner_index(&self) -> u32; + /// Returns the offset to the accounts' owner in the current AccountsFile. + fn owner_offset(&self) -> OwnerOffset; /// Returns the AccountMetaFlags of the current meta. fn flags(&self) -> &AccountMetaFlags; diff --git a/accounts-db/src/tiered_storage/owners.rs b/accounts-db/src/tiered_storage/owners.rs index 1f570674cd47b6..7cd548e3a00c8d 100644 --- a/accounts-db/src/tiered_storage/owners.rs +++ b/accounts-db/src/tiered_storage/owners.rs @@ -18,7 +18,7 @@ pub struct OwnersBlock; /// /// Note that as its internal type is u32, it means the maximum number of /// unique owners in one TieredStorageFile is 2^32. 
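// A minimal sketch of why ordering is derived for this newtype: hot.rs compares
// offsets against MAX_HOT_OWNER_OFFSET when building a meta, e.g.
//
//     if owner_offset > MAX_HOT_OWNER_OFFSET {
//         panic!("owner_offset exceeds MAX_HOT_OWNER_OFFSET");
//     }
//
// which needs PartialOrd on OwnerOffset rather than on the inner u32 alone.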
-#[derive(Clone, Copy, Debug, Eq, PartialEq)] +#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd)] pub struct OwnerOffset(pub u32); /// OwnersBlock is persisted as a consecutive bytes of pubkeys without any diff --git a/accounts-db/src/verify_accounts_hash_in_background.rs b/accounts-db/src/verify_accounts_hash_in_background.rs index d4676cfe128f81..f03e4e0482ce8e 100644 --- a/accounts-db/src/verify_accounts_hash_in_background.rs +++ b/accounts-db/src/verify_accounts_hash_in_background.rs @@ -67,7 +67,7 @@ impl VerifyAccountsHashInBackground { } let result = lock.take().unwrap().join().unwrap(); if !result { - panic!("initial hash verification failed: {result:?}"); + panic!("initial background accounts hash verification failed: {result}"); } // we never have to check again self.verification_complete(); @@ -139,7 +139,7 @@ pub mod tests { } #[test] - #[should_panic(expected = "initial hash verification failed")] + #[should_panic(expected = "initial background accounts hash verification failed")] fn test_panic() { let verify = Arc::new(VerifyAccountsHashInBackground::default()); start_thread_and_return(&verify, false, || {}); diff --git a/banking-bench/Cargo.toml b/banking-bench/Cargo.toml index 44453a5e35d2e3..ed791d94499a0f 100644 --- a/banking-bench/Cargo.toml +++ b/banking-bench/Cargo.toml @@ -22,7 +22,7 @@ solana-logger = { workspace = true } solana-measure = { workspace = true } solana-perf = { workspace = true } solana-poh = { workspace = true, features = ["dev-context-only-utils"] } -solana-runtime = { workspace = true } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } solana-sdk = { workspace = true } solana-streamer = { workspace = true } solana-tpu-client = { workspace = true } diff --git a/banks-client/Cargo.toml b/banks-client/Cargo.toml index 6d5bf5b398e50c..b6739ea452d79c 100644 --- a/banks-client/Cargo.toml +++ b/banks-client/Cargo.toml @@ -22,7 +22,7 @@ tokio-serde = { workspace = true, features = ["bincode"] } [dev-dependencies] solana-banks-server = { workspace = true } -solana-runtime = { workspace = true } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } [lib] crate-type = ["lib"] diff --git a/bench-tps/Cargo.toml b/bench-tps/Cargo.toml index 803e820a5bd576..cd40eb1c833c1c 100644 --- a/bench-tps/Cargo.toml +++ b/bench-tps/Cargo.toml @@ -44,6 +44,7 @@ thiserror = { workspace = true } [dev-dependencies] serial_test = { workspace = true } solana-local-cluster = { workspace = true } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } solana-test-validator = { workspace = true } tempfile = { workspace = true } diff --git a/bench-tps/src/bench.rs b/bench-tps/src/bench.rs index 029937f391cccd..f33b69241509af 100644 --- a/bench-tps/src/bench.rs +++ b/bench-tps/src/bench.rs @@ -1160,17 +1160,17 @@ mod tests { }, }; - fn bank_with_all_features(genesis_config: &GenesisConfig) -> Bank { + fn bank_with_all_features(genesis_config: &GenesisConfig) -> Arc { let mut bank = Bank::new_for_tests(genesis_config); bank.feature_set = Arc::new(FeatureSet::all_enabled()); - bank + bank.wrap_with_bank_forks_for_tests().0 } #[test] fn test_bench_tps_bank_client() { let (genesis_config, id) = create_genesis_config(sol_to_lamports(10_000.0)); let bank = bank_with_all_features(&genesis_config); - let client = Arc::new(BankClient::new(bank)); + let client = Arc::new(BankClient::new_shared(bank)); let config = Config { id, @@ -1191,7 +1191,7 @@ mod tests { fn test_bench_tps_fund_keys() { let (genesis_config, id) = 
create_genesis_config(sol_to_lamports(10_000.0)); let bank = bank_with_all_features(&genesis_config); - let client = Arc::new(BankClient::new(bank)); + let client = Arc::new(BankClient::new_shared(bank)); let keypair_count = 20; let lamports = 20; let rent = client.get_minimum_balance_for_rent_exemption(0).unwrap(); @@ -1216,7 +1216,7 @@ mod tests { let fee_rate_governor = FeeRateGovernor::new(11, 0); genesis_config.fee_rate_governor = fee_rate_governor; let bank = bank_with_all_features(&genesis_config); - let client = Arc::new(BankClient::new(bank)); + let client = Arc::new(BankClient::new_shared(bank)); let keypair_count = 20; let lamports = 20; let rent = client.get_minimum_balance_for_rent_exemption(0).unwrap(); @@ -1234,7 +1234,7 @@ mod tests { fn test_bench_tps_create_durable_nonce() { let (genesis_config, id) = create_genesis_config(sol_to_lamports(10_000.0)); let bank = bank_with_all_features(&genesis_config); - let client = Arc::new(BankClient::new(bank)); + let client = Arc::new(BankClient::new_shared(bank)); let keypair_count = 10; let lamports = 10_000_000; diff --git a/bucket_map/src/bucket.rs b/bucket_map/src/bucket.rs index 036743c214dc46..6eb755c8cccc6c 100644 --- a/bucket_map/src/bucket.rs +++ b/bucket_map/src/bucket.rs @@ -116,7 +116,6 @@ pub struct Bucket { /// true if this bucket was loaded (as opposed to created blank). /// When populating, we want to prioritize looking for data on disk that already matches as opposed to writing new data. - #[allow(dead_code)] reused_file_at_startup: bool, } diff --git a/bucket_map/src/bucket_api.rs b/bucket_map/src/bucket_api.rs index e5449a814a5be9..6677d6932e1e82 100644 --- a/bucket_map/src/bucket_api.rs +++ b/bucket_map/src/bucket_api.rs @@ -26,7 +26,6 @@ pub struct BucketApi { /// keeps track of which index file this bucket is currently using /// or at startup, which bucket file this bucket should initially use - #[allow(dead_code)] restartable_bucket: RestartableBucket, } diff --git a/bucket_map/src/bucket_storage.rs b/bucket_map/src/bucket_storage.rs index c81c6a1a7a3444..700cc22f25f694 100644 --- a/bucket_map/src/bucket_storage.rs +++ b/bucket_map/src/bucket_storage.rs @@ -219,8 +219,6 @@ impl BucketStorage { offset } - // temporary tag - #[allow(dead_code)] /// load and mmap the file that is this disk bucket if possible pub(crate) fn load_on_restart( path: PathBuf, diff --git a/ci/buildkite-pipeline.sh b/ci/buildkite-pipeline.sh index 8535905bfee4d0..91ffe0e3a3bbb2 100755 --- a/ci/buildkite-pipeline.sh +++ b/ci/buildkite-pipeline.sh @@ -156,7 +156,7 @@ all_test_steps() { ^ci/rust-version.sh \ ^ci/test-docs.sh \ ; then - command_step doctest "ci/test-docs.sh" 15 + command_step doctest ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-docs.sh" 15 else annotate --style info --context test-docs \ "Docs skipped as no .rs files were modified" @@ -182,7 +182,7 @@ all_test_steps() { cargo-test-sbf$ \ ; then cat >> "$output_file" <<"EOF" - - command: "ci/test-stable-sbf.sh" + - command: ". 
ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-stable-sbf.sh" name: "stable-sbf" timeout_in_minutes: 35 artifact_paths: "sbf-dumps.tar.bz2" @@ -240,6 +240,8 @@ EOF ^ci/rust-version.sh \ ^ci/test-coverage.sh \ ^ci/test-bench.sh \ + ^ci/bench \ + .buildkite/scripts/build-bench.sh \ ; then .buildkite/scripts/build-bench.sh >> "$output_file" else diff --git a/ci/buildkite-solana-private.sh b/ci/buildkite-solana-private.sh index ede70e6229d5f8..03bbccfd20fda3 100755 --- a/ci/buildkite-solana-private.sh +++ b/ci/buildkite-solana-private.sh @@ -146,7 +146,7 @@ all_test_steps() { ^ci/rust-version.sh \ ^ci/test-docs.sh \ ; then - command_step doctest "ci/test-docs.sh" 15 + command_step doctest ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-docs.sh" 15 else annotate --style info --context test-docs \ "Docs skipped as no .rs files were modified" @@ -168,7 +168,7 @@ all_test_steps() { ^sdk/ \ ; then cat >> "$output_file" <<"EOF" - - command: "ci/test-stable-sbf.sh" + - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-stable-sbf.sh" name: "stable-sbf" timeout_in_minutes: 35 artifact_paths: "sbf-dumps.tar.bz2" diff --git a/ci/docker-rust-nightly/Dockerfile b/ci/docker-rust-nightly/Dockerfile index baf7e09632bac6..eb214fdb11173e 100644 --- a/ci/docker-rust-nightly/Dockerfile +++ b/ci/docker-rust-nightly/Dockerfile @@ -1,4 +1,4 @@ -FROM solanalabs/rust:1.73.0 +FROM solanalabs/rust:1.74.0 ARG date ARG GRCOV_VERSION=v0.8.18 diff --git a/ci/docker-rust/Dockerfile b/ci/docker-rust/Dockerfile index 8619d5e68e30a0..47057c1c8d7618 100644 --- a/ci/docker-rust/Dockerfile +++ b/ci/docker-rust/Dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:20.04 ARG \ - RUST_VERSION=1.73.0 \ + RUST_VERSION=1.74.0 \ GOLANG_VERSION=1.21.3 \ NODE_MAJOR=18 \ SCCACHE_VERSION=v0.5.4 diff --git a/ci/rust-version.sh b/ci/rust-version.sh index a38910accda10b..fcfed6bd961243 100644 --- a/ci/rust-version.sh +++ b/ci/rust-version.sh @@ -29,7 +29,7 @@ fi if [[ -n $RUST_NIGHTLY_VERSION ]]; then nightly_version="$RUST_NIGHTLY_VERSION" else - nightly_version=2023-10-05 + nightly_version=2023-11-16 fi diff --git a/cli/src/address_lookup_table.rs b/cli/src/address_lookup_table.rs index a1be08a577c07f..0a968e1b74444e 100644 --- a/cli/src/address_lookup_table.rs +++ b/cli/src/address_lookup_table.rs @@ -80,10 +80,12 @@ impl AddressLookupTableSubCommands for App<'_, '_> { .takes_value(true) .validator(is_pubkey) .help( - "Lookup table authority address [default: the default configured keypair]. \ - WARNING: Cannot be used for creating a lookup table for a cluster running v1.11 - or earlier which requires the authority to sign for lookup table creation.", - ) + "Lookup table authority address \ + [default: the default configured keypair]. 
\ + WARNING: Cannot be used for creating a lookup table for \ + a cluster running v1.11 or earlier which requires the \ + authority to sign for lookup table creation.", + ), ) .arg( Arg::with_name("authority_signer") @@ -92,7 +94,10 @@ impl AddressLookupTableSubCommands for App<'_, '_> { .takes_value(true) .conflicts_with("authority") .validator(is_valid_signer) - .help("Lookup table authority keypair [default: the default configured keypair].") + .help( + "Lookup table authority keypair \ + [default: the default configured keypair].", + ), ) .arg( Arg::with_name("payer") @@ -100,8 +105,11 @@ impl AddressLookupTableSubCommands for App<'_, '_> { .value_name("PAYER_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Account that will pay rent fees for the created lookup table [default: the default configured keypair]") - ) + .help( + "Account that will pay rent fees for the created lookup table \ + [default: the default configured keypair]", + ), + ), ) .subcommand( SubCommand::with_name("freeze") @@ -113,7 +121,7 @@ impl AddressLookupTableSubCommands for App<'_, '_> { .takes_value(true) .required(true) .validator(is_pubkey) - .help("Address of the lookup table") + .help("Address of the lookup table"), ) .arg( Arg::with_name("authority") @@ -121,7 +129,10 @@ impl AddressLookupTableSubCommands for App<'_, '_> { .value_name("AUTHORITY_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Lookup table authority [default: the default configured keypair]") + .help( + "Lookup table authority \ + [default: the default configured keypair]", + ), ) .arg( Arg::with_name("bypass_warning") @@ -140,7 +151,7 @@ impl AddressLookupTableSubCommands for App<'_, '_> { .takes_value(true) .required(true) .validator(is_pubkey) - .help("Address of the lookup table") + .help("Address of the lookup table"), ) .arg( Arg::with_name("authority") @@ -148,7 +159,10 @@ impl AddressLookupTableSubCommands for App<'_, '_> { .value_name("AUTHORITY_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Lookup table authority [default: the default configured keypair]") + .help( + "Lookup table authority \ + [default: the default configured keypair]", + ), ) .arg( Arg::with_name("payer") @@ -156,7 +170,10 @@ impl AddressLookupTableSubCommands for App<'_, '_> { .value_name("PAYER_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Account that will pay rent fees for the extended lookup table [default: the default configured keypair]") + .help( + "Account that will pay rent fees for the extended lookup \ + table [default: the default configured keypair]", + ), ) .arg( Arg::with_name("addresses") @@ -166,8 +183,8 @@ impl AddressLookupTableSubCommands for App<'_, '_> { .use_delimiter(true) .required(true) .validator(is_pubkey) - .help("Comma separated list of addresses to append") - ) + .help("Comma separated list of addresses to append"), + ), ) .subcommand( SubCommand::with_name("deactivate") @@ -178,7 +195,7 @@ impl AddressLookupTableSubCommands for App<'_, '_> { .value_name("LOOKUP_TABLE_ADDRESS") .takes_value(true) .required(true) - .help("Address of the lookup table") + .help("Address of the lookup table"), ) .arg( Arg::with_name("authority") @@ -186,7 +203,10 @@ impl AddressLookupTableSubCommands for App<'_, '_> { .value_name("AUTHORITY_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Lookup table authority [default: the default configured keypair]") + .help( + "Lookup table authority \ + [default: the default configured keypair]", + ), ) .arg( 
Arg::with_name("bypass_warning") @@ -204,7 +224,7 @@ impl AddressLookupTableSubCommands for App<'_, '_> { .value_name("LOOKUP_TABLE_ADDRESS") .takes_value(true) .required(true) - .help("Address of the lookup table") + .help("Address of the lookup table"), ) .arg( Arg::with_name("recipient") @@ -212,7 +232,10 @@ impl AddressLookupTableSubCommands for App<'_, '_> { .value_name("RECIPIENT_ADDRESS") .takes_value(true) .validator(is_pubkey) - .help("Address of the recipient account to deposit the closed account's lamports [default: the default configured keypair]") + .help( + "Address of the recipient account to deposit the closed \ + account's lamports [default: the default configured keypair]", + ), ) .arg( Arg::with_name("authority") @@ -220,8 +243,11 @@ impl AddressLookupTableSubCommands for App<'_, '_> { .value_name("AUTHORITY_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Lookup table authority [default: the default configured keypair]") - ) + .help( + "Lookup table authority \ + [default: the default configured keypair]", + ), + ), ) .subcommand( SubCommand::with_name("get") @@ -231,9 +257,9 @@ impl AddressLookupTableSubCommands for App<'_, '_> { .index(1) .value_name("LOOKUP_TABLE_ADDRESS") .takes_value(true) - .help("Address of the lookup table to show") - ) - ) + .help("Address of the lookup table to show"), + ), + ), ) } } @@ -593,9 +619,9 @@ fn process_create_lookup_table( } } -pub const FREEZE_LOOKUP_TABLE_WARNING: &str = "WARNING! \ -Once a lookup table is frozen, it can never be modified or unfrozen again. \ -To proceed with freezing, rerun the `freeze` command with the `--bypass-warning` flag"; +pub const FREEZE_LOOKUP_TABLE_WARNING: &str = + "WARNING! Once a lookup table is frozen, it can never be modified or unfrozen again. To \ + proceed with freezing, rerun the `freeze` command with the `--bypass-warning` flag"; fn process_freeze_lookup_table( rpc_client: &RpcClient, @@ -613,9 +639,10 @@ fn process_freeze_lookup_table( })?; if !address_lookup_table::program::check_id(&lookup_table_account.owner) { return Err(format!( - "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table program", - ) - .into()); + "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table \ + program", + ) + .into()); } if !bypass_warning { @@ -671,9 +698,10 @@ fn process_extend_lookup_table( })?; if !address_lookup_table::program::check_id(&lookup_table_account.owner) { return Err(format!( - "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table program", - ) - .into()); + "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table \ + program", + ) + .into()); } let authority_address = authority_signer.pubkey(); @@ -709,10 +737,10 @@ fn process_extend_lookup_table( } } -pub const DEACTIVATE_LOOKUP_TABLE_WARNING: &str = "WARNING! \ -Once a lookup table is deactivated, it is no longer usable by transactions. -Deactivated lookup tables may only be closed and cannot be recreated at the same address. \ -To proceed with deactivation, rerun the `deactivate` command with the `--bypass-warning` flag"; +pub const DEACTIVATE_LOOKUP_TABLE_WARNING: &str = + "WARNING! Once a lookup table is deactivated, it is no longer usable by transactions. +Deactivated lookup tables may only be closed and cannot be recreated at the same address. 
To \ + proceed with deactivation, rerun the `deactivate` command with the `--bypass-warning` flag"; fn process_deactivate_lookup_table( rpc_client: &RpcClient, @@ -730,9 +758,10 @@ fn process_deactivate_lookup_table( })?; if !address_lookup_table::program::check_id(&lookup_table_account.owner) { return Err(format!( - "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table program", - ) - .into()); + "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table \ + program", + ) + .into()); } if !bypass_warning { @@ -783,17 +812,19 @@ fn process_close_lookup_table( })?; if !address_lookup_table::program::check_id(&lookup_table_account.owner) { return Err(format!( - "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table program", - ) - .into()); + "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table \ + program", + ) + .into()); } let lookup_table_account = AddressLookupTable::deserialize(&lookup_table_account.data)?; if lookup_table_account.meta.deactivation_slot == u64::MAX { return Err(format!( - "Lookup table account {lookup_table_pubkey} is not deactivated. Only deactivated lookup tables may be closed", - ) - .into()); + "Lookup table account {lookup_table_pubkey} is not deactivated. Only deactivated \ + lookup tables may be closed", + ) + .into()); } let authority_address = authority_signer.pubkey(); @@ -836,9 +867,10 @@ fn process_show_lookup_table( })?; if !address_lookup_table::program::check_id(&lookup_table_account.owner) { return Err(format!( - "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table program", - ) - .into()); + "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table \ + program", + ) + .into()); } let lookup_table_account = AddressLookupTable::deserialize(&lookup_table_account.data)?; diff --git a/cli/src/clap_app.rs b/cli/src/clap_app.rs index 74d9b998badbf8..3706b3e6c2fea9 100644 --- a/cli/src/clap_app.rs +++ b/cli/src/clap_app.rs @@ -38,7 +38,7 @@ pub fn get_clap_app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> A .validator(is_url_or_moniker) .help( "URL for Solana's JSON RPC or moniker (or their first letter): \ - [mainnet-beta, testnet, devnet, localhost]", + [mainnet-beta, testnet, devnet, localhost]", ), ) .arg( @@ -67,16 +67,19 @@ pub fn get_clap_app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> A "processed", "confirmed", "finalized", - "recent", // Deprecated as of v1.5.5 - "single", // Deprecated as of v1.5.5 + "recent", // Deprecated as of v1.5.5 + "single", // Deprecated as of v1.5.5 "singleGossip", // Deprecated as of v1.5.5 - "root", // Deprecated as of v1.5.5 - "max", // Deprecated as of v1.5.5 + "root", // Deprecated as of v1.5.5 + "max", // Deprecated as of v1.5.5 ]) .value_name("COMMITMENT_LEVEL") .hide_possible_values(true) .global(true) - .help("Return information at the selected commitment level [possible values: processed, confirmed, finalized]"), + .help( + "Return information at the selected commitment level \ + [possible values: processed, confirmed, finalized]", + ), ) .arg( Arg::with_name("verbose") @@ -207,14 +210,14 @@ pub fn get_clap_app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> A ) .subcommand( SubCommand::with_name("completion") - .about("Generate completion scripts for various shells") - .arg( - Arg::with_name("shell") - .long("shell") - .short("s") - .takes_value(true) - .possible_values(&["bash", "fish", "zsh", 
"powershell", "elvish"]) - .default_value("bash") - ) + .about("Generate completion scripts for various shells") + .arg( + Arg::with_name("shell") + .long("shell") + .short("s") + .takes_value(true) + .possible_values(&["bash", "fish", "zsh", "powershell", "elvish"]) + .default_value("bash"), + ), ) } diff --git a/cli/src/cluster_query.rs b/cli/src/cluster_query.rs index ee683081ed4790..0f55566935ba4c 100644 --- a/cli/src/cluster_query.rs +++ b/cli/src/cluster_query.rs @@ -103,20 +103,23 @@ impl ClusterQuerySubCommands for App<'_, '_> { .subcommand( SubCommand::with_name("catchup") .about("Wait for a validator to catch up to the cluster") - .arg( - pubkey!(Arg::with_name("node_pubkey") + .arg(pubkey!( + Arg::with_name("node_pubkey") .index(1) .value_name("OUR_VALIDATOR_PUBKEY") .required(false), - "Identity pubkey of the validator"), - ) + "Identity pubkey of the validator" + )) .arg( Arg::with_name("node_json_rpc_url") .index(2) .value_name("OUR_URL") .takes_value(true) .validator(is_url) - .help("JSON RPC URL for validator, which is useful for validators with a private RPC service") + .help( + "JSON RPC URL for validator, which is useful for validators with a \ + private RPC service", + ), ) .arg( Arg::with_name("follow") @@ -131,19 +134,19 @@ impl ClusterQuerySubCommands for App<'_, '_> { .value_name("PORT") .default_value(DEFAULT_RPC_PORT_STR) .validator(is_port) - .help("Guess Identity pubkey and validator rpc node assuming local (possibly private) validator"), + .help( + "Guess Identity pubkey and validator rpc node assuming local \ + (possibly private) validator", + ), ) - .arg( - Arg::with_name("log") - .long("log") - .takes_value(false) - .help("Don't update the progress inplace; instead show updates with its own new lines"), - ), - ) - .subcommand( - SubCommand::with_name("cluster-date") - .about("Get current cluster date, computed from genesis creation time and network time"), + .arg(Arg::with_name("log").long("log").takes_value(false).help( + "Don't update the progress inplace; instead show updates with its own new \ + lines", + )), ) + .subcommand(SubCommand::with_name("cluster-date").about( + "Get current cluster date, computed from genesis creation time and network time", + )) .subcommand( SubCommand::with_name("cluster-version") .about("Get the version of the cluster entrypoint"), @@ -151,94 +154,97 @@ impl ClusterQuerySubCommands for App<'_, '_> { // Deprecated in v1.8.0 .subcommand( SubCommand::with_name("fees") - .about("Display current cluster fees (Deprecated in v1.8.0)") - .arg( - Arg::with_name("blockhash") - .long("blockhash") - .takes_value(true) - .value_name("BLOCKHASH") - .validator(is_hash) - .help("Query fees for BLOCKHASH instead of the the most recent blockhash") - ), + .about("Display current cluster fees (Deprecated in v1.8.0)") + .arg( + Arg::with_name("blockhash") + .long("blockhash") + .takes_value(true) + .value_name("BLOCKHASH") + .validator(is_hash) + .help("Query fees for BLOCKHASH instead of the the most recent blockhash"), + ), ) .subcommand( SubCommand::with_name("first-available-block") .about("Get the first available block in the storage"), ) - .subcommand(SubCommand::with_name("block-time") - .about("Get estimated production time of a block") - .alias("get-block-time") - .arg( - Arg::with_name("slot") - .index(1) - .takes_value(true) - .value_name("SLOT") - .help("Slot number of the block to query") - ) - ) - .subcommand(SubCommand::with_name("leader-schedule") - .about("Display leader schedule") - .arg( - Arg::with_name("epoch") - 
.long("epoch") - .takes_value(true) - .value_name("EPOCH") - .validator(is_epoch) - .help("Epoch to show leader schedule for. [default: current]") - ) - ) .subcommand( - SubCommand::with_name("epoch-info") - .about("Get information about the current epoch") - .alias("get-epoch-info"), + SubCommand::with_name("block-time") + .about("Get estimated production time of a block") + .alias("get-block-time") + .arg( + Arg::with_name("slot") + .index(1) + .takes_value(true) + .value_name("SLOT") + .help("Slot number of the block to query"), + ), ) .subcommand( - SubCommand::with_name("genesis-hash") - .about("Get the genesis hash") - .alias("get-genesis-hash") + SubCommand::with_name("leader-schedule") + .about("Display leader schedule") + .arg( + Arg::with_name("epoch") + .long("epoch") + .takes_value(true) + .value_name("EPOCH") + .validator(is_epoch) + .help("Epoch to show leader schedule for [default: current]"), + ), ) .subcommand( - SubCommand::with_name("slot").about("Get current slot") - .alias("get-slot"), + SubCommand::with_name("epoch-info") + .about("Get information about the current epoch") + .alias("get-epoch-info"), ) .subcommand( - SubCommand::with_name("block-height").about("Get current block height"), + SubCommand::with_name("genesis-hash") + .about("Get the genesis hash") + .alias("get-genesis-hash"), ) .subcommand( - SubCommand::with_name("epoch").about("Get current epoch"), + SubCommand::with_name("slot") + .about("Get current slot") + .alias("get-slot"), ) + .subcommand(SubCommand::with_name("block-height").about("Get current block height")) + .subcommand(SubCommand::with_name("epoch").about("Get current epoch")) .subcommand( - SubCommand::with_name("largest-accounts").about("Get addresses of largest cluster accounts") - .arg( - Arg::with_name("circulating") - .long("circulating") - .takes_value(false) - .help("Filter address list to only circulating accounts") - ) - .arg( - Arg::with_name("non_circulating") - .long("non-circulating") - .takes_value(false) - .conflicts_with("circulating") - .help("Filter address list to only non-circulating accounts") - ), + SubCommand::with_name("largest-accounts") + .about("Get addresses of largest cluster accounts") + .arg( + Arg::with_name("circulating") + .long("circulating") + .takes_value(false) + .help("Filter address list to only circulating accounts"), + ) + .arg( + Arg::with_name("non_circulating") + .long("non-circulating") + .takes_value(false) + .conflicts_with("circulating") + .help("Filter address list to only non-circulating accounts"), + ), ) .subcommand( - SubCommand::with_name("supply").about("Get information about the cluster supply of SOL") - .arg( - Arg::with_name("print_accounts") - .long("print-accounts") - .takes_value(false) - .help("Print list of non-circualting account addresses") - ), + SubCommand::with_name("supply") + .about("Get information about the cluster supply of SOL") + .arg( + Arg::with_name("print_accounts") + .long("print-accounts") + .takes_value(false) + .help("Print list of non-circualting account addresses"), + ), ) .subcommand( - SubCommand::with_name("total-supply").about("Get total number of SOL") - .setting(AppSettings::Hidden), + SubCommand::with_name("total-supply") + .about("Get total number of SOL") + .setting(AppSettings::Hidden), ) .subcommand( - SubCommand::with_name("transaction-count").about("Get current transaction count") - .alias("get-transaction-count"), + SubCommand::with_name("transaction-count") + .about("Get current transaction count") + .alias("get-transaction-count"), ) 
.subcommand( SubCommand::with_name("ping") @@ -265,7 +271,10 @@ impl ClusterQuerySubCommands for App<'_, '_> { .short("D") .long("print-timestamp") .takes_value(false) - .help("Print timestamp (unix time + microseconds as in gettimeofday) before each line"), + .help( + "Print timestamp (unix time + microseconds as in gettimeofday) before \ + each line", + ), ) .arg( Arg::with_name("timeout") @@ -286,20 +295,17 @@ impl ClusterQuerySubCommands for App<'_, '_> { .subcommand( SubCommand::with_name("logs") .about("Stream transaction logs") - .arg( - pubkey!(Arg::with_name("address") - .index(1) - .value_name("ADDRESS"), - "Account address to monitor \ - [default: monitor all transactions except for votes] \ - ") - ) + .arg(pubkey!( + Arg::with_name("address").index(1).value_name("ADDRESS"), + "Account address to monitor [default: monitor all transactions except for \ + votes] " + )) .arg( Arg::with_name("include_votes") .long("include-votes") .takes_value(false) .conflicts_with("address") - .help("Include vote transactions when monitoring all transactions") + .help("Include vote transactions when monitoring all transactions"), ), ) .subcommand( @@ -316,13 +322,16 @@ impl ClusterQuerySubCommands for App<'_, '_> { Arg::with_name("slot_limit") .long("slot-limit") .takes_value(true) - .help("Limit results to this many slots from the end of the epoch [default: full epoch]"), + .help( + "Limit results to this many slots from the end of the epoch \ + [default: full epoch]", + ), ), ) .subcommand( SubCommand::with_name("gossip") .about("Show the current gossip network nodes") - .alias("show-gossip") + .alias("show-gossip"), ) .subcommand( SubCommand::with_name("stakes") @@ -333,19 +342,19 @@ impl ClusterQuerySubCommands for App<'_, '_> { .takes_value(false) .help("Display balance in lamports instead of SOL"), ) - .arg( - pubkey!(Arg::with_name("vote_account_pubkeys") + .arg(pubkey!( + Arg::with_name("vote_account_pubkeys") .index(1) .value_name("VOTE_ACCOUNT_PUBKEYS") .multiple(true), - "Only show stake accounts delegated to the provided vote accounts. "), - ) - .arg( - pubkey!(Arg::with_name("withdraw_authority") - .value_name("PUBKEY") - .long("withdraw-authority"), - "Only show stake accounts with the provided withdraw authority. "), - ), + "Only show stake accounts delegated to the provided vote accounts. " + )) + .arg(pubkey!( + Arg::with_name("withdraw_authority") + .value_name("PUBKEY") + .long("withdraw-authority"), + "Only show stake accounts with the provided withdraw authority. " + )), ) .subcommand( SubCommand::with_name("validators") @@ -394,7 +403,7 @@ impl ClusterQuerySubCommands for App<'_, '_> { Arg::with_name("keep_unstaked_delinquents") .long("keep-unstaked-delinquents") .takes_value(false) - .help("Don't discard unstaked, delinquent validators") + .help("Don't discard unstaked, delinquent validators"), ) .arg( Arg::with_name("delinquent_slot_distance") @@ -402,25 +411,27 @@ impl ClusterQuerySubCommands for App<'_, '_> { .takes_value(true) .value_name("SLOT_DISTANCE") .validator(is_slot) - .help( - concatcp!( - "Minimum slot distance from the tip to consider a validator delinquent. 
[default: ", - DELINQUENT_VALIDATOR_SLOT_DISTANCE, - "]", - )) + .help(concatcp!( + "Minimum slot distance from the tip to consider a validator \ + delinquent [default: ", + DELINQUENT_VALIDATOR_SLOT_DISTANCE, + "]", + )), ), ) .subcommand( SubCommand::with_name("transaction-history") - .about("Show historical transactions affecting the given address \ - from newest to oldest") - .arg( - pubkey!(Arg::with_name("address") + .about( + "Show historical transactions affecting the given address from newest to \ + oldest", + ) + .arg(pubkey!( + Arg::with_name("address") .index(1) .value_name("ADDRESS") .required(true), - "Account address"), - ) + "Account address" + )) .arg( Arg::with_name("limit") .long("limit") @@ -442,18 +453,22 @@ impl ClusterQuerySubCommands for App<'_, '_> { .long("until") .value_name("TRANSACTION_SIGNATURE") .takes_value(true) - .help("List until this transaction signature, if found before limit reached"), + .help( + "List until this transaction signature, if found before limit reached", + ), ) .arg( Arg::with_name("show_transactions") .long("show-transactions") .takes_value(false) .help("Display the full transactions"), - ) + ), ) .subcommand( SubCommand::with_name("wait-for-max-stake") - .about("Wait for the max stake of any one node to drop below a percentage of total.") + .about( + "Wait for the max stake of any one node to drop below a percentage of total.", + ) .arg( Arg::with_name("max_percent") .long("max-percent") @@ -475,7 +490,10 @@ impl ClusterQuerySubCommands for App<'_, '_> { .map(|_| ()) .map_err(|e| e.to_string()) }) - .help("Length of data field in the account to calculate rent for, or moniker: [nonce, stake, system, vote]"), + .help( + "Length of data field in the account to calculate rent for, or \ + moniker: [nonce, stake, system, vote]", + ), ) .arg( Arg::with_name("lamports") @@ -502,8 +520,8 @@ pub fn parse_catchup( // requirement of node_pubkey is relaxed only if our_localhost_port if our_localhost_port.is_none() && node_pubkey.is_none() { return Err(CliError::BadParameter( - "OUR_VALIDATOR_PUBKEY (and possibly OUR_URL) must be specified \ - unless --our-localhost is given" + "OUR_VALIDATOR_PUBKEY (and possibly OUR_URL) must be specified unless --our-localhost \ + is given" .into(), )); } @@ -737,8 +755,7 @@ pub fn process_catchup( if node_json_rpc_url.is_some() && node_json_rpc_url != gussed_default { // go to new line to leave this message on console println!( - "Prefering explicitly given rpc ({}) as us, \ - although --our-localhost is given\n", + "Prefering explicitly given rpc ({}) as us, although --our-localhost is given\n", node_json_rpc_url.as_ref().unwrap() ); } else { @@ -754,8 +771,8 @@ pub fn process_catchup( (if node_pubkey.is_some() && node_pubkey != guessed_default { // go to new line to leave this message on console println!( - "Prefering explicitly given node pubkey ({}) as us, \ - although --our-localhost is given\n", + "Prefering explicitly given node pubkey ({}) as us, although --our-localhost \ + is given\n", node_pubkey.unwrap() ); node_pubkey @@ -807,13 +824,18 @@ pub fn process_catchup( if reported_node_pubkey != node_pubkey { return Err(format!( - "The identity reported by node RPC URL does not match. Expected: {node_pubkey:?}. Reported: {reported_node_pubkey:?}" + "The identity reported by node RPC URL does not match. Expected: {node_pubkey:?}. \ + Reported: {reported_node_pubkey:?}" ) .into()); } if rpc_client.get_identity()? 
== node_pubkey { - return Err("Both RPC URLs reference the same node, unable to monitor for catchup. Try a different --url".into()); + return Err( + "Both RPC URLs reference the same node, unable to monitor for catchup. Try a \ + different --url" + .into(), + ); } let mut previous_rpc_slot = std::u64::MAX; @@ -1213,44 +1235,45 @@ pub fn process_show_block_production( CliError::RpcRequestError("Failed to deserialize slot history".to_string()) })?; - let (confirmed_blocks, start_slot) = if start_slot >= slot_history.oldest() - && end_slot <= slot_history.newest() - { - // Fast, more reliable path using the SlotHistory sysvar + let (confirmed_blocks, start_slot) = + if start_slot >= slot_history.oldest() && end_slot <= slot_history.newest() { + // Fast, more reliable path using the SlotHistory sysvar - let confirmed_blocks: Vec<_> = (start_slot..=end_slot) - .filter(|slot| slot_history.check(*slot) == slot_history::Check::Found) - .collect(); - (confirmed_blocks, start_slot) - } else { - // Slow, less reliable path using `getBlocks`. - // - // "less reliable" because if the RPC node has holds in its ledger then the block production data will be - // incorrect. This condition currently can't be detected over RPC - // - - let minimum_ledger_slot = rpc_client.minimum_ledger_slot()?; - if minimum_ledger_slot > end_slot { - return Err(format!( - "Ledger data not available for slots {start_slot} to {end_slot} (minimum ledger slot is {minimum_ledger_slot})" + let confirmed_blocks: Vec<_> = (start_slot..=end_slot) + .filter(|slot| slot_history.check(*slot) == slot_history::Check::Found) + .collect(); + (confirmed_blocks, start_slot) + } else { + // Slow, less reliable path using `getBlocks`. + // + // "less reliable" because if the RPC node has holds in its ledger then the block production data will be + // incorrect. This condition currently can't be detected over RPC + // + + let minimum_ledger_slot = rpc_client.minimum_ledger_slot()?; + if minimum_ledger_slot > end_slot { + return Err(format!( + "Ledger data not available for slots {start_slot} to {end_slot} (minimum \ + ledger slot is {minimum_ledger_slot})" ) .into()); - } + } - if minimum_ledger_slot > start_slot { - progress_bar.println(format!( + if minimum_ledger_slot > start_slot { + progress_bar.println(format!( "{}", style(format!( - "Note: Requested start slot was {start_slot} but minimum ledger slot is {minimum_ledger_slot}" + "Note: Requested start slot was {start_slot} but minimum ledger slot is \ + {minimum_ledger_slot}" )) .italic(), )); - start_slot = minimum_ledger_slot; - } + start_slot = minimum_ledger_slot; + } - let confirmed_blocks = rpc_client.get_blocks(start_slot, Some(end_slot))?; - (confirmed_blocks, start_slot) - }; + let confirmed_blocks = rpc_client.get_blocks(start_slot, Some(end_slot))?; + (confirmed_blocks, start_slot) + }; let start_slot_index = (start_slot - first_slot_in_epoch) as usize; let end_slot_index = (end_slot - first_slot_in_epoch) as usize; @@ -1281,7 +1304,8 @@ pub fn process_show_block_production( } progress_bar.set_message(format!( - "Processing {total_slots} slots containing {total_blocks_produced} blocks and {total_slots_skipped} empty slots..." + "Processing {total_slots} slots containing {total_blocks_produced} blocks and \ + {total_slots_skipped} empty slots..." 
)); let mut confirmed_blocks_index = 0; diff --git a/cli/src/feature.rs b/cli/src/feature.rs index 708ea302b9ac27..65d117c2686c2d 100644 --- a/cli/src/feature.rs +++ b/cli/src/feature.rs @@ -240,7 +240,9 @@ impl fmt::Display for CliClusterSoftwareVersions { f, "{}", style(format!( - "{software_version_title:max_stake_percent_len$} {rpc_percent_title:>max_rpc_percent_len$}", + "{software_version_title:max_stake_percent_len$} \ + {rpc_percent_title:>max_rpc_percent_len$}", )) .bold(), )?; @@ -318,8 +320,12 @@ impl fmt::Display for CliClusterFeatureSets { writeln!( f, "\n{}", - style("To activate features the tool and cluster feature sets must match, select a tool version that matches the cluster") - .bold())?; + style( + "To activate features the tool and cluster feature sets must match, select a \ + tool version that matches the cluster" + ) + .bold() + )?; } else { if !self.stake_allowed { write!( @@ -349,7 +355,10 @@ impl fmt::Display for CliClusterFeatureSets { f, "{}", style(format!( - "{software_versions_title:max_stake_percent_len$} {rpc_percent_title:>max_rpc_percent_len$}", + "{software_versions_title:max_stake_percent_len$} \ + {rpc_percent_title:>max_rpc_percent_len$}", )) .bold(), )?; @@ -402,8 +411,8 @@ fn check_rpc_genesis_hash( if rpc_genesis_hash != genesis_hash { return Err(format!( "The genesis hash for the specified cluster {cluster_type:?} does not match the \ - genesis hash reported by the specified RPC. Cluster genesis hash: {genesis_hash}, \ - RPC reported genesis hash: {rpc_genesis_hash}" + genesis hash reported by the specified RPC. Cluster genesis hash: \ + {genesis_hash}, RPC reported genesis hash: {rpc_genesis_hash}" ) .into()); } @@ -927,11 +936,17 @@ fn process_activate( if !feature_activation_allowed(rpc_client, false)?.0 { match force { - ForceActivation::Almost => - return Err("Add force argument once more to override the sanity check to force feature activation ".into()), - ForceActivation::Yes => println!("FEATURE ACTIVATION FORCED"), - ForceActivation::No => - return Err("Feature activation is not allowed at this time".into()), + ForceActivation::Almost => { + return Err( + "Add force argument once more to override the sanity check to force feature \ + activation " + .into(), + ) + } + ForceActivation::Yes => println!("FEATURE ACTIVATION FORCED"), + ForceActivation::No => { + return Err("Feature activation is not allowed at this time".into()) + } } } diff --git a/cli/src/main.rs b/cli/src/main.rs index 8e14fbe26016dd..e1b4f94bc4ee86 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -27,7 +27,8 @@ fn parse_settings(matches: &ArgMatches<'_>) -> Result { println!( - "{} Either provide the `--config` arg or ensure home directory exists to use the default config location", + "{} Either provide the `--config` arg or ensure home directory exists to \ + use the default config location", style("No config file found.").bold() ); return Ok(false); diff --git a/cli/src/nonce.rs b/cli/src/nonce.rs index 8ec5b6a23b7182..166c1d39ed9dec 100644 --- a/cli/src/nonce.rs +++ b/cli/src/nonce.rs @@ -48,20 +48,20 @@ impl NonceSubCommands for App<'_, '_> { self.subcommand( SubCommand::with_name("authorize-nonce-account") .about("Assign account authority to a new entity") - .arg( - pubkey!(Arg::with_name("nonce_account_pubkey") + .arg(pubkey!( + Arg::with_name("nonce_account_pubkey") .index(1) .value_name("NONCE_ACCOUNT_ADDRESS") .required(true), - "Address of the nonce account. "), - ) - .arg( - pubkey!(Arg::with_name("new_authority") + "Address of the nonce account. 
" + )) + .arg(pubkey!( + Arg::with_name("new_authority") .index(2) .value_name("AUTHORITY_PUBKEY") .required(true), - "Account to be granted authority of the nonce account. "), - ) + "Account to be granted authority of the nonce account. " + )) .arg(nonce_authority_arg()) .arg(memo_arg()) .arg(compute_unit_price_arg()), @@ -85,20 +85,26 @@ impl NonceSubCommands for App<'_, '_> { .takes_value(true) .required(true) .validator(is_amount_or_all) - .help("The amount to load the nonce account with, in SOL; accepts keyword ALL"), + .help( + "The amount to load the nonce account with, in SOL; accepts keyword \ + ALL", + ), ) - .arg( - pubkey!(Arg::with_name(NONCE_AUTHORITY_ARG.name) + .arg(pubkey!( + Arg::with_name(NONCE_AUTHORITY_ARG.name) .long(NONCE_AUTHORITY_ARG.long) .value_name("PUBKEY"), - "Assign noncing authority to another entity. "), - ) + "Assign noncing authority to another entity. " + )) .arg( Arg::with_name("seed") .long("seed") .value_name("STRING") .takes_value(true) - .help("Seed for address generation; if specified, the resulting account will be at a derived address of the NONCE_ACCOUNT pubkey") + .help( + "Seed for address generation; if specified, the resulting account \ + will be at a derived address of the NONCE_ACCOUNT pubkey", + ), ) .arg(memo_arg()) .arg(compute_unit_price_arg()), @@ -107,24 +113,24 @@ impl NonceSubCommands for App<'_, '_> { SubCommand::with_name("nonce") .about("Get the current nonce value") .alias("get-nonce") - .arg( - pubkey!(Arg::with_name("nonce_account_pubkey") + .arg(pubkey!( + Arg::with_name("nonce_account_pubkey") .index(1) .value_name("NONCE_ACCOUNT_ADDRESS") .required(true), - "Address of the nonce account to display. "), - ), + "Address of the nonce account to display. " + )), ) .subcommand( SubCommand::with_name("new-nonce") .about("Generate a new nonce, rendering the existing nonce useless") - .arg( - pubkey!(Arg::with_name("nonce_account_pubkey") + .arg(pubkey!( + Arg::with_name("nonce_account_pubkey") .index(1) .value_name("NONCE_ACCOUNT_ADDRESS") .required(true), - "Address of the nonce account. "), - ) + "Address of the nonce account. " + )) .arg(nonce_authority_arg()) .arg(memo_arg()) .arg(compute_unit_price_arg()), @@ -133,13 +139,13 @@ impl NonceSubCommands for App<'_, '_> { SubCommand::with_name("nonce-account") .about("Show the contents of a nonce account") .alias("show-nonce-account") - .arg( - pubkey!(Arg::with_name("nonce_account_pubkey") + .arg(pubkey!( + Arg::with_name("nonce_account_pubkey") .index(1) .value_name("NONCE_ACCOUNT_ADDRESS") .required(true), - "Address of the nonce account to display. "), - ) + "Address of the nonce account to display. " + )) .arg( Arg::with_name("lamports") .long("lamports") @@ -150,20 +156,20 @@ impl NonceSubCommands for App<'_, '_> { .subcommand( SubCommand::with_name("withdraw-from-nonce-account") .about("Withdraw SOL from the nonce account") - .arg( - pubkey!(Arg::with_name("nonce_account_pubkey") + .arg(pubkey!( + Arg::with_name("nonce_account_pubkey") .index(1) .value_name("NONCE_ACCOUNT_ADDRESS") .required(true), - "Nonce account to withdraw from. "), - ) - .arg( - pubkey!(Arg::with_name("destination_account_pubkey") + "Nonce account to withdraw from. " + )) + .arg(pubkey!( + Arg::with_name("destination_account_pubkey") .index(2) .value_name("RECIPIENT_ADDRESS") .required(true), - "The account to which the SOL should be transferred. "), - ) + "The account to which the SOL should be transferred. 
" + )) .arg( Arg::with_name("amount") .index(3) @@ -179,15 +185,17 @@ impl NonceSubCommands for App<'_, '_> { ) .subcommand( SubCommand::with_name("upgrade-nonce-account") - .about("One-time idempotent upgrade of legacy nonce versions \ - in order to bump them out of chain blockhash domain.") - .arg( - pubkey!(Arg::with_name("nonce_account_pubkey") + .about( + "One-time idempotent upgrade of legacy nonce versions in order to bump them \ + out of chain blockhash domain.", + ) + .arg(pubkey!( + Arg::with_name("nonce_account_pubkey") .index(1) .value_name("NONCE_ACCOUNT_ADDRESS") .required(true), - "Nonce account to upgrade. "), - ) + "Nonce account to upgrade. " + )) .arg(memo_arg()) .arg(compute_unit_price_arg()), ) @@ -502,7 +510,8 @@ pub fn process_create_nonce_account( let minimum_balance = rpc_client.get_minimum_balance_for_rent_exemption(State::size())?; if lamports < minimum_balance { return Err(CliError::BadParameter(format!( - "need at least {minimum_balance} lamports for nonce account to be rent exempt, provided lamports: {lamports}" + "need at least {minimum_balance} lamports for nonce account to be rent exempt, \ + provided lamports: {lamports}" )) .into()); } diff --git a/cli/src/program.rs b/cli/src/program.rs index 7a37a0a93d2571..98cfa1c13bf2b2 100644 --- a/cli/src/program.rs +++ b/cli/src/program.rs @@ -12,7 +12,12 @@ use { solana_account_decoder::{UiAccountEncoding, UiDataSliceConfig}, solana_bpf_loader_program::syscalls::create_program_runtime_environment_v1, solana_clap_utils::{ - self, hidden_unless_forced, input_parsers::*, input_validators::*, keypair::*, + self, + fee_payer::{fee_payer_arg, FEE_PAYER_ARG}, + hidden_unless_forced, + input_parsers::*, + input_validators::*, + keypair::*, }, solana_cli_output::{ CliProgram, CliProgramAccountType, CliProgramAuthority, CliProgramBuffer, CliProgramId, @@ -63,15 +68,16 @@ use { }, }; -pub const CLOSE_PROGRAM_WARNING: &str = "WARNING! \ -Closed programs cannot be recreated at the same program id. \ -Once a program is closed, it can never be invoked again. \ -To proceed with closing, rerun the `close` command with the `--bypass-warning` flag"; +pub const CLOSE_PROGRAM_WARNING: &str = "WARNING! Closed programs cannot be recreated at the same \ + program id. Once a program is closed, it can never be \ + invoked again. 
To proceed with closing, rerun the \ + `close` command with the `--bypass-warning` flag"; #[derive(Debug, PartialEq, Eq)] pub enum ProgramCliCommand { Deploy { program_location: Option, + fee_payer_signer_index: SignerIndex, program_signer_index: Option, program_pubkey: Option, buffer_signer_index: Option, @@ -84,6 +90,7 @@ pub enum ProgramCliCommand { }, WriteBuffer { program_location: String, + fee_payer_signer_index: SignerIndex, buffer_signer_index: Option, buffer_pubkey: Option, buffer_authority_signer_index: SignerIndex, @@ -145,7 +152,7 @@ impl ProgramSubCommands for App<'_, '_> { .long("skip-fee-check") .hidden(hidden_unless_forced()) .takes_value(false) - .global(true) + .global(true), ) .subcommand( SubCommand::with_name("deploy") @@ -157,14 +164,17 @@ impl ProgramSubCommands for App<'_, '_> { .takes_value(true) .help("/path/to/program.so"), ) + .arg(fee_payer_arg()) .arg( Arg::with_name("buffer") .long("buffer") .value_name("BUFFER_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Intermediate buffer account to write data to, which can be used to resume a failed deploy \ - [default: random address]") + .help( + "Intermediate buffer account to write data to, which can be \ + used to resume a failed deploy [default: random address]", + ), ) .arg( Arg::with_name("upgrade_authority") @@ -172,19 +182,22 @@ impl ProgramSubCommands for App<'_, '_> { .value_name("UPGRADE_AUTHORITY_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Upgrade authority [default: the default configured keypair]") + .help( + "Upgrade authority [default: the default configured keypair]", + ), ) - .arg( - pubkey!(Arg::with_name("program_id") + .arg(pubkey!( + Arg::with_name("program_id") .long("program-id") .value_name("PROGRAM_ID"), - "Executable program's address, must be a keypair for initial deploys, can be a pubkey for upgrades \ - [default: address of keypair at /path/to/program-keypair.json if present, otherwise a random address]"), - ) + "Executable program's address, must be a keypair for initial deploys, \ + can be a pubkey for upgrades [default: address of keypair at \ + /path/to/program-keypair.json if present, otherwise a random address]" + )) .arg( Arg::with_name("final") .long("final") - .help("The program will not be upgradeable") + .help("The program will not be upgradeable"), ) .arg( Arg::with_name("max_len") @@ -192,14 +205,19 @@ impl ProgramSubCommands for App<'_, '_> { .value_name("max_len") .takes_value(true) .required(false) - .help("Maximum length of the upgradeable program \ - [default: twice the length of the original deployed program]") + .help( + "Maximum length of the upgradeable program \ + [default: twice the length of the original deployed program]", + ), ) .arg( Arg::with_name("allow_excessive_balance") .long("allow-excessive-deploy-account-balance") .takes_value(false) - .help("Use the designated program id even if the account already holds a large balance of SOL") + .help( + "Use the designated program id even if the account already \ + holds a large balance of SOL", + ), ), ) .subcommand( @@ -213,13 +231,16 @@ impl ProgramSubCommands for App<'_, '_> { .required(true) .help("/path/to/program.so"), ) + .arg(fee_payer_arg()) .arg( Arg::with_name("buffer") .long("buffer") .value_name("BUFFER_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Buffer account to write data into [default: random address]") + .help( + "Buffer account to write data into [default: random address]", + ), ) .arg( Arg::with_name("buffer_authority") @@ -227,7 
+248,7 @@ impl ProgramSubCommands for App<'_, '_> { .value_name("BUFFER_AUTHORITY_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Buffer authority [default: the default configured keypair]") + .help("Buffer authority [default: the default configured keypair]"), ) .arg( Arg::with_name("max_len") @@ -235,8 +256,10 @@ impl ProgramSubCommands for App<'_, '_> { .value_name("max_len") .takes_value(true) .required(false) - .help("Maximum length of the upgradeable program \ - [default: twice the length of the original deployed program]") + .help( + "Maximum length of the upgradeable program \ + [default: twice the length of the original deployed program]", + ), ), ) .subcommand( @@ -248,7 +271,7 @@ impl ProgramSubCommands for App<'_, '_> { .value_name("BUFFER_PUBKEY") .takes_value(true) .required(true) - .help("Public key of the buffer") + .help("Public key of the buffer"), ) .arg( Arg::with_name("buffer_authority") @@ -256,15 +279,15 @@ impl ProgramSubCommands for App<'_, '_> { .value_name("BUFFER_AUTHORITY_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Buffer authority [default: the default configured keypair]") + .help("Buffer authority [default: the default configured keypair]"), ) - .arg( - pubkey!(Arg::with_name("new_buffer_authority") + .arg(pubkey!( + Arg::with_name("new_buffer_authority") .long("new-buffer-authority") .value_name("NEW_BUFFER_AUTHORITY") .required(true), - "Address of the new buffer authority"), - ) + "Address of the new buffer authority" + )), ) .subcommand( SubCommand::with_name("set-upgrade-authority") @@ -275,7 +298,7 @@ impl ProgramSubCommands for App<'_, '_> { .value_name("PROGRAM_ADDRESS") .takes_value(true) .required(true) - .help("Address of the program to upgrade") + .help("Address of the program to upgrade"), ) .arg( Arg::with_name("upgrade_authority") @@ -283,7 +306,9 @@ impl ProgramSubCommands for App<'_, '_> { .value_name("UPGRADE_AUTHORITY_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Upgrade authority [default: the default configured keypair]") + .help( + "Upgrade authority [default: the default configured keypair]", + ), ) .arg( Arg::with_name("new_upgrade_authority") @@ -291,21 +316,32 @@ impl ProgramSubCommands for App<'_, '_> { .value_name("NEW_UPGRADE_AUTHORITY") .required_unless("final") .takes_value(true) - .help("New upgrade authority (keypair or pubkey). It is strongly recommended to pass in a keypair to prevent mistakes in setting the upgrade authority. You can opt out of this behavior by passing --skip-new-upgrade-authority-signer-check if you are really confident that you are setting the correct authority. Alternatively, If you wish to make the program immutable, you should ignore this arg and pass the --final flag." - ) + .help( + "New upgrade authority (keypair or pubkey). It is strongly \ + recommended to pass in a keypair to prevent mistakes in \ + setting the upgrade authority. You can opt out of this \ + behavior by passing \ + --skip-new-upgrade-authority-signer-check if you are really \ + confident that you are setting the correct authority. 
\ + Alternatively, If you wish to make the program immutable, \ + you should ignore this arg and pass the --final flag.", + ), ) .arg( Arg::with_name("final") .long("final") .conflicts_with("new_upgrade_authority") - .help("The program will not be upgradeable") + .help("The program will not be upgradeable"), ) .arg( Arg::with_name("skip_new_upgrade_authority_signer_check") .long("skip-new-upgrade-authority-signer-check") .requires("new_upgrade_authority") .takes_value(false) - .help("Set this flag if you don't want the new authority to sign the set-upgrade-authority transaction."), + .help( + "Set this flag if you don't want the new authority to sign \ + the set-upgrade-authority transaction.", + ), ), ) .subcommand( @@ -316,7 +352,7 @@ impl ProgramSubCommands for App<'_, '_> { .index(1) .value_name("ACCOUNT_ADDRESS") .takes_value(true) - .help("Address of the buffer or program to show") + .help("Address of the buffer or program to show"), ) .arg( Arg::with_name("programs") @@ -324,7 +360,7 @@ impl ProgramSubCommands for App<'_, '_> { .conflicts_with("account") .conflicts_with("buffers") .required_unless_one(&["account", "buffers"]) - .help("Show every upgradeable program that matches the authority") + .help("Show every upgradeable program that matches the authority"), ) .arg( Arg::with_name("buffers") @@ -332,22 +368,22 @@ impl ProgramSubCommands for App<'_, '_> { .conflicts_with("account") .conflicts_with("programs") .required_unless_one(&["account", "programs"]) - .help("Show every upgradeable buffer that matches the authority") + .help("Show every upgradeable buffer that matches the authority"), ) .arg( Arg::with_name("all") .long("all") .conflicts_with("account") .conflicts_with("buffer_authority") - .help("Show accounts for all authorities") + .help("Show accounts for all authorities"), ) - .arg( - pubkey!(Arg::with_name("buffer_authority") + .arg(pubkey!( + Arg::with_name("buffer_authority") .long("buffer-authority") .value_name("AUTHORITY") .conflicts_with("all"), - "Authority [default: the default configured keypair]"), - ) + "Authority [default: the default configured keypair]" + )) .arg( Arg::with_name("lamports") .long("lamports") @@ -364,7 +400,7 @@ impl ProgramSubCommands for App<'_, '_> { .value_name("ACCOUNT_ADDRESS") .takes_value(true) .required(true) - .help("Address of the buffer or program") + .help("Address of the buffer or program"), ) .arg( Arg::with_name("output_location") @@ -390,7 +426,7 @@ impl ProgramSubCommands for App<'_, '_> { .long("buffers") .conflicts_with("account") .required_unless("account") - .help("Close all buffer accounts that match the authority") + .help("Close all buffer accounts that match the authority"), ) .arg( Arg::with_name("authority") @@ -399,15 +435,18 @@ impl ProgramSubCommands for App<'_, '_> { .value_name("AUTHORITY_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Upgrade or buffer authority [default: the default configured keypair]") + .help( + "Upgrade or buffer authority [default: the default configured \ + keypair]", + ), ) - - .arg( - pubkey!(Arg::with_name("recipient_account") + .arg(pubkey!( + Arg::with_name("recipient_account") .long("recipient") .value_name("RECIPIENT_ADDRESS"), - "Address of the account to deposit the closed account's lamports [default: the default configured keypair]"), - ) + "Address of the account to deposit the closed account's lamports \ + [default: the default configured keypair]" + )) .arg( Arg::with_name("lamports") .long("lamports") @@ -423,7 +462,9 @@ impl ProgramSubCommands for 
App<'_, '_> { ) .subcommand( SubCommand::with_name("extend") - .about("Extend the length of an upgradeable program to deploy larger programs") + .about( + "Extend the length of an upgradeable program to deploy larger programs", + ) .arg( Arg::with_name("program_id") .index(1) @@ -440,14 +481,20 @@ impl ProgramSubCommands for App<'_, '_> { .takes_value(true) .required(true) .validator(is_parsable::) - .help("Number of bytes that will be allocated for the program's data account") - ) - ) + .help( + "Number of bytes that will be allocated for the program's \ + data account", + ), + ), + ), ) .subcommand( SubCommand::with_name("deploy") - .about("Deploy has been removed. Use `solana program deploy` instead to deploy upgradeable programs") - .setting(AppSettings::Hidden) + .about( + "Deploy has been removed. Use `solana program deploy` instead to deploy \ + upgradeable programs", + ) + .setting(AppSettings::Hidden), ) } } @@ -466,9 +513,13 @@ pub fn parse_program_subcommand( let response = match (subcommand, sub_matches) { ("deploy", Some(matches)) => { - let mut bulk_signers = vec![Some( - default_signer.signer_from_path(matches, wallet_manager)?, - )]; + let (fee_payer, fee_payer_pubkey) = + signer_of(matches, FEE_PAYER_ARG.name, wallet_manager)?; + + let mut bulk_signers = vec![ + Some(default_signer.signer_from_path(matches, wallet_manager)?), + fee_payer, // if None, default signer will be supplied + ]; let program_location = matches .value_of("program_location") @@ -504,6 +555,7 @@ pub fn parse_program_subcommand( CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Deploy { program_location, + fee_payer_signer_index: signer_info.index_of(fee_payer_pubkey).unwrap(), program_signer_index: signer_info.index_of_or_none(program_pubkey), program_pubkey, buffer_signer_index: signer_info.index_of_or_none(buffer_pubkey), @@ -520,9 +572,13 @@ pub fn parse_program_subcommand( } } ("write-buffer", Some(matches)) => { - let mut bulk_signers = vec![Some( - default_signer.signer_from_path(matches, wallet_manager)?, - )]; + let (fee_payer, fee_payer_pubkey) = + signer_of(matches, FEE_PAYER_ARG.name, wallet_manager)?; + + let mut bulk_signers = vec![ + Some(default_signer.signer_from_path(matches, wallet_manager)?), + fee_payer, // if None, default signer will be supplied + ]; let buffer_pubkey = if let Ok((buffer_signer, Some(buffer_pubkey))) = signer_of(matches, "buffer", wallet_manager) @@ -545,6 +601,7 @@ pub fn parse_program_subcommand( CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: matches.value_of("program_location").unwrap().to_string(), + fee_payer_signer_index: signer_info.index_of(fee_payer_pubkey).unwrap(), buffer_signer_index: signer_info.index_of_or_none(buffer_pubkey), buffer_pubkey, buffer_authority_signer_index: signer_info @@ -734,6 +791,7 @@ pub fn process_program_subcommand( match program_subcommand { ProgramCliCommand::Deploy { program_location, + fee_payer_signer_index, program_signer_index, program_pubkey, buffer_signer_index, @@ -747,6 +805,7 @@ pub fn process_program_subcommand( rpc_client, config, program_location, + *fee_payer_signer_index, *program_signer_index, *program_pubkey, *buffer_signer_index, @@ -759,6 +818,7 @@ pub fn process_program_subcommand( ), ProgramCliCommand::WriteBuffer { program_location, + fee_payer_signer_index, buffer_signer_index, buffer_pubkey, buffer_authority_signer_index, @@ -768,6 +828,7 @@ pub fn process_program_subcommand( rpc_client, config, program_location, + *fee_payer_signer_index, 
*buffer_signer_index, *buffer_pubkey, *buffer_authority_signer_index, @@ -873,12 +934,13 @@ fn get_default_program_keypair(program_location: &Option) -> Keypair { program_keypair } -/// Deploy using upgradeable loader +/// Deploy program using upgradeable loader. It also can process program upgrades #[allow(clippy::too_many_arguments)] fn process_program_deploy( rpc_client: Arc, config: &CliConfig, program_location: &Option, + fee_payer_signer_index: SignerIndex, program_signer_index: Option, program_pubkey: Option, buffer_signer_index: Option, @@ -889,7 +951,10 @@ fn process_program_deploy( allow_excessive_balance: bool, skip_fee_check: bool, ) -> ProcessResult { - let (words, mnemonic, buffer_keypair) = create_ephemeral_keypair()?; + let fee_payer_signer = config.signers[fee_payer_signer_index]; + let upgrade_authority_signer = config.signers[upgrade_authority_signer_index]; + + let (buffer_words, buffer_mnemonic, buffer_keypair) = create_ephemeral_keypair()?; let (buffer_provided, buffer_signer, buffer_pubkey) = if let Some(i) = buffer_signer_index { (true, Some(config.signers[i]), config.signers[i].pubkey()) } else if let Some(pubkey) = buffer_pubkey { @@ -901,7 +966,6 @@ fn process_program_deploy( buffer_keypair.pubkey(), ) }; - let upgrade_authority_signer = config.signers[upgrade_authority_signer_index]; let default_program_keypair = get_default_program_keypair(program_location); let (program_signer, program_pubkey) = if let Some(i) = program_signer_index { @@ -915,7 +979,7 @@ fn process_program_deploy( ) }; - let do_deploy = if let Some(account) = rpc_client + let do_initial_deploy = if let Some(account) = rpc_client .get_account_with_commitment(&program_pubkey, config.commitment)? .value { @@ -982,61 +1046,18 @@ fn process_program_deploy( let program_len = program_data.len(); (program_data, program_len) } else if buffer_provided { - // Check supplied buffer account - if let Some(account) = rpc_client - .get_account_with_commitment(&buffer_pubkey, config.commitment)? - .value - { - if !bpf_loader_upgradeable::check_id(&account.owner) { - return Err(format!( - "Buffer account {buffer_pubkey} is not owned by the BPF Upgradeable Loader", - ) - .into()); - } - - match account.state() { - Ok(UpgradeableLoaderState::Buffer { .. }) => { - // continue if buffer is initialized - } - Ok(UpgradeableLoaderState::Program { .. }) => { - return Err( - format!("Cannot use program account {buffer_pubkey} as buffer").into(), - ); - } - Ok(UpgradeableLoaderState::ProgramData { .. 
}) => { - return Err(format!( - "Cannot use program data account {buffer_pubkey} as buffer", - ) - .into()) - } - Ok(UpgradeableLoaderState::Uninitialized) => { - return Err(format!("Buffer account {buffer_pubkey} is not initialized").into()); - } - Err(_) => { - return Err( - format!("Buffer account {buffer_pubkey} could not be deserialized").into(), - ) - } - }; - - let program_len = account - .data - .len() - .saturating_sub(UpgradeableLoaderState::size_of_buffer_metadata()); - - (vec![], program_len) - } else { - return Err(format!( - "Buffer account {buffer_pubkey} not found, was it already consumed?", - ) - .into()); - } + ( + vec![], + fetch_buffer_len(&rpc_client, config, buffer_pubkey)?, + ) } else { return Err("Program location required if buffer not supplied".into()); }; - let programdata_len = if let Some(len) = max_len { + let program_data_max_len = if let Some(len) = max_len { if program_len > len { - return Err("Max length specified not large enough".into()); + return Err( + "Max length specified not large enough to accommodate desired program".into(), + ); } len } else if is_final { @@ -1044,11 +1065,12 @@ fn process_program_deploy( } else { program_len * 2 }; - let minimum_balance = rpc_client.get_minimum_balance_for_rent_exemption( - UpgradeableLoaderState::size_of_programdata(programdata_len), + + let min_rent_exempt_program_data_balance = rpc_client.get_minimum_balance_for_rent_exemption( + UpgradeableLoaderState::size_of_programdata(program_data_max_len), )?; - let result = if do_deploy { + let result = if do_initial_deploy { if program_signer.is_none() { return Err( "Initial deployments require a keypair be provided for the program id".into(), @@ -1059,9 +1081,10 @@ fn process_program_deploy( config, &program_data, program_len, - programdata_len, - minimum_balance, + program_data_max_len, + min_rent_exempt_program_data_balance, &bpf_loader_upgradeable::id(), + fee_payer_signer, Some(&[program_signer.unwrap(), upgrade_authority_signer]), buffer_signer, &buffer_pubkey, @@ -1074,8 +1097,11 @@ fn process_program_deploy( rpc_client.clone(), config, &program_data, + program_len, + min_rent_exempt_program_data_balance, + fee_payer_signer, &program_pubkey, - config.signers[upgrade_authority_signer_index], + upgrade_authority_signer, &buffer_pubkey, buffer_signer, skip_fee_check, @@ -1091,22 +1117,78 @@ fn process_program_deploy( None, )?; } - if result.is_err() && buffer_signer_index.is_none() { - report_ephemeral_mnemonic(words, mnemonic); + if result.is_err() && !buffer_provided { + // We might have deployed "temporary" buffer but failed to deploy our program from this + // buffer, reporting this to the user - so he can retry deploying re-using same buffer. + report_ephemeral_mnemonic(buffer_words, buffer_mnemonic); } result } +fn fetch_buffer_len( + rpc_client: &RpcClient, + config: &CliConfig, + buffer_pubkey: Pubkey, +) -> Result> { + // Check supplied buffer account + if let Some(account) = rpc_client + .get_account_with_commitment(&buffer_pubkey, config.commitment)? + .value + { + if !bpf_loader_upgradeable::check_id(&account.owner) { + return Err(format!( + "Buffer account {buffer_pubkey} is not owned by the BPF Upgradeable Loader", + ) + .into()); + } + + match account.state() { + Ok(UpgradeableLoaderState::Buffer { .. }) => { + // continue if buffer is initialized + } + Ok(UpgradeableLoaderState::Program { .. }) => { + return Err(format!("Cannot use program account {buffer_pubkey} as buffer").into()); + } + Ok(UpgradeableLoaderState::ProgramData { .. 
}) => { + return Err( + format!("Cannot use program data account {buffer_pubkey} as buffer",).into(), + ) + } + Ok(UpgradeableLoaderState::Uninitialized) => { + return Err(format!("Buffer account {buffer_pubkey} is not initialized").into()); + } + Err(_) => { + return Err( + format!("Buffer account {buffer_pubkey} could not be deserialized").into(), + ) + } + }; + + let program_len = account + .data + .len() + .saturating_sub(UpgradeableLoaderState::size_of_buffer_metadata()); + + Ok(program_len) + } else { + Err(format!("Buffer account {buffer_pubkey} not found, was it already consumed?",).into()) + } +} + fn process_write_buffer( rpc_client: Arc, config: &CliConfig, program_location: &str, + fee_payer_signer_index: SignerIndex, buffer_signer_index: Option, buffer_pubkey: Option, buffer_authority_signer_index: SignerIndex, max_len: Option, skip_fee_check: bool, ) -> ProcessResult { + let fee_payer_signer = config.signers[fee_payer_signer_index]; + let buffer_authority = config.signers[buffer_authority_signer_index]; + // Create ephemeral keypair to use for Buffer account, if not provided let (words, mnemonic, buffer_keypair) = create_ephemeral_keypair()?; let (buffer_signer, buffer_pubkey) = if let Some(i) = buffer_signer_index { @@ -1119,7 +1201,6 @@ fn process_write_buffer( buffer_keypair.pubkey(), ) }; - let buffer_authority = config.signers[buffer_authority_signer_index]; if let Some(account) = rpc_client .get_account_with_commitment(&buffer_pubkey, config.commitment)? @@ -1145,13 +1226,13 @@ fn process_write_buffer( } let program_data = read_and_verify_elf(program_location)?; - let buffer_data_len = if let Some(len) = max_len { + let buffer_data_max_len = if let Some(len) = max_len { len } else { program_data.len() }; - let minimum_balance = rpc_client.get_minimum_balance_for_rent_exemption( - UpgradeableLoaderState::size_of_programdata(buffer_data_len), + let min_rent_exempt_program_data_balance = rpc_client.get_minimum_balance_for_rent_exemption( + UpgradeableLoaderState::size_of_programdata(buffer_data_max_len), )?; let result = do_process_program_write_and_deploy( @@ -1159,9 +1240,10 @@ fn process_write_buffer( config, &program_data, program_data.len(), - program_data.len(), - minimum_balance, + buffer_data_max_len, + min_rent_exempt_program_data_balance, &bpf_loader_upgradeable::id(), + fee_payer_signer, None, buffer_signer, &buffer_pubkey, @@ -1169,7 +1251,6 @@ fn process_write_buffer( true, skip_fee_check, ); - if result.is_err() && buffer_signer_index.is_none() && buffer_signer.is_some() { report_ephemeral_mnemonic(words, mnemonic); } @@ -1381,7 +1462,8 @@ fn get_programs( let results = get_accounts_with_filter(rpc_client, filters, 0)?; if results.len() != 1 { return Err(format!( - "Error: More than one Program associated with ProgramData account {programdata_address}" + "Error: More than one Program associated with ProgramData account \ + {programdata_address}" ) .into()); } @@ -1881,11 +1963,12 @@ where fn do_process_program_write_and_deploy( rpc_client: Arc, config: &CliConfig, - program_data: &[u8], + program_data: &[u8], // can be empty, hence we have program_len program_len: usize, - programdata_len: usize, - minimum_balance: u64, + program_data_max_len: usize, + min_rent_exempt_program_data_balance: u64, loader_id: &Pubkey, + fee_payer_signer: &dyn Signer, program_signers: Option<&[&dyn Signer]>, buffer_signer: Option<&dyn Signer>, buffer_pubkey: &Pubkey, @@ -1902,7 +1985,7 @@ fn do_process_program_write_and_deploy( { complete_partial_program_init( loader_id, - 
&config.signers[0].pubkey(), + &fee_payer_signer.pubkey(), buffer_pubkey, &account, if loader_id == &bpf_loader_upgradeable::id() { @@ -1910,36 +1993,36 @@ fn do_process_program_write_and_deploy( } else { program_len }, - minimum_balance, + min_rent_exempt_program_data_balance, allow_excessive_balance, )? } else if loader_id == &bpf_loader_upgradeable::id() { ( bpf_loader_upgradeable::create_buffer( - &config.signers[0].pubkey(), + &fee_payer_signer.pubkey(), buffer_pubkey, &buffer_authority_signer.pubkey(), - minimum_balance, + min_rent_exempt_program_data_balance, program_len, )?, - minimum_balance, + min_rent_exempt_program_data_balance, ) } else { ( vec![system_instruction::create_account( - &config.signers[0].pubkey(), + &fee_payer_signer.pubkey(), buffer_pubkey, - minimum_balance, + min_rent_exempt_program_data_balance, program_len as u64, loader_id, )], - minimum_balance, + min_rent_exempt_program_data_balance, ) }; let initial_message = if !initial_instructions.is_empty() { Some(Message::new_with_blockhash( &initial_instructions, - Some(&config.signers[0].pubkey()), + Some(&fee_payer_signer.pubkey()), &blockhash, )) } else { @@ -1947,7 +2030,6 @@ fn do_process_program_write_and_deploy( }; // Create and add write messages - let payer_pubkey = config.signers[0].pubkey(); let create_msg = |offset: u32, bytes: Vec| { let instruction = if loader_id == &bpf_loader_upgradeable::id() { bpf_loader_upgradeable::write( @@ -1959,7 +2041,7 @@ fn do_process_program_write_and_deploy( } else { loader_instruction::write(buffer_pubkey, loader_id, offset, bytes) }; - Message::new_with_blockhash(&[instruction], Some(&payer_pubkey), &blockhash) + Message::new_with_blockhash(&[instruction], Some(&fee_payer_signer.pubkey()), &blockhash) }; let mut write_messages = vec![]; @@ -1973,22 +2055,22 @@ fn do_process_program_write_and_deploy( let message = if loader_id == &bpf_loader_upgradeable::id() { Message::new_with_blockhash( &bpf_loader_upgradeable::deploy_with_max_program_len( - &config.signers[0].pubkey(), + &fee_payer_signer.pubkey(), &program_signers[0].pubkey(), buffer_pubkey, &program_signers[1].pubkey(), rpc_client.get_minimum_balance_for_rent_exemption( UpgradeableLoaderState::size_of_program(), )?, - programdata_len, + program_data_max_len, )?, - Some(&config.signers[0].pubkey()), + Some(&fee_payer_signer.pubkey()), &blockhash, ) } else { Message::new_with_blockhash( &[loader_instruction::finalize(buffer_pubkey, loader_id)], - Some(&config.signers[0].pubkey()), + Some(&fee_payer_signer.pubkey()), &blockhash, ) }; @@ -2001,6 +2083,7 @@ fn do_process_program_write_and_deploy( check_payer( &rpc_client, config, + fee_payer_signer.pubkey(), balance_needed, &initial_message, &write_messages, @@ -2014,6 +2097,7 @@ fn do_process_program_write_and_deploy( &initial_message, &write_messages, &final_message, + fee_payer_signer, buffer_signer, Some(buffer_authority_signer), program_signers, @@ -2032,23 +2116,20 @@ fn do_process_program_write_and_deploy( } } +#[allow(clippy::too_many_arguments)] fn do_process_program_upgrade( rpc_client: Arc, config: &CliConfig, - program_data: &[u8], + program_data: &[u8], // can be empty, hence we have program_len + program_len: usize, + min_rent_exempt_program_data_balance: u64, + fee_payer_signer: &dyn Signer, program_id: &Pubkey, upgrade_authority: &dyn Signer, buffer_pubkey: &Pubkey, buffer_signer: Option<&dyn Signer>, skip_fee_check: bool, ) -> ProcessResult { - let loader_id = bpf_loader_upgradeable::id(); - let data_len = program_data.len(); - let minimum_balance = 
rpc_client.get_minimum_balance_for_rent_exemption( - UpgradeableLoaderState::size_of_programdata(data_len), - )?; - - // Build messages to calculate fees let blockhash = rpc_client.get_latest_blockhash()?; let (initial_message, write_messages, balance_needed) = @@ -2059,31 +2140,31 @@ fn do_process_program_upgrade( .value { complete_partial_program_init( - &loader_id, - &config.signers[0].pubkey(), + &bpf_loader_upgradeable::id(), + &fee_payer_signer.pubkey(), &buffer_signer.pubkey(), &account, - UpgradeableLoaderState::size_of_buffer(data_len), - minimum_balance, + UpgradeableLoaderState::size_of_buffer(program_len), + min_rent_exempt_program_data_balance, true, )? } else { ( bpf_loader_upgradeable::create_buffer( - &config.signers[0].pubkey(), + &fee_payer_signer.pubkey(), buffer_pubkey, &upgrade_authority.pubkey(), - minimum_balance, - data_len, + min_rent_exempt_program_data_balance, + program_len, )?, - minimum_balance, + min_rent_exempt_program_data_balance, ) }; let initial_message = if !initial_instructions.is_empty() { Some(Message::new_with_blockhash( &initial_instructions, - Some(&config.signers[0].pubkey()), + Some(&fee_payer_signer.pubkey()), &blockhash, )) } else { @@ -2092,7 +2173,6 @@ fn do_process_program_upgrade( let buffer_signer_pubkey = buffer_signer.pubkey(); let upgrade_authority_pubkey = upgrade_authority.pubkey(); - let payer_pubkey = config.signers[0].pubkey(); let create_msg = |offset: u32, bytes: Vec| { let instruction = bpf_loader_upgradeable::write( &buffer_signer_pubkey, @@ -2100,7 +2180,11 @@ fn do_process_program_upgrade( offset, bytes, ); - Message::new_with_blockhash(&[instruction], Some(&payer_pubkey), &blockhash) + Message::new_with_blockhash( + &[instruction], + Some(&fee_payer_signer.pubkey()), + &blockhash, + ) }; // Create and add write messages @@ -2121,9 +2205,9 @@ fn do_process_program_upgrade( program_id, buffer_pubkey, &upgrade_authority.pubkey(), - &config.signers[0].pubkey(), + &fee_payer_signer.pubkey(), )], - Some(&config.signers[0].pubkey()), + Some(&fee_payer_signer.pubkey()), &blockhash, ); let final_message = Some(final_message); @@ -2132,6 +2216,7 @@ fn do_process_program_upgrade( check_payer( &rpc_client, config, + fee_payer_signer.pubkey(), balance_needed, &initial_message, &write_messages, @@ -2145,6 +2230,7 @@ fn do_process_program_upgrade( &initial_message, &write_messages, &final_message, + fee_payer_signer, buffer_signer, Some(upgrade_authority), Some(&[upgrade_authority]), @@ -2237,6 +2323,7 @@ fn complete_partial_program_init( fn check_payer( rpc_client: &RpcClient, config: &CliConfig, + fee_payer_pubkey: Pubkey, balance_needed: u64, initial_message: &Option, write_messages: &[Message], @@ -2257,7 +2344,7 @@ fn check_payer( } check_account_for_spend_and_fee_with_commitment( rpc_client, - &config.signers[0].pubkey(), + &fee_payer_pubkey, balance_needed, fee, config.commitment, @@ -2271,12 +2358,11 @@ fn send_deploy_messages( initial_message: &Option, write_messages: &[Message], final_message: &Option, + fee_payer_signer: &dyn Signer, initial_signer: Option<&dyn Signer>, write_signer: Option<&dyn Signer>, final_signers: Option<&[&dyn Signer]>, ) -> Result<(), Box> { - let payer_signer = config.signers[0]; - if let Some(message) = initial_message { if let Some(initial_signer) = initial_signer { trace!("Preparing the required accounts"); @@ -2288,9 +2374,9 @@ fn send_deploy_messages( // This check is to ensure signing does not fail on a KeypairPubkeyMismatch error from an // extraneous signature. 
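// When the initial message requires two signatures, both the fee payer and the initial (buffer) signer sign it; otherwise the fee payer alone signs.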
if message.header.num_required_signatures == 2 { - initial_transaction.try_sign(&[payer_signer, initial_signer], blockhash)?; + initial_transaction.try_sign(&[fee_payer_signer, initial_signer], blockhash)?; } else { - initial_transaction.try_sign(&[payer_signer], blockhash)?; + initial_transaction.try_sign(&[fee_payer_signer], blockhash)?; } let result = rpc_client.send_and_confirm_transaction_with_spinner(&initial_transaction); log_instruction_custom_error::(result, config) @@ -2317,7 +2403,7 @@ fn send_deploy_messages( )? .send_and_confirm_messages_with_spinner( write_messages, - &[payer_signer, write_signer], + &[fee_payer_signer, write_signer], ), ConnectionCache::Quic(cache) => { let tpu_client_fut = solana_client::nonblocking::tpu_client::TpuClient::new_with_connection_cache( @@ -2335,7 +2421,7 @@ fn send_deploy_messages( rpc_client.clone(), Some(tpu_client), write_messages, - &[payer_signer, write_signer], + &[fee_payer_signer, write_signer], SendAndConfirmConfig { resign_txs_count: Some(5), with_spinner: true, @@ -2366,7 +2452,7 @@ fn send_deploy_messages( let mut final_tx = Transaction::new_unsigned(message.clone()); let mut signers = final_signers.to_vec(); - signers.push(payer_signer); + signers.push(fee_payer_signer); final_tx.try_sign(&signers, blockhash)?; rpc_client .send_and_confirm_transaction_with_spinner_and_config( @@ -2454,6 +2540,7 @@ mod tests { CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some("/Users/test/program.so".to_string()), + fee_payer_signer_index: 0, buffer_signer_index: None, buffer_pubkey: None, program_signer_index: None, @@ -2481,6 +2568,7 @@ mod tests { CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some("/Users/test/program.so".to_string()), + fee_payer_signer_index: 0, buffer_signer_index: None, buffer_pubkey: None, program_signer_index: None, @@ -2510,6 +2598,7 @@ mod tests { CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Deploy { program_location: None, + fee_payer_signer_index: 0, buffer_signer_index: Some(1), buffer_pubkey: Some(buffer_keypair.pubkey()), program_signer_index: None, @@ -2541,6 +2630,7 @@ mod tests { CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some("/Users/test/program.so".to_string()), + fee_payer_signer_index: 0, buffer_signer_index: None, buffer_pubkey: None, program_signer_index: None, @@ -2571,6 +2661,7 @@ mod tests { CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some("/Users/test/program.so".to_string()), + fee_payer_signer_index: 0, buffer_signer_index: None, buffer_pubkey: None, program_signer_index: Some(1), @@ -2604,6 +2695,7 @@ mod tests { CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some("/Users/test/program.so".to_string()), + fee_payer_signer_index: 0, buffer_signer_index: None, buffer_pubkey: None, program_signer_index: None, @@ -2633,6 +2725,7 @@ mod tests { CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some("/Users/test/program.so".to_string()), + fee_payer_signer_index: 0, buffer_signer_index: None, buffer_pubkey: None, program_signer_index: None, @@ -2670,6 +2763,7 @@ mod tests { CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: "/Users/test/program.so".to_string(), + fee_payer_signer_index: 0, buffer_signer_index: None, buffer_pubkey: None, buffer_authority_signer_index: 0, 
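Reviewer note, not part of the patch: the expectations above pin `fee_payer_signer_index: 0` because these test invocations pass no `--fee-payer`, so the default signer doubles as the fee payer. A minimal standalone sketch of that resolution, assuming the `CliSignerInfo::index_of` helper from solana-clap-utils and the `SignerIndex` alias behave as they do elsewhere in this file; the helper function itself is hypothetical, the real code inlines the call:

use {
    crate::cli::SignerIndex,
    solana_clap_utils::keypair::CliSignerInfo,
    solana_sdk::pubkey::Pubkey,
};

// Hypothetical helper for illustration only.
fn resolve_fee_payer_index(
    fee_payer_pubkey: Option<Pubkey>,
    signer_info: &CliSignerInfo,
) -> SignerIndex {
    // With no --fee-payer, `fee_payer_pubkey` is None and index_of() falls back to
    // the default signer at index 0, matching the test expectations above; with
    // --fee-payer, it returns that signer's position in the de-duplicated signer list.
    signer_info.index_of(fee_payer_pubkey).unwrap()
}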
@@ -2694,6 +2788,7 @@ mod tests { CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: "/Users/test/program.so".to_string(), + fee_payer_signer_index: 0, buffer_signer_index: None, buffer_pubkey: None, buffer_authority_signer_index: 0, @@ -2721,6 +2816,7 @@ mod tests { CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: "/Users/test/program.so".to_string(), + fee_payer_signer_index: 0, buffer_signer_index: Some(1), buffer_pubkey: Some(buffer_keypair.pubkey()), buffer_authority_signer_index: 0, @@ -2751,6 +2847,7 @@ mod tests { CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: "/Users/test/program.so".to_string(), + fee_payer_signer_index: 0, buffer_signer_index: None, buffer_pubkey: None, buffer_authority_signer_index: 1, @@ -2786,6 +2883,7 @@ mod tests { CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: "/Users/test/program.so".to_string(), + fee_payer_signer_index: 0, buffer_signer_index: Some(1), buffer_pubkey: Some(buffer_keypair.pubkey()), buffer_authority_signer_index: 2, @@ -3315,6 +3413,7 @@ mod tests { rpc_client: Some(Arc::new(RpcClient::new_mock("".to_string()))), command: CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(program_location.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, buffer_signer_index: None, buffer_pubkey: None, program_signer_index: None, diff --git a/cli/src/program_v4.rs b/cli/src/program_v4.rs index 198ecf7ea6dc7b..125e64de38189e 100644 --- a/cli/src/program_v4.rs +++ b/cli/src/program_v4.rs @@ -114,7 +114,10 @@ impl ProgramV4SubCommands for App<'_, '_> { .value_name("PROGRAM_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Program account signer. The program data is written to the associated account.") + .help( + "Program account signer. 
The program data is written to the \ + associated account.", + ), ) .arg( Arg::with_name("authority") @@ -122,7 +125,9 @@ impl ProgramV4SubCommands for App<'_, '_> { .value_name("AUTHORITY_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Program authority [default: the default configured keypair]") + .help( + "Program authority [default: the default configured keypair]", + ), ), ) .subcommand( @@ -140,7 +145,7 @@ impl ProgramV4SubCommands for App<'_, '_> { .long("program-id") .value_name("PROGRAM_ID") .takes_value(true) - .help("Executable program's address") + .help("Executable program's address"), ) .arg( Arg::with_name("buffer") @@ -148,7 +153,10 @@ impl ProgramV4SubCommands for App<'_, '_> { .value_name("BUFFER_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Optional intermediate buffer account to write data to, which can be used to resume a failed deploy") + .help( + "Optional intermediate buffer account to write data to, which \ + can be used to resume a failed deploy", + ), ) .arg( Arg::with_name("authority") @@ -156,7 +164,9 @@ impl ProgramV4SubCommands for App<'_, '_> { .value_name("AUTHORITY_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Program authority [default: the default configured keypair]") + .help( + "Program authority [default: the default configured keypair]", + ), ), ) .subcommand( @@ -167,7 +177,7 @@ impl ProgramV4SubCommands for App<'_, '_> { .long("program-id") .value_name("PROGRAM_ID") .takes_value(true) - .help("Executable program's address") + .help("Executable program's address"), ) .arg( Arg::with_name("authority") @@ -175,7 +185,9 @@ impl ProgramV4SubCommands for App<'_, '_> { .value_name("AUTHORITY_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Program authority [default: the default configured keypair]") + .help( + "Program authority [default: the default configured keypair]", + ), ), ) .subcommand( @@ -186,7 +198,7 @@ impl ProgramV4SubCommands for App<'_, '_> { .long("program-id") .value_name("PROGRAM_ID") .takes_value(true) - .help("Executable program's address") + .help("Executable program's address"), ) .arg( Arg::with_name("authority") @@ -194,7 +206,9 @@ impl ProgramV4SubCommands for App<'_, '_> { .value_name("AUTHORITY_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help("Program authority [default: the default configured keypair]") + .help( + "Program authority [default: the default configured keypair]", + ), ), ) .subcommand( @@ -205,22 +219,22 @@ impl ProgramV4SubCommands for App<'_, '_> { .index(1) .value_name("ACCOUNT_ADDRESS") .takes_value(true) - .help("Address of the program to show") + .help("Address of the program to show"), ) .arg( Arg::with_name("all") .long("all") .conflicts_with("account") .conflicts_with("buffer_authority") - .help("Show accounts for all authorities") + .help("Show accounts for all authorities"), ) - .arg( - pubkey!(Arg::with_name("authority") + .arg(pubkey!( + Arg::with_name("authority") .long("authority") .value_name("AUTHORITY") .conflicts_with("all"), - "Authority [default: the default configured keypair]"), - ), + "Authority [default: the default configured keypair]" + )), ) .subcommand( SubCommand::with_name("dump") @@ -231,7 +245,7 @@ impl ProgramV4SubCommands for App<'_, '_> { .value_name("ACCOUNT_ADDRESS") .takes_value(true) .required(true) - .help("Address of the buffer or program") + .help("Address of the buffer or program"), ) .arg( Arg::with_name("output_location") @@ -241,7 +255,7 @@ impl ProgramV4SubCommands for App<'_, '_> { 
.required(true) .help("/path/to/program.so"), ), - ) + ), ) } } @@ -955,7 +969,11 @@ fn build_create_buffer_message( if account.lamports < lamports_required || account.data.len() != expected_account_data_len { if program_address == buffer_address { - return Err("Buffer account passed could be for a different deploy? It has different size/lamports".into()); + return Err( + "Buffer account passed could be for a different deploy? It has different \ + size/lamports" + .into(), + ); } let (truncate_instructions, balance_needed) = build_truncate_instructions( diff --git a/cli/src/stake.rs b/cli/src/stake.rs index 31cbaab21802e1..051979327363ac 100644 --- a/cli/src/stake.rs +++ b/cli/src/stake.rs @@ -144,7 +144,10 @@ impl StakeSubCommands for App<'_, '_> { .takes_value(true) .required(true) .validator(is_valid_signer) - .help("Stake account to create (or base of derived address if --seed is used)") + .help( + "Stake account to create (or base of derived address if --seed is \ + used)", + ), ) .arg( Arg::with_name("amount") @@ -153,28 +156,35 @@ impl StakeSubCommands for App<'_, '_> { .takes_value(true) .validator(is_amount_or_all) .required(true) - .help("The amount to send to the stake account, in SOL; accepts keyword ALL") + .help( + "The amount to send to the stake account, in SOL; accepts keyword ALL", + ), ) - .arg( - pubkey!(Arg::with_name("custodian") + .arg(pubkey!( + Arg::with_name("custodian") .long("custodian") .value_name("PUBKEY"), - "Authority to modify lockups. ") - ) + "Authority to modify lockups" + )) .arg( Arg::with_name("seed") .long("seed") .value_name("STRING") .takes_value(true) - .help("Seed for address generation; if specified, the resulting account \ - will be at a derived address of the STAKE_ACCOUNT_KEYPAIR pubkey") + .help( + "Seed for address generation; if specified, the resulting account \ + will be at a derived address of the STAKE_ACCOUNT_KEYPAIR pubkey", + ), ) .arg( Arg::with_name("lockup_epoch") .long("lockup-epoch") .value_name("NUMBER") .takes_value(true) - .help("The epoch height at which this account will be available for withdrawal") + .help( + "The epoch height at which this account will be available for \ + withdrawal", + ), ) .arg( Arg::with_name("lockup_date") @@ -182,7 +192,10 @@ impl StakeSubCommands for App<'_, '_> { .value_name("RFC3339 DATETIME") .validator(is_rfc3339_datetime) .takes_value(true) - .help("The date and time at which this account will be available for withdrawal") + .help( + "The date and time at which this account will be available for \ + withdrawal", + ), ) .arg( Arg::with_name(STAKE_AUTHORITY_ARG.name) @@ -190,7 +203,7 @@ impl StakeSubCommands for App<'_, '_> { .value_name("PUBKEY") .takes_value(true) .validator(is_valid_pubkey) - .help(STAKE_AUTHORITY_ARG.help) + .help(STAKE_AUTHORITY_ARG.help), ) .arg( Arg::with_name(WITHDRAW_AUTHORITY_ARG.name) @@ -198,7 +211,7 @@ impl StakeSubCommands for App<'_, '_> { .value_name("PUBKEY") .takes_value(true) .validator(is_valid_pubkey) - .help(WITHDRAW_AUTHORITY_ARG.help) + .help(WITHDRAW_AUTHORITY_ARG.help), ) .arg( Arg::with_name("from") @@ -212,7 +225,7 @@ impl StakeSubCommands for App<'_, '_> { .nonce_args(false) .arg(fee_payer_arg()) .arg(memo_arg()) - .arg(compute_unit_price_arg()) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("create-stake-account-checked") @@ -224,7 +237,10 @@ impl StakeSubCommands for App<'_, '_> { .takes_value(true) .required(true) .validator(is_valid_signer) - .help("Stake account to create (or base of derived address if --seed is 
used)") + .help( + "Stake account to create (or base of derived address if --seed is \ + used)", + ), ) .arg( Arg::with_name("amount") @@ -233,15 +249,19 @@ impl StakeSubCommands for App<'_, '_> { .takes_value(true) .validator(is_amount_or_all) .required(true) - .help("The amount to send to the stake account, in SOL; accepts keyword ALL") + .help( + "The amount to send to the stake account, in SOL; accepts keyword ALL", + ), ) .arg( Arg::with_name("seed") .long("seed") .value_name("STRING") .takes_value(true) - .help("Seed for address generation; if specified, the resulting account \ - will be at a derived address of the STAKE_ACCOUNT_KEYPAIR pubkey") + .help( + "Seed for address generation; if specified, the resulting account \ + will be at a derived address of the STAKE_ACCOUNT_KEYPAIR pubkey", + ), ) .arg( Arg::with_name(STAKE_AUTHORITY_ARG.name) @@ -249,7 +269,7 @@ impl StakeSubCommands for App<'_, '_> { .value_name("PUBKEY") .takes_value(true) .validator(is_valid_pubkey) - .help(STAKE_AUTHORITY_ARG.help) + .help(STAKE_AUTHORITY_ARG.help), ) .arg( Arg::with_name(WITHDRAW_AUTHORITY_ARG.name) @@ -257,7 +277,7 @@ impl StakeSubCommands for App<'_, '_> { .value_name("KEYPAIR") .takes_value(true) .validator(is_valid_signer) - .help(WITHDRAW_AUTHORITY_ARG.help) + .help(WITHDRAW_AUTHORITY_ARG.help), ) .arg( Arg::with_name("from") @@ -271,7 +291,7 @@ impl StakeSubCommands for App<'_, '_> { .nonce_args(false) .arg(fee_payer_arg()) .arg(memo_arg()) - .arg(compute_unit_price_arg()) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("delegate-stake") @@ -281,28 +301,28 @@ impl StakeSubCommands for App<'_, '_> { .long("force") .takes_value(false) .hidden(hidden_unless_forced()) // Don't document this argument to discourage its use - .help("Override vote account sanity checks (use carefully!)") + .help("Override vote account sanity checks (use carefully!)"), ) - .arg( - pubkey!(Arg::with_name("stake_account_pubkey") + .arg(pubkey!( + Arg::with_name("stake_account_pubkey") .index(1) .value_name("STAKE_ACCOUNT_ADDRESS") .required(true), - "Stake account to delegate") - ) - .arg( - pubkey!(Arg::with_name("vote_account_pubkey") + "Stake account to delegate" + )) + .arg(pubkey!( + Arg::with_name("vote_account_pubkey") .index(2) .value_name("VOTE_ACCOUNT_ADDRESS") .required(true), - "The vote account to which the stake will be delegated") - ) + "The vote account to which the stake will be delegated" + )) .arg(stake_authority_arg()) .offline_args() .nonce_args(false) .arg(fee_payer_arg()) .arg(memo_arg()) - .arg(compute_unit_price_arg()) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("redelegate-stake") @@ -312,24 +332,24 @@ impl StakeSubCommands for App<'_, '_> { .long("force") .takes_value(false) .hidden(hidden_unless_forced()) // Don't document this argument to discourage its use - .help("Override vote account sanity checks (use carefully!)") + .help("Override vote account sanity checks (use carefully!)"), ) - .arg( - pubkey!(Arg::with_name("stake_account_pubkey") + .arg(pubkey!( + Arg::with_name("stake_account_pubkey") .index(1) .value_name("STAKE_ACCOUNT_ADDRESS") .required(true), - "Existing delegated stake account that has been fully activated. \ - On success this stake account will be scheduled for deactivation and the rent-exempt balance \ - may be withdrawn once fully deactivated") - ) - .arg( - pubkey!(Arg::with_name("vote_account_pubkey") + "Existing delegated stake account that has been fully activated. 
On success \ + this stake account will be scheduled for deactivation and the rent-exempt \ + balance may be withdrawn once fully deactivated" + )) + .arg(pubkey!( + Arg::with_name("vote_account_pubkey") .index(2) .value_name("REDELEGATED_VOTE_ACCOUNT_ADDRESS") .required(true), - "The vote account to which the stake will be redelegated") - ) + "The vote account to which the stake will be redelegated" + )) .arg( Arg::with_name("redelegation_stake_account") .index(3) @@ -337,42 +357,43 @@ impl StakeSubCommands for App<'_, '_> { .takes_value(true) .required(true) .validator(is_valid_signer) - .help("Stake account to create for the redelegation. \ - On success this stake account will be created and scheduled for activation with all \ - the stake in the existing stake account, exclusive of the rent-exempt balance retained \ - in the existing account") + .help( + "Stake account to create for the redelegation. On success this stake \ + account will be created and scheduled for activation with all the \ + stake in the existing stake account, exclusive of the rent-exempt \ + balance retained in the existing account", + ), ) .arg(stake_authority_arg()) .offline_args() .nonce_args(false) .arg(fee_payer_arg()) - .arg(memo_arg()) + .arg(memo_arg()), ) - .subcommand( SubCommand::with_name("stake-authorize") .about("Authorize a new signing keypair for the given stake account") - .arg( - pubkey!(Arg::with_name("stake_account_pubkey") + .arg(pubkey!( + Arg::with_name("stake_account_pubkey") .required(true) .index(1) .value_name("STAKE_ACCOUNT_ADDRESS"), - "Stake account in which to set a new authority. ") - ) - .arg( - pubkey!(Arg::with_name("new_stake_authority") + "Stake account in which to set a new authority. " + )) + .arg(pubkey!( + Arg::with_name("new_stake_authority") .long("new-stake-authority") .required_unless("new_withdraw_authority") .value_name("PUBKEY"), - "New authorized staker") - ) - .arg( - pubkey!(Arg::with_name("new_withdraw_authority") + "New authorized staker" + )) + .arg(pubkey!( + Arg::with_name("new_withdraw_authority") .long("new-withdraw-authority") .required_unless("new_stake_authority") .value_name("PUBKEY"), - "New authorized withdrawer. ") - ) + "New authorized withdrawer. " + )) .arg(stake_authority_arg()) .arg(withdraw_authority_arg()) .offline_args() @@ -383,21 +404,27 @@ impl StakeSubCommands for App<'_, '_> { Arg::with_name("no_wait") .long("no-wait") .takes_value(false) - .help("Return signature immediately after submitting the transaction, instead of waiting for confirmations"), + .help( + "Return signature immediately after submitting the transaction, \ + instead of waiting for confirmations", + ), ) .arg(memo_arg()) - .arg(compute_unit_price_arg()) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("stake-authorize-checked") - .about("Authorize a new signing keypair for the given stake account, checking the authority as a signer") - .arg( - pubkey!(Arg::with_name("stake_account_pubkey") + .about( + "Authorize a new signing keypair for the given stake account, checking the \ + authority as a signer", + ) + .arg(pubkey!( + Arg::with_name("stake_account_pubkey") .required(true) .index(1) .value_name("STAKE_ACCOUNT_ADDRESS"), - "Stake account in which to set a new authority. ") - ) + "Stake account in which to set a new authority. 
" + )) .arg( Arg::with_name("new_stake_authority") .long("new-stake-authority") @@ -405,7 +432,7 @@ impl StakeSubCommands for App<'_, '_> { .takes_value(true) .validator(is_valid_signer) .required_unless("new_withdraw_authority") - .help("New authorized staker") + .help("New authorized staker"), ) .arg( Arg::with_name("new_withdraw_authority") @@ -414,7 +441,7 @@ impl StakeSubCommands for App<'_, '_> { .takes_value(true) .validator(is_valid_signer) .required_unless("new_stake_authority") - .help("New authorized withdrawer") + .help("New authorized withdrawer"), ) .arg(stake_authority_arg()) .arg(withdraw_authority_arg()) @@ -426,53 +453,62 @@ impl StakeSubCommands for App<'_, '_> { Arg::with_name("no_wait") .long("no-wait") .takes_value(false) - .help("Return signature immediately after submitting the transaction, instead of waiting for confirmations"), + .help( + "Return signature immediately after submitting the transaction, \ + instead of waiting for confirmations", + ), ) .arg(memo_arg()) - .arg(compute_unit_price_arg()) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("deactivate-stake") .about("Deactivate the delegated stake from the stake account") - .arg( - pubkey!(Arg::with_name("stake_account_pubkey") + .arg(pubkey!( + Arg::with_name("stake_account_pubkey") .index(1) .value_name("STAKE_ACCOUNT_ADDRESS") .required(true), - "Stake account to be deactivated (or base of derived address if --seed is used). ") - ) + "Stake account to be deactivated (or base of derived address if --seed is \ + used). " + )) .arg( Arg::with_name("seed") .long("seed") .value_name("STRING") .takes_value(true) - .help("Seed for address generation; if specified, the resulting account \ - will be at a derived address of STAKE_ACCOUNT_ADDRESS") + .help( + "Seed for address generation; if specified, the resulting account \ + will be at a derived address of STAKE_ACCOUNT_ADDRESS", + ), ) .arg( Arg::with_name("delinquent") .long("delinquent") .takes_value(false) .conflicts_with(SIGN_ONLY_ARG.name) - .help("Deactivate abandoned stake that is currently delegated to a delinquent vote account") + .help( + "Deactivate abandoned stake that is currently delegated to a \ + delinquent vote account", + ), ) .arg(stake_authority_arg()) .offline_args() .nonce_args(false) .arg(fee_payer_arg()) .arg(memo_arg()) - .arg(compute_unit_price_arg()) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("split-stake") .about("Duplicate a stake account, splitting the tokens between the two") - .arg( - pubkey!(Arg::with_name("stake_account_pubkey") + .arg(pubkey!( + Arg::with_name("stake_account_pubkey") .index(1) .value_name("STAKE_ACCOUNT_ADDRESS") .required(true), - "Stake account to split (or base of derived address if --seed is used). ") - ) + "Stake account to split (or base of derived address if --seed is used). 
" + )) .arg( Arg::with_name("split_stake_account") .index(2) @@ -480,7 +516,7 @@ impl StakeSubCommands for App<'_, '_> { .takes_value(true) .required(true) .validator(is_valid_signer) - .help("Keypair of the new stake account") + .help("Keypair of the new stake account"), ) .arg( Arg::with_name("amount") @@ -489,18 +525,20 @@ impl StakeSubCommands for App<'_, '_> { .takes_value(true) .validator(is_amount) .required(true) - .help("The amount to move into the new stake account, in SOL") + .help("The amount to move into the new stake account, in SOL"), ) .arg( Arg::with_name("seed") .long("seed") .value_name("STRING") .takes_value(true) - .help("Seed for address generation; if specified, the resulting account \ - will be at a derived address of SPLIT_STAKE_ACCOUNT") + .help( + "Seed for address generation; if specified, the resulting account \ + will be at a derived address of SPLIT_STAKE_ACCOUNT", + ), ) .arg(stake_authority_arg()) - .offline_args_config(&SignOnlySplitNeedsRent{}) + .offline_args_config(&SignOnlySplitNeedsRent {}) .nonce_args(false) .arg(fee_payer_arg()) .arg(memo_arg()) @@ -512,52 +550,55 @@ impl StakeSubCommands for App<'_, '_> { .takes_value(true) .validator(is_amount) .requires("sign_only") - .help("Offline signing only: the rent-exempt amount to move into the new \ - stake account, in SOL") - ) + .help( + "Offline signing only: the rent-exempt amount to move into the new \ + stake account, in SOL", + ), + ), ) .subcommand( SubCommand::with_name("merge-stake") .about("Merges one stake account into another") - .arg( - pubkey!(Arg::with_name("stake_account_pubkey") + .arg(pubkey!( + Arg::with_name("stake_account_pubkey") .index(1) .value_name("STAKE_ACCOUNT_ADDRESS") .required(true), - "Stake account to merge into") - ) - .arg( - pubkey!(Arg::with_name("source_stake_account_pubkey") + "Stake account to merge into" + )) + .arg(pubkey!( + Arg::with_name("source_stake_account_pubkey") .index(2) .value_name("SOURCE_STAKE_ACCOUNT_ADDRESS") .required(true), - "Source stake account for the merge. If successful, this stake account \ - will no longer exist after the merge") - ) + "Source stake account for the merge. If successful, this stake account will \ + no longer exist after the merge" + )) .arg(stake_authority_arg()) .offline_args() .nonce_args(false) .arg(fee_payer_arg()) .arg(memo_arg()) - .arg(compute_unit_price_arg()) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("withdraw-stake") .about("Withdraw the unstaked SOL from the stake account") - .arg( - pubkey!(Arg::with_name("stake_account_pubkey") + .arg(pubkey!( + Arg::with_name("stake_account_pubkey") .index(1) .value_name("STAKE_ACCOUNT_ADDRESS") .required(true), - "Stake account from which to withdraw (or base of derived address if --seed is used). ") - ) - .arg( - pubkey!(Arg::with_name("destination_account_pubkey") + "Stake account from which to withdraw (or base of derived address if --seed \ + is used). 
" + )) + .arg(pubkey!( + Arg::with_name("destination_account_pubkey") .index(2) .value_name("RECIPIENT_ADDRESS") .required(true), - "Recipient of withdrawn SOL") - ) + "Recipient of withdrawn SOL" + )) .arg( Arg::with_name("amount") .index(3) @@ -565,15 +606,20 @@ impl StakeSubCommands for App<'_, '_> { .takes_value(true) .validator(is_amount_or_all) .required(true) - .help("The amount to withdraw from the stake account, in SOL; accepts keyword ALL") + .help( + "The amount to withdraw from the stake account, in SOL; accepts \ + keyword ALL", + ), ) .arg( Arg::with_name("seed") .long("seed") .value_name("STRING") .takes_value(true) - .help("Seed for address generation; if specified, the resulting account \ - will be at a derived address of STAKE_ACCOUNT_ADDRESS") + .help( + "Seed for address generation; if specified, the resulting account \ + will be at a derived address of STAKE_ACCOUNT_ADDRESS", + ), ) .arg(withdraw_authority_arg()) .offline_args() @@ -581,24 +627,27 @@ impl StakeSubCommands for App<'_, '_> { .arg(fee_payer_arg()) .arg(custodian_arg()) .arg(memo_arg()) - .arg(compute_unit_price_arg()) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("stake-set-lockup") .about("Set Lockup for the stake account") - .arg( - pubkey!(Arg::with_name("stake_account_pubkey") + .arg(pubkey!( + Arg::with_name("stake_account_pubkey") .index(1) .value_name("STAKE_ACCOUNT_ADDRESS") .required(true), - "Stake account for which to set lockup parameters. ") - ) + "Stake account for which to set lockup parameters. " + )) .arg( Arg::with_name("lockup_epoch") .long("lockup-epoch") .value_name("NUMBER") .takes_value(true) - .help("The epoch height at which this account will be available for withdrawal") + .help( + "The epoch height at which this account will be available for \ + withdrawal", + ), ) .arg( Arg::with_name("lockup_date") @@ -606,48 +655,56 @@ impl StakeSubCommands for App<'_, '_> { .value_name("RFC3339 DATETIME") .validator(is_rfc3339_datetime) .takes_value(true) - .help("The date and time at which this account will be available for withdrawal") + .help( + "The date and time at which this account will be available for \ + withdrawal", + ), ) - .arg( - pubkey!(Arg::with_name("new_custodian") + .arg(pubkey!( + Arg::with_name("new_custodian") .long("new-custodian") .value_name("PUBKEY"), - "Identity of a new lockup custodian. ") + "Identity of a new lockup custodian. " + )) + .group( + ArgGroup::with_name("lockup_details") + .args(&["lockup_epoch", "lockup_date", "new_custodian"]) + .multiple(true) + .required(true), ) - .group(ArgGroup::with_name("lockup_details") - .args(&["lockup_epoch", "lockup_date", "new_custodian"]) - .multiple(true) - .required(true)) .arg( Arg::with_name("custodian") .long("custodian") .takes_value(true) .value_name("KEYPAIR") .validator(is_valid_signer) - .help("Keypair of the existing custodian [default: cli config pubkey]") + .help("Keypair of the existing custodian [default: cli config pubkey]"), ) .offline_args() .nonce_args(false) .arg(fee_payer_arg()) .arg(memo_arg()) - .arg(compute_unit_price_arg()) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("stake-set-lockup-checked") .about("Set Lockup for the stake account, checking the new authority as a signer") - .arg( - pubkey!(Arg::with_name("stake_account_pubkey") + .arg(pubkey!( + Arg::with_name("stake_account_pubkey") .index(1) .value_name("STAKE_ACCOUNT_ADDRESS") .required(true), - "Stake account for which to set lockup parameters. 
") - ) + "Stake account for which to set lockup parameters. " + )) .arg( Arg::with_name("lockup_epoch") .long("lockup-epoch") .value_name("NUMBER") .takes_value(true) - .help("The epoch height at which this account will be available for withdrawal") + .help( + "The epoch height at which this account will be available for \ + withdrawal", + ), ) .arg( Arg::with_name("lockup_date") @@ -655,7 +712,10 @@ impl StakeSubCommands for App<'_, '_> { .value_name("RFC3339 DATETIME") .validator(is_rfc3339_datetime) .takes_value(true) - .help("The date and time at which this account will be available for withdrawal") + .help( + "The date and time at which this account will be available for \ + withdrawal", + ), ) .arg( Arg::with_name("new_custodian") @@ -663,42 +723,44 @@ impl StakeSubCommands for App<'_, '_> { .value_name("KEYPAIR") .takes_value(true) .validator(is_valid_signer) - .help("Keypair of a new lockup custodian") + .help("Keypair of a new lockup custodian"), + ) + .group( + ArgGroup::with_name("lockup_details") + .args(&["lockup_epoch", "lockup_date", "new_custodian"]) + .multiple(true) + .required(true), ) - .group(ArgGroup::with_name("lockup_details") - .args(&["lockup_epoch", "lockup_date", "new_custodian"]) - .multiple(true) - .required(true)) .arg( Arg::with_name("custodian") .long("custodian") .takes_value(true) .value_name("KEYPAIR") .validator(is_valid_signer) - .help("Keypair of the existing custodian [default: cli config pubkey]") + .help("Keypair of the existing custodian [default: cli config pubkey]"), ) .offline_args() .nonce_args(false) .arg(fee_payer_arg()) .arg(memo_arg()) - .arg(compute_unit_price_arg()) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("stake-account") .about("Show the contents of a stake account") .alias("show-stake-account") - .arg( - pubkey!(Arg::with_name("stake_account_pubkey") + .arg(pubkey!( + Arg::with_name("stake_account_pubkey") .index(1) .value_name("STAKE_ACCOUNT_ADDRESS") .required(true), - "The stake account to display. ") - ) + "The stake account to display. " + )) .arg( Arg::with_name("lamports") .long("lamports") .takes_value(false) - .help("Display balance in lamports instead of SOL") + .help("Display balance in lamports instead of SOL"), ) .arg( Arg::with_name("with_rewards") @@ -710,7 +772,7 @@ impl StakeSubCommands for App<'_, '_> { Arg::with_name("csv") .long("csv") .takes_value(false) - .help("Format stake rewards data in csv") + .help("Format stake rewards data in csv"), ) .arg( Arg::with_name("num_rewards_epochs") @@ -720,7 +782,10 @@ impl StakeSubCommands for App<'_, '_> { .validator(|s| is_within_range(s, 1..=50)) .default_value_if("with_rewards", None, "1") .requires("with_rewards") - .help("Display rewards for NUM recent epochs, max 10 [default: latest epoch only]"), + .help( + "Display rewards for NUM recent epochs, max 10 \ + [default: latest epoch only]", + ), ), ) .subcommand( @@ -731,7 +796,7 @@ impl StakeSubCommands for App<'_, '_> { Arg::with_name("lamports") .long("lamports") .takes_value(false) - .help("Display balance in lamports instead of SOL") + .help("Display balance in lamports instead of SOL"), ) .arg( Arg::with_name("limit") @@ -739,13 +804,12 @@ impl StakeSubCommands for App<'_, '_> { .takes_value(true) .value_name("NUM") .default_value("10") - .validator(|s| { - s.parse::() - .map(|_| ()) - .map_err(|e| e.to_string()) - }) - .help("Display NUM recent epochs worth of stake history in text mode. 
0 for all") - ) + .validator(|s| s.parse::().map(|_| ()).map_err(|e| e.to_string())) + .help( + "Display NUM recent epochs worth of stake history in text mode. 0 for \ + all", + ), + ), ) .subcommand( SubCommand::with_name("stake-minimum-delegation") @@ -754,8 +818,8 @@ impl StakeSubCommands for App<'_, '_> { Arg::with_name("lamports") .long("lamports") .takes_value(false) - .help("Display minimum delegation in lamports instead of SOL") - ) + .help("Display minimum delegation in lamports instead of SOL"), + ), ) } } @@ -1456,7 +1520,8 @@ pub fn process_create_stake_account( if lamports < minimum_balance { return Err(CliError::BadParameter(format!( - "need at least {minimum_balance} lamports for stake account to be rent exempt, provided lamports: {lamports}" + "need at least {minimum_balance} lamports for stake account to be rent exempt, \ + provided lamports: {lamports}" )) .into()); } @@ -1919,7 +1984,8 @@ pub fn process_split_stake( format!("Stake account {split_stake_account_address} already exists") } else { format!( - "Account {split_stake_account_address} already exists and is not a stake account" + "Account {split_stake_account_address} already exists and is not a stake \ + account" ) }; return Err(CliError::BadParameter(err_msg).into()); @@ -1930,7 +1996,8 @@ pub fn process_split_stake( if lamports < minimum_balance { return Err(CliError::BadParameter(format!( - "need at least {minimum_balance} lamports for stake account to be rent exempt, provided lamports: {lamports}" + "need at least {minimum_balance} lamports for stake account to be rent exempt, \ + provided lamports: {lamports}" )) .into()); } @@ -2359,7 +2426,8 @@ pub(crate) fn check_current_authority( ) -> Result<(), CliError> { if !permitted_authorities.contains(provided_current_authority) { Err(CliError::RpcRequestError(format!( - "Invalid authority provided: {provided_current_authority:?}, expected {permitted_authorities:?}" + "Invalid authority provided: {provided_current_authority:?}, expected \ + {permitted_authorities:?}" ))) } else { Ok(()) @@ -2622,8 +2690,8 @@ pub fn process_delegate_stake( )) } else { Err(CliError::DynamicProgramError(format!( - "Unable to delegate. Vote account appears delinquent \ - because its current root slot, {root_slot}, is less than {min_root_slot}" + "Unable to delegate. Vote account appears delinquent because its current root \ + slot, {root_slot}, is less than {min_root_slot}" ))) }; diff --git a/cli/src/validator_info.rs b/cli/src/validator_info.rs index f6251c649972e2..ad3df38d553499 100644 --- a/cli/src/validator_info.rs +++ b/cli/src/validator_info.rs @@ -88,7 +88,11 @@ fn verify_keybase( if client.head(&url).send()?.status().is_success() { Ok(()) } else { - Err(format!("keybase_username could not be confirmed at: {url}. Please add this pubkey file to your keybase profile to connect").into()) + Err(format!( + "keybase_username could not be confirmed at: {url}. 
Please add this pubkey file \ + to your keybase profile to connect" + ) + .into()) } } else { Err(format!("keybase_username could not be parsed as String: {keybase_username}").into()) @@ -204,7 +208,7 @@ impl ValidatorInfoSubCommands for App<'_, '_> { .value_name("DETAILS") .takes_value(true) .validator(check_details_length) - .help("Validator description") + .help("Validator description"), ) .arg( Arg::with_name("force") @@ -223,9 +227,12 @@ impl ValidatorInfoSubCommands for App<'_, '_> { .value_name("PUBKEY") .takes_value(true) .validator(is_pubkey) - .help("The pubkey of the Validator info account; without this argument, returns all"), + .help( + "The pubkey of the Validator info account; without this \ + argument, returns all Validator info accounts", + ), ), - ) + ), ) } } @@ -607,7 +614,12 @@ mod tests { let max_short_string = "Max Length String KWpP299aFCBWvWg1MHpSuaoTsud7cv8zMJsh99aAtP8X1s26yrR1".to_string(); // 300-character string - let max_long_string = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Ut libero quam, volutpat et aliquet eu, varius in mi. Aenean vestibulum ex in tristique faucibus. Maecenas in imperdiet turpis. Nullam feugiat aliquet erat. Morbi malesuada turpis sed dui pulvinar lobortis. Pellentesque a lectus eu leo nullam.".to_string(); + let max_long_string = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Ut libero \ + quam, volutpat et aliquet eu, varius in mi. Aenean vestibulum ex \ + in tristique faucibus. Maecenas in imperdiet turpis. Nullam \ + feugiat aliquet erat. Morbi malesuada turpis sed dui pulvinar \ + lobortis. Pellentesque a lectus eu leo nullam." + .to_string(); let mut info = Map::new(); info.insert("name".to_string(), Value::String(max_short_string.clone())); info.insert( diff --git a/cli/src/vote.rs b/cli/src/vote.rs index e4456fe1d2355c..e13c6eec876c01 100644 --- a/cli/src/vote.rs +++ b/cli/src/vote.rs @@ -70,15 +70,15 @@ impl VoteSubCommands for App<'_, '_> { .validator(is_valid_signer) .help("Keypair of validator that will vote with this account"), ) - .arg( - pubkey!(Arg::with_name("authorized_withdrawer") + .arg(pubkey!( + Arg::with_name("authorized_withdrawer") .index(3) .value_name("WITHDRAWER_PUBKEY") .takes_value(true) .required(true) .long("authorized-withdrawer"), - "Public key of the authorized withdrawer") - ) + "Public key of the authorized withdrawer" + )) .arg( Arg::with_name("commission") .long("commission") @@ -87,43 +87,48 @@ impl VoteSubCommands for App<'_, '_> { .default_value("100") .help("The commission taken on reward redemption (0-100)"), ) - .arg( - pubkey!(Arg::with_name("authorized_voter") + .arg(pubkey!( + Arg::with_name("authorized_voter") .long("authorized-voter") .value_name("VOTER_PUBKEY"), - "Public key of the authorized voter [default: validator identity pubkey]. "), - ) + "Public key of the authorized voter [default: validator identity pubkey]. 
" + )) .arg( Arg::with_name("allow_unsafe_authorized_withdrawer") .long("allow-unsafe-authorized-withdrawer") .takes_value(false) - .help("Allow an authorized withdrawer pubkey to be identical to the validator identity \ - account pubkey or vote account pubkey, which is normally an unsafe \ - configuration and should be avoided."), + .help( + "Allow an authorized withdrawer pubkey to be identical to the \ + validator identity account pubkey or vote account pubkey, which is \ + normally an unsafe configuration and should be avoided.", + ), ) .arg( Arg::with_name("seed") .long("seed") .value_name("STRING") .takes_value(true) - .help("Seed for address generation; if specified, the resulting account will be at a derived address of the VOTE ACCOUNT pubkey") + .help( + "Seed for address generation; if specified, the resulting account \ + will be at a derived address of the VOTE ACCOUNT pubkey", + ), ) .offline_args() .nonce_args(false) .arg(fee_payer_arg()) .arg(memo_arg()) - .arg(compute_unit_price_arg()) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("vote-authorize-voter") .about("Authorize a new vote signing keypair for the given vote account") - .arg( - pubkey!(Arg::with_name("vote_account_pubkey") + .arg(pubkey!( + Arg::with_name("vote_account_pubkey") .index(1) .value_name("VOTE_ACCOUNT_ADDRESS") .required(true), - "Vote account in which to set the authorized voter. "), - ) + "Vote account in which to set the authorized voter. " + )) .arg( Arg::with_name("authorized") .index(2) @@ -132,29 +137,29 @@ impl VoteSubCommands for App<'_, '_> { .validator(is_valid_signer) .help("Current authorized vote signer."), ) - .arg( - pubkey!(Arg::with_name("new_authorized_pubkey") + .arg(pubkey!( + Arg::with_name("new_authorized_pubkey") .index(3) .value_name("NEW_AUTHORIZED_PUBKEY") .required(true), - "New authorized vote signer. "), - ) + "New authorized vote signer. " + )) .offline_args() .nonce_args(false) .arg(fee_payer_arg()) .arg(memo_arg()) - .arg(compute_unit_price_arg()) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("vote-authorize-withdrawer") .about("Authorize a new withdraw signing keypair for the given vote account") - .arg( - pubkey!(Arg::with_name("vote_account_pubkey") + .arg(pubkey!( + Arg::with_name("vote_account_pubkey") .index(1) .value_name("VOTE_ACCOUNT_ADDRESS") .required(true), - "Vote account in which to set the authorized withdrawer. "), - ) + "Vote account in which to set the authorized withdrawer. " + )) .arg( Arg::with_name("authorized") .index(2) @@ -163,30 +168,32 @@ impl VoteSubCommands for App<'_, '_> { .validator(is_valid_signer) .help("Current authorized withdrawer."), ) - .arg( - pubkey!(Arg::with_name("new_authorized_pubkey") + .arg(pubkey!( + Arg::with_name("new_authorized_pubkey") .index(3) .value_name("AUTHORIZED_PUBKEY") .required(true), - "New authorized withdrawer. "), - ) + "New authorized withdrawer. 
" + )) .offline_args() .nonce_args(false) .arg(fee_payer_arg()) .arg(memo_arg()) - .arg(compute_unit_price_arg()) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("vote-authorize-voter-checked") - .about("Authorize a new vote signing keypair for the given vote account, \ - checking the new authority as a signer") - .arg( - pubkey!(Arg::with_name("vote_account_pubkey") + .about( + "Authorize a new vote signing keypair for the given vote account, checking \ + the new authority as a signer", + ) + .arg(pubkey!( + Arg::with_name("vote_account_pubkey") .index(1) .value_name("VOTE_ACCOUNT_ADDRESS") .required(true), - "Vote account in which to set the authorized voter. "), - ) + "Vote account in which to set the authorized voter. " + )) .arg( Arg::with_name("authorized") .index(2) @@ -207,19 +214,21 @@ impl VoteSubCommands for App<'_, '_> { .nonce_args(false) .arg(fee_payer_arg()) .arg(memo_arg()) - .arg(compute_unit_price_arg()) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("vote-authorize-withdrawer-checked") - .about("Authorize a new withdraw signing keypair for the given vote account, \ - checking the new authority as a signer") - .arg( - pubkey!(Arg::with_name("vote_account_pubkey") + .about( + "Authorize a new withdraw signing keypair for the given vote account, \ + checking the new authority as a signer", + ) + .arg(pubkey!( + Arg::with_name("vote_account_pubkey") .index(1) .value_name("VOTE_ACCOUNT_ADDRESS") .required(true), - "Vote account in which to set the authorized withdrawer. "), - ) + "Vote account in which to set the authorized withdrawer. " + )) .arg( Arg::with_name("authorized") .index(2) @@ -240,18 +249,18 @@ impl VoteSubCommands for App<'_, '_> { .nonce_args(false) .arg(fee_payer_arg()) .arg(memo_arg()) - .arg(compute_unit_price_arg()) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("vote-update-validator") .about("Update the vote account's validator identity") - .arg( - pubkey!(Arg::with_name("vote_account_pubkey") + .arg(pubkey!( + Arg::with_name("vote_account_pubkey") .index(1) .value_name("VOTE_ACCOUNT_ADDRESS") .required(true), - "Vote account to update. "), - ) + "Vote account to update. " + )) .arg( Arg::with_name("new_identity_account") .index(2) @@ -274,18 +283,18 @@ impl VoteSubCommands for App<'_, '_> { .nonce_args(false) .arg(fee_payer_arg()) .arg(memo_arg()) - .arg(compute_unit_price_arg()) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("vote-update-commission") .about("Update the vote account's commission") - .arg( - pubkey!(Arg::with_name("vote_account_pubkey") + .arg(pubkey!( + Arg::with_name("vote_account_pubkey") .index(1) .value_name("VOTE_ACCOUNT_ADDRESS") .required(true), - "Vote account to update. "), - ) + "Vote account to update. 
" + )) .arg( Arg::with_name("commission") .index(2) @@ -293,7 +302,7 @@ impl VoteSubCommands for App<'_, '_> { .takes_value(true) .required(true) .validator(is_valid_percentage) - .help("The new commission") + .help("The new commission"), ) .arg( Arg::with_name("authorized_withdrawer") @@ -308,19 +317,19 @@ impl VoteSubCommands for App<'_, '_> { .nonce_args(false) .arg(fee_payer_arg()) .arg(memo_arg()) - .arg(compute_unit_price_arg()) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("vote-account") .about("Show the contents of a vote account") .alias("show-vote-account") - .arg( - pubkey!(Arg::with_name("vote_account_pubkey") + .arg(pubkey!( + Arg::with_name("vote_account_pubkey") .index(1) .value_name("VOTE_ACCOUNT_ADDRESS") .required(true), - "Vote account pubkey. "), - ) + "Vote account pubkey. " + )) .arg( Arg::with_name("lamports") .long("lamports") @@ -347,26 +356,29 @@ impl VoteSubCommands for App<'_, '_> { .validator(|s| is_within_range(s, 1..=50)) .default_value_if("with_rewards", None, "1") .requires("with_rewards") - .help("Display rewards for NUM recent epochs, max 10 [default: latest epoch only]"), + .help( + "Display rewards for NUM recent epochs, max 10 \ + [default: latest epoch only]", + ), ), ) .subcommand( SubCommand::with_name("withdraw-from-vote-account") .about("Withdraw lamports from a vote account into a specified account") - .arg( - pubkey!(Arg::with_name("vote_account_pubkey") + .arg(pubkey!( + Arg::with_name("vote_account_pubkey") .index(1) .value_name("VOTE_ACCOUNT_ADDRESS") .required(true), - "Vote account from which to withdraw. "), - ) - .arg( - pubkey!(Arg::with_name("destination_account_pubkey") + "Vote account from which to withdraw. " + )) + .arg(pubkey!( + Arg::with_name("destination_account_pubkey") .index(2) .value_name("RECIPIENT_ADDRESS") .required(true), - "The recipient of withdrawn SOL. "), - ) + "The recipient of withdrawn SOL. " + )) .arg( Arg::with_name("amount") .index(3) @@ -374,7 +386,10 @@ impl VoteSubCommands for App<'_, '_> { .takes_value(true) .required(true) .validator(is_amount_or_all) - .help("The amount to withdraw, in SOL; accepts keyword ALL, which for this command means account balance minus rent-exempt minimum"), + .help( + "The amount to withdraw, in SOL; accepts keyword ALL, which for this \ + command means account balance minus rent-exempt minimum", + ), ) .arg( Arg::with_name("authorized_withdrawer") @@ -388,26 +403,25 @@ impl VoteSubCommands for App<'_, '_> { .nonce_args(false) .arg(fee_payer_arg()) .arg(memo_arg()) - .arg(compute_unit_price_arg() - ) + .arg(compute_unit_price_arg()), ) .subcommand( SubCommand::with_name("close-vote-account") .about("Close a vote account and withdraw all funds remaining") - .arg( - pubkey!(Arg::with_name("vote_account_pubkey") + .arg(pubkey!( + Arg::with_name("vote_account_pubkey") .index(1) .value_name("VOTE_ACCOUNT_ADDRESS") .required(true), - "Vote account to be closed. "), - ) - .arg( - pubkey!(Arg::with_name("destination_account_pubkey") + "Vote account to be closed. " + )) + .arg(pubkey!( + Arg::with_name("destination_account_pubkey") .index(2) .value_name("RECIPIENT_ADDRESS") .required(true), - "The recipient of all withdrawn SOL. "), - ) + "The recipient of all withdrawn SOL. 
" + )) .arg( Arg::with_name("authorized_withdrawer") .long("authorized-withdrawer") @@ -418,8 +432,7 @@ impl VoteSubCommands for App<'_, '_> { ) .arg(fee_payer_arg()) .arg(memo_arg()) - .arg(compute_unit_price_arg() - ) + .arg(compute_unit_price_arg()), ) } } @@ -451,15 +464,15 @@ pub fn parse_create_vote_account( if !allow_unsafe { if authorized_withdrawer == vote_account_pubkey.unwrap() { return Err(CliError::BadParameter( - "Authorized withdrawer pubkey is identical to vote \ - account pubkey, an unsafe configuration" + "Authorized withdrawer pubkey is identical to vote account pubkey, an unsafe \ + configuration" .to_owned(), )); } if authorized_withdrawer == identity_pubkey.unwrap() { return Err(CliError::BadParameter( - "Authorized withdrawer pubkey is identical to identity \ - account pubkey, an unsafe configuration" + "Authorized withdrawer pubkey is identical to identity account pubkey, an unsafe \ + configuration" .to_owned(), )); } @@ -956,8 +969,10 @@ pub fn process_vote_authorize( if let Some(signer) = new_authorized_signer { if signer.is_interactive() { return Err(CliError::BadParameter(format!( - "invalid new authorized vote signer {new_authorized_pubkey:?}. Interactive vote signers not supported" - )).into()); + "invalid new authorized vote signer {new_authorized_pubkey:?}. \ + Interactive vote signers not supported" + )) + .into()); } } } @@ -1337,7 +1352,9 @@ pub fn process_withdraw_from_vote_account( let balance_remaining = current_balance.saturating_sub(withdraw_amount); if balance_remaining < minimum_balance && balance_remaining != 0 { return Err(CliError::BadParameter(format!( - "Withdraw amount too large. The vote account balance must be at least {} SOL to remain rent exempt", lamports_to_sol(minimum_balance) + "Withdraw amount too large. The vote account balance must be at least {} SOL \ + to remain rent exempt", + lamports_to_sol(minimum_balance) )) .into()); } diff --git a/cli/src/wallet.rs b/cli/src/wallet.rs index bc3e5d4e0081d3..d44e7d45239549 100644 --- a/cli/src/wallet.rs +++ b/cli/src/wallet.rs @@ -60,13 +60,13 @@ impl WalletSubCommands for App<'_, '_> { SubCommand::with_name("account") .about("Show the contents of an account") .alias("account") - .arg( - pubkey!(Arg::with_name("account_pubkey") + .arg(pubkey!( + Arg::with_name("account_pubkey") .index(1) .value_name("ACCOUNT_ADDRESS") .required(true), - "Account key URI. ") - ) + "Account key URI. " + )) .arg( Arg::with_name("output_file") .long("output-file") @@ -104,22 +104,22 @@ impl WalletSubCommands for App<'_, '_> { .required(true) .help("The airdrop amount to request, in SOL"), ) - .arg( - pubkey!(Arg::with_name("to") + .arg(pubkey!( + Arg::with_name("to") .index(2) .value_name("RECIPIENT_ADDRESS"), - "The account address of airdrop recipient. "), - ), + "The account address of airdrop recipient. " + )), ) .subcommand( SubCommand::with_name("balance") .about("Get your balance") - .arg( - pubkey!(Arg::with_name("pubkey") + .arg(pubkey!( + Arg::with_name("pubkey") .index(1) .value_name("ACCOUNT_ADDRESS"), - "The account address of the balance to check. ") - ) + "The account address of the balance to check. 
" + )) .arg( Arg::with_name("lamports") .long("lamports") @@ -138,23 +138,25 @@ impl WalletSubCommands for App<'_, '_> { .required(true) .help("The transaction signature to confirm"), ) - .after_help(// Formatted specifically for the manually-indented heredoc string - "Note: This will show more detailed information for finalized transactions with verbose mode (-v/--verbose).\ - \n\ - \nAccount modes:\ - \n |srwx|\ - \n s: signed\ - \n r: readable (always true)\ - \n w: writable\ - \n x: program account (inner instructions excluded)\ - " + .after_help( + // Formatted specifically for the manually-indented heredoc string + "Note: This will show more detailed information for finalized \ + transactions with verbose mode (-v/--verbose).\ + \n\ + \nAccount modes:\ + \n |srwx|\ + \n s: signed\ + \n r: readable (always true)\ + \n w: writable\ + \n x: program account (inner instructions excluded)\ + ", ), ) .subcommand( SubCommand::with_name("create-address-with-seed") .about( - "Generate a derived account address with a seed. \ - For program derived addresses (PDAs), use the find-program-derived-address command instead" + "Generate a derived account address with a seed. For program derived \ + addresses (PDAs), use the find-program-derived-address command instead", ) .arg( Arg::with_name("seed") @@ -176,45 +178,46 @@ impl WalletSubCommands for App<'_, '_> { or one of NONCE, STAKE, and VOTE keywords", ), ) - .arg( - pubkey!(Arg::with_name("from") + .arg(pubkey!( + Arg::with_name("from") .long("from") .value_name("FROM_PUBKEY") .required(false), - "From (base) key, [default: cli config keypair]. "), + "From (base) key, [default: cli config keypair]. " + )), + ) + .subcommand( + SubCommand::with_name("find-program-derived-address") + .about("Generate a program derived account address with a seed") + .arg( + Arg::with_name("program_id") + .index(1) + .value_name("PROGRAM_ID") + .takes_value(true) + .required(true) + .help( + "The program_id that the address will ultimately be used for, \n\ + or one of NONCE, STAKE, and VOTE keywords", + ), + ) + .arg( + Arg::with_name("seeds") + .min_values(0) + .value_name("SEED") + .takes_value(true) + .validator(is_structured_seed) + .help( + "The seeds. \n\ + Each one must match the pattern PREFIX:VALUE. \n\ + PREFIX can be one of [string, pubkey, hex, u8] \n\ + or matches the pattern [u,i][16,32,64,128][le,be] \ + (for example u64le) for number values \n\ + [u,i] - represents whether the number is unsigned or signed, \n\ + [16,32,64,128] - represents the bit length, and \n\ + [le,be] - represents the byte order - little endian or big endian", + ), ), ) - .subcommand( - SubCommand::with_name("find-program-derived-address") - .about("Generate a program derived account address with a seed") - .arg( - Arg::with_name("program_id") - .index(1) - .value_name("PROGRAM_ID") - .takes_value(true) - .required(true) - .help( - "The program_id that the address will ultimately be used for, \n\ - or one of NONCE, STAKE, and VOTE keywords", - ), - ) - .arg( - Arg::with_name("seeds") - .min_values(0) - .value_name("SEED") - .takes_value(true) - .validator(is_structured_seed) - .help( - "The seeds. \n\ - Each one must match the pattern PREFIX:VALUE. 
\n\ - PREFIX can be one of [string, pubkey, hex, u8] \n\ - or matches the pattern [u,i][16,32,64,128][le,be] (for example u64le) for number values \n\ - [u,i] - represents whether the number is unsigned or signed, \n\ - [16,32,64,128] - represents the bit length, and \n\ - [le,be] - represents the byte order - little endian or big endian" - ), - ), - ) .subcommand( SubCommand::with_name("decode-transaction") .about("Decode a serialized transaction") @@ -239,7 +242,10 @@ impl WalletSubCommands for App<'_, '_> { ) .subcommand( SubCommand::with_name("resolve-signer") - .about("Checks that a signer is valid, and returns its specific path; useful for signers that may be specified generally, eg. usb://ledger") + .about( + "Checks that a signer is valid, and returns its specific path; useful for \ + signers that may be specified generally, eg. usb://ledger", + ) .arg( Arg::with_name("signer") .index(1) @@ -247,20 +253,20 @@ impl WalletSubCommands for App<'_, '_> { .takes_value(true) .required(true) .validator(is_valid_signer) - .help("The signer path to resolve") - ) + .help("The signer path to resolve"), + ), ) .subcommand( SubCommand::with_name("transfer") .about("Transfer funds between system accounts") .alias("pay") - .arg( - pubkey!(Arg::with_name("to") + .arg(pubkey!( + Arg::with_name("to") .index(1) .value_name("RECIPIENT_ADDRESS") .required(true), - "The account address of recipient. "), - ) + "The account address of recipient. " + )) .arg( Arg::with_name("amount") .index(2) @@ -270,17 +276,20 @@ impl WalletSubCommands for App<'_, '_> { .required(true) .help("The amount to send, in SOL; accepts keyword ALL"), ) - .arg( - pubkey!(Arg::with_name("from") + .arg(pubkey!( + Arg::with_name("from") .long("from") .value_name("FROM_ADDRESS"), - "Source account of funds (if different from client local account). "), - ) + "Source account of funds (if different from client local account). 
" + )) .arg( Arg::with_name("no_wait") .long("no-wait") .takes_value(false) - .help("Return signature immediately after submitting the transaction, instead of waiting for confirmations"), + .help( + "Return signature immediately after submitting the transaction, \ + instead of waiting for confirmations", + ), ) .arg( Arg::with_name("derived_address_seed") @@ -289,7 +298,7 @@ impl WalletSubCommands for App<'_, '_> { .value_name("SEED_STRING") .requires("derived_address_program_id") .validator(is_derived_address_seed) - .hidden(hidden_unless_forced()) + .hidden(hidden_unless_forced()), ) .arg( Arg::with_name("derived_address_program_id") @@ -297,13 +306,13 @@ impl WalletSubCommands for App<'_, '_> { .takes_value(true) .value_name("PROGRAM_ID") .requires("derived_address_seed") - .hidden(hidden_unless_forced()) + .hidden(hidden_unless_forced()), ) .arg( Arg::with_name("allow_unfunded_recipient") .long("allow-unfunded-recipient") .takes_value(false) - .help("Complete the transfer even if the recipient address is not funded") + .help("Complete the transfer even if the recipient address is not funded"), ) .offline_args() .nonce_args(false) @@ -320,7 +329,7 @@ impl WalletSubCommands for App<'_, '_> { .takes_value(true) .value_name("STRING") .required(true) - .help("The message text to be signed") + .help("The message text to be signed"), ) .arg( Arg::with_name("version") @@ -331,10 +340,10 @@ impl WalletSubCommands for App<'_, '_> { .default_value("0") .validator(|p| match p.parse::() { Err(_) => Err(String::from("Must be unsigned integer")), - Ok(_) => { Ok(()) } + Ok(_) => Ok(()), }) - .help("The off-chain message version") - ) + .help("The off-chain message version"), + ), ) .subcommand( SubCommand::with_name("verify-offchain-signature") @@ -345,7 +354,7 @@ impl WalletSubCommands for App<'_, '_> { .takes_value(true) .value_name("STRING") .required(true) - .help("The text of the original message") + .help("The text of the original message"), ) .arg( Arg::with_name("signature") @@ -353,7 +362,7 @@ impl WalletSubCommands for App<'_, '_> { .value_name("SIGNATURE") .takes_value(true) .required(true) - .help("The message signature to verify") + .help("The message signature to verify"), ) .arg( Arg::with_name("version") @@ -364,17 +373,17 @@ impl WalletSubCommands for App<'_, '_> { .default_value("0") .validator(|p| match p.parse::() { Err(_) => Err(String::from("Must be unsigned integer")), - Ok(_) => { Ok(()) } + Ok(_) => Ok(()), }) - .help("The off-chain message version") + .help("The off-chain message version"), ) - .arg( - pubkey!(Arg::with_name("signer") + .arg(pubkey!( + Arg::with_name("signer") .long("signer") .value_name("PUBKEY") .required(false), - "The pubkey of the message signer (if different from config default)") - ) + "The pubkey of the message signer (if different from config default)" + )), ) } } @@ -889,9 +898,8 @@ pub fn process_transfer( .value; if recipient_balance == 0 { return Err(format!( - "The recipient address ({to}) is not funded. \ - Add `--allow-unfunded-recipient` to complete the transfer \ - " + "The recipient address ({to}) is not funded. 
Add `--allow-unfunded-recipient` to \ + complete the transfer " ) .into()); } diff --git a/cli/tests/program.rs b/cli/tests/program.rs index 5bd10a92b7f6a5..7e8d409e1c26ec 100644 --- a/cli/tests/program.rs +++ b/cli/tests/program.rs @@ -65,6 +65,7 @@ fn test_cli_program_deploy_non_upgradeable() { config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, program_signer_index: None, program_pubkey: None, buffer_signer_index: None, @@ -109,6 +110,7 @@ fn test_cli_program_deploy_non_upgradeable() { config.signers = vec![&keypair, &custom_address_keypair]; config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, program_signer_index: Some(1), program_pubkey: None, buffer_signer_index: None, @@ -163,6 +165,7 @@ fn test_cli_program_deploy_non_upgradeable() { config.signers = vec![&keypair, &custom_address_keypair]; config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, program_signer_index: Some(1), program_pubkey: None, buffer_signer_index: None, @@ -185,6 +188,7 @@ fn test_cli_program_deploy_non_upgradeable() { // Use forcing parameter to deploy to account with excess balance config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, program_signer_index: Some(1), program_pubkey: None, buffer_signer_index: None, @@ -245,6 +249,7 @@ fn test_cli_program_deploy_no_authority() { config.signers = vec![&keypair, &upgrade_authority]; config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, program_signer_index: None, program_pubkey: None, buffer_signer_index: None, @@ -271,6 +276,7 @@ fn test_cli_program_deploy_no_authority() { config.signers = vec![&keypair, &upgrade_authority]; config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, program_signer_index: None, program_pubkey: Some(program_id), buffer_signer_index: None, @@ -332,6 +338,7 @@ fn test_cli_program_deploy_with_authority() { config.signers = vec![&keypair, &upgrade_authority, &program_keypair]; config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, program_signer_index: Some(2), program_pubkey: Some(program_keypair.pubkey()), buffer_signer_index: None, @@ -380,6 +387,7 @@ fn test_cli_program_deploy_with_authority() { config.signers = vec![&keypair, &upgrade_authority]; config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, program_signer_index: None, program_pubkey: None, buffer_signer_index: None, @@ -422,6 +430,7 @@ fn test_cli_program_deploy_with_authority() { config.signers = vec![&keypair, &upgrade_authority]; config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, program_signer_index: None, program_pubkey: Some(program_pubkey), buffer_signer_index: None, @@ -477,6 +486,7 @@ fn test_cli_program_deploy_with_authority() { 
config.signers = vec![&keypair, &new_upgrade_authority]; config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, program_signer_index: None, program_pubkey: Some(program_pubkey), buffer_signer_index: None, @@ -552,6 +562,7 @@ fn test_cli_program_deploy_with_authority() { config.signers = vec![&keypair, &new_upgrade_authority]; config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, program_signer_index: None, program_pubkey: Some(program_pubkey), buffer_signer_index: None, @@ -568,6 +579,7 @@ fn test_cli_program_deploy_with_authority() { config.signers = vec![&keypair, &new_upgrade_authority]; config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, program_signer_index: None, program_pubkey: None, buffer_signer_index: None, @@ -671,6 +683,7 @@ fn test_cli_program_close_program() { config.signers = vec![&keypair, &upgrade_authority, &program_keypair]; config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, program_signer_index: Some(2), program_pubkey: Some(program_keypair.pubkey()), buffer_signer_index: None, @@ -774,6 +787,7 @@ fn test_cli_program_extend_program() { config.signers = vec![&keypair, &upgrade_authority, &program_keypair]; config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, program_signer_index: Some(2), program_pubkey: Some(program_keypair.pubkey()), buffer_signer_index: None, @@ -864,6 +878,7 @@ fn test_cli_program_write_buffer() { config.signers = vec![&keypair]; config.command = CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: noop_path.to_str().unwrap().to_string(), + fee_payer_signer_index: 0, buffer_signer_index: None, buffer_pubkey: None, buffer_authority_signer_index: 0, @@ -899,6 +914,7 @@ fn test_cli_program_write_buffer() { config.signers = vec![&keypair, &buffer_keypair]; config.command = CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: noop_path.to_str().unwrap().to_string(), + fee_payer_signer_index: 0, buffer_signer_index: Some(1), buffer_pubkey: Some(buffer_keypair.pubkey()), buffer_authority_signer_index: 0, @@ -961,6 +977,7 @@ fn test_cli_program_write_buffer() { config.signers = vec![&keypair, &buffer_keypair, &authority_keypair]; config.command = CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: noop_path.to_str().unwrap().to_string(), + fee_payer_signer_index: 0, buffer_signer_index: Some(1), buffer_pubkey: Some(buffer_keypair.pubkey()), buffer_authority_signer_index: 2, @@ -999,6 +1016,7 @@ fn test_cli_program_write_buffer() { config.signers = vec![&keypair, &buffer_keypair, &authority_keypair]; config.command = CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: noop_path.to_str().unwrap().to_string(), + fee_payer_signer_index: 0, buffer_signer_index: None, buffer_pubkey: None, buffer_authority_signer_index: 2, @@ -1073,6 +1091,7 @@ fn test_cli_program_write_buffer() { config.signers = vec![&keypair]; config.command = CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: noop_path.to_str().unwrap().to_string(), + fee_payer_signer_index: 0, 
buffer_signer_index: None, buffer_pubkey: None, buffer_authority_signer_index: 0, @@ -1114,6 +1133,7 @@ fn test_cli_program_write_buffer() { config.signers = vec![&keypair, &buffer_keypair]; config.command = CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: noop_path.to_str().unwrap().to_string(), + fee_payer_signer_index: 0, buffer_signer_index: Some(1), buffer_pubkey: Some(buffer_keypair.pubkey()), buffer_authority_signer_index: 0, @@ -1124,6 +1144,7 @@ fn test_cli_program_write_buffer() { config.signers = vec![&keypair, &buffer_keypair]; config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(noop_large_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, program_signer_index: None, program_pubkey: None, buffer_signer_index: Some(1), @@ -1186,6 +1207,7 @@ fn test_cli_program_set_buffer_authority() { config.signers = vec![&keypair, &buffer_keypair]; config.command = CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: noop_path.to_str().unwrap().to_string(), + fee_payer_signer_index: 0, buffer_signer_index: Some(1), buffer_pubkey: Some(buffer_keypair.pubkey()), buffer_authority_signer_index: 0, @@ -1200,7 +1222,7 @@ fn test_cli_program_set_buffer_authority() { panic!("not a buffer account"); } - // Set new authority + // Set new buffer authority let new_buffer_authority = Keypair::new(); config.signers = vec![&keypair, &buffer_keypair]; config.command = CliCommand::Program(ProgramCliCommand::SetBufferAuthority { @@ -1229,7 +1251,25 @@ fn test_cli_program_set_buffer_authority() { panic!("not a buffer account"); } - // Set authority to buffer + // Attempt to deploy program from buffer using previous authority (should fail) + config.signers = vec![&keypair, &buffer_keypair]; + config.command = CliCommand::Program(ProgramCliCommand::Deploy { + program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, + program_signer_index: None, + program_pubkey: None, + buffer_signer_index: None, + buffer_pubkey: Some(buffer_keypair.pubkey()), + allow_excessive_balance: false, + upgrade_authority_signer_index: 0, + is_final: false, + max_len: None, + skip_fee_check: false, + }); + config.output_format = OutputFormat::JsonCompact; + process_command(&config).unwrap_err(); + + // Set buffer authority to the buffer identity (it's a common way for program devs to do so) config.signers = vec![&keypair, &new_buffer_authority]; config.command = CliCommand::Program(ProgramCliCommand::SetBufferAuthority { buffer_pubkey: buffer_keypair.pubkey(), @@ -1255,6 +1295,24 @@ fn test_cli_program_set_buffer_authority() { } else { panic!("not a buffer account"); } + + // Deploy from buffer using proper(new) buffer authority + config.signers = vec![&keypair, &buffer_keypair]; + config.command = CliCommand::Program(ProgramCliCommand::Deploy { + program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, + program_signer_index: None, + program_pubkey: None, + buffer_signer_index: None, + buffer_pubkey: Some(buffer_keypair.pubkey()), + allow_excessive_balance: false, + upgrade_authority_signer_index: 1, + is_final: false, + max_len: None, + skip_fee_check: false, + }); + config.output_format = OutputFormat::JsonCompact; + process_command(&config).unwrap(); } #[test] @@ -1302,6 +1360,7 @@ fn test_cli_program_mismatch_buffer_authority() { config.signers = vec![&keypair, &buffer_keypair, &buffer_authority]; config.command = CliCommand::Program(ProgramCliCommand::WriteBuffer { 
program_location: noop_path.to_str().unwrap().to_string(), + fee_payer_signer_index: 0, buffer_signer_index: Some(1), buffer_pubkey: Some(buffer_keypair.pubkey()), buffer_authority_signer_index: 2, @@ -1321,6 +1380,7 @@ fn test_cli_program_mismatch_buffer_authority() { config.signers = vec![&keypair, &upgrade_authority]; config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, program_signer_index: None, program_pubkey: None, buffer_signer_index: None, @@ -1337,6 +1397,7 @@ fn test_cli_program_mismatch_buffer_authority() { config.signers = vec![&keypair, &buffer_authority]; config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, program_signer_index: None, program_pubkey: None, buffer_signer_index: None, @@ -1398,6 +1459,7 @@ fn test_cli_program_show() { config.signers = vec![&keypair, &buffer_keypair, &authority_keypair]; config.command = CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: noop_path.to_str().unwrap().to_string(), + fee_payer_signer_index: 0, buffer_signer_index: Some(1), buffer_pubkey: Some(buffer_keypair.pubkey()), buffer_authority_signer_index: 2, @@ -1454,6 +1516,7 @@ fn test_cli_program_show() { config.signers = vec![&keypair, &authority_keypair, &program_keypair]; config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(noop_path.to_str().unwrap().to_string()), + fee_payer_signer_index: 0, program_signer_index: Some(2), program_pubkey: Some(program_keypair.pubkey()), buffer_signer_index: None, @@ -1585,6 +1648,7 @@ fn test_cli_program_dump() { config.signers = vec![&keypair, &buffer_keypair, &authority_keypair]; config.command = CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: noop_path.to_str().unwrap().to_string(), + fee_payer_signer_index: 0, buffer_signer_index: Some(1), buffer_pubkey: Some(buffer_keypair.pubkey()), buffer_authority_signer_index: 2, diff --git a/client-test/Cargo.toml b/client-test/Cargo.toml index 5a88e4d114fb84..514d99ada1ca10 100644 --- a/client-test/Cargo.toml +++ b/client-test/Cargo.toml @@ -37,6 +37,7 @@ tungstenite = { workspace = true, features = ["rustls-tls-webpki-roots"] } [dev-dependencies] solana-logger = { workspace = true } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/src/connection_cache.rs b/client/src/connection_cache.rs index 44673c06f4d087..36820ba44abafb 100644 --- a/client/src/connection_cache.rs +++ b/client/src/connection_cache.rs @@ -9,7 +9,10 @@ use { }, }, solana_quic_client::{QuicConfig, QuicConnectionManager, QuicPool}, - solana_sdk::{pubkey::Pubkey, signature::Keypair, transport::Result as TransportResult}, + solana_sdk::{ + pubkey::Pubkey, quic::NotifyKeyUpdate, signature::Keypair, + transport::Result as TransportResult, + }, solana_streamer::streamer::StakedNodes, solana_udp_client::{UdpConfig, UdpConnectionManager, UdpPool}, std::{ @@ -43,6 +46,15 @@ pub enum NonblockingClientConnection { Udp(Arc<::NonblockingClientConnection>), } +impl NotifyKeyUpdate for ConnectionCache { + fn update_key(&self, key: &Keypair) -> Result<(), Box> { + match self { + Self::Udp(_) => Ok(()), + Self::Quic(backend) => backend.update_key(key), + } + } +} + impl ConnectionCache { pub fn new(name: &'static str) -> Self { if 
DEFAULT_CONNECTION_CACHE_USE_QUIC { @@ -217,7 +229,8 @@ mod tests { crossbeam_channel::unbounded, solana_sdk::{net::DEFAULT_TPU_COALESCE, signature::Keypair}, solana_streamer::{ - nonblocking::quic::DEFAULT_WAIT_FOR_CHUNK_TIMEOUT, streamer::StakedNodes, + nonblocking::quic::DEFAULT_WAIT_FOR_CHUNK_TIMEOUT, quic::SpawnServerResult, + streamer::StakedNodes, }, std::{ net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket}, @@ -245,7 +258,11 @@ mod tests { let staked_nodes = Arc::new(RwLock::new(StakedNodes::default())); - let (response_recv_endpoint, response_recv_thread) = solana_streamer::quic::spawn_server( + let SpawnServerResult { + endpoint: response_recv_endpoint, + thread: response_recv_thread, + key_updater: _, + } = solana_streamer::quic::spawn_server( "quic_streamer_test", response_recv_socket, &keypair2, diff --git a/connection-cache/src/connection_cache.rs b/connection-cache/src/connection_cache.rs index a674dccd7020fb..eed6991abf1b5a 100644 --- a/connection-cache/src/connection_cache.rs +++ b/connection-cache/src/connection_cache.rs @@ -9,7 +9,7 @@ use { log::*, rand::{thread_rng, Rng}, solana_measure::measure::Measure, - solana_sdk::timing::AtomicInterval, + solana_sdk::{signature::Keypair, timing::AtomicInterval}, std::{ net::SocketAddr, sync::{atomic::Ordering, Arc, RwLock}, @@ -38,6 +38,7 @@ pub trait ConnectionManager: Send + Sync + 'static { fn new_connection_pool(&self) -> Self::ConnectionPool; fn new_connection_config(&self) -> Self::NewConnectionConfig; + fn update_key(&self, _key: &Keypair) -> Result<(), Box>; } pub struct ConnectionCache< @@ -137,6 +138,11 @@ where .unwrap() } + pub fn update_key(&self, key: &Keypair) -> Result<(), Box> { + let mut map = self.map.write().unwrap(); + map.clear(); + self.connection_manager.update_key(key) + } /// Create a lazy connection object under the exclusive lock of the cache map if there is not /// enough used connections in the connection pool for the specified address. /// Returns CreateConnectionResult. 
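[Reviewer note, not part of the diff] A minimal sketch of how a caller might drive the new key-update plumbing introduced above: `ConnectionCache::update_key` (via the `NotifyKeyUpdate` trait imported from `solana_sdk::quic`) clears the pooled connections and re-keys the QUIC backend, while the UDP variant is a no-op. The boxed error type is assumed to be `dyn std::error::Error`, since the generic parameter is not shown in the hunk.

use {
    solana_client::connection_cache::ConnectionCache,
    solana_sdk::{quic::NotifyKeyUpdate, signature::Keypair},
};

// Rotate the client identity at runtime. Per the match in the hunk above,
// QUIC re-keys its endpoint configuration and UDP simply returns Ok(());
// the generic connection-cache layer also drops all cached connections so
// new ones are established with the new keypair.
fn rotate_client_identity(
    cache: &ConnectionCache,
    new_identity: &Keypair,
) -> Result<(), Box<dyn std::error::Error>> {
    cache.update_key(new_identity)
}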
@@ -636,6 +642,10 @@ mod tests { fn new_connection_config(&self) -> Self::NewConnectionConfig { MockUdpConfig::new().unwrap() } + + fn update_key(&self, _key: &Keypair) -> Result<(), Box> { + Ok(()) + } } impl BlockingClientConnection for MockUdpConnection { diff --git a/core/src/admin_rpc_post_init.rs b/core/src/admin_rpc_post_init.rs index 110e1f5aa42b66..3acd0f84336113 100644 --- a/core/src/admin_rpc_post_init.rs +++ b/core/src/admin_rpc_post_init.rs @@ -1,7 +1,7 @@ use { solana_gossip::cluster_info::ClusterInfo, solana_runtime::bank_forks::BankForks, - solana_sdk::pubkey::Pubkey, + solana_sdk::{pubkey::Pubkey, quic::NotifyKeyUpdate}, std::{ collections::HashSet, sync::{Arc, RwLock}, @@ -14,4 +14,5 @@ pub struct AdminRpcRequestMetadataPostInit { pub bank_forks: Arc>, pub vote_account: Pubkey, pub repair_whitelist: Arc>>, + pub notifies: Vec>, } diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index e4e5f3125ed136..158614b32d7963 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -556,9 +556,11 @@ impl BankingStage { let (finished_work_sender, finished_work_receiver) = unbounded(); // Spawn the worker threads + let mut worker_metrics = Vec::with_capacity(num_workers as usize); for (index, work_receiver) in work_receivers.into_iter().enumerate() { let id = (index as u32).saturating_add(NUM_VOTE_PROCESSING_THREADS); let consume_worker = ConsumeWorker::new( + id, work_receiver, Consumer::new( committer.clone(), @@ -570,6 +572,7 @@ impl BankingStage { poh_recorder.read().unwrap().new_leader_bank_notifier(), ); + worker_metrics.push(consume_worker.metrics_handle()); bank_thread_hdls.push( Builder::new() .name(format!("solCoWorker{id:02}")) @@ -590,6 +593,7 @@ impl BankingStage { packet_deserializer, bank_forks, scheduler, + worker_metrics, ); Builder::new() .name("solBnkTxSched".to_string()) @@ -797,9 +801,7 @@ mod tests { }, poh_service::PohService, }, - solana_runtime::{ - bank::Bank, bank_forks::BankForks, genesis_utils::bootstrap_validator_stake_lamports, - }, + solana_runtime::{bank::Bank, genesis_utils::bootstrap_validator_stake_lamports}, solana_sdk::{ hash::Hash, poh_config::PohConfig, @@ -835,9 +837,7 @@ mod tests { #[test] fn test_banking_stage_shutdown1() { let genesis_config = create_genesis_config(2).genesis_config; - let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); - let bank_forks = BankForks::new_rw_arc(bank); - let bank = bank_forks.read().unwrap().get(0).unwrap(); + let (bank, bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); let banking_tracer = BankingTracer::new_disabled(); let (non_vote_sender, non_vote_receiver) = banking_tracer.create_channel_non_vote(); let (tpu_vote_sender, tpu_vote_receiver) = banking_tracer.create_channel_tpu_vote(); @@ -887,9 +887,7 @@ mod tests { } = create_genesis_config(2); genesis_config.ticks_per_slot = 4; let num_extra_ticks = 2; - let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); - let bank_forks = BankForks::new_rw_arc(bank); - let bank = bank_forks.read().unwrap().get(0).unwrap(); + let (bank, bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); let start_hash = bank.last_blockhash(); let banking_tracer = BankingTracer::new_disabled(); let (non_vote_sender, non_vote_receiver) = banking_tracer.create_channel_non_vote(); @@ -966,9 +964,7 @@ mod tests { mint_keypair, .. 
} = create_slow_genesis_config(10); - let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); - let bank_forks = BankForks::new_rw_arc(bank); - let bank = bank_forks.read().unwrap().get(0).unwrap(); + let (bank, bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); let start_hash = bank.last_blockhash(); let banking_tracer = BankingTracer::new_disabled(); let (non_vote_sender, non_vote_receiver) = banking_tracer.create_channel_non_vote(); @@ -1053,7 +1049,7 @@ mod tests { drop(poh_recorder); let mut blockhash = start_hash; - let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; bank.process_transaction(&fund_tx).unwrap(); //receive entries + ticks loop { @@ -1148,9 +1144,7 @@ mod tests { let entry_receiver = { // start a banking_stage to eat verified receiver - let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); - let bank_forks = BankForks::new_rw_arc(bank); - let bank = bank_forks.read().unwrap().get(0).unwrap(); + let (bank, bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); let blockstore = Arc::new( Blockstore::open(ledger_path.path()) .expect("Expected to be able to open database ledger"), @@ -1199,7 +1193,7 @@ mod tests { .map(|(_bank, (entry, _tick_height))| entry) .collect(); - let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); + let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; for entry in entries { bank.process_entry_transactions(entry.transactions) .iter() @@ -1223,7 +1217,7 @@ mod tests { mint_keypair, .. } = create_genesis_config(10_000); - let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; let ledger_path = get_tmp_ledger_path_auto_delete!(); { let blockstore = Blockstore::open(ledger_path.path()) @@ -1332,9 +1326,7 @@ mod tests { mint_keypair, .. 
} = create_slow_genesis_config(10000); - let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); - let bank_forks = BankForks::new_rw_arc(bank); - let bank = bank_forks.read().unwrap().get(0).unwrap(); + let (bank, bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); let start_hash = bank.last_blockhash(); let banking_tracer = BankingTracer::new_disabled(); let (non_vote_sender, non_vote_receiver) = banking_tracer.create_channel_non_vote(); diff --git a/core/src/banking_stage/consume_worker.rs b/core/src/banking_stage/consume_worker.rs index f18f3da5d16acd..32a5d81e6e9313 100644 --- a/core/src/banking_stage/consume_worker.rs +++ b/core/src/banking_stage/consume_worker.rs @@ -1,12 +1,21 @@ use { super::{ consumer::{Consumer, ExecuteAndCommitTransactionsOutput, ProcessTransactionBatchOutput}, + leader_slot_timing_metrics::LeaderExecuteAndCommitTimings, scheduler_messages::{ConsumeWork, FinishedConsumeWork}, }, crossbeam_channel::{Receiver, RecvError, SendError, Sender}, + solana_accounts_db::transaction_error_metrics::TransactionErrorMetrics, solana_poh::leader_bank_notifier::LeaderBankNotifier, solana_runtime::bank::Bank, - std::{sync::Arc, time::Duration}, + solana_sdk::timing::AtomicInterval, + std::{ + sync::{ + atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, + Arc, + }, + time::Duration, + }, thiserror::Error, }; @@ -24,11 +33,13 @@ pub(crate) struct ConsumeWorker { consumed_sender: Sender, leader_bank_notifier: Arc, + metrics: Arc, } #[allow(dead_code)] impl ConsumeWorker { pub fn new( + id: u32, consume_receiver: Receiver, consumer: Consumer, consumed_sender: Sender, @@ -39,9 +50,14 @@ impl ConsumeWorker { consumer, consumed_sender, leader_bank_notifier, + metrics: Arc::new(ConsumeWorkerMetrics::new(id)), } } + pub fn metrics_handle(&self) -> Arc { + self.metrics.clone() + } + pub fn run(self) -> Result<(), ConsumeWorkerError> { loop { let work = self.consume_receiver.recv()?; @@ -70,22 +86,20 @@ impl ConsumeWorker { /// Consume a single batch. fn consume(&self, bank: &Arc, work: ConsumeWork) -> Result<(), ConsumeWorkerError> { - let ProcessTransactionBatchOutput { - execute_and_commit_transactions_output: - ExecuteAndCommitTransactionsOutput { - retryable_transaction_indexes, - .. - }, - .. - } = self.consumer.process_and_record_aged_transactions( + let output = self.consumer.process_and_record_aged_transactions( bank, &work.transactions, &work.max_age_slots, ); + self.metrics.update_for_consume(&output); + self.metrics.has_data.store(true, Ordering::Relaxed); + self.consumed_sender.send(FinishedConsumeWork { work, - retryable_indexes: retryable_transaction_indexes, + retryable_indexes: output + .execute_and_commit_transactions_output + .retryable_transaction_indexes, })?; Ok(()) } @@ -107,7 +121,17 @@ impl ConsumeWorker { /// Send transactions back to scheduler as retryable. 
fn retry(&self, work: ConsumeWork) -> Result<(), ConsumeWorkerError> { - let retryable_indexes = (0..work.transactions.len()).collect(); + let retryable_indexes: Vec<_> = (0..work.transactions.len()).collect(); + let num_retryable = retryable_indexes.len(); + self.metrics + .count_metrics + .retryable_transaction_count + .fetch_add(num_retryable, Ordering::Relaxed); + self.metrics + .count_metrics + .retryable_expired_bank_count + .fetch_add(num_retryable, Ordering::Relaxed); + self.metrics.has_data.store(true, Ordering::Relaxed); self.consumed_sender.send(FinishedConsumeWork { work, retryable_indexes, @@ -122,6 +146,460 @@ fn try_drain_iter(work: T, receiver: &Receiver) -> impl Iterator std::iter::once(work).chain(receiver.try_iter()) } +/// Metrics tracking number of packets processed by the consume worker. +/// These are atomic, and intended to be reported by the scheduling thread +/// since the consume worker thread is sleeping unless there is work to be +/// done. +pub(crate) struct ConsumeWorkerMetrics { + id: u32, + interval: AtomicInterval, + has_data: AtomicBool, + + count_metrics: ConsumeWorkerCountMetrics, + error_metrics: ConsumeWorkerTransactionErrorMetrics, + timing_metrics: ConsumeWorkerTimingMetrics, +} + +impl ConsumeWorkerMetrics { + /// Report and reset metrics iff the interval has elapsed and the worker did some work. + pub fn maybe_report_and_reset(&self) { + const REPORT_INTERVAL_MS: u64 = 1000; + if self.interval.should_update(REPORT_INTERVAL_MS) + && self.has_data.swap(false, Ordering::Relaxed) + { + self.count_metrics.report_and_reset(self.id); + self.timing_metrics.report_and_reset(self.id); + self.error_metrics.report_and_reset(self.id); + } + } + + fn new(id: u32) -> Self { + Self { + id, + interval: AtomicInterval::default(), + has_data: AtomicBool::new(false), + count_metrics: ConsumeWorkerCountMetrics::default(), + error_metrics: ConsumeWorkerTransactionErrorMetrics::default(), + timing_metrics: ConsumeWorkerTimingMetrics::default(), + } + } + + fn update_for_consume( + &self, + ProcessTransactionBatchOutput { + cost_model_throttled_transactions_count, + cost_model_us, + execute_and_commit_transactions_output, + }: &ProcessTransactionBatchOutput, + ) { + self.count_metrics + .cost_model_throttled_transactions_count + .fetch_add(*cost_model_throttled_transactions_count, Ordering::Relaxed); + self.timing_metrics + .cost_model_us + .fetch_add(*cost_model_us, Ordering::Relaxed); + self.update_on_execute_and_commit_transactions_output( + execute_and_commit_transactions_output, + ); + } + + fn update_on_execute_and_commit_transactions_output( + &self, + ExecuteAndCommitTransactionsOutput { + transactions_attempted_execution_count, + executed_transactions_count, + executed_with_successful_result_count, + retryable_transaction_indexes, + execute_and_commit_timings, + error_counters, + .. 
+ }: &ExecuteAndCommitTransactionsOutput, + ) { + self.count_metrics + .transactions_attempted_execution_count + .fetch_add(*transactions_attempted_execution_count, Ordering::Relaxed); + self.count_metrics + .executed_transactions_count + .fetch_add(*executed_transactions_count, Ordering::Relaxed); + self.count_metrics + .executed_with_successful_result_count + .fetch_add(*executed_with_successful_result_count, Ordering::Relaxed); + self.count_metrics + .retryable_transaction_count + .fetch_add(retryable_transaction_indexes.len(), Ordering::Relaxed); + + self.update_on_execute_and_commit_timings(execute_and_commit_timings); + self.update_on_error_counters(error_counters); + } + + fn update_on_execute_and_commit_timings( + &self, + LeaderExecuteAndCommitTimings { + collect_balances_us, + load_execute_us, + freeze_lock_us, + record_us, + commit_us, + find_and_send_votes_us, + .. + }: &LeaderExecuteAndCommitTimings, + ) { + self.timing_metrics + .collect_balances_us + .fetch_add(*collect_balances_us, Ordering::Relaxed); + self.timing_metrics + .load_execute_us + .fetch_add(*load_execute_us, Ordering::Relaxed); + self.timing_metrics + .freeze_lock_us + .fetch_add(*freeze_lock_us, Ordering::Relaxed); + self.timing_metrics + .record_us + .fetch_add(*record_us, Ordering::Relaxed); + self.timing_metrics + .commit_us + .fetch_add(*commit_us, Ordering::Relaxed); + self.timing_metrics + .find_and_send_votes_us + .fetch_add(*find_and_send_votes_us, Ordering::Relaxed); + } + + fn update_on_error_counters( + &self, + TransactionErrorMetrics { + total, + account_in_use, + too_many_account_locks, + account_loaded_twice, + account_not_found, + blockhash_not_found, + blockhash_too_old, + call_chain_too_deep, + already_processed, + instruction_error, + insufficient_funds, + invalid_account_for_fee, + invalid_account_index, + invalid_program_for_execution, + not_allowed_during_cluster_maintenance, + invalid_writable_account, + invalid_rent_paying_account, + would_exceed_max_block_cost_limit, + would_exceed_max_account_cost_limit, + would_exceed_max_vote_cost_limit, + would_exceed_account_data_block_limit, + max_loaded_accounts_data_size_exceeded, + program_execution_temporarily_restricted, + }: &TransactionErrorMetrics, + ) { + self.error_metrics + .total + .fetch_add(*total, Ordering::Relaxed); + self.error_metrics + .account_in_use + .fetch_add(*account_in_use, Ordering::Relaxed); + self.error_metrics + .too_many_account_locks + .fetch_add(*too_many_account_locks, Ordering::Relaxed); + self.error_metrics + .account_loaded_twice + .fetch_add(*account_loaded_twice, Ordering::Relaxed); + self.error_metrics + .account_not_found + .fetch_add(*account_not_found, Ordering::Relaxed); + self.error_metrics + .blockhash_not_found + .fetch_add(*blockhash_not_found, Ordering::Relaxed); + self.error_metrics + .blockhash_too_old + .fetch_add(*blockhash_too_old, Ordering::Relaxed); + self.error_metrics + .call_chain_too_deep + .fetch_add(*call_chain_too_deep, Ordering::Relaxed); + self.error_metrics + .already_processed + .fetch_add(*already_processed, Ordering::Relaxed); + self.error_metrics + .instruction_error + .fetch_add(*instruction_error, Ordering::Relaxed); + self.error_metrics + .insufficient_funds + .fetch_add(*insufficient_funds, Ordering::Relaxed); + self.error_metrics + .invalid_account_for_fee + .fetch_add(*invalid_account_for_fee, Ordering::Relaxed); + self.error_metrics + .invalid_account_index + .fetch_add(*invalid_account_index, Ordering::Relaxed); + self.error_metrics + .invalid_program_for_execution + 
.fetch_add(*invalid_program_for_execution, Ordering::Relaxed); + self.error_metrics + .not_allowed_during_cluster_maintenance + .fetch_add(*not_allowed_during_cluster_maintenance, Ordering::Relaxed); + self.error_metrics + .invalid_writable_account + .fetch_add(*invalid_writable_account, Ordering::Relaxed); + self.error_metrics + .invalid_rent_paying_account + .fetch_add(*invalid_rent_paying_account, Ordering::Relaxed); + self.error_metrics + .would_exceed_max_block_cost_limit + .fetch_add(*would_exceed_max_block_cost_limit, Ordering::Relaxed); + self.error_metrics + .would_exceed_max_account_cost_limit + .fetch_add(*would_exceed_max_account_cost_limit, Ordering::Relaxed); + self.error_metrics + .would_exceed_max_vote_cost_limit + .fetch_add(*would_exceed_max_vote_cost_limit, Ordering::Relaxed); + self.error_metrics + .would_exceed_account_data_block_limit + .fetch_add(*would_exceed_account_data_block_limit, Ordering::Relaxed); + self.error_metrics + .max_loaded_accounts_data_size_exceeded + .fetch_add(*max_loaded_accounts_data_size_exceeded, Ordering::Relaxed); + self.error_metrics + .program_execution_temporarily_restricted + .fetch_add(*program_execution_temporarily_restricted, Ordering::Relaxed); + } +} + +#[derive(Default)] +struct ConsumeWorkerCountMetrics { + transactions_attempted_execution_count: AtomicUsize, + executed_transactions_count: AtomicUsize, + executed_with_successful_result_count: AtomicUsize, + retryable_transaction_count: AtomicUsize, + retryable_expired_bank_count: AtomicUsize, + cost_model_throttled_transactions_count: AtomicUsize, +} + +impl ConsumeWorkerCountMetrics { + fn report_and_reset(&self, id: u32) { + datapoint_info!( + "banking_stage_worker_counts", + ("id", id, i64), + ( + "transactions_attempted_execution_count", + self.transactions_attempted_execution_count + .swap(0, Ordering::Relaxed), + i64 + ), + ( + "executed_transactions_count", + self.executed_transactions_count.swap(0, Ordering::Relaxed), + i64 + ), + ( + "executed_with_successful_result_count", + self.executed_with_successful_result_count + .swap(0, Ordering::Relaxed), + i64 + ), + ( + "retryable_transaction_count", + self.retryable_transaction_count.swap(0, Ordering::Relaxed), + i64 + ), + ( + "retryable_expired_bank_count", + self.retryable_expired_bank_count.swap(0, Ordering::Relaxed), + i64 + ), + ( + "cost_model_throttled_transactions_count", + self.cost_model_throttled_transactions_count + .swap(0, Ordering::Relaxed), + i64 + ), + ); + } +} + +#[derive(Default)] +struct ConsumeWorkerTimingMetrics { + cost_model_us: AtomicU64, + collect_balances_us: AtomicU64, + load_execute_us: AtomicU64, + freeze_lock_us: AtomicU64, + record_us: AtomicU64, + commit_us: AtomicU64, + find_and_send_votes_us: AtomicU64, +} + +impl ConsumeWorkerTimingMetrics { + fn report_and_reset(&self, id: u32) { + datapoint_info!( + "banking_stage_worker_timing", + ("id", id, i64), + ( + "cost_model_us", + self.cost_model_us.swap(0, Ordering::Relaxed), + i64 + ), + ( + "collect_balances_us", + self.collect_balances_us.swap(0, Ordering::Relaxed), + i64 + ), + ( + "load_execute_us", + self.load_execute_us.swap(0, Ordering::Relaxed), + i64 + ), + ( + "freeze_lock_us", + self.freeze_lock_us.swap(0, Ordering::Relaxed), + i64 + ), + ("record_us", self.record_us.swap(0, Ordering::Relaxed), i64), + ("commit_us", self.commit_us.swap(0, Ordering::Relaxed), i64), + ( + "find_and_send_votes_us", + self.find_and_send_votes_us.swap(0, Ordering::Relaxed), + i64 + ), + ); + } +} + +#[derive(Default)] +struct 
ConsumeWorkerTransactionErrorMetrics { + total: AtomicUsize, + account_in_use: AtomicUsize, + too_many_account_locks: AtomicUsize, + account_loaded_twice: AtomicUsize, + account_not_found: AtomicUsize, + blockhash_not_found: AtomicUsize, + blockhash_too_old: AtomicUsize, + call_chain_too_deep: AtomicUsize, + already_processed: AtomicUsize, + instruction_error: AtomicUsize, + insufficient_funds: AtomicUsize, + invalid_account_for_fee: AtomicUsize, + invalid_account_index: AtomicUsize, + invalid_program_for_execution: AtomicUsize, + not_allowed_during_cluster_maintenance: AtomicUsize, + invalid_writable_account: AtomicUsize, + invalid_rent_paying_account: AtomicUsize, + would_exceed_max_block_cost_limit: AtomicUsize, + would_exceed_max_account_cost_limit: AtomicUsize, + would_exceed_max_vote_cost_limit: AtomicUsize, + would_exceed_account_data_block_limit: AtomicUsize, + max_loaded_accounts_data_size_exceeded: AtomicUsize, + program_execution_temporarily_restricted: AtomicUsize, +} + +impl ConsumeWorkerTransactionErrorMetrics { + fn report_and_reset(&self, id: u32) { + datapoint_info!( + "banking_stage_worker_error_metrics", + ("id", id, i64), + ("total", self.total.swap(0, Ordering::Relaxed), i64), + ( + "account_in_use", + self.account_in_use.swap(0, Ordering::Relaxed), + i64 + ), + ( + "too_many_account_locks", + self.too_many_account_locks.swap(0, Ordering::Relaxed), + i64 + ), + ( + "account_loaded_twice", + self.account_loaded_twice.swap(0, Ordering::Relaxed), + i64 + ), + ( + "account_not_found", + self.account_not_found.swap(0, Ordering::Relaxed), + i64 + ), + ( + "blockhash_not_found", + self.blockhash_not_found.swap(0, Ordering::Relaxed), + i64 + ), + ( + "blockhash_too_old", + self.blockhash_too_old.swap(0, Ordering::Relaxed), + i64 + ), + ( + "call_chain_too_deep", + self.call_chain_too_deep.swap(0, Ordering::Relaxed), + i64 + ), + ( + "already_processed", + self.already_processed.swap(0, Ordering::Relaxed), + i64 + ), + ( + "instruction_error", + self.instruction_error.swap(0, Ordering::Relaxed), + i64 + ), + ( + "insufficient_funds", + self.insufficient_funds.swap(0, Ordering::Relaxed), + i64 + ), + ( + "invalid_account_for_fee", + self.invalid_account_for_fee.swap(0, Ordering::Relaxed), + i64 + ), + ( + "invalid_account_index", + self.invalid_account_index.swap(0, Ordering::Relaxed), + i64 + ), + ( + "invalid_program_for_execution", + self.invalid_program_for_execution + .swap(0, Ordering::Relaxed), + i64 + ), + ( + "not_allowed_during_cluster_maintenance", + self.not_allowed_during_cluster_maintenance + .swap(0, Ordering::Relaxed), + i64 + ), + ( + "invalid_writable_account", + self.invalid_writable_account.swap(0, Ordering::Relaxed), + i64 + ), + ( + "invalid_rent_paying_account", + self.invalid_rent_paying_account.swap(0, Ordering::Relaxed), + i64 + ), + ( + "would_exceed_max_block_cost_limit", + self.would_exceed_max_block_cost_limit + .swap(0, Ordering::Relaxed), + i64 + ), + ( + "would_exceed_max_account_cost_limit", + self.would_exceed_max_account_cost_limit + .swap(0, Ordering::Relaxed), + i64 + ), + ( + "would_exceed_max_vote_cost_limit", + self.would_exceed_max_vote_cost_limit + .swap(0, Ordering::Relaxed), + i64 + ), + ); + } +} + #[cfg(test)] mod tests { use { @@ -138,7 +616,7 @@ mod tests { get_tmp_ledger_path_auto_delete, leader_schedule_cache::LeaderScheduleCache, }, solana_poh::poh_recorder::{PohRecorder, WorkingBankEntry}, - solana_runtime::{bank_forks::BankForks, prioritization_fee_cache::PrioritizationFeeCache}, + 
solana_runtime::prioritization_fee_cache::PrioritizationFeeCache, solana_sdk::{ genesis_config::GenesisConfig, poh_config::PohConfig, pubkey::Pubkey, signature::Keypair, system_transaction, @@ -173,9 +651,7 @@ mod tests { mint_keypair, .. } = create_slow_genesis_config(10_000); - let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); - let bank_forks = BankForks::new_rw_arc(bank); - let bank = bank_forks.read().unwrap().working_bank(); + let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()) @@ -207,6 +683,7 @@ mod tests { let (consume_sender, consume_receiver) = unbounded(); let (consumed_sender, consumed_receiver) = unbounded(); let worker = ConsumeWorker::new( + 0, consume_receiver, consumer, consumed_sender, diff --git a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs index 19a3aa515a0e8b..d8d45079553a16 100644 --- a/core/src/banking_stage/consumer.rs +++ b/core/src/banking_stage/consumer.rs @@ -41,29 +41,29 @@ pub const TARGET_NUM_TRANSACTIONS_PER_BATCH: usize = 64; pub struct ProcessTransactionBatchOutput { // The number of transactions filtered out by the cost model - cost_model_throttled_transactions_count: usize, + pub(crate) cost_model_throttled_transactions_count: usize, // Amount of time spent running the cost model - cost_model_us: u64, + pub(crate) cost_model_us: u64, pub execute_and_commit_transactions_output: ExecuteAndCommitTransactionsOutput, } pub struct ExecuteAndCommitTransactionsOutput { // Total number of transactions that were passed as candidates for execution - transactions_attempted_execution_count: usize, + pub(crate) transactions_attempted_execution_count: usize, // The number of transactions of that were executed. See description of in `ProcessTransactionsSummary` // for possible outcomes of execution. - executed_transactions_count: usize, + pub(crate) executed_transactions_count: usize, // Total number of the executed transactions that returned success/not // an error. - executed_with_successful_result_count: usize, + pub(crate) executed_with_successful_result_count: usize, // Transactions that either were not executed, or were executed and failed to be committed due // to the block ending. pub(crate) retryable_transaction_indexes: Vec, // A result that indicates whether transactions were successfully // committed into the Poh stream. pub commit_transactions_result: Result, PohRecorderError>, - execute_and_commit_timings: LeaderExecuteAndCommitTimings, - error_counters: TransactionErrorMetrics, + pub(crate) execute_and_commit_timings: LeaderExecuteAndCommitTimings, + pub(crate) error_counters: TransactionErrorMetrics, } pub struct Consumer { @@ -881,7 +881,7 @@ mod tests { } = &genesis_config_info; let blockstore = Blockstore::open(ledger_path).expect("Expected to be able to open database ledger"); - let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(genesis_config)); + let bank = Bank::new_no_wallclock_throttle_for_tests(genesis_config).0; let exit = Arc::new(AtomicBool::default()); let (poh_recorder, entry_receiver, record_receiver) = PohRecorder::new( bank.tick_height(), @@ -938,7 +938,7 @@ mod tests { mint_keypair, .. 
} = create_slow_genesis_config(10_000); - let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; let pubkey = solana_sdk::pubkey::new_rand(); let transactions = sanitize_transactions(vec![system_transaction::transfer( @@ -1067,7 +1067,7 @@ mod tests { mint_keypair, .. } = create_slow_genesis_config(10_000); - let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; let pubkey = solana_sdk::pubkey::new_rand(); let transactions = { @@ -1160,11 +1160,12 @@ mod tests { mint_keypair, .. } = create_slow_genesis_config(10_000); - let mut bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); + let mut bank = Bank::new_for_tests(&genesis_config); + bank.ns_per_slot = std::u128::MAX; if !apply_cost_tracker_during_replay_enabled { bank.deactivate_feature(&feature_set::apply_cost_tracker_during_replay::id()); } - let bank = Arc::new(bank); + let bank = bank.wrap_with_bank_forks_for_tests().0; let pubkey = solana_sdk::pubkey::new_rand(); let ledger_path = get_tmp_ledger_path_auto_delete!(); @@ -1312,7 +1313,7 @@ mod tests { mint_keypair, .. } = create_slow_genesis_config(10_000); - let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; let pubkey = solana_sdk::pubkey::new_rand(); let pubkey1 = solana_sdk::pubkey::new_rand(); @@ -1390,7 +1391,7 @@ mod tests { mint_keypair, .. } = create_slow_genesis_config(lamports); - let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; // set cost tracker limits to MAX so it will not filter out TXs bank.write_cost_tracker() .unwrap() @@ -1451,7 +1452,7 @@ mod tests { mint_keypair, .. } = create_slow_genesis_config(10_000); - let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; // set cost tracker limits to MAX so it will not filter out TXs bank.write_cost_tracker() .unwrap() @@ -1510,7 +1511,7 @@ mod tests { mint_keypair, .. } = create_slow_genesis_config(10_000); - let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; let pubkey = solana_sdk::pubkey::new_rand(); @@ -1592,7 +1593,7 @@ mod tests { } = create_slow_genesis_config(solana_sdk::native_token::sol_to_lamports(1000.0)); genesis_config.rent.lamports_per_byte_year = 50; genesis_config.rent.exemption_threshold = 2.0; - let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; let pubkey = solana_sdk::pubkey::new_rand(); let pubkey1 = solana_sdk::pubkey::new_rand(); let keypair1 = Keypair::new(); @@ -1723,14 +1724,19 @@ mod tests { mint_keypair, .. 
} = create_slow_genesis_config(10_000); - let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + let (bank, bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); let keypair = Keypair::new(); let address_table_key = Pubkey::new_unique(); let address_table_state = generate_new_address_lookup_table(None, 2); store_address_lookup_table(&bank, address_table_key, address_table_state); - let bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::new_unique(), 1)); + let new_bank = Bank::new_from_parent(bank, &Pubkey::new_unique(), 2); + let bank = bank_forks + .write() + .unwrap() + .insert(new_bank) + .clone_without_scheduler(); let message = VersionedMessage::V0(v0::Message { header: MessageHeader { num_required_signatures: 1, diff --git a/core/src/banking_stage/decision_maker.rs b/core/src/banking_stage/decision_maker.rs index 9b490541833f73..6ad2c3042b254f 100644 --- a/core/src/banking_stage/decision_maker.rs +++ b/core/src/banking_stage/decision_maker.rs @@ -148,7 +148,7 @@ mod tests { #[test] fn test_make_consume_or_forward_decision() { let genesis_config = create_genesis_config(2).genesis_config; - let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; let ledger_path = temp_dir(); let blockstore = Arc::new(Blockstore::open(ledger_path.as_path()).unwrap()); let (exit, poh_recorder, poh_service, _entry_receiver) = diff --git a/core/src/banking_stage/forward_worker.rs b/core/src/banking_stage/forward_worker.rs index c13b8c426378be..255f1b8e01be99 100644 --- a/core/src/banking_stage/forward_worker.rs +++ b/core/src/banking_stage/forward_worker.rs @@ -97,7 +97,7 @@ mod tests { }, solana_perf::packet::to_packet_batches, solana_poh::poh_recorder::{PohRecorder, WorkingBankEntry}, - solana_runtime::{bank::Bank, bank_forks::BankForks}, + solana_runtime::bank::Bank, solana_sdk::{ genesis_config::GenesisConfig, poh_config::PohConfig, pubkey::Pubkey, signature::Keypair, system_transaction, @@ -128,9 +128,7 @@ mod tests { mint_keypair, .. } = create_slow_genesis_config(10_000); - let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); - let bank_forks = BankForks::new_rw_arc(bank); - let bank = bank_forks.read().unwrap().working_bank(); + let (bank, bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()) diff --git a/core/src/banking_stage/forwarder.rs b/core/src/banking_stage/forwarder.rs index 1cb656f0ddc701..1092e5c57b07cb 100644 --- a/core/src/banking_stage/forwarder.rs +++ b/core/src/banking_stage/forwarder.rs @@ -306,9 +306,7 @@ mod tests { create_slow_genesis_config_with_leader(10_000, &validator_keypair.pubkey()); let GenesisConfigInfo { genesis_config, .. } = &genesis_config_info; - let bank: Bank = Bank::new_no_wallclock_throttle_for_tests(genesis_config); - let bank_forks = BankForks::new_rw_arc(bank); - let bank = bank_forks.read().unwrap().working_bank(); + let (bank, bank_forks) = Bank::new_no_wallclock_throttle_for_tests(genesis_config); let ledger_path = TempDir::new().unwrap(); let blockstore = Arc::new( diff --git a/core/src/banking_stage/multi_iterator_scanner.rs b/core/src/banking_stage/multi_iterator_scanner.rs index 866470e8e4c68c..fe1b5906ba1b6d 100644 --- a/core/src/banking_stage/multi_iterator_scanner.rs +++ b/core/src/banking_stage/multi_iterator_scanner.rs @@ -1,6 +1,6 @@ //! 
Provides an iterator interface that create non-conflicting batches of elements to process. //! -//! The problem that this structure is targetting is as following: +//! The problem that this structure is targeting is as following: //! We have a slice of transactions we want to process in batches where transactions //! in the same batch do not conflict with each other. This allows us process them in //! parallel. The original slice is ordered by priority, and it is often the case diff --git a/core/src/banking_stage/read_write_account_set.rs b/core/src/banking_stage/read_write_account_set.rs index 7a2117675b31d0..b9d65ff4756857 100644 --- a/core/src/banking_stage/read_write_account_set.rs +++ b/core/src/banking_stage/read_write_account_set.rs @@ -173,7 +173,7 @@ mod tests { fn create_test_bank() -> Arc { let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); - Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)) + Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0 } // Helper function (could potentially use test_case in future). diff --git a/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs b/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs index 23e15562e1ae54..bfdead250996e1 100644 --- a/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs +++ b/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs @@ -15,7 +15,11 @@ use { crossbeam_channel::{Receiver, Sender, TryRecvError}, itertools::izip, prio_graph::{AccessKind, PrioGraph}, - solana_sdk::{pubkey::Pubkey, slot_history::Slot, transaction::SanitizedTransaction}, + solana_measure::measure_us, + solana_sdk::{ + pubkey::Pubkey, saturating_add_assign, slot_history::Slot, + transaction::SanitizedTransaction, + }, std::collections::HashMap, }; @@ -43,7 +47,9 @@ impl PrioGraphScheduler { } /// Schedule transactions from the given `TransactionStateContainer` to be consumed by the - /// worker threads. Returns the number of transactions scheduled, or an error. + /// worker threads. Returns summary of scheduling, or an error. + /// `filter` is used to filter out transactions that should be skipped and dropped, and + /// should set `false` for transactions that should be dropped, and `true` otherwise. /// /// Uses a `PrioGraph` to perform look-ahead during the scheduling of transactions. /// This, combined with internal tracking of threads' in-flight transactions, allows @@ -52,7 +58,8 @@ impl PrioGraphScheduler { pub(crate) fn schedule( &mut self, container: &mut TransactionStateContainer, - ) -> Result { + filter: impl Fn(&[&SanitizedTransaction], &mut [bool]), + ) -> Result { let num_threads = self.consume_work_senders.len(); let mut batches = Batches::new(num_threads); let mut chain_id_to_thread_index = HashMap::new(); @@ -64,21 +71,67 @@ impl PrioGraphScheduler { let mut blocking_locks = ReadWriteAccountSet::default(); let mut prio_graph = PrioGraph::new(|id: &TransactionPriorityId, _graph_node| *id); - // Create the initial look-ahead window. - for _ in 0..self.look_ahead_window_size { - let Some(id) = container.pop() else { - break; - }; + // Track metrics on filter. 
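The `filter` argument threaded through `schedule` above writes one boolean per candidate transaction: `true` keeps the transaction for scheduling, `false` drops it from the container. A minimal pass-through filter with the expected shape, mirroring the `test_filter` helper added to the unit tests later in this diff (the name `keep_everything` is only illustrative):

    fn keep_everything(_txs: &[&SanitizedTransaction], results: &mut [bool]) {
        // Keep every candidate; a real filter would run age/status checks here,
        // as `SchedulerController::pre_scheduling_filter` does further below.
        results.fill(true);
    }
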
+ let mut num_filtered_out: usize = 0; + let mut total_filter_time_us: u64 = 0; + + let mut window_budget = self.look_ahead_window_size; + let mut chunked_pops = |container: &mut TransactionStateContainer, + prio_graph: &mut PrioGraph<_, _, _, _>, + window_budget: &mut usize| { + while *window_budget > 0 { + const MAX_FILTER_CHUNK_SIZE: usize = 128; + let mut filter_array = [true; MAX_FILTER_CHUNK_SIZE]; + let mut ids = Vec::with_capacity(MAX_FILTER_CHUNK_SIZE); + let mut txs = Vec::with_capacity(MAX_FILTER_CHUNK_SIZE); + + let chunk_size = (*window_budget).min(MAX_FILTER_CHUNK_SIZE); + for _ in 0..chunk_size { + if let Some(id) = container.pop() { + ids.push(id); + } else { + break; + } + } + *window_budget = window_budget.saturating_sub(chunk_size); + + ids.iter().for_each(|id| { + let transaction = container.get_transaction_ttl(&id.id).unwrap(); + txs.push(&transaction.transaction); + }); + + let (_, filter_us) = measure_us!(filter(&txs, &mut filter_array[..chunk_size])); + saturating_add_assign!(total_filter_time_us, filter_us); + + for (id, filter_result) in ids.iter().zip(&filter_array[..chunk_size]) { + if *filter_result { + let transaction = container.get_transaction_ttl(&id.id).unwrap(); + prio_graph.insert_transaction( + *id, + Self::get_transaction_account_access(transaction), + ); + } else { + saturating_add_assign!(num_filtered_out, 1); + container.remove_by_id(&id.id); + } + } - let transaction = container.get_transaction_ttl(&id.id).unwrap(); - prio_graph.insert_transaction(id, Self::get_transaction_account_access(transaction)); - } + if ids.len() != chunk_size { + break; + } + } + }; + + // Create the initial look-ahead window. + // Check transactions against filter, remove from container if it fails. + chunked_pops(container, &mut prio_graph, &mut window_budget); let mut unblock_this_batch = Vec::with_capacity(self.consume_work_senders.len() * TARGET_NUM_TRANSACTIONS_PER_BATCH); const MAX_TRANSACTIONS_PER_SCHEDULING_PASS: usize = 100_000; - let mut num_scheduled = 0; - let mut num_sent = 0; + let mut num_scheduled: usize = 0; + let mut num_sent: usize = 0; + let mut num_unschedulable: usize = 0; while num_scheduled < MAX_TRANSACTIONS_PER_SCHEDULING_PASS { // If nothing is in the main-queue of the `PrioGraph` then there's nothing left to schedule. if prio_graph.is_empty() { @@ -88,15 +141,6 @@ impl PrioGraphScheduler { while let Some(id) = prio_graph.pop() { unblock_this_batch.push(id); - // Push next transaction from container into the `PrioGraph` look-ahead window. - if let Some(next_id) = container.pop() { - let transaction = container.get_transaction_ttl(&next_id.id).unwrap(); - prio_graph.insert_transaction( - next_id, - Self::get_transaction_account_access(transaction), - ); - } - // Should always be in the container, during initial testing phase panic. // Later, we can replace with a continue in case this does happen. let Some(transaction_state) = container.get_mut_transaction_state(&id.id) else { @@ -109,6 +153,7 @@ impl PrioGraphScheduler { if !blocking_locks.check_locks(transaction.message()) { blocking_locks.take_locks(transaction.message()); unschedulable_ids.push(id); + saturating_add_assign!(num_unschedulable, 1); continue; } @@ -133,10 +178,11 @@ impl PrioGraphScheduler { ) else { blocking_locks.take_locks(transaction.message()); unschedulable_ids.push(id); + saturating_add_assign!(num_unschedulable, 1); continue; }; - num_scheduled += 1; + saturating_add_assign!(num_scheduled, 1); // Track the chain-id to thread-index mapping. 
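Two small helpers carry most of the bookkeeping added in this hunk: `measure_us!` (from `solana_measure`) evaluates an expression and returns the pair `(result, elapsed_microseconds)`, and `saturating_add_assign!` (from `solana_sdk`) is shorthand for `lhs = lhs.saturating_add(rhs)`, so counters clamp instead of overflowing. A minimal sketch, assuming only those two imports:

    let mut total_filter_time_us: u64 = 0;
    // measure_us! yields the expression's value plus how long it took, in microseconds.
    let (sum, elapsed_us) = measure_us!(2u64 + 2);
    // saturating_add_assign! folds the measurement into the running counter.
    saturating_add_assign!(total_filter_time_us, elapsed_us);
    assert_eq!(sum, 4);
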
chain_id_to_thread_index.insert(prio_graph.chain_id(&id), thread_id); @@ -154,11 +200,11 @@ impl PrioGraphScheduler { batches.transactions[thread_id].push(transaction); batches.ids[thread_id].push(id.id); batches.max_age_slots[thread_id].push(max_age_slot); - batches.total_cus[thread_id] += cu_limit; + saturating_add_assign!(batches.total_cus[thread_id], cu_limit); // If target batch size is reached, send only this batch. if batches.ids[thread_id].len() >= TARGET_NUM_TRANSACTIONS_PER_BATCH { - num_sent += self.send_batch(&mut batches, thread_id)?; + saturating_add_assign!(num_sent, self.send_batch(&mut batches, thread_id)?); } if num_scheduled >= MAX_TRANSACTIONS_PER_SCHEDULING_PASS { @@ -167,7 +213,11 @@ impl PrioGraphScheduler { } // Send all non-empty batches - num_sent += self.send_batches(&mut batches)?; + saturating_add_assign!(num_sent, self.send_batches(&mut batches)?); + + // Refresh window budget and do chunked pops + saturating_add_assign!(window_budget, unblock_this_batch.len()); + chunked_pops(container, &mut prio_graph, &mut window_budget); // Unblock all transactions that were blocked by the transactions that were just sent. for id in unblock_this_batch.drain(..) { @@ -176,7 +226,7 @@ impl PrioGraphScheduler { } // Send batches for any remaining transactions - num_sent += self.send_batches(&mut batches)?; + saturating_add_assign!(num_sent, self.send_batches(&mut batches)?); // Push unschedulable ids back into the container for id in unschedulable_ids { @@ -184,7 +234,7 @@ impl PrioGraphScheduler { } // Push remaining transactions back into the container - while let Some(id) = prio_graph.pop_and_unblock() { + while let Some((id, _)) = prio_graph.pop_and_unblock() { container.push_id_into_queue(id); } @@ -193,24 +243,39 @@ impl PrioGraphScheduler { "number of scheduled and sent transactions must match" ); - Ok(num_scheduled) + Ok(SchedulingSummary { + num_scheduled, + num_unschedulable, + num_filtered_out, + filter_time_us: total_filter_time_us, + }) } /// Receive completed batches of transactions without blocking. + /// Returns (num_transactions, num_retryable_transactions) on success. pub fn receive_completed( &mut self, container: &mut TransactionStateContainer, - ) -> Result<(), SchedulerError> { - while self.try_receive_completed(container)? {} - Ok(()) + ) -> Result<(usize, usize), SchedulerError> { + let mut total_num_transactions: usize = 0; + let mut total_num_retryable: usize = 0; + loop { + let (num_transactions, num_retryable) = self.try_receive_completed(container)?; + if num_transactions == 0 { + break; + } + saturating_add_assign!(total_num_transactions, num_transactions); + saturating_add_assign!(total_num_retryable, num_retryable); + } + Ok((total_num_transactions, total_num_retryable)) } /// Receive completed batches of transactions. - /// Returns `Ok(true)` if a batch was received, `Ok(false)` if no batch was received. + /// Returns `Ok((num_transactions, num_retryable))` if a batch was received, `Ok((0, 0))` if no batch was received. 
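Callers of `schedule` now receive a `SchedulingSummary` instead of a bare count. A hedged usage sketch of how the summary is meant to be consumed, reusing the illustrative `keep_everything` filter from above; the scheduler controller later in this diff folds the same fields into its count and timing metrics:

    let summary = scheduler.schedule(&mut container, keep_everything)?;
    // Scheduled transactions are in flight on the worker channels; unschedulable
    // ones were pushed back into the container for a later pass; filtered-out
    // ones were removed from the container entirely.
    saturating_add_assign!(count_metrics.num_scheduled, summary.num_scheduled);
    saturating_add_assign!(count_metrics.num_unschedulable, summary.num_unschedulable);
    saturating_add_assign!(count_metrics.num_schedule_filtered_out, summary.num_filtered_out);
    saturating_add_assign!(timing_metrics.schedule_filter_time_us, summary.filter_time_us);
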
fn try_receive_completed( &mut self, container: &mut TransactionStateContainer, - ) -> Result { + ) -> Result<(usize, usize), SchedulerError> { match self.finished_consume_work_receiver.try_recv() { Ok(FinishedConsumeWork { work: @@ -222,6 +287,9 @@ impl PrioGraphScheduler { }, retryable_indexes, }) => { + let num_transactions = ids.len(); + let num_retryable = retryable_indexes.len(); + // Free the locks self.complete_batch(batch_id, &transactions); @@ -246,9 +314,9 @@ impl PrioGraphScheduler { container.remove_by_id(&id); } - Ok(true) + Ok((num_transactions, num_retryable)) } - Err(TryRecvError::Empty) => Ok(false), + Err(TryRecvError::Empty) => Ok((0, 0)), Err(TryRecvError::Disconnected) => Err(SchedulerError::DisconnectedRecvChannel( "finished consume work", )), @@ -364,6 +432,19 @@ impl PrioGraphScheduler { } } +/// Metrics from scheduling transactions. +#[derive(Debug, PartialEq, Eq)] +pub(crate) struct SchedulingSummary { + /// Number of transactions scheduled. + pub num_scheduled: usize, + /// Number of transactions that were not scheduled due to conflicts. + pub num_unschedulable: usize, + /// Number of transactions that were dropped due to filter. + pub num_filtered_out: usize, + /// Time spent filtering transactions + pub filter_time_us: u64, +} + struct Batches { ids: Vec>, transactions: Vec>, @@ -520,6 +601,10 @@ mod tests { .unzip() } + fn test_filter(_txs: &[&SanitizedTransaction], results: &mut [bool]) { + results.fill(true); + } + #[test] fn test_schedule_disconnected_channel() { let (mut scheduler, work_receivers, _finished_work_sender) = create_test_frame(1); @@ -527,7 +612,7 @@ mod tests { drop(work_receivers); // explicitly drop receivers assert_matches!( - scheduler.schedule(&mut container), + scheduler.schedule(&mut container, test_filter), Err(SchedulerError::DisconnectedSendChannel(_)) ); } @@ -540,8 +625,9 @@ mod tests { (&Keypair::new(), &[Pubkey::new_unique()], 2, 2), ]); - let num_scheduled = scheduler.schedule(&mut container).unwrap(); - assert_eq!(num_scheduled, 2); + let scheduling_summary = scheduler.schedule(&mut container, test_filter).unwrap(); + assert_eq!(scheduling_summary.num_scheduled, 2); + assert_eq!(scheduling_summary.num_unschedulable, 0); assert_eq!(collect_work(&work_receivers[0]).1, vec![txids!([1, 0])]); } @@ -554,8 +640,9 @@ mod tests { (&Keypair::new(), &[pubkey], 1, 2), ]); - let num_scheduled = scheduler.schedule(&mut container).unwrap(); - assert_eq!(num_scheduled, 2); + let scheduling_summary = scheduler.schedule(&mut container, test_filter).unwrap(); + assert_eq!(scheduling_summary.num_scheduled, 2); + assert_eq!(scheduling_summary.num_unschedulable, 0); assert_eq!( collect_work(&work_receivers[0]).1, vec![txids!([1]), txids!([0])] @@ -571,8 +658,12 @@ mod tests { ); // expect 4 full batches to be scheduled - let num_scheduled = scheduler.schedule(&mut container).unwrap(); - assert_eq!(num_scheduled, 4 * TARGET_NUM_TRANSACTIONS_PER_BATCH); + let scheduling_summary = scheduler.schedule(&mut container, test_filter).unwrap(); + assert_eq!( + scheduling_summary.num_scheduled, + 4 * TARGET_NUM_TRANSACTIONS_PER_BATCH + ); + assert_eq!(scheduling_summary.num_unschedulable, 0); let thread0_work_counts: Vec<_> = work_receivers[0] .try_iter() @@ -587,8 +678,9 @@ mod tests { let mut container = create_container((0..4).map(|i| (Keypair::new(), [Pubkey::new_unique()], 1, i))); - let num_scheduled = scheduler.schedule(&mut container).unwrap(); - assert_eq!(num_scheduled, 4); + let scheduling_summary = scheduler.schedule(&mut container, 
test_filter).unwrap(); + assert_eq!(scheduling_summary.num_scheduled, 4); + assert_eq!(scheduling_summary.num_unschedulable, 0); assert_eq!(collect_work(&work_receivers[0]).1, [txids!([3, 1])]); assert_eq!(collect_work(&work_receivers[1]).1, [txids!([2, 0])]); } @@ -618,8 +710,9 @@ mod tests { // fact they eventually join means that the scheduler will schedule them // onto the same thread to avoid causing [4], which conflicts with both // chains, to be un-schedulable. - let num_scheduled = scheduler.schedule(&mut container).unwrap(); - assert_eq!(num_scheduled, 5); + let scheduling_summary = scheduler.schedule(&mut container, test_filter).unwrap(); + assert_eq!(scheduling_summary.num_scheduled, 5); + assert_eq!(scheduling_summary.num_unschedulable, 0); assert_eq!( collect_work(&work_receivers[0]).1, [txids!([0, 2]), txids!([1, 3]), txids!([4])] @@ -658,15 +751,20 @@ mod tests { // Because the look-ahead window is shortened to a size of 4, the scheduler does // not have knowledge of the joining at transaction [4] until after [0] and [1] // have been scheduled. - let num_scheduled = scheduler.schedule(&mut container).unwrap(); - assert_eq!(num_scheduled, 4); + let scheduling_summary = scheduler.schedule(&mut container, test_filter).unwrap(); + assert_eq!(scheduling_summary.num_scheduled, 4); + assert_eq!(scheduling_summary.num_unschedulable, 2); let (thread_0_work, thread_0_ids) = collect_work(&work_receivers[0]); - assert_eq!(thread_0_ids, [txids!([0, 2])]); - assert_eq!(collect_work(&work_receivers[1]).1, [txids!([1, 3])]); + assert_eq!(thread_0_ids, [txids!([0]), txids!([2])]); + assert_eq!( + collect_work(&work_receivers[1]).1, + [txids!([1]), txids!([3])] + ); // Cannot schedule even on next pass because of lock conflicts - let num_scheduled = scheduler.schedule(&mut container).unwrap(); - assert_eq!(num_scheduled, 0); + let scheduling_summary = scheduler.schedule(&mut container, test_filter).unwrap(); + assert_eq!(scheduling_summary.num_scheduled, 0); + assert_eq!(scheduling_summary.num_unschedulable, 2); // Complete batch on thread 0. 
Remaining txs can be scheduled onto thread 1 finished_work_sender @@ -676,8 +774,9 @@ mod tests { }) .unwrap(); scheduler.receive_completed(&mut container).unwrap(); - let num_scheduled = scheduler.schedule(&mut container).unwrap(); - assert_eq!(num_scheduled, 2); + let scheduling_summary = scheduler.schedule(&mut container, test_filter).unwrap(); + assert_eq!(scheduling_summary.num_scheduled, 2); + assert_eq!(scheduling_summary.num_unschedulable, 0); assert_eq!( collect_work(&work_receivers[1]).1, diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs index 8c1dc4f9172f73..f7601a75686559 100644 --- a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs +++ b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs @@ -9,13 +9,20 @@ use { transaction_state_container::TransactionStateContainer, }, crate::banking_stage::{ + consume_worker::ConsumeWorkerMetrics, decision_maker::{BufferedPacketsDecision, DecisionMaker}, immutable_deserialized_packet::ImmutableDeserializedPacket, packet_deserializer::PacketDeserializer, TOTAL_BUFFERED_PACKETS, }, crossbeam_channel::RecvTimeoutError, - solana_runtime::bank_forks::BankForks, + solana_accounts_db::transaction_error_metrics::TransactionErrorMetrics, + solana_measure::measure_us, + solana_runtime::{bank::Bank, bank_forks::BankForks}, + solana_sdk::{ + clock::MAX_PROCESSING_AGE, saturating_add_assign, timing::AtomicInterval, + transaction::SanitizedTransaction, + }, std::{ sync::{Arc, RwLock}, time::Duration, @@ -36,6 +43,12 @@ pub(crate) struct SchedulerController { container: TransactionStateContainer, /// State for scheduling and communicating with worker threads. scheduler: PrioGraphScheduler, + /// Metrics tracking counts on transactions in different states. + count_metrics: SchedulerCountMetrics, + /// Metrics tracking time spent in different code sections. + timing_metrics: SchedulerTimingMetrics, + /// Metric report handles for the worker threads. + worker_metrics: Vec>, } impl SchedulerController { @@ -44,6 +57,7 @@ impl SchedulerController { packet_deserializer: PacketDeserializer, bank_forks: Arc>, scheduler: PrioGraphScheduler, + worker_metrics: Vec>, ) -> Self { Self { decision_maker, @@ -52,6 +66,9 @@ impl SchedulerController { transaction_id_generator: TransactionIdGenerator::default(), container: TransactionStateContainer::with_capacity(TOTAL_BUFFERED_PACKETS), scheduler, + count_metrics: SchedulerCountMetrics::default(), + timing_metrics: SchedulerTimingMetrics::default(), + worker_metrics, } } @@ -67,13 +84,24 @@ impl SchedulerController { // `Forward` will drop packets from the buffer instead of forwarding. // During receiving, since packets would be dropped from buffer anyway, we can // bypass sanitization and buffering and immediately drop the packets. - let decision = self.decision_maker.make_consume_or_forward_decision(); + let (decision, decision_time_us) = + measure_us!(self.decision_maker.make_consume_or_forward_decision()); + saturating_add_assign!(self.timing_metrics.decision_time_us, decision_time_us); self.process_transactions(&decision)?; - self.scheduler.receive_completed(&mut self.container)?; - if !self.receive_packets(&decision) { + self.receive_completed()?; + if !self.receive_and_buffer_packets(&decision) { break; } + + // Report metrics only if there is data. + // Reset intervals when appropriate, regardless of report. 
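The controller gates reporting on `has_data()` but still resets counters on every interval, so an idle leader window neither emits empty datapoints nor carries stale counts into the next interval. A condensed sketch of that shape, mirroring `maybe_report_and_reset` as defined further below:

    if interval.should_update(REPORT_INTERVAL_MS) {
        if should_report {
            report(); // only when at least one counter is non-zero
        }
        reset(); // always, so the next interval starts clean
    }
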
+ let should_report = self.count_metrics.has_data(); + self.count_metrics.maybe_report_and_reset(should_report); + self.timing_metrics.maybe_report_and_reset(should_report); + self.worker_metrics + .iter() + .for_each(|metrics| metrics.maybe_report_and_reset()); } Ok(()) @@ -85,28 +113,135 @@ impl SchedulerController { decision: &BufferedPacketsDecision, ) -> Result<(), SchedulerError> { match decision { - BufferedPacketsDecision::Consume(_bank_start) => { - let _num_scheduled = self.scheduler.schedule(&mut self.container)?; + BufferedPacketsDecision::Consume(bank_start) => { + let (scheduling_summary, schedule_time_us) = + measure_us!(self + .scheduler + .schedule(&mut self.container, |txs, results| { + Self::pre_scheduling_filter(txs, results, &bank_start.working_bank) + })?); + saturating_add_assign!( + self.count_metrics.num_scheduled, + scheduling_summary.num_scheduled + ); + saturating_add_assign!( + self.count_metrics.num_unschedulable, + scheduling_summary.num_unschedulable + ); + saturating_add_assign!( + self.count_metrics.num_schedule_filtered_out, + scheduling_summary.num_filtered_out + ); + saturating_add_assign!( + self.timing_metrics.schedule_filter_time_us, + scheduling_summary.filter_time_us + ); + saturating_add_assign!(self.timing_metrics.schedule_time_us, schedule_time_us); } BufferedPacketsDecision::Forward => { - self.clear_container(); + let (_, clear_time_us) = measure_us!(self.clear_container()); + saturating_add_assign!(self.timing_metrics.clear_time_us, clear_time_us); + } + BufferedPacketsDecision::ForwardAndHold => { + let (_, clean_time_us) = measure_us!(self.clean_queue()); + saturating_add_assign!(self.timing_metrics.clean_time_us, clean_time_us); } - BufferedPacketsDecision::ForwardAndHold | BufferedPacketsDecision::Hold => {} + BufferedPacketsDecision::Hold => {} } Ok(()) } + fn pre_scheduling_filter( + transactions: &[&SanitizedTransaction], + results: &mut [bool], + bank: &Bank, + ) { + let lock_results = vec![Ok(()); transactions.len()]; + let mut error_counters = TransactionErrorMetrics::default(); + let check_results = bank.check_transactions( + transactions, + &lock_results, + MAX_PROCESSING_AGE, + &mut error_counters, + ); + + for ((check_result, _), result) in check_results.into_iter().zip(results.iter_mut()) { + *result = check_result.is_ok(); + } + } + /// Clears the transaction state container. /// This only clears pending transactions, and does **not** clear in-flight transactions. fn clear_container(&mut self) { while let Some(id) = self.container.pop() { self.container.remove_by_id(&id.id); + saturating_add_assign!(self.count_metrics.num_dropped_on_clear, 1); } } + /// Clean unprocessable transactions from the queue. These will be transactions that are + /// expired, already processed, or are no longer sanitizable. + /// This only clears pending transactions, and does **not** clear in-flight transactions. + fn clean_queue(&mut self) { + // Clean up any transactions that have already been processed, are too old, or do not have + // valid nonce accounts. 
+ const MAX_TRANSACTION_CHECKS: usize = 10_000; + let mut transaction_ids = Vec::with_capacity(MAX_TRANSACTION_CHECKS); + + while let Some(id) = self.container.pop() { + transaction_ids.push(id); + } + + let bank = self.bank_forks.read().unwrap().working_bank(); + + const CHUNK_SIZE: usize = 128; + let mut error_counters = TransactionErrorMetrics::default(); + + for chunk in transaction_ids.chunks(CHUNK_SIZE) { + let lock_results = vec![Ok(()); chunk.len()]; + let sanitized_txs: Vec<_> = chunk + .iter() + .map(|id| { + &self + .container + .get_transaction_ttl(&id.id) + .expect("transaction must exist") + .transaction + }) + .collect(); + + let check_results = bank.check_transactions( + &sanitized_txs, + &lock_results, + MAX_PROCESSING_AGE, + &mut error_counters, + ); + + for ((result, _nonce), id) in check_results.into_iter().zip(chunk.iter()) { + if result.is_err() { + saturating_add_assign!(self.count_metrics.num_dropped_on_age_and_status, 1); + self.container.remove_by_id(&id.id); + } + } + } + } + + /// Receives completed transactions from the workers and updates metrics. + fn receive_completed(&mut self) -> Result<(), SchedulerError> { + let ((num_transactions, num_retryable), receive_completed_time_us) = + measure_us!(self.scheduler.receive_completed(&mut self.container)?); + saturating_add_assign!(self.count_metrics.num_finished, num_transactions); + saturating_add_assign!(self.count_metrics.num_retryable, num_retryable); + saturating_add_assign!( + self.timing_metrics.receive_completed_time_us, + receive_completed_time_us + ); + Ok(()) + } + /// Returns whether the packet receiver is still connected. - fn receive_packets(&mut self, decision: &BufferedPacketsDecision) -> bool { + fn receive_and_buffer_packets(&mut self, decision: &BufferedPacketsDecision) -> bool { let remaining_queue_capacity = self.container.remaining_queue_capacity(); const MAX_PACKET_RECEIVE_TIME: Duration = Duration::from_millis(100); @@ -125,17 +260,29 @@ impl SchedulerController { } }; - let received_packet_results = self + let (received_packet_results, receive_time_us) = measure_us!(self .packet_receiver - .receive_packets(recv_timeout, remaining_queue_capacity); - - match (received_packet_results, should_buffer) { - (Ok(receive_packet_results), true) => { - self.buffer_packets(receive_packet_results.deserialized_packets) + .receive_packets(recv_timeout, remaining_queue_capacity)); + saturating_add_assign!(self.timing_metrics.receive_time_us, receive_time_us); + + match received_packet_results { + Ok(receive_packet_results) => { + let num_received_packets = receive_packet_results.deserialized_packets.len(); + saturating_add_assign!(self.count_metrics.num_received, num_received_packets); + if should_buffer { + let (_, buffer_time_us) = measure_us!( + self.buffer_packets(receive_packet_results.deserialized_packets) + ); + saturating_add_assign!(self.timing_metrics.buffer_time_us, buffer_time_us); + } else { + saturating_add_assign!( + self.count_metrics.num_dropped_on_receive, + num_received_packets + ); + } } - (Ok(receive_packet_results), false) => drop(receive_packet_results), - (Err(RecvTimeoutError::Timeout), _) => {} - (Err(RecvTimeoutError::Disconnected), _) => return false, + Err(RecvTimeoutError::Timeout) => {} + Err(RecvTimeoutError::Disconnected) => return false, } true @@ -145,28 +292,270 @@ impl SchedulerController { // Sanitize packets, generate IDs, and insert into the container. 
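`Bank::check_transactions` is the workhorse behind the scheduling filter, `clean_queue` above, and the packet-buffering path below: it takes the transactions, a parallel slice of per-transaction lock results, a maximum age, and an error-counter accumulator, and returns one entry per input transaction, in order, each pairing a `Result` with optional nonce information. A minimal sketch of collapsing those results into keep/drop booleans, as the new call sites do:

    let lock_results = vec![Ok(()); transactions.len()];
    let mut error_counters = TransactionErrorMetrics::default();
    let check_results = bank.check_transactions(
        &transactions,
        &lock_results,
        MAX_PROCESSING_AGE,
        &mut error_counters,
    );
    // One result per transaction, index-aligned with the input slice.
    let keep: Vec<bool> = check_results.iter().map(|(res, _)| res.is_ok()).collect();
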
let bank = self.bank_forks.read().unwrap().working_bank(); let last_slot_in_epoch = bank.epoch_schedule().get_last_slot_in_epoch(bank.epoch()); + let transaction_account_lock_limit = bank.get_transaction_account_lock_limit(); let feature_set = &bank.feature_set; let vote_only = bank.vote_only_bank(); - for packet in packets { - let Some(transaction) = - packet.build_sanitized_transaction(feature_set, vote_only, bank.as_ref()) - else { - continue; - }; - - let transaction_id = self.transaction_id_generator.next(); - let transaction_ttl = SanitizedTransactionTTL { - transaction, - max_age_slot: last_slot_in_epoch, - }; - let transaction_priority_details = packet.priority_details(); - self.container.insert_new_transaction( - transaction_id, - transaction_ttl, - transaction_priority_details, + + const CHUNK_SIZE: usize = 128; + let lock_results: [_; CHUNK_SIZE] = core::array::from_fn(|_| Ok(())); + let mut error_counts = TransactionErrorMetrics::default(); + for chunk in packets.chunks(CHUNK_SIZE) { + let mut post_sanitization_count: usize = 0; + let (transactions, priority_details): (Vec<_>, Vec<_>) = chunk + .iter() + .filter_map(|packet| { + packet + .build_sanitized_transaction(feature_set, vote_only, bank.as_ref()) + .map(|tx| (tx, packet.priority_details())) + }) + .inspect(|_| saturating_add_assign!(post_sanitization_count, 1)) + .filter(|(tx, _)| { + SanitizedTransaction::validate_account_locks( + tx.message(), + transaction_account_lock_limit, + ) + .is_ok() + }) + .unzip(); + + let check_results = bank.check_transactions( + &transactions, + &lock_results[..transactions.len()], + MAX_PROCESSING_AGE, + &mut error_counts, + ); + let post_lock_validation_count = transactions.len(); + + let mut post_transaction_check_count: usize = 0; + for ((transaction, priority_details), _) in transactions + .into_iter() + .zip(priority_details) + .zip(check_results) + .filter(|(_, check_result)| check_result.0.is_ok()) + { + saturating_add_assign!(post_transaction_check_count, 1); + let transaction_id = self.transaction_id_generator.next(); + let transaction_ttl = SanitizedTransactionTTL { + transaction, + max_age_slot: last_slot_in_epoch, + }; + + if self.container.insert_new_transaction( + transaction_id, + transaction_ttl, + priority_details, + ) { + saturating_add_assign!(self.count_metrics.num_dropped_on_capacity, 1); + } + saturating_add_assign!(self.count_metrics.num_buffered, 1); + } + + // Update metrics for transactions that were dropped. + let num_dropped_on_sanitization = chunk.len().saturating_sub(post_sanitization_count); + let num_dropped_on_lock_validation = + post_sanitization_count.saturating_sub(post_lock_validation_count); + let num_dropped_on_transaction_checks = + post_lock_validation_count.saturating_sub(post_transaction_check_count); + + saturating_add_assign!( + self.count_metrics.num_dropped_on_sanitization, + num_dropped_on_sanitization + ); + saturating_add_assign!( + self.count_metrics.num_dropped_on_validate_locks, + num_dropped_on_lock_validation ); + saturating_add_assign!( + self.count_metrics.num_dropped_on_receive_transaction_checks, + num_dropped_on_transaction_checks + ); + } + } +} + +#[derive(Default)] +struct SchedulerCountMetrics { + interval: AtomicInterval, + + /// Number of packets received. + num_received: usize, + /// Number of packets buffered. + num_buffered: usize, + + /// Number of transactions scheduled. + num_scheduled: usize, + /// Number of transactions that were unschedulable. 
+ num_unschedulable: usize, + /// Number of transactions that were filtered out during scheduling. + num_schedule_filtered_out: usize, + /// Number of completed transactions received from workers. + num_finished: usize, + /// Number of transactions that were retryable. + num_retryable: usize, + + /// Number of transactions that were immediately dropped on receive. + num_dropped_on_receive: usize, + /// Number of transactions that were dropped due to sanitization failure. + num_dropped_on_sanitization: usize, + /// Number of transactions that were dropped due to failed lock validation. + num_dropped_on_validate_locks: usize, + /// Number of transactions that were dropped due to failed transaction + /// checks during receive. + num_dropped_on_receive_transaction_checks: usize, + /// Number of transactions that were dropped due to clearing. + num_dropped_on_clear: usize, + /// Number of transactions that were dropped due to age and status checks. + num_dropped_on_age_and_status: usize, + /// Number of transactions that were dropped due to exceeded capacity. + num_dropped_on_capacity: usize, +} + +impl SchedulerCountMetrics { + fn maybe_report_and_reset(&mut self, should_report: bool) { + const REPORT_INTERVAL_MS: u64 = 1000; + if self.interval.should_update(REPORT_INTERVAL_MS) { + if should_report { + self.report(); + } + self.reset(); + } + } + + fn report(&self) { + datapoint_info!( + "banking_stage_scheduler_counts", + ("num_received", self.num_received, i64), + ("num_buffered", self.num_buffered, i64), + ("num_scheduled", self.num_scheduled, i64), + ("num_unschedulable", self.num_unschedulable, i64), + ( + "num_schedule_filtered_out", + self.num_schedule_filtered_out, + i64 + ), + ("num_finished", self.num_finished, i64), + ("num_retryable", self.num_retryable, i64), + ("num_dropped_on_receive", self.num_dropped_on_receive, i64), + ( + "num_dropped_on_sanitization", + self.num_dropped_on_sanitization, + i64 + ), + ( + "num_dropped_on_validate_locks", + self.num_dropped_on_validate_locks, + i64 + ), + ( + "num_dropped_on_receive_transaction_checks", + self.num_dropped_on_receive_transaction_checks, + i64 + ), + ("num_dropped_on_clear", self.num_dropped_on_clear, i64), + ( + "num_dropped_on_age_and_status", + self.num_dropped_on_age_and_status, + i64 + ), + ("num_dropped_on_capacity", self.num_dropped_on_capacity, i64) + ); + } + + fn has_data(&self) -> bool { + self.num_received != 0 + || self.num_buffered != 0 + || self.num_scheduled != 0 + || self.num_unschedulable != 0 + || self.num_schedule_filtered_out != 0 + || self.num_finished != 0 + || self.num_retryable != 0 + || self.num_dropped_on_receive != 0 + || self.num_dropped_on_sanitization != 0 + || self.num_dropped_on_validate_locks != 0 + || self.num_dropped_on_receive_transaction_checks != 0 + || self.num_dropped_on_clear != 0 + || self.num_dropped_on_age_and_status != 0 + || self.num_dropped_on_capacity != 0 + } + + fn reset(&mut self) { + self.num_received = 0; + self.num_buffered = 0; + self.num_scheduled = 0; + self.num_unschedulable = 0; + self.num_schedule_filtered_out = 0; + self.num_finished = 0; + self.num_retryable = 0; + self.num_dropped_on_receive = 0; + self.num_dropped_on_sanitization = 0; + self.num_dropped_on_validate_locks = 0; + self.num_dropped_on_receive_transaction_checks = 0; + self.num_dropped_on_clear = 0; + self.num_dropped_on_age_and_status = 0; + self.num_dropped_on_capacity = 0; + } +} + +#[derive(Default)] +struct SchedulerTimingMetrics { + interval: AtomicInterval, + /// Time spent making processing 
decisions. + decision_time_us: u64, + /// Time spent receiving packets. + receive_time_us: u64, + /// Time spent buffering packets. + buffer_time_us: u64, + /// Time spent filtering transactions during scheduling. + schedule_filter_time_us: u64, + /// Time spent scheduling transactions. + schedule_time_us: u64, + /// Time spent clearing transactions from the container. + clear_time_us: u64, + /// Time spent cleaning expired or processed transactions from the container. + clean_time_us: u64, + /// Time spent receiving completed transactions. + receive_completed_time_us: u64, +} + +impl SchedulerTimingMetrics { + fn maybe_report_and_reset(&mut self, should_report: bool) { + const REPORT_INTERVAL_MS: u64 = 1000; + if self.interval.should_update(REPORT_INTERVAL_MS) { + if should_report { + self.report(); + } + self.reset(); } } + + fn report(&self) { + datapoint_info!( + "banking_stage_scheduler_timing", + ("decision_time_us", self.decision_time_us, i64), + ("receive_time_us", self.receive_time_us, i64), + ("buffer_time_us", self.buffer_time_us, i64), + ("schedule_filter_time_us", self.schedule_filter_time_us, i64), + ("schedule_time_us", self.schedule_time_us, i64), + ("clear_time_us", self.clear_time_us, i64), + ("clean_time_us", self.clean_time_us, i64), + ( + "receive_completed_time_us", + self.receive_completed_time_us, + i64 + ) + ); + } + + fn reset(&mut self) { + self.decision_time_us = 0; + self.receive_time_us = 0; + self.buffer_time_us = 0; + self.schedule_filter_time_us = 0; + self.schedule_time_us = 0; + self.clear_time_us = 0; + self.clean_time_us = 0; + self.receive_completed_time_us = 0; + } } #[cfg(test)] @@ -190,7 +579,7 @@ mod tests { }, solana_perf::packet::{to_packet_batches, PacketBatch, NUM_PACKETS}, solana_poh::poh_recorder::{PohRecorder, Record, WorkingBankEntry}, - solana_runtime::{bank::Bank, bank_forks::BankForks}, + solana_runtime::bank::Bank, solana_sdk::{ compute_budget::ComputeBudgetInstruction, hash::Hash, message::Message, poh_config::PohConfig, pubkey::Pubkey, signature::Keypair, signer::Signer, @@ -222,9 +611,7 @@ mod tests { fn create_test_frame(num_threads: usize) -> (TestFrame, SchedulerController) { let GenesisConfigInfo { genesis_config, .. } = create_slow_genesis_config(10_000); - let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); - let bank_forks = BankForks::new_rw_arc(bank); - let bank = bank_forks.read().unwrap().working_bank(); + let (bank, bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()) @@ -266,6 +653,7 @@ mod tests { packet_deserializer, bank_forks, PrioGraphScheduler::new(consume_work_senders, finished_consume_work_receiver), + vec![], // no actual workers with metrics to report, this can be empty ); (test_frame, scheduler_controller) diff --git a/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs b/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs index 76807653315117..10401a88eff405 100644 --- a/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs +++ b/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs @@ -119,12 +119,13 @@ impl TransactionStateContainer { } /// Insert a new transaction into the container's queues and maps. + /// Returns `true` if a packet was dropped due to capacity limits. 
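One subtlety worth noting about the `true` return value documented above: when the queue is full, `push_id_into_queue` uses `push_pop_min`, so the id that gets evicted is whichever currently has the lowest priority, which is not necessarily the one that was just inserted. Callers only learn that some packet was dropped for capacity, which is exactly how the scheduler controller's `num_dropped_on_capacity` counter uses it above:

    if container.insert_new_transaction(transaction_id, transaction_ttl, priority_details) {
        // Something was evicted to make room; count it against capacity.
        saturating_add_assign!(count_metrics.num_dropped_on_capacity, 1);
    }
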
pub(crate) fn insert_new_transaction( &mut self, transaction_id: TransactionId, transaction_ttl: SanitizedTransactionTTL, transaction_priority_details: TransactionPriorityDetails, - ) { + ) -> bool { let priority_id = TransactionPriorityId::new(transaction_priority_details.priority, transaction_id); self.id_to_transaction_state.insert( @@ -151,12 +152,15 @@ impl TransactionStateContainer { /// Pushes a transaction id into the priority queue. If the queue is full, the lowest priority /// transaction will be dropped (removed from the queue and map). - pub(crate) fn push_id_into_queue(&mut self, priority_id: TransactionPriorityId) { + /// Returns `true` if a packet was dropped due to capacity limits. + pub(crate) fn push_id_into_queue(&mut self, priority_id: TransactionPriorityId) -> bool { if self.remaining_queue_capacity() == 0 { let popped_id = self.priority_queue.push_pop_min(priority_id); self.remove_by_id(&popped_id.id); + true } else { self.priority_queue.push(priority_id); + false } } diff --git a/core/src/banking_stage/unprocessed_transaction_storage.rs b/core/src/banking_stage/unprocessed_transaction_storage.rs index 03b3e583326a71..840a2cf860239c 100644 --- a/core/src/banking_stage/unprocessed_transaction_storage.rs +++ b/core/src/banking_stage/unprocessed_transaction_storage.rs @@ -1014,7 +1014,7 @@ mod tests { mint_keypair, .. } = create_genesis_config(10); - let current_bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let current_bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let simple_transactions: Vec = (0..256) .map(|_id| { diff --git a/core/src/commitment_service.rs b/core/src/commitment_service.rs index 84242b44c6433a..e6d12d1b08b917 100644 --- a/core/src/commitment_service.rs +++ b/core/src/commitment_service.rs @@ -263,6 +263,20 @@ mod tests { }, }; + fn new_bank_from_parent_with_bank_forks( + bank_forks: &RwLock, + parent: Arc, + collector_id: &Pubkey, + slot: Slot, + ) -> Arc { + let bank = Bank::new_from_parent(parent, collector_id, slot); + bank_forks + .write() + .unwrap() + .insert(bank) + .clone_without_scheduler() + } + #[test] fn test_get_highest_super_majority_root() { assert_eq!(get_highest_super_majority_root(vec![], 10), 0); @@ -508,14 +522,18 @@ mod tests { vec![100; 1], ); - let bank0 = Bank::new_for_tests(&genesis_config); - let bank_forks = BankForks::new_rw_arc(bank0); + let (_bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); // Fill bank_forks with banks with votes landing in the next slot // Create enough banks such that vote account will root slots 0 and 1 for x in 0..33 { let previous_bank = bank_forks.read().unwrap().get(x).unwrap(); - let bank = Bank::new_from_parent(previous_bank.clone(), &Pubkey::default(), x + 1); + let bank = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + previous_bank.clone(), + &Pubkey::default(), + x + 1, + ); let vote = vote_transaction::new_vote_transaction( vec![x], previous_bank.hash(), @@ -526,7 +544,6 @@ mod tests { None, ); bank.process_transaction(&vote).unwrap(); - bank_forks.write().unwrap().insert(bank); } let working_bank = bank_forks.read().unwrap().working_bank(); @@ -543,7 +560,12 @@ mod tests { // Add an additional bank/vote that will root slot 2 let bank33 = bank_forks.read().unwrap().get(33).unwrap(); - let bank34 = Bank::new_from_parent(bank33.clone(), &Pubkey::default(), 34); + let bank34 = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + bank33.clone(), + &Pubkey::default(), + 34, + ); let vote33 = 
vote_transaction::new_vote_transaction( vec![33], bank33.hash(), @@ -554,7 +576,6 @@ mod tests { None, ); bank34.process_transaction(&vote33).unwrap(); - bank_forks.write().unwrap().insert(bank34); let working_bank = bank_forks.read().unwrap().working_bank(); let root = get_vote_account_root_slot( @@ -587,8 +608,12 @@ mod tests { // Add a forked bank. Because the vote for bank 33 landed in the non-ancestor, the vote // account's root (and thus the highest_super_majority_root) rolls back to slot 1 let bank33 = bank_forks.read().unwrap().get(33).unwrap(); - let bank35 = Bank::new_from_parent(bank33, &Pubkey::default(), 35); - bank_forks.write().unwrap().insert(bank35); + let _bank35 = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + bank33, + &Pubkey::default(), + 35, + ); let working_bank = bank_forks.read().unwrap().working_bank(); let ancestors = working_bank.status_cache_ancestors(); @@ -613,7 +638,12 @@ mod tests { // continues normally for x in 35..=37 { let previous_bank = bank_forks.read().unwrap().get(x).unwrap(); - let bank = Bank::new_from_parent(previous_bank.clone(), &Pubkey::default(), x + 1); + let bank = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + previous_bank.clone(), + &Pubkey::default(), + x + 1, + ); let vote = vote_transaction::new_vote_transaction( vec![x], previous_bank.hash(), @@ -624,7 +654,6 @@ mod tests { None, ); bank.process_transaction(&vote).unwrap(); - bank_forks.write().unwrap().insert(bank); } let working_bank = bank_forks.read().unwrap().working_bank(); diff --git a/core/src/consensus/heaviest_subtree_fork_choice.rs b/core/src/consensus/heaviest_subtree_fork_choice.rs index 4b58ee78b99da7..8afebae2ba68e9 100644 --- a/core/src/consensus/heaviest_subtree_fork_choice.rs +++ b/core/src/consensus/heaviest_subtree_fork_choice.rs @@ -16,6 +16,7 @@ use { }, std::{ borrow::Borrow, + cmp::Ordering, collections::{ btree_set::Iter, hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet, VecDeque, }, @@ -94,10 +95,16 @@ struct ForkInfo { // Amount of stake that has voted for this slot and the subtree // rooted at this slot stake_voted_subtree: ForkWeight, + // Tree height for the subtree rooted at this slot + height: usize, // Best slot in the subtree rooted at this slot, does not // have to be a direct child in `children`. This is the slot whose subtree // is the heaviest. best_slot: SlotHashKey, + // Deepest slot in the subtree rooted at this slot. This is the slot + // with the greatest tree height. This metric does not discriminate invalid + // forks, unlike `best_slot` + deepest_slot: SlotHashKey, parent: Option, children: BTreeSet, // The latest ancestor of this node that has been marked invalid. 
If the slot @@ -285,16 +292,30 @@ impl HeaviestSubtreeForkChoice { .map(|fork_info| fork_info.best_slot) } + pub fn deepest_slot(&self, key: &SlotHashKey) -> Option { + self.fork_infos + .get(key) + .map(|fork_info| fork_info.deepest_slot) + } + pub fn best_overall_slot(&self) -> SlotHashKey { self.best_slot(&self.tree_root).unwrap() } + pub fn deepest_overall_slot(&self) -> SlotHashKey { + self.deepest_slot(&self.tree_root).unwrap() + } + pub fn stake_voted_subtree(&self, key: &SlotHashKey) -> Option { self.fork_infos .get(key) .map(|fork_info| fork_info.stake_voted_subtree) } + pub fn height(&self, key: &SlotHashKey) -> Option { + self.fork_infos.get(key).map(|fork_info| fork_info.height) + } + pub fn tree_root(&self) -> SlotHashKey { self.tree_root } @@ -404,8 +425,10 @@ impl HeaviestSubtreeForkChoice { let root_parent_info = ForkInfo { stake_voted_at: 0, stake_voted_subtree: root_info.stake_voted_subtree, - // The `best_slot` does not change + height: root_info.height + 1, + // The `best_slot` and `deepest_slot` do not change best_slot: root_info.best_slot, + deepest_slot: root_info.deepest_slot, children: BTreeSet::from([self.tree_root]), parent: None, latest_invalid_ancestor: None, @@ -435,8 +458,10 @@ impl HeaviestSubtreeForkChoice { .or_insert(ForkInfo { stake_voted_at: 0, stake_voted_subtree: 0, - // The `best_slot` of a leaf is itself + height: 1, + // The `best_slot` and `deepest_slot` of a leaf is itself best_slot: slot_hash_key, + deepest_slot: slot_hash_key, children: BTreeSet::new(), parent, latest_invalid_ancestor: parent_latest_invalid_ancestor, @@ -459,8 +484,8 @@ impl HeaviestSubtreeForkChoice { .insert(slot_hash_key); // Propagate leaf up the tree to any ancestors who considered the previous leaf - // the `best_slot` - self.propagate_new_leaf(&slot_hash_key, &parent) + // the `best_slot`, as well as any deepest slot info + self.propagate_new_leaf(&slot_hash_key, &parent); } // Returns true if the given `maybe_best_child` is the heaviest among the children @@ -492,6 +517,41 @@ impl HeaviestSubtreeForkChoice { true } + // Returns true if the given `maybe_deepest_child` is the deepest among the children + // of the parent. Breaks ties by stake, then slot # (lower is heavier). 
+ fn is_deepest_child(&self, maybe_deepest_child: &SlotHashKey) -> bool { + let maybe_deepest_child_weight = self.stake_voted_subtree(maybe_deepest_child).unwrap(); + let maybe_deepest_child_height = self.height(maybe_deepest_child).unwrap(); + let parent = self.parent(maybe_deepest_child); + // If there's no parent, this must be the root + if parent.is_none() { + return true; + } + for child in self.children(&parent.unwrap()).unwrap() { + let child_height = self + .height(child) + .expect("child must exist in `self.fork_infos`"); + let child_weight = self + .stake_voted_subtree(child) + .expect("child must exist in `self.fork_infos`"); + + match ( + child_height.cmp(&maybe_deepest_child_height), + child_weight.cmp(&maybe_deepest_child_weight), + child.cmp(maybe_deepest_child), + ) { + (Ordering::Greater, _, _) => return false, + // Tiebreak by stake + (Ordering::Equal, Ordering::Greater, _) => return false, + // Tiebreak by slot # + (Ordering::Equal, Ordering::Equal, Ordering::Less) => return false, + _ => (), + } + } + + true + } + pub fn all_slots_stake_voted_subtree(&self) -> impl Iterator { self.fork_infos .iter() @@ -509,29 +569,35 @@ impl HeaviestSubtreeForkChoice { /// Returns the subtree originating from `slot_hash_key` pub fn split_off(&mut self, slot_hash_key: &SlotHashKey) -> Self { assert_ne!(self.tree_root, *slot_hash_key); - let mut split_tree_root = { + let (mut split_tree_root, parent) = { let node_to_split_at = self .fork_infos .get_mut(slot_hash_key) .expect("Slot hash key must exist in tree"); - let split_tree_fork_info = node_to_split_at.clone(); - // Remove stake to be aggregated up the tree - node_to_split_at.stake_voted_subtree = 0; - node_to_split_at.stake_voted_at = 0; - // Mark this node as invalid so that it cannot be chosen as best child - node_to_split_at.latest_invalid_ancestor = Some(slot_hash_key.0); - split_tree_fork_info + ( + node_to_split_at.clone(), + node_to_split_at + .parent + .expect("Split node is not tree root"), + ) }; let mut update_operations: UpdateOperations = BTreeMap::new(); - // Aggregate up to the root + // Insert aggregate operations up to the root self.insert_aggregate_operations(&mut update_operations, *slot_hash_key); + // Remove child link so that this slot cannot be choosen as best or deepest + assert!(self + .fork_infos + .get_mut(&parent) + .expect("Parent must exist in fork_infos") + .children + .remove(slot_hash_key)); + // Aggregate self.process_update_operations(update_operations); // Remove node + all children and add to new tree let mut split_tree_fork_infos = HashMap::new(); let mut to_visit = vec![*slot_hash_key]; - while let Some(current_node) = to_visit.pop() { let current_fork_info = self .fork_infos @@ -657,6 +723,10 @@ impl HeaviestSubtreeForkChoice { }) } + /// To be called when `slot_hash_key` has been added to `self.fork_infos`, before any + /// aggregate update operations have taken place. + /// + /// Will propagate update `best_slot` and `deepest_slot` to ancestors. 
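The deepest-child selection above (and the matching arm added to `aggregate_slot` below) orders candidates by tree height, then stake, then the lower slot number. A minimal sketch of that comparator, with plain tuples standing in for `SlotHashKey` and the `ForkInfo` lookups:

use std::cmp::Ordering;

/// Returns true when `candidate` should replace `incumbent` as the deepest child:
/// greater height wins, ties go to greater stake, then to the *lower* slot number.
/// Tuples are (height, stake, slot); the real code reads these from `ForkInfo`.
fn deeper_candidate_wins(candidate: (usize, u64, u64), incumbent: (usize, u64, u64)) -> bool {
    match (
        candidate.0.cmp(&incumbent.0),
        candidate.1.cmp(&incumbent.1),
        candidate.2.cmp(&incumbent.2),
    ) {
        (Ordering::Greater, _, _) => true,
        (Ordering::Equal, Ordering::Greater, _) => true,
        (Ordering::Equal, Ordering::Equal, Ordering::Less) => true,
        _ => false,
    }
}

fn main() {
    // Same height and stake: the lower slot number is preferred.
    assert!(deeper_candidate_wins((3, 10, 4), (3, 10, 5)));
    // Greater height wins regardless of stake.
    assert!(deeper_candidate_wins((4, 1, 9), (3, 10, 5)));
    // Lower height never wins.
    assert!(!deeper_candidate_wins((2, 99, 1), (3, 10, 5)));
}

Unlike `best_slot`, this metric deliberately ignores whether a fork has been marked invalid, which is what lets replay keep resetting onto a duplicate fork later in this change.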
fn propagate_new_leaf( &mut self, slot_hash_key: &SlotHashKey, @@ -665,9 +735,7 @@ impl HeaviestSubtreeForkChoice { let parent_best_slot_hash_key = self .best_slot(parent_slot_hash_key) .expect("parent must exist in self.fork_infos after its child leaf was created"); - - // If this new leaf is the direct parent's best child, then propagate - // it up the tree + // If this new leaf is the direct parent's best child, then propagate it up the tree if self.is_best_child(slot_hash_key) { let mut ancestor = Some(*parent_slot_hash_key); loop { @@ -683,6 +751,24 @@ impl HeaviestSubtreeForkChoice { ancestor = ancestor_fork_info.parent; } } + // Propagate the deepest slot up the tree + let mut ancestor = Some(*parent_slot_hash_key); + let mut current_child = *slot_hash_key; + let mut current_height = 1; + loop { + if ancestor.is_none() { + break; + } + if !self.is_deepest_child(¤t_child) { + break; + } + let ancestor_fork_info = self.fork_infos.get_mut(&ancestor.unwrap()).unwrap(); + ancestor_fork_info.deepest_slot = *slot_hash_key; + ancestor_fork_info.height = current_height + 1; + current_child = ancestor.unwrap(); + current_height = ancestor_fork_info.height; + ancestor = ancestor_fork_info.parent; + } } fn insert_aggregate_operations( @@ -757,18 +843,23 @@ impl HeaviestSubtreeForkChoice { fn aggregate_slot(&mut self, slot_hash_key: SlotHashKey) { let mut stake_voted_subtree; + let mut deepest_child_height = 0; let mut best_slot_hash_key = slot_hash_key; + let mut deepest_slot_hash_key = slot_hash_key; let mut is_duplicate_confirmed = false; if let Some(fork_info) = self.fork_infos.get(&slot_hash_key) { stake_voted_subtree = fork_info.stake_voted_at; let mut best_child_stake_voted_subtree = 0; let mut best_child_slot_key = slot_hash_key; + let mut deepest_child_stake_voted_subtree = 0; + let mut deepest_child_slot_key = slot_hash_key; for child_key in &fork_info.children { let child_fork_info = self .fork_infos .get(child_key) .expect("Child must exist in fork_info map"); let child_stake_voted_subtree = child_fork_info.stake_voted_subtree; + let child_height = child_fork_info.height; is_duplicate_confirmed |= child_fork_info.is_duplicate_confirmed; // Child forks that are not candidates still contribute to the weight @@ -804,6 +895,28 @@ impl HeaviestSubtreeForkChoice { best_child_slot_key = *child_key; best_slot_hash_key = child_fork_info.best_slot; } + + match ( + deepest_child_slot_key == slot_hash_key, + child_height.cmp(&deepest_child_height), + child_stake_voted_subtree.cmp(&deepest_child_stake_voted_subtree), + child_key.cmp(&deepest_child_slot_key) + ) { + // First child + (true, _, _, _) | + // or deeper child + (_, Ordering::Greater, _, _) | + // or tie break by stake weight + (_, Ordering::Equal, Ordering::Greater, _) | + // or tie break by slot # + (_, Ordering::Equal, Ordering::Equal, Ordering::Less) => { + deepest_child_height = child_height; + deepest_child_stake_voted_subtree = child_stake_voted_subtree; + deepest_child_slot_key = *child_key; + deepest_slot_hash_key = child_fork_info.deepest_slot; + }, + _ => () + } } } else { return; @@ -820,7 +933,9 @@ impl HeaviestSubtreeForkChoice { fork_info.set_duplicate_confirmed(); } fork_info.stake_voted_subtree = stake_voted_subtree; + fork_info.height = deepest_child_height + 1; fork_info.best_slot = best_slot_hash_key; + fork_info.deepest_slot = deepest_slot_hash_key; } /// Mark that `valid_slot` on the fork starting at `fork_to_modify` has been marked @@ -1019,7 +1134,49 @@ impl HeaviestSubtreeForkChoice { 
.and_then(|last_voted_slot_hash| { match self.is_candidate(&last_voted_slot_hash) { Some(true) => self.best_slot(&last_voted_slot_hash), - Some(false) => None, + Some(false) => { + // In this case our last voted fork has been marked invalid because + // it contains a duplicate block. It is critical that we continue to + // build on it as long as there exists at least 1 non duplicate fork. + // This is because there is a chance that this fork is actually duplicate + // confirmed but not observed because there is no block containing the + // required votes. + // + // Scenario 1: + // Slot 0 - Slot 1 (90%) + // | + // - Slot 1' + // | + // - Slot 2 (10%) + // + // Imagine that 90% of validators voted for Slot 1, but because of the existence + // of Slot 1', Slot 1 is marked as invalid in fork choice. It is impossible to reach + // the required switch threshold for these validators to switch off of Slot 1 to Slot 2. + // In this case it is important for someone to build a Slot 3 off of Slot 1 that contains + // the votes for Slot 1. At this point they will see that the fork off of Slot 1 is duplicate + // confirmed, and the rest of the network can repair Slot 1, and mark it is a valid candidate + // allowing fork choice to converge. + // + // This will only occur after Slot 2 has been created, in order to resolve the following + // scenario: + // + // Scenario 2: + // Slot 0 - Slot 1 (30%) + // | + // - Slot 1' (30%) + // + // In this scenario only 60% of the network has voted before the duplicate proof for Slot 1 and 1' + // was viewed. Neither version of the slot will reach the duplicate confirmed threshold, so it is + // critical that a new fork Slot 2 from Slot 0 is created to allow the the validators on Slot 1 and + // Slot 1' to switch. Since the `best_slot` is an ancestor of the last vote (Slot 0 is ancestor of last + // vote Slot 1 or Slot 1'), we will trigger `SwitchForkDecision::FailedSwitchDuplicateRollback`, which + // will create an alternate fork off of Slot 0. Once this alternate fork is created, the `best_slot` + // will be Slot 2, at which point we will be in Scenario 1 and continue building off of Slot 1 or Slot 1'. + // + // For more details see the case for + // `SwitchForkDecision::FailedSwitchDuplicateRollback` in `ReplayStage::select_vote_and_reset_forks`. + self.deepest_slot(&last_voted_slot_hash) + } None => { if !tower.is_stray_last_vote() { // Unless last vote is stray and stale, self.is_candidate(last_voted_slot_hash) must return @@ -1126,9 +1283,39 @@ impl ForkChoice for HeaviestSubtreeForkChoice { .get_with_checked_hash(self.best_overall_slot()) .unwrap(), self.heaviest_slot_on_same_voted_fork(tower) - .map(|slot_hash| { - // BankForks should only contain one valid version of this slot - r_bank_forks.get_with_checked_hash(slot_hash).unwrap() + .and_then(|slot_hash| { + #[allow(clippy::manual_filter)] + if let Some(bank) = r_bank_forks.get(slot_hash.0) { + if bank.hash() != slot_hash.1 { + // It is possible that our last vote was for an invalid fork + // and we have repaired and replayed the correct version of the fork. + // In this case the hash for the heaviest bank on our voted fork + // will no longer be matching what we have replayed. + // + // Because we have dumped and repaired a new version, it is impossible + // for our last voted fork to become duplicate confirmed as the state + // machine will never dump and repair a block that has not been observed + // as duplicate confirmed. Therefore it is safe to never build on this + // invalid fork. 
+ None + } else { + Some(bank) + } + } else { + // It is possible that our last vote was for an invalid fork + // and we are in the middle of dumping and repairing such fork. + // In that case, the `heaviest_slot_on_same_voted_fork` has a chance to + // be for a slot that we currently do not have in our bank forks, so we + // return None. + // + // We are guarenteed that we will eventually repair a duplicate confirmed version + // of this slot because the state machine will never dump a slot unless it has + // observed a duplicate confirmed version of the slot. + // + // Therefore there is no chance that our last voted fork will ever become + // duplicate confirmed, so it is safe to never build on it. + None + } }), ) } @@ -1323,6 +1510,14 @@ mod test { .0, 5 ); + assert_eq!( + heaviest_subtree_fork_choice + .deepest_slot(&(2, Hash::default())) + .unwrap() + .0, + 5 + ); + assert!(heaviest_subtree_fork_choice .parent(&(2, Hash::default())) .is_none()); @@ -1516,7 +1711,7 @@ mod test { // Vote for slot 2 heaviest_subtree_fork_choice.add_votes( - [(vote_pubkeys[0], (1, Hash::default()))].iter(), + [(vote_pubkeys[0], (2, Hash::default()))].iter(), bank.epoch_stakes_map(), bank.epoch_schedule(), ); @@ -1671,6 +1866,7 @@ mod test { mut heaviest_subtree_fork_choice, duplicate_leaves_descended_from_4, duplicate_leaves_descended_from_5, + _, ) = setup_duplicate_forks(); // Add a child to one of the duplicates @@ -1715,7 +1911,7 @@ mod test { fn test_propagate_new_leaf() { let mut heaviest_subtree_fork_choice = setup_forks(); - // Add a leaf 10, it should be the best choice + // Add a leaf 10, it should be the best and deepest choice heaviest_subtree_fork_choice .add_new_leaf_slot((10, Hash::default()), Some((4, Hash::default()))); let ancestors = heaviest_subtree_fork_choice @@ -1723,9 +1919,10 @@ mod test { .chain(std::iter::once((10, Hash::default()))); for a in ancestors { assert_eq!(heaviest_subtree_fork_choice.best_slot(&a).unwrap().0, 10); + assert_eq!(heaviest_subtree_fork_choice.deepest_slot(&a).unwrap().0, 10); } - // Add a smaller leaf 9, it should be the best choice + // Add a smaller leaf 9, it should be the best and deepest choice heaviest_subtree_fork_choice .add_new_leaf_slot((9, Hash::default()), Some((4, Hash::default()))); let ancestors = heaviest_subtree_fork_choice @@ -1733,9 +1930,10 @@ mod test { .chain(std::iter::once((9, Hash::default()))); for a in ancestors { assert_eq!(heaviest_subtree_fork_choice.best_slot(&a).unwrap().0, 9); + assert_eq!(heaviest_subtree_fork_choice.deepest_slot(&a).unwrap().0, 9); } - // Add a higher leaf 11, should not change the best choice + // Add a higher leaf 11, should not change the best or deepest choice heaviest_subtree_fork_choice .add_new_leaf_slot((11, Hash::default()), Some((4, Hash::default()))); let ancestors = heaviest_subtree_fork_choice @@ -1743,6 +1941,7 @@ mod test { .chain(std::iter::once((9, Hash::default()))); for a in ancestors { assert_eq!(heaviest_subtree_fork_choice.best_slot(&a).unwrap().0, 9); + assert_eq!(heaviest_subtree_fork_choice.deepest_slot(&a).unwrap().0, 9); } // Add a vote for the other branch at slot 3. @@ -1760,6 +1959,8 @@ mod test { // Because slot 1 now sees the child branch at slot 3 has non-zero // weight, adding smaller leaf slot 8 in the other child branch at slot 2 // should not propagate past slot 1 + // Similarly, both forks have the same tree height so we should tie break by + // stake weight choosing 6 as the deepest slot when possible. 
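For the `ForkChoice::select_forks` hunk above: the voted-fork bank is now returned only when it is still present in `BankForks` and its hash matches the fork-choice key (the `clippy::manual_filter` allow hints this is an `Option::filter` spelled out to make room for the long comments). A stand-in sketch of that check using a plain map, since the real `Bank`/`BankForks` types aren't reproduced here:

use std::collections::HashMap;

type Slot = u64;
type Hash = [u8; 32];

/// Only keep building on the last-voted fork when the slot is present *and*
/// its hash matches; a missing or repaired-to-a-different-hash bank yields None.
fn bank_for_voted_fork(banks: &HashMap<Slot, Hash>, slot_hash: (Slot, Hash)) -> Option<Hash> {
    banks
        .get(&slot_hash.0)
        .copied()
        .filter(|bank_hash| *bank_hash == slot_hash.1)
}

fn main() {
    let banks = HashMap::from([(5, [1u8; 32])]);
    // Matching hash: safe to keep building on the voted fork.
    assert!(bank_for_voted_fork(&banks, (5, [1u8; 32])).is_some());
    // Same slot, different (dumped-and-repaired) hash: opt out.
    assert!(bank_for_voted_fork(&banks, (5, [2u8; 32])).is_none());
    // Slot missing entirely (mid dump-and-repair): opt out as well.
    assert!(bank_for_voted_fork(&banks, (7, [1u8; 32])).is_none());
}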
heaviest_subtree_fork_choice .add_new_leaf_slot((8, Hash::default()), Some((4, Hash::default()))); let ancestors = heaviest_subtree_fork_choice @@ -1771,6 +1972,10 @@ mod test { heaviest_subtree_fork_choice.best_slot(&a).unwrap().0, best_slot ); + assert_eq!( + heaviest_subtree_fork_choice.deepest_slot(&a).unwrap().0, + best_slot + ); } // Add vote for slot 8, should now be the best slot (has same weight @@ -1781,9 +1986,12 @@ mod test { bank.epoch_schedule(), ); assert_eq!(heaviest_subtree_fork_choice.best_overall_slot().0, 8); + // Deepest overall is now 8 as well + assert_eq!(heaviest_subtree_fork_choice.deepest_overall_slot().0, 8); // Because slot 4 now sees the child leaf 8 has non-zero // weight, adding smaller leaf slots should not propagate past slot 4 + // Similarly by tiebreak, 8 should be the deepest slot heaviest_subtree_fork_choice .add_new_leaf_slot((7, Hash::default()), Some((4, Hash::default()))); let ancestors = heaviest_subtree_fork_choice @@ -1791,9 +1999,10 @@ mod test { .chain(std::iter::once((8, Hash::default()))); for a in ancestors { assert_eq!(heaviest_subtree_fork_choice.best_slot(&a).unwrap().0, 8); + assert_eq!(heaviest_subtree_fork_choice.deepest_slot(&a).unwrap().0, 8); } - // All the leaves should think they are their own best choice + // All the leaves should think they are their own best and deepest choice for leaf in [8, 9, 10, 11].iter() { assert_eq!( heaviest_subtree_fork_choice @@ -1802,6 +2011,13 @@ mod test { .0, *leaf ); + assert_eq!( + heaviest_subtree_fork_choice + .deepest_slot(&(*leaf, Hash::default())) + .unwrap() + .0, + *leaf + ); } } @@ -1891,6 +2107,28 @@ mod test { .0, 6 ); + // The deepest leaf only tiebreaks by slot # when tree heights are equal + assert_eq!( + heaviest_subtree_fork_choice + .deepest_slot(&(1, Hash::default())) + .unwrap() + .0, + 6 + ); + assert_eq!( + heaviest_subtree_fork_choice + .deepest_slot(&(2, Hash::default())) + .unwrap() + .0, + 4 + ); + assert_eq!( + heaviest_subtree_fork_choice + .deepest_slot(&(3, Hash::default())) + .unwrap() + .0, + 6 + ); // Update the weights that have voted *exactly* at each slot, the // branch containing slots {5, 6} has weight 11, so should be heavier @@ -1917,7 +2155,9 @@ mod test { // The best path is now 0 -> 1 -> 3 -> 5 -> 6, so leaf 6 // should be the best choice + // It is still the deepest choice assert_eq!(heaviest_subtree_fork_choice.best_overall_slot().0, 6); + assert_eq!(heaviest_subtree_fork_choice.deepest_overall_slot().0, 6); // Verify `stake_voted_at` for slot in 0..=6 { @@ -2003,6 +2243,15 @@ mod test { } }; + let expected_deepest_slot = + |slot, _heaviest_subtree_fork_choice: &HeaviestSubtreeForkChoice| -> Slot { + if [2, 4].contains(&slot) { + 4 + } else { + 6 + } + }; + check_process_update_correctness( &mut heaviest_subtree_fork_choice, &pubkey_votes, @@ -2010,6 +2259,7 @@ mod test { &bank, stake, expected_best_slot, + expected_deepest_slot, ); // Everyone makes newer votes @@ -2044,6 +2294,7 @@ mod test { &bank, stake, expected_best_slot, + expected_deepest_slot, ); } @@ -2255,8 +2506,12 @@ mod test { #[test] fn test_add_votes_duplicate_tie() { - let (mut heaviest_subtree_fork_choice, duplicate_leaves_descended_from_4, _) = - setup_duplicate_forks(); + let ( + mut heaviest_subtree_fork_choice, + duplicate_leaves_descended_from_4, + _, + duplicate_leaves_descended_from_6, + ) = setup_duplicate_forks(); let stake = 10; let num_validators = 2; let (bank, vote_pubkeys) = @@ -2278,16 +2533,23 @@ mod test { ), expected_best_slot_hash ); - assert_eq!( 
heaviest_subtree_fork_choice.best_overall_slot(), expected_best_slot_hash ); + + // we tie break the duplicate_leaves_descended_from_6 and pick the smaller one + // for deepest + let expected_deepest_slot_hash = duplicate_leaves_descended_from_6[0]; assert_eq!( heaviest_subtree_fork_choice - .stake_voted_subtree(&duplicate_leaves_descended_from_4[1]) + .deepest_slot(&(3, Hash::default())) .unwrap(), - stake + expected_deepest_slot_hash + ); + assert_eq!( + heaviest_subtree_fork_choice.deepest_overall_slot(), + expected_deepest_slot_hash ); // Adding the same vote again will not do anything @@ -2314,6 +2576,10 @@ mod test { .unwrap(), stake ); + assert_eq!( + heaviest_subtree_fork_choice.deepest_overall_slot(), + expected_deepest_slot_hash + ); // All common ancestors should have subtree voted stake == 2 * stake, but direct // voted stake == 0 @@ -2338,8 +2604,12 @@ mod test { #[test] fn test_add_votes_duplicate_greater_hash_ignored() { - let (mut heaviest_subtree_fork_choice, duplicate_leaves_descended_from_4, _) = - setup_duplicate_forks(); + let ( + mut heaviest_subtree_fork_choice, + duplicate_leaves_descended_from_4, + _, + duplicate_leaves_descended_from_6, + ) = setup_duplicate_forks(); let stake = 10; let num_validators = 2; let (bank, vote_pubkeys) = @@ -2361,6 +2631,13 @@ mod test { ), expected_best_slot_hash ); + // we tie break the duplicate_leaves_descended_from_6 and pick the smaller one + // for deepest + let expected_deepest_slot_hash = duplicate_leaves_descended_from_6[0]; + assert_eq!( + heaviest_subtree_fork_choice.deepest_overall_slot(), + expected_deepest_slot_hash + ); // Adding a duplicate vote for a validator, for another a greater bank hash, // should be ignored as we prioritize the smaller bank hash. Thus nothing // should change. 
@@ -2374,6 +2651,10 @@ mod test { ), expected_best_slot_hash ); + assert_eq!( + heaviest_subtree_fork_choice.deepest_overall_slot(), + expected_deepest_slot_hash + ); // Still only has one validator voting on it assert_eq!( @@ -2411,8 +2692,12 @@ mod test { #[test] fn test_add_votes_duplicate_smaller_hash_prioritized() { - let (mut heaviest_subtree_fork_choice, duplicate_leaves_descended_from_4, _) = - setup_duplicate_forks(); + let ( + mut heaviest_subtree_fork_choice, + duplicate_leaves_descended_from_4, + _, + duplicate_leaves_descended_from_6, + ) = setup_duplicate_forks(); let stake = 10; let num_validators = 2; let (bank, vote_pubkeys) = @@ -2434,6 +2719,11 @@ mod test { ), expected_best_slot_hash ); + let expected_deepest_slot_hash = duplicate_leaves_descended_from_6[0]; + assert_eq!( + heaviest_subtree_fork_choice.deepest_overall_slot(), + expected_deepest_slot_hash + ); // BEFORE, both validators voting on this leaf assert_eq!( @@ -2477,6 +2767,10 @@ mod test { .unwrap(), stake, ); + assert_eq!( + heaviest_subtree_fork_choice.deepest_overall_slot(), + expected_deepest_slot_hash + ); // The other leaf now has one of the votes assert_eq!( @@ -2515,7 +2809,7 @@ mod test { #[test] fn test_add_votes_duplicate_then_outdated() { - let (mut heaviest_subtree_fork_choice, duplicate_leaves_descended_from_4, _) = + let (mut heaviest_subtree_fork_choice, duplicate_leaves_descended_from_4, _, _) = setup_duplicate_forks(); let stake = 10; let num_validators = 3; @@ -2641,10 +2935,11 @@ mod test { #[test] fn test_add_votes_duplicate_zero_stake() { - let (mut heaviest_subtree_fork_choice, duplicate_leaves_descended_from_4, _): ( + let (mut heaviest_subtree_fork_choice, duplicate_leaves_descended_from_4, _, _): ( HeaviestSubtreeForkChoice, Vec, Vec, + Vec, ) = setup_duplicate_forks(); let stake = 0; @@ -3094,6 +3389,10 @@ mod test { ), (expected_best_slot, Hash::default()), ); + assert_eq!( + heaviest_subtree_fork_choice.deepest_overall_slot(), + (expected_best_slot, Hash::default()), + ); // Simulate a vote on slot 5 let last_voted_slot_hash = (5, Hash::default()); @@ -3124,10 +3423,10 @@ mod test { assert_eq!(heaviest_subtree_fork_choice.best_overall_slot().0, 3); // After marking the last vote in the tower as invalid, `heaviest_slot_on_same_voted_fork()` - // should disregard all descendants of that invalid vote + // should instead use the deepest slot metric, which is still 6 assert_eq!( heaviest_subtree_fork_choice.heaviest_slot_on_same_voted_fork(&tower), - None + Some((6, Hash::default())) ); // Adding another descendant to the invalid candidate won't @@ -3149,10 +3448,14 @@ mod test { (invalid_slot_ancestor, Hash::default()), ); - // This shouldn't update the `heaviest_slot_on_same_voted_fork` either - assert!(heaviest_subtree_fork_choice - .heaviest_slot_on_same_voted_fork(&tower) - .is_none()); + // However this should update the `heaviest_slot_on_same_voted_fork` since we use + // deepest metric for invalid forks + assert_eq!( + heaviest_subtree_fork_choice + .heaviest_slot_on_same_voted_fork(&tower) + .unwrap(), + new_leaf7, + ); // Adding a descendant to the ancestor of the invalid candidate *should* update // the best slot though, since the ancestor is on the heaviest fork @@ -3162,9 +3465,12 @@ mod test { assert_eq!(heaviest_subtree_fork_choice.best_overall_slot(), new_leaf8,); // Should not update the `heaviest_slot_on_same_voted_fork` because the new leaf // is not descended from the last vote - assert!(heaviest_subtree_fork_choice - .heaviest_slot_on_same_voted_fork(&tower) - 
.is_none()); + assert_eq!( + heaviest_subtree_fork_choice + .heaviest_slot_on_same_voted_fork(&tower) + .unwrap(), + new_leaf7 + ); // If we mark slot a descendant of `invalid_candidate` as valid, then that // should also mark `invalid_candidate` as valid, and the best slot should @@ -3198,6 +3504,7 @@ mod test { mut heaviest_subtree_fork_choice, duplicate_leaves_descended_from_4, duplicate_leaves_descended_from_5, + duplicate_leaves_descended_from_6, ) = setup_duplicate_forks(); let stake = 100; let (bank, vote_pubkeys) = bank_utils::setup_bank_and_vote_pubkeys_for_tests(3, stake); @@ -3216,6 +3523,11 @@ mod test { ), duplicate_leaves_descended_from_4[0] ); + // Deepest slot should be the smallest leaf descended from 6 + assert_eq!( + heaviest_subtree_fork_choice.deepest_overall_slot(), + duplicate_leaves_descended_from_6[0], + ); // If we mark slot 4 as invalid, the ancestor 2 should be the heaviest, not // the other branch at slot 5 @@ -3225,6 +3537,11 @@ mod test { heaviest_subtree_fork_choice.best_overall_slot(), (2, Hash::default()) ); + // Samallest duplicate from 6 should still be deepest + assert_eq!( + heaviest_subtree_fork_choice.deepest_overall_slot(), + duplicate_leaves_descended_from_6[0], + ); ( heaviest_subtree_fork_choice, duplicate_leaves_descended_from_4, @@ -3716,12 +4033,84 @@ mod test { assert_eq!(4, tree.best_overall_slot().0); } + #[test] + fn test_split_off_on_deepest_path() { + let mut heaviest_subtree_fork_choice = setup_forks(); + + assert_eq!(6, heaviest_subtree_fork_choice.deepest_overall_slot().0); + + let tree = heaviest_subtree_fork_choice.split_off(&(6, Hash::default())); + assert_eq!(4, heaviest_subtree_fork_choice.deepest_overall_slot().0); + assert_eq!(6, tree.deepest_overall_slot().0); + + let tree = heaviest_subtree_fork_choice.split_off(&(3, Hash::default())); + assert_eq!(4, heaviest_subtree_fork_choice.deepest_overall_slot().0); + assert_eq!(5, tree.deepest_overall_slot().0); + + let tree = heaviest_subtree_fork_choice.split_off(&(1, Hash::default())); + assert_eq!(0, heaviest_subtree_fork_choice.deepest_overall_slot().0); + assert_eq!(4, tree.deepest_overall_slot().0); + } + + #[test] + fn test_split_off_on_deepest_path_complicated() { + let mut heaviest_subtree_fork_choice = setup_complicated_forks(); + assert_eq!(23, heaviest_subtree_fork_choice.deepest_overall_slot().0); + assert_eq!( + 9, + heaviest_subtree_fork_choice + .height(&(0, Hash::default())) + .unwrap() + ); + assert_eq!( + 3, + heaviest_subtree_fork_choice + .height(&(9, Hash::default())) + .unwrap() + ); + assert_eq!( + 7, + heaviest_subtree_fork_choice + .height(&(12, Hash::default())) + .unwrap() + ); + + // Take out the 13 branch, 34 should now be deepest + let tree = heaviest_subtree_fork_choice.split_off(&(13, Hash::default())); + assert_eq!(34, heaviest_subtree_fork_choice.deepest_overall_slot().0); + assert_eq!( + 5, + heaviest_subtree_fork_choice + .height(&(0, Hash::default())) + .unwrap() + ); + assert_eq!( + 3, + heaviest_subtree_fork_choice + .height(&(9, Hash::default())) + .unwrap() + ); + assert_eq!( + 1, + heaviest_subtree_fork_choice + .height(&(12, Hash::default())) + .unwrap() + ); + + // New tree should have updated heights but still think 23 is the heaviest + assert_eq!(23, tree.deepest_overall_slot().0); + assert_eq!(6, tree.height(&(13, Hash::default())).unwrap()); + assert_eq!(2, tree.height(&(18, Hash::default())).unwrap()); + assert_eq!(1, tree.height(&(25, Hash::default())).unwrap()); + } + #[test] fn test_split_off_with_dups() { let ( mut 
heaviest_subtree_fork_choice, duplicate_leaves_descended_from_4, duplicate_leaves_descended_from_5, + duplicate_leaves_descended_from_6, ) = setup_duplicate_forks(); let stake = 10; @@ -3751,13 +4140,23 @@ mod test { heaviest_subtree_fork_choice.best_overall_slot(), expected_best_slot_hash ); + let expected_deepest_slot_hash = duplicate_leaves_descended_from_6[0]; + assert_eq!( + heaviest_subtree_fork_choice.deepest_overall_slot(), + expected_deepest_slot_hash + ); let tree = heaviest_subtree_fork_choice.split_off(&expected_best_slot_hash); assert_eq!( heaviest_subtree_fork_choice.best_overall_slot(), duplicate_leaves_descended_from_4[1] ); + assert_eq!( + heaviest_subtree_fork_choice.deepest_overall_slot(), + expected_deepest_slot_hash + ); assert_eq!(tree.best_overall_slot(), expected_best_slot_hash); + assert_eq!(tree.deepest_overall_slot(), expected_best_slot_hash); } #[test] @@ -3766,6 +4165,7 @@ mod test { mut heaviest_subtree_fork_choice, duplicate_leaves_descended_from_4, duplicate_leaves_descended_from_5, + duplicate_leaves_descended_from_6, ) = setup_duplicate_forks(); let stake = 10; @@ -3795,13 +4195,25 @@ mod test { heaviest_subtree_fork_choice.best_overall_slot(), expected_best_slot_hash ); + let expected_deepest_slot_hash = duplicate_leaves_descended_from_6[0]; + assert_eq!( + heaviest_subtree_fork_choice.deepest_overall_slot(), + expected_deepest_slot_hash + ); + let tree = heaviest_subtree_fork_choice.split_off(&(2, Hash::default())); assert_eq!( heaviest_subtree_fork_choice.best_overall_slot(), duplicate_leaves_descended_from_5[0] ); + assert_eq!( + heaviest_subtree_fork_choice.deepest_overall_slot(), + expected_deepest_slot_hash + ); + assert_eq!(tree.best_overall_slot(), expected_best_slot_hash); + assert_eq!(tree.deepest_overall_slot(), expected_best_slot_hash,); } #[test] @@ -4031,6 +4443,7 @@ mod test { HeaviestSubtreeForkChoice, Vec, Vec, + Vec, ) { /* Build fork structure: @@ -4044,6 +4457,8 @@ mod test { / \ slot 5 slot 10 slot 10 / | \ slot 6 slot 10 slot 10 + / \ + slot 10 slot 10 */ let mut heaviest_subtree_fork_choice = setup_forks(); @@ -4056,8 +4471,13 @@ mod test { std::iter::repeat_with(|| (duplicate_slot, Hash::new_unique())) .take(2) .collect::>(); + let mut duplicate_leaves_descended_from_6 = + std::iter::repeat_with(|| (duplicate_slot, Hash::new_unique())) + .take(2) + .collect::>(); duplicate_leaves_descended_from_4.sort(); duplicate_leaves_descended_from_5.sort(); + duplicate_leaves_descended_from_6.sort(); // Add versions of leaf 10, some with different ancestors, some with the same // ancestors @@ -4069,6 +4489,10 @@ mod test { heaviest_subtree_fork_choice .add_new_leaf_slot(*duplicate_leaf, Some((5, Hash::default()))); } + for duplicate_leaf in &duplicate_leaves_descended_from_6 { + heaviest_subtree_fork_choice + .add_new_leaf_slot(*duplicate_leaf, Some((6, Hash::default()))); + } let mut dup_children = (&heaviest_subtree_fork_choice) .children(&(4, Hash::default())) @@ -4085,23 +4509,34 @@ mod test { .collect(); dup_children.sort(); assert_eq!(dup_children, duplicate_leaves_descended_from_5); + let mut dup_children: Vec<_> = (&heaviest_subtree_fork_choice) + .children(&(6, Hash::default())) + .unwrap() + .copied() + .filter(|(slot, _)| *slot == duplicate_slot) + .collect(); + dup_children.sort(); + assert_eq!(dup_children, duplicate_leaves_descended_from_6); ( heaviest_subtree_fork_choice, duplicate_leaves_descended_from_4, duplicate_leaves_descended_from_5, + duplicate_leaves_descended_from_6, ) } - fn check_process_update_correctness( + fn 
check_process_update_correctness( heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice, pubkey_votes: &[(Pubkey, SlotHashKey)], slots_range: Range, bank: &Bank, stake: u64, mut expected_best_slot: F, + mut expected_deepest_slot: G, ) where F: FnMut(Slot, &HeaviestSubtreeForkChoice) -> Slot, + G: FnMut(Slot, &HeaviestSubtreeForkChoice) -> Slot, { let unique_votes: HashSet = pubkey_votes.iter().map(|(_, (slot, _))| *slot).collect(); let vote_ancestors: HashMap> = unique_votes @@ -4171,6 +4606,13 @@ mod test { .unwrap() .0 ); + assert_eq!( + expected_deepest_slot(slot, heaviest_subtree_fork_choice), + heaviest_subtree_fork_choice + .deepest_slot(&(slot, Hash::default())) + .unwrap() + .0 + ); } } } diff --git a/core/src/repair/ancestor_hashes_service.rs b/core/src/repair/ancestor_hashes_service.rs index 978d0c074c3904..e980ddb46b4745 100644 --- a/core/src/repair/ancestor_hashes_service.rs +++ b/core/src/repair/ancestor_hashes_service.rs @@ -1347,7 +1347,12 @@ mod test { fn new(bank_forks: Arc>) -> Self { let ancestor_hashes_request_statuses = Arc::new(DashMap::new()); let ancestor_hashes_request_socket = Arc::new(UdpSocket::bind("0.0.0.0:0").unwrap()); - let epoch_schedule = *bank_forks.read().unwrap().root_bank().epoch_schedule(); + let epoch_schedule = bank_forks + .read() + .unwrap() + .root_bank() + .epoch_schedule() + .clone(); let keypair = Keypair::new(); let requester_cluster_info = Arc::new(ClusterInfo::new( Node::new_localhost_with_pubkey(&keypair.pubkey()).info, diff --git a/core/src/repair/quic_endpoint.rs b/core/src/repair/quic_endpoint.rs index c6f2e00df53a26..89f9de78491101 100644 --- a/core/src/repair/quic_endpoint.rs +++ b/core/src/repair/quic_endpoint.rs @@ -408,11 +408,16 @@ async fn handle_connection( )); match futures::future::try_join(send_requests_task, recv_requests_task).await { Err(err) => error!("handle_connection: {remote_pubkey}, {remote_address}, {err:?}"), - Ok(((), Err(err))) => { - debug!("recv_requests_task: {remote_pubkey}, {remote_address}, {err:?}"); - record_error(&err, &stats); + Ok(out) => { + if let (Err(ref err), _) = out { + debug!("send_requests_task: {remote_pubkey}, {remote_address}, {err:?}"); + record_error(err, &stats); + } + if let (_, Err(ref err)) = out { + debug!("recv_requests_task: {remote_pubkey}, {remote_address}, {err:?}"); + record_error(err, &stats); + } } - Ok(((), Ok(()))) => (), } drop_connection(remote_pubkey, &connection, &cache).await; if let Entry::Occupied(entry) = router.write().await.entry(remote_address) { @@ -513,15 +518,27 @@ async fn send_requests_task( connection: Connection, mut receiver: AsyncReceiver, stats: Arc, -) { - while let Some(request) = receiver.recv().await { - tokio::task::spawn(send_request_task( - endpoint.clone(), - remote_address, - connection.clone(), - request, - stats.clone(), - )); +) -> Result<(), Error> { + tokio::pin! { + let connection_closed = connection.closed(); + } + loop { + tokio::select! 
{ + biased; + request = receiver.recv() => { + match request { + None => return Ok(()), + Some(request) => tokio::task::spawn(send_request_task( + endpoint.clone(), + remote_address, + connection.clone(), + request, + stats.clone(), + )), + }; + } + err = &mut connection_closed => return Err(Error::from(err)), + } } } diff --git a/core/src/repair/repair_weight.rs b/core/src/repair/repair_weight.rs index 6838021d7574c7..430a02850b30c2 100644 --- a/core/src/repair/repair_weight.rs +++ b/core/src/repair/repair_weight.rs @@ -2553,7 +2553,7 @@ mod test { let stake = 100; let (bank, vote_pubkeys) = bank_utils::setup_bank_and_vote_pubkeys_for_tests(10, stake); let mut epoch_stakes = bank.epoch_stakes_map().clone(); - let mut epoch_schedule = *bank.epoch_schedule(); + let mut epoch_schedule = bank.epoch_schedule().clone(); // Simulate epoch boundary at slot 10, where half of the stake deactivates // Additional epoch boundary at slot 20, where 30% of the stake reactivates diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 2bfb72da52d4c0..8d9cae1a137868 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -1349,14 +1349,23 @@ impl ReplayStage { ); } - // Should not dump slots for which we were the leader if Some(*my_pubkey) == leader_schedule_cache.slot_leader_at(*duplicate_slot, None) { - panic!("We are attempting to dump a block that we produced. \ - This indicates that we are producing duplicate blocks, \ - or that there is a bug in our runtime/replay code which \ - causes us to compute different bank hashes than the rest of the cluster. \ - We froze slot {duplicate_slot} with hash {frozen_hash:?} while the cluster hash is {correct_hash}"); + if let Some(bank) = bank_forks.read().unwrap().get(*duplicate_slot) { + bank_hash_details::write_bank_hash_details_file(&bank) + .map_err(|err| { + warn!("Unable to write bank hash details file: {err}"); + }) + .ok(); + } else { + warn!("Unable to get bank for slot {duplicate_slot} from bank forks \ + while attempting to write bank hash details file"); + } + panic!("We are attempting to dump a block that we produced. \ + This indicates that we are producing duplicate blocks, \ + or that there is a bug in our runtime/replay code which \ + causes us to compute different bank hashes than the rest of the cluster. \ + We froze slot {duplicate_slot} with hash {frozen_hash:?} while the cluster hash is {correct_hash}"); } let attempt_no = purge_repair_slot_counter @@ -1507,7 +1516,11 @@ impl ReplayStage { let bank = w_bank_forks .remove(*slot) .expect("BankForks should not have been purged yet"); - let _ = bank_hash_details::write_bank_hash_details_file(&bank); + bank_hash_details::write_bank_hash_details_file(&bank) + .map_err(|err| { + warn!("Unable to write bank hash details file: {err}"); + }) + .ok(); ((*slot, bank.bank_id()), bank) }) .unzip() @@ -4147,6 +4160,20 @@ pub(crate) mod tests { trees::{tr, Tree}, }; + fn new_bank_from_parent_with_bank_forks( + bank_forks: &RwLock, + parent: Arc, + collector_id: &Pubkey, + slot: Slot, + ) -> Arc { + let bank = Bank::new_from_parent(parent, collector_id, slot); + bank_forks + .write() + .unwrap() + .insert(bank) + .clone_without_scheduler() + } + #[test] fn test_is_partition_detected() { let (VoteSimulator { bank_forks, .. 
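Back in `core/src/repair/quic_endpoint.rs`, `send_requests_task` now returns a `Result` and races the request channel against `Connection::closed()`, so the loop's outcome surfaces in `handle_connection`'s logging. A minimal sketch of that loop shape, with a `oneshot` standing in for `closed()` and a plain `String` for the error type:

use tokio::sync::{mpsc, oneshot};

/// Drain the request channel until the sender goes away (clean exit) or the
/// connection reports closure (error), preferring pending requests via `biased`.
async fn send_loop(
    mut requests: mpsc::Receiver<u64>,
    closed: oneshot::Receiver<&'static str>,
) -> Result<(), String> {
    tokio::pin!(closed);
    loop {
        tokio::select! {
            biased;
            request = requests.recv() => {
                match request {
                    None => return Ok(()), // all senders dropped: clean shutdown
                    Some(request) => println!("sending request {request}"),
                }
            }
            reason = &mut closed => {
                return Err(format!("connection closed: {reason:?}"));
            }
        }
    }
}

#[tokio::main]
async fn main() {
    let (request_tx, request_rx) = mpsc::channel(8);
    let (_close_tx, close_rx) = oneshot::channel::<&'static str>();
    request_tx.send(1).await.unwrap();
    drop(request_tx); // closing the channel ends the loop cleanly
    assert!(send_loop(request_rx, close_rx).await.is_ok());
}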
}, _) = setup_default_forks(1, None::); @@ -4920,7 +4947,12 @@ pub(crate) mod tests { for i in 1..=3 { let prev_bank = bank_forks.read().unwrap().get(i - 1).unwrap(); let slot = prev_bank.slot() + 1; - let bank = Bank::new_from_parent(prev_bank, &Pubkey::default(), slot); + let bank = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + prev_bank, + &Pubkey::default(), + slot, + ); let _res = bank.transfer( 10, &genesis_config_info.mint_keypair, @@ -4929,7 +4961,7 @@ pub(crate) mod tests { for _ in 0..genesis_config.ticks_per_slot { bank.register_default_tick_for_test(); } - bank_forks.write().unwrap().insert(bank); + let arc_bank = bank_forks.read().unwrap().get(i).unwrap(); leader_vote(i - 1, &arc_bank, &leader_voting_pubkey); ReplayStage::update_commitment_cache( @@ -5006,7 +5038,7 @@ pub(crate) mod tests { let keypair2 = Keypair::new(); let keypair3 = Keypair::new(); - let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); + let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); bank0 .transfer( bank0.get_minimum_balance_for_rent_exemption(0), @@ -5015,7 +5047,11 @@ pub(crate) mod tests { ) .unwrap(); - let bank1 = Arc::new(Bank::new_from_parent(bank0, &Pubkey::default(), 1)); + let bank1 = bank_forks + .write() + .unwrap() + .insert(Bank::new_from_parent(bank0, &Pubkey::default(), 1)) + .clone_without_scheduler(); let slot = bank1.slot(); let (entries, test_signatures) = create_test_transaction_entries( @@ -5077,10 +5113,6 @@ pub(crate) mod tests { None, ); - let bank1 = Bank::new_from_parent(bank0.clone(), &my_node_pubkey, 1); - bank1.process_transaction(&vote_tx).unwrap(); - bank1.freeze(); - // Test confirmations let ancestors = bank_forks.read().unwrap().ancestors(); let mut frozen_banks: Vec<_> = bank_forks @@ -5121,8 +5153,16 @@ pub(crate) mod tests { assert!(confirmed_forks.is_empty()); } + let bank1 = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + bank0.clone(), + &my_node_pubkey, + 1, + ); + bank1.process_transaction(&vote_tx).unwrap(); + bank1.freeze(); + // Insert the bank that contains a vote for slot 0, which confirms slot 0 - bank_forks.write().unwrap().insert(bank1); progress.insert( 1, ForkProgress::new(bank0.last_blockhash(), None, None, 0, 0), @@ -6512,8 +6552,9 @@ pub(crate) mod tests { ); // 4 should be the heaviest slot, but should not be votable - // because of lockout. 5 is no longer valid due to it being a duplicate. - let (vote_fork, reset_fork, _) = run_compute_and_select_forks( + // because of lockout. 5 is no longer valid due to it being a duplicate, however we still + // reset onto 5. + let (vote_fork, reset_fork, heaviest_fork_failures) = run_compute_and_select_forks( &bank_forks, &mut progress, &mut tower, @@ -6522,7 +6563,41 @@ pub(crate) mod tests { None, ); assert!(vote_fork.is_none()); - assert!(reset_fork.is_none()); + assert_eq!(reset_fork, Some(5)); + assert_eq!( + heaviest_fork_failures, + vec![ + HeaviestForkFailures::FailedSwitchThreshold(4, 0, 10000), + HeaviestForkFailures::LockedOut(4) + ] + ); + + // Continue building on 5 + let forks = tr(5) / (tr(6) / (tr(7) / (tr(8) / (tr(9)))) / tr(10)); + vote_simulator.bank_forks = bank_forks; + vote_simulator.progress = progress; + vote_simulator.fill_bank_forks(forks, &HashMap::>::new(), true); + let (bank_forks, mut progress) = (vote_simulator.bank_forks, vote_simulator.progress); + // 4 is still the heaviest slot, but not votable beecause of lockout. 
+ // 9 is the deepest slot from our last voted fork (5), so it is what we should + // reset to. + let (vote_fork, reset_fork, heaviest_fork_failures) = run_compute_and_select_forks( + &bank_forks, + &mut progress, + &mut tower, + &mut vote_simulator.heaviest_subtree_fork_choice, + &mut vote_simulator.latest_validator_votes_for_frozen_banks, + None, + ); + assert!(vote_fork.is_none()); + assert_eq!(reset_fork, Some(9)); + assert_eq!( + heaviest_fork_failures, + vec![ + HeaviestForkFailures::FailedSwitchThreshold(4, 0, 10000), + HeaviestForkFailures::LockedOut(4) + ] + ); // If slot 5 is marked as confirmed, it becomes the heaviest bank on same slot again let mut duplicate_slots_to_repair = DuplicateSlotsToRepair::default(); @@ -6544,12 +6619,16 @@ pub(crate) mod tests { &mut purge_repair_slot_counter, SlotStateUpdate::DuplicateConfirmed(duplicate_confirmed_state), ); + // The confirmed hash is detected in `progress`, which means // it's confirmation on the replayed block. This means we have // the right version of the block, so `duplicate_slots_to_repair` // should be empty assert!(duplicate_slots_to_repair.is_empty()); - let (vote_fork, reset_fork, _) = run_compute_and_select_forks( + + // We should still reset to slot 9 as it's the heaviest on the now valid + // fork. + let (vote_fork, reset_fork, heaviest_fork_failures) = run_compute_and_select_forks( &bank_forks, &mut progress, &mut tower, @@ -6557,9 +6636,40 @@ pub(crate) mod tests { &mut vote_simulator.latest_validator_votes_for_frozen_banks, None, ); + assert!(vote_fork.is_none()); + assert_eq!(reset_fork.unwrap(), 9); + assert_eq!( + heaviest_fork_failures, + vec![ + HeaviestForkFailures::FailedSwitchThreshold(4, 0, 10000), + HeaviestForkFailures::LockedOut(4) + ] + ); + + // Resetting our forks back to how it was should allow us to reset to our + // last vote which was previously marked as invalid and now duplicate confirmed + let bank6_hash = bank_forks.read().unwrap().bank_hash(6).unwrap(); + let _ = vote_simulator + .heaviest_subtree_fork_choice + .split_off(&(6, bank6_hash)); // Should now pick 5 as the heaviest fork from last vote again. 
+ let (vote_fork, reset_fork, heaviest_fork_failures) = run_compute_and_select_forks( + &bank_forks, + &mut progress, + &mut tower, + &mut vote_simulator.heaviest_subtree_fork_choice, + &mut vote_simulator.latest_validator_votes_for_frozen_banks, + None, + ); assert!(vote_fork.is_none()); assert_eq!(reset_fork.unwrap(), 5); + assert_eq!( + heaviest_fork_failures, + vec![ + HeaviestForkFailures::FailedSwitchThreshold(4, 0, 10000), + HeaviestForkFailures::LockedOut(4) + ] + ); } #[test] @@ -7238,7 +7348,12 @@ pub(crate) mod tests { let (voting_sender, voting_receiver) = unbounded(); // Simulate landing a vote for slot 0 landing in slot 1 - let bank1 = Arc::new(Bank::new_from_parent(bank0.clone(), &Pubkey::default(), 1)); + let bank1 = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + bank0.clone(), + &Pubkey::default(), + 1, + ); bank1.fill_bank_with_ticks_for_tests(); tower.record_bank_vote(&bank0); ReplayStage::push_vote( @@ -7279,7 +7394,12 @@ pub(crate) mod tests { // Trying to refresh the vote for bank 0 in bank 1 or bank 2 won't succeed because // the last vote has landed already - let bank2 = Arc::new(Bank::new_from_parent(bank1.clone(), &Pubkey::default(), 2)); + let bank2 = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + bank1.clone(), + &Pubkey::default(), + 2, + ); bank2.fill_bank_with_ticks_for_tests(); bank2.freeze(); for refresh_bank in &[&bank1, &bank2] { @@ -7373,8 +7493,12 @@ pub(crate) mod tests { let mut parent_bank = bank2.clone(); for _ in 0..MAX_PROCESSING_AGE { let slot = parent_bank.slot() + 1; - parent_bank = - Arc::new(Bank::new_from_parent(parent_bank, &Pubkey::default(), slot)); + parent_bank = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + parent_bank, + &Pubkey::default(), + slot, + ); parent_bank.fill_bank_with_ticks_for_tests(); parent_bank.freeze(); } @@ -7429,11 +7553,12 @@ pub(crate) mod tests { // Processing the vote transaction should be valid let expired_bank_child_slot = expired_bank.slot() + 1; - let expired_bank_child = Arc::new(Bank::new_from_parent( + let expired_bank_child = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), expired_bank.clone(), &Pubkey::default(), expired_bank_child_slot, - )); + ); expired_bank_child.process_transaction(vote_tx).unwrap(); let vote_account = expired_bank_child .get_vote_account(&my_vote_pubkey) @@ -7449,11 +7574,12 @@ pub(crate) mod tests { // 1) The vote for slot 1 hasn't landed // 2) The latest refresh vote transaction's recent blockhash (the sibling's hash) doesn't exist // This will still not refresh because `MAX_VOTE_REFRESH_INTERVAL_MILLIS` has not expired yet - let expired_bank_sibling = Arc::new(Bank::new_from_parent( + let expired_bank_sibling = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), bank2, &Pubkey::default(), expired_bank_child.slot() + 1, - )); + ); expired_bank_sibling.fill_bank_with_ticks_for_tests(); expired_bank_sibling.freeze(); // Set the last refresh to now, shouldn't refresh because the last refresh just happened. 
@@ -7541,7 +7667,12 @@ pub(crate) mod tests { parent_bank.last_blockhash() ); assert_eq!(tower.last_voted_slot().unwrap(), parent_bank.slot()); - let bank = Bank::new_from_parent(parent_bank, &Pubkey::default(), my_slot); + let bank = new_bank_from_parent_with_bank_forks( + bank_forks, + parent_bank, + &Pubkey::default(), + my_slot, + ); bank.fill_bank_with_ticks_for_tests(); if make_it_landing { bank.process_transaction(vote_tx).unwrap(); @@ -7557,7 +7688,6 @@ pub(crate) mod tests { 0, ) }); - bank_forks.write().unwrap().insert(bank); bank_forks.read().unwrap().get(my_slot).unwrap() } @@ -7598,8 +7728,12 @@ pub(crate) mod tests { // Add a new fork starting from 0 with bigger slot number, we assume it has a bigger // weight, but we cannot switch because of lockout. let other_fork_slot = 1; - let other_fork_bank = - Bank::new_from_parent(bank0.clone(), &Pubkey::default(), other_fork_slot); + let other_fork_bank = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + bank0.clone(), + &Pubkey::default(), + other_fork_slot, + ); other_fork_bank.fill_bank_with_ticks_for_tests(); other_fork_bank.freeze(); progress.entry(other_fork_slot).or_insert_with(|| { @@ -7612,7 +7746,6 @@ pub(crate) mod tests { 0, ) }); - bank_forks.write().unwrap().insert(other_fork_bank); let (voting_sender, voting_receiver) = unbounded(); let mut cursor = Cursor::default(); @@ -7658,7 +7791,12 @@ pub(crate) mod tests { let last_voted_slot = tower.last_voted_slot().unwrap(); while new_bank.is_in_slot_hashes_history(&last_voted_slot) { let new_slot = new_bank.slot() + 1; - let bank = Bank::new_from_parent(new_bank, &Pubkey::default(), new_slot); + let bank = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + new_bank, + &Pubkey::default(), + new_slot, + ); bank.fill_bank_with_ticks_for_tests(); bank.freeze(); progress.entry(new_slot).or_insert_with(|| { @@ -7671,7 +7809,6 @@ pub(crate) mod tests { 0, ) }); - bank_forks.write().unwrap().insert(bank); new_bank = bank_forks.read().unwrap().get(new_slot).unwrap(); } let tip_of_voted_fork = new_bank.slot(); diff --git a/core/src/shred_fetch_stage.rs b/core/src/shred_fetch_stage.rs index 62733953cc724f..fd72b8b8eebb3b 100644 --- a/core/src/shred_fetch_stage.rs +++ b/core/src/shred_fetch_stage.rs @@ -10,7 +10,9 @@ use { solana_perf::packet::{PacketBatch, PacketBatchRecycler, PacketFlags, PACKETS_PER_BATCH}, solana_runtime::bank_forks::BankForks, solana_sdk::{ - clock::DEFAULT_MS_PER_SLOT, + clock::{Slot, DEFAULT_MS_PER_SLOT}, + epoch_schedule::EpochSchedule, + feature_set::{self, FeatureSet}, packet::{Meta, PACKET_DATA_SIZE}, pubkey::Pubkey, }, @@ -50,12 +52,20 @@ impl ShredFetchStage { .as_ref() .map(|(_, cluster_info)| cluster_info.keypair().clone()); - let (mut last_root, mut last_slot, mut slots_per_epoch) = { + let ( + mut last_root, + mut slots_per_epoch, + mut feature_set, + mut epoch_schedule, + mut last_slot, + ) = { let bank_forks_r = bank_forks.read().unwrap(); let root_bank = bank_forks_r.root_bank(); ( root_bank.slot(), root_bank.get_slots_in_epoch(root_bank.epoch()), + root_bank.feature_set.clone(), + root_bank.epoch_schedule().clone(), bank_forks_r.highest_slot(), ) }; @@ -69,6 +79,8 @@ impl ShredFetchStage { last_slot = bank_forks_r.highest_slot(); bank_forks_r.root_bank() }; + feature_set = root_bank.feature_set.clone(); + epoch_schedule = root_bank.epoch_schedule().clone(); last_root = root_bank.slot(); slots_per_epoch = root_bank.get_slots_in_epoch(root_bank.epoch()); keypair = repair_context @@ -92,10 +104,19 @@ impl ShredFetchStage { // 
Limit shreds to 2 epochs away. let max_slot = last_slot + 2 * slots_per_epoch; + let should_drop_legacy_shreds = + |shred_slot| should_drop_legacy_shreds(shred_slot, &feature_set, &epoch_schedule); let turbine_disabled = turbine_disabled.load(Ordering::Relaxed); for packet in packet_batch.iter_mut().filter(|p| !p.meta().discard()) { if turbine_disabled - || should_discard_shred(packet, last_root, max_slot, shred_version, &mut stats) + || should_discard_shred( + packet, + last_root, + max_slot, + shred_version, + should_drop_legacy_shreds, + &mut stats, + ) { packet.meta_mut().set_discard(true); } else { @@ -373,6 +394,22 @@ pub(crate) fn receive_repair_quic_packets( } } +#[must_use] +fn should_drop_legacy_shreds( + shred_slot: Slot, + feature_set: &FeatureSet, + epoch_schedule: &EpochSchedule, +) -> bool { + match feature_set.activated_slot(&feature_set::drop_legacy_shreds::id()) { + None => false, + Some(feature_slot) => { + let feature_epoch = epoch_schedule.get_epoch(feature_slot); + let shred_epoch = epoch_schedule.get_epoch(shred_slot); + feature_epoch < shred_epoch + } + } +} + #[cfg(test)] mod tests { use { @@ -413,6 +450,7 @@ mod tests { last_root, max_slot, shred_version, + |_| false, // should_drop_legacy_shreds &mut stats, )); let coding = solana_ledger::shred::Shredder::generate_coding_shreds( @@ -426,6 +464,7 @@ mod tests { last_root, max_slot, shred_version, + |_| false, // should_drop_legacy_shreds &mut stats, )); } @@ -447,6 +486,7 @@ mod tests { last_root, max_slot, shred_version, + |_| false, // should_drop_legacy_shreds &mut stats, )); assert_eq!(stats.index_overrun, 1); @@ -468,12 +508,18 @@ mod tests { 3, max_slot, shred_version, + |_| false, // should_drop_legacy_shreds &mut stats, )); assert_eq!(stats.slot_out_of_range, 1); assert!(should_discard_shred( - &packet, last_root, max_slot, /*shred_version:*/ 345, &mut stats, + &packet, + last_root, + max_slot, + 345, // shred_version + |_| false, // should_drop_legacy_shreds + &mut stats, )); assert_eq!(stats.shred_version_mismatch, 1); @@ -483,6 +529,7 @@ mod tests { last_root, max_slot, shred_version, + |_| false, // should_drop_legacy_shreds &mut stats, )); @@ -504,6 +551,7 @@ mod tests { last_root, max_slot, shred_version, + |_| false, // should_drop_legacy_shreds &mut stats, )); @@ -515,6 +563,7 @@ mod tests { last_root, max_slot, shred_version, + |_| false, // should_drop_legacy_shreds &mut stats, )); } diff --git a/core/src/system_monitor_service.rs b/core/src/system_monitor_service.rs index 3749fbf9ae3697..bec85780fc3ede 100644 --- a/core/src/system_monitor_service.rs +++ b/core/src/system_monitor_service.rs @@ -393,6 +393,29 @@ pub struct SystemMonitorStatsReportConfig { pub report_os_disk_stats: bool, } +#[cfg_attr(not(target_os = "linux"), allow(dead_code))] +enum InterestingLimit { + Recommend(i64), + QueryOnly, +} + +#[cfg(target_os = "linux")] +const INTERESTING_LIMITS: &[(&str, InterestingLimit)] = &[ + ("net.core.rmem_max", InterestingLimit::Recommend(134217728)), + ( + "net.core.rmem_default", + InterestingLimit::Recommend(134217728), + ), + ("net.core.wmem_max", InterestingLimit::Recommend(134217728)), + ( + "net.core.wmem_default", + InterestingLimit::Recommend(134217728), + ), + ("vm.max_map_count", InterestingLimit::Recommend(1000000)), + ("net.core.optmem_max", InterestingLimit::QueryOnly), + ("net.core.netdev_max_backlog", InterestingLimit::QueryOnly), +]; + impl SystemMonitorService { pub fn new(exit: Arc, config: SystemMonitorStatsReportConfig) -> Self { info!("Starting SystemMonitorService"); @@ 
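The new `should_drop_legacy_shreds` helper in `shred_fetch_stage.rs` drops a shred only once its slot falls in an epoch strictly after the epoch in which the `drop_legacy_shreds` feature activated. A toy version of that epoch math, with a fixed slots-per-epoch constant standing in for the real `EpochSchedule`:

/// Slots-per-epoch is a stand-in; the real code asks `EpochSchedule::get_epoch`.
const SLOTS_PER_EPOCH: u64 = 32;

fn epoch(slot: u64) -> u64 {
    slot / SLOTS_PER_EPOCH
}

/// No activation slot means the feature is off and nothing is dropped; otherwise
/// shreds are dropped starting from the epoch *after* the activation epoch.
fn drop_legacy_shred(shred_slot: u64, feature_activation_slot: Option<u64>) -> bool {
    match feature_activation_slot {
        None => false,
        Some(feature_slot) => epoch(feature_slot) < epoch(shred_slot),
    }
}

fn main() {
    // Feature activated at slot 40 (epoch 1): epoch-1 shreds still pass,
    // epoch-2 shreds and later are dropped.
    assert!(!drop_legacy_shred(50, Some(40)));
    assert!(drop_legacy_shred(64, Some(40)));
    assert!(!drop_legacy_shred(64, None));
}

So legacy shreds continue to be accepted for the remainder of the activation epoch before the fetch stage starts rejecting them.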
-406,27 +429,8 @@ impl SystemMonitorService { Self { thread_hdl } } - #[cfg_attr(not(target_os = "linux"), allow(dead_code))] - fn linux_get_recommended_network_limits() -> HashMap<&'static str, i64> { - // Reference: https://medium.com/@CameronSparr/increase-os-udp-buffers-to-improve-performance-51d167bb1360 - let mut recommended_limits: HashMap<&str, i64> = HashMap::default(); - recommended_limits.insert("net.core.rmem_max", 134217728); - recommended_limits.insert("net.core.rmem_default", 134217728); - recommended_limits.insert("net.core.wmem_max", 134217728); - recommended_limits.insert("net.core.wmem_default", 134217728); - recommended_limits.insert("vm.max_map_count", 1000000); - - // Additionally collect the following limits - recommended_limits.insert("net.core.optmem_max", 0); - recommended_limits.insert("net.core.netdev_max_backlog", 0); - - recommended_limits - } - #[cfg(target_os = "linux")] - fn linux_get_current_network_limits( - recommended_limits: &HashMap<&'static str, i64>, - ) -> HashMap<&'static str, i64> { + fn linux_get_current_network_limits() -> Vec<(&'static str, &'static InterestingLimit, i64)> { use sysctl::Sysctl; fn sysctl_read(name: &str) -> Result { @@ -435,47 +439,48 @@ impl SystemMonitorService { Ok(val) } - let mut current_limits: HashMap<&str, i64> = HashMap::default(); - for (key, _) in recommended_limits.iter() { - let current_val = match sysctl_read(key) { - Ok(val) => val.parse::().unwrap(), - Err(e) => { - error!("Failed to query value for {}: {}", key, e); - -1 - } - }; - current_limits.insert(key, current_val); + fn normalize_err(key: &str, error: E) -> String { + format!("Failed to query value for {}: {}", key, error) } - current_limits + INTERESTING_LIMITS + .iter() + .map(|(key, interesting_limit)| { + let current_value = sysctl_read(key) + .map_err(|e| normalize_err(key, e)) + .and_then(|val| val.parse::().map_err(|e| normalize_err(key, e))) + .unwrap_or_else(|e| { + error!("{}", e); + -1 + }); + (*key, interesting_limit, current_value) + }) + .collect::>() } #[cfg_attr(not(target_os = "linux"), allow(dead_code))] fn linux_report_network_limits( - current_limits: &HashMap<&str, i64>, - recommended_limits: &HashMap<&'static str, i64>, + current_limits: &[(&'static str, &'static InterestingLimit, i64)], ) -> bool { - let mut check_failed = false; - for (key, recommended_val) in recommended_limits.iter() { - let current_val = *current_limits.get(key).unwrap_or(&-1); - if current_val < *recommended_val { - datapoint_warn!("os-config", (key, current_val, i64)); - warn!( - " {}: recommended={} current={}, too small", - key, recommended_val, current_val - ); - check_failed = true; - } else { - datapoint_info!("os-config", (key, current_val, i64)); - info!( - " {}: recommended={} current={}", - key, recommended_val, current_val - ); - } - } - if check_failed { - datapoint_warn!("os-config", ("network_limit_test_failed", 1, i64)); - } - !check_failed + current_limits + .iter() + .map(|(key, interesting_limit, current_value)| { + datapoint_warn!("os-config", (key, *current_value, i64)); + match interesting_limit { + InterestingLimit::Recommend(recommended_value) if current_value < recommended_value => { + warn!(" {key}: recommended={recommended_value} current={current_value}, too small"); + false + } + InterestingLimit::Recommend(recommended_value) => { + info!(" {key}: recommended={recommended_value} current={current_value}"); + true + } + InterestingLimit::QueryOnly => { + info!(" {key}: report-only -- current={current_value}"); + true + } + } + }) + 
.all(|good| good) } #[cfg(not(target_os = "linux"))] @@ -487,9 +492,8 @@ impl SystemMonitorService { #[cfg(target_os = "linux")] pub fn check_os_network_limits() -> bool { datapoint_info!("os-config", ("platform", platform_id(), String)); - let recommended_limits = Self::linux_get_recommended_network_limits(); - let current_limits = Self::linux_get_current_network_limits(&recommended_limits); - Self::linux_report_network_limits(¤t_limits, &recommended_limits) + let current_limits = Self::linux_get_current_network_limits(); + Self::linux_report_network_limits(¤t_limits) } #[cfg(target_os = "linux")] diff --git a/core/src/tpu.rs b/core/src/tpu.rs index e6db8dc60db9e2..0456a33a8d91f4 100644 --- a/core/src/tpu.rs +++ b/core/src/tpu.rs @@ -31,10 +31,10 @@ use { rpc_subscriptions::RpcSubscriptions, }, solana_runtime::{bank_forks::BankForks, prioritization_fee_cache::PrioritizationFeeCache}, - solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Keypair}, + solana_sdk::{clock::Slot, pubkey::Pubkey, quic::NotifyKeyUpdate, signature::Keypair}, solana_streamer::{ nonblocking::quic::DEFAULT_WAIT_FOR_CHUNK_TIMEOUT, - quic::{spawn_server, MAX_STAKED_CONNECTIONS, MAX_UNSTAKED_CONNECTIONS}, + quic::{spawn_server, SpawnServerResult, MAX_STAKED_CONNECTIONS, MAX_UNSTAKED_CONNECTIONS}, streamer::StakedNodes, }, solana_turbine::broadcast_stage::{BroadcastStage, BroadcastStageType}, @@ -111,7 +111,7 @@ impl Tpu { prioritization_fee_cache: &Arc, block_production_method: BlockProductionMethod, _generator_config: Option, /* vestigial code for replay invalidator */ - ) -> Self { + ) -> (Self, Vec>) { let TpuSockets { transactions: transactions_sockets, transaction_forwards: tpu_forwards_sockets, @@ -148,7 +148,11 @@ impl Tpu { let (non_vote_sender, non_vote_receiver) = banking_tracer.create_channel_non_vote(); - let (_, tpu_quic_t) = spawn_server( + let SpawnServerResult { + endpoint: _, + thread: tpu_quic_t, + key_updater, + } = spawn_server( "quic_streamer_tpu", transactions_quic_sockets, keypair, @@ -168,7 +172,11 @@ impl Tpu { ) .unwrap(); - let (_, tpu_forwards_quic_t) = spawn_server( + let SpawnServerResult { + endpoint: _, + thread: tpu_forwards_quic_t, + key_updater: forwards_key_updater, + } = spawn_server( "quic_streamer_tpu_forwards", transactions_forwards_quic_sockets, keypair, @@ -259,19 +267,22 @@ impl Tpu { turbine_quic_endpoint_sender, ); - Self { - fetch_stage, - sigverify_stage, - vote_sigverify_stage, - banking_stage, - cluster_info_vote_listener, - broadcast_stage, - tpu_quic_t, - tpu_forwards_quic_t, - tpu_entry_notifier, - staked_nodes_updater_service, - tracer_thread_hdl, - } + ( + Self { + fetch_stage, + sigverify_stage, + vote_sigverify_stage, + banking_stage, + cluster_info_vote_listener, + broadcast_stage, + tpu_quic_t, + tpu_forwards_quic_t, + tpu_entry_notifier, + staked_nodes_updater_service, + tracer_thread_hdl, + }, + vec![key_updater, forwards_key_updater], + ) } pub fn join(self) -> thread::Result<()> { diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 214fae3dceac0f..2fe7e08dd60f8b 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -196,7 +196,12 @@ impl Tvu { let (dumped_slots_sender, dumped_slots_receiver) = unbounded(); let (popular_pruned_forks_sender, popular_pruned_forks_receiver) = unbounded(); let window_service = { - let epoch_schedule = *bank_forks.read().unwrap().working_bank().epoch_schedule(); + let epoch_schedule = bank_forks + .read() + .unwrap() + .working_bank() + .epoch_schedule() + .clone(); let repair_info = RepairInfo { bank_forks: bank_forks.clone(), 
epoch_schedule, diff --git a/core/src/validator.rs b/core/src/validator.rs index 700315f4a67c1a..df5ec80f431582 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -748,13 +748,7 @@ impl Validator { let (snapshot_package_sender, snapshot_packager_service) = if config.snapshot_config.should_generate_snapshots() { - // filler accounts make snapshots invalid for use - // so, do not publish that we have snapshots - let enable_gossip_push = config - .accounts_db_config - .as_ref() - .map(|config| config.filler_accounts_config.count == 0) - .unwrap_or(true); + let enable_gossip_push = true; let (snapshot_package_sender, snapshot_package_receiver) = crossbeam_channel::unbounded(); let snapshot_packager_service = SnapshotPackagerService::new( @@ -1086,13 +1080,6 @@ impl Validator { exit.clone(), ); - *admin_rpc_service_post_init.write().unwrap() = Some(AdminRpcRequestMetadataPostInit { - bank_forks: bank_forks.clone(), - cluster_info: cluster_info.clone(), - vote_account: *vote_account, - repair_whitelist: config.repair_whitelist.clone(), - }); - let waited_for_supermajority = wait_for_supermajority( config, Some(&mut process_blockstore), @@ -1301,7 +1288,7 @@ impl Validator { }; } - let tpu = Tpu::new( + let (tpu, mut key_notifies) = Tpu::new( &cluster_info, &poh_recorder, entry_receiver, @@ -1352,6 +1339,16 @@ impl Validator { ); *start_progress.write().unwrap() = ValidatorStartProgress::Running; + key_notifies.push(connection_cache); + + *admin_rpc_service_post_init.write().unwrap() = Some(AdminRpcRequestMetadataPostInit { + bank_forks: bank_forks.clone(), + cluster_info: cluster_info.clone(), + vote_account: *vote_account, + repair_whitelist: config.repair_whitelist.clone(), + notifies: key_notifies, + }); + Ok(Self { stats_reporter_service, gossip_service, @@ -1807,7 +1804,8 @@ fn load_blockstore( .map(|service| service.sender()), accounts_update_notifier, exit, - ); + ) + .map_err(|err| err.to_string())?; // Before replay starts, set the callbacks in each of the banks in BankForks so that // all dropped banks come through the `pruned_banks_receiver` channel. 
This way all bank diff --git a/core/src/vote_simulator.rs b/core/src/vote_simulator.rs index d8986d90e5db76..58d8a40d2eb4c6 100644 --- a/core/src/vote_simulator.rs +++ b/core/src/vote_simulator.rs @@ -64,6 +64,8 @@ impl VoteSimulator { latest_validator_votes_for_frozen_banks: LatestValidatorVotesForFrozenBanks::default(), } } + + #[cfg(feature = "dev-context-only-utils")] pub fn fill_bank_forks( &mut self, forks: Tree, @@ -84,6 +86,12 @@ impl VoteSimulator { let parent = *walk.get_parent().unwrap().data(); let parent_bank = self.bank_forks.read().unwrap().get(parent).unwrap(); let new_bank = Bank::new_from_parent(parent_bank.clone(), &Pubkey::default(), slot); + let new_bank = self + .bank_forks + .write() + .unwrap() + .insert(new_bank) + .clone_without_scheduler(); self.progress .entry(slot) .or_insert_with(|| ForkProgress::new(Hash::default(), None, None, 0, 0)); @@ -131,7 +139,6 @@ impl VoteSimulator { Some((new_bank.parent_slot(), new_bank.parent_hash())), ); } - self.bank_forks.write().unwrap().insert(new_bank); walk.forward(); } @@ -357,7 +364,7 @@ pub fn initialize_state( ); genesis_config.poh_config.hashes_per_tick = Some(2); - let bank0 = Bank::new_for_tests(&genesis_config); + let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); for pubkey in validator_keypairs_map.keys() { bank0.transfer(10_000, &mint_keypair, pubkey).unwrap(); @@ -372,7 +379,6 @@ pub fn initialize_state( 0, ForkProgress::new_from_bank(&bank0, bank0.collector_id(), &Pubkey::default(), None, 0, 0), ); - let bank_forks = BankForks::new_rw_arc(bank0); let heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new_from_bank_forks(bank_forks.clone()); (bank_forks, progress, heaviest_subtree_fork_choice) diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index 71e46f2b66b2d2..7709e393ae4c69 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -196,7 +196,7 @@ fn run_bank_forks_snapshot_n( f: F, set_root_interval: u64, ) where - F: Fn(&mut Bank, &Keypair), + F: Fn(&Bank, &Keypair), { solana_logger::setup(); // Set up snapshotting config @@ -221,13 +221,13 @@ fn run_bank_forks_snapshot_n( accounts_package_sender, }; for slot in 1..=last_slot { - let mut bank = Bank::new_from_parent( + let bank = Bank::new_from_parent( bank_forks.read().unwrap().get(slot - 1).unwrap().clone(), &Pubkey::default(), slot, ); - f(&mut bank, mint_keypair); let bank = bank_forks.write().unwrap().insert(bank); + f(bank.clone_without_scheduler().as_ref(), mint_keypair); // Set root to make sure we don't end up with too many account storage entries // and to allow snapshotting of bank and the purging logic on status_cache to // kick in @@ -399,6 +399,11 @@ fn test_concurrent_snapshot_packaging( &Pubkey::default(), parent_slot + 1, ); + let bank = bank_forks + .write() + .unwrap() + .insert(bank) + .clone_without_scheduler(); let slot = bank.slot(); let key1 = Keypair::new().pubkey(); let tx = system_transaction::transfer(mint_keypair, &key1, 1, genesis_config.hash()); @@ -439,7 +444,6 @@ fn test_concurrent_snapshot_packaging( ); accounts_package_sender.send(accounts_package).unwrap(); - bank_forks.write().unwrap().insert(bank); if slot == saved_slot { // Find the relevant snapshot storages let snapshot_storage_files: HashSet<_> = bank_forks @@ -758,6 +762,8 @@ fn test_bank_forks_incremental_snapshot( let bank = { let parent = bank_forks.read().unwrap().get(slot - 1).unwrap(); let bank = Bank::new_from_parent(parent, &Pubkey::default(), slot); + let bank_scheduler = 
bank_forks.write().unwrap().insert(bank); + let bank = bank_scheduler.clone_without_scheduler(); let key = solana_sdk::pubkey::new_rand(); let tx = system_transaction::transfer(mint_keypair, &key, 1, bank.last_blockhash()); @@ -771,7 +777,7 @@ fn test_bank_forks_incremental_snapshot( bank.register_unique_tick(); } - bank_forks.write().unwrap().insert(bank) + bank_scheduler }; // Set root to make sure we don't end up with too many account storage entries @@ -1062,6 +1068,11 @@ fn test_snapshots_with_background_services( &Pubkey::default(), slot, ); + let bank = bank_forks + .write() + .unwrap() + .insert(bank) + .clone_without_scheduler(); let key = solana_sdk::pubkey::new_rand(); let tx = system_transaction::transfer(mint_keypair, &key, 1, bank.last_blockhash()); @@ -1074,8 +1085,6 @@ fn test_snapshots_with_background_services( while !bank.is_complete() { bank.register_unique_tick(); } - - bank_forks.write().unwrap().insert(bank); } // Call `BankForks::set_root()` to cause snapshots to be taken diff --git a/cost-model/Cargo.toml b/cost-model/Cargo.toml index 6142f9be876215..4a8b159bbf4cb6 100644 --- a/cost-model/Cargo.toml +++ b/cost-model/Cargo.toml @@ -32,6 +32,7 @@ name = "solana_cost_model" [dev-dependencies] solana-logger = { workspace = true } +solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } static_assertions = { workspace = true } test-case = { workspace = true } diff --git a/cost-model/src/cost_model.rs b/cost-model/src/cost_model.rs index bb3e296d6dcbe0..1232e5c3c9be6e 100644 --- a/cost-model/src/cost_model.rs +++ b/cost-model/src/cost_model.rs @@ -16,7 +16,7 @@ use { }, }, solana_sdk::{ - borsh0_10::try_from_slice_unchecked, + borsh1::try_from_slice_unchecked, compute_budget::{self, ComputeBudgetInstruction}, feature_set::{include_loaded_accounts_data_size_in_fee_calculation, FeatureSet}, fee::FeeStructure, diff --git a/cost-model/src/cost_tracker.rs b/cost-model/src/cost_tracker.rs index efdd86512d2039..9d322d009c62f2 100644 --- a/cost-model/src/cost_tracker.rs +++ b/cost-model/src/cost_tracker.rs @@ -186,7 +186,9 @@ impl CostTracker { if self.vote_cost.saturating_add(cost) > self.vote_cost_limit { return Err(CostTrackerError::WouldExceedVoteMaxLimit); } - } else if self.block_cost.saturating_add(cost) > self.block_cost_limit { + } + + if self.block_cost.saturating_add(cost) > self.block_cost_limit { // check against the total package cost return Err(CostTrackerError::WouldExceedBlockMaxLimit); } diff --git a/docs/src/cli/deploy-a-program.md b/docs/src/cli/deploy-a-program.md index 1f39dfe1399e70..3b2c89bf454ea2 100644 --- a/docs/src/cli/deploy-a-program.md +++ b/docs/src/cli/deploy-a-program.md @@ -13,8 +13,8 @@ To deploy a program, use the Solana tools to interact with the on-chain loader to: - Initialize a program account -- Upload the program's shared object to the program account's data buffer -- Verify the uploaded program +- Upload the program's shared object (the program binary `.so`) to the program account's data buffer +- (optional) Verify the uploaded program - Finalize the program by marking the program account executable. Once deployed, anyone can execute the program by sending transactions that @@ -25,7 +25,7 @@ reference it to the cluster. 
### Deploy a program To deploy a program, you will need the location of the program's shared object -(the program binary .so) +(the program binary `.so`): ```bash solana program deploy @@ -89,8 +89,9 @@ Data Length: 5216 (0x1460) bytes ### Redeploy a program A program can be redeployed to the same address to facilitate rapid development, -bug fixes, or upgrades. Matching keypair files are generated once so that -redeployments will be to the same program address. +bug fixes, or upgrades. If a program id is not provided, the program will be +deployed to the default address at `-keypair.json`. This default +keypair is generated during the first program compilation. The command looks the same as the deployment command: @@ -109,11 +110,6 @@ to become (plus some wiggle room). solana program deploy --max-len 200000 ``` -Note that program accounts are required to be -[rent-exempt](developing/programming-model/accounts.md#rent-exemption), and the -`max-len` is fixed after initial deployment, so any SOL in the program accounts -is locked up permanently. - ### Resuming a failed deploy If program deployment fails, there will be a hanging intermediate buffer account @@ -157,7 +153,7 @@ solana program deploy --buffer Both program and buffer accounts can be closed and their lamport balances transferred to a recipient's account. -If deployment fails there will be a left over buffer account that holds +If deployment fails there will be a left-over buffer account that holds lamports. The buffer account can either be used to [resume a deploy](#resuming-a-failed-deploy) or closed. @@ -209,7 +205,7 @@ solana program show --buffers --all ### Set a program's upgrade authority -The program's upgrade authority must to be present to deploy a program. If no +The program's upgrade authority must be present to deploy a program. If no authority is specified during program deployment, the default keypair is used as the authority. This is why redeploying a program in the steps above didn't require an authority to be explicitly specified. @@ -232,6 +228,11 @@ Or after deployment and specifying the current authority: solana program set-upgrade-authority --upgrade-authority --new-upgrade-authority ``` +By default, `set-upgrade-authority` requires a signature from the new authority. This behavior prevents a +developer from giving upgrade authority to a key that they do not have access to. The +`--skip-new-upgrade-authority-signer-check` option relaxes the signer check. This can be useful for situations +where the new upgrade authority is an offline signer or a multisig. + ### Immutable programs A program can be marked immutable, which prevents all further redeployments, by @@ -256,12 +257,12 @@ solana program dump ``` The dumped file will be in the same as what was deployed, so in the case of a -shared object, the dumped file will be a fully functional shared object. Note +shared object (the program binary `.so`), the dumped file will be a fully functional shared object. Note that the `dump` command dumps the entire data space, which means the output file will have trailing zeros after the shared object's data up to `max_len`. Sometimes it is useful to dump and compare a program to ensure it matches a -known program binary. The original program file can be zero-extended, hashed, -and compared to the hash of the dumped file. +known program binary. The dumped file can be zero-truncated, hashed, +and compared to the hash of the original program file. 
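For illustration, a non-authoritative sketch of that comparison follows; it is not part of this patch. `<PROGRAM_ADDRESS>`, `program.so`, and `dump.so` are placeholder names, and GNU coreutils `truncate` and `sha256sum` are assumed to be available:

```bash
# Fetch the on-chain program, trim the dump's trailing zero padding, and compare hashes.
solana program dump <PROGRAM_ADDRESS> dump.so
truncate --reference=program.so dump.so   # shrink dump.so to the local binary's size
sha256sum program.so dump.so              # matching hashes mean the deployed program is identical
```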
```bash $ solana dump dump.so @@ -298,5 +299,7 @@ solana program deploy --program-id --buffer ``` Note, the buffer's authority must match the program's upgrade authority. +During deployment, the buffer account's contents are copied into the program-data account and +the buffer account is set to zero. The lamports from the buffer account are refunded to a spill account. Buffers also support `show` and `dump` just like programs do. diff --git a/docs/src/cli/sign-offchain-message.md b/docs/src/cli/sign-offchain-message.md index cc256281722647..543c5c314a61e1 100644 --- a/docs/src/cli/sign-offchain-message.md +++ b/docs/src/cli/sign-offchain-message.md @@ -95,4 +95,4 @@ with a fixed prefix: `\xffsolana offchain`, where first byte is chosen such that it is implicitly illegal as the first byte in a transaction `MessageHeader` today. More details about the payload format and other considerations are available in the -[proposal](https://github.com/solana-labs/solana/blob/e80f67dd58b7fa3901168055211f346164efa43a/docs/src/proposals/off-chain-message-signing.md). +[proposal](https://github.com/solana-labs/solana/blob/master/docs/src/proposals/off-chain-message-signing.md). diff --git a/download-utils/Cargo.toml b/download-utils/Cargo.toml index 66e8334ab3c09e..9321eb4c88db92 100644 --- a/download-utils/Cargo.toml +++ b/download-utils/Cargo.toml @@ -17,6 +17,9 @@ reqwest = { workspace = true, features = ["blocking", "brotli", "deflate", "gzip solana-runtime = { workspace = true } solana-sdk = { workspace = true } +[dev-dependencies] +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } + [lib] crate-type = ["lib"] name = "solana_download_utils" diff --git a/genesis/Cargo.toml b/genesis/Cargo.toml index b1e57a296cf79e..cd8e652a86b7ad 100644 --- a/genesis/Cargo.toml +++ b/genesis/Cargo.toml @@ -30,6 +30,9 @@ solana-version = { workspace = true } solana-vote-program = { workspace = true } tempfile = { workspace = true } +[dev-dependencies] +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } + [[bin]] name = "solana-genesis" path = "src/main.rs" diff --git a/genesis/src/main.rs b/genesis/src/main.rs index c254975379c937..6b7efd5e664339 100644 --- a/genesis/src/main.rs +++ b/genesis/src/main.rs @@ -547,7 +547,7 @@ fn main() -> Result<(), Box> { identity_pubkey, identity_pubkey, commission, - VoteState::get_rent_exempt_reserve(&rent).max(1), + VoteState::get_rent_exempt_reserve(&genesis_config.rent).max(1), ); genesis_config.add_account( @@ -558,7 +558,7 @@ fn main() -> Result<(), Box> { .unwrap_or(identity_pubkey), vote_pubkey, &vote_account, - &rent, + &genesis_config.rent, bootstrap_validator_stake_lamports, ), ); diff --git a/genesis/src/stakes.rs b/genesis/src/stakes.rs index 1d7c18f3a034a9..133fdf57f4968b 100644 --- a/genesis/src/stakes.rs +++ b/genesis/src/stakes.rs @@ -246,7 +246,7 @@ mod tests { let total_lamports = staker_reserve + reserve * 2 + 1; create_and_check_stakes( &mut GenesisConfig { - rent, + rent: rent.clone(), ..GenesisConfig::default() }, &StakerInfo { @@ -272,7 +272,7 @@ mod tests { let total_lamports = staker_reserve + reserve * 2 + 1; create_and_check_stakes( &mut GenesisConfig { - rent, + rent: rent.clone(), ..GenesisConfig::default() }, &StakerInfo { @@ -298,7 +298,7 @@ mod tests { let total_lamports = staker_reserve + (granularity + reserve) * 2; create_and_check_stakes( &mut GenesisConfig { - rent, + rent: rent.clone(), ..GenesisConfig::default() }, &StakerInfo { @@ -323,7 +323,7 @@ mod tests { let total_lamports = 
staker_reserve + (granularity + reserve + 1) * 2; create_and_check_stakes( &mut GenesisConfig { - rent, + rent: rent.clone(), ..GenesisConfig::default() }, &StakerInfo { diff --git a/geyser-plugin-interface/src/geyser_plugin_interface.rs b/geyser-plugin-interface/src/geyser_plugin_interface.rs index 43c31f749b6e8f..037aedf8b87e89 100644 --- a/geyser-plugin-interface/src/geyser_plugin_interface.rs +++ b/geyser-plugin-interface/src/geyser_plugin_interface.rs @@ -321,6 +321,35 @@ pub type Result = std::result::Result; /// Geyser plugins must describe desired behavior for load and unload, /// as well as how they will handle streamed data. pub trait GeyserPlugin: Any + Send + Sync + std::fmt::Debug { + /// The callback to allow the plugin to setup the logging configuration using the logger + /// and log level specified by the validator. Will be called first on load/reload, before any other + /// callback, and only called once. + /// # Examples + /// + /// ``` + /// use solana_geyser_plugin_interface::geyser_plugin_interface::{GeyserPlugin, + /// GeyserPluginError, Result}; + /// + /// #[derive(Debug)] + /// struct SamplePlugin; + /// impl GeyserPlugin for SamplePlugin { + /// fn setup_logger(&self, logger: &'static dyn log::Log, level: log::LevelFilter) -> Result<()> { + /// log::set_max_level(level); + /// if let Err(err) = log::set_logger(logger) { + /// return Err(GeyserPluginError::Custom(Box::new(err))); + /// } + /// Ok(()) + /// } + /// fn name(&self) -> &'static str { + /// &"sample" + /// } + /// } + /// ``` + #[allow(unused_variables)] + fn setup_logger(&self, logger: &'static dyn log::Log, level: log::LevelFilter) -> Result<()> { + Ok(()) + } + fn name(&self) -> &'static str; /// The callback called when a plugin is loaded by the system, diff --git a/geyser-plugin-manager/src/geyser_plugin_manager.rs b/geyser-plugin-manager/src/geyser_plugin_manager.rs index 9e353b0254ffba..02792525ad370c 100644 --- a/geyser-plugin-manager/src/geyser_plugin_manager.rs +++ b/geyser-plugin-manager/src/geyser_plugin_manager.rs @@ -4,12 +4,48 @@ use { libloading::Library, log::*, solana_geyser_plugin_interface::geyser_plugin_interface::GeyserPlugin, - std::path::Path, + std::{ + ops::{Deref, DerefMut}, + path::Path, + }, }; +#[derive(Debug)] +pub struct LoadedGeyserPlugin { + name: String, + plugin: Box, +} + +impl LoadedGeyserPlugin { + pub fn new(plugin: Box, name: Option) -> Self { + Self { + name: name.unwrap_or_else(|| plugin.name().to_owned()), + plugin, + } + } + + pub fn name(&self) -> &str { + &self.name + } +} + +impl Deref for LoadedGeyserPlugin { + type Target = Box; + + fn deref(&self) -> &Self::Target { + &self.plugin + } +} + +impl DerefMut for LoadedGeyserPlugin { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.plugin + } +} + #[derive(Default, Debug)] pub struct GeyserPluginManager { - pub plugins: Vec>, + pub plugins: Vec, libs: Vec, } @@ -107,6 +143,8 @@ impl GeyserPluginManager { }); } + setup_logger_for_plugin(&*new_plugin.plugin)?; + // Call on_load and push plugin new_plugin .on_load(new_config_file, false) @@ -193,6 +231,8 @@ impl GeyserPluginManager { }); } + setup_logger_for_plugin(&*new_plugin.plugin)?; + // Attempt to on_load with new plugin match new_plugin.on_load(new_parsed_config_file, true) { // On success, push plugin and library @@ -221,12 +261,27 @@ impl GeyserPluginManager { let mut current_plugin = self.plugins.remove(idx); let name = current_plugin.name().to_string(); current_plugin.on_unload(); + // The plugin must be dropped before the library to 
avoid a crash. drop(current_plugin); drop(current_lib); info!("Unloaded plugin {name} at idx {idx}"); } } +// Initialize logging for the plugin +fn setup_logger_for_plugin(new_plugin: &dyn GeyserPlugin) -> Result<(), jsonrpc_core::Error> { + new_plugin + .setup_logger(log::logger(), log::max_level()) + .map_err(|setup_logger_err| jsonrpc_core::Error { + code: ErrorCode::InvalidRequest, + message: format!( + "setup_logger method of plugin {} failed: {setup_logger_err}", + new_plugin.name() + ), + data: None, + }) +} + #[derive(Debug)] pub enum GeyserPluginManagerRequest { ReloadPlugin { @@ -284,7 +339,7 @@ pub enum GeyserPluginManagerError { #[cfg(not(test))] pub(crate) fn load_plugin_from_config( geyser_plugin_config_file: &Path, -) -> Result<(Box, Library, &str), GeyserPluginManagerError> { +) -> Result<(LoadedGeyserPlugin, Library, &str), GeyserPluginManagerError> { use std::{fs::File, io::Read, path::PathBuf}; type PluginConstructor = unsafe fn() -> *mut dyn GeyserPlugin; use libloading::Symbol; @@ -327,6 +382,8 @@ pub(crate) fn load_plugin_from_config( libpath = config_dir.join(libpath); } + let plugin_name = result["name"].as_str().map(|s| s.to_owned()); + let config_file = geyser_plugin_config_file .as_os_str() .to_str() @@ -341,7 +398,11 @@ pub(crate) fn load_plugin_from_config( let plugin_raw = constructor(); (Box::from_raw(plugin_raw), lib) }; - Ok((plugin, lib, config_file)) + Ok(( + LoadedGeyserPlugin::new(plugin, plugin_name), + lib, + config_file, + )) } #[cfg(test)] @@ -357,7 +418,7 @@ const TESTPLUGIN2_CONFIG: &str = "TESTPLUGIN2_CONFIG"; #[cfg(test)] pub(crate) fn load_plugin_from_config( geyser_plugin_config_file: &Path, -) -> Result<(Box, Library, &str), GeyserPluginManagerError> { +) -> Result<(LoadedGeyserPlugin, Library, &str), GeyserPluginManagerError> { if geyser_plugin_config_file.ends_with(TESTPLUGIN_CONFIG) { Ok(tests::dummy_plugin_and_library( tests::TestPlugin, @@ -379,7 +440,7 @@ pub(crate) fn load_plugin_from_config( mod tests { use { crate::geyser_plugin_manager::{ - GeyserPluginManager, TESTPLUGIN2_CONFIG, TESTPLUGIN_CONFIG, + GeyserPluginManager, LoadedGeyserPlugin, TESTPLUGIN2_CONFIG, TESTPLUGIN_CONFIG, }, libloading::Library, solana_geyser_plugin_interface::geyser_plugin_interface::GeyserPlugin, @@ -389,9 +450,9 @@ mod tests { pub(super) fn dummy_plugin_and_library( plugin: P, config_path: &'static str, - ) -> (Box, Library, &'static str) { + ) -> (LoadedGeyserPlugin, Library, &'static str) { ( - Box::new(plugin), + LoadedGeyserPlugin::new(Box::new(plugin), None), Library::from(libloading::os::unix::Library::this()), config_path, ) diff --git a/gossip/Cargo.toml b/gossip/Cargo.toml index 3696342ae83b24..f9870ac1ee380c 100644 --- a/gossip/Cargo.toml +++ b/gossip/Cargo.toml @@ -55,6 +55,7 @@ thiserror = { workspace = true } [dev-dependencies] num_cpus = { workspace = true } serial_test = { workspace = true } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } test-case = { workspace = true } [build-dependencies] diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index 353d1e13ddc278..9f76523be792a7 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -33,13 +33,13 @@ use { }, crds_value::{ self, AccountsHashes, CrdsData, CrdsValue, CrdsValueLabel, EpochSlotsIndex, LowestSlot, - NodeInstance, RestartLastVotedForkSlots, RestartLastVotedForkSlotsError, - SnapshotHashes, Version, Vote, MAX_WALLCLOCK, + NodeInstance, SnapshotHashes, Version, Vote, MAX_WALLCLOCK, }, 
duplicate_shred::DuplicateShred, epoch_slots::EpochSlots, gossip_error::GossipError, ping_pong::{self, PingCache, Pong}, + restart_crds_values::{RestartLastVotedForkSlots, RestartLastVotedForkSlotsError}, socketaddr, socketaddr_any, weighted_shuffle::WeightedShuffle, }, @@ -268,7 +268,7 @@ pub fn make_accounts_hashes_message( pub(crate) type Ping = ping_pong::Ping<[u8; GOSSIP_PING_TOKEN_SIZE]>; // TODO These messages should go through the gpu pipeline for spam filtering -#[frozen_abi(digest = "HvA9JnnQrJnmkcGxrp8SmTB1b4iSyQ4VK2p6LpSBaoWR")] +#[frozen_abi(digest = "7a2P1GeQjyqCHMyBrhNPTKfPfG4iv32vki7XHahoN55z")] #[derive(Serialize, Deserialize, Debug, AbiEnumVisitor, AbiExample)] #[allow(clippy::large_enum_variant)] pub(crate) enum Protocol { @@ -2085,7 +2085,7 @@ impl ClusterInfo { score }; let score = match response.data { - CrdsData::LegacyContactInfo(_) => 2 * score, + CrdsData::LegacyContactInfo(_) | CrdsData::ContactInfo(_) => 2 * score, _ => score, }; ((addr, response), score) diff --git a/gossip/src/crds_value.rs b/gossip/src/crds_value.rs index 82b22f659e0b9a..4bd1939ef10d14 100644 --- a/gossip/src/crds_value.rs +++ b/gossip/src/crds_value.rs @@ -6,10 +6,9 @@ use { duplicate_shred::{DuplicateShred, DuplicateShredIndex, MAX_DUPLICATE_SHREDS}, epoch_slots::EpochSlots, legacy_contact_info::LegacyContactInfo, + restart_crds_values::RestartLastVotedForkSlots, }, bincode::{serialize, serialized_size}, - bv::BitVec, - itertools::Itertools, rand::{CryptoRng, Rng}, serde::de::{Deserialize, Deserializer}, solana_sdk::{ @@ -17,7 +16,6 @@ use { hash::Hash, pubkey::{self, Pubkey}, sanitize::{Sanitize, SanitizeError}, - serde_varint, signature::{Keypair, Signable, Signature, Signer}, timing::timestamp, transaction::Transaction, @@ -29,7 +27,6 @@ use { collections::{hash_map::Entry, BTreeSet, HashMap}, fmt, }, - thiserror::Error, }; pub const MAX_WALLCLOCK: u64 = 1_000_000_000_000_000; @@ -494,175 +491,6 @@ impl Sanitize for NodeInstance { } } -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, AbiExample, AbiEnumVisitor)] -enum SlotsOffsets { - RunLengthEncoding(RunLengthEncoding), - RawOffsets(RawOffsets), -} - -#[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq, AbiExample)] -struct U16(#[serde(with = "serde_varint")] u16); - -// The vector always starts with 1. Encode number of 1's and 0's consecutively. -// For example, 110000111 is [2, 4, 3]. 
-#[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq, AbiExample)] -struct RunLengthEncoding(Vec); - -impl RunLengthEncoding { - fn new(bits: &BitVec) -> Self { - let encoded = (0..bits.len()) - .map(|i| bits.get(i)) - .dedup_with_count() - .map_while(|(count, _)| u16::try_from(count).ok()) - .scan(0, |current_bytes, count| { - *current_bytes += ((u16::BITS - count.leading_zeros() + 6) / 7).max(1) as usize; - (*current_bytes <= RestartLastVotedForkSlots::MAX_BYTES).then_some(U16(count)) - }) - .collect(); - Self(encoded) - } - - fn num_encoded_slots(&self) -> usize { - self.0 - .iter() - .map(|x| usize::try_from(x.0).unwrap()) - .sum::() - } - - fn to_slots(&self, last_slot: Slot, min_slot: Slot) -> Vec { - let mut slots: Vec = self - .0 - .iter() - .map_while(|bit_count| usize::try_from(bit_count.0).ok()) - .zip([1, 0].iter().cycle()) - .flat_map(|(bit_count, bit)| std::iter::repeat(bit).take(bit_count)) - .enumerate() - .filter(|(_, bit)| **bit == 1) - .map_while(|(offset, _)| { - let offset = Slot::try_from(offset).ok()?; - last_slot.checked_sub(offset) - }) - .take(RestartLastVotedForkSlots::MAX_SLOTS) - .take_while(|slot| *slot >= min_slot) - .collect(); - slots.reverse(); - slots - } -} - -#[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq, AbiExample)] -struct RawOffsets(BitVec); - -impl RawOffsets { - fn new(mut bits: BitVec) -> Self { - bits.truncate(RestartLastVotedForkSlots::MAX_BYTES as u64 * 8); - bits.shrink_to_fit(); - Self(bits) - } - - fn to_slots(&self, last_slot: Slot, min_slot: Slot) -> Vec { - let mut slots: Vec = (0..self.0.len()) - .filter(|index| self.0.get(*index)) - .map_while(|offset| last_slot.checked_sub(offset)) - .take_while(|slot| *slot >= min_slot) - .collect(); - slots.reverse(); - slots - } -} - -#[derive(Serialize, Deserialize, Clone, PartialEq, Eq, AbiExample, Debug)] -pub struct RestartLastVotedForkSlots { - pub from: Pubkey, - pub wallclock: u64, - offsets: SlotsOffsets, - pub last_voted_slot: Slot, - pub last_voted_hash: Hash, - pub shred_version: u16, -} - -impl Sanitize for RestartLastVotedForkSlots { - fn sanitize(&self) -> std::result::Result<(), SanitizeError> { - self.last_voted_hash.sanitize() - } -} - -#[derive(Debug, Error)] -pub enum RestartLastVotedForkSlotsError { - #[error("Last voted fork cannot be empty")] - LastVotedForkEmpty, -} - -impl RestartLastVotedForkSlots { - // This number is MAX_CRDS_OBJECT_SIZE - empty serialized RestartLastVotedForkSlots. - const MAX_BYTES: usize = 824; - - // Per design doc, we should start wen_restart within 7 hours. 
- pub const MAX_SLOTS: usize = u16::MAX as usize; - - pub fn new( - from: Pubkey, - now: u64, - last_voted_fork: &[Slot], - last_voted_hash: Hash, - shred_version: u16, - ) -> Result { - let Some((&first_voted_slot, &last_voted_slot)) = - last_voted_fork.iter().minmax().into_option() - else { - return Err(RestartLastVotedForkSlotsError::LastVotedForkEmpty); - }; - let max_size = last_voted_slot.saturating_sub(first_voted_slot) + 1; - let mut uncompressed_bitvec = BitVec::new_fill(false, max_size); - for slot in last_voted_fork { - uncompressed_bitvec.set(last_voted_slot - *slot, true); - } - let run_length_encoding = RunLengthEncoding::new(&uncompressed_bitvec); - let offsets = - if run_length_encoding.num_encoded_slots() > RestartLastVotedForkSlots::MAX_BYTES * 8 { - SlotsOffsets::RunLengthEncoding(run_length_encoding) - } else { - SlotsOffsets::RawOffsets(RawOffsets::new(uncompressed_bitvec)) - }; - Ok(Self { - from, - wallclock: now, - offsets, - last_voted_slot, - last_voted_hash, - shred_version, - }) - } - - /// New random Version for tests and benchmarks. - pub fn new_rand(rng: &mut R, pubkey: Option) -> Self { - let pubkey = pubkey.unwrap_or_else(solana_sdk::pubkey::new_rand); - let num_slots = rng.gen_range(2..20); - let slots = std::iter::repeat_with(|| 47825632 + rng.gen_range(0..512)) - .take(num_slots) - .collect::>(); - RestartLastVotedForkSlots::new( - pubkey, - new_rand_timestamp(rng), - &slots, - Hash::new_unique(), - 1, - ) - .unwrap() - } - - pub fn to_slots(&self, min_slot: Slot) -> Vec { - match &self.offsets { - SlotsOffsets::RunLengthEncoding(run_length_encoding) => { - run_length_encoding.to_slots(self.last_voted_slot, min_slot) - } - SlotsOffsets::RawOffsets(raw_offsets) => { - raw_offsets.to_slots(self.last_voted_slot, min_slot) - } - } - } -} - /// Type of the replicated value /// These are labels for values in a record that is associated with `Pubkey` #[derive(PartialEq, Hash, Eq, Clone, Debug)] @@ -889,7 +717,6 @@ pub(crate) fn sanitize_wallclock(wallclock: u64) -> Result<(), SanitizeError> { mod test { use { super::*, - crate::cluster_info::MAX_CRDS_OBJECT_SIZE, bincode::{deserialize, Options}, rand::SeedableRng, rand_chacha::ChaChaRng, @@ -1262,130 +1089,4 @@ mod test { assert!(node.should_force_push(&pubkey)); assert!(!node.should_force_push(&Pubkey::new_unique())); } - - fn make_rand_slots(rng: &mut R) -> impl Iterator + '_ { - repeat_with(|| rng.gen_range(1..5)).scan(0, |slot, step| { - *slot += step; - Some(*slot) - }) - } - - #[test] - fn test_restart_last_voted_fork_slots_max_bytes() { - let keypair = Keypair::new(); - let header = RestartLastVotedForkSlots::new( - keypair.pubkey(), - timestamp(), - &[1, 2], - Hash::default(), - 0, - ) - .unwrap(); - // If the following assert fails, please update RestartLastVotedForkSlots::MAX_BYTES - assert_eq!( - RestartLastVotedForkSlots::MAX_BYTES, - MAX_CRDS_OBJECT_SIZE - serialized_size(&header).unwrap() as usize - ); - - // Create large enough slots to make sure we are discarding some to make slots fit. 
- let mut rng = rand::thread_rng(); - let large_length = 8000; - let range: Vec = make_rand_slots(&mut rng).take(large_length).collect(); - let large_slots = RestartLastVotedForkSlots::new( - keypair.pubkey(), - timestamp(), - &range, - Hash::default(), - 0, - ) - .unwrap(); - assert!(serialized_size(&large_slots).unwrap() <= MAX_CRDS_OBJECT_SIZE as u64); - let retrieved_slots = large_slots.to_slots(0); - assert!(retrieved_slots.len() <= range.len()); - assert!(retrieved_slots.last().unwrap() - retrieved_slots.first().unwrap() > 5000); - } - - #[test] - fn test_restart_last_voted_fork_slots() { - let keypair = Keypair::new(); - let slot = 53; - let slot_parent = slot - 5; - let shred_version = 21; - let original_slots_vec = [slot_parent, slot]; - let slots = RestartLastVotedForkSlots::new( - keypair.pubkey(), - timestamp(), - &original_slots_vec, - Hash::default(), - shred_version, - ) - .unwrap(); - let value = - CrdsValue::new_signed(CrdsData::RestartLastVotedForkSlots(slots.clone()), &keypair); - assert_eq!(value.sanitize(), Ok(())); - let label = value.label(); - assert_eq!( - label, - CrdsValueLabel::RestartLastVotedForkSlots(keypair.pubkey()) - ); - assert_eq!(label.pubkey(), keypair.pubkey()); - assert_eq!(value.wallclock(), slots.wallclock); - let retrieved_slots = slots.to_slots(0); - assert_eq!(retrieved_slots.len(), 2); - assert_eq!(retrieved_slots[0], slot_parent); - assert_eq!(retrieved_slots[1], slot); - - let bad_value = RestartLastVotedForkSlots::new( - keypair.pubkey(), - timestamp(), - &[], - Hash::default(), - shred_version, - ); - assert!(bad_value.is_err()); - - let last_slot: Slot = 8000; - let large_slots_vec: Vec = (0..last_slot + 1).collect(); - let large_slots = RestartLastVotedForkSlots::new( - keypair.pubkey(), - timestamp(), - &large_slots_vec, - Hash::default(), - shred_version, - ) - .unwrap(); - assert!(serialized_size(&large_slots).unwrap() < MAX_CRDS_OBJECT_SIZE as u64); - let retrieved_slots = large_slots.to_slots(0); - assert_eq!(retrieved_slots, large_slots_vec); - } - - fn check_run_length_encoding(slots: Vec) { - let last_voted_slot = slots[slots.len() - 1]; - let mut bitvec = BitVec::new_fill(false, last_voted_slot - slots[0] + 1); - for slot in &slots { - bitvec.set(last_voted_slot - slot, true); - } - let rle = RunLengthEncoding::new(&bitvec); - let retrieved_slots = rle.to_slots(last_voted_slot, 0); - assert_eq!(retrieved_slots, slots); - } - - #[test] - fn test_run_length_encoding() { - check_run_length_encoding((1000..16384 + 1000).map(|x| x as Slot).collect_vec()); - check_run_length_encoding([1000 as Slot].into()); - check_run_length_encoding( - [ - 1000 as Slot, - RestartLastVotedForkSlots::MAX_SLOTS as Slot + 999, - ] - .into(), - ); - check_run_length_encoding((1000..1800).step_by(2).map(|x| x as Slot).collect_vec()); - - let mut rng = rand::thread_rng(); - let large_length = 500; - let range: Vec = make_rand_slots(&mut rng).take(large_length).collect(); - check_run_length_encoding(range); - } } diff --git a/gossip/src/duplicate_shred.rs b/gossip/src/duplicate_shred.rs index b1ceab79b26949..70e56d35e82334 100644 --- a/gossip/src/duplicate_shred.rs +++ b/gossip/src/duplicate_shred.rs @@ -30,7 +30,7 @@ pub struct DuplicateShred { pub(crate) wallclock: u64, pub(crate) slot: Slot, _unused: u32, - shred_type: ShredType, + _unused_shred_type: ShredType, // Serialized DuplicateSlotProof split into chunks. 
num_chunks: u8, chunk_index: u8, @@ -90,8 +90,8 @@ pub enum Error { /// Check that `shred1` and `shred2` indicate a valid duplicate proof /// - Must be for the same slot -/// - Must have the same `shred_type` /// - Must both sigverify for the correct leader +/// - Must have a merkle root conflict, otherwise `shred1` and `shred2` must have the same `shred_type` /// - If `shred1` and `shred2` share the same index they must be not equal /// - If `shred1` and `shred2` do not share the same index and are data shreds /// verify that they indicate an index conflict. One of them must be the @@ -106,10 +106,6 @@ where return Err(Error::SlotMismatch); } - if shred1.shred_type() != shred2.shred_type() { - return Err(Error::ShredTypeMismatch); - } - if let Some(leader_schedule) = leader_schedule { let slot_leader = leader_schedule(shred1.slot()).ok_or(Error::UnknownSlotLeader(shred1.slot()))?; @@ -118,6 +114,20 @@ where } } + // Merkle root conflict check + if shred1.fec_set_index() == shred2.fec_set_index() + && shred1.merkle_root().ok() != shred2.merkle_root().ok() + { + // This catches a mixture of legacy and merkle shreds + // as well as merkle shreds with different roots in the + // same fec set + return Ok(()); + } + + if shred1.shred_type() != shred2.shred_type() { + return Err(Error::ShredTypeMismatch); + } + if shred1.index() == shred2.index() { if shred1.payload() != shred2.payload() { return Ok(()); @@ -164,7 +174,7 @@ where } let other_shred = Shred::new_from_serialized_shred(other_payload)?; check_shreds(leader_schedule, &shred, &other_shred)?; - let (slot, shred_type) = (shred.slot(), shred.shred_type()); + let slot = shred.slot(); let proof = DuplicateSlotProof { shred1: shred.into_payload(), shred2: other_shred.into_payload(), @@ -184,27 +194,21 @@ where from: self_pubkey, wallclock, slot, - shred_type, num_chunks, chunk_index: i as u8, chunk, _unused: 0, + _unused_shred_type: ShredType::Code, }); Ok(chunks) } // Returns a predicate checking if a duplicate-shred chunk matches -// (slot, shred_type) and has valid chunk_index. -fn check_chunk( - slot: Slot, - shred_type: ShredType, - num_chunks: u8, -) -> impl Fn(&DuplicateShred) -> Result<(), Error> { +// the slot and has valid chunk_index. +fn check_chunk(slot: Slot, num_chunks: u8) -> impl Fn(&DuplicateShred) -> Result<(), Error> { move |dup| { if dup.slot != slot { Err(Error::SlotMismatch) - } else if dup.shred_type != shred_type { - Err(Error::ShredTypeMismatch) } else if dup.num_chunks != num_chunks { Err(Error::NumChunksMismatch) } else if dup.chunk_index >= num_chunks { @@ -226,13 +230,12 @@ pub(crate) fn into_shreds( let mut chunks = chunks.into_iter(); let DuplicateShred { slot, - shred_type, num_chunks, chunk_index, chunk, .. 
} = chunks.next().ok_or(Error::InvalidDuplicateShreds)?; - let check_chunk = check_chunk(slot, shred_type, num_chunks); + let check_chunk = check_chunk(slot, num_chunks); let mut data = HashMap::new(); data.insert(chunk_index, chunk); for chunk in chunks { @@ -260,8 +263,6 @@ pub(crate) fn into_shreds( let shred2 = Shred::new_from_serialized_shred(proof.shred2)?; if shred1.slot() != slot || shred2.slot() != slot { Err(Error::SlotMismatch) - } else if shred1.shred_type() != shred_type || shred2.shred_type() != shred_type { - Err(Error::ShredTypeMismatch) } else { check_shreds(Some(|_| Some(slot_leader).copied()), &shred1, &shred2)?; Ok((shred1, shred2)) @@ -300,7 +301,7 @@ pub(crate) mod tests { from: Pubkey::new_unique(), wallclock: u64::MAX, slot: Slot::MAX, - shred_type: ShredType::Data, + _unused_shred_type: ShredType::Data, num_chunks: u8::MAX, chunk_index: u8::MAX, chunk: Vec::default(), @@ -421,7 +422,7 @@ pub(crate) mod tests { wallclock: u64, max_size: usize, // Maximum serialized size of each DuplicateShred. ) -> Result, Error> { - let (slot, shred_type) = (shred.slot(), shred.shred_type()); + let slot = shred.slot(); let proof = DuplicateSlotProof { shred1: shred.into_payload(), shred2: other_shred.into_payload(), @@ -437,11 +438,11 @@ pub(crate) mod tests { from: self_pubkey, wallclock, slot, - shred_type, num_chunks, chunk_index: i as u8, chunk, _unused: 0, + _unused_shred_type: ShredType::Code, }); Ok(chunks) } @@ -949,4 +950,186 @@ pub(crate) mod tests { ); } } + + #[test] + fn test_merkle_root_conflict_round_trip() { + let mut rng = rand::thread_rng(); + let leader = Arc::new(Keypair::new()); + let (slot, parent_slot, reference_tick, version) = (53084024, 53084023, 0, 0); + let shredder = Shredder::new(slot, parent_slot, reference_tick, version).unwrap(); + let next_shred_index = rng.gen_range(0..31_000); + let leader_schedule = |s| { + if s == slot { + Some(leader.pubkey()) + } else { + None + } + }; + + let (data_shreds, coding_shreds) = new_rand_shreds( + &mut rng, + next_shred_index, + next_shred_index, + 10, + true, /* merkle_variant */ + &shredder, + &leader, + false, + ); + + let (legacy_data_shreds, legacy_coding_shreds) = new_rand_shreds( + &mut rng, + next_shred_index, + next_shred_index, + 10, + false, /* merkle_variant */ + &shredder, + &leader, + true, + ); + + let (diff_data_shreds, diff_coding_shreds) = new_rand_shreds( + &mut rng, + next_shred_index, + next_shred_index, + 10, + true, /* merkle_variant */ + &shredder, + &leader, + false, + ); + + let test_cases = vec![ + (data_shreds[0].clone(), diff_data_shreds[1].clone()), + (coding_shreds[0].clone(), diff_coding_shreds[1].clone()), + (data_shreds[0].clone(), diff_coding_shreds[0].clone()), + (coding_shreds[0].clone(), diff_data_shreds[0].clone()), + // Mix of legacy and merkle for same fec set + (legacy_coding_shreds[0].clone(), data_shreds[0].clone()), + (coding_shreds[0].clone(), legacy_data_shreds[0].clone()), + (legacy_data_shreds[0].clone(), coding_shreds[0].clone()), + (data_shreds[0].clone(), legacy_coding_shreds[0].clone()), + ]; + for (shred1, shred2) in test_cases.into_iter() { + let chunks: Vec<_> = from_shred( + shred1.clone(), + Pubkey::new_unique(), // self_pubkey + shred2.payload().clone(), + Some(leader_schedule), + rng.gen(), // wallclock + 512, // max_size + ) + .unwrap() + .collect(); + assert!(chunks.len() > 4); + let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks).unwrap(); + assert_eq!(shred1, shred3); + assert_eq!(shred2, shred4); + } + } + + #[test] + fn 
test_merkle_root_conflict_invalid() { + let mut rng = rand::thread_rng(); + let leader = Arc::new(Keypair::new()); + let (slot, parent_slot, reference_tick, version) = (53084024, 53084023, 0, 0); + let shredder = Shredder::new(slot, parent_slot, reference_tick, version).unwrap(); + let next_shred_index = rng.gen_range(0..31_000); + let leader_schedule = |s| { + if s == slot { + Some(leader.pubkey()) + } else { + None + } + }; + + let (data_shreds, coding_shreds) = new_rand_shreds( + &mut rng, + next_shred_index, + next_shred_index, + 10, + true, + &shredder, + &leader, + true, + ); + + let (next_data_shreds, next_coding_shreds) = new_rand_shreds( + &mut rng, + next_shred_index + 1, + next_shred_index + 1, + 10, + true, + &shredder, + &leader, + true, + ); + + let (legacy_data_shreds, legacy_coding_shreds) = new_rand_shreds( + &mut rng, + next_shred_index, + next_shred_index, + 10, + false, + &shredder, + &leader, + true, + ); + + let test_cases = vec![ + // Same fec set same merkle root + (coding_shreds[0].clone(), data_shreds[0].clone()), + (data_shreds[0].clone(), coding_shreds[0].clone()), + // Different FEC set different merkle root + (coding_shreds[0].clone(), next_data_shreds[0].clone()), + (next_coding_shreds[0].clone(), data_shreds[0].clone()), + (data_shreds[0].clone(), next_coding_shreds[0].clone()), + (next_data_shreds[0].clone(), coding_shreds[0].clone()), + // Legacy shreds + ( + legacy_coding_shreds[0].clone(), + legacy_data_shreds[0].clone(), + ), + ( + legacy_data_shreds[0].clone(), + legacy_coding_shreds[0].clone(), + ), + // Mix of legacy and merkle with different fec index + (legacy_coding_shreds[0].clone(), next_data_shreds[0].clone()), + (next_coding_shreds[0].clone(), legacy_data_shreds[0].clone()), + (legacy_data_shreds[0].clone(), next_coding_shreds[0].clone()), + (next_data_shreds[0].clone(), legacy_coding_shreds[0].clone()), + ]; + for (shred1, shred2) in test_cases.into_iter() { + assert_matches!( + from_shred( + shred1.clone(), + Pubkey::new_unique(), // self_pubkey + shred2.payload().clone(), + Some(leader_schedule), + rng.gen(), // wallclock + 512, // max_size + ) + .err() + .unwrap(), + Error::ShredTypeMismatch + ); + + let chunks: Vec<_> = from_shred_bypass_checks( + shred1.clone(), + Pubkey::new_unique(), // self_pubkey + shred2.clone(), + rng.gen(), // wallclock + 512, // max_size + ) + .unwrap() + .collect(); + assert!(chunks.len() > 4); + + assert_matches!( + into_shreds(&leader.pubkey(), chunks).err().unwrap(), + Error::ShredTypeMismatch + ); + } + } } diff --git a/gossip/src/lib.rs b/gossip/src/lib.rs index 11b609f3a37f52..2aea3078bbd7e7 100644 --- a/gossip/src/lib.rs +++ b/gossip/src/lib.rs @@ -24,6 +24,7 @@ pub mod legacy_contact_info; pub mod ping_pong; mod push_active_set; mod received_cache; +pub mod restart_crds_values; pub mod weighted_shuffle; #[macro_use] diff --git a/gossip/src/restart_crds_values.rs b/gossip/src/restart_crds_values.rs new file mode 100644 index 00000000000000..02f9359cce71f0 --- /dev/null +++ b/gossip/src/restart_crds_values.rs @@ -0,0 +1,320 @@ +use { + crate::crds_value::new_rand_timestamp, + bv::BitVec, + itertools::Itertools, + rand::Rng, + solana_sdk::{ + clock::Slot, + hash::Hash, + pubkey::Pubkey, + sanitize::{Sanitize, SanitizeError}, + serde_varint, + }, + thiserror::Error, +}; + +#[derive(Serialize, Deserialize, Clone, PartialEq, Eq, AbiExample, Debug)] +pub struct RestartLastVotedForkSlots { + pub from: Pubkey, + pub wallclock: u64, + offsets: SlotsOffsets, + pub last_voted_slot: Slot, + pub last_voted_hash: Hash, 
+ pub shred_version: u16, +} + +#[derive(Debug, Error)] +pub enum RestartLastVotedForkSlotsError { + #[error("Last voted fork cannot be empty")] + LastVotedForkEmpty, +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, AbiExample, AbiEnumVisitor)] +enum SlotsOffsets { + RunLengthEncoding(RunLengthEncoding), + RawOffsets(RawOffsets), +} + +#[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq, AbiExample)] +struct U16(#[serde(with = "serde_varint")] u16); + +// The vector always starts with 1. Encode number of 1's and 0's consecutively. +// For example, 110000111 is [2, 4, 3]. +#[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq, AbiExample)] +struct RunLengthEncoding(Vec); + +#[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq, AbiExample)] +struct RawOffsets(BitVec); + +impl Sanitize for RestartLastVotedForkSlots { + fn sanitize(&self) -> std::result::Result<(), SanitizeError> { + self.last_voted_hash.sanitize() + } +} + +impl RestartLastVotedForkSlots { + // This number is MAX_CRDS_OBJECT_SIZE - empty serialized RestartLastVotedForkSlots. + const MAX_BYTES: usize = 824; + + // Per design doc, we should start wen_restart within 7 hours. + pub const MAX_SLOTS: usize = u16::MAX as usize; + + pub fn new( + from: Pubkey, + now: u64, + last_voted_fork: &[Slot], + last_voted_hash: Hash, + shred_version: u16, + ) -> Result { + let Some((&first_voted_slot, &last_voted_slot)) = + last_voted_fork.iter().minmax().into_option() + else { + return Err(RestartLastVotedForkSlotsError::LastVotedForkEmpty); + }; + let max_size = last_voted_slot.saturating_sub(first_voted_slot) + 1; + let mut uncompressed_bitvec = BitVec::new_fill(false, max_size); + for slot in last_voted_fork { + uncompressed_bitvec.set(last_voted_slot - *slot, true); + } + let run_length_encoding = RunLengthEncoding::new(&uncompressed_bitvec); + let offsets = + if run_length_encoding.num_encoded_slots() > RestartLastVotedForkSlots::MAX_BYTES * 8 { + SlotsOffsets::RunLengthEncoding(run_length_encoding) + } else { + SlotsOffsets::RawOffsets(RawOffsets::new(uncompressed_bitvec)) + }; + Ok(Self { + from, + wallclock: now, + offsets, + last_voted_slot, + last_voted_hash, + shred_version, + }) + } + + /// New random Version for tests and benchmarks. 
+ pub fn new_rand(rng: &mut R, pubkey: Option) -> Self { + let pubkey = pubkey.unwrap_or_else(solana_sdk::pubkey::new_rand); + let num_slots = rng.gen_range(2..20); + let slots = std::iter::repeat_with(|| 47825632 + rng.gen_range(0..512)) + .take(num_slots) + .collect::>(); + RestartLastVotedForkSlots::new( + pubkey, + new_rand_timestamp(rng), + &slots, + Hash::new_unique(), + 1, + ) + .unwrap() + } + + pub fn to_slots(&self, min_slot: Slot) -> Vec { + match &self.offsets { + SlotsOffsets::RunLengthEncoding(run_length_encoding) => { + run_length_encoding.to_slots(self.last_voted_slot, min_slot) + } + SlotsOffsets::RawOffsets(raw_offsets) => { + raw_offsets.to_slots(self.last_voted_slot, min_slot) + } + } + } +} + +impl RunLengthEncoding { + fn new(bits: &BitVec) -> Self { + let encoded = (0..bits.len()) + .map(|i| bits.get(i)) + .dedup_with_count() + .map_while(|(count, _)| u16::try_from(count).ok()) + .scan(0, |current_bytes, count| { + *current_bytes += ((u16::BITS - count.leading_zeros() + 6) / 7).max(1) as usize; + (*current_bytes <= RestartLastVotedForkSlots::MAX_BYTES).then_some(U16(count)) + }) + .collect(); + Self(encoded) + } + + fn num_encoded_slots(&self) -> usize { + self.0.iter().map(|x| usize::from(x.0)).sum() + } + + fn to_slots(&self, last_slot: Slot, min_slot: Slot) -> Vec { + let mut slots: Vec = self + .0 + .iter() + .map(|bit_count| usize::from(bit_count.0)) + .zip([1, 0].iter().cycle()) + .flat_map(|(bit_count, bit)| std::iter::repeat(bit).take(bit_count)) + .enumerate() + .filter(|(_, bit)| **bit == 1) + .map_while(|(offset, _)| { + let offset = Slot::try_from(offset).ok()?; + last_slot.checked_sub(offset) + }) + .take(RestartLastVotedForkSlots::MAX_SLOTS) + .take_while(|slot| *slot >= min_slot) + .collect(); + slots.reverse(); + slots + } +} + +impl RawOffsets { + fn new(mut bits: BitVec) -> Self { + bits.truncate(RestartLastVotedForkSlots::MAX_BYTES as u64 * 8); + bits.shrink_to_fit(); + Self(bits) + } + + fn to_slots(&self, last_slot: Slot, min_slot: Slot) -> Vec { + let mut slots: Vec = (0..self.0.len()) + .filter(|index| self.0.get(*index)) + .map_while(|offset| last_slot.checked_sub(offset)) + .take_while(|slot| *slot >= min_slot) + .collect(); + slots.reverse(); + slots + } +} + +#[cfg(test)] +mod test { + use { + super::*, + crate::{ + cluster_info::MAX_CRDS_OBJECT_SIZE, + crds_value::{CrdsData, CrdsValue, CrdsValueLabel}, + }, + bincode::serialized_size, + solana_sdk::{signature::Signer, signer::keypair::Keypair, timing::timestamp}, + std::iter::repeat_with, + }; + + fn make_rand_slots(rng: &mut R) -> impl Iterator + '_ { + repeat_with(|| rng.gen_range(1..5)).scan(0, |slot, step| { + *slot += step; + Some(*slot) + }) + } + + #[test] + fn test_restart_last_voted_fork_slots_max_bytes() { + let keypair = Keypair::new(); + let header = RestartLastVotedForkSlots::new( + keypair.pubkey(), + timestamp(), + &[1, 2], + Hash::default(), + 0, + ) + .unwrap(); + // If the following assert fails, please update RestartLastVotedForkSlots::MAX_BYTES + assert_eq!( + RestartLastVotedForkSlots::MAX_BYTES, + MAX_CRDS_OBJECT_SIZE - serialized_size(&header).unwrap() as usize + ); + + // Create large enough slots to make sure we are discarding some to make slots fit. 
+ let mut rng = rand::thread_rng(); + let large_length = 8000; + let range: Vec = make_rand_slots(&mut rng).take(large_length).collect(); + let large_slots = RestartLastVotedForkSlots::new( + keypair.pubkey(), + timestamp(), + &range, + Hash::default(), + 0, + ) + .unwrap(); + assert!(serialized_size(&large_slots).unwrap() <= MAX_CRDS_OBJECT_SIZE as u64); + let retrieved_slots = large_slots.to_slots(0); + assert!(retrieved_slots.len() <= range.len()); + assert!(retrieved_slots.last().unwrap() - retrieved_slots.first().unwrap() > 5000); + } + + #[test] + fn test_restart_last_voted_fork_slots() { + let keypair = Keypair::new(); + let slot = 53; + let slot_parent = slot - 5; + let shred_version = 21; + let original_slots_vec = [slot_parent, slot]; + let slots = RestartLastVotedForkSlots::new( + keypair.pubkey(), + timestamp(), + &original_slots_vec, + Hash::default(), + shred_version, + ) + .unwrap(); + let value = + CrdsValue::new_signed(CrdsData::RestartLastVotedForkSlots(slots.clone()), &keypair); + assert_eq!(value.sanitize(), Ok(())); + let label = value.label(); + assert_eq!( + label, + CrdsValueLabel::RestartLastVotedForkSlots(keypair.pubkey()) + ); + assert_eq!(label.pubkey(), keypair.pubkey()); + assert_eq!(value.wallclock(), slots.wallclock); + let retrieved_slots = slots.to_slots(0); + assert_eq!(retrieved_slots.len(), 2); + assert_eq!(retrieved_slots[0], slot_parent); + assert_eq!(retrieved_slots[1], slot); + + let bad_value = RestartLastVotedForkSlots::new( + keypair.pubkey(), + timestamp(), + &[], + Hash::default(), + shred_version, + ); + assert!(bad_value.is_err()); + + let last_slot: Slot = 8000; + let large_slots_vec: Vec = (0..last_slot + 1).collect(); + let large_slots = RestartLastVotedForkSlots::new( + keypair.pubkey(), + timestamp(), + &large_slots_vec, + Hash::default(), + shred_version, + ) + .unwrap(); + assert!(serialized_size(&large_slots).unwrap() < MAX_CRDS_OBJECT_SIZE as u64); + let retrieved_slots = large_slots.to_slots(0); + assert_eq!(retrieved_slots, large_slots_vec); + } + + fn check_run_length_encoding(slots: Vec) { + let last_voted_slot = slots[slots.len() - 1]; + let mut bitvec = BitVec::new_fill(false, last_voted_slot - slots[0] + 1); + for slot in &slots { + bitvec.set(last_voted_slot - slot, true); + } + let rle = RunLengthEncoding::new(&bitvec); + let retrieved_slots = rle.to_slots(last_voted_slot, 0); + assert_eq!(retrieved_slots, slots); + } + + #[test] + fn test_run_length_encoding() { + check_run_length_encoding((1000..16384 + 1000).map(|x| x as Slot).collect_vec()); + check_run_length_encoding([1000 as Slot].into()); + check_run_length_encoding( + [ + 1000 as Slot, + RestartLastVotedForkSlots::MAX_SLOTS as Slot + 999, + ] + .into(), + ); + check_run_length_encoding((1000..1800).step_by(2).map(|x| x as Slot).collect_vec()); + + let mut rng = rand::thread_rng(); + let large_length = 500; + let range: Vec = make_rand_slots(&mut rng).take(large_length).collect(); + check_run_length_encoding(range); + } +} diff --git a/ledger-tool/src/args.rs b/ledger-tool/src/args.rs index 0bb28e4a2779ca..63198c1c6188fa 100644 --- a/ledger-tool/src/args.rs +++ b/ledger-tool/src/args.rs @@ -2,7 +2,7 @@ use { crate::LEDGER_TOOL_DIRECTORY, clap::{value_t, values_t_or_exit, ArgMatches}, solana_accounts_db::{ - accounts_db::{AccountsDb, AccountsDbConfig, FillerAccountsConfig}, + accounts_db::{AccountsDb, AccountsDbConfig}, accounts_index::{AccountsIndexConfig, IndexLimitMb}, partitioned_rewards::TestPartitionedEpochRewards, }, @@ -53,11 +53,6 @@ pub fn 
get_accounts_db_config( ..AccountsIndexConfig::default() }; - let filler_accounts_config = FillerAccountsConfig { - count: value_t!(arg_matches, "accounts_filler_count", usize).unwrap_or(0), - size: value_t!(arg_matches, "accounts_filler_size", usize).unwrap_or(0), - }; - let accounts_hash_cache_path = arg_matches .value_of("accounts_hash_cache_path") .map(Into::into) @@ -77,7 +72,6 @@ pub fn get_accounts_db_config( index: Some(accounts_index_config), base_working_path: Some(ledger_tool_ledger_path), accounts_hash_cache_path: Some(accounts_hash_cache_path), - filler_accounts_config, ancient_append_vec_offset: value_t!(arg_matches, "accounts_db_ancient_append_vecs", i64) .ok(), exhaustively_verify_refcounts: arg_matches.is_present("accounts_db_verify_refcounts"), diff --git a/ledger-tool/src/bigtable.rs b/ledger-tool/src/bigtable.rs index c4d5c77f302669..ed98f2b0e5bbf4 100644 --- a/ledger-tool/src/bigtable.rs +++ b/ledger-tool/src/bigtable.rs @@ -1,6 +1,6 @@ //! The `bigtable` subcommand use { - crate::ledger_path::canonicalize_ledger_path, + crate::{ledger_path::canonicalize_ledger_path, output::CliEntries}, clap::{ value_t, value_t_or_exit, values_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand, }, @@ -143,6 +143,24 @@ async fn block( Ok(()) } +async fn entries( + slot: Slot, + output_format: OutputFormat, + config: solana_storage_bigtable::LedgerStorageConfig, +) -> Result<(), Box> { + let bigtable = solana_storage_bigtable::LedgerStorage::new_with_config(config) + .await + .map_err(|err| format!("Failed to connect to storage: {err:?}"))?; + + let entries = bigtable.get_entries(slot).await?; + let cli_entries = CliEntries { + entries: entries.map(Into::into).collect(), + slot, + }; + println!("{}", output_format.formatted_string(&cli_entries)); + Ok(()) +} + async fn blocks( starting_slot: Slot, limit: usize, @@ -453,7 +471,10 @@ async fn copy(args: CopyArgs) -> Result<(), Box> { debug!("worker {}: received slot {}", i, slot); if !args.force { - match destination_bigtable_clone.confirmed_block_exists(slot).await { + match destination_bigtable_clone + .confirmed_block_exists(slot) + .await + { Ok(exist) => { if exist { skip_slots_clone.lock().unwrap().push(slot); @@ -461,7 +482,11 @@ async fn copy(args: CopyArgs) -> Result<(), Box> { } } Err(err) => { - error!("confirmed_block_exists() failed from the destination Bigtable, slot: {}, err: {}", slot, err); + error!( + "confirmed_block_exists() failed from the destination \ + Bigtable, slot: {}, err: {}", + slot, err + ); failed_slots_clone.lock().unwrap().push(slot); continue; } @@ -481,33 +506,44 @@ async fn copy(args: CopyArgs) -> Result<(), Box> { } } Err(err) => { - error!("failed to get a confirmed block from the source Bigtable, slot: {}, err: {}", slot, err); + error!( + "failed to get a confirmed block from the source Bigtable, \ + slot: {}, err: {}", + slot, err + ); failed_slots_clone.lock().unwrap().push(slot); continue; } }; } else { let confirmed_block = - match source_bigtable_clone.get_confirmed_block(slot).await { - Ok(block) => match VersionedConfirmedBlock::try_from(block) { - Ok(block) => block, + match source_bigtable_clone.get_confirmed_block(slot).await { + Ok(block) => match VersionedConfirmedBlock::try_from(block) { + Ok(block) => block, + Err(err) => { + error!( + "failed to convert confirmed block to versioned \ + confirmed block, slot: {}, err: {}", + slot, err + ); + failed_slots_clone.lock().unwrap().push(slot); + continue; + } + }, + Err(solana_storage_bigtable::Error::BlockNotFound(slot)) => { + 
debug!("block not found, slot: {}", slot); + block_not_found_slots_clone.lock().unwrap().push(slot); + continue; + } Err(err) => { - error!("failed to convert confirmed block to versioned confirmed block, slot: {}, err: {}", slot, err); + error!( + "failed to get confirmed block, slot: {}, err: {}", + slot, err + ); failed_slots_clone.lock().unwrap().push(slot); continue; } - }, - Err(solana_storage_bigtable::Error::BlockNotFound(slot)) => { - debug!("block not found, slot: {}", slot); - block_not_found_slots_clone.lock().unwrap().push(slot); - continue; - } - Err(err) => { - error!("failed to get confirmed block, slot: {}, err: {}", slot, err); - failed_slots_clone.lock().unwrap().push(slot); - continue; - } - }; + }; match destination_bigtable_clone .upload_confirmed_block(slot, confirmed_block) @@ -609,7 +645,7 @@ impl BigTableSubCommand for App<'_, '_> { .takes_value(true) .value_name("INSTANCE_NAME") .default_value(solana_storage_bigtable::DEFAULT_INSTANCE_NAME) - .help("Name of the target Bigtable instance") + .help("Name of the target Bigtable instance"), ) .arg( Arg::with_name("rpc_bigtable_app_profile_id") @@ -618,7 +654,7 @@ impl BigTableSubCommand for App<'_, '_> { .takes_value(true) .value_name("APP_PROFILE_ID") .default_value(solana_storage_bigtable::DEFAULT_APP_PROFILE_ID) - .help("Bigtable application profile id to use in requests") + .help("Bigtable application profile id to use in requests"), ) .subcommand( SubCommand::with_name("upload") @@ -648,9 +684,9 @@ impl BigTableSubCommand for App<'_, '_> { .long("force") .takes_value(false) .help( - "Force reupload of any blocks already present in BigTable instance\ - Note: reupload will *not* delete any data from the tx-by-addr table;\ - Use with care.", + "Force reupload of any blocks already present in BigTable \ + instance. Note: reupload will *not* delete any data from the \ + tx-by-addr table; Use with care.", ), ), ) @@ -658,24 +694,25 @@ impl BigTableSubCommand for App<'_, '_> { SubCommand::with_name("delete-slots") .about("Delete ledger information from BigTable") .arg( - Arg::with_name("slots") - .index(1) - .value_name("SLOTS") - .takes_value(true) - .multiple(true) - .required(true) - .help("Slots to delete"), - ) - .arg( - Arg::with_name("force") - .long("force") - .takes_value(false) - .help( - "Deletions are only performed when the force flag is enabled. \ - If force is not enabled, show stats about what ledger data \ - will be deleted in a real deletion. "), - ), + Arg::with_name("slots") + .index(1) + .value_name("SLOTS") + .takes_value(true) + .multiple(true) + .required(true) + .help("Slots to delete"), ) + .arg( + Arg::with_name("force") + .long("force") + .takes_value(false) + .help( + "Deletions are only performed when the force flag is enabled. \ + If force is not enabled, show stats about what ledger data \ + will be deleted in a real deletion. 
", + ), + ), + ) .subcommand( SubCommand::with_name("first-available-block") .about("Get the first available block in the storage"), @@ -708,8 +745,10 @@ impl BigTableSubCommand for App<'_, '_> { ) .subcommand( SubCommand::with_name("compare-blocks") - .about("Find the missing confirmed blocks of an owned bigtable for a given range \ - by comparing to a reference bigtable") + .about( + "Find the missing confirmed blocks of an owned bigtable for a given \ + range by comparing to a reference bigtable", + ) .arg( Arg::with_name("starting_slot") .validator(is_slot) @@ -745,7 +784,7 @@ impl BigTableSubCommand for App<'_, '_> { .takes_value(true) .value_name("INSTANCE_NAME") .default_value(solana_storage_bigtable::DEFAULT_INSTANCE_NAME) - .help("Name of the reference Bigtable instance to compare to") + .help("Name of the reference Bigtable instance to compare to"), ) .arg( Arg::with_name("reference_app_profile_id") @@ -753,7 +792,9 @@ impl BigTableSubCommand for App<'_, '_> { .takes_value(true) .value_name("APP_PROFILE_ID") .default_value(solana_storage_bigtable::DEFAULT_APP_PROFILE_ID) - .help("Reference Bigtable application profile id to use in requests") + .help( + "Reference Bigtable application profile id to use in requests", + ), ), ) .subcommand( @@ -769,6 +810,19 @@ impl BigTableSubCommand for App<'_, '_> { .required(true), ), ) + .subcommand( + SubCommand::with_name("entries") + .about("Get the entry data for a block") + .arg( + Arg::with_name("slot") + .long("slot") + .validator(is_slot) + .value_name("SLOT") + .takes_value(true) + .index(1) + .required(true), + ), + ) .subcommand( SubCommand::with_name("confirm") .about("Confirm transaction by signature") @@ -785,8 +839,8 @@ impl BigTableSubCommand for App<'_, '_> { .subcommand( SubCommand::with_name("transaction-history") .about( - "Show historical transactions affecting the given address \ - from newest to oldest", + "Show historical transactions affecting the given address from newest \ + to oldest", ) .arg( Arg::with_name("address") @@ -815,8 +869,8 @@ impl BigTableSubCommand for App<'_, '_> { .default_value("1000") .help( "Number of transaction signatures to query at once. \ - Smaller: more responsive/lower throughput. \ - Larger: less responsive/higher throughput", + Smaller: more responsive/lower throughput. 
\ + Larger: less responsive/higher throughput", ), ) .arg( @@ -850,7 +904,8 @@ impl BigTableSubCommand for App<'_, '_> { .takes_value(true) .conflicts_with("emulated_source") .help( - "Source Bigtable credential filepath (credential may be readonly)", + "Source Bigtable credential filepath (credential may be \ + readonly)", ), ) .arg( @@ -859,9 +914,7 @@ impl BigTableSubCommand for App<'_, '_> { .value_name("EMULATED_SOURCE") .takes_value(true) .conflicts_with("source_credential_path") - .help( - "Source Bigtable emulated source", - ), + .help("Source Bigtable emulated source"), ) .arg( Arg::with_name("source_instance_name") @@ -869,7 +922,7 @@ impl BigTableSubCommand for App<'_, '_> { .takes_value(true) .value_name("SOURCE_INSTANCE_NAME") .default_value(solana_storage_bigtable::DEFAULT_INSTANCE_NAME) - .help("Source Bigtable instance name") + .help("Source Bigtable instance name"), ) .arg( Arg::with_name("source_app_profile_id") @@ -877,7 +930,7 @@ impl BigTableSubCommand for App<'_, '_> { .takes_value(true) .value_name("SOURCE_APP_PROFILE_ID") .default_value(solana_storage_bigtable::DEFAULT_APP_PROFILE_ID) - .help("Source Bigtable app profile id") + .help("Source Bigtable app profile id"), ) .arg( Arg::with_name("destination_credential_path") @@ -886,7 +939,8 @@ impl BigTableSubCommand for App<'_, '_> { .takes_value(true) .conflicts_with("emulated_destination") .help( - "Destination Bigtable credential filepath (credential must have Bigtable write permissions)", + "Destination Bigtable credential filepath (credential must \ + have Bigtable write permissions)", ), ) .arg( @@ -895,9 +949,7 @@ impl BigTableSubCommand for App<'_, '_> { .value_name("EMULATED_DESTINATION") .takes_value(true) .conflicts_with("destination_credential_path") - .help( - "Destination Bigtable emulated destination", - ), + .help("Destination Bigtable emulated destination"), ) .arg( Arg::with_name("destination_instance_name") @@ -905,7 +957,7 @@ impl BigTableSubCommand for App<'_, '_> { .takes_value(true) .value_name("DESTINATION_INSTANCE_NAME") .default_value(solana_storage_bigtable::DEFAULT_INSTANCE_NAME) - .help("Destination Bigtable instance name") + .help("Destination Bigtable instance name"), ) .arg( Arg::with_name("destination_app_profile_id") @@ -913,7 +965,7 @@ impl BigTableSubCommand for App<'_, '_> { .takes_value(true) .value_name("DESTINATION_APP_PROFILE_ID") .default_value(solana_storage_bigtable::DEFAULT_APP_PROFILE_ID) - .help("Destination Bigtable app profile id") + .help("Destination Bigtable app profile id"), ) .arg( Arg::with_name("starting_slot") @@ -922,9 +974,7 @@ impl BigTableSubCommand for App<'_, '_> { .value_name("START_SLOT") .takes_value(true) .required(true) - .help( - "Start copying at this slot", - ), + .help("Start copying at this slot (inclusive)"), ) .arg( Arg::with_name("ending_slot") @@ -932,26 +982,25 @@ impl BigTableSubCommand for App<'_, '_> { .validator(is_slot) .value_name("END_SLOT") .takes_value(true) - .help("Stop copying at this slot (inclusive, START_SLOT ..= END_SLOT)"), + .help("Stop copying at this slot (inclusive)"), ) .arg( Arg::with_name("force") - .long("force") - .value_name("FORCE") - .takes_value(false) - .help( - "Force copy of blocks already present in destination Bigtable instance", - ), + .long("force") + .value_name("FORCE") + .takes_value(false) + .help( + "Force copy of blocks already present in destination Bigtable \ + instance", + ), ) .arg( Arg::with_name("dry_run") - .long("dry-run") - .value_name("DRY_RUN") - .takes_value(false) - .help( - "Dry run. 
It won't upload any blocks", - ), - ) + .long("dry-run") + .value_name("DRY_RUN") + .takes_value(false) + .help("Dry run. It won't upload any blocks"), + ), ), ) } @@ -1061,6 +1110,16 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) { }; runtime.block_on(block(slot, output_format, config)) } + ("entries", Some(arg_matches)) => { + let slot = value_t_or_exit!(arg_matches, "slot", Slot); + let config = solana_storage_bigtable::LedgerStorageConfig { + read_only: true, + instance_name, + app_profile_id, + ..solana_storage_bigtable::LedgerStorageConfig::default() + }; + runtime.block_on(entries(slot, output_format, config)) + } ("blocks", Some(arg_matches)) => { let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); let limit = value_t_or_exit!(arg_matches, "limit", usize); diff --git a/ledger-tool/src/ledger_utils.rs b/ledger-tool/src/ledger_utils.rs index 6514312bc5d43d..292aee2e1ee391 100644 --- a/ledger-tool/src/ledger_utils.rs +++ b/ledger-tool/src/ledger_utils.rs @@ -16,9 +16,7 @@ use { AccessType, BlockstoreOptions, BlockstoreRecoveryMode, LedgerColumnOptions, ShredStorageType, }, - blockstore_processor::{ - self, BlockstoreProcessorError, ProcessOptions, TransactionStatusSender, - }, + blockstore_processor::{self, ProcessOptions, TransactionStatusSender}, }, solana_measure::measure, solana_rpc::transaction_status_service::TransactionStatusService, @@ -71,7 +69,7 @@ pub fn load_and_process_ledger( process_options: ProcessOptions, snapshot_archive_path: Option, incremental_snapshot_archive_path: Option, -) -> Result<(Arc>, Option), BlockstoreProcessorError> { +) -> Result<(Arc>, Option), String> { let bank_snapshots_dir = if blockstore.is_primary_access() { blockstore.ledger_path().join("snapshot") } else { @@ -109,8 +107,8 @@ pub fn load_and_process_ledger( }) }; - let start_slot_msg = "The starting slot will be the latest snapshot slot, or genesis if \ - the --no-snapshot flag is specified or if no snapshots are found."; + let start_slot_msg = "The starting slot will be the latest snapshot slot, or genesis if the \ + --no-snapshot flag is specified or if no snapshots are found."; match process_options.halt_at_slot { // Skip the following checks for sentinel values of Some(0) and None. // For Some(0), no slots will be be replayed after starting_slot. @@ -120,7 +118,7 @@ pub fn load_and_process_ledger( if halt_slot < starting_slot { eprintln!( "Unable to process blockstore from starting slot {starting_slot} to \ - {halt_slot}; the ending slot is less than the starting slot. {start_slot_msg}" + {halt_slot}; the ending slot is less than the starting slot. {start_slot_msg}" ); exit(1); } @@ -128,8 +126,8 @@ pub fn load_and_process_ledger( if !blockstore.slot_range_connected(starting_slot, halt_slot) { eprintln!( "Unable to process blockstore from starting slot {starting_slot} to \ - {halt_slot}; the blockstore does not contain a replayable chain between these \ - slots. {start_slot_msg}" + {halt_slot}; the blockstore does not contain a replayable chain between \ + these slots. 
{start_slot_msg}" ); exit(1); } @@ -245,7 +243,8 @@ pub fn load_and_process_ledger( None, // Maybe support this later, though accounts_update_notifier, exit.clone(), - ); + ) + .map_err(|err| err.to_string())?; let block_verification_method = value_t!( arg_matches, "block_verification_method", @@ -345,7 +344,8 @@ pub fn load_and_process_ledger( None, // Maybe support this later, though &accounts_background_request_sender, ) - .map(|_| (bank_forks, starting_snapshot_hashes)); + .map(|_| (bank_forks, starting_snapshot_hashes)) + .map_err(|err| err.to_string()); exit.store(true, Ordering::Relaxed); accounts_background_service.join().unwrap(); @@ -367,8 +367,8 @@ pub fn open_blockstore( let shred_storage_type = get_shred_storage_type( ledger_path, &format!( - "Shred storage type cannot be inferred for ledger at {ledger_path:?}, \ - using default RocksLevel", + "Shred storage type cannot be inferred for ledger at {ledger_path:?}, using default \ + RocksLevel", ), ); @@ -401,13 +401,13 @@ pub fn open_blockstore( if missing_blockstore && is_secondary { eprintln!( - "Failed to open blockstore at {ledger_path:?}, it \ - is missing at least one critical file: {err:?}" + "Failed to open blockstore at {ledger_path:?}, it is missing at least one \ + critical file: {err:?}" ); } else if missing_column && is_secondary { eprintln!( - "Failed to open blockstore at {ledger_path:?}, it \ - does not have all necessary columns: {err:?}" + "Failed to open blockstore at {ledger_path:?}, it does not have all necessary \ + columns: {err:?}" ); } else { eprintln!("Failed to open blockstore at {ledger_path:?}: {err:?}"); diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index a6b34d39d63e76..ac8404edd53dbb 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -244,7 +244,8 @@ fn output_slot( println!(" {meta:?} is_full: {is_full}"); } else { println!( - " num_shreds: {}, parent_slot: {:?}, next_slots: {:?}, num_entries: {}, is_full: {}", + " num_shreds: {}, parent_slot: {:?}, next_slots: {:?}, num_entries: {}, \ + is_full: {}", num_shreds, meta.parent_slot, meta.next_slots, @@ -743,17 +744,23 @@ fn analyze_column< db: &Database, name: &str, ) { + let mut key_len: u64 = 0; let mut key_tot: u64 = 0; let mut val_hist = histogram::Histogram::new(); let mut val_tot: u64 = 0; let mut row_hist = histogram::Histogram::new(); - let a = C::key_size() as u64; - for (_x, y) in db.iter::(blockstore_db::IteratorMode::Start).unwrap() { - let b = y.len() as u64; - key_tot += a; - val_hist.increment(b).unwrap(); - val_tot += b; - row_hist.increment(a + b).unwrap(); + for (key, val) in db.iter::(blockstore_db::IteratorMode::Start).unwrap() { + // Key length is fixed, only need to calculate it once + if key_len == 0 { + key_len = C::key(key).len() as u64; + } + let val_len = val.len() as u64; + + key_tot += key_len; + val_hist.increment(val_len).unwrap(); + val_tot += val_len; + + row_hist.increment(key_len + val_len).unwrap(); } let json_result = if val_hist.entries() > 0 { @@ -761,7 +768,7 @@ fn analyze_column< "column":name, "entries":val_hist.entries(), "key_stats":{ - "max":a, + "max":key_len, "total_bytes":key_tot, }, "val_stats":{ @@ -790,7 +797,7 @@ fn analyze_column< "column":name, "entries":val_hist.entries(), "key_stats":{ - "max":a, + "max":key_len, "total_bytes":0, }, "val_stats":{ @@ -874,7 +881,8 @@ fn print_blockstore_file_metadata( for file in live_files { if sst_file_name.is_none() || file.name.eq(sst_file_name.as_ref().unwrap()) { println!( - "[{}] cf_name: {}, level: {}, start_slot: 
{:?}, end_slot: {:?}, size: {}, num_entries: {}", + "[{}] cf_name: {}, level: {}, start_slot: {:?}, end_slot: {:?}, size: {}, \ + num_entries: {}", file.name, file.column_family_name, file.level, @@ -936,7 +944,8 @@ fn compute_slot_cost(blockstore: &Blockstore, slot: Slot) -> Result<(), String> let result = cost_tracker.try_add(&tx_cost); if result.is_err() { println!( - "Slot: {slot}, CostModel rejected transaction {transaction:?}, reason {result:?}", + "Slot: {slot}, CostModel rejected transaction {transaction:?}, reason \ + {result:?}", ); } for (program_id, _instruction) in transaction.message().program_instructions_iter() @@ -947,7 +956,8 @@ fn compute_slot_cost(blockstore: &Blockstore, slot: Slot) -> Result<(), String> } println!( - "Slot: {slot}, Entries: {num_entries}, Transactions: {num_transactions}, Programs {num_programs}", + "Slot: {slot}, Entries: {num_entries}, Transactions: {num_transactions}, Programs \ + {num_programs}", ); println!(" Programs: {program_ids:?}"); @@ -1005,7 +1015,7 @@ fn get_latest_optimistic_slots( if hash_and_timestamp_opt.is_none() { warn!( "Slot {slot} is an ancestor of latest optimistically confirmed slot \ - {latest_slot}, but was not marked as optimistically confirmed in blockstore." + {latest_slot}, but was not marked as optimistically confirmed in blockstore." ); } (slot, hash_and_timestamp_opt, contains_nonvote_tx) @@ -1111,16 +1121,22 @@ fn main() { .value_name("MEGABYTES") .validator(is_parsable::) .takes_value(true) - .help("How much memory the accounts index can consume. If this is exceeded, some account index entries will be stored on disk."); + .help( + "How much memory the accounts index can consume. If this is exceeded, some account \ + index entries will be stored on disk.", + ); let disable_disk_index = Arg::with_name("disable_accounts_disk_index") .long("disable-accounts-disk-index") - .help("Disable the disk-based accounts index. It is enabled by default. The entire accounts index will be kept in memory.") + .help( + "Disable the disk-based accounts index. It is enabled by default. The entire accounts \ + index will be kept in memory.", + ) .conflicts_with("accounts_index_memory_limit_mb"); let accountsdb_skip_shrink = Arg::with_name("accounts_db_skip_shrink") .long("accounts-db-skip-shrink") .help( - "Enables faster starting of ledger-tool by skipping shrink. \ - This option is for use during testing.", + "Enables faster starting of ledger-tool by skipping shrink. This option is for use \ + during testing.", ); let accountsdb_verify_refcounts = Arg::with_name("accounts_db_verify_refcounts") .long("accounts-db-verify-refcounts") @@ -1128,29 +1144,14 @@ fn main() { "Debug option to scan all AppendVecs and verify account index refcounts prior to clean", ) .hidden(hidden_unless_forced()); - let accounts_db_test_skip_rewrites_but_include_in_bank_hash = Arg::with_name("accounts_db_test_skip_rewrites") - .long("accounts-db-test-skip-rewrites") - .help( - "Debug option to skip rewrites for rent-exempt accounts but still add them in bank delta hash calculation", - ) - .hidden(hidden_unless_forced()); - let accounts_filler_count = Arg::with_name("accounts_filler_count") - .long("accounts-filler-count") - .value_name("COUNT") - .validator(is_parsable::) - .takes_value(true) - .default_value("0") - .help("How many accounts to add to stress the system. 
Accounts are ignored in operations related to correctness.") - .hidden(hidden_unless_forced()); - let accounts_filler_size = Arg::with_name("accounts_filler_size") - .long("accounts-filler-size") - .value_name("BYTES") - .validator(is_parsable::) - .takes_value(true) - .default_value("0") - .requires("accounts_filler_count") - .help("Size per filler account in bytes.") - .hidden(hidden_unless_forced()); + let accounts_db_test_skip_rewrites_but_include_in_bank_hash = + Arg::with_name("accounts_db_test_skip_rewrites") + .long("accounts-db-test-skip-rewrites") + .help( + "Debug option to skip rewrites for rent-exempt accounts but still add them in \ + bank delta hash calculation", + ) + .hidden(hidden_unless_forced()); let account_paths_arg = Arg::with_name("account_paths") .long("accounts") .value_name("PATHS") @@ -1167,9 +1168,8 @@ fn main() { .takes_value(true) .multiple(true) .help( - "Persistent accounts-index location. \ - May be specified multiple times. \ - [default: [ledger]/accounts_index]", + "Persistent accounts-index location. May be specified multiple times. [default: \ + [ledger]/accounts_index]", ); let accounts_db_test_hash_calculation_arg = Arg::with_name("accounts_db_test_hash_calculation") .long("accounts-db-test-hash-calculation") @@ -1198,19 +1198,26 @@ fn main() { ) .hidden(hidden_unless_forced()); let halt_at_slot_store_hash_raw_data = Arg::with_name("halt_at_slot_store_hash_raw_data") - .long("halt-at-slot-store-hash-raw-data") - .help("After halting at slot, run an accounts hash calculation and store the raw hash data for debugging.") - .hidden(hidden_unless_forced()); + .long("halt-at-slot-store-hash-raw-data") + .help( + "After halting at slot, run an accounts hash calculation and store the raw hash data \ + for debugging.", + ) + .hidden(hidden_unless_forced()); let verify_index_arg = Arg::with_name("verify_accounts_index") .long("verify-accounts-index") .takes_value(false) .help("For debugging and tests on accounts index."); - let limit_load_slot_count_from_snapshot_arg = Arg::with_name("limit_load_slot_count_from_snapshot") - .long("limit-load-slot-count-from-snapshot") - .value_name("SLOT") - .validator(is_slot) - .takes_value(true) - .help("For debugging and profiling with large snapshots, artificially limit how many slots are loaded from a snapshot."); + let limit_load_slot_count_from_snapshot_arg = + Arg::with_name("limit_load_slot_count_from_snapshot") + .long("limit-load-slot-count-from-snapshot") + .value_name("SLOT") + .validator(is_slot) + .takes_value(true) + .help( + "For debugging and profiling with large snapshots, artificially limit how many \ + slots are loaded from a snapshot.", + ); let hard_forks_arg = Arg::with_name("hard_forks") .long("hard-fork") .value_name("SLOT") @@ -1234,9 +1241,8 @@ fn main() { .value_name("NUM_HASHES|\"sleep\"") .takes_value(true) .help( - "How many PoH hashes to roll before emitting the next tick. \ - If \"sleep\", for development \ - sleep for the target tick duration instead of hashing", + "How many PoH hashes to roll before emitting the next tick. 
If \"sleep\", for \ + development sleep for the target tick duration instead of hashing", ); let snapshot_version_arg = Arg::with_name("snapshot_version") .long("snapshot-version") @@ -1257,36 +1263,38 @@ fn main() { .long(use_snapshot_archives_at_startup::cli::LONG_ARG) .takes_value(true) .possible_values(use_snapshot_archives_at_startup::cli::POSSIBLE_VALUES) - .default_value(use_snapshot_archives_at_startup::cli::default_value()) + .default_value(use_snapshot_archives_at_startup::cli::default_value_for_ledger_tool()) .help(use_snapshot_archives_at_startup::cli::HELP) .long_help(use_snapshot_archives_at_startup::cli::LONG_HELP); let default_max_full_snapshot_archives_to_retain = &DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN.to_string(); - let maximum_full_snapshot_archives_to_retain = Arg::with_name( - "maximum_full_snapshots_to_retain", - ) - .long("maximum-full-snapshots-to-retain") - .alias("maximum-snapshots-to-retain") - .value_name("NUMBER") - .takes_value(true) - .default_value(default_max_full_snapshot_archives_to_retain) - .validator(validate_maximum_full_snapshot_archives_to_retain) - .help( - "The maximum number of full snapshot archives to hold on to when purging older snapshots.", - ); + let maximum_full_snapshot_archives_to_retain = + Arg::with_name("maximum_full_snapshots_to_retain") + .long("maximum-full-snapshots-to-retain") + .alias("maximum-snapshots-to-retain") + .value_name("NUMBER") + .takes_value(true) + .default_value(default_max_full_snapshot_archives_to_retain) + .validator(validate_maximum_full_snapshot_archives_to_retain) + .help( + "The maximum number of full snapshot archives to hold on to when purging older \ + snapshots.", + ); let default_max_incremental_snapshot_archives_to_retain = &DEFAULT_MAX_INCREMENTAL_SNAPSHOT_ARCHIVES_TO_RETAIN.to_string(); - let maximum_incremental_snapshot_archives_to_retain = Arg::with_name( - "maximum_incremental_snapshots_to_retain", - ) - .long("maximum-incremental-snapshots-to-retain") - .value_name("NUMBER") - .takes_value(true) - .default_value(default_max_incremental_snapshot_archives_to_retain) - .validator(validate_maximum_incremental_snapshot_archives_to_retain) - .help("The maximum number of incremental snapshot archives to hold on to when purging older snapshots."); + let maximum_incremental_snapshot_archives_to_retain = + Arg::with_name("maximum_incremental_snapshots_to_retain") + .long("maximum-incremental-snapshots-to-retain") + .value_name("NUMBER") + .takes_value(true) + .default_value(default_max_incremental_snapshot_archives_to_retain) + .validator(validate_maximum_incremental_snapshot_archives_to_retain) + .help( + "The maximum number of incremental snapshot archives to hold on to when purging \ + older snapshots.", + ); let geyser_plugin_args = Arg::with_name("geyser_plugin_config") .long("geyser-plugin-config") @@ -1339,27 +1347,30 @@ fn main() { "tolerate_corrupted_tail_records", "absolute_consistency", "point_in_time", - "skip_any_corrupted_record"]) - .help( - "Mode to recovery the ledger db write ahead log" - ), + "skip_any_corrupted_record", + ]) + .help("Mode to recovery the ledger db write ahead log"), ) .arg( Arg::with_name("force_update_to_open") .long("force-update-to-open") .takes_value(false) .global(true) - .help("Allow commands that would otherwise not alter the \ - blockstore to make necessary updates in order to open it"), + .help( + "Allow commands that would otherwise not alter the blockstore to make \ + necessary updates in order to open it", + ), ) .arg( 
Arg::with_name("ignore_ulimit_nofile_error") .long("ignore-ulimit-nofile-error") .value_name("FORMAT") .global(true) - .help("Allow opening the blockstore to succeed even if the desired open file \ - descriptor limit cannot be configured. Use with caution as some commands may \ - run fine with a reduced file descriptor limit while others will not"), + .help( + "Allow opening the blockstore to succeed even if the desired open file \ + descriptor limit cannot be configured. Use with caution as some commands may \ + run fine with a reduced file descriptor limit while others will not", + ), ) .arg( Arg::with_name("snapshot_archive_path") @@ -1394,8 +1405,10 @@ fn main() { .global(true) .takes_value(true) .possible_values(&["json", "json-compact"]) - .help("Return information in specified output format, \ - currently only available for bigtable and program subcommands"), + .help( + "Return information in specified output format, currently only available for \ + bigtable and program subcommands", + ), ) .arg( Arg::with_name("verbose") @@ -1409,704 +1422,756 @@ fn main() { .bigtable_subcommand() .subcommand( SubCommand::with_name("print") - .about("Print the ledger") - .arg(&starting_slot_arg) - .arg(&allow_dead_slots_arg) - .arg(&ending_slot_arg) - .arg( - Arg::with_name("num_slots") - .long("num-slots") - .value_name("SLOT") - .validator(is_slot) - .takes_value(true) - .help("Number of slots to print"), - ) - .arg( - Arg::with_name("only_rooted") - .long("only-rooted") - .takes_value(false) - .help("Only print root slots"), - ) + .about("Print the ledger") + .arg(&starting_slot_arg) + .arg(&allow_dead_slots_arg) + .arg(&ending_slot_arg) + .arg( + Arg::with_name("num_slots") + .long("num-slots") + .value_name("SLOT") + .validator(is_slot) + .takes_value(true) + .help("Number of slots to print"), + ) + .arg( + Arg::with_name("only_rooted") + .long("only-rooted") + .takes_value(false) + .help("Only print root slots"), + ), ) .subcommand( SubCommand::with_name("copy") - .about("Copy the ledger") - .arg(&starting_slot_arg) - .arg(&ending_slot_arg) - .arg( - Arg::with_name("target_db") - .long("target-db") - .value_name("DIR") - .takes_value(true) - .help("Target db"), - ) + .about("Copy the ledger") + .arg(&starting_slot_arg) + .arg(&ending_slot_arg) + .arg( + Arg::with_name("target_db") + .long("target-db") + .value_name("DIR") + .takes_value(true) + .help("Target db"), + ), ) .subcommand( SubCommand::with_name("slot") - .about("Print the contents of one or more slots") - .arg( - Arg::with_name("slots") - .index(1) - .value_name("SLOTS") - .validator(is_slot) - .takes_value(true) - .multiple(true) - .required(true) - .help("Slots to print"), - ) - .arg(&allow_dead_slots_arg) + .about("Print the contents of one or more slots") + .arg( + Arg::with_name("slots") + .index(1) + .value_name("SLOTS") + .validator(is_slot) + .takes_value(true) + .multiple(true) + .required(true) + .help("Slots to print"), + ) + .arg(&allow_dead_slots_arg), ) .subcommand( SubCommand::with_name("dead-slots") - .arg(&starting_slot_arg) - .about("Print all the dead slots in the ledger") + .arg(&starting_slot_arg) + .about("Print all the dead slots in the ledger"), ) .subcommand( SubCommand::with_name("duplicate-slots") - .arg(&starting_slot_arg) - .about("Print all the duplicate slots in the ledger") + .arg(&starting_slot_arg) + .about("Print all the duplicate slots in the ledger"), ) .subcommand( SubCommand::with_name("set-dead-slot") - .about("Mark one or more slots dead") - .arg( - Arg::with_name("slots") - .index(1) - 
.value_name("SLOTS") - .validator(is_slot) - .takes_value(true) - .multiple(true) - .required(true) - .help("Slots to mark dead"), - ) + .about("Mark one or more slots dead") + .arg( + Arg::with_name("slots") + .index(1) + .value_name("SLOTS") + .validator(is_slot) + .takes_value(true) + .multiple(true) + .required(true) + .help("Slots to mark dead"), + ), ) .subcommand( SubCommand::with_name("remove-dead-slot") - .about("Remove the dead flag for a slot") - .arg( - Arg::with_name("slots") - .index(1) - .value_name("SLOTS") - .validator(is_slot) - .takes_value(true) - .multiple(true) - .required(true) - .help("Slots to mark as not dead"), - ) + .about("Remove the dead flag for a slot") + .arg( + Arg::with_name("slots") + .index(1) + .value_name("SLOTS") + .validator(is_slot) + .takes_value(true) + .multiple(true) + .required(true) + .help("Slots to mark as not dead"), + ), ) .subcommand( SubCommand::with_name("genesis") - .about("Prints the ledger's genesis config") - .arg(&max_genesis_archive_unpacked_size_arg) - .arg( - Arg::with_name("accounts") - .long("accounts") - .takes_value(false) - .help("Print the ledger's genesis accounts"), - ) - .arg( - Arg::with_name("no_account_data") - .long("no-account-data") - .takes_value(false) - .requires("accounts") - .help("Do not print account data when printing account contents."), - ) - .arg(&accounts_data_encoding_arg) + .about("Prints the ledger's genesis config") + .arg(&max_genesis_archive_unpacked_size_arg) + .arg( + Arg::with_name("accounts") + .long("accounts") + .takes_value(false) + .help("Print the ledger's genesis accounts"), + ) + .arg( + Arg::with_name("no_account_data") + .long("no-account-data") + .takes_value(false) + .requires("accounts") + .help("Do not print account data when printing account contents."), + ) + .arg(&accounts_data_encoding_arg), ) .subcommand( SubCommand::with_name("genesis-hash") - .about("Prints the ledger's genesis hash") - .arg(&max_genesis_archive_unpacked_size_arg) + .about("Prints the ledger's genesis hash") + .arg(&max_genesis_archive_unpacked_size_arg), ) .subcommand( SubCommand::with_name("parse_full_frozen") - .about("Parses log for information about critical events about \ - ancestors of the given `ending_slot`") - .arg(&starting_slot_arg) - .arg(&ending_slot_arg) - .arg( - Arg::with_name("log_path") - .long("log-path") - .value_name("PATH") - .takes_value(true) - .help("path to log file to parse"), - ) + .about( + "Parses log for information about critical events about ancestors of the \ + given `ending_slot`", + ) + .arg(&starting_slot_arg) + .arg(&ending_slot_arg) + .arg( + Arg::with_name("log_path") + .long("log-path") + .value_name("PATH") + .takes_value(true) + .help("path to log file to parse"), + ), ) .subcommand( SubCommand::with_name("modify-genesis") - .about("Modifies genesis parameters") - .arg(&max_genesis_archive_unpacked_size_arg) - .arg(&hashes_per_tick) - .arg( - Arg::with_name("cluster_type") - .long("cluster-type") - .possible_values(&ClusterType::STRINGS) - .takes_value(true) - .help( - "Selects the features that will be enabled for the cluster" - ), - ) - .arg( - Arg::with_name("output_directory") - .index(1) - .value_name("DIR") - .takes_value(true) - .help("Output directory for the modified genesis config"), - ) + .about("Modifies genesis parameters") + .arg(&max_genesis_archive_unpacked_size_arg) + .arg(&hashes_per_tick) + .arg( + Arg::with_name("cluster_type") + .long("cluster-type") + .possible_values(&ClusterType::STRINGS) + .takes_value(true) + .help("Selects the features 
that will be enabled for the cluster"), + ) + .arg( + Arg::with_name("output_directory") + .index(1) + .value_name("DIR") + .takes_value(true) + .help("Output directory for the modified genesis config"), + ), ) .subcommand( SubCommand::with_name("shred-version") - .about("Prints the ledger's shred hash") - .arg(&hard_forks_arg) - .arg(&max_genesis_archive_unpacked_size_arg) - .arg(&accounts_index_bins) - .arg(&accounts_index_limit) - .arg(&disable_disk_index) - .arg(&accountsdb_verify_refcounts) - .arg(&accounts_db_skip_initial_hash_calc_arg) - .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) + .about("Prints the ledger's shred hash") + .arg(&hard_forks_arg) + .arg(&max_genesis_archive_unpacked_size_arg) + .arg(&accounts_index_bins) + .arg(&accounts_index_limit) + .arg(&disable_disk_index) + .arg(&accountsdb_verify_refcounts) + .arg(&accounts_db_skip_initial_hash_calc_arg) + .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash), ) .subcommand( SubCommand::with_name("shred-meta") - .about("Prints raw shred metadata") - .arg(&starting_slot_arg) - .arg(&ending_slot_arg) + .about("Prints raw shred metadata") + .arg(&starting_slot_arg) + .arg(&ending_slot_arg), ) .subcommand( SubCommand::with_name("bank-hash") - .about("Prints the hash of the working bank after reading the ledger") - .arg(&max_genesis_archive_unpacked_size_arg) - .arg(&halt_at_slot_arg) - .arg(&accounts_index_bins) - .arg(&accounts_index_limit) - .arg(&disable_disk_index) - .arg(&accountsdb_verify_refcounts) - .arg(&accounts_db_skip_initial_hash_calc_arg) - .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) + .about("Prints the hash of the working bank after reading the ledger") + .arg(&max_genesis_archive_unpacked_size_arg) + .arg(&halt_at_slot_arg) + .arg(&accounts_index_bins) + .arg(&accounts_index_limit) + .arg(&disable_disk_index) + .arg(&accountsdb_verify_refcounts) + .arg(&accounts_db_skip_initial_hash_calc_arg) + .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash), ) .subcommand( SubCommand::with_name("bounds") - .about( - "Print lowest and highest non-empty slots. \ - Note that there may be empty slots within the bounds", - ) - .arg( - Arg::with_name("all") - .long("all") - .takes_value(false) - .required(false) - .help("Additionally print all the non-empty slots within the bounds"), - ) + .about( + "Print lowest and highest non-empty slots. 
Note that there may be empty slots \ + within the bounds", + ) + .arg( + Arg::with_name("all") + .long("all") + .takes_value(false) + .required(false) + .help("Additionally print all the non-empty slots within the bounds"), + ), ) .subcommand( SubCommand::with_name("json") - .about("Print the ledger in JSON format") - .arg(&starting_slot_arg) - .arg(&allow_dead_slots_arg) + .about("Print the ledger in JSON format") + .arg(&starting_slot_arg) + .arg(&allow_dead_slots_arg), ) .subcommand( SubCommand::with_name("verify") - .about("Verify the ledger") - .arg(&no_snapshot_arg) - .arg(&account_paths_arg) - .arg(&accounts_hash_cache_path_arg) - .arg(&accounts_index_path_arg) - .arg(&halt_at_slot_arg) - .arg(&limit_load_slot_count_from_snapshot_arg) - .arg(&accounts_index_bins) - .arg(&accounts_index_limit) - .arg(&disable_disk_index) - .arg(&accountsdb_skip_shrink) - .arg(&accountsdb_verify_refcounts) - .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) - .arg(&accounts_filler_count) - .arg(&accounts_filler_size) - .arg(&verify_index_arg) - .arg(&accounts_db_skip_initial_hash_calc_arg) - .arg(&ancient_append_vecs) - .arg(&halt_at_slot_store_hash_raw_data) - .arg(&hard_forks_arg) - .arg(&accounts_db_test_hash_calculation_arg) - .arg(&no_os_memory_stats_reporting_arg) - .arg(&allow_dead_slots_arg) - .arg(&max_genesis_archive_unpacked_size_arg) - .arg(&debug_key_arg) - .arg(&geyser_plugin_args) - .arg(&use_snapshot_archives_at_startup) - .arg( - Arg::with_name("skip_poh_verify") - .long("skip-poh-verify") - .takes_value(false) - .help( - "Deprecated, please use --skip-verification.\n\ - Skip ledger PoH and transaction verification." - ), - ) - .arg( - Arg::with_name("skip_verification") - .long("skip-verification") - .takes_value(false) - .help("Skip ledger PoH and transaction verification."), - ) - .arg( - Arg::with_name("enable_rpc_transaction_history") - .long("enable-rpc-transaction-history") - .takes_value(false) - .help("Store transaction info for processed slots into local ledger"), - ) - .arg( - Arg::with_name("run_final_hash_calc") - .long("run-final-accounts-hash-calculation") - .takes_value(false) - .help("After 'verify' completes, run a final accounts hash calculation. Final hash calculation could race with accounts background service tasks and assert."), - ) - .arg( - Arg::with_name("partitioned_epoch_rewards_compare_calculation") - .long("partitioned-epoch-rewards-compare-calculation") - .takes_value(false) - .help("Do normal epoch rewards distribution, but also calculate rewards using the partitioned rewards code path and compare the resulting vote and stake accounts") - .hidden(hidden_unless_forced()) - ) - .arg( - Arg::with_name("partitioned_epoch_rewards_force_enable_single_slot") - .long("partitioned-epoch-rewards-force-enable-single-slot") - .takes_value(false) - .help("Force the partitioned rewards distribution, but distribute all rewards in the first slot in the epoch. This should match consensus with the normal rewards distribution.") - .conflicts_with("partitioned_epoch_rewards_compare_calculation") - .hidden(hidden_unless_forced()) - ) - .arg( - Arg::with_name("print_accounts_stats") - .long("print-accounts-stats") - .takes_value(false) - .help("After verifying the ledger, print some information about the account stores"), - ) - .arg( - Arg::with_name("write_bank_file") - .long("write-bank-file") - .takes_value(false) - .help("After verifying the ledger, write a file that contains the information \ - that went into computing the completed bank's bank hash. 
The file will be \ - written within /bank_hash_details/"), - ) - ).subcommand( + .about("Verify the ledger") + .arg(&no_snapshot_arg) + .arg(&account_paths_arg) + .arg(&accounts_hash_cache_path_arg) + .arg(&accounts_index_path_arg) + .arg(&halt_at_slot_arg) + .arg(&limit_load_slot_count_from_snapshot_arg) + .arg(&accounts_index_bins) + .arg(&accounts_index_limit) + .arg(&disable_disk_index) + .arg(&accountsdb_skip_shrink) + .arg(&accountsdb_verify_refcounts) + .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) + .arg(&verify_index_arg) + .arg(&accounts_db_skip_initial_hash_calc_arg) + .arg(&ancient_append_vecs) + .arg(&halt_at_slot_store_hash_raw_data) + .arg(&hard_forks_arg) + .arg(&accounts_db_test_hash_calculation_arg) + .arg(&no_os_memory_stats_reporting_arg) + .arg(&allow_dead_slots_arg) + .arg(&max_genesis_archive_unpacked_size_arg) + .arg(&debug_key_arg) + .arg(&geyser_plugin_args) + .arg(&use_snapshot_archives_at_startup) + .arg( + Arg::with_name("skip_poh_verify") + .long("skip-poh-verify") + .takes_value(false) + .help( + "Deprecated, please use --skip-verification. Skip ledger PoH and \ + transaction verification.", + ), + ) + .arg( + Arg::with_name("skip_verification") + .long("skip-verification") + .takes_value(false) + .help("Skip ledger PoH and transaction verification."), + ) + .arg( + Arg::with_name("enable_rpc_transaction_history") + .long("enable-rpc-transaction-history") + .takes_value(false) + .help("Store transaction info for processed slots into local ledger"), + ) + .arg( + Arg::with_name("run_final_hash_calc") + .long("run-final-accounts-hash-calculation") + .takes_value(false) + .help( + "After 'verify' completes, run a final accounts hash calculation. \ + Final hash calculation could race with accounts background service \ + tasks and assert.", + ), + ) + .arg( + Arg::with_name("partitioned_epoch_rewards_compare_calculation") + .long("partitioned-epoch-rewards-compare-calculation") + .takes_value(false) + .help( + "Do normal epoch rewards distribution, but also calculate rewards \ + using the partitioned rewards code path and compare the resulting \ + vote and stake accounts", + ) + .hidden(hidden_unless_forced()), + ) + .arg( + Arg::with_name("partitioned_epoch_rewards_force_enable_single_slot") + .long("partitioned-epoch-rewards-force-enable-single-slot") + .takes_value(false) + .help( + "Force the partitioned rewards distribution, but distribute all \ + rewards in the first slot in the epoch. This should match consensus \ + with the normal rewards distribution.", + ) + .conflicts_with("partitioned_epoch_rewards_compare_calculation") + .hidden(hidden_unless_forced()), + ) + .arg( + Arg::with_name("print_accounts_stats") + .long("print-accounts-stats") + .takes_value(false) + .help( + "After verifying the ledger, print some information about the account \ + stores", + ), + ) + .arg( + Arg::with_name("write_bank_file") + .long("write-bank-file") + .takes_value(false) + .help( + "After verifying the ledger, write a file that contains the \ + information that went into computing the completed bank's bank hash. 
\ + The file will be written within /bank_hash_details/", + ), + ), + ) + .subcommand( SubCommand::with_name("graph") - .about("Create a Graphviz rendering of the ledger") - .arg(&no_snapshot_arg) - .arg(&account_paths_arg) - .arg(&accounts_hash_cache_path_arg) - .arg(&accounts_index_bins) - .arg(&accounts_index_limit) - .arg(&disable_disk_index) - .arg(&accountsdb_verify_refcounts) - .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) - .arg(&accounts_db_skip_initial_hash_calc_arg) - .arg(&halt_at_slot_arg) - .arg(&hard_forks_arg) - .arg(&max_genesis_archive_unpacked_size_arg) - .arg(&use_snapshot_archives_at_startup) - .arg( - Arg::with_name("include_all_votes") - .long("include-all-votes") - .help("Include all votes in the graph"), - ) - .arg( - Arg::with_name("graph_filename") - .index(1) - .value_name("FILENAME") - .takes_value(true) - .help("Output file"), - ) - .arg( - Arg::with_name("vote_account_mode") - .long("vote-account-mode") - .takes_value(true) - .value_name("MODE") - .default_value(default_graph_vote_account_mode.as_ref()) - .possible_values(GraphVoteAccountMode::ALL_MODE_STRINGS) - .help("Specify if and how to graph vote accounts. Enabling will incur significant rendering overhead, especially `with-history`") - ) - ).subcommand( + .about("Create a Graphviz rendering of the ledger") + .arg(&no_snapshot_arg) + .arg(&account_paths_arg) + .arg(&accounts_hash_cache_path_arg) + .arg(&accounts_index_bins) + .arg(&accounts_index_limit) + .arg(&disable_disk_index) + .arg(&accountsdb_verify_refcounts) + .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) + .arg(&accounts_db_skip_initial_hash_calc_arg) + .arg(&halt_at_slot_arg) + .arg(&hard_forks_arg) + .arg(&max_genesis_archive_unpacked_size_arg) + .arg(&use_snapshot_archives_at_startup) + .arg( + Arg::with_name("include_all_votes") + .long("include-all-votes") + .help("Include all votes in the graph"), + ) + .arg( + Arg::with_name("graph_filename") + .index(1) + .value_name("FILENAME") + .takes_value(true) + .help("Output file"), + ) + .arg( + Arg::with_name("vote_account_mode") + .long("vote-account-mode") + .takes_value(true) + .value_name("MODE") + .default_value(default_graph_vote_account_mode.as_ref()) + .possible_values(GraphVoteAccountMode::ALL_MODE_STRINGS) + .help( + "Specify if and how to graph vote accounts. 
Enabling will incur \ + significant rendering overhead, especially `with-history`", + ), + ), + ) + .subcommand( SubCommand::with_name("create-snapshot") - .about("Create a new ledger snapshot") - .arg(&no_snapshot_arg) - .arg(&account_paths_arg) - .arg(&accounts_hash_cache_path_arg) - .arg(&accounts_index_bins) - .arg(&accounts_index_limit) - .arg(&disable_disk_index) - .arg(&accountsdb_verify_refcounts) - .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) - .arg(&accounts_db_skip_initial_hash_calc_arg) - .arg(&accountsdb_skip_shrink) - .arg(&ancient_append_vecs) - .arg(&hard_forks_arg) - .arg(&max_genesis_archive_unpacked_size_arg) - .arg(&snapshot_version_arg) - .arg(&maximum_full_snapshot_archives_to_retain) - .arg(&maximum_incremental_snapshot_archives_to_retain) - .arg(&geyser_plugin_args) - .arg(&use_snapshot_archives_at_startup) - .arg( - Arg::with_name("snapshot_slot") - .index(1) - .value_name("SLOT") - .validator(|value| { - if value.parse::().is_ok() - || value == "ROOT" - { - Ok(()) - } else { - Err(format!( - "Unable to parse as a number or the keyword ROOT, provided: {value}" - )) - } - }) - .takes_value(true) - .help("Slot at which to create the snapshot; accepts keyword ROOT for the highest root"), - ) - .arg( - Arg::with_name("output_directory") - .index(2) - .value_name("DIR") - .takes_value(true) - .help("Output directory for the snapshot [default: --snapshot-archive-path if present else --ledger directory]"), - ) - .arg( - Arg::with_name("warp_slot") - .required(false) - .long("warp-slot") - .takes_value(true) - .value_name("WARP_SLOT") - .validator(is_slot) - .help("After loading the snapshot slot warp the ledger to WARP_SLOT, \ - which could be a slot in a galaxy far far away"), - ) - .arg( - Arg::with_name("faucet_lamports") - .short("t") - .long("faucet-lamports") - .value_name("LAMPORTS") - .takes_value(true) - .requires("faucet_pubkey") - .help("Number of lamports to assign to the faucet"), - ) - .arg( - Arg::with_name("faucet_pubkey") - .short("m") - .long("faucet-pubkey") - .value_name("PUBKEY") - .takes_value(true) - .validator(is_pubkey_or_keypair) - .requires("faucet_lamports") - .help("Path to file containing the faucet's pubkey"), - ) - .arg( - Arg::with_name("bootstrap_validator") - .short("b") - .long("bootstrap-validator") - .value_name("IDENTITY_PUBKEY VOTE_PUBKEY STAKE_PUBKEY") - .takes_value(true) - .validator(is_pubkey_or_keypair) - .number_of_values(3) - .multiple(true) - .help("The bootstrap validator's identity, vote and stake pubkeys"), - ) - .arg( - Arg::with_name("bootstrap_stake_authorized_pubkey") - .long("bootstrap-stake-authorized-pubkey") - .value_name("BOOTSTRAP STAKE AUTHORIZED PUBKEY") - .takes_value(true) - .validator(is_pubkey_or_keypair) - .help( - "Path to file containing the pubkey authorized to manage the bootstrap \ - validator's stake [default: --bootstrap-validator IDENTITY_PUBKEY]", - ), - ) - .arg( - Arg::with_name("bootstrap_validator_lamports") - .long("bootstrap-validator-lamports") - .value_name("LAMPORTS") - .takes_value(true) - .default_value(default_bootstrap_validator_lamports) - .help("Number of lamports to assign to the bootstrap validator"), - ) - .arg( - Arg::with_name("bootstrap_validator_stake_lamports") - .long("bootstrap-validator-stake-lamports") - .value_name("LAMPORTS") - .takes_value(true) - .default_value(default_bootstrap_validator_stake_lamports) - .help("Number of lamports to assign to the bootstrap validator's stake account"), - ) - .arg( - Arg::with_name("rent_burn_percentage") - 
.long("rent-burn-percentage") - .value_name("NUMBER") - .takes_value(true) - .help("Adjust percentage of collected rent to burn") - .validator(is_valid_percentage), - ) - .arg(&hashes_per_tick) - .arg( - Arg::with_name("accounts_to_remove") - .required(false) - .long("remove-account") - .takes_value(true) - .value_name("PUBKEY") - .validator(is_pubkey) - .multiple(true) - .help("List of accounts to remove while creating the snapshot"), - ) - .arg( - Arg::with_name("feature_gates_to_deactivate") - .required(false) - .long("deactivate-feature-gate") - .takes_value(true) - .value_name("PUBKEY") - .validator(is_pubkey) - .multiple(true) - .help("List of feature gates to deactivate while creating the snapshot") - ) - .arg( - Arg::with_name("vote_accounts_to_destake") - .required(false) - .long("destake-vote-account") - .takes_value(true) - .value_name("PUBKEY") - .validator(is_pubkey) - .multiple(true) - .help("List of validator vote accounts to destake") - ) - .arg( - Arg::with_name("remove_stake_accounts") - .required(false) - .long("remove-stake-accounts") - .takes_value(false) - .help("Remove all existing stake accounts from the new snapshot") - ) - .arg( - Arg::with_name("incremental") - .long("incremental") - .takes_value(false) - .help("Create an incremental snapshot instead of a full snapshot. This requires \ - that the ledger is loaded from a full snapshot, which will be used as the \ - base for the incremental snapshot.") - .conflicts_with("no_snapshot") - ) - .arg( - Arg::with_name("minimized") - .long("minimized") - .takes_value(false) - .help("Create a minimized snapshot instead of a full snapshot. This snapshot \ - will only include information needed to replay the ledger from the \ - snapshot slot to the ending slot.") - .conflicts_with("incremental") - .requires("ending_slot") - ) - .arg( - Arg::with_name("ending_slot") - .long("ending-slot") - .takes_value(true) - .value_name("ENDING_SLOT") - .help("Ending slot for minimized snapshot creation") - ) - .arg( - Arg::with_name("snapshot_archive_format") - .long("snapshot-archive-format") - .possible_values(SUPPORTED_ARCHIVE_COMPRESSION) - .default_value(DEFAULT_ARCHIVE_COMPRESSION) - .value_name("ARCHIVE_TYPE") - .takes_value(true) - .help("Snapshot archive format to use.") - .conflicts_with("no_snapshot") - ) - ).subcommand( + .about("Create a new ledger snapshot") + .arg(&no_snapshot_arg) + .arg(&account_paths_arg) + .arg(&accounts_hash_cache_path_arg) + .arg(&accounts_index_bins) + .arg(&accounts_index_limit) + .arg(&disable_disk_index) + .arg(&accountsdb_verify_refcounts) + .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) + .arg(&accounts_db_skip_initial_hash_calc_arg) + .arg(&accountsdb_skip_shrink) + .arg(&ancient_append_vecs) + .arg(&hard_forks_arg) + .arg(&max_genesis_archive_unpacked_size_arg) + .arg(&snapshot_version_arg) + .arg(&maximum_full_snapshot_archives_to_retain) + .arg(&maximum_incremental_snapshot_archives_to_retain) + .arg(&geyser_plugin_args) + .arg(&use_snapshot_archives_at_startup) + .arg( + Arg::with_name("snapshot_slot") + .index(1) + .value_name("SLOT") + .validator(|value| { + if value.parse::().is_ok() || value == "ROOT" { + Ok(()) + } else { + Err(format!( + "Unable to parse as a number or the keyword ROOT, provided: \ + {value}" + )) + } + }) + .takes_value(true) + .help( + "Slot at which to create the snapshot; accepts keyword ROOT for the \ + highest root", + ), + ) + .arg( + Arg::with_name("output_directory") + .index(2) + .value_name("DIR") + .takes_value(true) + .help( + "Output 
directory for the snapshot \ + [default: --snapshot-archive-path if present else --ledger directory]", + ), + ) + .arg( + Arg::with_name("warp_slot") + .required(false) + .long("warp-slot") + .takes_value(true) + .value_name("WARP_SLOT") + .validator(is_slot) + .help( + "After loading the snapshot slot warp the ledger to WARP_SLOT, which \ + could be a slot in a galaxy far far away", + ), + ) + .arg( + Arg::with_name("faucet_lamports") + .short("t") + .long("faucet-lamports") + .value_name("LAMPORTS") + .takes_value(true) + .requires("faucet_pubkey") + .help("Number of lamports to assign to the faucet"), + ) + .arg( + Arg::with_name("faucet_pubkey") + .short("m") + .long("faucet-pubkey") + .value_name("PUBKEY") + .takes_value(true) + .validator(is_pubkey_or_keypair) + .requires("faucet_lamports") + .help("Path to file containing the faucet's pubkey"), + ) + .arg( + Arg::with_name("bootstrap_validator") + .short("b") + .long("bootstrap-validator") + .value_name("IDENTITY_PUBKEY VOTE_PUBKEY STAKE_PUBKEY") + .takes_value(true) + .validator(is_pubkey_or_keypair) + .number_of_values(3) + .multiple(true) + .help("The bootstrap validator's identity, vote and stake pubkeys"), + ) + .arg( + Arg::with_name("bootstrap_stake_authorized_pubkey") + .long("bootstrap-stake-authorized-pubkey") + .value_name("BOOTSTRAP STAKE AUTHORIZED PUBKEY") + .takes_value(true) + .validator(is_pubkey_or_keypair) + .help( + "Path to file containing the pubkey authorized to manage the \ + bootstrap validator's stake + [default: --bootstrap-validator IDENTITY_PUBKEY]", + ), + ) + .arg( + Arg::with_name("bootstrap_validator_lamports") + .long("bootstrap-validator-lamports") + .value_name("LAMPORTS") + .takes_value(true) + .default_value(default_bootstrap_validator_lamports) + .help("Number of lamports to assign to the bootstrap validator"), + ) + .arg( + Arg::with_name("bootstrap_validator_stake_lamports") + .long("bootstrap-validator-stake-lamports") + .value_name("LAMPORTS") + .takes_value(true) + .default_value(default_bootstrap_validator_stake_lamports) + .help( + "Number of lamports to assign to the bootstrap validator's stake \ + account", + ), + ) + .arg( + Arg::with_name("rent_burn_percentage") + .long("rent-burn-percentage") + .value_name("NUMBER") + .takes_value(true) + .help("Adjust percentage of collected rent to burn") + .validator(is_valid_percentage), + ) + .arg(&hashes_per_tick) + .arg( + Arg::with_name("accounts_to_remove") + .required(false) + .long("remove-account") + .takes_value(true) + .value_name("PUBKEY") + .validator(is_pubkey) + .multiple(true) + .help("List of accounts to remove while creating the snapshot"), + ) + .arg( + Arg::with_name("feature_gates_to_deactivate") + .required(false) + .long("deactivate-feature-gate") + .takes_value(true) + .value_name("PUBKEY") + .validator(is_pubkey) + .multiple(true) + .help("List of feature gates to deactivate while creating the snapshot"), + ) + .arg( + Arg::with_name("vote_accounts_to_destake") + .required(false) + .long("destake-vote-account") + .takes_value(true) + .value_name("PUBKEY") + .validator(is_pubkey) + .multiple(true) + .help("List of validator vote accounts to destake"), + ) + .arg( + Arg::with_name("remove_stake_accounts") + .required(false) + .long("remove-stake-accounts") + .takes_value(false) + .help("Remove all existing stake accounts from the new snapshot"), + ) + .arg( + Arg::with_name("incremental") + .long("incremental") + .takes_value(false) + .help( + "Create an incremental snapshot instead of a full snapshot. 
This \ + requires that the ledger is loaded from a full snapshot, which will \ + be used as the base for the incremental snapshot.", + ) + .conflicts_with("no_snapshot"), + ) + .arg( + Arg::with_name("minimized") + .long("minimized") + .takes_value(false) + .help( + "Create a minimized snapshot instead of a full snapshot. This \ + snapshot will only include information needed to replay the ledger \ + from the snapshot slot to the ending slot.", + ) + .conflicts_with("incremental") + .requires("ending_slot"), + ) + .arg( + Arg::with_name("ending_slot") + .long("ending-slot") + .takes_value(true) + .value_name("ENDING_SLOT") + .help("Ending slot for minimized snapshot creation"), + ) + .arg( + Arg::with_name("snapshot_archive_format") + .long("snapshot-archive-format") + .possible_values(SUPPORTED_ARCHIVE_COMPRESSION) + .default_value(DEFAULT_ARCHIVE_COMPRESSION) + .value_name("ARCHIVE_TYPE") + .takes_value(true) + .help("Snapshot archive format to use.") + .conflicts_with("no_snapshot"), + ), + ) + .subcommand( SubCommand::with_name("accounts") - .about("Print account stats and contents after processing the ledger") - .arg(&no_snapshot_arg) - .arg(&account_paths_arg) - .arg(&accounts_hash_cache_path_arg) - .arg(&accounts_index_bins) - .arg(&accounts_index_limit) - .arg(&disable_disk_index) - .arg(&accountsdb_verify_refcounts) - .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) - .arg(&accounts_db_skip_initial_hash_calc_arg) - .arg(&halt_at_slot_arg) - .arg(&hard_forks_arg) - .arg(&geyser_plugin_args) - .arg(&accounts_data_encoding_arg) - .arg(&use_snapshot_archives_at_startup) - .arg( - Arg::with_name("include_sysvars") - .long("include-sysvars") - .takes_value(false) - .help("Include sysvars too"), - ) - .arg( - Arg::with_name("no_account_contents") - .long("no-account-contents") - .takes_value(false) - .help("Do not print contents of each account, which is very slow with lots of accounts."), - ) - .arg(Arg::with_name("no_account_data") - .long("no-account-data") - .takes_value(false) - .help("Do not print account data when printing account contents."), - ) - .arg(&max_genesis_archive_unpacked_size_arg) - ).subcommand( + .about("Print account stats and contents after processing the ledger") + .arg(&no_snapshot_arg) + .arg(&account_paths_arg) + .arg(&accounts_hash_cache_path_arg) + .arg(&accounts_index_bins) + .arg(&accounts_index_limit) + .arg(&disable_disk_index) + .arg(&accountsdb_verify_refcounts) + .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) + .arg(&accounts_db_skip_initial_hash_calc_arg) + .arg(&halt_at_slot_arg) + .arg(&hard_forks_arg) + .arg(&geyser_plugin_args) + .arg(&accounts_data_encoding_arg) + .arg(&use_snapshot_archives_at_startup) + .arg( + Arg::with_name("include_sysvars") + .long("include-sysvars") + .takes_value(false) + .help("Include sysvars too"), + ) + .arg( + Arg::with_name("no_account_contents") + .long("no-account-contents") + .takes_value(false) + .help( + "Do not print contents of each account, which is very slow with lots \ + of accounts.", + ), + ) + .arg( + Arg::with_name("no_account_data") + .long("no-account-data") + .takes_value(false) + .help("Do not print account data when printing account contents."), + ) + .arg(&max_genesis_archive_unpacked_size_arg), + ) + .subcommand( SubCommand::with_name("capitalization") - .about("Print capitalization (aka, total supply) while checksumming it") - .arg(&no_snapshot_arg) - .arg(&account_paths_arg) - .arg(&accounts_hash_cache_path_arg) - .arg(&accounts_index_bins) - 
.arg(&accounts_index_limit) - .arg(&disable_disk_index) - .arg(&accountsdb_verify_refcounts) - .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) - .arg(&accounts_db_skip_initial_hash_calc_arg) - .arg(&halt_at_slot_arg) - .arg(&hard_forks_arg) - .arg(&max_genesis_archive_unpacked_size_arg) - .arg(&geyser_plugin_args) - .arg(&use_snapshot_archives_at_startup) - .arg( - Arg::with_name("warp_epoch") - .required(false) - .long("warp-epoch") - .takes_value(true) - .value_name("WARP_EPOCH") - .help("After loading the snapshot warp the ledger to WARP_EPOCH, \ - which could be an epoch in a galaxy far far away"), - ) - .arg( - Arg::with_name("inflation") - .required(false) - .long("inflation") - .takes_value(true) - .possible_values(&["pico", "full", "none"]) - .help("Overwrite inflation when warping"), - ) - .arg( - Arg::with_name("enable_credits_auto_rewind") - .required(false) - .long("enable-credits-auto-rewind") - .takes_value(false) - .help("Enable credits auto rewind"), - ) - .arg( - Arg::with_name("recalculate_capitalization") - .required(false) - .long("recalculate-capitalization") - .takes_value(false) - .help("Recalculate capitalization before warping; circumvents \ - bank's out-of-sync capitalization"), - ) - .arg( - Arg::with_name("csv_filename") - .long("csv-filename") - .value_name("FILENAME") - .takes_value(true) - .help("Output file in the csv format"), - ) - ).subcommand( + .about("Print capitalization (aka, total supply) while checksumming it") + .arg(&no_snapshot_arg) + .arg(&account_paths_arg) + .arg(&accounts_hash_cache_path_arg) + .arg(&accounts_index_bins) + .arg(&accounts_index_limit) + .arg(&disable_disk_index) + .arg(&accountsdb_verify_refcounts) + .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) + .arg(&accounts_db_skip_initial_hash_calc_arg) + .arg(&halt_at_slot_arg) + .arg(&hard_forks_arg) + .arg(&max_genesis_archive_unpacked_size_arg) + .arg(&geyser_plugin_args) + .arg(&use_snapshot_archives_at_startup) + .arg( + Arg::with_name("warp_epoch") + .required(false) + .long("warp-epoch") + .takes_value(true) + .value_name("WARP_EPOCH") + .help( + "After loading the snapshot warp the ledger to WARP_EPOCH, which \ + could be an epoch in a galaxy far far away", + ), + ) + .arg( + Arg::with_name("inflation") + .required(false) + .long("inflation") + .takes_value(true) + .possible_values(&["pico", "full", "none"]) + .help("Overwrite inflation when warping"), + ) + .arg( + Arg::with_name("enable_credits_auto_rewind") + .required(false) + .long("enable-credits-auto-rewind") + .takes_value(false) + .help("Enable credits auto rewind"), + ) + .arg( + Arg::with_name("recalculate_capitalization") + .required(false) + .long("recalculate-capitalization") + .takes_value(false) + .help( + "Recalculate capitalization before warping; circumvents bank's \ + out-of-sync capitalization", + ), + ) + .arg( + Arg::with_name("csv_filename") + .long("csv-filename") + .value_name("FILENAME") + .takes_value(true) + .help("Output file in the csv format"), + ), + ) + .subcommand( SubCommand::with_name("purge") - .about("Delete a range of slots from the ledger") - .arg( - Arg::with_name("start_slot") - .index(1) - .value_name("SLOT") - .takes_value(true) - .required(true) - .help("Start slot to purge from (inclusive)"), - ) - .arg( - Arg::with_name("end_slot") - .index(2) - .value_name("SLOT") - .help("Ending slot to stop purging (inclusive) \ - [default: the highest slot in the ledger]"), - ) - .arg( - Arg::with_name("batch_size") - .long("batch-size") - .value_name("NUM") - 
.takes_value(true) - .default_value("1000") - .help("Removes at most BATCH_SIZE slots while purging in loop"), - ) - .arg( - Arg::with_name("no_compaction") - .long("no-compaction") - .required(false) - .takes_value(false) - .help("--no-compaction is deprecated, ledger compaction \ - after purge is disabled by default") - .conflicts_with("enable_compaction") - .hidden(hidden_unless_forced()) - ) - .arg( - Arg::with_name("enable_compaction") - .long("enable-compaction") - .required(false) - .takes_value(false) - .help("Perform ledger compaction after purge. Compaction \ - will optimize storage space, but may take a long \ - time to complete.") - .conflicts_with("no_compaction") - ) - .arg( - Arg::with_name("dead_slots_only") - .long("dead-slots-only") - .required(false) - .takes_value(false) - .help("Limit purging to dead slots only") - ) + .about("Delete a range of slots from the ledger") + .arg( + Arg::with_name("start_slot") + .index(1) + .value_name("SLOT") + .takes_value(true) + .required(true) + .help("Start slot to purge from (inclusive)"), + ) + .arg(Arg::with_name("end_slot").index(2).value_name("SLOT").help( + "Ending slot to stop purging (inclusive) \ + [default: the highest slot in the ledger]", + )) + .arg( + Arg::with_name("batch_size") + .long("batch-size") + .value_name("NUM") + .takes_value(true) + .default_value("1000") + .help("Removes at most BATCH_SIZE slots while purging in loop"), + ) + .arg( + Arg::with_name("no_compaction") + .long("no-compaction") + .required(false) + .takes_value(false) + .help( + "--no-compaction is deprecated, ledger compaction after purge is \ + disabled by default", + ) + .conflicts_with("enable_compaction") + .hidden(hidden_unless_forced()), + ) + .arg( + Arg::with_name("enable_compaction") + .long("enable-compaction") + .required(false) + .takes_value(false) + .help( + "Perform ledger compaction after purge. Compaction will optimize \ + storage space, but may take a long time to complete.", + ) + .conflicts_with("no_compaction"), + ) + .arg( + Arg::with_name("dead_slots_only") + .long("dead-slots-only") + .required(false) + .takes_value(false) + .help("Limit purging to dead slots only"), + ), ) .subcommand( SubCommand::with_name("list-roots") - .about("Output up to last root hashes and their \ - heights starting at the given block height") - .arg( - Arg::with_name("max_height") - .long("max-height") - .value_name("NUM") - .takes_value(true) - .help("Maximum block height") - ) - .arg( - Arg::with_name("start_root") - .long("start-root") - .value_name("NUM") - .takes_value(true) - .help("First root to start searching from") - ) - .arg( - Arg::with_name("slot_list") - .long("slot-list") - .value_name("FILENAME") - .required(false) - .takes_value(true) - .help("The location of the output YAML file. 
A list of \ - rollback slot heights and hashes will be written to the file") - ) - .arg( - Arg::with_name("num_roots") - .long("num-roots") - .value_name("NUM") - .takes_value(true) - .default_value(DEFAULT_ROOT_COUNT) - .required(false) - .help("Number of roots in the output"), - ) + .about( + "Output up to last root hashes and their heights starting at the \ + given block height", + ) + .arg( + Arg::with_name("max_height") + .long("max-height") + .value_name("NUM") + .takes_value(true) + .help("Maximum block height"), + ) + .arg( + Arg::with_name("start_root") + .long("start-root") + .value_name("NUM") + .takes_value(true) + .help("First root to start searching from"), + ) + .arg( + Arg::with_name("slot_list") + .long("slot-list") + .value_name("FILENAME") + .required(false) + .takes_value(true) + .help( + "The location of the output YAML file. A list of rollback slot \ + heights and hashes will be written to the file", + ), + ) + .arg( + Arg::with_name("num_roots") + .long("num-roots") + .value_name("NUM") + .takes_value(true) + .default_value(DEFAULT_ROOT_COUNT) + .required(false) + .help("Number of roots in the output"), + ), ) .subcommand( SubCommand::with_name("latest-optimistic-slots") - .about("Output up to the most recent optimistic \ - slots with their hashes and timestamps.") + .about( + "Output up to the most recent optimistic slots with their hashes \ + and timestamps.", + ) .arg( Arg::with_name("num_slots") .long("num-slots") @@ -2121,25 +2186,27 @@ fn main() { .long("exclude-vote-only-slots") .required(false) .help("Exclude slots that contain only votes from output"), - ) + ), ) .subcommand( SubCommand::with_name("repair-roots") - .about("Traverses the AncestorIterator backward from a last known root \ - to restore missing roots to the Root column") + .about( + "Traverses the AncestorIterator backward from a last known root to restore \ + missing roots to the Root column", + ) .arg( Arg::with_name("start_root") .long("before") .value_name("NUM") .takes_value(true) - .help("Recent root after the range to repair") + .help("Recent root after the range to repair"), ) .arg( Arg::with_name("end_root") .long("until") .value_name("NUM") .takes_value(true) - .help("Earliest slot to check for root repair") + .help("Earliest slot to check for root repair"), ) .arg( Arg::with_name("max_slots") @@ -2148,40 +2215,47 @@ fn main() { .takes_value(true) .default_value(DEFAULT_MAX_SLOTS_ROOT_REPAIR) .required(true) - .help("Override the maximum number of slots to check for root repair") - ) - ) - .subcommand( - SubCommand::with_name("analyze-storage") - .about("Output statistics in JSON format about \ - all column families in the ledger rocksdb") + .help("Override the maximum number of slots to check for root repair"), + ), ) + .subcommand(SubCommand::with_name("analyze-storage").about( + "Output statistics in JSON format about all column families in the ledger rocksdb", + )) .subcommand( SubCommand::with_name("compute-slot-cost") - .about("runs cost_model over the block at the given slots, \ - computes how expensive a block was based on cost_model") - .arg( - Arg::with_name("slots") - .index(1) - .value_name("SLOTS") - .validator(is_slot) - .multiple(true) - .takes_value(true) - .help("Slots that their blocks are computed for cost, default to all slots in ledger"), - ) + .about( + "runs cost_model over the block at the given slots, computes how expensive a \ + block was based on cost_model", + ) + .arg( + Arg::with_name("slots") + .index(1) + .value_name("SLOTS") + .validator(is_slot) + 
.multiple(true) + .takes_value(true) + .help( + "Slots that their blocks are computed for cost, default to all slots \ + in ledger", + ), + ), ) .subcommand( SubCommand::with_name("print-file-metadata") - .about("Print the metadata of the specified ledger-store file. \ - If no file name is specified, it will print the metadata of all ledger files.") - .arg( - Arg::with_name("file_name") - .long("file-name") - .takes_value(true) - .value_name("SST_FILE_NAME") - .help("The ledger file name (e.g. 011080.sst.) \ - If no file name is specified, it will print the metadata of all ledger files.") - ) + .about( + "Print the metadata of the specified ledger-store file. If no file name is \ + specified, it will print the metadata of all ledger files.", + ) + .arg( + Arg::with_name("file_name") + .long("file-name") + .takes_value(true) + .value_name("SST_FILE_NAME") + .help( + "The ledger file name (e.g. 011080.sst.) If no file name is \ + specified, it will print the metadata of all ledger files.", + ), + ), ) .program_subcommand() .get_matches(); @@ -2255,11 +2329,10 @@ fn main() { let _ = get_shred_storage_type( &target_db, &format!( - "No --target-db ledger at {:?} was detected, default \ - compaction (RocksLevel) will be used. Fifo compaction \ - can be enabled for a new ledger by manually creating \ - {BLOCKSTORE_DIRECTORY_ROCKS_FIFO} directory within \ - the specified --target_db directory.", + "No --target-db ledger at {:?} was detected, default compaction \ + (RocksLevel) will be used. Fifo compaction can be enabled for a new \ + ledger by manually creating {BLOCKSTORE_DIRECTORY_ROCKS_FIFO} directory \ + within the specified --target_db directory.", &target_db ), ); @@ -2708,7 +2781,11 @@ fn main() { } if write_bank_file { let working_bank = bank_forks.read().unwrap().working_bank(); - let _ = bank_hash_details::write_bank_hash_details_file(&working_bank); + bank_hash_details::write_bank_hash_details_file(&working_bank) + .map_err(|err| { + warn!("Unable to write bank hash_details file: {err}"); + }) + .ok(); } exit_signal.store(true, Ordering::Relaxed); system_monitor_service.join().unwrap(); @@ -2812,8 +2889,8 @@ fn main() { let minimum_stake_lamports = rent.minimum_balance(StakeStateV2::size_of()); if bootstrap_validator_stake_lamports < minimum_stake_lamports { eprintln!( - "Error: insufficient --bootstrap-validator-stake-lamports. \ - Minimum amount is {minimum_stake_lamports}" + "Error: insufficient --bootstrap-validator-stake-lamports. 
Minimum amount \ + is {minimum_stake_lamports}" ); exit(1); } @@ -2893,7 +2970,8 @@ fn main() { .is_none() { eprintln!( - "Error: snapshot slot {snapshot_slot} does not exist in blockstore or is not full.", + "Error: snapshot slot {snapshot_slot} does not exist in blockstore or is \ + not full.", ); exit(1); } @@ -2903,7 +2981,8 @@ fn main() { let ending_slot = value_t_or_exit!(arg_matches, "ending_slot", Slot); if ending_slot <= snapshot_slot { eprintln!( - "Error: ending_slot ({ending_slot}) must be greater than snapshot_slot ({snapshot_slot})" + "Error: ending_slot ({ending_slot}) must be greater than \ + snapshot_slot ({snapshot_slot})" ); exit(1); } @@ -3012,7 +3091,8 @@ fn main() { for address in feature_gates_to_deactivate { let mut account = bank.get_account(&address).unwrap_or_else(|| { eprintln!( - "Error: Feature-gate account does not exist, unable to deactivate it: {address}" + "Error: Feature-gate account does not exist, unable to \ + deactivate it: {address}" ); exit(1); }); @@ -3142,7 +3222,8 @@ fn main() { if let Some(warp_slot) = warp_slot { if warp_slot < minimum_warp_slot { eprintln!( - "Error: --warp-slot too close. Must be >= {minimum_warp_slot}" + "Error: --warp-slot too close. Must be >= \ + {minimum_warp_slot}" ); exit(1); } @@ -3195,13 +3276,17 @@ fn main() { if is_incremental { if starting_snapshot_hashes.is_none() { - eprintln!("Unable to create incremental snapshot without a base full snapshot"); + eprintln!( + "Unable to create incremental snapshot without a base full \ + snapshot" + ); exit(1); } let full_snapshot_slot = starting_snapshot_hashes.unwrap().full.0 .0; if bank.slot() <= full_snapshot_slot { eprintln!( - "Unable to create incremental snapshot: Slot must be greater than full snapshot slot. slot: {}, full snapshot slot: {}", + "Unable to create incremental snapshot: Slot must be greater \ + than full snapshot slot. slot: {}, full snapshot slot: {}", bank.slot(), full_snapshot_slot, ); @@ -3226,7 +3311,8 @@ fn main() { }); println!( - "Successfully created incremental snapshot for slot {}, hash {}, base slot: {}: {}", + "Successfully created incremental snapshot for slot {}, hash {}, \ + base slot: {}: {}", bank.slot(), bank.hash(), full_snapshot_slot, @@ -3261,12 +3347,23 @@ fn main() { let ending_epoch = bank.epoch_schedule().get_epoch(ending_slot.unwrap()); if starting_epoch != ending_epoch { - warn!("Minimized snapshot range crosses epoch boundary ({} to {}). Bank hashes after {} will not match replays from a full snapshot", - starting_epoch, ending_epoch, bank.epoch_schedule().get_last_slot_in_epoch(starting_epoch)); + warn!( + "Minimized snapshot range crosses epoch boundary ({} to \ + {}). Bank hashes after {} will not match replays from a \ + full snapshot", + starting_epoch, + ending_epoch, + bank.epoch_schedule() + .get_last_slot_in_epoch(starting_epoch) + ); } if minimize_snapshot_possibly_incomplete { - warn!("Minimized snapshot may be incomplete due to missing accounts from CPI'd address lookup table extensions. This may lead to mismatched bank hashes while replaying."); + warn!( + "Minimized snapshot may be incomplete due to missing \ + accounts from CPI'd address lookup table extensions. \ + This may lead to mismatched bank hashes while replaying." 
+ ); } } } @@ -3519,8 +3616,9 @@ fn main() { let old_cap = base_bank.set_capitalization(); let new_cap = base_bank.capitalization(); warn!( - "Skewing capitalization a bit to enable credits_auto_rewind as \ - requested: increasing {} from {} to {}", + "Skewing capitalization a bit to enable \ + credits_auto_rewind as requested: increasing {} from {} \ + to {}", feature_account_balance, old_cap, new_cap, ); assert_eq!( @@ -3954,13 +4052,14 @@ fn main() { exit(1); } info!( - "Purging data from slots {} to {} ({} slots) (do compaction: {}) (dead slot only: {})", - start_slot, - end_slot, - end_slot - start_slot, - perform_compaction, - dead_slots_only, - ); + "Purging data from slots {} to {} ({} slots) (do compaction: {}) (dead slot \ + only: {})", + start_slot, + end_slot, + end_slot - start_slot, + perform_compaction, + dead_slots_only, + ); let purge_from_blockstore = |start_slot, end_slot| { blockstore.purge_from_next_slots(start_slot, end_slot); if perform_compaction { @@ -4093,9 +4192,8 @@ fn main() { let num_slots = start_root - end_root - 1; // Adjust by one since start_root need not be checked if arg_matches.is_present("end_root") && num_slots > max_slots { eprintln!( - "Requested range {num_slots} too large, max {max_slots}. \ - Either adjust `--until` value, or pass a larger `--repair-limit` \ - to override the limit", + "Requested range {num_slots} too large, max {max_slots}. Either adjust \ + `--until` value, or pass a larger `--repair-limit` to override the limit", ); exit(1); } diff --git a/ledger-tool/src/output.rs b/ledger-tool/src/output.rs index 46c2a62f1bfb13..2c0db44372e1b2 100644 --- a/ledger-tool/src/output.rs +++ b/ledger-tool/src/output.rs @@ -1,7 +1,9 @@ use { - serde::Serialize, + serde::{Deserialize, Serialize}, solana_cli_output::{QuietDisplay, VerboseDisplay}, - std::fmt::{Display, Formatter, Result}, + solana_sdk::clock::Slot, + solana_transaction_status::EntrySummary, + std::fmt::{self, Display, Formatter, Result}, }; #[derive(Serialize, Debug, Default)] @@ -67,3 +69,52 @@ impl Display for SlotBounds<'_> { Ok(()) } } + +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct CliEntries { + pub entries: Vec, + #[serde(skip_serializing)] + pub slot: Slot, +} + +impl QuietDisplay for CliEntries {} +impl VerboseDisplay for CliEntries {} + +impl fmt::Display for CliEntries { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + writeln!(f, "Slot {}", self.slot)?; + for (i, entry) in self.entries.iter().enumerate() { + writeln!( + f, + " Entry {} - num_hashes: {}, hash: {}, transactions: {}, starting_transaction_index: {}", + i, + entry.num_hashes, + entry.hash, + entry.num_transactions, + entry.starting_transaction_index, + )?; + } + Ok(()) + } +} + +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct CliEntry { + num_hashes: u64, + hash: String, + num_transactions: u64, + starting_transaction_index: usize, +} + +impl From for CliEntry { + fn from(entry_summary: EntrySummary) -> Self { + Self { + num_hashes: entry_summary.num_hashes, + hash: entry_summary.hash.to_string(), + num_transactions: entry_summary.num_transactions, + starting_transaction_index: entry_summary.starting_transaction_index, + } + } +} diff --git a/ledger-tool/src/program.rs b/ledger-tool/src/program.rs index bc44841f818ba0..616dcabbc431f5 100644 --- a/ledger-tool/src/program.rs +++ b/ledger-tool/src/program.rs @@ -143,8 +143,8 @@ impl ProgramSubCommand for App<'_, '_> { fn program_subcommand(self) -> Self { let program_arg = 
Arg::with_name("PROGRAM") .help( - "Program file to use. This is either an ELF shared-object file to be executed, \ - or an assembly file to be assembled and executed.", + "Program file to use. This is either an ELF shared-object file to be executed, or \ + an assembly file to be assembled and executed.", ) .required(true) .index(1); diff --git a/ledger/src/bank_forks_utils.rs b/ledger/src/bank_forks_utils.rs index c75380581fc16d..993f6d2c2f7645 100644 --- a/ledger/src/bank_forks_utils.rs +++ b/ledger/src/bank_forks_utils.rs @@ -25,18 +25,50 @@ use { solana_sdk::genesis_config::GenesisConfig, std::{ path::PathBuf, - process, result, + result, sync::{atomic::AtomicBool, Arc, RwLock}, }, + thiserror::Error, }; +#[derive(Error, Debug)] +pub enum BankForksUtilsError { + #[error("accounts path(s) not present when booting from snapshot")] + AccountPathsNotPresent, + + #[error( + "failed to load bank: {source}, full snapshot archive: {full_snapshot_archive}, \ + incremental snapshot archive: {incremental_snapshot_archive}" + )] + BankFromSnapshotsArchive { + source: snapshot_utils::SnapshotError, + full_snapshot_archive: String, + incremental_snapshot_archive: String, + }, + + #[error( + "there is no local state to startup from. \ + Ensure --{flag} is NOT set to \"{value}\" and restart" + )] + NoBankSnapshotDirectory { flag: String, value: String }, + + #[error("failed to load bank: {source}, snapshot: {path}")] + BankFromSnapshotsDirectory { + source: snapshot_utils::SnapshotError, + path: PathBuf, + }, + + #[error("failed to process blockstore from root: {0}")] + ProcessBlockstoreFromRoot(#[source] BlockstoreProcessorError), +} + pub type LoadResult = result::Result< ( Arc>, LeaderScheduleCache, Option, ), - BlockstoreProcessorError, + BankForksUtilsError, >; /// Load the banks via genesis or a snapshot then processes all full blocks in blockstore @@ -68,8 +100,7 @@ pub fn load( entry_notification_sender, accounts_update_notifier, exit, - ); - + )?; blockstore_processor::process_blockstore_from_root( blockstore, &bank_forks, @@ -80,7 +111,9 @@ pub fn load( entry_notification_sender, &AbsRequestSender::default(), ) - .map(|_| (bank_forks, leader_schedule_cache, starting_snapshot_hashes)) + .map_err(BankForksUtilsError::ProcessBlockstoreFromRoot)?; + + Ok((bank_forks, leader_schedule_cache, starting_snapshot_hashes)) } #[allow(clippy::too_many_arguments)] @@ -95,11 +128,7 @@ pub fn load_bank_forks( entry_notification_sender: Option<&EntryNotifierSender>, accounts_update_notifier: Option, exit: Arc, -) -> ( - Arc>, - LeaderScheduleCache, - Option, -) { +) -> LoadResult { fn get_snapshots_to_load( snapshot_config: Option<&SnapshotConfig>, ) -> Option<( @@ -157,18 +186,9 @@ pub fn load_bank_forks( process_options, accounts_update_notifier, exit, - ); + )?; (bank_forks, Some(starting_snapshot_hashes)) } else { - let maybe_filler_accounts = process_options - .accounts_db_config - .as_ref() - .map(|config| config.filler_accounts_config.count > 0); - - if let Some(true) = maybe_filler_accounts { - panic!("filler accounts specified, but not loading from snapshot"); - } - info!("Processing ledger from genesis"); let bank_forks = blockstore_processor::process_blockstore_for_bank_0( genesis_config, @@ -202,7 +222,7 @@ pub fn load_bank_forks( .for_each(|hard_fork_slot| root_bank.register_hard_fork(*hard_fork_slot)); } - (bank_forks, leader_schedule_cache, starting_snapshot_hashes) + Ok((bank_forks, leader_schedule_cache, starting_snapshot_hashes)) } #[allow(clippy::too_many_arguments)] @@ -216,11 +236,10 @@ 
fn bank_forks_from_snapshot( process_options: &ProcessOptions, accounts_update_notifier: Option, exit: Arc, -) -> (Arc>, StartingSnapshotHashes) { +) -> Result<(Arc>, StartingSnapshotHashes), BankForksUtilsError> { // Fail hard here if snapshot fails to load, don't silently continue if account_paths.is_empty() { - error!("Account paths not present when booting from snapshot"); - process::exit(1); + return Err(BankForksUtilsError::AccountPathsNotPresent); } let latest_snapshot_archive_slot = std::cmp::max( @@ -270,29 +289,21 @@ fn bank_forks_from_snapshot( accounts_update_notifier, exit, ) - .unwrap_or_else(|err| { - error!( - "Failed to load bank: {err} \ - \nfull snapshot archive: {} \ - \nincremental snapshot archive: {}", - full_snapshot_archive_info.path().display(), - incremental_snapshot_archive_info - .as_ref() - .map(|archive| archive.path().display().to_string()) - .unwrap_or("none".to_string()), - ); - process::exit(1); - }); + .map_err(|err| BankForksUtilsError::BankFromSnapshotsArchive { + source: err, + full_snapshot_archive: full_snapshot_archive_info.path().display().to_string(), + incremental_snapshot_archive: incremental_snapshot_archive_info + .as_ref() + .map(|archive| archive.path().display().to_string()) + .unwrap_or("none".to_string()), + })?; bank } else { - let Some(bank_snapshot) = latest_bank_snapshot else { - error!( - "There is no local state to startup from. Ensure --{} is *not* set to \"{}\" and restart.", - use_snapshot_archives_at_startup::cli::LONG_ARG, - UseSnapshotArchivesAtStartup::Never.to_string(), - ); - process::exit(1); - }; + let bank_snapshot = + latest_bank_snapshot.ok_or_else(|| BankForksUtilsError::NoBankSnapshotDirectory { + flag: use_snapshot_archives_at_startup::cli::LONG_ARG.to_string(), + value: UseSnapshotArchivesAtStartup::Never.to_string(), + })?; // If a newer snapshot archive was downloaded, it is possible that its slot is // higher than the local bank we will load. Did the user intend for this? 
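
Editor's note: the bank_forks_utils hunks above replace in-library error!/process::exit calls with the new typed BankForksUtilsError, so deciding how to fail now belongs to the caller. Below is a minimal caller-side sketch, assuming the solana_ledger::bank_forks_utils path introduced by this patch; the helper name report_load_error is hypothetical and not part of the change.

    use solana_ledger::bank_forks_utils::BankForksUtilsError;

    // Hypothetical helper (not part of this patch): map the typed load error to
    // an exit code at the binary boundary instead of exiting inside the library.
    fn report_load_error(err: &BankForksUtilsError) -> i32 {
        match err {
            BankForksUtilsError::AccountPathsNotPresent => {
                eprintln!("account paths must be provided when booting from a snapshot");
                1
            }
            // The remaining variants carry their context through thiserror's
            // Display implementation, so printing them directly is enough.
            other => {
                eprintln!("failed to load bank forks: {other}");
                1
            }
        }
    }
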
@@ -327,14 +338,10 @@ fn bank_forks_from_snapshot( accounts_update_notifier, exit, ) - .unwrap_or_else(|err| { - error!( - "Failed to load bank: {err} \ - \nsnapshot: {}", - bank_snapshot.snapshot_path().display(), - ); - process::exit(1); - }); + .map_err(|err| BankForksUtilsError::BankFromSnapshotsDirectory { + source: err, + path: bank_snapshot.snapshot_path(), + })?; bank }; @@ -358,5 +365,5 @@ fn bank_forks_from_snapshot( incremental: incremental_snapshot_hash, }; - (BankForks::new_rw_arc(bank), starting_snapshot_hashes) + Ok((BankForks::new_rw_arc(bank), starting_snapshot_hashes)) } diff --git a/ledger/src/bigtable_upload.rs b/ledger/src/bigtable_upload.rs index be28ee8a0703d8..2a076d46be7aca 100644 --- a/ledger/src/bigtable_upload.rs +++ b/ledger/src/bigtable_upload.rs @@ -178,10 +178,10 @@ pub async fn upload_confirmed_blocks( break; } - let _ = match blockstore.get_rooted_block(slot, true) { - Ok(confirmed_block) => { + let _ = match blockstore.get_rooted_block_with_entries(slot, true) { + Ok(confirmed_block_with_entries) => { num_blocks_read += 1; - sender.send((slot, Some(confirmed_block))) + sender.send((slot, Some(confirmed_block_with_entries))) } Err(err) => { warn!( @@ -227,7 +227,8 @@ pub async fn upload_confirmed_blocks( Some(confirmed_block) => { let bt = bigtable.clone(); Some(tokio::spawn(async move { - bt.upload_confirmed_block(slot, confirmed_block).await + bt.upload_confirmed_block_with_entries(slot, confirmed_block) + .await })) } }); diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index ae27fa56c3db30..3e7c8b9286fea3 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -464,7 +464,12 @@ impl Blockstore { } fn erasure_meta(&self, erasure_set: ErasureSetId) -> Result> { - self.erasure_meta_cf.get(erasure_set.store_key()) + let (slot, fec_set_index) = erasure_set.store_key(); + self.erasure_meta_cf.get((slot, u64::from(fec_set_index))) + } + + fn merkle_root_meta(&self, erasure_set: ErasureSetId) -> Result> { + self.merkle_root_meta_cf.get(erasure_set.store_key()) } /// Check whether the specified slot is an orphan slot which does not @@ -801,6 +806,9 @@ impl Blockstore { /// - [`cf::ErasureMeta`]: the associated ErasureMeta of the coding and data /// shreds inside `shreds` will be updated and committed to /// `cf::ErasureMeta`. + /// - [`cf::MerkleRootMeta`]: the associated MerkleRootMeta of the coding and data + /// shreds inside `shreds` will be updated and committed to + /// `cf::MerkleRootMeta`. /// - [`cf::Index`]: stores (slot id, index to the index_working_set_entry) /// pair to the `cf::Index` column family for each index_working_set_entry /// which insert did occur in this function call. 
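
Editor's note: the doc-comment hunk above adds cf::MerkleRootMeta to the column families updated during shred insertion. For orientation, the record implied by the accessors exercised in the new tests later in this patch (merkle_root(), first_received_shred_index(), first_received_shred_type()) looks roughly like the sketch below; the real MerkleRootMeta is defined elsewhere in the ledger crate and may differ in detail.

    // Sketch only, not the actual definition: the column is keyed by
    // (slot, fec_set_index) and remembers the merkle root plus the first shred
    // observed for that erasure set.
    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    enum ShredKind {
        Data,
        Code,
    }

    #[derive(Clone, Debug)]
    struct MerkleRootMetaSketch {
        merkle_root: Option<[u8; 32]>,   // None for legacy, non-merkle shred variants
        first_received_shred_index: u32, // index of the first shred stored for the set
        first_received_shred_type: ShredKind,
    }
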
@@ -843,6 +851,7 @@ impl Blockstore { let mut just_inserted_shreds = HashMap::with_capacity(shreds.len()); let mut erasure_metas = HashMap::new(); + let mut merkle_root_metas = HashMap::new(); let mut slot_meta_working_set = HashMap::new(); let mut index_working_set = HashMap::new(); let mut duplicate_shreds = vec![]; @@ -862,6 +871,7 @@ impl Blockstore { match self.check_insert_data_shred( shred, &mut erasure_metas, + &mut merkle_root_metas, &mut index_working_set, &mut slot_meta_working_set, &mut write_batch, @@ -899,6 +909,7 @@ impl Blockstore { self.check_insert_coding_shred( shred, &mut erasure_metas, + &mut merkle_root_metas, &mut index_working_set, &mut write_batch, &mut just_inserted_shreds, @@ -945,6 +956,7 @@ impl Blockstore { match self.check_insert_data_shred( shred.clone(), &mut erasure_metas, + &mut merkle_root_metas, &mut index_working_set, &mut slot_meta_working_set, &mut write_batch, @@ -1007,7 +1019,12 @@ impl Blockstore { )?; for (erasure_set, erasure_meta) in erasure_metas { - write_batch.put::(erasure_set.store_key(), &erasure_meta)?; + let (slot, fec_set_index) = erasure_set.store_key(); + write_batch.put::((slot, u64::from(fec_set_index)), &erasure_meta)?; + } + + for (erasure_set, merkle_root_meta) in merkle_root_metas { + write_batch.put::(erasure_set.store_key(), &merkle_root_meta)?; } for (&slot, index_working_set_entry) in index_working_set.iter() { @@ -1167,6 +1184,7 @@ impl Blockstore { &self, shred: Shred, erasure_metas: &mut HashMap, + merkle_root_metas: &mut HashMap, index_working_set: &mut HashMap, write_batch: &mut WriteBatch, just_received_shreds: &mut HashMap, @@ -1183,10 +1201,16 @@ impl Blockstore { self.get_index_meta_entry(slot, index_working_set, index_meta_time_us); let index_meta = &mut index_meta_working_set_entry.index; + let erasure_set = shred.erasure_set(); + + if let HashMapEntry::Vacant(entry) = merkle_root_metas.entry(erasure_set) { + if let Some(meta) = self.merkle_root_meta(erasure_set).unwrap() { + entry.insert(meta); + } + } // This gives the index of first coding shred in this FEC block // So, all coding shreds in a given FEC block will have the same set index - if !is_trusted { if index_meta.coding().contains(shred_index) { metrics.num_coding_shreds_exists += 1; @@ -1200,7 +1224,6 @@ impl Blockstore { } } - let erasure_set = shred.erasure_set(); let erasure_meta = erasure_metas.entry(erasure_set).or_insert_with(|| { self.erasure_meta(erasure_set) .expect("Expect database get to succeed") @@ -1263,6 +1286,10 @@ impl Blockstore { if result { index_meta_working_set_entry.did_insert_occur = true; metrics.num_inserted += 1; + + merkle_root_metas + .entry(erasure_set) + .or_insert(MerkleRootMeta::from_shred(&shred)); } if let HashMapEntry::Vacant(entry) = just_received_shreds.entry(shred.id()) { @@ -1311,8 +1338,8 @@ impl Blockstore { /// /// The resulting `write_batch` may include updates to [`cf::DeadSlots`] /// and [`cf::ShredData`]. Note that it will also update the in-memory copy - /// of `erasure_metas` and `index_working_set`, which will later be - /// used to update other column families such as [`cf::ErasureMeta`] and + /// of `erasure_metas`, `merkle_root_metas`, and `index_working_set`, which will + /// later be used to update other column families such as [`cf::ErasureMeta`] and /// [`cf::Index`]. /// /// Arguments: @@ -1320,6 +1347,9 @@ impl Blockstore { /// - `erasure_metas`: the in-memory hash-map that maintains the dirty /// copy of the erasure meta. 
It will later be written to /// `cf::ErasureMeta` in insert_shreds_handle_duplicate(). + /// - `merkle_root_metas`: the in-memory hash-map that maintains the dirty + /// copy of the merkle root meta. It will later be written to + /// `cf::MerkleRootMeta` in `insert_shreds_handle_duplicate()`. /// - `index_working_set`: the in-memory hash-map that maintains the /// dirty copy of the index meta. It will later be written to /// `cf::Index` in insert_shreds_handle_duplicate(). @@ -1343,6 +1373,7 @@ impl Blockstore { &self, shred: Shred, erasure_metas: &mut HashMap, + merkle_root_metas: &mut HashMap, index_working_set: &mut HashMap, slot_meta_working_set: &mut HashMap, write_batch: &mut WriteBatch, @@ -1368,6 +1399,12 @@ impl Blockstore { ); let slot_meta = &mut slot_meta_entry.new_slot_meta.borrow_mut(); + let erasure_set = shred.erasure_set(); + if let HashMapEntry::Vacant(entry) = merkle_root_metas.entry(erasure_set) { + if let Some(meta) = self.merkle_root_meta(erasure_set).unwrap() { + entry.insert(meta); + } + } if !is_trusted { if Self::is_data_shred_present(&shred, slot_meta, index_meta.data()) { @@ -1402,7 +1439,6 @@ impl Blockstore { } } - let erasure_set = shred.erasure_set(); let newly_completed_data_sets = self.insert_data_shred( slot_meta, index_meta.data_mut(), @@ -1410,6 +1446,9 @@ impl Blockstore { write_batch, shred_source, )?; + merkle_root_metas + .entry(erasure_set) + .or_insert(MerkleRootMeta::from_shred(&shred)); just_inserted_shreds.insert(shred.id(), shred); index_meta_working_set_entry.did_insert_occur = true; slot_meta_entry.did_insert_occur = true; @@ -2962,18 +3001,18 @@ impl Blockstore { if let Ok(entries) = self.get_slot_entries(slot, 0) { entries.into_par_iter().for_each(|entry| { entry.transactions.into_iter().for_each(|tx| { + if let Some(lookups) = tx.message.address_table_lookups() { + add_to_set( + &lookup_tables, + lookups.iter().map(|lookup| &lookup.account_key), + ); + } // Attempt to verify transaction and load addresses from the current bank, // or manually scan the transaction for addresses if the transaction. 
if let Ok(tx) = bank.fully_verify_transaction(tx.clone()) { add_to_set(&result, tx.message().account_keys().iter()); } else { add_to_set(&result, tx.message.static_account_keys()); - if let Some(lookups) = tx.message.address_table_lookups() { - add_to_set( - &lookup_tables, - lookups.iter().map(|lookup| &lookup.account_key), - ); - } let tx = SanitizedVersionedTransaction::try_from(tx) .expect("transaction failed to sanitize"); @@ -2993,6 +3032,7 @@ impl Blockstore { lookup_tables.into_par_iter().for_each(|lookup_table_key| { bank.get_account(&lookup_table_key) .map(|lookup_table_account| { + add_to_set(&result, &[lookup_table_key]); AddressLookupTable::deserialize(lookup_table_account.data()).map(|t| { add_to_set(&result, &t.addresses[..]); }) @@ -6734,6 +6774,374 @@ pub mod tests { ),); } + #[test] + fn test_merkle_root_metas_coding() { + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()).unwrap(); + + let parent_slot = 0; + let slot = 1; + let index = 0; + let (_, coding_shreds, _) = setup_erasure_shreds(slot, parent_slot, 10); + let coding_shred = coding_shreds[index as usize].clone(); + + let mut erasure_metas = HashMap::new(); + let mut merkle_root_metas = HashMap::new(); + let mut index_working_set = HashMap::new(); + let mut just_received_shreds = HashMap::new(); + let mut write_batch = blockstore.db.batch().unwrap(); + let mut index_meta_time_us = 0; + assert!(blockstore.check_insert_coding_shred( + coding_shred.clone(), + &mut erasure_metas, + &mut merkle_root_metas, + &mut index_working_set, + &mut write_batch, + &mut just_received_shreds, + &mut index_meta_time_us, + &mut vec![], + false, + ShredSource::Turbine, + &mut BlockstoreInsertionMetrics::default(), + )); + + assert_eq!(merkle_root_metas.len(), 1); + assert_eq!( + merkle_root_metas + .get(&coding_shred.erasure_set()) + .unwrap() + .merkle_root(), + coding_shred.merkle_root().ok(), + ); + assert_eq!( + merkle_root_metas + .get(&coding_shred.erasure_set()) + .unwrap() + .first_received_shred_index(), + index + ); + assert_eq!( + merkle_root_metas + .get(&coding_shred.erasure_set()) + .unwrap() + .first_received_shred_type(), + ShredType::Code, + ); + + for (erasure_set, merkle_root_meta) in merkle_root_metas { + write_batch + .put::(erasure_set.store_key(), &merkle_root_meta) + .unwrap(); + } + blockstore.db.write(write_batch).unwrap(); + + // Add a shred with different merkle root and index + let (_, coding_shreds, _) = setup_erasure_shreds(slot, parent_slot, 10); + let new_coding_shred = coding_shreds[(index + 1) as usize].clone(); + + erasure_metas.clear(); + index_working_set.clear(); + just_received_shreds.clear(); + let mut merkle_root_metas = HashMap::new(); + let mut write_batch = blockstore.db.batch().unwrap(); + let mut duplicates = vec![]; + + assert!(blockstore.check_insert_coding_shred( + new_coding_shred.clone(), + &mut erasure_metas, + &mut merkle_root_metas, + &mut index_working_set, + &mut write_batch, + &mut just_received_shreds, + &mut index_meta_time_us, + &mut duplicates, + false, + ShredSource::Turbine, + &mut BlockstoreInsertionMetrics::default(), + )); + + // Verify that we still have the merkle root meta from the original shred + assert_eq!(merkle_root_metas.len(), 1); + assert_eq!( + merkle_root_metas + .get(&coding_shred.erasure_set()) + .unwrap() + .merkle_root(), + coding_shred.merkle_root().ok() + ); + assert_eq!( + merkle_root_metas + .get(&coding_shred.erasure_set()) + .unwrap() + .first_received_shred_index(), + index + ); + + // 
Blockstore should also have the merkle root meta of the original shred + assert_eq!( + blockstore + .merkle_root_meta(coding_shred.erasure_set()) + .unwrap() + .unwrap() + .merkle_root(), + coding_shred.merkle_root().ok() + ); + assert_eq!( + blockstore + .merkle_root_meta(coding_shred.erasure_set()) + .unwrap() + .unwrap() + .first_received_shred_index(), + index + ); + + // Add a shred from different fec set + let new_index = index + 31; + let (_, coding_shreds, _) = + setup_erasure_shreds_with_index(slot, parent_slot, 10, new_index); + let new_coding_shred = coding_shreds[0].clone(); + + assert!(blockstore.check_insert_coding_shred( + new_coding_shred.clone(), + &mut erasure_metas, + &mut merkle_root_metas, + &mut index_working_set, + &mut write_batch, + &mut just_received_shreds, + &mut index_meta_time_us, + &mut vec![], + false, + ShredSource::Turbine, + &mut BlockstoreInsertionMetrics::default(), + )); + + // Verify that we still have the merkle root meta for the original shred + // and the new shred + assert_eq!(merkle_root_metas.len(), 2); + assert_eq!( + merkle_root_metas + .get(&coding_shred.erasure_set()) + .unwrap() + .merkle_root(), + coding_shred.merkle_root().ok() + ); + assert_eq!( + merkle_root_metas + .get(&coding_shred.erasure_set()) + .unwrap() + .first_received_shred_index(), + index + ); + assert_eq!( + merkle_root_metas + .get(&new_coding_shred.erasure_set()) + .unwrap() + .merkle_root(), + new_coding_shred.merkle_root().ok() + ); + assert_eq!( + merkle_root_metas + .get(&new_coding_shred.erasure_set()) + .unwrap() + .first_received_shred_index(), + new_index + ); + } + + #[test] + fn test_merkle_root_metas_data() { + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()).unwrap(); + + let parent_slot = 0; + let slot = 1; + let index = 11; + let fec_set_index = 11; + let (data_shreds, _, _) = + setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index); + let data_shred = data_shreds[0].clone(); + + let mut erasure_metas = HashMap::new(); + let mut merkle_root_metas = HashMap::new(); + let mut index_working_set = HashMap::new(); + let mut just_received_shreds = HashMap::new(); + let mut slot_meta_working_set = HashMap::new(); + let mut write_batch = blockstore.db.batch().unwrap(); + let mut index_meta_time_us = 0; + blockstore + .check_insert_data_shred( + data_shred.clone(), + &mut erasure_metas, + &mut merkle_root_metas, + &mut index_working_set, + &mut slot_meta_working_set, + &mut write_batch, + &mut just_received_shreds, + &mut index_meta_time_us, + false, + &mut vec![], + None, + ShredSource::Turbine, + ) + .unwrap(); + + assert_eq!(merkle_root_metas.len(), 1); + assert_eq!( + merkle_root_metas + .get(&data_shred.erasure_set()) + .unwrap() + .merkle_root(), + data_shred.merkle_root().ok() + ); + assert_eq!( + merkle_root_metas + .get(&data_shred.erasure_set()) + .unwrap() + .first_received_shred_index(), + index + ); + assert_eq!( + merkle_root_metas + .get(&data_shred.erasure_set()) + .unwrap() + .first_received_shred_type(), + ShredType::Data, + ); + + for (erasure_set, merkle_root_meta) in merkle_root_metas { + write_batch + .put::(erasure_set.store_key(), &merkle_root_meta) + .unwrap(); + } + blockstore.db.write(write_batch).unwrap(); + + // Add a shred with different merkle root and index + let (data_shreds, _, _) = + setup_erasure_shreds_with_index(slot, parent_slot, 10, fec_set_index); + let new_data_shred = data_shreds[1].clone(); + + erasure_metas.clear(); + index_working_set.clear(); + 
just_received_shreds.clear(); + let mut merkle_root_metas = HashMap::new(); + let mut write_batch = blockstore.db.batch().unwrap(); + let mut duplicates = vec![]; + + assert!(blockstore + .check_insert_data_shred( + new_data_shred.clone(), + &mut erasure_metas, + &mut merkle_root_metas, + &mut index_working_set, + &mut slot_meta_working_set, + &mut write_batch, + &mut just_received_shreds, + &mut index_meta_time_us, + false, + &mut duplicates, + None, + ShredSource::Turbine, + ) + .is_ok()); + + // Verify that we still have the merkle root meta from the original shred + assert_eq!(merkle_root_metas.len(), 1); + assert_eq!( + merkle_root_metas + .get(&data_shred.erasure_set()) + .unwrap() + .merkle_root(), + data_shred.merkle_root().ok() + ); + assert_eq!( + merkle_root_metas + .get(&data_shred.erasure_set()) + .unwrap() + .first_received_shred_index(), + index + ); + + // Blockstore should also have the merkle root meta of the original shred + assert_eq!( + blockstore + .merkle_root_meta(data_shred.erasure_set()) + .unwrap() + .unwrap() + .merkle_root(), + data_shred.merkle_root().ok() + ); + assert_eq!( + blockstore + .merkle_root_meta(data_shred.erasure_set()) + .unwrap() + .unwrap() + .first_received_shred_index(), + index + ); + + // Add a shred from different fec set + let new_index = fec_set_index + 31; + let new_data_shred = Shred::new_from_data( + slot, + new_index, + 1, // parent_offset + &[3, 3, 3], // data + ShredFlags::empty(), + 0, // reference_tick, + 0, // version + fec_set_index + 30, + ); + + blockstore + .check_insert_data_shred( + new_data_shred.clone(), + &mut erasure_metas, + &mut merkle_root_metas, + &mut index_working_set, + &mut slot_meta_working_set, + &mut write_batch, + &mut just_received_shreds, + &mut index_meta_time_us, + false, + &mut vec![], + None, + ShredSource::Turbine, + ) + .unwrap(); + + // Verify that we still have the merkle root meta for the original shred + // and the new shred + assert_eq!(merkle_root_metas.len(), 2); + assert_eq!( + merkle_root_metas + .get(&data_shred.erasure_set()) + .unwrap() + .merkle_root(), + data_shred.merkle_root().ok() + ); + assert_eq!( + merkle_root_metas + .get(&data_shred.erasure_set()) + .unwrap() + .first_received_shred_index(), + index + ); + assert_eq!( + merkle_root_metas + .get(&new_data_shred.erasure_set()) + .unwrap() + .merkle_root(), + new_data_shred.merkle_root().ok() + ); + assert_eq!( + merkle_root_metas + .get(&new_data_shred.erasure_set()) + .unwrap() + .first_received_shred_index(), + new_index + ); + } + #[test] fn test_check_insert_coding_shred() { let ledger_path = get_tmp_ledger_path_auto_delete!(); @@ -6752,6 +7160,7 @@ pub mod tests { ); let mut erasure_metas = HashMap::new(); + let mut merkle_root_metas = HashMap::new(); let mut index_working_set = HashMap::new(); let mut just_received_shreds = HashMap::new(); let mut write_batch = blockstore.db.batch().unwrap(); @@ -6759,6 +7168,7 @@ pub mod tests { assert!(blockstore.check_insert_coding_shred( coding_shred.clone(), &mut erasure_metas, + &mut merkle_root_metas, &mut index_working_set, &mut write_batch, &mut just_received_shreds, @@ -6774,6 +7184,7 @@ pub mod tests { assert!(!blockstore.check_insert_coding_shred( coding_shred.clone(), &mut erasure_metas, + &mut merkle_root_metas, &mut index_working_set, &mut write_batch, &mut just_received_shreds, @@ -9261,6 +9672,15 @@ pub mod tests { slot: u64, parent_slot: u64, num_entries: u64, + ) -> (Vec, Vec, Arc) { + setup_erasure_shreds_with_index(slot, parent_slot, num_entries, 0) + } + + fn 
setup_erasure_shreds_with_index( + slot: u64, + parent_slot: u64, + num_entries: u64, + fec_set_index: u32, ) -> (Vec, Vec, Arc) { let entries = make_slot_entries_with_transactions(num_entries); let leader_keypair = Arc::new(Keypair::new()); @@ -9268,10 +9688,10 @@ pub mod tests { let (data_shreds, coding_shreds) = shredder.entries_to_shreds( &leader_keypair, &entries, - true, // is_last_in_slot - 0, // next_shred_index - 0, // next_code_index - true, // merkle_variant + true, // is_last_in_slot + fec_set_index, // next_shred_index + fec_set_index, // next_code_index + true, // merkle_variant &ReedSolomonCache::default(), &mut ProcessShredsStats::default(), ); diff --git a/ledger/src/blockstore_db.rs b/ledger/src/blockstore_db.rs index 0b2b14445539d6..18ba491ea34bd1 100644 --- a/ledger/src/blockstore_db.rs +++ b/ledger/src/blockstore_db.rs @@ -35,7 +35,7 @@ use { }, solana_storage_proto::convert::generated, std::{ - collections::HashMap, + collections::{HashMap, HashSet}, ffi::{CStr, CString}, fs, marker::PhantomData, @@ -419,49 +419,52 @@ impl Rocks { } let oldest_slot = OldestSlot::default(); let column_options = options.column_options.clone(); + let cf_descriptors = Self::cf_descriptors(path, &options, &oldest_slot); // Open the database let db = match access_type { - AccessType::Primary | AccessType::PrimaryForMaintenance => Rocks { - db: DB::open_cf_descriptors( - &db_options, - path, - Self::cf_descriptors(&options, &oldest_slot), - )?, - access_type, - oldest_slot, - column_options, - write_batch_perf_status: PerfSamplingStatus::default(), - }, + AccessType::Primary | AccessType::PrimaryForMaintenance => { + DB::open_cf_descriptors(&db_options, path, cf_descriptors)? + } AccessType::Secondary => { let secondary_path = path.join("solana-secondary"); - info!( - "Opening Rocks with secondary (read only) access at: {:?}", - secondary_path + "Opening Rocks with secondary (read only) access at: {secondary_path:?}. \ + This secondary access could temporarily degrade other accesses, such as \ + by solana-validator" ); - info!("This secondary access could temporarily degrade other accesses, such as by solana-validator"); - - Rocks { - db: DB::open_cf_descriptors_as_secondary( - &db_options, - path, - &secondary_path, - Self::cf_descriptors(&options, &oldest_slot), - )?, - access_type, - oldest_slot, - column_options, - write_batch_perf_status: PerfSamplingStatus::default(), - } + DB::open_cf_descriptors_as_secondary( + &db_options, + path, + &secondary_path, + cf_descriptors, + )? } }; - db.configure_compaction(); + let rocks = Rocks { + db, + access_type, + oldest_slot, + column_options, + write_batch_perf_status: PerfSamplingStatus::default(), + }; + + rocks.configure_compaction(); - Ok(db) + Ok(rocks) } + /// Create the column family (CF) descriptors necessary to open the database. + /// + /// In order to open a RocksDB database with Primary access, all columns must be opened. So, + /// in addition to creating descriptors for all of the expected columns, also create + /// descriptors for columns that were discovered but are otherwise unknown to the software. + /// + /// One case where columns could be unknown is if a RocksDB database is modified with a newer + /// software version that adds a new column, and then also opened with an older version that + /// did not have knowledge of that new column. 
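
Editor's note: the cf_descriptors doc comment above explains why a Primary open must also create descriptors for columns the running software does not recognize. A standalone illustration of that behavior follows, using only the rust-rocksdb calls that appear in this hunk and assuming the rocksdb crate is available; the helper name descriptors_for_unknown_cfs is hypothetical.

    use rocksdb::{ColumnFamilyDescriptor, Options, DB};

    // Hypothetical helper: build bare-bones descriptors for on-disk column
    // families that the software does not recognize, so a Primary open succeeds
    // without spending resources on columns it will never read or write.
    fn descriptors_for_unknown_cfs(
        path: &std::path::Path,
        known: &[&str],
    ) -> Vec<ColumnFamilyDescriptor> {
        // Detection failure is not fatal (e.g. a brand-new ledger directory).
        let detected = DB::list_cf(&Options::default(), path).unwrap_or_default();
        detected
            .into_iter()
            .filter(|name| name != "default" && !known.contains(&name.as_str()))
            .map(|name| {
                let mut options = Options::default();
                options.set_write_buffer_size(1024 * 1024); // keep allocations small
                options.set_disable_auto_compactions(true); // never modify the column
                ColumnFamilyDescriptor::new(name, options)
            })
            .collect()
    }
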
fn cf_descriptors( + path: &Path, options: &BlockstoreOptions, oldest_slot: &OldestSlot, ) -> Vec { @@ -469,7 +472,7 @@ impl Rocks { let (cf_descriptor_shred_data, cf_descriptor_shred_code) = new_cf_descriptor_pair_shreds::(options, oldest_slot); - vec![ + let mut cf_descriptors = vec![ new_cf_descriptor::(options, oldest_slot), new_cf_descriptor::(options, oldest_slot), new_cf_descriptor::(options, oldest_slot), @@ -491,7 +494,52 @@ impl Rocks { new_cf_descriptor::(options, oldest_slot), new_cf_descriptor::(options, oldest_slot), new_cf_descriptor::(options, oldest_slot), - ] + ]; + + // If the access type is Secondary, we don't need to open all of the + // columns so we can just return immediately. + match options.access_type { + AccessType::Secondary => { + return cf_descriptors; + } + AccessType::Primary | AccessType::PrimaryForMaintenance => {} + } + + // Attempt to detect the column families that are present. It is not a + // fatal error if we cannot, for example, if the Blockstore is brand + // new and will be created by the call to Rocks::open(). + let detected_cfs = match DB::list_cf(&Options::default(), path) { + Ok(detected_cfs) => detected_cfs, + Err(err) => { + warn!("Unable to detect Rocks columns: {err:?}"); + vec![] + } + }; + // The default column is handled automatically, we don't need to create + // a descriptor for it + const DEFAULT_COLUMN_NAME: &str = "default"; + let known_cfs: HashSet<_> = cf_descriptors + .iter() + .map(|cf_descriptor| cf_descriptor.name().to_string()) + .chain(std::iter::once(DEFAULT_COLUMN_NAME.to_string())) + .collect(); + detected_cfs.iter().for_each(|cf_name| { + if known_cfs.get(cf_name.as_str()).is_none() { + info!("Detected unknown column {cf_name}, opening column with basic options"); + // This version of the software was unaware of the column, so + // it is fair to assume that we will not attempt to read or + // write the column. So, set some bare bones settings to avoid + // using extra resources on this unknown column. + let mut options = Options::default(); + // Lower the default to avoid unnecessary allocations + options.set_write_buffer_size(1024 * 1024); + // Disable compactions to avoid any modifications to the column + options.set_disable_auto_compactions(true); + cf_descriptors.push(ColumnFamilyDescriptor::new(cf_name, options)); + } + }); + + cf_descriptors } fn columns() -> Vec<&'static str> { @@ -719,10 +767,6 @@ impl Rocks { pub trait Column { type Index; - fn key_size() -> usize { - std::mem::size_of::() - } - fn key(index: Self::Index) -> Vec; fn index(key: &[u8]) -> Self::Index; // This trait method is primarily used by `Database::delete_range_cf()`, and is therefore only @@ -2175,7 +2219,9 @@ fn should_enable_compression() -> bool { #[cfg(test)] pub mod tests { - use {super::*, crate::blockstore_db::columns::ShredData}; + use { + super::*, crate::blockstore_db::columns::ShredData, std::path::PathBuf, tempfile::tempdir, + }; #[test] fn test_compaction_filter() { @@ -2228,6 +2274,7 @@ pub mod tests { #[test] fn test_cf_names_and_descriptors_equal_length() { + let path = PathBuf::default(); let options = BlockstoreOptions::default(); let oldest_slot = OldestSlot::default(); // The names and descriptors don't need to be in the same order for our use cases; @@ -2235,7 +2282,7 @@ pub mod tests { // should update both lists. 
assert_eq!( Rocks::columns().len(), - Rocks::cf_descriptors(&options, &oldest_slot).len() + Rocks::cf_descriptors(&path, &options, &oldest_slot).len() ); } @@ -2260,6 +2307,49 @@ pub mod tests { assert!(!should_enable_cf_compaction("something else")); } + #[test] + fn test_open_unknown_columns() { + solana_logger::setup(); + + let temp_dir = tempdir().unwrap(); + let db_path = temp_dir.path(); + + // Open with Primary to create the new database + { + let options = BlockstoreOptions { + access_type: AccessType::Primary, + enforce_ulimit_nofile: false, + ..BlockstoreOptions::default() + }; + let mut rocks = Rocks::open(db_path, options).unwrap(); + + // Introduce a new column that will not be known + rocks + .db + .create_cf("new_column", &Options::default()) + .unwrap(); + } + + // Opening with either Secondary or Primary access should succeed, + // even though the Rocks code is unaware of "new_column" + { + let options = BlockstoreOptions { + access_type: AccessType::Secondary, + enforce_ulimit_nofile: false, + ..BlockstoreOptions::default() + }; + let _ = Rocks::open(db_path, options).unwrap(); + } + { + let options = BlockstoreOptions { + access_type: AccessType::Primary, + enforce_ulimit_nofile: false, + ..BlockstoreOptions::default() + }; + let _ = Rocks::open(db_path, options).unwrap(); + } + } + impl LedgerColumn where C: ColumnIndexDeprecation + ProtobufColumn + ColumnName, diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 77155adb008a87..e2208ce557e12d 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -751,7 +751,8 @@ pub fn test_process_blockstore( None, None, exit, - ); + ) + .unwrap(); process_blockstore_from_root( blockstore, @@ -2692,7 +2693,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_config(2); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let keypair = Keypair::new(); let slot_entries = create_ticks(genesis_config.ticks_per_slot, 1, genesis_config.hash()); let tx = system_transaction::transfer( @@ -2857,7 +2858,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_config(1000); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); @@ -2894,7 +2895,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_config(1000); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let keypair3 = Keypair::new(); @@ -2954,7 +2955,7 @@ pub mod tests { mint_keypair, .. 
} = create_genesis_config(1000); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let keypair3 = Keypair::new(); @@ -3104,8 +3105,12 @@ pub mod tests { let mock_program_id = solana_sdk::pubkey::new_rand(); - let mut bank = Bank::new_for_tests(&genesis_config); - bank.add_mockup_builtin(mock_program_id, MockBuiltinOk::vm); + let bank = Bank::new_with_mockup_builtin_for_tests( + &genesis_config, + mock_program_id, + MockBuiltinOk::vm, + ) + .0; let tx = Transaction::new_signed_with_payer( &[Instruction::new_with_bincode( @@ -3119,7 +3124,6 @@ pub mod tests { ); let entry = next_entry(&bank.last_blockhash(), 1, vec![tx]); - let bank = Arc::new(bank); let result = process_entries_for_tests_without_scheduler(&bank, vec![entry]); bank.freeze(); let blockhash_ok = bank.last_blockhash(); @@ -3145,8 +3149,12 @@ pub mod tests { let mut bankhash_err = None; (0..get_instruction_errors().len()).for_each(|err| { - let mut bank = Bank::new_for_tests(&genesis_config); - bank.add_mockup_builtin(mock_program_id, MockBuiltinErr::vm); + let bank = Bank::new_with_mockup_builtin_for_tests( + &genesis_config, + mock_program_id, + MockBuiltinErr::vm, + ) + .0; let tx = Transaction::new_signed_with_payer( &[Instruction::new_with_bincode( @@ -3182,7 +3190,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_config(1000); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let keypair3 = Keypair::new(); @@ -3276,7 +3284,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_config(1000); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let keypair3 = Keypair::new(); @@ -3322,7 +3330,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_config(1_000_000_000); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; const NUM_TRANSFERS_PER_ENTRY: usize = 8; const NUM_TRANSFERS: usize = NUM_TRANSFERS_PER_ENTRY * 32; @@ -3389,7 +3397,7 @@ pub mod tests { .. } = create_genesis_config((num_accounts + 1) as u64 * initial_lamports); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let mut keypairs: Vec = vec![]; @@ -3456,7 +3464,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_config(1000); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let keypair3 = Keypair::new(); @@ -3518,7 +3526,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_config(11_000); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let pubkey = solana_sdk::pubkey::new_rand(); bank.transfer(1_000, &mint_keypair, &pubkey).unwrap(); assert_eq!(bank.transaction_count(), 1); @@ -3559,7 +3567,7 @@ pub mod tests { mint_keypair, .. 
} = create_genesis_config(11_000); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let success_tx = system_transaction::transfer( @@ -3845,7 +3853,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_config(100); - let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank0 = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let genesis_hash = genesis_config.hash(); let keypair = Keypair::new(); @@ -3876,7 +3884,7 @@ pub mod tests { AccountSecondaryIndexes::default(), AccountShrinkThreshold::default(), ); - *bank.epoch_schedule() + bank.epoch_schedule().clone() } fn frozen_bank_slots(bank_forks: &BankForks) -> Vec { @@ -3909,7 +3917,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_config(1_000_000_000); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let present_account_key = Keypair::new(); let present_account = AccountSharedData::new(1, 10, &Pubkey::default()); @@ -3967,14 +3975,18 @@ pub mod tests { &validator_keypairs, vec![100; validator_keypairs.len()], ); - let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); + let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); bank0.freeze(); - let bank1 = Arc::new(Bank::new_from_parent( - bank0.clone(), - &solana_sdk::pubkey::new_rand(), - 1, - )); + let bank1 = bank_forks + .write() + .unwrap() + .insert(Bank::new_from_parent( + bank0.clone(), + &solana_sdk::pubkey::new_rand(), + 1, + )) + .clone_without_scheduler(); // The new blockhash is going to be the hash of the last tick in the block let bank_1_blockhash = bank1.last_blockhash(); @@ -4374,9 +4386,9 @@ pub mod tests { .. 
} = create_genesis_config(100 * LAMPORTS_PER_SOL); let genesis_hash = genesis_config.hash(); - let bank = BankWithScheduler::new_without_scheduler(Arc::new(Bank::new_for_tests( - &genesis_config, - ))); + let bank = BankWithScheduler::new_without_scheduler( + Bank::new_with_bank_forks_for_tests(&genesis_config).0, + ); let mut timing = ConfirmationTiming::default(); let mut progress = ConfirmationProgress::new(genesis_hash); let amount = genesis_config.rent.minimum_balance(0); @@ -4592,7 +4604,7 @@ pub mod tests { genesis_config.ticks_per_slot = TICKS_PER_SLOT; let genesis_hash = genesis_config.hash(); - let slot_0_bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let (slot_0_bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); assert_eq!(slot_0_bank.slot(), 0); assert_eq!(slot_0_bank.tick_height(), 0); assert_eq!(slot_0_bank.max_tick_height(), 2); @@ -4607,7 +4619,12 @@ pub mod tests { assert_eq!(slot_0_bank.get_hash_age(&genesis_hash), Some(1)); assert_eq!(slot_0_bank.get_hash_age(&slot_0_hash), Some(0)); - let slot_2_bank = Arc::new(Bank::new_from_parent(slot_0_bank, &collector_id, 2)); + let new_bank = Bank::new_from_parent(slot_0_bank, &collector_id, 2); + let slot_2_bank = bank_forks + .write() + .unwrap() + .insert(new_bank) + .clone_without_scheduler(); assert_eq!(slot_2_bank.slot(), 2); assert_eq!(slot_2_bank.tick_height(), 2); assert_eq!(slot_2_bank.max_tick_height(), 6); diff --git a/ledger/src/leader_schedule_cache.rs b/ledger/src/leader_schedule_cache.rs index 733ea3c359befd..f847f6ce2871fe 100644 --- a/ledger/src/leader_schedule_cache.rs +++ b/ledger/src/leader_schedule_cache.rs @@ -40,7 +40,7 @@ pub struct LeaderScheduleCache { impl LeaderScheduleCache { pub fn new_from_bank(bank: &Bank) -> Self { - Self::new(*bank.epoch_schedule(), bank) + Self::new(bank.epoch_schedule().clone(), bank) } pub fn new(epoch_schedule: EpochSchedule, root_bank: &Bank) -> Self { @@ -56,9 +56,11 @@ impl LeaderScheduleCache { cache.set_root(root_bank); // Calculate the schedule for all epochs between 0 and leader_schedule_epoch(root) - let leader_schedule_epoch = epoch_schedule.get_leader_schedule_epoch(root_bank.slot()); + let leader_schedule_epoch = cache + .epoch_schedule + .get_leader_schedule_epoch(root_bank.slot()); for epoch in 0..leader_schedule_epoch { - let first_slot_in_epoch = epoch_schedule.get_first_slot_in_epoch(epoch); + let first_slot_in_epoch = cache.epoch_schedule.get_first_slot_in_epoch(epoch); cache.slot_leader_at(first_slot_in_epoch, Some(root_bank)); } cache @@ -507,7 +509,7 @@ mod tests { } = create_genesis_config(10_000 * bootstrap_validator_stake_lamports()); genesis_config.epoch_schedule.warmup = false; - let bank = Bank::new_for_tests(&genesis_config); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); // Create new vote account @@ -531,7 +533,11 @@ mod tests { target_slot += 1; } - let bank = Bank::new_from_parent(Arc::new(bank), &Pubkey::default(), target_slot); + let bank = bank_forks + .write() + .unwrap() + .insert(Bank::new_from_parent(bank, &Pubkey::default(), target_slot)) + .clone_without_scheduler(); let mut expected_slot = 0; let epoch = bank.get_leader_schedule_epoch(target_slot); for i in 0..epoch { diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index 4cd4f8b85b918a..1ce6c7ccc164cb 100644 --- a/ledger/src/shred.rs +++ b/ledger/src/shred.rs @@ -283,9 +283,10 @@ impl ErasureSetId { self.0 } - // Storage key for ErasureMeta in 
blockstore db. - pub(crate) fn store_key(&self) -> (Slot, /*fec_set_index:*/ u64) { - (self.0, u64::from(self.1)) + // Storage key for ErasureMeta and MerkleRootMeta in blockstore db. + // Note: ErasureMeta column uses u64 so this will need to be typecast + pub(crate) fn store_key(&self) -> (Slot, /*fec_set_index:*/ u32) { + (self.0, self.1) } } @@ -894,6 +895,7 @@ pub fn should_discard_shred( root: Slot, max_slot: Slot, shred_version: u16, + should_drop_legacy_shreds: impl Fn(Slot) -> bool, stats: &mut ShredFetchStats, ) -> bool { debug_assert!(root < max_slot); @@ -968,7 +970,11 @@ pub fn should_discard_shred( } } match shred_variant { - ShredVariant::LegacyCode | ShredVariant::LegacyData => (), + ShredVariant::LegacyCode | ShredVariant::LegacyData => { + if should_drop_legacy_shreds(slot) { + return true; + } + } ShredVariant::MerkleCode(_) => { stats.num_shreds_merkle_code = stats.num_shreds_merkle_code.saturating_add(1); } @@ -1172,6 +1178,7 @@ mod tests { root, max_slot, shred_version, + |_| false, // should_drop_legacy_shreds &mut stats )); assert_eq!(stats, ShredFetchStats::default()); @@ -1182,6 +1189,7 @@ mod tests { root, max_slot, shred_version, + |_| false, // should_drop_legacy_shreds &mut stats )); assert_eq!(stats.index_overrun, 1); @@ -1192,6 +1200,7 @@ mod tests { root, max_slot, shred_version, + |_| false, // should_drop_legacy_shreds &mut stats )); assert_eq!(stats.index_overrun, 2); @@ -1202,6 +1211,7 @@ mod tests { root, max_slot, shred_version, + |_| false, // should_drop_legacy_shreds &mut stats )); assert_eq!(stats.index_overrun, 3); @@ -1212,6 +1222,7 @@ mod tests { root, max_slot, shred_version, + |_| false, // should_drop_legacy_shreds &mut stats )); assert_eq!(stats.index_overrun, 4); @@ -1222,6 +1233,7 @@ mod tests { root, max_slot, shred_version, + |_| false, // should_drop_legacy_shreds &mut stats )); assert_eq!(stats.bad_parent_offset, 1); @@ -1242,6 +1254,7 @@ mod tests { root, max_slot, shred_version, + |_| false, // should_drop_legacy_shreds &mut stats )); @@ -1261,6 +1274,7 @@ mod tests { root, max_slot, shred_version, + |_| false, // should_drop_legacy_shreds &mut stats )); assert_eq!(1, stats.index_out_of_bounds); @@ -1281,6 +1295,7 @@ mod tests { root, max_slot, shred_version, + |_| false, // should_drop_legacy_shreds &mut stats )); packet.buffer_mut()[OFFSET_OF_SHRED_VARIANT] = u8::MAX; @@ -1290,6 +1305,7 @@ mod tests { root, max_slot, shred_version, + |_| false, // should_drop_legacy_shreds &mut stats )); assert_eq!(1, stats.bad_shred_type); @@ -1301,6 +1317,7 @@ mod tests { root, max_slot, shred_version, + |_| false, // should_drop_legacy_shreds &mut stats )); assert_eq!(1, stats.bad_shred_type); diff --git a/ledger/src/use_snapshot_archives_at_startup.rs b/ledger/src/use_snapshot_archives_at_startup.rs index b173ed1564e5fa..6e19d0c424dcf8 100644 --- a/ledger/src/use_snapshot_archives_at_startup.rs +++ b/ledger/src/use_snapshot_archives_at_startup.rs @@ -48,4 +48,8 @@ pub mod cli { pub fn default_value() -> &'static str { UseSnapshotArchivesAtStartup::default().into() } + + pub fn default_value_for_ledger_tool() -> &'static str { + UseSnapshotArchivesAtStartup::Always.into() + } } diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index bd6a15eee1881e..b9ef4646a12740 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -5027,6 +5027,7 @@ fn test_boot_from_local_state() { #[test] #[serial] #[allow(unused_attributes)] +#[ignore] fn test_duplicate_shreds_switch_failure() 
{ fn wait_for_duplicate_fork_frozen(ledger_path: &Path, dup_slot: Slot) -> Hash { // Ensure all the slots <= dup_slot are also full so we know we can replay up to dup_slot diff --git a/poh/Cargo.toml b/poh/Cargo.toml index 683d668ddfbd7a..0b93acffe5a2c4 100644 --- a/poh/Cargo.toml +++ b/poh/Cargo.toml @@ -28,6 +28,7 @@ rand = { workspace = true } solana-logger = { workspace = true } solana-perf = { workspace = true } solana-poh = { path = ".", features = ["dev-context-only-utils"] } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } [features] dev-context-only-utils = [] diff --git a/poh/src/poh_recorder.rs b/poh/src/poh_recorder.rs index a598e001fc8684..f0d37e24c51f55 100644 --- a/poh/src/poh_recorder.rs +++ b/poh/src/poh_recorder.rs @@ -10,6 +10,8 @@ //! For Entries: //! * recorded entry must be >= WorkingBank::min_tick_height && entry must be < WorkingBank::max_tick_height //! +#[cfg(feature = "dev-context-only-utils")] +use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo}; use { crate::{leader_bank_notifier::LeaderBankNotifier, poh_service::PohService}, crossbeam_channel::{unbounded, Receiver, RecvTimeoutError, SendError, Sender, TrySendError}, @@ -18,11 +20,7 @@ use { entry::{hash_transactions, Entry}, poh::Poh, }, - solana_ledger::{ - blockstore::Blockstore, - genesis_utils::{create_genesis_config, GenesisConfigInfo}, - leader_schedule_cache::LeaderScheduleCache, - }, + solana_ledger::{blockstore::Blockstore, leader_schedule_cache::LeaderScheduleCache}, solana_measure::{measure, measure_us}, solana_metrics::poh_timing_point::{send_poh_timing_point, PohTimingSender, SlotPohTimingInfo}, solana_runtime::{bank::Bank, installed_scheduler_pool::BankWithScheduler}, @@ -1053,6 +1051,7 @@ impl PohRecorder { } // Used in tests + #[cfg(feature = "dev-context-only-utils")] pub fn schedule_dummy_max_height_reached_failure(&mut self) { let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); let bank = Arc::new(Bank::new_for_tests(&genesis_config)); diff --git a/poh/src/poh_service.rs b/poh/src/poh_service.rs index a01c688a527aec..e69db7f119862b 100644 --- a/poh/src/poh_service.rs +++ b/poh/src/poh_service.rs @@ -402,7 +402,7 @@ mod tests { fn test_poh_service() { solana_logger::setup(); let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(2); - let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; let prev_hash = bank.last_blockhash(); let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()) diff --git a/program-runtime/src/compute_budget_processor.rs b/program-runtime/src/compute_budget_processor.rs index b2c3a892493d41..3b705d334d5bc9 100644 --- a/program-runtime/src/compute_budget_processor.rs +++ b/program-runtime/src/compute_budget_processor.rs @@ -5,7 +5,7 @@ use { prioritization_fee::{PrioritizationFeeDetails, PrioritizationFeeType}, }, solana_sdk::{ - borsh0_10::try_from_slice_unchecked, + borsh1::try_from_slice_unchecked, compute_budget::{self, ComputeBudgetInstruction}, entrypoint::HEAP_LENGTH as MIN_HEAP_FRAME_BYTES, feature_set::{ diff --git a/program-runtime/src/invoke_context.rs b/program-runtime/src/invoke_context.rs index bdb870a02c1dda..9786d5070a0ae5 100644 --- a/program-runtime/src/invoke_context.rs +++ b/program-runtime/src/invoke_context.rs @@ -615,7 +615,7 @@ impl<'a> InvokeContext<'a> { pub fn get_syscall_context(&self) -> Result<&SyscallContext, InstructionError> { self.syscall_context .last() - .and_then(|syscall_context| syscall_context.as_ref()) + .and_then(std::option::Option::as_ref) .ok_or(InstructionError::CallDepth) } diff --git a/program-runtime/src/loaded_programs.rs b/program-runtime/src/loaded_programs.rs index c4e5d20bb6b472..b88a79a61d8982 100644 --- a/program-runtime/src/loaded_programs.rs +++ b/program-runtime/src/loaded_programs.rs @@ -25,7 +25,7 @@ use { fmt::{Debug, Formatter}, sync::{ atomic::{AtomicU64, Ordering}, - Arc, RwLock, + Arc, Mutex, RwLock, }, }, }; @@ -115,6 +115,9 @@ impl LoadedProgramType { LoadedProgramType::LegacyV0(program) | LoadedProgramType::LegacyV1(program) | LoadedProgramType::Typed(program) => Some(program.get_loader()), + LoadedProgramType::FailedVerification(env) | LoadedProgramType::Unloaded(env) => { + Some(env) + } #[cfg(test)] LoadedProgramType::TestLoaded(environment) => Some(environment), _ => None, @@ -136,7 +139,7 @@ pub struct LoadedProgram { pub maybe_expiration_slot: Option, /// How often this entry was used by a transaction pub tx_usage_counter: AtomicU64, - /// How often this entry was used by a transaction + /// How often this entry was used by an instruction pub ix_usage_counter: AtomicU64, } @@ -368,7 +371,7 @@ impl LoadedProgram { effective_slot: self.effective_slot, maybe_expiration_slot: self.maybe_expiration_slot, tx_usage_counter: AtomicU64::new(self.tx_usage_counter.load(Ordering::Relaxed)), - ix_usage_counter: AtomicU64::new(self.tx_usage_counter.load(Ordering::Relaxed)), + ix_usage_counter: AtomicU64::new(self.ix_usage_counter.load(Ordering::Relaxed)), }) } @@ -508,8 +511,7 @@ pub struct LoadedProgramsForTxBatch { pub struct ExtractedPrograms { pub loaded: LoadedProgramsForTxBatch, - pub missing: Vec<(Pubkey, u64)>, - pub unloaded: Vec<(Pubkey, u64)>, + pub missing: HashMap, } impl LoadedProgramsForTxBatch { @@ -607,13 +609,11 @@ impl LoadedPrograms { if matches!(existing.program, LoadedProgramType::Unloaded(_)) { // The unloaded program is getting reloaded // Copy over the usage counter to the new entry - let mut usage_count = existing.tx_usage_counter.load(Ordering::Relaxed); - saturating_add_assign!( - usage_count, - entry.tx_usage_counter.load(Ordering::Relaxed) + entry.tx_usage_counter.fetch_add( + existing.tx_usage_counter.load(Ordering::Relaxed), + 
Ordering::Relaxed, ); - entry.tx_usage_counter.store(usage_count, Ordering::Relaxed); - entry.ix_usage_counter.store( + entry.ix_usage_counter.fetch_add( existing.ix_usage_counter.load(Ordering::Relaxed), Ordering::Relaxed, ); @@ -646,14 +646,9 @@ impl LoadedPrograms { } pub fn prune_by_deployment_slot(&mut self, slot: Slot) { - self.entries.retain(|_key, second_level| { - *second_level = second_level - .iter() - .filter(|entry| entry.deployment_slot != slot) - .cloned() - .collect(); - !second_level.is_empty() - }); + for second_level in self.entries.values_mut() { + second_level.retain(|entry| entry.deployment_slot != slot); + } self.remove_programs_with_no_entries(); } @@ -679,6 +674,7 @@ impl LoadedPrograms { for second_level in self.entries.values_mut() { // Remove entries un/re/deployed on orphan forks let mut first_ancestor_found = false; + let mut first_ancestor_env = None; *second_level = second_level .iter() .rev() @@ -686,12 +682,29 @@ impl LoadedPrograms { let relation = fork_graph.relationship(entry.deployment_slot, new_root_slot); if entry.deployment_slot >= new_root_slot { matches!(relation, BlockRelation::Equal | BlockRelation::Descendant) - } else if !first_ancestor_found - && (matches!(relation, BlockRelation::Ancestor) - || entry.deployment_slot <= self.latest_root_slot) + } else if matches!(relation, BlockRelation::Ancestor) + || entry.deployment_slot <= self.latest_root_slot { - first_ancestor_found = true; - first_ancestor_found + if !first_ancestor_found { + first_ancestor_found = true; + first_ancestor_env = entry.program.get_environment(); + return true; + } + // Do not prune the entry if the runtime environment of the entry is different + // than the entry that was previously found (stored in first_ancestor_env). + // Different environment indicates that this entry might belong to an older + // epoch that had a different environment (e.g. different feature set). + // Once the root moves to the new/current epoch, the entry will get pruned. + // But, until then the entry might still be getting used by an older slot. 
+ if let Some(entry_env) = entry.program.get_environment() { + if let Some(env) = first_ancestor_env { + if !Arc::ptr_eq(entry_env, env) { + return true; + } + } + } + self.stats.prunes_orphan.fetch_add(1, Ordering::Relaxed); + false } else { self.stats.prunes_orphan.fetch_add(1, Ordering::Relaxed); false @@ -774,13 +787,22 @@ impl LoadedPrograms { &self, working_slot: &S, keys: impl Iterator, - ) -> ExtractedPrograms { + ) -> Arc> { + debug_assert!(self.fork_graph.is_some()); let environments = self.get_environments_for_epoch(working_slot.current_epoch()); - let mut missing = Vec::new(); - let mut unloaded = Vec::new(); let current_slot = working_slot.current_slot(); - let found = keys - .filter_map(|(key, (match_criteria, count))| { + let extracted = Arc::new(Mutex::new(ExtractedPrograms { + loaded: LoadedProgramsForTxBatch { + entries: HashMap::new(), + slot: current_slot, + environments: environments.clone(), + }, + missing: HashMap::new(), + })); + let mut extracting = extracted.lock().unwrap(); + extracting.loaded.entries = keys + .filter_map(|(key, (match_criteria, usage_count))| { + let mut reloading = false; if let Some(second_level) = self.entries.get(&key) { for entry in second_level.iter().rev() { let is_ancestor = if let Some(fork_graph) = &self.fork_graph { @@ -802,62 +824,49 @@ impl LoadedPrograms { || entry.deployment_slot == current_slot || is_ancestor { - if current_slot >= entry.effective_slot { - if !Self::is_entry_usable(entry, current_slot, &match_criteria) { - missing.push((key, count)); - return None; - } - - if !Self::matches_environment(entry, environments) { - missing.push((key, count)); - return None; + let entry_to_return = if current_slot >= entry.effective_slot { + if !Self::is_entry_usable(entry, current_slot, &match_criteria) + || !Self::matches_environment(entry, environments) + { + break; } if let LoadedProgramType::Unloaded(_environment) = &entry.program { - unloaded.push((key, count)); - return None; + reloading = true; + break; } - let mut usage_count = - entry.tx_usage_counter.load(Ordering::Relaxed); - saturating_add_assign!(usage_count, count); - entry.tx_usage_counter.store(usage_count, Ordering::Relaxed); - return Some((key, entry.clone())); + entry.clone() } else if entry.is_implicit_delay_visibility_tombstone(current_slot) { // Found a program entry on the current fork, but it's not effective // yet. It indicates that the program has delayed visibility. Return // the tombstone to reflect that. 
- return Some(( - key, - Arc::new(LoadedProgram::new_tombstone( - entry.deployment_slot, - LoadedProgramType::DelayVisibility, - )), - )); - } + Arc::new(LoadedProgram::new_tombstone( + entry.deployment_slot, + LoadedProgramType::DelayVisibility, + )) + } else { + continue; + }; + entry_to_return + .tx_usage_counter + .fetch_add(usage_count, Ordering::Relaxed); + return Some((key, entry_to_return)); } } } - missing.push((key, count)); + extracting.missing.insert(key, (usage_count, reloading)); None }) .collect::>>(); - self.stats .misses - .fetch_add(missing.len() as u64, Ordering::Relaxed); + .fetch_add(extracting.missing.len() as u64, Ordering::Relaxed); self.stats .hits - .fetch_add(found.len() as u64, Ordering::Relaxed); - ExtractedPrograms { - loaded: LoadedProgramsForTxBatch { - entries: found, - slot: current_slot, - environments: environments.clone(), - }, - missing, - unloaded, - } + .fetch_add(extracting.loaded.entries.len() as u64, Ordering::Relaxed); + drop(extracting); + extracted } pub fn merge(&mut self, tx_batch_cache: &LoadedProgramsForTxBatch) { @@ -903,7 +912,6 @@ impl LoadedPrograms { .len() .saturating_sub(shrink_to.apply_to(MAX_LOADED_ENTRY_COUNT)); self.unload_program_entries(sorted_candidates.iter().take(num_to_unload)); - self.remove_programs_with_no_entries(); } /// Removes all the entries at the given keys, if they exist @@ -915,7 +923,7 @@ impl LoadedPrograms { fn unload_program(&mut self, id: &Pubkey) { if let Some(entries) = self.entries.get_mut(id) { - entries.iter_mut().for_each(|entry| { + for entry in entries.iter_mut() { if let Some(unloaded) = entry.to_unloaded() { *entry = Arc::new(unloaded); self.stats @@ -924,7 +932,7 @@ impl LoadedPrograms { .and_modify(|c| saturating_add_assign!(*c, 1)) .or_insert(1); } - }); + } } } @@ -989,8 +997,8 @@ mod tests { use { crate::loaded_programs::{ BlockRelation, ExtractedPrograms, ForkGraph, LoadedProgram, LoadedProgramMatchCriteria, - LoadedProgramType, LoadedPrograms, LoadedProgramsForTxBatch, ProgramRuntimeEnvironment, - WorkingSlot, DELAY_VISIBILITY_SLOT_OFFSET, + LoadedProgramType, LoadedPrograms, ProgramRuntimeEnvironment, + ProgramRuntimeEnvironments, WorkingSlot, DELAY_VISIBILITY_SLOT_OFFSET, }, assert_matches::assert_matches, percentage::Percentage, @@ -1003,7 +1011,7 @@ mod tests { ops::ControlFlow, sync::{ atomic::{AtomicU64, Ordering}, - Arc, RwLock, + Arc, Mutex, RwLock, }, }, }; @@ -1117,7 +1125,6 @@ mod tests { #[test] fn test_eviction() { let mut programs = vec![]; - let mut num_total_programs: usize = 0; let mut cache = new_mock_cache::(); @@ -1137,7 +1144,6 @@ mod tests { AtomicU64::new(usage_counter), ), ); - num_total_programs += 1; programs.push((program1, *deployment_slot, usage_counter)); }); @@ -1171,7 +1177,6 @@ mod tests { AtomicU64::new(usage_counter), ), ); - num_total_programs += 1; programs.push((program2, *deployment_slot, usage_counter)); }); @@ -1204,7 +1209,6 @@ mod tests { AtomicU64::new(usage_counter), ), ); - num_total_programs += 1; programs.push((program3, *deployment_slot, usage_counter)); }); @@ -1481,6 +1485,76 @@ mod tests { assert!(cache.entries.is_empty()); } + #[test] + fn test_prune_different_env() { + let mut cache = new_mock_cache::(); + + let fork_graph = Arc::new(RwLock::new(TestForkGraph { + relation: BlockRelation::Ancestor, + })); + + cache.set_fork_graph(fork_graph); + + let program1 = Pubkey::new_unique(); + let loaded_program = new_test_loaded_program(10, 10); + let (existing, program) = cache.replenish(program1, loaded_program.clone()); + assert!(!existing); + 
assert_eq!(program, loaded_program); + + let new_env = Arc::new(BuiltinProgram::new_mock()); + cache.upcoming_environments = Some(ProgramRuntimeEnvironments { + program_runtime_v1: new_env.clone(), + program_runtime_v2: new_env.clone(), + }); + let updated_program = Arc::new(LoadedProgram { + program: LoadedProgramType::TestLoaded(new_env.clone()), + account_size: 0, + deployment_slot: 20, + effective_slot: 20, + maybe_expiration_slot: None, + tx_usage_counter: AtomicU64::default(), + ix_usage_counter: AtomicU64::default(), + }); + let (existing, program) = cache.replenish(program1, updated_program.clone()); + assert!(!existing); + assert_eq!(program, updated_program); + + // Test that there are 2 entries for the program + assert_eq!( + cache + .entries + .get(&program1) + .expect("failed to find the program") + .len(), + 2 + ); + + cache.prune(21, cache.latest_root_epoch); + + // Test that prune didn't remove the entry, since environments are different. + assert_eq!( + cache + .entries + .get(&program1) + .expect("failed to find the program") + .len(), + 2 + ); + + cache.prune(22, cache.latest_root_epoch.saturating_add(1)); + + let entries = cache + .entries + .get(&program1) + .expect("failed to find the program"); + // Test that prune removed 1 entry, since epoch changed + assert_eq!(entries.len(), 1); + + let entry = entries.first().expect("Failed to get the program").clone(); + // Test that the correct entry remains in the cache + assert_eq!(entry, updated_program); + } + #[derive(Default)] struct TestForkGraphSpecific { forks: Vec>, @@ -1539,18 +1613,33 @@ mod tests { } fn match_slot( - table: &LoadedProgramsForTxBatch, + extracted: &Arc>, program: &Pubkey, deployment_slot: Slot, working_slot: Slot, ) -> bool { - assert_eq!(table.slot, working_slot); - table + let extracted = extracted.lock().unwrap(); + assert_eq!(extracted.loaded.slot, working_slot); + extracted + .loaded .find(program) .map(|entry| entry.deployment_slot == deployment_slot) .unwrap_or(false) } + fn match_missing( + extracted: &Arc>, + key: &Pubkey, + reload: bool, + ) -> bool { + let extracted = extracted.lock().unwrap(); + extracted + .missing + .get(key) + .filter(|(_count, reloading)| *reloading == reload) + .is_some() + } + #[test] fn test_fork_extract_and_prune() { let mut cache = new_mock_cache::(); @@ -1629,11 +1718,7 @@ mod tests { // 23 // Testing fork 0 - 10 - 12 - 22 with current slot at 22 - let ExtractedPrograms { - loaded: found, - missing, - unloaded, - } = cache.extract( + let extracted = cache.extract( &TestWorkingSlot(22), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -1644,19 +1729,14 @@ mod tests { .into_iter(), ); - assert!(match_slot(&found, &program1, 20, 22)); - assert!(match_slot(&found, &program4, 0, 22)); + assert!(match_slot(&extracted, &program1, 20, 22)); + assert!(match_slot(&extracted, &program4, 0, 22)); - assert!(missing.contains(&(program2, 2))); - assert!(missing.contains(&(program3, 3))); - assert!(unloaded.is_empty()); + assert!(match_missing(&extracted, &program2, false)); + assert!(match_missing(&extracted, &program3, false)); - // Testing fork 0 - 5 - 11 - 15 - 16 with current slot at 15 - let ExtractedPrograms { - loaded: found, - missing, - unloaded, - } = cache.extract( + // Testing fork 0 - 5 - 11 - 15 - 16 with current slot at 16 + let extracted = cache.extract( &TestWorkingSlot(15), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -1667,24 +1747,24 @@ mod tests { .into_iter(), ); - assert!(match_slot(&found, &program1, 0, 15)); - 
assert!(match_slot(&found, &program2, 11, 15)); + assert!(match_slot(&extracted, &program1, 0, 15)); + assert!(match_slot(&extracted, &program2, 11, 15)); // The effective slot of program4 deployed in slot 15 is 19. So it should not be usable in slot 16. // A delay visibility tombstone should be returned here. - let tombstone = found.find(&program4).expect("Failed to find the tombstone"); + let tombstone = extracted + .lock() + .unwrap() + .loaded + .find(&program4) + .expect("Failed to find the tombstone"); assert_matches!(tombstone.program, LoadedProgramType::DelayVisibility); assert_eq!(tombstone.deployment_slot, 15); - assert!(missing.contains(&(program3, 1))); - assert!(unloaded.is_empty()); + assert!(match_missing(&extracted, &program3, false)); // Testing the same fork above, but current slot is now 18 (equal to effective slot of program4). - let ExtractedPrograms { - loaded: found, - missing, - unloaded, - } = cache.extract( + let extracted = cache.extract( &TestWorkingSlot(18), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -1695,21 +1775,16 @@ mod tests { .into_iter(), ); - assert!(match_slot(&found, &program1, 0, 18)); - assert!(match_slot(&found, &program2, 11, 18)); + assert!(match_slot(&extracted, &program1, 0, 18)); + assert!(match_slot(&extracted, &program2, 11, 18)); // The effective slot of program4 deployed in slot 15 is 18. So it should be usable in slot 18. - assert!(match_slot(&found, &program4, 15, 18)); + assert!(match_slot(&extracted, &program4, 15, 18)); - assert!(missing.contains(&(program3, 1))); - assert!(unloaded.is_empty()); + assert!(match_missing(&extracted, &program3, false)); // Testing the same fork above, but current slot is now 23 (future slot than effective slot of program4). - let ExtractedPrograms { - loaded: found, - missing, - unloaded, - } = cache.extract( + let extracted = cache.extract( &TestWorkingSlot(23), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -1720,21 +1795,16 @@ mod tests { .into_iter(), ); - assert!(match_slot(&found, &program1, 0, 23)); - assert!(match_slot(&found, &program2, 11, 23)); + assert!(match_slot(&extracted, &program1, 0, 23)); + assert!(match_slot(&extracted, &program2, 11, 23)); // The effective slot of program4 deployed in slot 15 is 19. So it should be usable in slot 23. - assert!(match_slot(&found, &program4, 15, 23)); + assert!(match_slot(&extracted, &program4, 15, 23)); - assert!(missing.contains(&(program3, 1))); - assert!(unloaded.is_empty()); + assert!(match_missing(&extracted, &program3, false)); // Testing fork 0 - 5 - 11 - 15 - 16 with current slot at 11 - let ExtractedPrograms { - loaded: found, - missing, - unloaded, - } = cache.extract( + let extracted = cache.extract( &TestWorkingSlot(11), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -1745,15 +1815,19 @@ mod tests { .into_iter(), ); - assert!(match_slot(&found, &program1, 0, 11)); + assert!(match_slot(&extracted, &program1, 0, 11)); // program2 was updated at slot 11, but is not effective till slot 12. The result should contain a tombstone. 
- let tombstone = found.find(&program2).expect("Failed to find the tombstone"); + let tombstone = extracted + .lock() + .unwrap() + .loaded + .find(&program2) + .expect("Failed to find the tombstone"); assert_matches!(tombstone.program, LoadedProgramType::DelayVisibility); assert_eq!(tombstone.deployment_slot, 11); - assert!(match_slot(&found, &program4, 5, 11)); + assert!(match_slot(&extracted, &program4, 5, 11)); - assert!(missing.contains(&(program3, 1))); - assert!(unloaded.is_empty()); + assert!(match_missing(&extracted, &program3, false)); // The following is a special case, where there's an expiration slot let test_program = Arc::new(LoadedProgram { @@ -1768,11 +1842,7 @@ mod tests { assert!(!cache.replenish(program4, test_program).0); // Testing fork 0 - 5 - 11 - 15 - 16 - 19 - 21 - 23 with current slot at 19 - let ExtractedPrograms { - loaded: found, - missing, - unloaded, - } = cache.extract( + let extracted = cache.extract( &TestWorkingSlot(19), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -1783,21 +1853,16 @@ mod tests { .into_iter(), ); - assert!(match_slot(&found, &program1, 0, 19)); - assert!(match_slot(&found, &program2, 11, 19)); + assert!(match_slot(&extracted, &program1, 0, 19)); + assert!(match_slot(&extracted, &program2, 11, 19)); // Program4 deployed at slot 19 should not be expired yet - assert!(match_slot(&found, &program4, 19, 19)); + assert!(match_slot(&extracted, &program4, 19, 19)); - assert!(missing.contains(&(program3, 1))); - assert!(unloaded.is_empty()); + assert!(match_missing(&extracted, &program3, false)); // Testing fork 0 - 5 - 11 - 15 - 16 - 19 - 21 - 23 with current slot at 21 // This would cause program4 deployed at slot 19 to be expired. - let ExtractedPrograms { - loaded: found, - missing, - unloaded, - } = cache.extract( + let extracted = cache.extract( &TestWorkingSlot(21), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -1808,12 +1873,11 @@ mod tests { .into_iter(), ); - assert!(match_slot(&found, &program1, 0, 21)); - assert!(match_slot(&found, &program2, 11, 21)); + assert!(match_slot(&extracted, &program1, 0, 21)); + assert!(match_slot(&extracted, &program2, 11, 21)); - assert!(missing.contains(&(program3, 1))); - assert!(missing.contains(&(program4, 1))); - assert!(unloaded.is_empty()); + assert!(match_missing(&extracted, &program3, false)); + assert!(match_missing(&extracted, &program4, false)); // Remove the expired entry to let the rest of the test continue if let Some(programs) = cache.entries.get_mut(&program4) { @@ -1837,12 +1901,8 @@ mod tests { // | // 23 - // Testing fork 11 - 15 - 16- 19 - 22 with root at 5 and current slot at 21 - let ExtractedPrograms { - loaded: found, - missing, - unloaded, - } = cache.extract( + // Testing fork 11 - 15 - 16- 19 - 22 with root at 5 and current slot at 22 + let extracted = cache.extract( &TestWorkingSlot(21), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -1854,19 +1914,14 @@ mod tests { ); // Since the fork was pruned, we should not find the entry deployed at slot 20. 
- assert!(match_slot(&found, &program1, 0, 21)); - assert!(match_slot(&found, &program2, 11, 21)); - assert!(match_slot(&found, &program4, 15, 21)); + assert!(match_slot(&extracted, &program1, 0, 21)); + assert!(match_slot(&extracted, &program2, 11, 21)); + assert!(match_slot(&extracted, &program4, 15, 21)); - assert!(missing.contains(&(program3, 1))); - assert!(unloaded.is_empty()); + assert!(match_missing(&extracted, &program3, false)); // Testing fork 0 - 5 - 11 - 25 - 27 with current slot at 27 - let ExtractedPrograms { - loaded: found, - missing: _, - unloaded, - } = cache.extract( + let extracted = cache.extract( &TestWorkingSlot(27), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -1877,11 +1932,10 @@ mod tests { .into_iter(), ); - assert!(unloaded.is_empty()); - assert!(match_slot(&found, &program1, 0, 27)); - assert!(match_slot(&found, &program2, 11, 27)); - assert!(match_slot(&found, &program3, 25, 27)); - assert!(match_slot(&found, &program4, 5, 27)); + assert!(match_slot(&extracted, &program1, 0, 27)); + assert!(match_slot(&extracted, &program2, 11, 27)); + assert!(match_slot(&extracted, &program3, 25, 27)); + assert!(match_slot(&extracted, &program4, 5, 27)); cache.prune(15, 0); @@ -1901,11 +1955,7 @@ mod tests { // 23 // Testing fork 16, 19, 23, with root at 15, current slot at 23 - let ExtractedPrograms { - loaded: found, - missing, - unloaded, - } = cache.extract( + let extracted = cache.extract( &TestWorkingSlot(23), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -1916,13 +1966,12 @@ mod tests { .into_iter(), ); - assert!(match_slot(&found, &program1, 0, 23)); - assert!(match_slot(&found, &program2, 11, 23)); - assert!(match_slot(&found, &program4, 15, 23)); + assert!(match_slot(&extracted, &program1, 0, 23)); + assert!(match_slot(&extracted, &program2, 11, 23)); + assert!(match_slot(&extracted, &program4, 15, 23)); // program3 was deployed on slot 25, which has been pruned - assert!(missing.contains(&(program3, 1))); - assert!(unloaded.is_empty()); + assert!(match_missing(&extracted, &program3, false)); } #[test] @@ -1964,11 +2013,7 @@ mod tests { assert!(!cache.replenish(program3, new_test_loaded_program(25, 26)).0); // Testing fork 0 - 5 - 11 - 15 - 16 - 19 - 21 - 23 with current slot at 19 - let ExtractedPrograms { - loaded: found, - missing, - unloaded, - } = cache.extract( + let extracted = cache.extract( &TestWorkingSlot(12), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -1978,18 +2023,13 @@ mod tests { .into_iter(), ); - assert!(match_slot(&found, &program1, 0, 12)); - assert!(match_slot(&found, &program2, 11, 12)); + assert!(match_slot(&extracted, &program1, 0, 12)); + assert!(match_slot(&extracted, &program2, 11, 12)); - assert!(missing.contains(&(program3, 1))); - assert!(unloaded.is_empty()); + assert!(match_missing(&extracted, &program3, false)); // Test the same fork, but request the program modified at a later slot than what's in the cache. 
- let ExtractedPrograms { - loaded: found, - missing, - unloaded, - } = cache.extract( + let extracted = cache.extract( &TestWorkingSlot(12), vec![ ( @@ -2005,11 +2045,10 @@ mod tests { .into_iter(), ); - assert!(match_slot(&found, &program2, 11, 12)); + assert!(match_slot(&extracted, &program2, 11, 12)); - assert!(missing.contains(&(program1, 1))); - assert!(missing.contains(&(program3, 1))); - assert!(unloaded.is_empty()); + assert!(match_missing(&extracted, &program1, false)); + assert!(match_missing(&extracted, &program3, false)); } #[test] @@ -2068,11 +2107,7 @@ mod tests { ); // Testing fork 0 - 5 - 11 - 15 - 16 - 19 - 21 - 23 with current slot at 19 - let ExtractedPrograms { - loaded: found, - missing, - unloaded, - } = cache.extract( + let extracted = cache.extract( &TestWorkingSlot(19), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -2082,18 +2117,13 @@ mod tests { .into_iter(), ); - assert!(match_slot(&found, &program1, 0, 19)); - assert!(match_slot(&found, &program2, 11, 19)); + assert!(match_slot(&extracted, &program1, 0, 19)); + assert!(match_slot(&extracted, &program2, 11, 19)); - assert!(missing.contains(&(program3, 1))); - assert!(unloaded.is_empty()); + assert!(match_missing(&extracted, &program3, false)); // Testing fork 0 - 5 - 11 - 25 - 27 with current slot at 27 - let ExtractedPrograms { - loaded: found, - missing, - unloaded, - } = cache.extract( + let extracted = cache.extract( &TestWorkingSlot(27), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -2103,18 +2133,13 @@ mod tests { .into_iter(), ); - assert!(match_slot(&found, &program1, 0, 27)); - assert!(match_slot(&found, &program2, 11, 27)); + assert!(match_slot(&extracted, &program1, 0, 27)); + assert!(match_slot(&extracted, &program2, 11, 27)); - assert!(unloaded.contains(&(program3, 1))); - assert!(missing.is_empty()); + assert!(match_missing(&extracted, &program3, true)); // Testing fork 0 - 10 - 20 - 22 with current slot at 22 - let ExtractedPrograms { - loaded: found, - missing, - unloaded, - } = cache.extract( + let extracted = cache.extract( &TestWorkingSlot(22), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -2124,10 +2149,10 @@ mod tests { .into_iter(), ); - assert!(match_slot(&found, &program1, 20, 22)); + assert!(match_slot(&extracted, &program1, 20, 22)); - assert!(missing.contains(&(program2, 1))); - assert!(unloaded.contains(&(program3, 1))); + assert!(match_missing(&extracted, &program2, false)); + assert!(match_missing(&extracted, &program3, true)); } #[test] @@ -2180,11 +2205,7 @@ mod tests { assert!(!cache.replenish(program1, test_program).0); // Testing fork 0 - 5 - 11 - 15 - 16 - 19 - 21 - 23 with current slot at 19 - let ExtractedPrograms { - loaded: found, - missing, - unloaded, - } = cache.extract( + let extracted = cache.extract( &TestWorkingSlot(12), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -2195,19 +2216,14 @@ mod tests { ); // Program1 deployed at slot 11 should not be expired yet - assert!(match_slot(&found, &program1, 11, 12)); - assert!(match_slot(&found, &program2, 11, 12)); + assert!(match_slot(&extracted, &program1, 11, 12)); + assert!(match_slot(&extracted, &program2, 11, 12)); - assert!(missing.contains(&(program3, 1))); - assert!(unloaded.is_empty()); + assert!(match_missing(&extracted, &program3, false)); // Testing fork 0 - 5 - 11 - 12 - 15 - 16 - 19 - 21 - 23 with current slot at 15 // This would cause program4 deployed at slot 15 to be expired. 
- let ExtractedPrograms { - loaded: found, - missing, - unloaded, - } = cache.extract( + let extracted = cache.extract( &TestWorkingSlot(15), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -2216,12 +2232,11 @@ mod tests { ] .into_iter(), ); - assert!(unloaded.is_empty()); - assert!(match_slot(&found, &program2, 11, 15)); + assert!(match_slot(&extracted, &program2, 11, 15)); - assert!(missing.contains(&(program1, 1))); - assert!(missing.contains(&(program3, 1))); + assert!(match_missing(&extracted, &program1, false)); + assert!(match_missing(&extracted, &program3, false)); // Test that the program still exists in the cache, even though it is expired. assert_eq!( @@ -2275,19 +2290,17 @@ mod tests { cache.prune(10, 0); - let ExtractedPrograms { - loaded: found, - missing: _, - unloaded, - } = cache.extract( + let extracted = cache.extract( &TestWorkingSlot(20), vec![(program1, (LoadedProgramMatchCriteria::NoCriteria, 1))].into_iter(), ); - assert!(unloaded.is_empty()); // The cache should have the program deployed at slot 0 assert_eq!( - found + extracted + .lock() + .unwrap() + .loaded .entries .get(&program1) .expect("Did not find the program") @@ -2323,11 +2336,7 @@ mod tests { let program2 = Pubkey::new_unique(); assert!(!cache.replenish(program2, new_test_loaded_program(10, 11)).0); - let ExtractedPrograms { - loaded: found, - missing: _, - unloaded: _, - } = cache.extract( + let extracted = cache.extract( &TestWorkingSlot(20), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -2336,14 +2345,10 @@ mod tests { .into_iter(), ); - assert!(match_slot(&found, &program1, 0, 20)); - assert!(match_slot(&found, &program2, 10, 20)); + assert!(match_slot(&extracted, &program1, 0, 20)); + assert!(match_slot(&extracted, &program2, 10, 20)); - let ExtractedPrograms { - loaded: found, - missing, - unloaded: _, - } = cache.extract( + let extracted = cache.extract( &TestWorkingSlot(6), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -2352,18 +2357,14 @@ mod tests { .into_iter(), ); - assert!(match_slot(&found, &program1, 5, 6)); - assert!(missing.contains(&(program2, 1))); + assert!(match_slot(&extracted, &program1, 5, 6)); + assert!(match_missing(&extracted, &program2, false)); // Pruning slot 5 will remove program1 entry deployed at slot 5. // On fork chaining from slot 5, the entry deployed at slot 0 will become visible. cache.prune_by_deployment_slot(5); - let ExtractedPrograms { - loaded: found, - missing: _, - unloaded: _, - } = cache.extract( + let extracted = cache.extract( &TestWorkingSlot(20), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -2372,14 +2373,10 @@ mod tests { .into_iter(), ); - assert!(match_slot(&found, &program1, 0, 20)); - assert!(match_slot(&found, &program2, 10, 20)); + assert!(match_slot(&extracted, &program1, 0, 20)); + assert!(match_slot(&extracted, &program2, 10, 20)); - let ExtractedPrograms { - loaded: found, - missing, - unloaded: _, - } = cache.extract( + let extracted = cache.extract( &TestWorkingSlot(6), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -2388,18 +2385,14 @@ mod tests { .into_iter(), ); - assert!(match_slot(&found, &program1, 0, 6)); - assert!(missing.contains(&(program2, 1))); + assert!(match_slot(&extracted, &program1, 0, 6)); + assert!(match_missing(&extracted, &program2, false)); // Pruning slot 10 will remove program2 entry deployed at slot 10. // As there is no other entry for program2, extract() will return it as missing. 
cache.prune_by_deployment_slot(10); - let ExtractedPrograms { - loaded: found, - missing: _, - unloaded: _, - } = cache.extract( + let extracted = cache.extract( &TestWorkingSlot(20), vec![ (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), @@ -2408,8 +2401,8 @@ mod tests { .into_iter(), ); - assert!(match_slot(&found, &program1, 0, 20)); - assert!(missing.contains(&(program2, 1))); + assert!(match_slot(&extracted, &program1, 0, 20)); + assert!(match_missing(&extracted, &program2, false)); } #[test] diff --git a/program-runtime/src/timings.rs b/program-runtime/src/timings.rs index 0e2e4956a55889..8eeb9c5a005cde 100644 --- a/program-runtime/src/timings.rs +++ b/program-runtime/src/timings.rs @@ -300,13 +300,6 @@ impl ThreadExecuteTimings { } pub fn accumulate(&mut self, other: &ThreadExecuteTimings) { - self.execute_timings.saturating_add_in_place( - ExecuteTimingType::TotalBatchesLen, - *other - .execute_timings - .metrics - .index(ExecuteTimingType::TotalBatchesLen), - ); self.execute_timings.accumulate(&other.execute_timings); saturating_add_assign!(self.total_thread_us, other.total_thread_us); saturating_add_assign!( diff --git a/programs/address-lookup-table/src/lib.rs b/programs/address-lookup-table/src/lib.rs index 11d9b4b0dd34e4..737ec32c8f6782 100644 --- a/programs/address-lookup-table/src/lib.rs +++ b/programs/address-lookup-table/src/lib.rs @@ -2,13 +2,14 @@ #![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(specialization))] #![cfg_attr(RUSTC_NEEDS_PROC_MACRO_HYGIENE, feature(proc_macro_hygiene))] +#[cfg(not(target_os = "solana"))] pub mod processor; #[deprecated( since = "1.17.0", - note = "Please use `solana_sdk::address_lookup_table` instead" + note = "Please use `solana_program::address_lookup_table` instead" )] -pub use solana_sdk::address_lookup_table::{ +pub use solana_program::address_lookup_table::{ error, instruction, program::{check_id, id, ID}, state, diff --git a/programs/bpf_loader/Cargo.toml b/programs/bpf_loader/Cargo.toml index 16a52c07928620..48d771b8656828 100644 --- a/programs/bpf_loader/Cargo.toml +++ b/programs/bpf_loader/Cargo.toml @@ -27,6 +27,7 @@ assert_matches = { workspace = true } memoffset = { workspace = true } rand = { workspace = true } solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } +test-case = { workspace = true } [lib] crate-type = ["lib"] diff --git a/programs/bpf_loader/src/syscalls/cpi.rs b/programs/bpf_loader/src/syscalls/cpi.rs index 715662c4a06dbe..13f9cbaf905275 100644 --- a/programs/bpf_loader/src/syscalls/cpi.rs +++ b/programs/bpf_loader/src/syscalls/cpi.rs @@ -1302,11 +1302,14 @@ fn update_caller_account( caller_account.vm_data_addr, caller_account.original_data_len, )? { - // Since each instruction account is directly mapped in a memory region - // with a *fixed* length, upon returning from CPI we must ensure that the - // current capacity is at least the original length (what is mapped in - // memory), so that the account's memory region never points to an - // invalid address. + // Since each instruction account is directly mapped in a memory region with a *fixed* + // length, upon returning from CPI we must ensure that the current capacity is at least + // the original length (what is mapped in memory), so that the account's memory region + // never points to an invalid address. + // + // Note that the capacity can be smaller than the original length only if the account is + // reallocated using the AccountSharedData API directly (deprecated). 
BorrowedAccount + // and CoW don't trigger this, see BorrowedAccount::make_data_mut. let min_capacity = caller_account.original_data_len; if callee_account.capacity() < min_capacity { callee_account @@ -1314,10 +1317,13 @@ fn update_caller_account( zero_all_mapped_spare_capacity = true; } - // If an account's data pointer has changed - because of CoW, reserve() as called above - // or because of using AccountSharedData directly (deprecated) - we must update the - // corresponding MemoryRegion in the caller's address space. Address spaces are fixed so - // we don't need to update the MemoryRegion's length. + // If an account's data pointer has changed we must update the corresponding + // MemoryRegion in the caller's address space. Address spaces are fixed so we don't need + // to update the MemoryRegion's length. + // + // An account's data pointer can change if the account is reallocated because of CoW, + // because of BorrowedAccount::make_data_mut or by a program that uses the + // AccountSharedData API directly (deprecated). let callee_ptr = callee_account.get_data().as_ptr() as u64; if region.host_addr.get() != callee_ptr { region.host_addr.set(callee_ptr); @@ -1328,7 +1334,6 @@ fn update_caller_account( let prev_len = *caller_account.ref_to_len_in_vm.get()? as usize; let post_len = callee_account.get_data().len(); - let realloc_bytes_used = post_len.saturating_sub(caller_account.original_data_len); if prev_len != post_len { let max_increase = if direct_mapping && !invoke_context.get_check_aligned() { 0 @@ -1352,37 +1357,8 @@ fn update_caller_account( if post_len < prev_len { if direct_mapping { // We have two separate regions to zero out: the account data - // and the realloc region. - // - // Here we zero the account data region. - let spare_len = if zero_all_mapped_spare_capacity { - // In the unlikely case where the account data vector has - // changed - which can happen during CoW - we zero the whole - // extra capacity up to the original data length. - // - // The extra capacity up to original data length is - // accessible from the vm and since it's uninitialized - // memory, it could be a source of non determinism. - caller_account.original_data_len - } else { - // If the allocation has not changed, we only zero the - // difference between the previous and current lengths. The - // rest of the memory contains whatever it contained before, - // which is deterministic. - prev_len - } - .saturating_sub(post_len); - if spare_len > 0 { - let dst = callee_account - .spare_data_capacity_mut()? - .get_mut(..spare_len) - .ok_or_else(|| Box::new(InstructionError::AccountDataTooSmall))? - .as_mut_ptr(); - // Safety: we check bounds above - unsafe { ptr::write_bytes(dst, 0, spare_len) }; - } - - // Here we zero the realloc region. + // and the realloc region. Here we zero the realloc region, the + // data region is zeroed further down below. 
// // This is done for compatibility but really only necessary for // the fringe case of a program calling itself, see @@ -1464,50 +1440,91 @@ fn update_caller_account( )?; *serialized_len_ptr = post_len as u64; } - if !direct_mapping { - let to_slice = &mut caller_account.serialized_data; - let from_slice = callee_account - .get_data() - .get(0..post_len) - .ok_or(SyscallError::InvalidLength)?; - if to_slice.len() != from_slice.len() { - return Err(Box::new(InstructionError::AccountDataTooSmall)); - } - to_slice.copy_from_slice(from_slice); - } else if realloc_bytes_used > 0 { - // In the is_loader_deprecated case, we must have failed with - // InvalidRealloc by now. - debug_assert!(!is_loader_deprecated); - - let to_slice = { - // If a callee reallocs an account, we write into the caller's - // realloc region regardless of whether the caller has write - // permissions to the account or not. If the callee has been able to - // make changes, it means they had permissions to do so, and here - // we're just going to reflect those changes to the caller's frame. + + if direct_mapping { + // Here we zero the account data region. + // + // If zero_all_mapped_spare_capacity=true, we need to zero regardless of whether the account + // size changed, because the underlying vector holding the account might have been + // reallocated and contain uninitialized memory in the spare capacity. + // + // See TEST_CPI_CHANGE_ACCOUNT_DATA_MEMORY_ALLOCATION for an example of + // this case. + let spare_len = if zero_all_mapped_spare_capacity { + // In the unlikely case where the account data vector has + // changed - which can happen during CoW - we zero the whole + // extra capacity up to the original data length. // - // Therefore we temporarily configure the realloc region as writable - // then set it back to whatever state it had. - let realloc_region = caller_account - .realloc_region(memory_mapping, is_loader_deprecated)? - .unwrap(); // unwrapping here is fine, we asserted !is_loader_deprecated - let original_state = realloc_region.state.replace(MemoryState::Writable); - defer! { - realloc_region.state.set(original_state); - }; + // The extra capacity up to original data length is + // accessible from the vm and since it's uninitialized + // memory, it could be a source of non determinism. + caller_account.original_data_len + } else { + // If the allocation has not changed, we only zero the + // difference between the previous and current lengths. The + // rest of the memory contains whatever it contained before, + // which is deterministic. + prev_len + } + .saturating_sub(post_len); + + if spare_len > 0 { + let dst = callee_account + .spare_data_capacity_mut()? + .get_mut(..spare_len) + .ok_or_else(|| Box::new(InstructionError::AccountDataTooSmall))? + .as_mut_ptr(); + // Safety: we check bounds above + unsafe { ptr::write_bytes(dst, 0, spare_len) }; + } - translate_slice_mut::( - memory_mapping, - caller_account - .vm_data_addr - .saturating_add(caller_account.original_data_len as u64), - realloc_bytes_used as u64, - invoke_context.get_check_aligned(), - )? - }; + // Propagate changes to the realloc region in the callee up to the caller. + let realloc_bytes_used = post_len.saturating_sub(caller_account.original_data_len); + if realloc_bytes_used > 0 { + // In the is_loader_deprecated case, we must have failed with + // InvalidRealloc by now. 
+ debug_assert!(!is_loader_deprecated); + + let to_slice = { + // If a callee reallocs an account, we write into the caller's + // realloc region regardless of whether the caller has write + // permissions to the account or not. If the callee has been able to + // make changes, it means they had permissions to do so, and here + // we're just going to reflect those changes to the caller's frame. + // + // Therefore we temporarily configure the realloc region as writable + // then set it back to whatever state it had. + let realloc_region = caller_account + .realloc_region(memory_mapping, is_loader_deprecated)? + .unwrap(); // unwrapping here is fine, we asserted !is_loader_deprecated + let original_state = realloc_region.state.replace(MemoryState::Writable); + defer! { + realloc_region.state.set(original_state); + }; + + translate_slice_mut::( + memory_mapping, + caller_account + .vm_data_addr + .saturating_add(caller_account.original_data_len as u64), + realloc_bytes_used as u64, + invoke_context.get_check_aligned(), + )? + }; + let from_slice = callee_account + .get_data() + .get(caller_account.original_data_len..post_len) + .ok_or(SyscallError::InvalidLength)?; + if to_slice.len() != from_slice.len() { + return Err(Box::new(InstructionError::AccountDataTooSmall)); + } + to_slice.copy_from_slice(from_slice); + } + } else { + let to_slice = &mut caller_account.serialized_data; let from_slice = callee_account .get_data() - .get(caller_account.original_data_len..post_len) + .get(0..post_len) .ok_or(SyscallError::InvalidLength)?; if to_slice.len() != from_slice.len() { return Err(Box::new(InstructionError::AccountDataTooSmall)); diff --git a/programs/bpf_loader/src/syscalls/mem_ops.rs b/programs/bpf_loader/src/syscalls/mem_ops.rs index 7e9b69fc6f310c..3255c7d07a0e20 100644 --- a/programs/bpf_loader/src/syscalls/mem_ops.rs +++ b/programs/bpf_loader/src/syscalls/mem_ops.rs @@ -289,8 +289,8 @@ fn iter_memory_pair_chunks( src_access: AccessType, src_addr: u64, dst_access: AccessType, - mut dst_addr: u64, - n: u64, + dst_addr: u64, + n_bytes: u64, memory_mapping: &MemoryMapping, reverse: bool, mut fun: F, @@ -299,52 +299,90 @@ where T: Default, F: FnMut(*const u8, *const u8, usize) -> Result, { - let mut src_chunk_iter = MemoryChunkIterator::new(memory_mapping, src_access, src_addr, n) - .map_err(EbpfError::from)?; - loop { - // iterate source chunks - let (src_region, src_vm_addr, mut src_len) = match if reverse { - src_chunk_iter.next_back() - } else { - src_chunk_iter.next() - } { - Some(item) => item?, - None => break, - }; - - let mut src_host_addr = Result::from(src_region.vm_to_host(src_vm_addr, src_len as u64))?; - let mut dst_chunk_iter = MemoryChunkIterator::new(memory_mapping, dst_access, dst_addr, n) + let mut src_chunk_iter = + MemoryChunkIterator::new(memory_mapping, src_access, src_addr, n_bytes) + .map_err(EbpfError::from)?; + let mut dst_chunk_iter = + MemoryChunkIterator::new(memory_mapping, dst_access, dst_addr, n_bytes) .map_err(EbpfError::from)?; - // iterate over destination chunks until this source chunk has been completely copied - while src_len > 0 { - loop { - let (dst_region, dst_vm_addr, dst_len) = match if reverse { - dst_chunk_iter.next_back() + + let mut src_chunk = None; + let mut dst_chunk = None; + + macro_rules! memory_chunk { + ($chunk_iter:ident, $chunk:ident) => { + if let Some($chunk) = &mut $chunk { + // Keep processing the current chunk + $chunk + } else { + // This is either the first call or we've processed all the bytes in the current + // chunk. 
Move to the next one. + let chunk = match if reverse { + $chunk_iter.next_back() } else { - dst_chunk_iter.next() + $chunk_iter.next() } { Some(item) => item?, None => break, }; - let dst_host_addr = - Result::from(dst_region.vm_to_host(dst_vm_addr, dst_len as u64))?; - let chunk_len = src_len.min(dst_len); - fun( - src_host_addr as *const u8, - dst_host_addr as *const u8, - chunk_len, - )?; - src_len = src_len.saturating_sub(chunk_len); - if reverse { - dst_addr = dst_addr.saturating_sub(chunk_len as u64); - } else { - dst_addr = dst_addr.saturating_add(chunk_len as u64); - } - if src_len == 0 { - break; - } - src_host_addr = src_host_addr.saturating_add(chunk_len as u64); + $chunk.insert(chunk) } + }; + } + + loop { + let (src_region, src_chunk_addr, src_remaining) = memory_chunk!(src_chunk_iter, src_chunk); + let (dst_region, dst_chunk_addr, dst_remaining) = memory_chunk!(dst_chunk_iter, dst_chunk); + + // We always process same-length pairs + let chunk_len = *src_remaining.min(dst_remaining); + + let (src_host_addr, dst_host_addr) = { + let (src_addr, dst_addr) = if reverse { + // When scanning backwards not only we want to scan regions from the end, + // we want to process the memory within regions backwards as well. + ( + src_chunk_addr + .saturating_add(*src_remaining as u64) + .saturating_sub(chunk_len as u64), + dst_chunk_addr + .saturating_add(*dst_remaining as u64) + .saturating_sub(chunk_len as u64), + ) + } else { + (*src_chunk_addr, *dst_chunk_addr) + }; + + ( + Result::from(src_region.vm_to_host(src_addr, chunk_len as u64))?, + Result::from(dst_region.vm_to_host(dst_addr, chunk_len as u64))?, + ) + }; + + fun( + src_host_addr as *const u8, + dst_host_addr as *const u8, + chunk_len, + )?; + + // Update how many bytes we have left to scan in each chunk + *src_remaining = src_remaining.saturating_sub(chunk_len); + *dst_remaining = dst_remaining.saturating_sub(chunk_len); + + if !reverse { + // We've scanned `chunk_len` bytes so we move the vm address forward. In reverse + // mode we don't do this since we make progress by decreasing src_len and + // dst_len. 
+ *src_chunk_addr = src_chunk_addr.saturating_add(chunk_len as u64); + *dst_chunk_addr = dst_chunk_addr.saturating_add(chunk_len as u64); + } + + if *src_remaining == 0 { + src_chunk = None; + } + + if *dst_remaining == 0 { + dst_chunk = None; } } @@ -471,11 +509,13 @@ impl<'a> DoubleEndedIterator for MemoryChunkIterator<'a> { #[cfg(test)] #[allow(clippy::indexing_slicing)] +#[allow(clippy::arithmetic_side_effects)] mod tests { use { super::*, assert_matches::assert_matches, solana_rbpf::{ebpf::MM_PROGRAM_START, program::SBPFVersion}, + test_case::test_case, }; fn to_chunk_vec<'a>( @@ -734,72 +774,59 @@ mod tests { memmove_non_contiguous(MM_PROGRAM_START, MM_PROGRAM_START + 8, 4, &memory_mapping).unwrap(); } - #[test] - fn test_overlapping_memmove_non_contiguous_right() { + #[test_case(&[], (0, 0, 0); "no regions")] + #[test_case(&[10], (1, 10, 0); "single region 0 len")] + #[test_case(&[10], (0, 5, 5); "single region no overlap")] + #[test_case(&[10], (0, 0, 10) ; "single region complete overlap")] + #[test_case(&[10], (2, 0, 5); "single region partial overlap start")] + #[test_case(&[10], (0, 1, 6); "single region partial overlap middle")] + #[test_case(&[10], (2, 5, 5); "single region partial overlap end")] + #[test_case(&[3, 5], (0, 5, 2) ; "two regions no overlap, single source region")] + #[test_case(&[4, 7], (0, 5, 5) ; "two regions no overlap, multiple source regions")] + #[test_case(&[3, 8], (0, 0, 11) ; "two regions complete overlap")] + #[test_case(&[2, 9], (3, 0, 5) ; "two regions partial overlap start")] + #[test_case(&[3, 9], (1, 2, 5) ; "two regions partial overlap middle")] + #[test_case(&[7, 3], (2, 6, 4) ; "two regions partial overlap end")] + #[test_case(&[2, 6, 3, 4], (0, 10, 2) ; "many regions no overlap, single source region")] + #[test_case(&[2, 1, 2, 5, 6], (2, 10, 4) ; "many regions no overlap, multiple source regions")] + #[test_case(&[8, 1, 3, 6], (0, 0, 18) ; "many regions complete overlap")] + #[test_case(&[7, 3, 1, 4, 5], (5, 0, 8) ; "many regions overlap start")] + #[test_case(&[1, 5, 2, 9, 3], (5, 4, 8) ; "many regions overlap middle")] + #[test_case(&[3, 9, 1, 1, 2, 1], (2, 9, 8) ; "many regions overlap end")] + fn test_memmove_non_contiguous( + regions: &[usize], + (src_offset, dst_offset, len): (usize, usize, usize), + ) { let config = Config { aligned_memory_mapping: false, ..Config::default() }; - let mem1 = vec![0x11; 1]; - let mut mem2 = vec![0x22; 2]; - let mut mem3 = vec![0x33; 3]; - let mut mem4 = vec![0x44; 4]; - let memory_mapping = MemoryMapping::new( - vec![ - MemoryRegion::new_readonly(&mem1, MM_PROGRAM_START), - MemoryRegion::new_writable(&mut mem2, MM_PROGRAM_START + 1), - MemoryRegion::new_writable(&mut mem3, MM_PROGRAM_START + 3), - MemoryRegion::new_writable(&mut mem4, MM_PROGRAM_START + 6), - ], - &config, - &SBPFVersion::V2, - ) - .unwrap(); - - // overlapping memmove right - the implementation will copy backwards - assert_eq!( - memmove_non_contiguous(MM_PROGRAM_START + 1, MM_PROGRAM_START, 7, &memory_mapping) - .unwrap(), - 0 - ); - assert_eq!(&mem1, &[0x11]); - assert_eq!(&mem2, &[0x11, 0x22]); - assert_eq!(&mem3, &[0x22, 0x33, 0x33]); - assert_eq!(&mem4, &[0x33, 0x44, 0x44, 0x44]); - } - - #[test] - fn test_overlapping_memmove_non_contiguous_left() { - let config = Config { - aligned_memory_mapping: false, - ..Config::default() + let (mem, memory_mapping) = build_memory_mapping(regions, &config); + + // flatten the memory so we can memmove it with ptr::copy + let mut expected_memory = flatten_memory(&mem); + unsafe { + 
std::ptr::copy( + expected_memory.as_ptr().add(src_offset), + expected_memory.as_mut_ptr().add(dst_offset), + len, + ) }; - let mut mem1 = vec![0x11; 1]; - let mut mem2 = vec![0x22; 2]; - let mut mem3 = vec![0x33; 3]; - let mut mem4 = vec![0x44; 4]; - let memory_mapping = MemoryMapping::new( - vec![ - MemoryRegion::new_writable(&mut mem1, MM_PROGRAM_START), - MemoryRegion::new_writable(&mut mem2, MM_PROGRAM_START + 1), - MemoryRegion::new_writable(&mut mem3, MM_PROGRAM_START + 3), - MemoryRegion::new_writable(&mut mem4, MM_PROGRAM_START + 6), - ], - &config, - &SBPFVersion::V2, + + // do our memmove + memmove_non_contiguous( + MM_PROGRAM_START + dst_offset as u64, + MM_PROGRAM_START + src_offset as u64, + len as u64, + &memory_mapping, ) .unwrap(); - // overlapping memmove left - the implementation will copy forward - assert_eq!( - memmove_non_contiguous(MM_PROGRAM_START, MM_PROGRAM_START + 1, 7, &memory_mapping) - .unwrap(), - 0 - ); - assert_eq!(&mem1, &[0x22]); - assert_eq!(&mem2, &[0x22, 0x33]); - assert_eq!(&mem3, &[0x33, 0x33, 0x44]); - assert_eq!(&mem4, &[0x44, 0x44, 0x44, 0x44]); + // flatten memory post our memmove + let memory = flatten_memory(&mem); + + // compare libc's memmove with ours + assert_eq!(expected_memory, memory); } #[test] @@ -910,4 +937,33 @@ mod tests { unsafe { memcmp(b"oobar", b"obarb", 5) } ); } + + fn build_memory_mapping<'a>( + regions: &[usize], + config: &'a Config, + ) -> (Vec<Vec<u8>>, MemoryMapping<'a>) { + let mut regs = vec![]; + let mut mem = Vec::new(); + let mut offset = 0; + for (i, region_len) in regions.iter().enumerate() { + mem.push( + (0..*region_len) + .map(|x| (i * 10 + x) as u8) + .collect::<Vec<u8>>(), + ); + regs.push(MemoryRegion::new_writable( + &mut mem[i], + MM_PROGRAM_START + offset as u64, + )); + offset += *region_len; + } + + let memory_mapping = MemoryMapping::new(regs, config, &SBPFVersion::V2).unwrap(); + + (mem, memory_mapping) + } + + fn flatten_memory(mem: &[Vec<u8>]) -> Vec<u8> { + mem.iter().flatten().copied().collect() + } } diff --git a/programs/bpf_loader/src/syscalls/mod.rs b/programs/bpf_loader/src/syscalls/mod.rs index 918ade08fd1060..3e6562b8ed7b8a 100644 --- a/programs/bpf_loader/src/syscalls/mod.rs +++ b/programs/bpf_loader/src/syscalls/mod.rs @@ -273,7 +273,7 @@ pub fn create_program_runtime_environment_v1<'a>( max_call_depth: compute_budget.max_call_depth, stack_frame_size: compute_budget.stack_frame_size, enable_address_translation: true, - enable_stack_frame_gaps: true, + enable_stack_frame_gaps: !feature_set.is_active(&bpf_account_data_direct_mapping::id()), instruction_meter_checkpoint_distance: 10000, enable_instruction_meter: true, enable_instruction_tracing: debugging_features, @@ -3203,9 +3203,9 @@ mod tests { let mut sysvar_cache = SysvarCache::default(); sysvar_cache.set_clock(src_clock.clone()); - sysvar_cache.set_epoch_schedule(src_epochschedule); + sysvar_cache.set_epoch_schedule(src_epochschedule.clone()); sysvar_cache.set_fees(src_fees.clone()); - sysvar_cache.set_rent(src_rent); + sysvar_cache.set_rent(src_rent.clone()); sysvar_cache.set_epoch_rewards(src_rewards); let transaction_accounts = vec![ diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 31b5be861aa923..99eb6d6cb54742 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -472,7 +472,7 @@ dependencies = [ "matchit", "memchr", "mime", - "percent-encoding 2.3.0", + "percent-encoding 2.3.1", "pin-project-lite", "rustversion", "serde", @@ -686,6 +686,16 @@ dependencies = [ "hashbrown 0.13.2", ] +[[package]] +name = "borsh"
+version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9897ef0f1bd2362169de6d7e436ea2237dc1085d7d1e4db75f4be34d86f309d1" +dependencies = [ + "borsh-derive 1.2.1", + "cfg_aliases", +] + [[package]] name = "borsh-derive" version = "0.9.3" @@ -712,6 +722,20 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "borsh-derive" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "478b41ff04256c5c8330f3dfdaaae2a5cc976a8e75088bafa4625b0d0208de8c" +dependencies = [ + "once_cell", + "proc-macro-crate 2.0.1", + "proc-macro2", + "quote", + "syn 2.0.39", + "syn_derive", +] + [[package]] name = "borsh-derive-internal" version = "0.9.3" @@ -914,6 +938,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" + [[package]] name = "chrono" version = "0.4.31" @@ -1701,11 +1731,11 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" dependencies = [ - "percent-encoding 2.3.0", + "percent-encoding 2.3.1", ] [[package]] @@ -1716,9 +1746,9 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "fs-err" -version = "2.10.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5fd9bcbe8b1087cbd395b51498c01bc997cef73e778a80b77a811af5e2d29f" +checksum = "88a41f105fe1d5b6b34b2055e3dc59bb79b46b48b2040b9e6c7b4b5de097aa41" dependencies = [ "autocfg", ] @@ -2232,9 +2262,9 @@ dependencies = [ [[package]] name = "idna" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -2362,9 +2392,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.65" +version = "0.3.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54c0c35952f67de54bb584e9fd912b3023117cbafc0a77d8f3dee1fb5f572fe8" +checksum = "cee9c64da59eae3b50095c18d3e74f8b73c0b86d2792824ff01bbce68ba229ca" dependencies = [ "wasm-bindgen", ] @@ -3119,7 +3149,7 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c11e44798ad209ccdd91fc192f0526a369a01234f7373e1b141c96d7cee4f0e" dependencies = [ - "proc-macro-crate 1.1.3", + "proc-macro-crate 2.0.1", "proc-macro2", "quote", "syn 2.0.39", @@ -3178,9 +3208,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.59" +version = "0.10.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a257ad03cd8fb16ad4172fedf8094451e1af1c4b70097636ef2eac9a5f0cc33" +checksum = "6b8419dc8cc6d866deb801274bba2e6f8f6108c1bb7fcc10ee5ab864931dbb45" dependencies = [ "bitflags 
2.4.1", "cfg-if 1.0.0", @@ -3219,9 +3249,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.95" +version = "0.9.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40a4130519a360279579c2053038317e40eff64d13fd3f004f9e1b72b8a6aaf9" +checksum = "c3eaad34cdd97d81de97964fc7f29e2d104f483840d906ef56daa1912338460b" dependencies = [ "cc", "libc", @@ -3243,7 +3273,7 @@ dependencies = [ "futures-util", "js-sys", "lazy_static", - "percent-encoding 2.3.0", + "percent-encoding 2.3.1", "pin-project", "rand 0.8.5", "thiserror", @@ -3390,9 +3420,9 @@ checksum = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" [[package]] name = "percent-encoding" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "percentage" @@ -3593,9 +3623,9 @@ dependencies = [ [[package]] name = "prio-graph" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78dd2fa9ca0901b4d0dbf51d9862d7e3fb004605e4f4b4132472c3d08e7d901b" +checksum = "952091df80157ff6f267c9bcb6ad68e42405e217bd83268f2aedee0aa4f03b5c" [[package]] name = "proc-macro-crate" @@ -3616,6 +3646,16 @@ dependencies = [ "toml", ] +[[package]] +name = "proc-macro-crate" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97dc5fea232fc28d2f597b37c4876b348a40e33f3b02cc975c8d006d78d94b1a" +dependencies = [ + "toml_datetime", + "toml_edit", +] + [[package]] name = "proc-macro-error" version = "1.0.4" @@ -3642,9 +3682,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.69" +version = "1.0.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" +checksum = "39278fbbf5fb4f646ce651690877f89d1c5811a3d4acb27700c1cb3cdb78fd3b" dependencies = [ "unicode-ident", ] @@ -3718,7 +3758,7 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d464fae65fff2680baf48019211ce37aaec0c78e9264c84a3e484717f965104e" dependencies = [ - "percent-encoding 2.3.0", + "percent-encoding 2.3.1", ] [[package]] @@ -4012,7 +4052,7 @@ dependencies = [ "mime", "native-tls", "once_cell", - "percent-encoding 2.3.0", + "percent-encoding 2.3.1", "pin-project-lite", "rustls", "rustls-pemfile 1.0.0", @@ -4025,7 +4065,7 @@ dependencies = [ "tokio-rustls", "tokio-util 0.7.1", "tower-service", - "url 2.4.1", + "url 2.5.0", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -4147,9 +4187,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.8" +version = "0.21.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "446e14c5cda4f3f30fe71863c34ec70f5ac79d6087097ad0bb433e1be5edf04c" +checksum = "629648aced5775d558af50b2b4c7b02983a04b312126d45eeead26e7caa498b9" dependencies = [ "log", "ring 0.17.3", @@ -4304,9 +4344,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.192" +version = "1.0.193" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bca2a08484b285dcb282d0f67b26cadc0df8b19f8c12502c13d966bf9482f001" +checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89" dependencies = [ "serde_derive", ] @@ -4322,9 +4362,9 @@ dependencies = [ [[package]] name = 
"serde_derive" -version = "1.0.192" +version = "1.0.193" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6c7207fbec9faa48073f3e3074cbe553af6ea512d7c21ba46e434e70ea9fbc1" +checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" dependencies = [ "proc-macro2", "quote", @@ -4718,7 +4758,7 @@ dependencies = [ name = "solana-banks-client" version = "1.18.0" dependencies = [ - "borsh 0.10.3", + "borsh 1.2.1", "futures 0.3.29", "solana-banks-interface", "solana-program", @@ -4828,7 +4868,7 @@ dependencies = [ "thiserror", "tiny-bip39", "uriparse", - "url 2.4.1", + "url 2.5.0", ] [[package]] @@ -4842,7 +4882,7 @@ dependencies = [ "serde_yaml", "solana-clap-utils", "solana-sdk", - "url 2.4.1", + "url 2.5.0", ] [[package]] @@ -5349,7 +5389,7 @@ dependencies = [ "solana-sdk", "solana-version", "tokio", - "url 2.4.1", + "url 2.5.0", ] [[package]] @@ -5415,6 +5455,7 @@ dependencies = [ "blake3", "borsh 0.10.3", "borsh 0.9.3", + "borsh 1.2.1", "bs58", "bv", "bytemuck", @@ -5527,7 +5568,7 @@ dependencies = [ "tokio-stream", "tokio-tungstenite", "tungstenite", - "url 2.4.1", + "url 2.5.0", ] [[package]] @@ -6197,7 +6238,7 @@ dependencies = [ "base64 0.21.5", "bincode", "bitflags 2.4.1", - "borsh 0.10.3", + "borsh 1.2.1", "bs58", "bytemuck", "byteorder 1.5.0", @@ -6993,6 +7034,18 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "syn_derive" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1329189c02ff984e9736652b1631330da25eaa6bc639089ed4915d25446cbe7b" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.39", +] + [[package]] name = "sync_wrapper" version = "0.1.1" @@ -7131,9 +7184,9 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "test-case" -version = "3.2.1" +version = "3.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8f1e820b7f1d95a0cdbf97a5df9de10e1be731983ab943e56703ac1b8e9d425" +checksum = "eb2550dd13afcd286853192af8601920d959b14c401fcece38071d53bf0768a8" dependencies = [ "test-case-macros", ] @@ -7423,6 +7476,23 @@ dependencies = [ "serde", ] +[[package]] +name = "toml_datetime" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" + +[[package]] +name = "toml_edit" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338" +dependencies = [ + "indexmap 2.1.0", + "toml_datetime", + "winnow", +] + [[package]] name = "tonic" version = "0.9.2" @@ -7441,7 +7511,7 @@ dependencies = [ "http-body", "hyper", "hyper-timeout", - "percent-encoding 2.3.0", + "percent-encoding 2.3.1", "pin-project", "prost", "rustls-pemfile 1.0.0", @@ -7584,7 +7654,7 @@ dependencies = [ "rustls", "sha1", "thiserror", - "url 2.4.1", + "url 2.5.0", "utf-8", "webpki-roots 0.24.0", ] @@ -7703,13 +7773,13 @@ dependencies = [ [[package]] name = "url" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" +checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" dependencies = [ "form_urlencoded", - "idna 0.4.0", - "percent-encoding 2.3.0", + "idna 0.5.0", + "percent-encoding 2.3.1", ] [[package]] @@ -7783,9 +7853,9 @@ checksum = 
"9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.88" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7daec296f25a1bae309c0cd5c29c4b260e510e6d813c286b19eaadf409d40fce" +checksum = "0ed0d4f68a3015cc185aff4db9506a015f4b96f95303897bfa23f846db54064e" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -7793,9 +7863,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.88" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e397f4664c0e4e428e8313a469aaa58310d302159845980fd23b0f22a847f217" +checksum = "1b56f625e64f3a1084ded111c4d5f477df9f8c92df113852fa5a374dbda78826" dependencies = [ "bumpalo", "log", @@ -7820,9 +7890,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.88" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5961017b3b08ad5f3fe39f1e79877f8ee7c23c5e5fd5eb80de95abc41f1f16b2" +checksum = "0162dbf37223cd2afce98f3d0785506dcb8d266223983e4b5b525859e6e182b2" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -7830,9 +7900,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.88" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5353b8dab669f5e10f5bd76df26a9360c748f054f862ff5f3f8aae0c7fb3907" +checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" dependencies = [ "proc-macro2", "quote", @@ -7843,9 +7913,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.88" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d046c5d029ba91a1ed14da14dca44b68bf2f124cfbaf741c54151fdb3e0750b" +checksum = "7ab9b36309365056cd639da3134bf87fa8f3d86008abf99e612384a6eecd459f" [[package]] name = "web-sys" @@ -8058,6 +8128,15 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" +[[package]] +name = "winnow" +version = "0.5.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7e87b8dfbe3baffbe687eef2e164e32286eff31a5ee16463ce03d991643ec94" +dependencies = [ + "memchr", +] + [[package]] name = "winreg" version = "0.50.0" diff --git a/programs/sbf/Cargo.toml b/programs/sbf/Cargo.toml index 7ab496de8eebd4..e61ad6e1aaf724 100644 --- a/programs/sbf/Cargo.toml +++ b/programs/sbf/Cargo.toml @@ -91,7 +91,7 @@ solana-cli-output = { workspace = true } solana-logger = { workspace = true } solana-measure = { workspace = true } solana-program-runtime = { workspace = true } -solana-runtime = { workspace = true } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } solana-sbf-rust-invoke = { workspace = true } solana-sbf-rust-realloc = { workspace = true, features = ["default"] } solana-sbf-rust-realloc-invoke = { workspace = true } @@ -186,15 +186,16 @@ targets = ["x86_64-unknown-linux-gnu"] # # in `../../Cargo.toml`. 
# -# `spl-token`, in turn, depends on `solana-program`, which we explicitly specify above as a local -# path dependency: +# `spl-token`, in turn, depends on `solana-program`, which we explicitly specify +# above as a local path dependency: # # solana-program = { path = "../../sdk/program", version = "=1.16.0" } # -# Unfortunately, Cargo will try to resolve the `spl-token` `solana-program` dependency only using -# what is available on crates.io. Crates.io normally contains a previous version of these crates, -# and we end up with two versions of `solana-program` and `solana-zk-token-sdk` and all of their -# dependencies in our build tree. +# Unfortunately, Cargo will try to resolve the `spl-token` `solana-program` +# dependency only using what is available on crates.io. Crates.io normally +# contains a previous version of these crates, and we end up with two versions +# of `solana-program` and `solana-zk-token-sdk` and all of their dependencies in +# our build tree. # # If you are developing downstream using non-crates-io solana-program (local or # forked repo, or from github rev, eg), duplicate the following patch statements @@ -203,7 +204,7 @@ targets = ["x86_64-unknown-linux-gnu"] # -p solana-zk-token-sdk` to remove extraneous versions from your Cargo.lock # file. # -# There is a similar override in `../../Cargo.toml`. Please keep both comments and the -# overrides in sync. +# There is a similar override in `../../Cargo.toml`. Please keep both comments +# and the overrides in sync. solana-program = { path = "../../sdk/program" } solana-zk-token-sdk = { path = "../../zk-token-sdk" } diff --git a/programs/sbf/benches/bpf_loader.rs b/programs/sbf/benches/bpf_loader.rs index 7ef6966a80dbe0..7d4175e5f98cdc 100644 --- a/programs/sbf/benches/bpf_loader.rs +++ b/programs/sbf/benches/bpf_loader.rs @@ -2,6 +2,7 @@ #![cfg(feature = "sbf_c")] #![allow(clippy::uninlined_format_args)] #![allow(clippy::arithmetic_side_effects)] +#![cfg_attr(not(target_arch = "x86_64"), allow(dead_code, unused_imports))] use { solana_rbpf::memory_region::MemoryState, @@ -101,6 +102,7 @@ fn bench_program_create_executable(bencher: &mut Bencher) { } #[bench] +#[cfg(target_arch = "x86_64")] fn bench_program_alu(bencher: &mut Bencher) { let ns_per_s = 1000000000; let one_million = 1000000; @@ -188,12 +190,12 @@ fn bench_program_execute_noop(bencher: &mut Bencher) { .. } = create_genesis_config(50); let bank = Bank::new_for_benches(&genesis_config); - let bank = Arc::new(bank); + let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); let mut bank_client = BankClient::new_shared(bank.clone()); let invoke_program_id = load_program(&bank_client, &bpf_loader::id(), &mint_keypair, "noop"); let bank = bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance the slot"); let mint_pubkey = mint_keypair.pubkey(); diff --git a/programs/sbf/rust/invoke/src/processor.rs b/programs/sbf/rust/invoke/src/processor.rs index a45a6ad2dc149e..1943f8f4b578db 100644 --- a/programs/sbf/rust/invoke/src/processor.rs +++ b/programs/sbf/rust/invoke/src/processor.rs @@ -1315,6 +1315,34 @@ fn process_instruction( }, &vec![0; original_data_len - new_len] ); + + // Realloc to [0xFC; 2]. Here we keep the same length, but realloc the underlying + // vector. CPI must zero even if the length is unchanged. 
+ invoke( + &create_instruction( + *callee_program_id, + &[ + (accounts[ARGUMENT_INDEX].key, true, false), + (callee_program_id, false, false), + ], + vec![0xFC; 2], + ), + accounts, + ) + .unwrap(); + + // Check that [2..20] is zeroed + let new_len = account.data_len(); + assert_eq!(&*account.data.borrow(), &[0xFC; 2]); + assert_eq!( + unsafe { + slice::from_raw_parts( + account.data.borrow().as_ptr().add(new_len), + original_data_len - new_len, + ) + }, + &vec![0; original_data_len - new_len] + ); } TEST_WRITE_ACCOUNT => { msg!("TEST_WRITE_ACCOUNT"); diff --git a/programs/sbf/tests/programs.rs b/programs/sbf/tests/programs.rs index 665109c16d7097..672b79e24a0af1 100644 --- a/programs/sbf/tests/programs.rs +++ b/programs/sbf/tests/programs.rs @@ -64,6 +64,7 @@ use { solana_runtime::{ bank::Bank, bank_client::BankClient, + bank_forks::BankForks, genesis_utils::{ bootstrap_validator_stake_lamports, create_genesis_config, create_genesis_config_with_leader_ex, GenesisConfigInfo, @@ -85,7 +86,12 @@ use { system_program, transaction::{SanitizedTransaction, Transaction, TransactionError}, }, - std::{cell::RefCell, str::FromStr, sync::Arc, time::Duration}, + std::{ + cell::RefCell, + str::FromStr, + sync::{Arc, RwLock}, + time::Duration, + }, }; #[cfg(feature = "sbf_rust")] @@ -97,7 +103,7 @@ fn process_transaction_and_record_inner( Vec>, Vec, ) { - let signature = tx.signatures.get(0).unwrap().clone(); + let signature = tx.signatures.first().unwrap().clone(); let txs = vec![tx]; let tx_batch = bank.prepare_batch_for_tests(txs); let mut results = bank @@ -258,6 +264,7 @@ fn execute_transactions( fn load_program_and_advance_slot( bank_client: &mut BankClient, + bank_forks: &RwLock, loader_id: &Pubkey, payer_keypair: &Keypair, name: &str, @@ -265,7 +272,7 @@ fn load_program_and_advance_slot( let pubkey = load_program(bank_client, loader_id, payer_keypair, name); ( bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks, &Pubkey::default()) .expect("Failed to advance the slot"), pubkey, ) @@ -335,12 +342,13 @@ fn test_program_sbf_sanity() { .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let mut bank_client = BankClient::new(bank); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let mut bank_client = BankClient::new_shared(bank); // Call user program let (_, program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, program.0, @@ -386,12 +394,12 @@ fn test_program_sbf_loader_deprecated() { .accounts .remove(&solana_sdk::feature_set::disable_deploy_of_alloc_free_syscall::id()) .unwrap(); - let bank = Bank::new_for_tests(&genesis_config); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let program_id = create_program(&bank, &bpf_loader_deprecated::id(), program); - let mut bank_client = BankClient::new(bank); + let mut bank_client = BankClient::new_shared(bank); bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance the slot"); let account_metas = vec![AccountMeta::new(mint_keypair.pubkey(), true)]; let instruction = Instruction::new_with_bytes(program_id, &[255], account_metas); @@ -413,7 +421,7 @@ fn test_sol_alloc_free_no_longer_deployable() { mint_keypair, .. 
} = create_genesis_config(50); - let mut bank = Bank::new_for_tests(&genesis_config); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); // Populate loader account with elf that depends on _sol_alloc_free syscall let elf = load_program_from_file("solana_sbf_rust_deprecated_loader"); @@ -457,21 +465,41 @@ fn test_sol_alloc_free_no_longer_deployable() { ); // Enable _sol_alloc_free syscall + let slot = bank.slot(); + drop(bank); + let mut bank = Arc::into_inner(bank_forks.write().unwrap().remove(slot).unwrap()).unwrap(); bank.deactivate_feature(&solana_sdk::feature_set::disable_deploy_of_alloc_free_syscall::id()); bank.clear_signatures(); bank.clear_program_cache(); + let bank = bank_forks + .write() + .unwrap() + .insert(bank) + .clone_without_scheduler(); // Try and finalize the program now that sol_alloc_free is re-enabled assert!(bank.process_transaction(&finalize_tx).is_ok()); let new_slot = bank.slot() + 1; - let mut bank = Bank::new_from_parent(Arc::new(bank), &Pubkey::default(), new_slot); + let bank = bank_forks + .write() + .unwrap() + .insert(Bank::new_from_parent(bank, &Pubkey::default(), new_slot)) + .clone_without_scheduler(); // invoke the program assert!(bank.process_transaction(&invoke_tx).is_ok()); // disable _sol_alloc_free + let slot = bank.slot(); + drop(bank); + let mut bank = Arc::try_unwrap(bank_forks.write().unwrap().remove(slot).unwrap()).unwrap(); bank.activate_feature(&solana_sdk::feature_set::disable_deploy_of_alloc_free_syscall::id()); bank.clear_signatures(); + let bank = bank_forks + .write() + .unwrap() + .insert(bank) + .clone_without_scheduler(); // invoke should still succeed because cached assert!(bank.process_transaction(&invoke_tx).is_ok()); @@ -506,11 +534,11 @@ fn test_program_sbf_duplicate_accounts() { mint_keypair, .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let bank = Arc::new(bank); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let mut bank_client = BankClient::new_shared(bank.clone()); let (bank, program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, program, @@ -610,10 +638,11 @@ fn test_program_sbf_error_handling() { mint_keypair, .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let mut bank_client = BankClient::new(bank); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let mut bank_client = BankClient::new_shared(bank); let (_, program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, program, @@ -715,12 +744,12 @@ fn test_return_data_and_log_data_syscall() { mint_keypair, .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let bank = Arc::new(bank); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let mut bank_client = BankClient::new_shared(bank.clone()); let (bank, program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, program, @@ -783,8 +812,7 @@ fn test_program_sbf_invoke_sanity() { mint_keypair, .. 
} = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let bank = Arc::new(bank); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let mut bank_client = BankClient::new_shared(bank.clone()); let invoke_program_id = @@ -793,6 +821,7 @@ fn test_program_sbf_invoke_sanity() { load_program(&bank_client, &bpf_loader::id(), &mint_keypair, program.2); let (bank, noop_program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, program.3, @@ -1180,8 +1209,7 @@ fn test_program_sbf_program_id_spoofing() { mint_keypair, .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let bank = Arc::new(bank); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let mut bank_client = BankClient::new_shared(bank.clone()); let malicious_swap_pubkey = load_program( @@ -1192,6 +1220,7 @@ fn test_program_sbf_program_id_spoofing() { ); let (bank, malicious_system_pubkey) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_spoof1_system", @@ -1231,8 +1260,7 @@ fn test_program_sbf_caller_has_access_to_cpi_program() { mint_keypair, .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let bank = Arc::new(bank); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let mut bank_client = BankClient::new_shared(bank.clone()); let caller_pubkey = load_program( @@ -1243,6 +1271,7 @@ fn test_program_sbf_caller_has_access_to_cpi_program() { ); let (_, caller2_pubkey) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_caller_access", @@ -1269,12 +1298,12 @@ fn test_program_sbf_ro_modify() { mint_keypair, .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let bank = Arc::new(bank); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let mut bank_client = BankClient::new_shared(bank.clone()); let (bank, program_pubkey) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_ro_modify", @@ -1324,10 +1353,11 @@ fn test_program_sbf_call_depth() { mint_keypair, .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let mut bank_client = BankClient::new(bank); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let mut bank_client = BankClient::new_shared(bank); let (_, program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_call_depth", @@ -1357,10 +1387,11 @@ fn test_program_sbf_compute_budget() { mint_keypair, .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let mut bank_client = BankClient::new(bank); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let mut bank_client = BankClient::new_shared(bank); let (_, program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_noop", @@ -1484,12 +1515,12 @@ fn test_program_sbf_instruction_introspection() { mint_keypair, .. 
} = create_genesis_config(50_000); - let bank = Bank::new_for_tests(&genesis_config); - let bank = Arc::new(bank); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let mut bank_client = BankClient::new_shared(bank.clone()); let (bank, program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_instruction_introspection", @@ -1542,8 +1573,8 @@ fn test_program_sbf_test_use_latest_executor() { mint_keypair, .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let mut bank_client = BankClient::new(bank); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let mut bank_client = BankClient::new_shared(bank); let panic_id = load_program( &bank_client, &bpf_loader::id(), @@ -1570,7 +1601,7 @@ fn test_program_sbf_test_use_latest_executor() { ); bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance the slot"); assert!(bank_client .send_and_confirm_message(&[&mint_keypair, &program_keypair], message) @@ -1585,7 +1616,7 @@ fn test_program_sbf_test_use_latest_executor() { "solana_sbf_rust_noop", ); bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance the slot"); let message = Message::new(&[instruction], Some(&mint_keypair.pubkey())); bank_client @@ -1602,7 +1633,7 @@ fn test_program_sbf_test_use_latest_executor() { Some(&mint_keypair.pubkey()), ); bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance the slot"); assert!(bank_client .send_and_confirm_message(&[&mint_keypair], message) @@ -1619,8 +1650,8 @@ fn test_program_sbf_upgrade() { mint_keypair, .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let mut bank_client = BankClient::new(bank); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let mut bank_client = BankClient::new_shared(bank); // Deploy upgrade program let buffer_keypair = Keypair::new(); @@ -1636,7 +1667,7 @@ fn test_program_sbf_upgrade() { "solana_sbf_rust_upgradeable", ); bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance the slot"); let mut instruction = @@ -1664,7 +1695,7 @@ fn test_program_sbf_upgrade() { ..clock::Clock::default() }); bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance the slot"); // Call upgraded program @@ -1697,7 +1728,7 @@ fn test_program_sbf_upgrade() { ); bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance the slot"); // Call original program @@ -1932,8 +1963,7 @@ fn test_program_sbf_invoke_in_same_tx_as_deployment() { mint_keypair, .. 
} = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let bank = Arc::new(bank); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let mut bank_client = BankClient::new_shared(bank.clone()); // Deploy upgradeable program @@ -1989,7 +2019,7 @@ fn test_program_sbf_invoke_in_same_tx_as_deployment() { .unwrap(); let bank = bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance slot"); // Deployment is invisible to both top-level-instructions and CPI instructions @@ -2030,8 +2060,7 @@ fn test_program_sbf_invoke_in_same_tx_as_redeployment() { mint_keypair, .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let bank = Arc::new(bank); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let mut bank_client = BankClient::new_shared(bank.clone()); // Deploy upgradeable program @@ -2084,7 +2113,7 @@ fn test_program_sbf_invoke_in_same_tx_as_redeployment() { // load_upgradeable_program sets clock sysvar to 1, which causes the program to be effective // after 2 slots. So we need to advance the bank client by 2 slots here. let bank = bank_client - .advance_slot(2, &Pubkey::default()) + .advance_slot(2, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance slot"); // Prepare redeployment @@ -2138,8 +2167,7 @@ fn test_program_sbf_invoke_in_same_tx_as_undeployment() { mint_keypair, .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let bank = Arc::new(bank); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let mut bank_client = BankClient::new_shared(bank.clone()); // Deploy upgradeable program @@ -2181,7 +2209,7 @@ fn test_program_sbf_invoke_in_same_tx_as_undeployment() { // load_upgradeable_program sets clock sysvar to 1, which causes the program to be effective // after 2 slots. So we need to advance the bank client by 2 slots here. let bank = bank_client - .advance_slot(2, &Pubkey::default()) + .advance_slot(2, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance slot"); // Prepare undeployment @@ -2231,8 +2259,8 @@ fn test_program_sbf_invoke_upgradeable_via_cpi() { mint_keypair, .. 
} = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let mut bank_client = BankClient::new(bank); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let mut bank_client = BankClient::new_shared(bank); let invoke_and_return = load_program( &bank_client, &bpf_loader::id(), @@ -2255,7 +2283,7 @@ fn test_program_sbf_invoke_upgradeable_via_cpi() { ); bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance slot"); let mut instruction = Instruction::new_with_bytes( @@ -2290,7 +2318,7 @@ fn test_program_sbf_invoke_upgradeable_via_cpi() { ..clock::Clock::default() }); bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance slot"); // Call the upgraded program @@ -2323,7 +2351,7 @@ fn test_program_sbf_invoke_upgradeable_via_cpi() { ); bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance slot"); // Call original program @@ -2366,7 +2394,8 @@ fn test_program_sbf_disguised_as_sbf_loader() { bank.deactivate_feature( &solana_sdk::feature_set::remove_bpf_loader_incorrect_program_id::id(), ); - let bank_client = BankClient::new(bank); + let bank = bank.wrap_with_bank_forks_for_tests().0; + let bank_client = BankClient::new_shared(bank); let program_id = load_program(&bank_client, &bpf_loader::id(), &mint_keypair, program); let account_metas = vec![AccountMeta::new_readonly(program_id, false)]; @@ -2389,11 +2418,12 @@ fn test_program_reads_from_program_account() { mint_keypair, .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let mut bank_client = BankClient::new(bank); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let mut bank_client = BankClient::new_shared(bank); let (_, program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "read_program", @@ -2415,16 +2445,21 @@ fn test_program_sbf_c_dup() { mint_keypair, .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let account_address = Pubkey::new_unique(); let account = AccountSharedData::new_data(42, &[1_u8, 2, 3], &system_program::id()).unwrap(); bank.store_account(&account_address, &account); - let mut bank_client = BankClient::new(bank); + let mut bank_client = BankClient::new_shared(bank); - let (_, program_id) = - load_program_and_advance_slot(&mut bank_client, &bpf_loader::id(), &mint_keypair, "ser"); + let (_, program_id) = load_program_and_advance_slot( + &mut bank_client, + bank_forks.as_ref(), + &bpf_loader::id(), + &mint_keypair, + "ser", + ); let account_metas = vec![ AccountMeta::new_readonly(account_address, false), AccountMeta::new_readonly(account_address, false), @@ -2445,8 +2480,8 @@ fn test_program_sbf_upgrade_via_cpi() { mint_keypair, .. 
} = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let mut bank_client = BankClient::new(bank); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let mut bank_client = BankClient::new_shared(bank); let invoke_and_return = load_program( &bank_client, &bpf_loader::id(), @@ -2468,7 +2503,7 @@ fn test_program_sbf_upgrade_via_cpi() { "solana_sbf_rust_upgradeable", ); bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance the slot"); let program_account = bank_client.get_account(&program_id).unwrap().unwrap(); let Ok(bpf_loader_upgradeable::UpgradeableLoaderState::Program { @@ -2526,7 +2561,7 @@ fn test_program_sbf_upgrade_via_cpi() { .unwrap(); bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance the slot"); // Call the upgraded program @@ -2555,8 +2590,8 @@ fn test_program_sbf_set_upgrade_authority_via_cpi() { mint_keypair, .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let mut bank_client = BankClient::new(bank); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let mut bank_client = BankClient::new_shared(bank); // Deploy CPI invoker program let invoke_and_return = load_program( @@ -2581,7 +2616,7 @@ fn test_program_sbf_set_upgrade_authority_via_cpi() { ); bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance the slot"); // Set program upgrade authority instruction to invoke via CPI @@ -2648,8 +2683,7 @@ fn test_program_upgradeable_locks() { mint_keypair, .. } = create_genesis_config(2_000_000_000); - let bank = Bank::new_for_tests(&genesis_config); - let bank = Arc::new(bank); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let mut bank_client = BankClient::new_shared(bank.clone()); load_upgradeable_program( @@ -2671,7 +2705,7 @@ fn test_program_upgradeable_locks() { ); let bank = bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance the slot"); bank_client @@ -2771,12 +2805,12 @@ fn test_program_sbf_finalize() { mint_keypair, .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let bank = Arc::new(bank); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let mut bank_client = BankClient::new_shared(bank.clone()); let (_, program_pubkey) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_finalize", @@ -2792,7 +2826,7 @@ fn test_program_sbf_finalize() { ); bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance the slot"); let account_metas = vec![ @@ -2819,12 +2853,12 @@ fn test_program_sbf_ro_account_modify() { mint_keypair, .. 
} = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let bank = Arc::new(bank); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let mut bank_client = BankClient::new_shared(bank.clone()); let (bank, program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_ro_account_modify", @@ -2891,11 +2925,12 @@ fn test_program_sbf_realloc() { if !direct_mapping { feature_set.deactivate(&feature_set::bpf_account_data_direct_mapping::id()); } - let bank = Arc::new(bank); + let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); let mut bank_client = BankClient::new_shared(bank.clone()); let (bank, program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_realloc", @@ -3218,8 +3253,7 @@ fn test_program_sbf_realloc_invoke() { let mint_pubkey = mint_keypair.pubkey(); let signer = &[&mint_keypair]; - let bank = Bank::new_for_tests(&genesis_config); - let bank = Arc::new(bank); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let mut bank_client = BankClient::new_shared(bank.clone()); let realloc_program_id = load_program( @@ -3231,6 +3265,7 @@ fn test_program_sbf_realloc_invoke() { let (bank, realloc_invoke_program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_realloc_invoke", @@ -3734,8 +3769,7 @@ fn test_program_sbf_processed_inner_instruction() { mint_keypair, .. } = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); - let bank = Arc::new(bank); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let mut bank_client = BankClient::new_shared(bank.clone()); let sibling_program_id = load_program( @@ -3758,6 +3792,7 @@ fn test_program_sbf_processed_inner_instruction() { ); let (_, invoke_and_return_program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_invoke_and_return", @@ -3816,10 +3851,12 @@ fn test_program_fees() { FeeStructure::new(0.000005, 0.0, vec![(200, 0.0000005), (1400000, 0.000005)]); bank.fee_structure = fee_structure.clone(); bank.feature_set = Arc::new(FeatureSet::all_enabled()); - let mut bank_client = BankClient::new(bank); + let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); + let mut bank_client = BankClient::new_shared(bank); let (_, program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_noop", @@ -3895,11 +3932,12 @@ fn test_get_minimum_delegation() { } = create_genesis_config(100_123_456_789); let mut bank = Bank::new_for_tests(&genesis_config); bank.feature_set = Arc::new(FeatureSet::all_enabled()); - let bank = Arc::new(bank); + let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); let mut bank_client = BankClient::new_shared(bank.clone()); let (_, program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_get_minimum_delegation", @@ -3921,7 +3959,7 @@ fn test_program_sbf_inner_instruction_alignment_checks() { mint_keypair, .. 
} = create_genesis_config(50); - let bank = Bank::new_for_tests(&genesis_config); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let noop = create_program(&bank, &bpf_loader_deprecated::id(), "solana_sbf_rust_noop"); let inner_instruction_alignment_check = create_program( &bank, @@ -3931,9 +3969,9 @@ fn test_program_sbf_inner_instruction_alignment_checks() { // invoke unaligned program, which will call aligned program twice, // unaligned should be allowed once invoke completes - let mut bank_client = BankClient::new(bank); + let mut bank_client = BankClient::new_shared(bank); bank_client - .advance_slot(1, &Pubkey::default()) + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) .expect("Failed to advance the slot"); let mut instruction = Instruction::new_with_bytes( inner_instruction_alignment_check, @@ -3966,7 +4004,7 @@ fn test_cpi_account_ownership_writability() { feature_set.deactivate(&feature_set::bpf_account_data_direct_mapping::id()); } bank.feature_set = Arc::new(feature_set); - let bank = Arc::new(bank); + let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); let mut bank_client = BankClient::new_shared(bank); let invoke_program_id = load_program( @@ -3985,6 +4023,7 @@ fn test_cpi_account_ownership_writability() { let (bank, realloc_program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_realloc", @@ -4147,7 +4186,7 @@ fn test_cpi_account_data_updates() { feature_set.deactivate(&feature_set::bpf_account_data_direct_mapping::id()); } bank.feature_set = Arc::new(feature_set); - let bank = Arc::new(bank); + let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); let mut bank_client = BankClient::new_shared(bank); let invoke_program_id = load_program( @@ -4159,6 +4198,7 @@ fn test_cpi_account_data_updates() { let (bank, realloc_program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_realloc", @@ -4294,7 +4334,7 @@ fn test_cpi_deprecated_loader_realloc() { feature_set.deactivate(&feature_set::bpf_account_data_direct_mapping::id()); } bank.feature_set = Arc::new(feature_set); - let bank = Arc::new(bank); + let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); let deprecated_program_id = create_program( &bank, @@ -4306,6 +4346,7 @@ fn test_cpi_deprecated_loader_realloc() { let (bank, invoke_program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_invoke", @@ -4444,11 +4485,12 @@ fn test_cpi_change_account_data_memory_allocation() { LoadedProgram::new_builtin(0, 42, MockBuiltin::vm), ); - let bank = Arc::new(bank); + let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); let mut bank_client = BankClient::new_shared(bank); let (bank, invoke_program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_invoke", @@ -4488,7 +4530,7 @@ fn test_cpi_invalid_account_info_pointers() { let mut bank = Bank::new_for_tests(&genesis_config); let feature_set = FeatureSet::all_enabled(); bank.feature_set = Arc::new(feature_set); - let bank = Arc::new(bank); + let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); let mut bank_client = BankClient::new_shared(bank); let c_invoke_program_id = @@ -4496,6 +4538,7 @@ fn test_cpi_invalid_account_info_pointers() { let (bank, invoke_program_id) = 
load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_invoke", @@ -4556,11 +4599,12 @@ fn test_deny_executable_write() { if !direct_mapping { feature_set.deactivate(&feature_set::bpf_account_data_direct_mapping::id()); } - let bank = Arc::new(bank); + let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); let mut bank_client = BankClient::new_shared(bank); let (_bank, invoke_program_id) = load_program_and_advance_slot( &mut bank_client, + bank_forks.as_ref(), &bpf_loader::id(), &mint_keypair, "solana_sbf_rust_invoke", diff --git a/programs/stake/src/stake_instruction.rs b/programs/stake/src/stake_instruction.rs index 27dbc5069bc40e..8d9d60be297250 100644 --- a/programs/stake/src/stake_instruction.rs +++ b/programs/stake/src/stake_instruction.rs @@ -228,117 +228,88 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| } Ok(StakeInstruction::InitializeChecked) => { let mut me = get_stake_account()?; - if invoke_context - .feature_set - .is_active(&feature_set::vote_stake_checked_instructions::id()) - { - instruction_context.check_number_of_instruction_accounts(4)?; - let staker_pubkey = transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(2)?, - )?; - let withdrawer_pubkey = transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(3)?, - )?; - if !instruction_context.is_instruction_account_signer(3)? { - return Err(InstructionError::MissingRequiredSignature); - } + instruction_context.check_number_of_instruction_accounts(4)?; + let staker_pubkey = transaction_context.get_key_of_account_at_index( + instruction_context.get_index_of_instruction_account_in_transaction(2)?, + )?; + let withdrawer_pubkey = transaction_context.get_key_of_account_at_index( + instruction_context.get_index_of_instruction_account_in_transaction(3)?, + )?; + if !instruction_context.is_instruction_account_signer(3)? { + return Err(InstructionError::MissingRequiredSignature); + } - let authorized = Authorized { - staker: *staker_pubkey, - withdrawer: *withdrawer_pubkey, - }; + let authorized = Authorized { + staker: *staker_pubkey, + withdrawer: *withdrawer_pubkey, + }; - let rent = - get_sysvar_with_account_check::rent(invoke_context, instruction_context, 1)?; - initialize(&mut me, &authorized, &Lockup::default(), &rent) - } else { - Err(InstructionError::InvalidInstructionData) - } + let rent = get_sysvar_with_account_check::rent(invoke_context, instruction_context, 1)?; + initialize(&mut me, &authorized, &Lockup::default(), &rent) } Ok(StakeInstruction::AuthorizeChecked(stake_authorize)) => { let mut me = get_stake_account()?; - if invoke_context - .feature_set - .is_active(&feature_set::vote_stake_checked_instructions::id()) - { - let clock = - get_sysvar_with_account_check::clock(invoke_context, instruction_context, 1)?; - instruction_context.check_number_of_instruction_accounts(4)?; - let authorized_pubkey = transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(3)?, - )?; - if !instruction_context.is_instruction_account_signer(3)? 
{ - return Err(InstructionError::MissingRequiredSignature); - } - let custodian_pubkey = - get_optional_pubkey(transaction_context, instruction_context, 4, false)?; - - authorize( - &mut me, - &signers, - authorized_pubkey, - stake_authorize, - &clock, - custodian_pubkey, - ) - } else { - Err(InstructionError::InvalidInstructionData) + let clock = + get_sysvar_with_account_check::clock(invoke_context, instruction_context, 1)?; + instruction_context.check_number_of_instruction_accounts(4)?; + let authorized_pubkey = transaction_context.get_key_of_account_at_index( + instruction_context.get_index_of_instruction_account_in_transaction(3)?, + )?; + if !instruction_context.is_instruction_account_signer(3)? { + return Err(InstructionError::MissingRequiredSignature); } + let custodian_pubkey = + get_optional_pubkey(transaction_context, instruction_context, 4, false)?; + + authorize( + &mut me, + &signers, + authorized_pubkey, + stake_authorize, + &clock, + custodian_pubkey, + ) } Ok(StakeInstruction::AuthorizeCheckedWithSeed(args)) => { let mut me = get_stake_account()?; - if invoke_context - .feature_set - .is_active(&feature_set::vote_stake_checked_instructions::id()) - { - instruction_context.check_number_of_instruction_accounts(2)?; - let clock = - get_sysvar_with_account_check::clock(invoke_context, instruction_context, 2)?; - instruction_context.check_number_of_instruction_accounts(4)?; - let authorized_pubkey = transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(3)?, - )?; - if !instruction_context.is_instruction_account_signer(3)? { - return Err(InstructionError::MissingRequiredSignature); - } - let custodian_pubkey = - get_optional_pubkey(transaction_context, instruction_context, 4, false)?; - - authorize_with_seed( - transaction_context, - instruction_context, - &mut me, - 1, - &args.authority_seed, - &args.authority_owner, - authorized_pubkey, - args.stake_authorize, - &clock, - custodian_pubkey, - ) - } else { - Err(InstructionError::InvalidInstructionData) + instruction_context.check_number_of_instruction_accounts(2)?; + let clock = + get_sysvar_with_account_check::clock(invoke_context, instruction_context, 2)?; + instruction_context.check_number_of_instruction_accounts(4)?; + let authorized_pubkey = transaction_context.get_key_of_account_at_index( + instruction_context.get_index_of_instruction_account_in_transaction(3)?, + )?; + if !instruction_context.is_instruction_account_signer(3)? 
{ + return Err(InstructionError::MissingRequiredSignature); } + let custodian_pubkey = + get_optional_pubkey(transaction_context, instruction_context, 4, false)?; + + authorize_with_seed( + transaction_context, + instruction_context, + &mut me, + 1, + &args.authority_seed, + &args.authority_owner, + authorized_pubkey, + args.stake_authorize, + &clock, + custodian_pubkey, + ) } Ok(StakeInstruction::SetLockupChecked(lockup_checked)) => { let mut me = get_stake_account()?; - if invoke_context - .feature_set - .is_active(&feature_set::vote_stake_checked_instructions::id()) - { - let custodian_pubkey = - get_optional_pubkey(transaction_context, instruction_context, 2, true)?; + let custodian_pubkey = + get_optional_pubkey(transaction_context, instruction_context, 2, true)?; - let lockup = LockupArgs { - unix_timestamp: lockup_checked.unix_timestamp, - epoch: lockup_checked.epoch, - custodian: custodian_pubkey.cloned(), - }; - let clock = invoke_context.get_sysvar_cache().get_clock()?; - set_lockup(&mut me, &lockup, &signers, &clock) - } else { - Err(InstructionError::InvalidInstructionData) - } + let lockup = LockupArgs { + unix_timestamp: lockup_checked.unix_timestamp, + epoch: lockup_checked.epoch, + custodian: custodian_pubkey.cloned(), + }; + let clock = invoke_context.get_sysvar_cache().get_clock()?; + set_lockup(&mut me, &lockup, &signers, &clock) } Ok(StakeInstruction::GetMinimumDelegation) => { let feature_set = invoke_context.feature_set.as_ref(); diff --git a/programs/stake/src/stake_state.rs b/programs/stake/src/stake_state.rs index 4c616d2d9e2615..1fec7887f63995 100644 --- a/programs/stake/src/stake_state.rs +++ b/programs/stake/src/stake_state.rs @@ -14,7 +14,7 @@ use { account::{AccountSharedData, ReadableAccount, WritableAccount}, account_utils::StateMut, clock::{Clock, Epoch}, - feature_set::{self, stake_merge_with_unmatched_credits_observed, FeatureSet}, + feature_set::{self, FeatureSet}, instruction::{checked_add, InstructionError}, pubkey::Pubkey, rent::Rent, @@ -1417,29 +1417,6 @@ impl MergeKind { } } - // Remove this when the `stake_merge_with_unmatched_credits_observed` feature is removed - fn active_stakes_can_merge( - invoke_context: &InvokeContext, - stake: &Stake, - source: &Stake, - ) -> Result<(), InstructionError> { - Self::active_delegations_can_merge(invoke_context, &stake.delegation, &source.delegation)?; - // `credits_observed` MUST match to prevent earning multiple rewards - // from a stake account by merging it into another stake account that - // is small enough to not be paid out every epoch. This would effectively - // reset the larger stake accounts `credits_observed` to that of the - // smaller account. 
- if stake.credits_observed == source.credits_observed { - Ok(()) - } else { - ic_msg!( - invoke_context, - "Unable to merge due to credits observed mismatch" - ); - Err(StakeError::MergeMismatch.into()) - } - } - fn merge( self, invoke_context: &InvokeContext, @@ -1450,18 +1427,11 @@ impl MergeKind { self.active_stake() .zip(source.active_stake()) .map(|(stake, source)| { - if invoke_context - .feature_set - .is_active(&stake_merge_with_unmatched_credits_observed::id()) - { - Self::active_delegations_can_merge( - invoke_context, - &stake.delegation, - &source.delegation, - ) - } else { - Self::active_stakes_can_merge(invoke_context, stake, source) - } + Self::active_delegations_can_merge( + invoke_context, + &stake.delegation, + &source.delegation, + ) }) .unwrap_or(Ok(()))?; let merged_state = match (self, source) { @@ -1487,7 +1457,6 @@ impl MergeKind { source_stake.delegation.stake, )?; merge_delegation_stake_and_credits_observed( - invoke_context, &mut stake, source_lamports, source_stake.credits_observed, @@ -1504,7 +1473,6 @@ impl MergeKind { // instead be moved into the destination account as extra, // withdrawable `lamports` merge_delegation_stake_and_credits_observed( - invoke_context, &mut stake, source_stake.delegation.stake, source_stake.credits_observed, @@ -1518,19 +1486,13 @@ impl MergeKind { } fn merge_delegation_stake_and_credits_observed( - invoke_context: &InvokeContext, stake: &mut Stake, absorbed_lamports: u64, absorbed_credits_observed: u64, ) -> Result<(), InstructionError> { - if invoke_context - .feature_set - .is_active(&stake_merge_with_unmatched_credits_observed::id()) - { - stake.credits_observed = - stake_weighted_credits_observed(stake, absorbed_lamports, absorbed_credits_observed) - .ok_or(InstructionError::ArithmeticOverflow)?; - } + stake.credits_observed = + stake_weighted_credits_observed(stake, absorbed_lamports, absorbed_credits_observed) + .ok_or(InstructionError::ArithmeticOverflow)?; stake.delegation.stake = checked_add(stake.delegation.stake, absorbed_lamports)?; Ok(()) } @@ -3033,20 +2995,12 @@ mod tests { }; let identical = good_stake; - assert!( - MergeKind::active_stakes_can_merge(&invoke_context, &good_stake, &identical).is_ok() - ); - - let bad_credits_observed = Stake { - credits_observed: good_stake.credits_observed + 1, - ..good_stake - }; - assert!(MergeKind::active_stakes_can_merge( + assert!(MergeKind::active_delegations_can_merge( &invoke_context, - &good_stake, - &bad_credits_observed + &good_stake.delegation, + &identical.delegation ) - .is_err()); + .is_ok()); let good_delegation = good_stake.delegation; let different_stake_ok = Delegation { diff --git a/programs/vote/src/vote_processor.rs b/programs/vote/src/vote_processor.rs index 536fbeb83fce08..7e67832e9d3615 100644 --- a/programs/vote/src/vote_processor.rs +++ b/programs/vote/src/vote_processor.rs @@ -213,30 +213,23 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| ) } VoteInstruction::AuthorizeChecked(vote_authorize) => { - if invoke_context - .feature_set - .is_active(&feature_set::vote_stake_checked_instructions::id()) - { - instruction_context.check_number_of_instruction_accounts(4)?; - let voter_pubkey = transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(3)?, - )?; - if !instruction_context.is_instruction_account_signer(3)? 
{ - return Err(InstructionError::MissingRequiredSignature); - } - let clock = - get_sysvar_with_account_check::clock(invoke_context, instruction_context, 1)?; - vote_state::authorize( - &mut me, - voter_pubkey, - vote_authorize, - &signers, - &clock, - &invoke_context.feature_set, - ) - } else { - Err(InstructionError::InvalidInstructionData) + instruction_context.check_number_of_instruction_accounts(4)?; + let voter_pubkey = transaction_context.get_key_of_account_at_index( + instruction_context.get_index_of_instruction_account_in_transaction(3)?, + )?; + if !instruction_context.is_instruction_account_signer(3)? { + return Err(InstructionError::MissingRequiredSignature); } + let clock = + get_sysvar_with_account_check::clock(invoke_context, instruction_context, 1)?; + vote_state::authorize( + &mut me, + voter_pubkey, + vote_authorize, + &signers, + &clock, + &invoke_context.feature_set, + ) } } }); diff --git a/programs/vote/src/vote_state/mod.rs b/programs/vote/src/vote_state/mod.rs index f5f80cf1183e2b..79d131583ae467 100644 --- a/programs/vote/src/vote_state/mod.rs +++ b/programs/vote/src/vote_state/mod.rs @@ -1272,7 +1272,7 @@ mod tests { let processor_account = AccountSharedData::new(0, 0, &solana_sdk::native_loader::id()); let transaction_context = TransactionContext::new( vec![(id(), processor_account), (node_pubkey, vote_account)], - rent, + rent.clone(), 0, 0, ); diff --git a/quic-client/src/lib.rs b/quic-client/src/lib.rs index 90a55deaa691ed..6339c5080d9b17 100644 --- a/quic-client/src/lib.rs +++ b/quic-client/src/lib.rs @@ -84,39 +84,52 @@ impl ConnectionPool for QuicPool { } } -#[derive(Clone)] pub struct QuicConfig { - client_certificate: Arc, + // Arc to prevent having to copy the struct + client_certificate: RwLock>, maybe_staked_nodes: Option>>, maybe_client_pubkey: Option, // The optional specified endpoint for the quic based client connections // If not specified, the connection cache will create as needed. 
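// Illustrative aside, not from the patch itself (simplified stand-in types):
// moving `client_certificate` from `Arc<_>` to `RwLock<Arc<_>>` is what lets
// the new `update_keypair` / `update_key` methods further down swap in a
// freshly signed certificate on a shared config. Readers still only pay an
// `Arc::clone` under a short read lock, and endpoints created earlier keep
// using whichever `Arc` they already cloned.
use std::sync::{Arc, RwLock};

struct Certificate(Vec<u8>); // stand-in for QuicClientCertificate

struct Config {
    certificate: RwLock<Arc<Certificate>>,
}

impl Config {
    fn current_certificate(&self) -> Arc<Certificate> {
        // cheap refcount bump; the read lock is held only for the clone
        self.certificate.read().unwrap().clone()
    }

    fn rotate_certificate(&self, new_cert: Certificate) {
        // atomic swap; in-flight holders of the old Arc are unaffected
        *self.certificate.write().unwrap() = Arc::new(new_cert);
    }
}

fn main() {
    let config = Config {
        certificate: RwLock::new(Arc::new(Certificate(vec![1, 2, 3]))),
    };
    let before = config.current_certificate();
    config.rotate_certificate(Certificate(vec![4, 5, 6]));
    assert_eq!(before.0, vec![1, 2, 3]); // old handle still valid
    assert_eq!(config.current_certificate().0, vec![4, 5, 6]);
}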
client_endpoint: Option, + addr: IpAddr, +} + +impl Clone for QuicConfig { + fn clone(&self) -> Self { + let cert_guard = self.client_certificate.read().unwrap(); + QuicConfig { + client_certificate: RwLock::new(cert_guard.clone()), + maybe_staked_nodes: self.maybe_staked_nodes.clone(), + maybe_client_pubkey: self.maybe_client_pubkey, + client_endpoint: self.client_endpoint.clone(), + addr: self.addr, + } + } } impl NewConnectionConfig for QuicConfig { fn new() -> Result { - let (cert, priv_key) = - new_self_signed_tls_certificate(&Keypair::new(), IpAddr::V4(Ipv4Addr::UNSPECIFIED))?; + let addr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); + let (cert, priv_key) = new_self_signed_tls_certificate(&Keypair::new(), addr)?; Ok(Self { - client_certificate: Arc::new(QuicClientCertificate { + client_certificate: RwLock::new(Arc::new(QuicClientCertificate { certificate: cert, key: priv_key, - }), + })), maybe_staked_nodes: None, maybe_client_pubkey: None, client_endpoint: None, + addr, }) } } impl QuicConfig { fn create_endpoint(&self) -> QuicLazyInitializedEndpoint { - QuicLazyInitializedEndpoint::new( - self.client_certificate.clone(), - self.client_endpoint.as_ref().cloned(), - ) + let cert_guard = self.client_certificate.read().unwrap(); + QuicLazyInitializedEndpoint::new(cert_guard.clone(), self.client_endpoint.as_ref().cloned()) } fn compute_max_parallel_streams(&self) -> usize { @@ -143,7 +156,23 @@ impl QuicConfig { ipaddr: IpAddr, ) -> Result<(), RcgenError> { let (cert, priv_key) = new_self_signed_tls_certificate(keypair, ipaddr)?; - self.client_certificate = Arc::new(QuicClientCertificate { + self.addr = ipaddr; + + let mut cert_guard = self.client_certificate.write().unwrap(); + + *cert_guard = Arc::new(QuicClientCertificate { + certificate: cert, + key: priv_key, + }); + Ok(()) + } + + pub fn update_keypair(&self, keypair: &Keypair) -> Result<(), RcgenError> { + let (cert, priv_key) = new_self_signed_tls_certificate(keypair, self.addr)?; + + let mut cert_guard = self.client_certificate.write().unwrap(); + + *cert_guard = Arc::new(QuicClientCertificate { certificate: cert, key: priv_key, }); @@ -212,6 +241,11 @@ impl ConnectionManager for QuicConnectionManager { fn new_connection_config(&self) -> QuicConfig { self.connection_config.clone() } + + fn update_key(&self, key: &Keypair) -> Result<(), Box> { + self.connection_config.update_keypair(key)?; + Ok(()) + } } impl QuicConnectionManager { diff --git a/quic-client/tests/quic_client.rs b/quic-client/tests/quic_client.rs index 7608e2b7b265c7..b874261dee3b69 100644 --- a/quic-client/tests/quic_client.rs +++ b/quic-client/tests/quic_client.rs @@ -10,8 +10,8 @@ mod tests { }, solana_sdk::{net::DEFAULT_TPU_COALESCE, packet::PACKET_DATA_SIZE, signature::Keypair}, solana_streamer::{ - nonblocking::quic::DEFAULT_WAIT_FOR_CHUNK_TIMEOUT, streamer::StakedNodes, - tls_certificates::new_self_signed_tls_certificate, + nonblocking::quic::DEFAULT_WAIT_FOR_CHUNK_TIMEOUT, quic::SpawnServerResult, + streamer::StakedNodes, tls_certificates::new_self_signed_tls_certificate, }, std::{ net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket}, @@ -68,7 +68,11 @@ mod tests { let (sender, receiver) = unbounded(); let staked_nodes = Arc::new(RwLock::new(StakedNodes::default())); let (s, exit, keypair, ip) = server_args(); - let (_, t) = solana_streamer::quic::spawn_server( + let SpawnServerResult { + endpoint: _, + thread: t, + key_updater: _, + } = solana_streamer::quic::spawn_server( "quic_streamer_test", s.try_clone().unwrap(), &keypair, @@ -204,7 +208,11 @@ mod tests { let 
(sender, receiver) = unbounded(); let staked_nodes = Arc::new(RwLock::new(StakedNodes::default())); let (request_recv_socket, request_recv_exit, keypair, request_recv_ip) = server_args(); - let (request_recv_endpoint, request_recv_thread) = solana_streamer::quic::spawn_server( + let SpawnServerResult { + endpoint: request_recv_endpoint, + thread: request_recv_thread, + key_updater: _, + } = solana_streamer::quic::spawn_server( "quic_streamer_test", request_recv_socket.try_clone().unwrap(), &keypair, @@ -228,7 +236,11 @@ mod tests { let addr = response_recv_socket.local_addr().unwrap().ip(); let port = response_recv_socket.local_addr().unwrap().port(); let server_addr = SocketAddr::new(addr, port); - let (response_recv_endpoint, response_recv_thread) = solana_streamer::quic::spawn_server( + let SpawnServerResult { + endpoint: response_recv_endpoint, + thread: response_recv_thread, + key_updater: _, + } = solana_streamer::quic::spawn_server( "quic_streamer_test", response_recv_socket, &keypair2, diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 3edd4b38009639..98d9ee572f6824 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -63,6 +63,7 @@ tokio-util = { workspace = true, features = ["codec", "compat"] } [dev-dependencies] serial_test = { workspace = true } solana-net-utils = { workspace = true } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } solana-stake-program = { workspace = true } spl-pod = { workspace = true } symlink = { workspace = true } diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index 5e62dff9ce55d3..0a92a4d031e9ef 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -631,7 +631,7 @@ impl JsonRpcRequestProcessor { // Since epoch schedule data comes from the genesis config, any commitment level should be // fine let bank = self.bank(Some(CommitmentConfig::finalized())); - *bank.epoch_schedule() + bank.epoch_schedule().clone() } pub fn get_balance( @@ -5057,7 +5057,7 @@ pub mod tests { fn test_rpc_request_processor_new() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let genesis = create_genesis_config(100); - let bank = Arc::new(Bank::new_for_tests(&genesis.genesis_config)); + let bank = Bank::new_with_bank_forks_for_tests(&genesis.genesis_config).0; bank.transfer(20, &genesis.mint_keypair, &bob_pubkey) .unwrap(); let connection_cache = Arc::new(ConnectionCache::new("connection_cache_test")); @@ -5227,7 +5227,7 @@ pub mod tests { fn test_rpc_get_tx_count() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let genesis = create_genesis_config(10); - let bank = Arc::new(Bank::new_for_tests(&genesis.genesis_config)); + let bank = Bank::new_with_bank_forks_for_tests(&genesis.genesis_config).0; // Add 4 transactions bank.transfer(1, &genesis.mint_keypair, &bob_pubkey) .unwrap(); diff --git a/rpc/src/transaction_status_service.rs b/rpc/src/transaction_status_service.rs index b98f0831518675..d36efbc6c4fb7f 100644 --- a/rpc/src/transaction_status_service.rs +++ b/rpc/src/transaction_status_service.rs @@ -336,7 +336,7 @@ pub(crate) mod tests { #[test] fn test_notify_transaction() { let genesis_config = create_genesis_config(2).genesis_config; - let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; let (transaction_status_sender, transaction_status_receiver) = unbounded(); let ledger_path = get_tmp_ledger_path_auto_delete!(); diff --git a/runtime-transaction/Cargo.toml b/runtime-transaction/Cargo.toml index 6503740d4d49f1..947da05cc169c2 100644 --- 
a/runtime-transaction/Cargo.toml +++ b/runtime-transaction/Cargo.toml @@ -22,6 +22,7 @@ name = "solana_runtime_transaction" [dev-dependencies] bincode = { workspace = true } rand = { workspace = true } +solana-program ={ workspace = true } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/runtime-transaction/src/runtime_transaction.rs b/runtime-transaction/src/runtime_transaction.rs index 8ddf004f8b65c9..df6acbccdd2905 100644 --- a/runtime-transaction/src/runtime_transaction.rs +++ b/runtime-transaction/src/runtime_transaction.rs @@ -4,11 +4,11 @@ //! It has two states: //! 1. Statically Loaded: after receiving `packet` from sigverify and deserializing //! it into `solana_sdk::VersionedTransaction`, then sanitizing into -//! `solana_sdk::SanitizedVersionedTransaction`, `RuntimeTransactionStatic` -//! can be created from it with static transaction metadata extracted. +//! `solana_sdk::SanitizedVersionedTransaction`, which can be wrapped into +//! `RuntimeTransaction` with static transaction metadata extracted. //! 2. Dynamically Loaded: after successfully loaded account addresses from onchain -//! ALT, RuntimeTransaction transits into Dynamically Loaded state, with -//! its dynamic metadata loaded. +//! ALT, RuntimeTransaction transits into Dynamically Loaded state, +//! with its dynamic metadata loaded. use { crate::transaction_meta::{DynamicMeta, StaticMeta, TransactionMeta}, solana_sdk::{ @@ -21,15 +21,27 @@ use { }; #[derive(Debug, Clone, Eq, PartialEq)] -pub struct RuntimeTransactionStatic { +pub struct RuntimeTransaction { signatures: Vec, - message: SanitizedVersionedMessage, + message: M, // transaction meta is a collection of fields, it is updated // during message state transition meta: TransactionMeta, } -impl StaticMeta for RuntimeTransactionStatic { +// These traits gate access to static and dynamic metadata +// so that only transactions with supporting message types +// can access them. +trait StaticMetaAccess {} +trait DynamicMetaAccess: StaticMetaAccess {} + +// Implement the gate traits for the message types that should +// have access to the static and dynamic metadata. 
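// Illustrative aside, not from the patch itself (stand-in types): the two
// marker traits above turn the message type parameter into a compile-time
// gate. Only `RuntimeTransaction<M>` instantiations whose `M` implements the
// relevant marker expose the static or dynamic metadata API, so a
// statically loaded transaction cannot be asked for dynamically loaded data.
trait StaticAccess {}
trait DynamicAccess: StaticAccess {}

struct StaticMsg; // stand-in for SanitizedVersionedMessage
struct DynamicMsg; // stand-in for SanitizedMessage

impl StaticAccess for StaticMsg {}
impl StaticAccess for DynamicMsg {}
impl DynamicAccess for DynamicMsg {}

struct Tx<M> {
    message: M,
    hash: u64,
}

impl<M: StaticAccess> Tx<M> {
    fn message_hash(&self) -> u64 {
        self.hash
    }
}

impl<M: DynamicAccess> Tx<M> {
    fn resolved_addresses(&self) -> usize {
        0 // placeholder for metadata only a fully loaded message can provide
    }
}

fn main() {
    let statically_loaded = Tx { message: StaticMsg, hash: 1 };
    let dynamically_loaded = Tx { message: DynamicMsg, hash: 2 };
    let _ = statically_loaded.message_hash();
    let _ = dynamically_loaded.message_hash();
    let _ = dynamically_loaded.resolved_addresses();
    // statically_loaded.resolved_addresses(); // does not compile: StaticMsg is not DynamicAccess
}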
+impl StaticMetaAccess for SanitizedVersionedMessage {} +impl StaticMetaAccess for SanitizedMessage {} +impl DynamicMetaAccess for SanitizedMessage {} + +impl StaticMeta for RuntimeTransaction { fn message_hash(&self) -> &Hash { &self.meta.message_hash } @@ -38,7 +50,9 @@ impl StaticMeta for RuntimeTransactionStatic { } } -impl RuntimeTransactionStatic { +impl DynamicMeta for RuntimeTransaction {} + +impl RuntimeTransaction { pub fn try_from( sanitized_versioned_tx: SanitizedVersionedTransaction, message_hash: Option, @@ -62,31 +76,9 @@ impl RuntimeTransactionStatic { } } -/// Statically Loaded transaction can transit to Dynamically Loaded with supplied -/// address_loader, to load accounts from on-chain ALT, then resolve dynamic metadata -#[derive(Debug, Clone, Eq, PartialEq)] -pub struct RuntimeTransactionDynamic { - signatures: Vec, - message: SanitizedMessage, - // transaction meta is a collection of fields, it is updated - // during message state transition - meta: TransactionMeta, -} - -impl DynamicMeta for RuntimeTransactionDynamic {} - -impl StaticMeta for RuntimeTransactionDynamic { - fn message_hash(&self) -> &Hash { - &self.meta.message_hash - } - fn is_simple_vote_tx(&self) -> bool { - self.meta.is_simple_vote_tx - } -} - -impl RuntimeTransactionDynamic { +impl RuntimeTransaction { pub fn try_from( - statically_loaded_runtime_tx: RuntimeTransactionStatic, + statically_loaded_runtime_tx: RuntimeTransaction, address_loader: impl AddressLoader, ) -> Result { let mut tx = Self { @@ -106,3 +98,140 @@ impl RuntimeTransactionDynamic { Ok(()) } } + +#[cfg(test)] +mod tests { + use { + super::*, + solana_program::{ + system_instruction, + vote::{self, state::Vote}, + }, + solana_sdk::{ + compute_budget::ComputeBudgetInstruction, + message::Message, + signer::{keypair::Keypair, Signer}, + transaction::{SimpleAddressLoader, Transaction, VersionedTransaction}, + }, + }; + + fn vote_sanitized_versioned_transaction() -> SanitizedVersionedTransaction { + let bank_hash = Hash::new_unique(); + let block_hash = Hash::new_unique(); + let vote_keypair = Keypair::new(); + let node_keypair = Keypair::new(); + let auth_keypair = Keypair::new(); + let votes = Vote::new(vec![1, 2, 3], bank_hash); + let vote_ix = + vote::instruction::vote(&vote_keypair.pubkey(), &auth_keypair.pubkey(), votes); + let mut vote_tx = Transaction::new_with_payer(&[vote_ix], Some(&node_keypair.pubkey())); + vote_tx.partial_sign(&[&node_keypair], block_hash); + vote_tx.partial_sign(&[&auth_keypair], block_hash); + + SanitizedVersionedTransaction::try_from(VersionedTransaction::from(vote_tx)).unwrap() + } + + fn non_vote_sanitized_versioned_transaction( + compute_unit_price: u64, + ) -> SanitizedVersionedTransaction { + let from_keypair = Keypair::new(); + let ixs = vec![ + system_instruction::transfer( + &from_keypair.pubkey(), + &solana_sdk::pubkey::new_rand(), + 1, + ), + ComputeBudgetInstruction::set_compute_unit_price(compute_unit_price), + ]; + let message = Message::new(&ixs, Some(&from_keypair.pubkey())); + let tx = Transaction::new(&[&from_keypair], message, Hash::new_unique()); + SanitizedVersionedTransaction::try_from(VersionedTransaction::from(tx)).unwrap() + } + + fn get_transaction_meta( + svt: SanitizedVersionedTransaction, + hash: Option, + is_simple_vote: Option, + ) -> TransactionMeta { + RuntimeTransaction::::try_from(svt, hash, is_simple_vote) + .unwrap() + .meta + } + + #[test] + fn test_new_runtime_transaction_static() { + let hash = Hash::new_unique(); + let compute_unit_price = 1_000; + + assert_eq!( + 
TransactionMeta { + message_hash: hash, + is_simple_vote_tx: false, + }, + get_transaction_meta( + non_vote_sanitized_versioned_transaction(compute_unit_price), + Some(hash), + None + ) + ); + + assert_eq!( + TransactionMeta { + message_hash: hash, + is_simple_vote_tx: true, + }, + get_transaction_meta( + non_vote_sanitized_versioned_transaction(compute_unit_price), + Some(hash), + Some(true), // override + ) + ); + + assert_eq!( + TransactionMeta { + message_hash: hash, + is_simple_vote_tx: true, + }, + get_transaction_meta(vote_sanitized_versioned_transaction(), Some(hash), None) + ); + + assert_eq!( + TransactionMeta { + message_hash: hash, + is_simple_vote_tx: false, + }, + get_transaction_meta( + vote_sanitized_versioned_transaction(), + Some(hash), + Some(false), // override + ) + ); + } + + #[test] + fn test_advance_transaction_type() { + let hash = Hash::new_unique(); + let compute_unit_price = 999; + + let statically_loaded_transaction = + RuntimeTransaction::::try_from( + non_vote_sanitized_versioned_transaction(compute_unit_price), + Some(hash), + None, + ) + .unwrap(); + + assert_eq!(hash, *statically_loaded_transaction.message_hash()); + assert!(!statically_loaded_transaction.is_simple_vote_tx()); + + let dynamically_loaded_transaction = RuntimeTransaction::::try_from( + statically_loaded_transaction, + SimpleAddressLoader::Disabled, + ); + let dynamically_loaded_transaction = + dynamically_loaded_transaction.expect("created from statically loaded tx"); + + assert_eq!(hash, *dynamically_loaded_transaction.message_hash()); + assert!(!dynamically_loaded_transaction.is_simple_vote_tx()); + } +} diff --git a/runtime/benches/bank.rs b/runtime/benches/bank.rs index 21f4976d695e73..867b549b24706a 100644 --- a/runtime/benches/bank.rs +++ b/runtime/benches/bank.rs @@ -89,7 +89,7 @@ fn async_bencher(bank: &Bank, bank_client: &BankClient, transactions: &[Transact } for _ in 0..1_000_000_000_u64 { if bank - .get_signature_status(transactions.last().unwrap().signatures.get(0).unwrap()) + .get_signature_status(transactions.last().unwrap().signatures.first().unwrap()) .is_some() { break; @@ -97,13 +97,13 @@ fn async_bencher(bank: &Bank, bank_client: &BankClient, transactions: &[Transact sleep(Duration::from_nanos(1)); } if bank - .get_signature_status(transactions.last().unwrap().signatures.get(0).unwrap()) + .get_signature_status(transactions.last().unwrap().signatures.first().unwrap()) .unwrap() .is_err() { error!( "transaction failed: {:?}", - bank.get_signature_status(transactions.last().unwrap().signatures.get(0).unwrap()) + bank.get_signature_status(transactions.last().unwrap().signatures.first().unwrap()) .unwrap() ); panic!(); diff --git a/runtime/src/accounts/mod.rs b/runtime/src/accounts/mod.rs index 9995a9251960b8..01df5c9bc01390 100644 --- a/runtime/src/accounts/mod.rs +++ b/runtime/src/accounts/mod.rs @@ -1,12 +1,15 @@ pub mod account_rent_state; use { - crate::accounts::account_rent_state::{check_rent_state_with_account, RentState}, + crate::{ + accounts::account_rent_state::{check_rent_state_with_account, RentState}, + bank::RewardInterval, + }, itertools::Itertools, log::warn, solana_accounts_db::{ account_overrides::AccountOverrides, - accounts::{LoadedTransaction, RewardInterval, TransactionLoadResult, TransactionRent}, + accounts::{LoadedTransaction, TransactionLoadResult, TransactionRent}, accounts_db::AccountsDb, ancestors::Ancestors, blockhash_queue::BlockhashQueue, @@ -218,7 +221,6 @@ fn load_transaction_accounts( .collect_from_existing_account( key, &mut account, - 
accounts_db.filler_account_suffix.as_ref(), set_exempt_rent_epoch_max, ) .rent_amount; diff --git a/runtime/src/accounts_background_service.rs b/runtime/src/accounts_background_service.rs index 72a6f72f11927c..efc17176d7337b 100644 --- a/runtime/src/accounts_background_service.rs +++ b/runtime/src/accounts_background_service.rs @@ -707,9 +707,21 @@ impl AccountsBackgroundService { bank.force_flush_accounts_cache(); bank.clean_accounts(last_full_snapshot_slot); last_cleaned_block_height = bank.block_height(); - bank.shrink_ancient_slots(); + // See justification below for why we skip 'shrink' here. + if bank.is_startup_verification_complete() { + bank.shrink_ancient_slots(); + } + } + // Do not 'shrink' until *after* the startup verification is complete. + // This is because startup verification needs to get the snapshot + // storages *as they existed at startup* (to calculate the accounts hash). + // If 'shrink' were to run, then it is possible startup verification + // (1) could race with 'shrink', and fail to assert that shrinking is not in + // progress, or (2) could get snapshot storages that were newer than what + // was in the snapshot itself. + if bank.is_startup_verification_complete() { + bank.shrink_candidate_slots(); } - bank.shrink_candidate_slots(); } stats.record_and_maybe_submit(start_time.elapsed()); sleep(Duration::from_millis(INTERVAL_MS)); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index e9cdc01a384ca1..da62ea932748f1 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -70,7 +70,7 @@ use { solana_accounts_db::{ account_overrides::AccountOverrides, accounts::{ - AccountAddressFilter, Accounts, LoadedTransaction, PubkeyAccountSlot, RewardInterval, + AccountAddressFilter, Accounts, LoadedTransaction, PubkeyAccountSlot, TransactionLoadResult, }, accounts_db::{ @@ -125,9 +125,9 @@ use { account::{ create_account_shared_data_with_fields as create_account, from_account, Account, AccountSharedData, InheritableAccountFields, ReadableAccount, WritableAccount, + PROGRAM_OWNERS, }, account_utils::StateMut, - bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable::{self, UpgradeableLoaderState}, clock::{ BankId, Epoch, Slot, SlotCount, SlotIndex, UnixTimestamp, DEFAULT_HASHES_PER_TICK, @@ -957,30 +957,19 @@ struct StakeRewardCalculation { total_stake_rewards_lamports: u64, } -impl Bank { - pub fn default_for_tests() -> Self { - Self::default_with_accounts(Accounts::default_for_tests()) - } +#[derive(Debug, PartialEq, Eq, Copy, Clone)] +pub(super) enum RewardInterval { + /// the slot within the epoch is INSIDE the reward distribution interval + InsideInterval, + /// the slot within the epoch is OUTSIDE the reward distribution interval + OutsideInterval, +} +impl Bank { pub fn new_for_benches(genesis_config: &GenesisConfig) -> Self { Self::new_with_paths_for_benches(genesis_config, Vec::new()) } - pub fn new_for_tests(genesis_config: &GenesisConfig) -> Self { - Self::new_for_tests_with_config(genesis_config, BankTestConfig::default()) - } - - pub fn new_for_tests_with_config( - genesis_config: &GenesisConfig, - test_config: BankTestConfig, - ) -> Self { - Self::new_with_config_for_tests( - genesis_config, - test_config.secondary_indexes, - AccountShrinkThreshold::default(), - ) - } - /// Intended for use by tests only. /// create new bank with the given configs. 
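// Illustrative aside, not from the patch itself, referring to the
// accounts_background_service.rs hunk above: both shrink calls are now gated
// on `is_startup_verification_complete()`, so a background shrink can neither
// race the startup accounts-hash verification nor hand it storages newer than
// the snapshot. Sketch of that loop body with stand-in trait methods:
trait BankMaintenance {
    fn is_startup_verification_complete(&self) -> bool;
    fn shrink_ancient_slots(&self);
    fn shrink_candidate_slots(&self);
}

fn maintenance_pass(bank: &dyn BankMaintenance) {
    if bank.is_startup_verification_complete() {
        bank.shrink_ancient_slots();
        bank.shrink_candidate_slots();
    }
    // otherwise skip shrinking; the next iteration of the service loop retries
}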
pub fn new_with_runtime_config_for_tests( @@ -996,27 +985,6 @@ impl Bank { ) } - pub fn new_no_wallclock_throttle_for_tests(genesis_config: &GenesisConfig) -> Self { - let mut bank = Self::new_for_tests(genesis_config); - - bank.ns_per_slot = std::u128::MAX; - bank - } - - pub(crate) fn new_with_config_for_tests( - genesis_config: &GenesisConfig, - account_indexes: AccountSecondaryIndexes, - shrink_ratio: AccountShrinkThreshold, - ) -> Self { - Self::new_with_paths_for_tests( - genesis_config, - Arc::new(RuntimeConfig::default()), - Vec::new(), - account_indexes, - shrink_ratio, - ) - } - fn default_with_accounts(accounts: Accounts) -> Self { let mut bank = Self { skipped_rewrites: Mutex::default(), @@ -1306,7 +1274,7 @@ impl Bank { parent.freeze(); assert_ne!(slot, parent.slot()); - let epoch_schedule = parent.epoch_schedule; + let epoch_schedule = parent.epoch_schedule().clone(); let epoch = epoch_schedule.get_epoch(slot); let (rc, bank_rc_creation_time_us) = measure_us!({ @@ -1454,7 +1422,7 @@ impl Bank { ); } else { // Save a snapshot of stakes for use in consensus and stake weighted networking - let leader_schedule_epoch = epoch_schedule.get_leader_schedule_epoch(slot); + let leader_schedule_epoch = new.epoch_schedule().get_leader_schedule_epoch(slot); new.update_epoch_stakes(leader_schedule_epoch); } if new.is_partitioned_rewards_code_enabled() { @@ -1935,6 +1903,7 @@ impl Bank { debug_do_not_add_builtins, ); bank.fill_missing_sysvar_cache_entries(); + bank.rebuild_skipped_rewrites(); // Sanity assertions between bank snapshot and genesis config // Consider removing from serializable bank state @@ -2015,7 +1984,7 @@ impl Bank { fee_rate_governor: self.fee_rate_governor.clone(), collected_rent: self.collected_rent.load(Relaxed), rent_collector: self.rent_collector.clone(), - epoch_schedule: self.epoch_schedule, + epoch_schedule: self.epoch_schedule.clone(), inflation: *self.inflation.read().unwrap(), stakes: &self.stakes_cache, epoch_stakes: &self.epoch_stakes, @@ -3890,15 +3859,15 @@ impl Bank { self.max_tick_height = (self.slot + 1) * self.ticks_per_slot; self.slots_per_year = genesis_config.slots_per_year(); - self.epoch_schedule = genesis_config.epoch_schedule; + self.epoch_schedule = genesis_config.epoch_schedule.clone(); self.inflation = Arc::new(RwLock::new(genesis_config.inflation)); self.rent_collector = RentCollector::new( self.epoch, - *self.epoch_schedule(), + self.epoch_schedule().clone(), self.slots_per_year, - genesis_config.rent, + genesis_config.rent.clone(), ); // Add additional builtin programs specified in the genesis config @@ -4278,20 +4247,6 @@ impl Bank { } } - /// Prepare a transaction batch from a list of legacy transactions. Used for tests only. - pub fn prepare_batch_for_tests(&self, txs: Vec) -> TransactionBatch { - let transaction_account_lock_limit = self.get_transaction_account_lock_limit(); - let sanitized_txs = txs - .into_iter() - .map(SanitizedTransaction::from_transaction_for_tests) - .collect::>(); - let lock_results = self - .rc - .accounts - .lock_accounts(sanitized_txs.iter(), transaction_account_lock_limit); - TransactionBatch::new(lock_results, self, Cow::Owned(sanitized_txs)) - } - /// Prepare a transaction batch from a list of versioned transactions from /// an entry. Used for tests only. 
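// Illustrative aside, not from the patch itself: the check_age,
// check_status_cache and check_transactions hunks just below relax their
// parameters to accept `impl core::borrow::Borrow<SanitizedTransaction>`
// items instead of concrete `SanitizedTransaction` values, so callers can
// pass slices of owned transactions or of references without copying.
// Minimal stand-in showing the same signature shape:
use std::borrow::Borrow;

struct SanitizedTx(u64); // stand-in type

fn check_all(txs: &[impl Borrow<SanitizedTx>]) -> u64 {
    txs.iter().map(|tx| tx.borrow().0).sum()
}

fn main() {
    let owned = vec![SanitizedTx(1), SanitizedTx(2)];
    let by_ref: Vec<&SanitizedTx> = owned.iter().collect();
    assert_eq!(check_all(&owned), check_all(&by_ref));
}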
pub fn prepare_entry_batch(&self, txs: Vec) -> Result { @@ -4483,7 +4438,7 @@ impl Bank { fn check_age<'a>( &self, - txs: impl Iterator, + txs: impl Iterator + 'a)>, lock_results: &[Result<()>], max_age: usize, error_counters: &mut TransactionErrorMetrics, @@ -4495,7 +4450,7 @@ impl Bank { txs.zip(lock_results) .map(|(tx, lock_res)| match lock_res { Ok(()) => self.check_transaction_age( - tx, + tx.borrow(), max_age, &next_durable_nonce, &hash_queue, @@ -4541,7 +4496,7 @@ impl Bank { fn check_status_cache( &self, - sanitized_txs: &[SanitizedTransaction], + sanitized_txs: &[impl core::borrow::Borrow], lock_results: Vec, error_counters: &mut TransactionErrorMetrics, ) -> Vec { @@ -4550,6 +4505,7 @@ impl Bank { .iter() .zip(lock_results) .map(|(sanitized_tx, (lock_result, nonce))| { + let sanitized_tx = sanitized_tx.borrow(); if lock_result.is_ok() && self.is_transaction_already_processed(sanitized_tx, &rcache) { @@ -4604,7 +4560,7 @@ impl Bank { pub fn check_transactions( &self, - sanitized_txs: &[SanitizedTransaction], + sanitized_txs: &[impl core::borrow::Borrow], lock_results: &[Result<()>], max_age: usize, error_counters: &mut TransactionErrorMetrics, @@ -4892,7 +4848,7 @@ impl Bank { let mut transaction_context = TransactionContext::new( transaction_accounts, - self.rent_collector.rent, + self.rent_collector.rent.clone(), compute_budget.max_invoke_stack_height, compute_budget.max_instruction_trace_length, ); @@ -5063,28 +5019,23 @@ impl Bank { let ExtractedPrograms { loaded: mut loaded_programs_for_txs, missing, - unloaded, } = { // Lock the global cache to figure out which programs need to be loaded let loaded_programs_cache = self.loaded_programs_cache.read().unwrap(); - loaded_programs_cache.extract(self, programs_and_slots.into_iter()) + Mutex::into_inner( + Arc::into_inner( + loaded_programs_cache.extract(self, programs_and_slots.into_iter()), + ) + .unwrap(), + ) + .unwrap() }; // Load missing programs while global cache is unlocked let missing_programs: Vec<(Pubkey, Arc)> = missing .iter() - .map(|(key, count)| { - let program = self.load_program(key, false, None); - program.tx_usage_counter.store(*count, Ordering::Relaxed); - (*key, program) - }) - .collect(); - - // Reload unloaded programs while global cache is unlocked - let unloaded_programs: Vec<(Pubkey, Arc)> = unloaded - .iter() - .map(|(key, count)| { - let program = self.load_program(key, true, None); + .map(|(key, (count, reloading))| { + let program = self.load_program(key, *reloading, None); program.tx_usage_counter.store(*count, Ordering::Relaxed); (*key, program) }) @@ -5097,12 +5048,6 @@ impl Bank { // Use the returned entry as that might have been deduplicated globally loaded_programs_for_txs.replenish(key, entry); } - for (key, program) in unloaded_programs { - let (_was_occupied, entry) = loaded_programs_cache.replenish(key, program); - // Use the returned entry as that might have been deduplicated globally - loaded_programs_for_txs.replenish(key, entry); - } - loaded_programs_for_txs } @@ -5221,12 +5166,6 @@ impl Bank { ); check_time.stop(); - const PROGRAM_OWNERS: &[Pubkey] = &[ - bpf_loader_upgradeable::id(), - bpf_loader::id(), - bpf_loader_deprecated::id(), - loader_v4::id(), - ]; let mut program_accounts_map = self.filter_executable_program_accounts( &self.ancestors, sanitized_txs, @@ -5541,18 +5480,6 @@ impl Bank { self.update_accounts_data_size_delta_off_chain(data_size_delta); } - /// Set the initial accounts data size - /// NOTE: This fn is *ONLY FOR TESTS* - pub fn 
set_accounts_data_size_initial_for_tests(&mut self, amount: u64) { - self.accounts_data_size_initial = amount; - } - - /// Update the accounts data size off-chain delta - /// NOTE: This fn is *ONLY FOR TESTS* - pub fn update_accounts_data_size_delta_off_chain_for_tests(&self, amount: i64) { - self.update_accounts_data_size_delta_off_chain(amount) - } - fn filter_program_errors_and_collect_fee( &self, txs: &[SanitizedTransaction], @@ -5786,6 +5713,70 @@ impl Bank { }); } + /// After deserialize, populate skipped rewrites with accounts that would normally + /// have had their data rewritten in this slot due to rent collection (but didn't). + /// + /// This is required when starting up from a snapshot to verify the bank hash. + /// + /// A second usage is from the `bank_to_xxx_snapshot_archive()` functions. These fns call + /// `Bank::rehash()` to handle if the user manually modified any accounts and thus requires + /// calculating the bank hash again. Since calculating the bank hash *takes* the skipped + /// rewrites, this second time will not have any skipped rewrites, and thus the hash would be + /// updated to the wrong value. So, rebuild the skipped rewrites before rehashing. + fn rebuild_skipped_rewrites(&self) { + // If the feature gate to *not* add rent collection rewrites to the bank hash is enabled, + // then do *not* add anything to our skipped_rewrites. + if self.bank_hash_skips_rent_rewrites() { + return; + } + + let (skipped_rewrites, measure_skipped_rewrites) = + measure!(self.calculate_skipped_rewrites()); + info!( + "Rebuilding skipped rewrites of {} accounts{measure_skipped_rewrites}", + skipped_rewrites.len() + ); + + *self.skipped_rewrites.lock().unwrap() = skipped_rewrites; + } + + /// Calculates (and returns) skipped rewrites for this bank + /// + /// Refer to `rebuild_skipped_rewrites()` for more documentation. + /// This implementaion is purposely separate to facilitate testing. + /// + /// The key observation is that accounts in Bank::skipped_rewrites are only used IFF the + /// specific account is *not* already in the accounts delta hash. If an account is not in + /// the accounts delta hash, then it means the account was not modified. Since (basically) + /// all accounts are rent exempt, this means (basically) all accounts are unmodified by rent + /// collection. So we just need to load the accounts that would've been checked for rent + /// collection, hash them, and add them to Bank::skipped_rewrites. + /// + /// As of this writing, there are ~350 million acounts on mainnet-beta. + /// Rent collection almost always collects a single slot at a time. + /// So 1 slot of 432,000, of 350 million accounts, is ~800 accounts per slot. + /// Since we haven't started processing anything yet, it should be fast enough to simply + /// load the accounts directly. + /// Empirically, this takes about 3-4 milliseconds. + fn calculate_skipped_rewrites(&self) -> HashMap { + // The returned skipped rewrites may include accounts that were actually *not* skipped! + // (This is safe, as per the fn's documentation above.) 
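// Illustrative aside, not from the patch itself (simplified stand-ins): the
// expression that follows boils down to "for every pubkey range covered by
// this slot's rent-collection partitions, load the accounts and record
// (pubkey, account_hash)". `load_range` and `hash_account` below stand in
// for the AccountsDb calls used in the real body.
use std::collections::HashMap;
use std::ops::RangeInclusive;

type Pubkey = [u8; 32];
type AccountHash = u64;

fn skipped_rewrites(
    partitions: Vec<RangeInclusive<Pubkey>>,
    load_range: impl Fn(RangeInclusive<Pubkey>) -> Vec<(Pubkey, Vec<u8>)>,
    hash_account: impl Fn(&[u8], &Pubkey) -> AccountHash,
) -> HashMap<Pubkey, AccountHash> {
    partitions
        .into_iter()
        .flat_map(load_range) // load every account the partition would visit
        .map(|(pubkey, account_data)| {
            let hash = hash_account(&account_data, &pubkey); // hash it as rent collection would
            (pubkey, hash)
        })
        .collect()
}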
+ HashMap::from_iter( + self.rent_collection_partitions() + .into_iter() + .map(accounts_partition::pubkey_range_from_partition) + .flat_map(|pubkey_range| { + self.rc + .accounts + .load_to_collect_rent_eagerly(&self.ancestors, pubkey_range) + }) + .map(|(pubkey, account, _slot)| { + let account_hash = AccountsDb::hash_account(&account, &pubkey); + (pubkey, account_hash) + }), + ) + } + fn collect_rent_eagerly(&self) { if self.lazy_rent_collection.load(Relaxed) { return; @@ -5860,11 +5851,6 @@ impl Bank { ); } - #[cfg(test)] - fn restore_old_behavior_for_fragile_tests(&self) { - self.lazy_rent_collection.store(true, Relaxed); - } - fn rent_collection_partitions(&self) -> Vec { if !self.use_fixed_collection_cycle() { // This mode is for production/development/testing. @@ -5935,13 +5921,9 @@ impl Bank { let mut skipped_rewrites = Vec::default(); for (pubkey, account, _loaded_slot) in accounts.iter_mut() { let rent_collected_info = if self.should_collect_rent() { - let (rent_collected_info, measure) = - measure!(self.rent_collector.collect_from_existing_account( - pubkey, - account, - self.rc.accounts.accounts_db.filler_account_suffix.as_ref(), - set_exempt_rent_epoch_max, - )); + let (rent_collected_info, measure) = measure!(self + .rent_collector + .collect_from_existing_account(pubkey, account, set_exempt_rent_epoch_max,)); time_collecting_rent_us += measure.as_us(); rent_collected_info } else { @@ -6054,8 +6036,6 @@ impl Bank { /// collect rent and update 'account.rent_epoch' as necessary /// store accounts, whether rent was collected or not (depending on whether we skipping rewrites is enabled) /// update bank's rewrites set for all rewrites that were skipped - /// if 'just_rewrites', function will only update bank's rewrites set and not actually store any accounts. - /// This flag is used when restoring from a snapshot to calculate and verify the initial bank's delta hash. fn collect_rent_in_range( &self, partition: Partition, @@ -6120,7 +6100,7 @@ impl Bank { self.skipped_rewrites .lock() .unwrap() - .extend(&mut results.skipped_rewrites.into_iter()); + .extend(results.skipped_rewrites); // We cannot assert here that we collected from all expected keys. // Some accounts may have been topped off or may have had all funds removed and gone to 0 lamports. @@ -6431,19 +6411,6 @@ impl Bank { execution_results.remove(0) } - /// Process multiple transaction in a single batch. This is used for benches and unit tests. - /// - /// # Panics - /// - /// Panics if any of the transactions do not pass sanitization checks. - #[must_use] - pub fn process_transactions<'a>( - &self, - txs: impl Iterator, - ) -> Vec> { - self.try_process_transactions(txs).unwrap() - } - /// Process multiple transaction in a single batch. This is used for benches and unit tests. /// Short circuits if any of the transactions do not pass sanitization checks. pub fn try_process_transactions<'a>( @@ -6456,16 +6423,6 @@ impl Bank { self.try_process_entry_transactions(txs) } - /// Process entry transactions in a single batch. This is used for benches and unit tests. - /// - /// # Panics - /// - /// Panics if any of the transactions do not pass sanitization checks. - #[must_use] - pub fn process_entry_transactions(&self, txs: Vec) -> Vec> { - self.try_process_entry_transactions(txs).unwrap() - } - /// Process multiple transaction in a single batch. This is used for benches and unit tests. /// Short circuits if any of the transactions do not pass sanitization checks. 
pub fn try_process_entry_transactions( @@ -6578,14 +6535,6 @@ impl Bank { .flush_accounts_cache(false, Some(self.slot())) } - #[cfg(test)] - pub fn flush_accounts_cache_slot_for_tests(&self) { - self.rc - .accounts - .accounts_db - .flush_accounts_cache_slot_for_tests(self.slot()) - } - pub fn expire_old_recycle_stores(&self) { self.rc.accounts.accounts_db.expire_old_recycle_stores() } @@ -7023,7 +6972,7 @@ impl Bank { .calculate_accounts_delta_hash_internal( slot, ignore, - std::mem::take(&mut self.skipped_rewrites.lock().unwrap()), + self.skipped_rewrites.lock().unwrap().clone(), ); let mut signature_count_buf = [0u8; 8]; @@ -7168,7 +7117,7 @@ impl Bank { if config.run_in_background { let ancestors = ancestors.clone(); let accounts = Arc::clone(accounts); - let epoch_schedule = *epoch_schedule; + let epoch_schedule = epoch_schedule.clone(); let rent_collector = rent_collector.clone(); let accounts_ = Arc::clone(&accounts); accounts.accounts_db.verify_accounts_hash_in_bg.start(|| { @@ -7242,16 +7191,6 @@ impl Bank { .check_complete() } - /// This is only valid to call from tests. - /// block until initial accounts hash verification has completed - pub fn wait_for_initial_accounts_hash_verification_completed_for_tests(&self) { - self.rc - .accounts - .accounts_db - .verify_accounts_hash_in_bg - .wait_for_complete() - } - /// Get this bank's storages to use for snapshots. /// /// If a base slot is provided, return only the storages that are *higher* than this slot. @@ -7491,10 +7430,6 @@ impl Bank { accounts_hash } - pub fn update_accounts_hash_for_tests(&self) -> AccountsHash { - self.update_accounts_hash(CalcAccountsHashDataSource::IndexForTests, false, false) - } - /// Calculate the incremental accounts hash from `base_slot` to `self` pub fn update_incremental_accounts_hash(&self, base_slot: Slot) -> IncrementalAccountsHash { let config = CalcAccountsHashConfig { @@ -7592,22 +7527,9 @@ impl Bank { } }); - let (verified_bank, verify_bank_time_us) = measure_us!({ - let should_verify_bank = !self - .rc - .accounts - .accounts_db - .test_skip_rewrites_but_include_in_bank_hash; - if should_verify_bank { - info!("Verifying bank..."); - let verified = self.verify_hash(); - info!("Verifying bank... Done."); - verified - } else { - info!("Verifying bank... Skipped."); - true - } - }); + info!("Verifying bank..."); + let (verified_bank, verify_bank_time_us) = measure_us!(self.verify_hash()); + info!("Verifying bank... 
Done."); datapoint_info!( "verify_snapshot_bank", @@ -7889,11 +7811,6 @@ impl Bank { .shrink_ancient_slots(self.epoch_schedule()) } - pub fn no_overflow_rent_distribution_enabled(&self) -> bool { - self.feature_set - .is_active(&feature_set::no_overflow_rent_distribution::id()) - } - pub fn prevent_rent_paying_rent_recipients(&self) -> bool { self.feature_set .is_active(&feature_set::prevent_rent_paying_rent_recipients::id()) @@ -7958,16 +7875,29 @@ impl Bank { caller: ApplyFeatureActivationsCaller, debug_do_not_add_builtins: bool, ) { - use ApplyFeatureActivationsCaller::*; + use ApplyFeatureActivationsCaller as Caller; let allow_new_activations = match caller { - FinishInit => false, - NewFromParent => true, - WarpFromParent => false, + Caller::FinishInit => false, + Caller::NewFromParent => true, + Caller::WarpFromParent => false, }; let (feature_set, new_feature_activations) = self.compute_active_feature_set(allow_new_activations); self.feature_set = Arc::new(feature_set); + // Update activation slot of features in `new_feature_activations` + for feature_id in new_feature_activations.iter() { + if let Some(mut account) = self.get_account_with_fixed_root(feature_id) { + if let Some(mut feature) = feature::from_account(&account) { + feature.activated_at = Some(self.slot()); + if feature::to_account(&feature, &mut account).is_some() { + self.store_account(feature_id, &account); + } + info!("Feature {} activated at slot {}", feature_id, self.slot()); + } + } + } + if new_feature_activations.contains(&feature_set::pico_inflation::id()) { *self.inflation.write().unwrap() = Inflation::pico(); self.fee_rate_governor.burn_percent = 50; // 50% fee burn @@ -8036,38 +7966,27 @@ impl Bank { /// Compute the active feature set based on the current bank state, /// and return it together with the set of newly activated features. 
- fn compute_active_feature_set( - &mut self, - allow_new_activations: bool, - ) -> (FeatureSet, HashSet) { + fn compute_active_feature_set(&self, include_pending: bool) -> (FeatureSet, HashSet) { let mut active = self.feature_set.active.clone(); let mut inactive = HashSet::new(); - let mut newly_activated = HashSet::new(); + let mut pending = HashSet::new(); let slot = self.slot(); for feature_id in &self.feature_set.inactive { let mut activated = None; - if let Some(mut account) = self.get_account_with_fixed_root(feature_id) { - if let Some(mut feature) = feature::from_account(&account) { + if let Some(account) = self.get_account_with_fixed_root(feature_id) { + if let Some(feature) = feature::from_account(&account) { match feature.activated_at { - None => { - if allow_new_activations { - // Feature has been requested, activate it now - feature.activated_at = Some(slot); - if feature::to_account(&feature, &mut account).is_some() { - self.store_account(feature_id, &account); - } - newly_activated.insert(*feature_id); - activated = Some(slot); - info!("Feature {} activated at slot {}", feature_id, slot); - } + None if include_pending => { + // Feature activation is pending + pending.insert(*feature_id); + activated = Some(slot); } - Some(activation_slot) => { - if slot >= activation_slot { - // Feature is already active - activated = Some(activation_slot); - } + Some(activation_slot) if slot >= activation_slot => { + // Feature has been activated already + activated = Some(activation_slot); } + _ => {} } } } @@ -8078,7 +7997,7 @@ impl Bank { } } - (FeatureSet { active, inactive }, newly_activated) + (FeatureSet { active, inactive }, pending) } fn apply_builtin_program_feature_transitions( @@ -8265,6 +8184,155 @@ impl Bank { } } +#[cfg(feature = "dev-context-only-utils")] +impl Bank { + pub fn wrap_with_bank_forks_for_tests(self) -> (Arc, Arc>) { + let bank_fork = BankForks::new_rw_arc(self); + let bank_arc = bank_fork.read().unwrap().root_bank(); + bank_arc + .loaded_programs_cache + .write() + .unwrap() + .set_fork_graph(bank_fork.clone()); + (bank_arc, bank_fork) + } + + pub fn default_for_tests() -> Self { + Self::default_with_accounts(Accounts::default_for_tests()) + } + + pub fn new_with_bank_forks_for_tests( + genesis_config: &GenesisConfig, + ) -> (Arc, Arc>) { + let bank = Self::new_for_tests(genesis_config); + bank.wrap_with_bank_forks_for_tests() + } + + pub fn new_for_tests(genesis_config: &GenesisConfig) -> Self { + Self::new_for_tests_with_config(genesis_config, BankTestConfig::default()) + } + + pub fn new_with_mockup_builtin_for_tests( + genesis_config: &GenesisConfig, + program_id: Pubkey, + builtin_function: BuiltinFunctionWithContext, + ) -> (Arc, Arc>) { + let mut bank = Self::new_for_tests(genesis_config); + bank.add_mockup_builtin(program_id, builtin_function); + bank.wrap_with_bank_forks_for_tests() + } + + pub fn new_for_tests_with_config( + genesis_config: &GenesisConfig, + test_config: BankTestConfig, + ) -> Self { + Self::new_with_config_for_tests( + genesis_config, + test_config.secondary_indexes, + AccountShrinkThreshold::default(), + ) + } + + pub fn new_no_wallclock_throttle_for_tests( + genesis_config: &GenesisConfig, + ) -> (Arc, Arc>) { + let mut bank = Self::new_for_tests(genesis_config); + + bank.ns_per_slot = std::u128::MAX; + bank.wrap_with_bank_forks_for_tests() + } + + pub(crate) fn new_with_config_for_tests( + genesis_config: &GenesisConfig, + account_indexes: AccountSecondaryIndexes, + shrink_ratio: AccountShrinkThreshold, + ) -> Self { + 
Self::new_with_paths_for_tests( + genesis_config, + Arc::new(RuntimeConfig::default()), + Vec::new(), + account_indexes, + shrink_ratio, + ) + } + + /// Prepare a transaction batch from a list of legacy transactions. Used for tests only. + pub fn prepare_batch_for_tests(&self, txs: Vec) -> TransactionBatch { + let transaction_account_lock_limit = self.get_transaction_account_lock_limit(); + let sanitized_txs = txs + .into_iter() + .map(SanitizedTransaction::from_transaction_for_tests) + .collect::>(); + let lock_results = self + .rc + .accounts + .lock_accounts(sanitized_txs.iter(), transaction_account_lock_limit); + TransactionBatch::new(lock_results, self, Cow::Owned(sanitized_txs)) + } + + /// Set the initial accounts data size + /// NOTE: This fn is *ONLY FOR TESTS* + pub fn set_accounts_data_size_initial_for_tests(&mut self, amount: u64) { + self.accounts_data_size_initial = amount; + } + + /// Update the accounts data size off-chain delta + /// NOTE: This fn is *ONLY FOR TESTS* + pub fn update_accounts_data_size_delta_off_chain_for_tests(&self, amount: i64) { + self.update_accounts_data_size_delta_off_chain(amount) + } + + #[cfg(test)] + fn restore_old_behavior_for_fragile_tests(&self) { + self.lazy_rent_collection.store(true, Relaxed); + } + + /// Process multiple transaction in a single batch. This is used for benches and unit tests. + /// + /// # Panics + /// + /// Panics if any of the transactions do not pass sanitization checks. + #[must_use] + pub fn process_transactions<'a>( + &self, + txs: impl Iterator, + ) -> Vec> { + self.try_process_transactions(txs).unwrap() + } + + /// Process entry transactions in a single batch. This is used for benches and unit tests. + /// + /// # Panics + /// + /// Panics if any of the transactions do not pass sanitization checks. + #[must_use] + pub fn process_entry_transactions(&self, txs: Vec) -> Vec> { + self.try_process_entry_transactions(txs).unwrap() + } + + #[cfg(test)] + pub fn flush_accounts_cache_slot_for_tests(&self) { + self.rc + .accounts + .accounts_db + .flush_accounts_cache_slot_for_tests(self.slot()) + } + + /// This is only valid to call from tests. + /// block until initial accounts hash verification has completed + pub fn wait_for_initial_accounts_hash_verification_completed_for_tests(&self) { + self.rc + .accounts + .accounts_db + .verify_accounts_hash_in_bg + .wait_for_complete() + } + + pub fn update_accounts_hash_for_tests(&self) -> AccountsHash { + self.update_accounts_hash(CalcAccountsHashDataSource::IndexForTests, false, false) + } +} + /// Compute how much an account has changed size. This function is useful when the data size delta /// needs to be computed and passed to an `update_accounts_data_size_delta` function. fn calculate_data_size_delta(old_data_size: usize, new_data_size: usize) -> i64 { diff --git a/runtime/src/bank/bank_hash_details.rs b/runtime/src/bank/bank_hash_details.rs index a1b4fa74f2ff73..6b40e7aef6e4ba 100644 --- a/runtime/src/bank/bank_hash_details.rs +++ b/runtime/src/bank/bank_hash_details.rs @@ -216,14 +216,10 @@ pub fn write_bank_hash_details_file(bank: &Bank) -> std::result::Result<(), Stri // path does not exist. So, call std::fs_create_dir_all first. 
// https://doc.rust-lang.org/std/fs/fn.write.html _ = std::fs::create_dir_all(parent_dir); - let file = std::fs::File::create(&path).map_err(|err| { - format!( - "Unable to create bank hash file at {}: {err}", - path.display() - ) - })?; + let file = std::fs::File::create(&path) + .map_err(|err| format!("Unable to create file at {}: {err}", path.display()))?; serde_json::to_writer_pretty(file, &details) - .map_err(|err| format!("Unable to write bank hash file contents: {err}"))?; + .map_err(|err| format!("Unable to write file at {}: {err}", path.display()))?; } Ok(()) } diff --git a/runtime/src/bank/fee_distribution.rs b/runtime/src/bank/fee_distribution.rs index 497a0e4ede76b4..00f70eb09ac3f6 100644 --- a/runtime/src/bank/fee_distribution.rs +++ b/runtime/src/bank/fee_distribution.rs @@ -102,14 +102,14 @@ impl Bank { return Err(DepositFeeError::InvalidAccountOwner); } - let rent = self.rent_collector().rent; - let recipient_pre_rent_state = RentState::from_account(&account, &rent); + let rent = &self.rent_collector().rent; + let recipient_pre_rent_state = RentState::from_account(&account, rent); let distribution = account.checked_add_lamports(fees); if distribution.is_err() { return Err(DepositFeeError::LamportOverflow); } if options.check_rent_paying { - let recipient_post_rent_state = RentState::from_account(&account, &rent); + let recipient_post_rent_state = RentState::from_account(&account, rent); let rent_state_transition_allowed = recipient_post_rent_state.transition_allowed_from(&recipient_pre_rent_state); if !rent_state_transition_allowed { @@ -181,19 +181,14 @@ impl Bank { (staked1, pubkey1).cmp(&(staked2, pubkey2)).reverse() }); - let enforce_fix = self.no_overflow_rent_distribution_enabled(); - let mut rent_distributed_in_initial_round = 0; let validator_rent_shares = validator_stakes .into_iter() .map(|(pubkey, staked)| { - let rent_share = if !enforce_fix { - (((staked * rent_to_be_distributed) as f64) / (total_staked as f64)) as u64 - } else { - (((staked as u128) * (rent_to_be_distributed as u128)) / (total_staked as u128)) - .try_into() - .unwrap() - }; + let rent_share = (((staked as u128) * (rent_to_be_distributed as u128)) + / (total_staked as u128)) + .try_into() + .unwrap(); rent_distributed_in_initial_round += rent_share; (pubkey, rent_share) }) @@ -214,7 +209,7 @@ impl Bank { } else { rent_share }; - if !enforce_fix || rent_to_be_paid > 0 { + if rent_to_be_paid > 0 { let check_account_owner = self.validate_fee_collector_account(); let check_rent_paying = self.prevent_rent_paying_rent_recipients(); match self.deposit_fees( @@ -260,15 +255,7 @@ impl Bank { ); } - if enforce_fix { - assert_eq!(leftover_lamports, 0); - } else if leftover_lamports != 0 { - warn!( - "There was leftover from rent distribution: {}", - leftover_lamports - ); - self.capitalization.fetch_sub(leftover_lamports, Relaxed); - } + assert_eq!(leftover_lamports, 0); } pub(super) fn distribute_rent_fees(&self) { @@ -308,7 +295,6 @@ pub mod tests { create_genesis_config, create_genesis_config_with_leader, create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs, }, - log::info, solana_sdk::{ account::AccountSharedData, feature_set, native_token::sol_to_lamports, pubkey, rent::Rent, signature::Signer, @@ -586,10 +572,9 @@ pub mod tests { let genesis = create_genesis_config(initial_balance); let pubkey = genesis.mint_keypair.pubkey(); let mut genesis_config = genesis.genesis_config; - let rent = Rent::default(); - genesis_config.rent = rent; // Ensure rent is non-zero, as genesis_utils sets 
Rent::free by default + genesis_config.rent = Rent::default(); // Ensure rent is non-zero, as genesis_utils sets Rent::free by default let bank = Bank::new_for_tests(&genesis_config); - let min_rent_exempt_balance = rent.minimum_balance(0); + let min_rent_exempt_balance = genesis_config.rent.minimum_balance(0); let deposit_amount = 500; assert!(initial_balance + deposit_amount < min_rent_exempt_balance); @@ -623,50 +608,6 @@ pub mod tests { } } - #[test] - fn test_distribute_rent_to_validators_overflow() { - solana_logger::setup(); - - // These values are taken from the real cluster (testnet) - const RENT_TO_BE_DISTRIBUTED: u64 = 120_525; - const VALIDATOR_STAKE: u64 = 374_999_998_287_840; - - let validator_pubkey = solana_sdk::pubkey::new_rand(); - let mut genesis_config = - create_genesis_config_with_leader(10, &validator_pubkey, VALIDATOR_STAKE) - .genesis_config; - - let bank = Bank::new_for_tests(&genesis_config); - let old_validator_lamports = bank.get_balance(&validator_pubkey); - bank.distribute_rent_to_validators(&bank.vote_accounts(), RENT_TO_BE_DISTRIBUTED); - let new_validator_lamports = bank.get_balance(&validator_pubkey); - assert_eq!( - new_validator_lamports, - old_validator_lamports + RENT_TO_BE_DISTRIBUTED - ); - - genesis_config - .accounts - .remove(&feature_set::no_overflow_rent_distribution::id()) - .unwrap(); - let bank = std::panic::AssertUnwindSafe(Bank::new_for_tests(&genesis_config)); - let old_validator_lamports = bank.get_balance(&validator_pubkey); - let new_validator_lamports = std::panic::catch_unwind(|| { - bank.distribute_rent_to_validators(&bank.vote_accounts(), RENT_TO_BE_DISTRIBUTED); - bank.get_balance(&validator_pubkey) - }); - - if let Ok(new_validator_lamports) = new_validator_lamports { - info!("asserting overflowing incorrect rent distribution"); - assert_ne!( - new_validator_lamports, - old_validator_lamports + RENT_TO_BE_DISTRIBUTED - ); - } else { - info!("NOT-asserting overflowing incorrect rent distribution"); - } - } - #[test] fn test_distribute_rent_to_validators_rent_paying() { solana_logger::setup(); @@ -700,7 +641,7 @@ pub mod tests { .unwrap(); } let bank = Bank::new_for_tests(&genesis_config); - let rent = bank.rent_collector().rent; + let rent = &bank.rent_collector().rent; let rent_exempt_minimum = rent.minimum_balance(0); // Make one validator have an empty identity account @@ -738,7 +679,7 @@ pub mod tests { let account = bank .get_account_with_fixed_root(address) .unwrap_or_default(); - RentState::from_account(&account, &rent) + RentState::from_account(&account, rent) }; // Assert starting RentStates diff --git a/runtime/src/bank/metrics.rs b/runtime/src/bank/metrics.rs index ccf8c4837761db..fd2c19473931d8 100644 --- a/runtime/src/bank/metrics.rs +++ b/runtime/src/bank/metrics.rs @@ -167,7 +167,6 @@ pub(crate) struct RewardsStoreMetrics { pub(crate) post_capitalization: u64, } -#[allow(dead_code)] pub(crate) fn report_partitioned_reward_metrics(bank: &Bank, timings: RewardsStoreMetrics) { datapoint_info!( "bank-partitioned_epoch_rewards_credit", diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 23278f0dad0618..97a0ef7534f49f 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -16,6 +16,7 @@ use { create_genesis_config_with_leader, create_genesis_config_with_vote_accounts, genesis_sysvar_and_builtin_program_lamports, GenesisConfigInfo, ValidatorVoteKeypairs, }, + snapshot_bank_utils, snapshot_utils, status_cache::MAX_CACHE_ENTRIES, }, assert_matches::assert_matches, @@ -25,7 +26,7 @@ use 
{ rayon::ThreadPoolBuilder, serde::{Deserialize, Serialize}, solana_accounts_db::{ - accounts::{AccountAddressFilter, RewardInterval}, + accounts::AccountAddressFilter, accounts_db::{AccountShrinkThreshold, DEFAULT_ACCOUNTS_SHRINK_RATIO}, accounts_index::{ AccountIndex, AccountSecondaryIndexes, IndexKey, ScanConfig, ScanError, ITER_BATCH_SIZE, @@ -127,6 +128,7 @@ use { thread::Builder, time::{Duration, Instant}, }, + tempfile::TempDir, test_case::test_case, }; @@ -154,6 +156,20 @@ impl VoteReward { } } +fn new_bank_from_parent_with_bank_forks( + bank_forks: &RwLock, + parent: Arc, + collector_id: &Pubkey, + slot: Slot, +) -> Arc { + let bank = Bank::new_from_parent(parent, collector_id, slot); + bank_forks + .write() + .unwrap() + .insert(bank) + .clone_without_scheduler() +} + #[test] fn test_race_register_tick_freeze() { solana_logger::setup(); @@ -276,13 +292,14 @@ pub(crate) fn create_simple_test_bank(lamports: u64) -> Bank { Bank::new_for_tests(&genesis_config) } -fn create_simple_test_arc_bank(lamports: u64) -> Arc { - Arc::new(create_simple_test_bank(lamports)) +fn create_simple_test_arc_bank(lamports: u64) -> (Arc, Arc>) { + let bank = create_simple_test_bank(lamports); + bank.wrap_with_bank_forks_for_tests() } #[test] fn test_bank_block_height() { - let bank0 = create_simple_test_arc_bank(1); + let bank0 = create_simple_test_arc_bank(1).0; assert_eq!(bank0.block_height(), 0); let bank1 = Arc::new(new_from_parent(bank0)); assert_eq!(bank1.block_height(), 1); @@ -420,11 +437,21 @@ fn test_credit_debit_rent_no_side_effect_on_hash() { &genesis_config.poh_config.target_tick_duration, genesis_config.ticks_per_slot, ) as u64; - let root_bank = Arc::new(Bank::new_for_tests(&genesis_config)); - let bank = Bank::new_from_parent(root_bank, &Pubkey::default(), slot); + let (root_bank, bank_forks_1) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let bank = new_bank_from_parent_with_bank_forks( + bank_forks_1.as_ref(), + root_bank, + &Pubkey::default(), + slot, + ); - let root_bank_2 = Arc::new(Bank::new_for_tests(&genesis_config)); - let bank_with_success_txs = Bank::new_from_parent(root_bank_2, &Pubkey::default(), slot); + let (root_bank_2, bank_forks_2) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let bank_with_success_txs = new_bank_from_parent_with_bank_forks( + bank_forks_2.as_ref(), + root_bank_2, + &Pubkey::default(), + slot, + ); assert_eq!(bank.last_blockhash(), genesis_config.hash()); @@ -448,7 +475,6 @@ fn test_credit_debit_rent_no_side_effect_on_hash() { let expected_rent = bank.rent_collector().collect_from_existing_account( &keypairs[4].pubkey(), &mut account_copy, - None, set_exempt_rent_epoch_max, ); assert_eq!(expected_rent.rent_amount, too_few_lamports); @@ -615,7 +641,12 @@ fn store_accounts_for_rent_test( } } -fn create_child_bank_for_rent_test(root_bank: Arc, genesis_config: &GenesisConfig) -> Bank { +fn create_child_bank_for_rent_test( + root_bank: Arc, + genesis_config: &GenesisConfig, + bank_forks: &RwLock, + mock_builtin: Option<(Pubkey, BuiltinFunctionWithContext)>, +) -> Arc { let mut bank = Bank::new_from_parent( root_bank, &Pubkey::default(), @@ -626,7 +657,14 @@ fn create_child_bank_for_rent_test(root_bank: Arc, genesis_config: &Genesi ) as u64, ); bank.rent_collector.slots_per_year = 421_812.0; - bank + if let Some((program_id, builtin_function)) = mock_builtin { + bank.add_mockup_builtin(program_id, builtin_function); + } + bank_forks + .write() + .unwrap() + .insert(bank) + .clone_without_scheduler() } /// if asserter returns true, check 
the capitalization @@ -868,6 +906,7 @@ fn test_rent_distribution() { // Enable rent collection bank.rent_collector.epoch = 5; bank.rent_collector.slots_per_year = 192.0; + let bank = bank.wrap_with_bank_forks_for_tests().0; let payer = Keypair::new(); let payer_account = AccountSharedData::new(400, 0, &system_program::id()); @@ -978,8 +1017,9 @@ fn test_rent_exempt_executable_account() { let (mut genesis_config, mint_keypair) = create_genesis_config(100_000); genesis_config.rent = rent_with_exemption_threshold(1000.0); - let root_bank = Arc::new(Bank::new_for_tests(&genesis_config)); - let bank = create_child_bank_for_rent_test(root_bank, &genesis_config); + let (root_bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let bank = + create_child_bank_for_rent_test(root_bank, &genesis_config, bank_forks.as_ref(), None); let account_pubkey = solana_sdk::pubkey::new_rand(); let account_balance = 1; @@ -1043,13 +1083,16 @@ fn test_rent_complex() { genesis_config.rent = rent_with_exemption_threshold(1000.0); - let root_bank = Bank::new_for_tests(&genesis_config); + let (root_bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); // until we completely transition to the eager rent collection, - // we must ensure lazy rent collection doens't get broken! + // we must ensure lazy rent collection doesn't get broken! root_bank.restore_old_behavior_for_fragile_tests(); - let root_bank = Arc::new(root_bank); - let mut bank = create_child_bank_for_rent_test(root_bank, &genesis_config); - bank.add_mockup_builtin(mock_program_id, MockBuiltin::vm); + let bank = create_child_bank_for_rent_test( + root_bank, + &genesis_config, + bank_forks.as_ref(), + Some((mock_program_id, MockBuiltin::vm)), + ); assert_eq!(bank.last_blockhash(), genesis_config.hash()); @@ -1232,7 +1275,7 @@ fn test_rent_collection_partitions(bank: &Bank) -> Vec { #[test] fn test_rent_eager_across_epoch_without_gap() { - let mut bank = create_simple_test_arc_bank(1); + let mut bank = create_simple_test_arc_bank(1).0; assert_eq!(bank.rent_collection_partitions(), vec![(0, 0, 32)]); bank = Arc::new(new_from_parent(bank)); @@ -1586,7 +1629,7 @@ fn test_rent_eager_collect_rent_in_partition(should_collect_rent: bool) { let (mut genesis_config, _mint_keypair) = create_genesis_config(1_000_000); for feature_id in FeatureSet::default().inactive { if feature_id != solana_sdk::feature_set::set_exempt_rent_epoch_max::id() - && (should_collect_rent + && (!should_collect_rent || feature_id != solana_sdk::feature_set::disable_rent_fees_collection::id()) { activate_feature(&mut genesis_config, feature_id); @@ -1597,6 +1640,9 @@ fn test_rent_eager_collect_rent_in_partition(should_collect_rent: bool) { let rent_due_pubkey = solana_sdk::pubkey::new_rand(); let rent_exempt_pubkey = solana_sdk::pubkey::new_rand(); let mut bank = Arc::new(Bank::new_for_tests(&genesis_config)); + + assert_eq!(should_collect_rent, bank.should_collect_rent()); + let zero_lamports = 0; let little_lamports = 1234; let large_lamports = 123_456_789; @@ -1684,7 +1730,11 @@ fn test_rent_eager_collect_rent_in_partition(should_collect_rent: bool) { ); } -fn new_from_parent_next_epoch(parent: Arc, epochs: Epoch) -> Bank { +fn new_from_parent_next_epoch( + parent: Arc, + bank_forks: &RwLock, + epochs: Epoch, +) -> Arc { let mut slot = parent.slot(); let mut epoch = parent.epoch(); for _ in 0..epochs { @@ -1692,7 +1742,7 @@ fn new_from_parent_next_epoch(parent: Arc, epochs: Epoch) -> Bank { epoch = parent.epoch_schedule().get_epoch(slot); } - 
Bank::new_from_parent(parent, &Pubkey::default(), slot) + new_bank_from_parent_with_bank_forks(bank_forks, parent, &Pubkey::default(), slot) } #[test] @@ -1703,17 +1753,21 @@ fn test_collect_rent_from_accounts() { for skip_rewrites in [false, true] { let zero_lamport_pubkey = Pubkey::from([0; 32]); - let genesis_bank = create_simple_test_arc_bank(100000); + let (genesis_bank, bank_forks) = create_simple_test_arc_bank(100000); let mut first_bank = new_from_parent(genesis_bank.clone()); if skip_rewrites { first_bank.activate_feature(&feature_set::skip_rent_rewrites::id()); } - let first_bank = Arc::new(first_bank); + let first_bank = bank_forks + .write() + .unwrap() + .insert(first_bank) + .clone_without_scheduler(); let first_slot = 1; assert_eq!(first_slot, first_bank.slot()); let epoch_delta = 4; - let later_bank = Arc::new(new_from_parent_next_epoch(first_bank, epoch_delta)); // a bank a few epochs in the future + let later_bank = new_from_parent_next_epoch(first_bank, bank_forks.as_ref(), epoch_delta); // a bank a few epochs in the future let later_slot = later_bank.slot(); assert!(later_bank.epoch() == genesis_bank.epoch() + epoch_delta); @@ -1813,6 +1867,7 @@ fn test_bank_update_vote_stake_rewards() { bank._load_vote_and_stake_accounts(&thread_pool, null_tracer()) }); } + #[cfg(test)] fn check_bank_update_vote_stake_rewards(load_vote_and_stake_accounts: F) where @@ -2074,11 +2129,10 @@ fn test_purge_empty_accounts() { solana_logger::setup(); let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); let amount = genesis_config.rent.minimum_balance(0); - let parent = Arc::new(Bank::new_for_tests_with_config( - &genesis_config, - BankTestConfig::default(), - )); - let mut bank = parent; + let (mut bank, bank_forks) = + Bank::new_for_tests_with_config(&genesis_config, BankTestConfig::default()) + .wrap_with_bank_forks_for_tests(); + for _ in 0..10 { let blockhash = bank.last_blockhash(); let pubkey = solana_sdk::pubkey::new_rand(); @@ -2086,7 +2140,7 @@ fn test_purge_empty_accounts() { bank.process_transaction(&tx).unwrap(); bank.freeze(); bank.squash(); - bank = Arc::new(new_from_parent(bank)); + bank = new_from_parent_with_fork_next_slot(bank, bank_forks.as_ref()); } bank.freeze(); @@ -2096,13 +2150,13 @@ fn test_purge_empty_accounts() { bank.clean_accounts_for_tests(); assert_eq!(bank.update_accounts_hash_for_tests(), hash); - let bank0 = Arc::new(new_from_parent(bank.clone())); + let bank0 = new_from_parent_with_fork_next_slot(bank.clone(), bank_forks.as_ref()); let blockhash = bank.last_blockhash(); let keypair = Keypair::new(); let tx = system_transaction::transfer(&mint_keypair, &keypair.pubkey(), amount, blockhash); bank0.process_transaction(&tx).unwrap(); - let bank1 = Arc::new(new_from_parent(bank0.clone())); + let bank1 = new_from_parent_with_fork_next_slot(bank0.clone(), bank_forks.as_ref()); let pubkey = solana_sdk::pubkey::new_rand(); let blockhash = bank.last_blockhash(); let tx = system_transaction::transfer(&keypair, &pubkey, amount, blockhash); @@ -2169,7 +2223,7 @@ fn test_purge_empty_accounts() { fn test_two_payments_to_one_party() { let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); let pubkey = solana_sdk::pubkey::new_rand(); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let amount = genesis_config.rent.minimum_balance(0); assert_eq!(bank.last_blockhash(), genesis_config.hash()); @@ -2187,7 +2241,7 @@ fn test_one_source_two_tx_one_batch() { 
let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); let key1 = solana_sdk::pubkey::new_rand(); let key2 = solana_sdk::pubkey::new_rand(); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let amount = genesis_config.rent.minimum_balance(0); assert_eq!(bank.last_blockhash(), genesis_config.hash()); @@ -2217,7 +2271,7 @@ fn test_one_tx_two_out_atomic_fail() { let (genesis_config, mint_keypair) = create_genesis_config(amount); let key1 = solana_sdk::pubkey::new_rand(); let key2 = solana_sdk::pubkey::new_rand(); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let instructions = system_instruction::transfer_many( &mint_keypair.pubkey(), &[(key1, amount), (key2, amount)], @@ -2238,7 +2292,7 @@ fn test_one_tx_two_out_atomic_pass() { let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); let key1 = solana_sdk::pubkey::new_rand(); let key2 = solana_sdk::pubkey::new_rand(); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let amount = genesis_config.rent.minimum_balance(0); let instructions = system_instruction::transfer_many( &mint_keypair.pubkey(), @@ -2260,7 +2314,7 @@ fn test_one_tx_two_out_atomic_pass() { fn test_detect_failed_duplicate_transactions() { let (mut genesis_config, mint_keypair) = create_genesis_config(10_000); genesis_config.fee_rate_governor = FeeRateGovernor::new(5_000, 0); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let dest = Keypair::new(); @@ -2289,7 +2343,7 @@ fn test_detect_failed_duplicate_transactions() { fn test_account_not_found() { solana_logger::setup(); let (genesis_config, mint_keypair) = create_genesis_config(0); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let keypair = Keypair::new(); assert_eq!( bank.transfer( @@ -2307,7 +2361,7 @@ fn test_account_not_found() { fn test_insufficient_funds() { let mint_amount = sol_to_lamports(1.); let (genesis_config, mint_keypair) = create_genesis_config(mint_amount); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let pubkey = solana_sdk::pubkey::new_rand(); let amount = genesis_config.rent.minimum_balance(0); bank.transfer(amount, &mint_keypair, &pubkey).unwrap(); @@ -2335,7 +2389,7 @@ fn test_insufficient_funds() { fn test_executed_transaction_count_post_bank_transaction_count_fix() { let mint_amount = sol_to_lamports(1.); let (genesis_config, mint_keypair) = create_genesis_config(mint_amount); - let bank = Bank::new_for_tests(&genesis_config); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let pubkey = solana_sdk::pubkey::new_rand(); let amount = genesis_config.rent.minimum_balance(0); bank.transfer(amount, &mint_keypair, &pubkey).unwrap(); @@ -2353,8 +2407,8 @@ fn test_executed_transaction_count_post_bank_transaction_count_fix() { assert_eq!(bank.executed_transaction_count(), 2); assert_eq!(bank.transaction_error_count(), 1); - let bank = Arc::new(bank); - let bank2 = Bank::new_from_parent( + let bank2 = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), bank, &Pubkey::default(), genesis_config.epoch_schedule.first_normal_slot, @@ -2378,7 +2432,7 @@ fn 
test_executed_transaction_count_post_bank_transaction_count_fix() { fn test_transfer_to_newb() { solana_logger::setup(); let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let amount = genesis_config.rent.minimum_balance(0); let pubkey = solana_sdk::pubkey::new_rand(); bank.transfer(amount, &mint_keypair, &pubkey).unwrap(); @@ -2389,7 +2443,7 @@ fn test_transfer_to_newb() { fn test_transfer_to_sysvar() { solana_logger::setup(); let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let amount = genesis_config.rent.minimum_balance(0); let normal_pubkey = solana_sdk::pubkey::new_rand(); @@ -2404,7 +2458,7 @@ fn test_transfer_to_sysvar() { assert_eq!(bank.get_balance(&normal_pubkey), amount); assert_eq!(bank.get_balance(&sysvar_pubkey), 1_169_280); - let bank = Arc::new(new_from_parent(bank)); + let bank = new_from_parent_with_fork_next_slot(bank, bank_forks.as_ref()); assert_eq!(bank.get_balance(&normal_pubkey), amount); assert_eq!(bank.get_balance(&sysvar_pubkey), 1_169_280); } @@ -2490,7 +2544,7 @@ fn test_bank_tx_fee() { let (expected_fee_collected, expected_fee_burned) = genesis_config.fee_rate_governor.burn(expected_fee_paid); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let capitalization = bank.capitalization(); @@ -2539,7 +2593,7 @@ fn test_bank_tx_fee() { ); // Verify that an InstructionError collects fees, too - let bank = Arc::new(Bank::new_from_parent(bank, &leader, 1)); + let bank = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank, &leader, 1); let mut tx = system_transaction::transfer(&mint_keypair, &key, 1, bank.last_blockhash()); // Create a bogus instruction to system_program to cause an instruction error tx.message.instructions[0].data[0] = 40; @@ -2603,7 +2657,7 @@ fn test_bank_tx_compute_unit_fee() { let (expected_fee_collected, expected_fee_burned) = genesis_config.fee_rate_governor.burn(expected_fee_paid); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let capitalization = bank.capitalization(); @@ -2651,7 +2705,7 @@ fn test_bank_tx_compute_unit_fee() { ); // Verify that an InstructionError collects fees, too - let bank = Arc::new(Bank::new_from_parent(bank, &leader, 1)); + let bank = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank, &leader, 1); let mut tx = system_transaction::transfer(&mint_keypair, &key, 1, bank.last_blockhash()); // Create a bogus instruction to system_program to cause an instruction error tx.message.instructions[0].data[0] = 40; @@ -2701,19 +2755,19 @@ fn test_bank_blockhash_fee_structure() { .target_lamports_per_signature = 5000; genesis_config.fee_rate_governor.target_signatures_per_slot = 0; - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); goto_end_of_slot(bank.clone()); let cheap_blockhash = bank.last_blockhash(); let cheap_lamports_per_signature = bank.get_lamports_per_signature(); assert_eq!(cheap_lamports_per_signature, 0); - let bank = Arc::new(Bank::new_from_parent(bank, &leader, 1)); + let bank = 
new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank, &leader, 1); goto_end_of_slot(bank.clone()); let expensive_blockhash = bank.last_blockhash(); let expensive_lamports_per_signature = bank.get_lamports_per_signature(); assert!(cheap_lamports_per_signature < expensive_lamports_per_signature); - let bank = Bank::new_from_parent(bank, &leader, 2); + let bank = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank, &leader, 2); // Send a transfer using cheap_blockhash let key = solana_sdk::pubkey::new_rand(); @@ -2753,19 +2807,19 @@ fn test_bank_blockhash_compute_unit_fee_structure() { .target_lamports_per_signature = 1000; genesis_config.fee_rate_governor.target_signatures_per_slot = 1; - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); goto_end_of_slot(bank.clone()); let cheap_blockhash = bank.last_blockhash(); let cheap_lamports_per_signature = bank.get_lamports_per_signature(); assert_eq!(cheap_lamports_per_signature, 0); - let bank = Arc::new(Bank::new_from_parent(bank, &leader, 1)); + let bank = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank, &leader, 1); goto_end_of_slot(bank.clone()); let expensive_blockhash = bank.last_blockhash(); let expensive_lamports_per_signature = bank.get_lamports_per_signature(); assert!(cheap_lamports_per_signature < expensive_lamports_per_signature); - let bank = Bank::new_from_parent(bank, &leader, 2); + let bank = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank, &leader, 2); // Send a transfer using cheap_blockhash let key = solana_sdk::pubkey::new_rand(); @@ -2921,7 +2975,7 @@ fn test_filter_program_errors_and_collect_compute_unit_fee() { #[test] fn test_debits_before_credits() { let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(2.)); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let keypair = Keypair::new(); let tx0 = system_transaction::transfer( &mint_keypair, @@ -2951,7 +3005,7 @@ fn test_readonly_accounts() { mint_keypair, .. 
} = create_genesis_config_with_leader(500, &solana_sdk::pubkey::new_rand(), 0); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let vote_pubkey0 = solana_sdk::pubkey::new_rand(); let vote_pubkey1 = solana_sdk::pubkey::new_rand(); @@ -3023,7 +3077,7 @@ fn test_readonly_accounts() { #[test] fn test_interleaving_locks() { let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let alice = Keypair::new(); let bob = Keypair::new(); let amount = genesis_config.rent.minimum_balance(0); @@ -3160,7 +3214,7 @@ fn test_bank_invalid_account_index() { fn test_bank_pay_to_self() { let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); let key1 = Keypair::new(); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let amount = genesis_config.rent.minimum_balance(0); bank.transfer(amount, &mint_keypair, &key1.pubkey()) @@ -3181,6 +3235,11 @@ fn new_from_parent(parent: Arc) -> Bank { Bank::new_from_parent(parent, &collector_id, slot) } +fn new_from_parent_with_fork_next_slot(parent: Arc, fork: &RwLock) -> Arc { + let slot = parent.slot() + 1; + new_bank_from_parent_with_bank_forks(fork, parent, &Pubkey::default(), slot) +} + /// Verify that the parent's vector is computed correctly #[test] fn test_bank_parents() { @@ -3195,7 +3254,7 @@ fn test_bank_parents() { #[test] fn test_tx_already_processed() { let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let key1 = Keypair::new(); let mut tx = system_transaction::transfer( @@ -3230,13 +3289,13 @@ fn test_tx_already_processed() { fn test_bank_parent_already_processed() { let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); let key1 = Keypair::new(); - let parent = Arc::new(Bank::new_for_tests(&genesis_config)); + let (parent, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let amount = genesis_config.rent.minimum_balance(0); let tx = system_transaction::transfer(&mint_keypair, &key1.pubkey(), amount, genesis_config.hash()); assert_eq!(parent.process_transaction(&tx), Ok(())); - let bank = new_from_parent(parent); + let bank = new_from_parent_with_fork_next_slot(parent, bank_forks.as_ref()); assert_eq!( bank.process_transaction(&tx), Err(TransactionError::AlreadyProcessed) @@ -3249,13 +3308,13 @@ fn test_bank_parent_account_spend() { let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.0)); let key1 = Keypair::new(); let key2 = Keypair::new(); - let parent = Arc::new(Bank::new_for_tests(&genesis_config)); + let (parent, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let amount = genesis_config.rent.minimum_balance(0); let tx = system_transaction::transfer(&mint_keypair, &key1.pubkey(), amount, genesis_config.hash()); assert_eq!(parent.process_transaction(&tx), Ok(())); - let bank = new_from_parent(parent.clone()); + let bank = new_from_parent_with_fork_next_slot(parent.clone(), bank_forks.as_ref()); let tx = system_transaction::transfer(&key1, &key2.pubkey(), amount, genesis_config.hash()); assert_eq!(bank.process_transaction(&tx), Ok(())); assert_eq!(parent.get_signature_status(&tx.signatures[0]), None); @@ -3264,8 
+3323,8 @@ fn test_bank_parent_account_spend() { #[test] fn test_bank_hash_internal_state() { let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); - let bank0 = Bank::new_for_tests(&genesis_config); - let bank1 = Bank::new_for_tests(&genesis_config); + let (bank0, _) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let (bank1, bank_forks_1) = Bank::new_with_bank_forks_for_tests(&genesis_config); let amount = genesis_config.rent.minimum_balance(0); let initial_state = bank0.hash_internal_state(); assert_eq!(bank1.hash_internal_state(), initial_state); @@ -3277,8 +3336,7 @@ fn test_bank_hash_internal_state() { assert_eq!(bank0.hash_internal_state(), bank1.hash_internal_state()); // Checkpointing should always result in a new state - let bank1 = Arc::new(bank1); - let bank2 = new_from_parent(bank1.clone()); + let bank2 = new_from_parent_with_fork_next_slot(bank1.clone(), bank_forks_1.as_ref()); assert_ne!(bank0.hash_internal_state(), bank2.hash_internal_state()); let pubkey2 = solana_sdk::pubkey::new_rand(); @@ -3296,7 +3354,7 @@ fn test_bank_hash_internal_state_verify() { for pass in 0..3 { solana_logger::setup(); let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); - let bank0 = Bank::new_for_tests(&genesis_config); + let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let amount = genesis_config.rent.minimum_balance(0); let pubkey = solana_sdk::pubkey::new_rand(); @@ -3304,9 +3362,13 @@ fn test_bank_hash_internal_state_verify() { bank0.transfer(amount, &mint_keypair, &pubkey).unwrap(); let bank0_state = bank0.hash_internal_state(); - let bank0 = Arc::new(bank0); // Checkpointing should result in a new state while freezing the parent - let bank2 = Bank::new_from_parent(bank0.clone(), &solana_sdk::pubkey::new_rand(), 1); + let bank2 = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + bank0.clone(), + &solana_sdk::pubkey::new_rand(), + 1, + ); assert_ne!(bank0_state, bank2.hash_internal_state()); // Checkpointing should modify the checkpoint's state when freezed assert_ne!(bank0_state, bank0.hash_internal_state()); @@ -3320,7 +3382,12 @@ fn test_bank_hash_internal_state_verify() { bank2.update_accounts_hash_for_tests(); assert!(bank2.verify_accounts_hash(None, VerifyAccountsHashConfig::default_for_test())); } - let bank3 = Bank::new_from_parent(bank0.clone(), &solana_sdk::pubkey::new_rand(), 2); + let bank3 = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + bank0.clone(), + &solana_sdk::pubkey::new_rand(), + 2, + ); assert_eq!(bank0_state, bank0.hash_internal_state()); if pass == 0 { // this relies on us having set the bank hash in the pass==0 if above @@ -3361,7 +3428,7 @@ fn test_verify_snapshot_bank() { solana_logger::setup(); let pubkey = solana_sdk::pubkey::new_rand(); let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; bank.transfer( genesis_config.rent.minimum_balance(0), &mint_keypair, @@ -3384,9 +3451,14 @@ fn test_bank_hash_internal_state_same_account_different_fork() { solana_logger::setup(); let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); let amount = genesis_config.rent.minimum_balance(0); - let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); + let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let initial_state = bank0.hash_internal_state(); - 
let bank1 = Bank::new_from_parent(bank0.clone(), &Pubkey::default(), 1); + let bank1 = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + bank0.clone(), + &Pubkey::default(), + 1, + ); assert_ne!(bank1.hash_internal_state(), initial_state); info!("transfer bank1"); @@ -3396,7 +3468,8 @@ fn test_bank_hash_internal_state_same_account_different_fork() { info!("transfer bank2"); // bank2 should not hash the same as bank1 - let bank2 = Bank::new_from_parent(bank0, &Pubkey::default(), 2); + let bank2 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank0, &Pubkey::default(), 2); bank2.transfer(amount, &mint_keypair, &pubkey).unwrap(); assert_ne!(bank2.hash_internal_state(), initial_state); assert_ne!(bank1.hash_internal_state(), bank2.hash_internal_state()); @@ -3415,8 +3488,8 @@ fn test_hash_internal_state_genesis() { fn test_hash_internal_state_order() { let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); let amount = genesis_config.rent.minimum_balance(0); - let bank0 = Bank::new_for_tests(&genesis_config); - let bank1 = Bank::new_for_tests(&genesis_config); + let bank0 = Bank::new_with_bank_forks_for_tests(&genesis_config).0; + let bank1 = Bank::new_with_bank_forks_for_tests(&genesis_config).0; assert_eq!(bank0.hash_internal_state(), bank1.hash_internal_state()); let key0 = solana_sdk::pubkey::new_rand(); let key1 = solana_sdk::pubkey::new_rand(); @@ -3434,7 +3507,7 @@ fn test_hash_internal_state_error() { solana_logger::setup(); let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); let amount = genesis_config.rent.minimum_balance(0); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let key0 = solana_sdk::pubkey::new_rand(); bank.transfer(amount, &mint_keypair, &key0).unwrap(); let orig = bank.hash_internal_state(); @@ -3476,7 +3549,7 @@ fn test_bank_squash() { let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(2.)); let key1 = Keypair::new(); let key2 = Keypair::new(); - let parent = Arc::new(Bank::new_for_tests(&genesis_config)); + let (parent, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let amount = genesis_config.rent.minimum_balance(0); let tx_transfer_mint_to_1 = @@ -3492,7 +3565,7 @@ fn test_bank_squash() { ); trace!("new from parent"); - let bank = new_from_parent(parent.clone()); + let bank = new_from_parent_with_fork_next_slot(parent.clone(), bank_forks.as_ref()); trace!("done new from parent"); assert_eq!( bank.get_signature_status(&tx_transfer_mint_to_1.signatures[0]), @@ -3545,7 +3618,7 @@ fn test_bank_squash() { #[test] fn test_bank_get_account_in_parent_after_squash() { let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); - let parent = Arc::new(Bank::new_for_tests(&genesis_config)); + let parent = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let amount = genesis_config.rent.minimum_balance(0); let key1 = Keypair::new(); @@ -3563,7 +3636,7 @@ fn test_bank_get_account_in_parent_after_squash() { fn test_bank_get_account_in_parent_after_squash2() { solana_logger::setup(); let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); - let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); + let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let amount = genesis_config.rent.minimum_balance(0); let key1 = Keypair::new(); @@ -3573,15 +3646,31 @@ fn test_bank_get_account_in_parent_after_squash2() 
{ .unwrap(); assert_eq!(bank0.get_balance(&key1.pubkey()), amount); - let bank1 = Arc::new(Bank::new_from_parent(bank0.clone(), &Pubkey::default(), 1)); + let bank1 = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + bank0.clone(), + &Pubkey::default(), + 1, + ); bank1 .transfer(3 * amount, &mint_keypair, &key1.pubkey()) .unwrap(); - let bank2 = Arc::new(Bank::new_from_parent(bank0.clone(), &Pubkey::default(), 2)); + let bank2 = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + bank0.clone(), + &Pubkey::default(), + 2, + ); bank2 .transfer(2 * amount, &mint_keypair, &key1.pubkey()) .unwrap(); - let bank3 = Arc::new(Bank::new_from_parent(bank1.clone(), &Pubkey::default(), 3)); + + let bank3 = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + bank1.clone(), + &Pubkey::default(), + 3, + ); bank1.squash(); // This picks up the values from 1 which is the highest root: @@ -3593,16 +3682,27 @@ fn test_bank_get_account_in_parent_after_squash2() { bank3.squash(); assert_eq!(bank1.get_balance(&key1.pubkey()), 4 * amount); - let bank4 = Arc::new(Bank::new_from_parent(bank3.clone(), &Pubkey::default(), 4)); + let bank4 = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + bank3.clone(), + &Pubkey::default(), + 4, + ); bank4 .transfer(4 * amount, &mint_keypair, &key1.pubkey()) .unwrap(); assert_eq!(bank4.get_balance(&key1.pubkey()), 8 * amount); assert_eq!(bank3.get_balance(&key1.pubkey()), 4 * amount); bank4.squash(); - let bank5 = Arc::new(Bank::new_from_parent(bank4.clone(), &Pubkey::default(), 5)); + let bank5 = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + bank4.clone(), + &Pubkey::default(), + 5, + ); bank5.squash(); - let bank6 = Arc::new(Bank::new_from_parent(bank5, &Pubkey::default(), 6)); + let bank6 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank5, &Pubkey::default(), 6); bank6.squash(); // This picks up the values from 4 which is the highest root: @@ -3620,7 +3720,7 @@ fn test_bank_get_account_modified_since_parent_with_fixed_root() { let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); let amount = genesis_config.rent.minimum_balance(0); - let bank1 = Arc::new(Bank::new_for_tests(&genesis_config)); + let (bank1, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); bank1.transfer(amount, &mint_keypair, &pubkey).unwrap(); let result = bank1.get_account_modified_since_parent_with_fixed_root(&pubkey); assert!(result.is_some()); @@ -3628,7 +3728,12 @@ fn test_bank_get_account_modified_since_parent_with_fixed_root() { assert_eq!(account.lamports(), amount); assert_eq!(slot, 0); - let bank2 = Arc::new(Bank::new_from_parent(bank1.clone(), &Pubkey::default(), 1)); + let bank2 = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + bank1.clone(), + &Pubkey::default(), + 1, + ); assert!(bank2 .get_account_modified_since_parent_with_fixed_root(&pubkey) .is_none()); @@ -3646,7 +3751,8 @@ fn test_bank_get_account_modified_since_parent_with_fixed_root() { bank1.squash(); - let bank3 = Bank::new_from_parent(bank2, &Pubkey::default(), 3); + let bank3 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank2, &Pubkey::default(), 3); assert_eq!( None, bank3.get_account_modified_since_parent_with_fixed_root(&pubkey) @@ -3953,7 +4059,7 @@ fn test_bank_get_slots_in_epoch() { #[test] fn test_is_delta_true() { let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.0)); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank = 
Bank::new_with_bank_forks_for_tests(&genesis_config).0; let key1 = Keypair::new(); let tx_transfer_mint_to_1 = system_transaction::transfer( &mint_keypair, @@ -3977,7 +4083,7 @@ fn test_is_delta_true() { #[test] fn test_is_empty() { let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.0)); - let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank0 = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let key1 = Keypair::new(); // The zeroth bank is empty becasue there are no transactions @@ -3997,16 +4103,22 @@ fn test_is_empty() { #[test] fn test_bank_inherit_tx_count() { let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.0)); - let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); + let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); // Bank 1 - let bank1 = Arc::new(Bank::new_from_parent( + let bank1 = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), bank0.clone(), &solana_sdk::pubkey::new_rand(), 1, - )); + ); // Bank 2 - let bank2 = Bank::new_from_parent(bank0.clone(), &solana_sdk::pubkey::new_rand(), 2); + let bank2 = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + bank0.clone(), + &solana_sdk::pubkey::new_rand(), + 2, + ); // transfer a token assert_eq!( @@ -4035,7 +4147,12 @@ fn test_bank_inherit_tx_count() { assert_eq!(bank1.transaction_count(), 1); assert_eq!(bank1.non_vote_transaction_count_since_restart(), 1); - let bank6 = Bank::new_from_parent(bank1.clone(), &solana_sdk::pubkey::new_rand(), 3); + let bank6 = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + bank1.clone(), + &solana_sdk::pubkey::new_rand(), + 3, + ); assert_eq!(bank1.transaction_count(), 1); assert_eq!(bank1.non_vote_transaction_count_since_restart(), 1); assert_eq!(bank6.transaction_count(), 1); @@ -4071,7 +4188,7 @@ fn test_bank_vote_accounts() { mint_keypair, .. 
} = create_genesis_config_with_leader(500, &solana_sdk::pubkey::new_rand(), 1); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let vote_accounts = bank.vote_accounts(); assert_eq!(vote_accounts.len(), 1); // bootstrap validator has @@ -4128,7 +4245,7 @@ fn test_bank_cloned_stake_delegations() { 123_000_000_000, ); genesis_config.rent = Rent::default(); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let stake_delegations = bank.stakes_cache.stakes().stake_delegations().clone(); assert_eq!(stake_delegations.len(), 1); // bootstrap validator has @@ -4205,7 +4322,7 @@ fn test_bank_fees_account() { #[test] fn test_is_delta_with_no_committables() { let (genesis_config, mint_keypair) = create_genesis_config(8000); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; bank.is_delta.store(false, Relaxed); let keypair1 = Keypair::new(); @@ -4421,7 +4538,7 @@ fn test_get_filtered_indexed_accounts() { #[test] fn test_status_cache_ancestors() { solana_logger::setup(); - let parent = create_simple_test_arc_bank(500); + let parent = create_simple_test_arc_bank(500).0; let bank1 = Arc::new(new_from_parent(parent)); let mut bank = bank1; for _ in 0..MAX_CACHE_ENTRIES * 2 { @@ -4482,6 +4599,7 @@ fn test_add_builtin() { bank.last_blockhash(), ); + let bank = bank.wrap_with_bank_forks_for_tests().0; assert_eq!( bank.process_transaction(&transaction), Err(TransactionError::InstructionError( @@ -4498,7 +4616,7 @@ fn test_add_duplicate_static_program() { mint_keypair, .. } = create_genesis_config_with_leader(500, &solana_sdk::pubkey::new_rand(), 0); - let bank = Bank::new_for_tests(&genesis_config); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); declare_process_instruction!(MockBuiltin, 1, |_invoke_context| { Err(InstructionError::Custom(42)) @@ -4528,10 +4646,15 @@ fn test_add_duplicate_static_program() { ); let slot = bank.slot().saturating_add(1); - let mut bank = Bank::new_from_parent(Arc::new(bank), &Pubkey::default(), slot); + let mut bank = Bank::new_from_parent(bank, &Pubkey::default(), slot); + bank.add_mockup_builtin(solana_vote_program::id(), MockBuiltin::vm); + let bank = bank_forks + .write() + .unwrap() + .insert(bank) + .clone_without_scheduler(); let vote_loader_account = bank.get_account(&solana_vote_program::id()).unwrap(); - bank.add_mockup_builtin(solana_vote_program::id(), MockBuiltin::vm); let new_vote_loader_account = bank.get_account(&solana_vote_program::id()).unwrap(); // Vote loader account should not be updated since it was included in the genesis config. 
assert_eq!(vote_loader_account.data(), new_vote_loader_account.data()); @@ -4640,7 +4763,7 @@ fn test_add_instruction_processor_for_existing_unrelated_accounts() { #[allow(deprecated)] #[test] fn test_recent_blockhashes_sysvar() { - let mut bank = create_simple_test_arc_bank(500); + let mut bank = create_simple_test_arc_bank(500).0; for i in 1..5 { let bhq_account = bank.get_account(&sysvar::recent_blockhashes::id()).unwrap(); let recent_blockhashes = @@ -4658,7 +4781,7 @@ fn test_recent_blockhashes_sysvar() { #[allow(deprecated)] #[test] fn test_blockhash_queue_sysvar_consistency() { - let bank = create_simple_test_arc_bank(100_000); + let bank = create_simple_test_arc_bank(100_000).0; goto_end_of_slot(bank.clone()); let bhq_account = bank.get_account(&sysvar::recent_blockhashes::id()).unwrap(); @@ -4720,7 +4843,7 @@ fn test_banks_leak() { solana_logger::setup(); let (mut genesis_config, _) = create_genesis_config(100_000_000_000_000); add_lotsa_stake_accounts(&mut genesis_config); - let mut bank = std::sync::Arc::new(Bank::new_for_tests(&genesis_config)); + let (mut bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let mut num_banks = 0; let pid = std::process::id(); #[cfg(not(target_os = "linux"))] @@ -4729,7 +4852,7 @@ fn test_banks_leak() { ); loop { num_banks += 1; - bank = std::sync::Arc::new(new_from_parent(bank)); + bank = new_from_parent_with_fork_next_slot(bank, bank_forks.as_ref()); if num_banks % 100 == 0 { #[cfg(target_os = "linux")] { @@ -4797,6 +4920,8 @@ fn nonce_setup( Ok((custodian_keypair, nonce_keypair)) } +type NonceSetup = (Arc, Keypair, Keypair, Keypair, Arc>); + fn setup_nonce_with_bank( supply_lamports: u64, mut genesis_cfg_fn: F, @@ -4804,7 +4929,7 @@ fn setup_nonce_with_bank( nonce_lamports: u64, nonce_authority: Option, feature_set: FeatureSet, -) -> Result<(Arc, Keypair, Keypair, Keypair)> +) -> Result where F: FnMut(&mut GenesisConfig), { @@ -4813,13 +4938,13 @@ where genesis_cfg_fn(&mut genesis_config); let mut bank = Bank::new_for_tests(&genesis_config); bank.feature_set = Arc::new(feature_set); - let mut bank = Arc::new(bank); + let (mut bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); // Banks 0 and 1 have no fees, wait two blocks before // initializing our nonce accounts for _ in 0..2 { goto_end_of_slot(bank.clone()); - bank = Arc::new(new_from_parent(bank)); + bank = new_from_parent_with_fork_next_slot(bank, bank_forks.as_ref()); } let (custodian_keypair, nonce_keypair) = nonce_setup( @@ -4833,9 +4958,15 @@ where // The setup nonce is not valid to be used until the next bank // so wait one more block goto_end_of_slot(bank.clone()); - bank = Arc::new(new_from_parent(bank)); + bank = new_from_parent_with_fork_next_slot(bank, bank_forks.as_ref()); - Ok((bank, mint_keypair, custodian_keypair, nonce_keypair)) + Ok(( + bank, + mint_keypair, + custodian_keypair, + nonce_keypair, + bank_forks, + )) } impl Bank { @@ -4848,7 +4979,7 @@ impl Bank { #[test] fn test_check_transaction_for_nonce_ok() { - let (bank, _mint_keypair, custodian_keypair, nonce_keypair) = setup_nonce_with_bank( + let (bank, _mint_keypair, custodian_keypair, nonce_keypair, _) = setup_nonce_with_bank( 10_000_000, |_| {}, 5_000_000, @@ -4882,7 +5013,7 @@ fn test_check_transaction_for_nonce_ok() { #[test] fn test_check_transaction_for_nonce_not_nonce_fail() { - let (bank, _mint_keypair, custodian_keypair, nonce_keypair) = setup_nonce_with_bank( + let (bank, _mint_keypair, custodian_keypair, nonce_keypair, _) = setup_nonce_with_bank( 10_000_000, |_| {}, 
5_000_000, @@ -4914,7 +5045,7 @@ fn test_check_transaction_for_nonce_not_nonce_fail() { #[test] fn test_check_transaction_for_nonce_missing_ix_pubkey_fail() { - let (bank, _mint_keypair, custodian_keypair, nonce_keypair) = setup_nonce_with_bank( + let (bank, _mint_keypair, custodian_keypair, nonce_keypair, _) = setup_nonce_with_bank( 10_000_000, |_| {}, 5_000_000, @@ -4947,7 +5078,7 @@ fn test_check_transaction_for_nonce_missing_ix_pubkey_fail() { #[test] fn test_check_transaction_for_nonce_nonce_acc_does_not_exist_fail() { - let (bank, _mint_keypair, custodian_keypair, nonce_keypair) = setup_nonce_with_bank( + let (bank, _mint_keypair, custodian_keypair, nonce_keypair, _) = setup_nonce_with_bank( 10_000_000, |_| {}, 5_000_000, @@ -4981,7 +5112,7 @@ fn test_check_transaction_for_nonce_nonce_acc_does_not_exist_fail() { #[test] fn test_check_transaction_for_nonce_bad_tx_hash_fail() { - let (bank, _mint_keypair, custodian_keypair, nonce_keypair) = setup_nonce_with_bank( + let (bank, _mint_keypair, custodian_keypair, nonce_keypair, _) = setup_nonce_with_bank( 10_000_000, |_| {}, 5_000_000, @@ -5012,7 +5143,7 @@ fn test_check_transaction_for_nonce_bad_tx_hash_fail() { #[test] fn test_assign_from_nonce_account_fail() { - let bank = create_simple_test_arc_bank(100_000_000); + let bank = create_simple_test_arc_bank(100_000_000).0; let nonce = Keypair::new(); let nonce_account = AccountSharedData::new_data( 42_424_242, @@ -5038,7 +5169,7 @@ fn test_assign_from_nonce_account_fail() { fn test_nonce_must_be_advanceable() { let mut bank = create_simple_test_bank(100_000_000); bank.feature_set = Arc::new(FeatureSet::all_enabled()); - let bank = Arc::new(bank); + let bank = bank.wrap_with_bank_forks_for_tests().0; let nonce_keypair = Keypair::new(); let nonce_authority = nonce_keypair.pubkey(); let durable_nonce = DurableNonce::from_blockhash(&bank.last_blockhash()); @@ -5065,15 +5196,16 @@ fn test_nonce_must_be_advanceable() { #[test] fn test_nonce_transaction() { - let (mut bank, _mint_keypair, custodian_keypair, nonce_keypair) = setup_nonce_with_bank( - 10_000_000, - |_| {}, - 5_000_000, - 250_000, - None, - FeatureSet::all_enabled(), - ) - .unwrap(); + let (mut bank, _mint_keypair, custodian_keypair, nonce_keypair, bank_forks) = + setup_nonce_with_bank( + 10_000_000, + |_| {}, + 5_000_000, + 250_000, + None, + FeatureSet::all_enabled(), + ) + .unwrap(); let alice_keypair = Keypair::new(); let alice_pubkey = alice_keypair.pubkey(); let custodian_pubkey = custodian_keypair.pubkey(); @@ -5088,7 +5220,7 @@ fn test_nonce_transaction() { /* Kick nonce hash off the blockhash_queue */ for _ in 0..MAX_RECENT_BLOCKHASHES + 1 { goto_end_of_slot(bank.clone()); - bank = Arc::new(new_from_parent(bank)); + bank = new_from_parent_with_fork_next_slot(bank, bank_forks.as_ref()); } /* Expect a non-Nonce transfer to fail */ @@ -5157,7 +5289,7 @@ fn test_nonce_transaction() { /* Kick nonce hash off the blockhash_queue */ for _ in 0..MAX_RECENT_BLOCKHASHES + 1 { goto_end_of_slot(bank.clone()); - bank = Arc::new(new_from_parent(bank)); + bank = new_from_parent_with_fork_next_slot(bank, bank_forks.as_ref()); } let nonce_tx = Transaction::new_signed_with_payer( @@ -5199,7 +5331,7 @@ fn test_nonce_transaction() { #[test] fn test_nonce_transaction_with_tx_wide_caps() { let feature_set = FeatureSet::all_enabled(); - let (mut bank, _mint_keypair, custodian_keypair, nonce_keypair) = + let (mut bank, _mint_keypair, custodian_keypair, nonce_keypair, bank_forks) = setup_nonce_with_bank(10_000_000, |_| {}, 5_000_000, 250_000, None, 
feature_set).unwrap(); let alice_keypair = Keypair::new(); let alice_pubkey = alice_keypair.pubkey(); @@ -5215,7 +5347,7 @@ fn test_nonce_transaction_with_tx_wide_caps() { /* Kick nonce hash off the blockhash_queue */ for _ in 0..MAX_RECENT_BLOCKHASHES + 1 { goto_end_of_slot(bank.clone()); - bank = Arc::new(new_from_parent(bank)); + bank = new_from_parent_with_fork_next_slot(bank, bank_forks.as_ref()); } /* Expect a non-Nonce transfer to fail */ @@ -5284,7 +5416,7 @@ fn test_nonce_transaction_with_tx_wide_caps() { /* Kick nonce hash off the blockhash_queue */ for _ in 0..MAX_RECENT_BLOCKHASHES + 1 { goto_end_of_slot(bank.clone()); - bank = Arc::new(new_from_parent(bank)); + bank = new_from_parent_with_fork_next_slot(bank, bank_forks.as_ref()); } let nonce_tx = Transaction::new_signed_with_payer( @@ -5326,15 +5458,16 @@ fn test_nonce_transaction_with_tx_wide_caps() { #[test] fn test_nonce_authority() { solana_logger::setup(); - let (mut bank, _mint_keypair, custodian_keypair, nonce_keypair) = setup_nonce_with_bank( - 10_000_000, - |_| {}, - 5_000_000, - 250_000, - None, - FeatureSet::all_enabled(), - ) - .unwrap(); + let (mut bank, _mint_keypair, custodian_keypair, nonce_keypair, bank_forks) = + setup_nonce_with_bank( + 10_000_000, + |_| {}, + 5_000_000, + 250_000, + None, + FeatureSet::all_enabled(), + ) + .unwrap(); let alice_keypair = Keypair::new(); let alice_pubkey = alice_keypair.pubkey(); let custodian_pubkey = custodian_keypair.pubkey(); @@ -5352,7 +5485,7 @@ fn test_nonce_authority() { for _ in 0..MAX_RECENT_BLOCKHASHES + 1 { goto_end_of_slot(bank.clone()); - bank = Arc::new(new_from_parent(bank)); + bank = new_from_parent_with_fork_next_slot(bank, bank_forks.as_ref()); } let nonce_tx = Transaction::new_signed_with_payer( @@ -5387,15 +5520,16 @@ fn test_nonce_authority() { fn test_nonce_payer() { solana_logger::setup(); let nonce_starting_balance = 250_000; - let (mut bank, _mint_keypair, custodian_keypair, nonce_keypair) = setup_nonce_with_bank( - 10_000_000, - |_| {}, - 5_000_000, - nonce_starting_balance, - None, - FeatureSet::all_enabled(), - ) - .unwrap(); + let (mut bank, _mint_keypair, custodian_keypair, nonce_keypair, bank_forks) = + setup_nonce_with_bank( + 10_000_000, + |_| {}, + 5_000_000, + nonce_starting_balance, + None, + FeatureSet::all_enabled(), + ) + .unwrap(); let alice_keypair = Keypair::new(); let alice_pubkey = alice_keypair.pubkey(); let custodian_pubkey = custodian_keypair.pubkey(); @@ -5410,7 +5544,7 @@ fn test_nonce_payer() { for _ in 0..MAX_RECENT_BLOCKHASHES + 1 { goto_end_of_slot(bank.clone()); - bank = Arc::new(new_from_parent(bank)); + bank = new_from_parent_with_fork_next_slot(bank, bank_forks.as_ref()); } let nonce_tx = Transaction::new_signed_with_payer( @@ -5452,15 +5586,16 @@ fn test_nonce_payer_tx_wide_cap() { let nonce_starting_balance = 250_000 + FeeStructure::default().compute_fee_bins.last().unwrap().fee; let feature_set = FeatureSet::all_enabled(); - let (mut bank, _mint_keypair, custodian_keypair, nonce_keypair) = setup_nonce_with_bank( - 10_000_000, - |_| {}, - 5_000_000, - nonce_starting_balance, - None, - feature_set, - ) - .unwrap(); + let (mut bank, _mint_keypair, custodian_keypair, nonce_keypair, bank_forks) = + setup_nonce_with_bank( + 10_000_000, + |_| {}, + 5_000_000, + nonce_starting_balance, + None, + feature_set, + ) + .unwrap(); let alice_keypair = Keypair::new(); let alice_pubkey = alice_keypair.pubkey(); let custodian_pubkey = custodian_keypair.pubkey(); @@ -5475,7 +5610,7 @@ fn test_nonce_payer_tx_wide_cap() { for _ in 
0..MAX_RECENT_BLOCKHASHES + 1 { goto_end_of_slot(bank.clone()); - bank = Arc::new(new_from_parent(bank)); + bank = new_from_parent_with_fork_next_slot(bank, bank_forks.as_ref()); } let nonce_tx = Transaction::new_signed_with_payer( @@ -5518,7 +5653,7 @@ fn test_nonce_fee_calculator_updates() { genesis_config.rent.lamports_per_byte_year = 0; let mut bank = Bank::new_for_tests(&genesis_config); bank.feature_set = Arc::new(FeatureSet::all_enabled()); - let mut bank = Arc::new(bank); + let (mut bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); // Deliberately use bank 0 to initialize nonce account, so that nonce account fee_calculator indicates 0 fees let (custodian_keypair, nonce_keypair) = @@ -5543,7 +5678,7 @@ fn test_nonce_fee_calculator_updates() { // Kick nonce hash off the blockhash_queue for _ in 0..MAX_RECENT_BLOCKHASHES + 1 { goto_end_of_slot(bank.clone()); - bank = Arc::new(new_from_parent(bank)); + bank = new_from_parent_with_fork_next_slot(bank, bank_forks.as_ref()); } // Nonce transfer @@ -5586,7 +5721,7 @@ fn test_nonce_fee_calculator_updates_tx_wide_cap() { genesis_config.rent.lamports_per_byte_year = 0; let mut bank = Bank::new_for_tests(&genesis_config); bank.feature_set = Arc::new(FeatureSet::all_enabled()); - let mut bank = Arc::new(bank); + let (mut bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); // Deliberately use bank 0 to initialize nonce account, so that nonce account fee_calculator indicates 0 fees let (custodian_keypair, nonce_keypair) = @@ -5611,7 +5746,7 @@ fn test_nonce_fee_calculator_updates_tx_wide_cap() { // Kick nonce hash off the blockhash_queue for _ in 0..MAX_RECENT_BLOCKHASHES + 1 { goto_end_of_slot(bank.clone()); - bank = Arc::new(new_from_parent(bank)); + bank = new_from_parent_with_fork_next_slot(bank, bank_forks.as_ref()); } // Nonce transfer @@ -5650,15 +5785,16 @@ fn test_nonce_fee_calculator_updates_tx_wide_cap() { #[test] fn test_check_ro_durable_nonce_fails() { - let (mut bank, _mint_keypair, custodian_keypair, nonce_keypair) = setup_nonce_with_bank( - 10_000_000, - |_| {}, - 5_000_000, - 250_000, - None, - FeatureSet::all_enabled(), - ) - .unwrap(); + let (mut bank, _mint_keypair, custodian_keypair, nonce_keypair, bank_forks) = + setup_nonce_with_bank( + 10_000_000, + |_| {}, + 5_000_000, + 250_000, + None, + FeatureSet::all_enabled(), + ) + .unwrap(); let custodian_pubkey = custodian_keypair.pubkey(); let nonce_pubkey = nonce_keypair.pubkey(); @@ -5691,7 +5827,7 @@ fn test_check_ro_durable_nonce_fails() { // Kick nonce hash off the blockhash_queue for _ in 0..MAX_RECENT_BLOCKHASHES + 1 { goto_end_of_slot(bank.clone()); - bank = Arc::new(new_from_parent(bank)); + bank = new_from_parent_with_fork_next_slot(bank, bank_forks.as_ref()) } // Caught by the runtime because it is a nonce transaction assert_eq!( @@ -5709,7 +5845,7 @@ fn test_check_ro_durable_nonce_fails() { #[test] fn test_collect_balances() { - let parent = create_simple_test_arc_bank(500); + let parent = create_simple_test_arc_bank(500).0; let bank0 = Arc::new(new_from_parent(parent)); let keypair = Keypair::new(); @@ -5759,8 +5895,8 @@ fn test_pre_post_transaction_balances() { let (mut genesis_config, _mint_keypair) = create_genesis_config(500_000); let fee_rate_governor = FeeRateGovernor::new(5000, 0); genesis_config.fee_rate_governor = fee_rate_governor; - let parent = Arc::new(Bank::new_for_tests(&genesis_config)); - let bank0 = Arc::new(new_from_parent(parent)); + let (parent, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let bank0 = 
new_from_parent_with_fork_next_slot(parent, bank_forks.as_ref()); let keypair0 = Keypair::new(); let keypair1 = Keypair::new(); @@ -5844,7 +5980,11 @@ fn test_pre_post_transaction_balances() { #[test] fn test_transaction_with_duplicate_accounts_in_instruction() { let (genesis_config, mint_keypair) = create_genesis_config(500); - let mut bank = Bank::new_for_tests(&genesis_config); + + let mock_program_id = Pubkey::from([2u8; 32]); + let bank = + Bank::new_with_mockup_builtin_for_tests(&genesis_config, mock_program_id, MockBuiltin::vm) + .0; declare_process_instruction!(MockBuiltin, 1, |invoke_context| { let transaction_context = &invoke_context.transaction_context; @@ -5866,9 +6006,6 @@ fn test_transaction_with_duplicate_accounts_in_instruction() { Ok(()) }); - let mock_program_id = Pubkey::from([2u8; 32]); - bank.add_mockup_builtin(mock_program_id, MockBuiltin::vm); - let from_pubkey = solana_sdk::pubkey::new_rand(); let to_pubkey = solana_sdk::pubkey::new_rand(); let dup_pubkey = from_pubkey; @@ -5900,10 +6037,11 @@ fn test_transaction_with_duplicate_accounts_in_instruction() { #[test] fn test_transaction_with_program_ids_passed_to_programs() { let (genesis_config, mint_keypair) = create_genesis_config(500); - let mut bank = Bank::new_for_tests(&genesis_config); let mock_program_id = Pubkey::from([2u8; 32]); - bank.add_mockup_builtin(mock_program_id, MockBuiltin::vm); + let bank = + Bank::new_with_mockup_builtin_for_tests(&genesis_config, mock_program_id, MockBuiltin::vm) + .0; let from_pubkey = solana_sdk::pubkey::new_rand(); let to_pubkey = solana_sdk::pubkey::new_rand(); @@ -5935,7 +6073,7 @@ fn test_transaction_with_program_ids_passed_to_programs() { fn test_account_ids_after_program_ids() { solana_logger::setup(); let (genesis_config, mint_keypair) = create_genesis_config(500); - let bank = Bank::new_for_tests(&genesis_config); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let from_pubkey = solana_sdk::pubkey::new_rand(); let to_pubkey = solana_sdk::pubkey::new_rand(); @@ -5956,9 +6094,14 @@ fn test_account_ids_after_program_ids() { tx.message.account_keys.push(solana_sdk::pubkey::new_rand()); let slot = bank.slot().saturating_add(1); - let mut bank = Bank::new_from_parent(Arc::new(bank), &Pubkey::default(), slot); - + let mut bank = Bank::new_from_parent(bank, &Pubkey::default(), slot); bank.add_mockup_builtin(solana_vote_program::id(), MockBuiltin::vm); + let bank = bank_forks + .write() + .unwrap() + .insert(bank) + .clone_without_scheduler(); + let result = bank.process_transaction(&tx); assert_eq!(result, Ok(())); let account = bank.get_account(&solana_vote_program::id()).unwrap(); @@ -5969,10 +6112,11 @@ fn test_account_ids_after_program_ids() { #[test] fn test_incinerator() { let (genesis_config, mint_keypair) = create_genesis_config(1_000_000_000_000); - let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); + let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); // Move to the first normal slot so normal rent behaviour applies - let bank = Bank::new_from_parent( + let bank = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), bank0, &Pubkey::default(), genesis_config.epoch_schedule.first_normal_slot, @@ -5998,7 +6142,12 @@ fn test_incinerator() { fn test_duplicate_account_key() { solana_logger::setup(); let (genesis_config, mint_keypair) = create_genesis_config(500); - let mut bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_mockup_builtin_for_tests( + &genesis_config, + 
solana_vote_program::id(), + MockBuiltin::vm, + ) + .0; let from_pubkey = solana_sdk::pubkey::new_rand(); let to_pubkey = solana_sdk::pubkey::new_rand(); @@ -6008,8 +6157,6 @@ fn test_duplicate_account_key() { AccountMeta::new(to_pubkey, false), ]; - bank.add_mockup_builtin(solana_vote_program::id(), MockBuiltin::vm); - let instruction = Instruction::new_with_bincode(solana_vote_program::id(), &10, account_metas); let mut tx = Transaction::new_signed_with_payer( &[instruction], @@ -6027,7 +6174,12 @@ fn test_duplicate_account_key() { fn test_process_transaction_with_too_many_account_locks() { solana_logger::setup(); let (genesis_config, mint_keypair) = create_genesis_config(500); - let mut bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_mockup_builtin_for_tests( + &genesis_config, + solana_vote_program::id(), + MockBuiltin::vm, + ) + .0; let from_pubkey = solana_sdk::pubkey::new_rand(); let to_pubkey = solana_sdk::pubkey::new_rand(); @@ -6037,8 +6189,6 @@ fn test_process_transaction_with_too_many_account_locks() { AccountMeta::new(to_pubkey, false), ]; - bank.add_mockup_builtin(solana_vote_program::id(), MockBuiltin::vm); - let instruction = Instruction::new_with_bincode(solana_vote_program::id(), &10, account_metas); let mut tx = Transaction::new_signed_with_payer( &[instruction], @@ -6103,7 +6253,7 @@ fn test_program_id_as_payer() { #[test] fn test_ref_account_key_after_program_id() { let (genesis_config, mint_keypair) = create_genesis_config(500); - let bank = Bank::new_for_tests(&genesis_config); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let from_pubkey = solana_sdk::pubkey::new_rand(); let to_pubkey = solana_sdk::pubkey::new_rand(); @@ -6114,9 +6264,13 @@ fn test_ref_account_key_after_program_id() { ]; let slot = bank.slot().saturating_add(1); - let mut bank = Bank::new_from_parent(Arc::new(bank), &Pubkey::default(), slot); - + let mut bank = Bank::new_from_parent(bank, &Pubkey::default(), slot); bank.add_mockup_builtin(solana_vote_program::id(), MockBuiltin::vm); + let bank = bank_forks + .write() + .unwrap() + .insert(bank) + .clone_without_scheduler(); let instruction = Instruction::new_with_bincode(solana_vote_program::id(), &10, account_metas); let mut tx = Transaction::new_signed_with_payer( @@ -6155,6 +6309,7 @@ fn test_fuzz_instructions() { (key, name.as_bytes().to_vec()) }) .collect(); + let bank = bank.wrap_with_bank_forks_for_tests().0; let max_keys = 100; let keys: Vec<_> = (0..max_keys) .enumerate() @@ -6353,11 +6508,10 @@ fn test_same_program_id_uses_unique_executable_accounts() { }); let (genesis_config, mint_keypair) = create_genesis_config(50000); - let mut bank = Bank::new_for_tests(&genesis_config); - - // Add a new program let program1_pubkey = solana_sdk::pubkey::new_rand(); - bank.add_mockup_builtin(program1_pubkey, MockBuiltin::vm); + let bank = + Bank::new_with_mockup_builtin_for_tests(&genesis_config, program1_pubkey, MockBuiltin::vm) + .0; // Add a new program owned by the first let program2_pubkey = solana_sdk::pubkey::new_rand(); @@ -6566,7 +6720,7 @@ fn test_add_builtin_no_overwrite() { let program_id = solana_sdk::pubkey::new_rand(); let mut bank = Arc::new(Bank::new_from_parent( - create_simple_test_arc_bank(100_000), + create_simple_test_arc_bank(100_000).0, &Pubkey::default(), slot, )); @@ -6590,7 +6744,7 @@ fn test_add_builtin_loader_no_overwrite() { let loader_id = solana_sdk::pubkey::new_rand(); let mut bank = Arc::new(Bank::new_from_parent( - create_simple_test_arc_bank(100_000), + 
create_simple_test_arc_bank(100_000).0, &Pubkey::default(), slot, )); @@ -6777,7 +6931,7 @@ fn test_add_builtin_account_after_frozen() { let program_id = Pubkey::from_str("CiXgo2KHKSDmDnV1F6B69eWFgNAPiSBjjYvfB4cvRNre").unwrap(); let bank = Bank::new_from_parent( - create_simple_test_arc_bank(100_000), + create_simple_test_arc_bank(100_000).0, &Pubkey::default(), slot, ); @@ -6796,7 +6950,7 @@ fn test_add_builtin_account_replace_none() { let program_id = Pubkey::from_str("CiXgo2KHKSDmDnV1F6B69eWFgNAPiSBjjYvfB4cvRNre").unwrap(); let bank = Bank::new_from_parent( - create_simple_test_arc_bank(100_000), + create_simple_test_arc_bank(100_000).0, &Pubkey::default(), slot, ); @@ -6929,7 +7083,7 @@ fn test_add_precompiled_account_after_frozen() { let program_id = Pubkey::from_str("CiXgo2KHKSDmDnV1F6B69eWFgNAPiSBjjYvfB4cvRNre").unwrap(); let bank = Bank::new_from_parent( - create_simple_test_arc_bank(100_000), + create_simple_test_arc_bank(100_000).0, &Pubkey::default(), slot, ); @@ -7008,7 +7162,7 @@ fn test_bpf_loader_upgradeable_deploy_with_max_len() { let (genesis_config, mint_keypair) = create_genesis_config(1_000_000_000); let mut bank = Bank::new_for_tests(&genesis_config); bank.feature_set = Arc::new(FeatureSet::all_enabled()); - let bank = Arc::new(bank); + let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); let mut bank_client = BankClient::new_shared(bank.clone()); // Setup keypairs and addresses @@ -7178,7 +7332,9 @@ fn test_bpf_loader_upgradeable_deploy_with_max_len() { // Test initialized program account bank.clear_signatures(); bank.store_account(&buffer_address, &buffer_account); - let bank = bank_client.advance_slot(1, &mint_keypair.pubkey()).unwrap(); + let bank = bank_client + .advance_slot(1, bank_forks.as_ref(), &mint_keypair.pubkey()) + .unwrap(); let message = Message::new( &[Instruction::new_with_bincode( bpf_loader_upgradeable::id(), @@ -7725,7 +7881,7 @@ fn test_bpf_loader_upgradeable_deploy_with_max_len() { #[test] fn test_compute_active_feature_set() { - let bank0 = create_simple_test_arc_bank(100_000); + let bank0 = create_simple_test_arc_bank(100_000).0; let mut bank = Bank::new_from_parent(bank0, &Pubkey::default(), 1); let test_feature = "TestFeature11111111111111111111111111111111" @@ -7749,25 +7905,26 @@ fn test_compute_active_feature_set() { let feature = Feature::default(); assert_eq!(feature.activated_at, None); bank.store_account(&test_feature, &feature::create_account(&feature, 42)); + let feature = feature::from_account(&bank.get_account(&test_feature).expect("get_account")) + .expect("from_account"); + assert_eq!(feature.activated_at, None); - // Run `compute_active_feature_set` disallowing new activations + // Run `compute_active_feature_set` excluding pending activation let (feature_set, new_activations) = bank.compute_active_feature_set(false); assert!(new_activations.is_empty()); assert!(!feature_set.is_active(&test_feature)); - let feature = feature::from_account(&bank.get_account(&test_feature).expect("get_account")) - .expect("from_account"); - assert_eq!(feature.activated_at, None); - // Run `compute_active_feature_set` allowing new activations - let (feature_set, new_activations) = bank.compute_active_feature_set(true); + // Run `compute_active_feature_set` including pending activation + let (_feature_set, new_activations) = bank.compute_active_feature_set(true); assert_eq!(new_activations.len(), 1); - assert!(feature_set.is_active(&test_feature)); + assert!(new_activations.contains(&test_feature)); + + // Actually activate the pending 
activation + bank.apply_feature_activations(ApplyFeatureActivationsCaller::NewFromParent, true); let feature = feature::from_account(&bank.get_account(&test_feature).expect("get_account")) .expect("from_account"); assert_eq!(feature.activated_at, Some(1)); - // Running `compute_active_feature_set` will not cause new activations, but - // `test_feature` is now be active let (feature_set, new_activations) = bank.compute_active_feature_set(true); assert!(new_activations.is_empty()); assert!(feature_set.is_active(&test_feature)); @@ -8031,7 +8188,7 @@ fn test_timestamp_fast() { #[test] fn test_program_is_native_loader() { let (genesis_config, mint_keypair) = create_genesis_config(50000); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let tx = Transaction::new_signed_with_payer( &[Instruction::new_with_bincode( @@ -8952,7 +9109,7 @@ fn test_vote_epoch_panic() { &Pubkey::new_unique(), bootstrap_validator_stake_lamports(), ); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let vote_keypair = keypair_from_seed(&[1u8; 32]).unwrap(); let stake_keypair = keypair_from_seed(&[2u8; 32]).unwrap(); @@ -9000,7 +9157,8 @@ fn test_vote_epoch_panic() { )); assert!(result.is_ok()); - let _bank = Bank::new_from_parent( + let _bank = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), bank, &mint_keypair.pubkey(), genesis_config.epoch_schedule.get_first_slot_in_epoch(1), @@ -9018,7 +9176,7 @@ fn test_tx_log_order() { &Pubkey::new_unique(), bootstrap_validator_stake_lamports(), ); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; *bank.transaction_log_collector_config.write().unwrap() = TransactionLogCollectorConfig { mentioned_addresses: HashSet::new(), filter: TransactionLogCollectorFilter::All, @@ -9105,7 +9263,10 @@ fn test_tx_return_data() { &Pubkey::new_unique(), bootstrap_validator_stake_lamports(), ); - let mut bank = Bank::new_for_tests(&genesis_config); + let mock_program_id = Pubkey::from([2u8; 32]); + let bank = + Bank::new_with_mockup_builtin_for_tests(&genesis_config, mock_program_id, MockBuiltin::vm) + .0; declare_process_instruction!(MockBuiltin, 1, |invoke_context| { let mock_program_id = Pubkey::from([2u8; 32]); @@ -9123,9 +9284,7 @@ fn test_tx_return_data() { Ok(()) }); - let mock_program_id = Pubkey::from([2u8; 32]); let blockhash = bank.last_blockhash(); - bank.add_mockup_builtin(mock_program_id, MockBuiltin::vm); for index in [ None, @@ -9303,7 +9462,10 @@ fn test_transfer_sysvar() { &Pubkey::new_unique(), bootstrap_validator_stake_lamports(), ); - let mut bank = Bank::new_for_tests(&genesis_config); + let program_id = solana_sdk::pubkey::new_rand(); + + let bank = + Bank::new_with_mockup_builtin_for_tests(&genesis_config, program_id, MockBuiltin::vm).0; declare_process_instruction!(MockBuiltin, 1, |invoke_context| { let transaction_context = &invoke_context.transaction_context; @@ -9314,9 +9476,6 @@ fn test_transfer_sysvar() { Ok(()) }); - let program_id = solana_sdk::pubkey::new_rand(); - bank.add_mockup_builtin(program_id, MockBuiltin::vm); - let blockhash = bank.last_blockhash(); #[allow(deprecated)] let blockhash_sysvar = sysvar::clock::id(); @@ -9393,7 +9552,7 @@ fn do_test_clean_dropped_unrooted_banks(freeze_bank1: FreezeBank1) { //! 
- In this case, key5's ref-count should be decremented correctly let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); - let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); + let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let amount = genesis_config.rent.minimum_balance(0); let collector = Pubkey::new_unique(); @@ -9410,7 +9569,8 @@ fn do_test_clean_dropped_unrooted_banks(freeze_bank1: FreezeBank1) { bank0.freeze(); let slot = 1; - let bank1 = Bank::new_from_parent(bank0.clone(), &collector, slot); + let bank1 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank0.clone(), &collector, slot); add_root_and_flush_write_cache(&bank0); bank1 .transfer(amount, &mint_keypair, &key1.pubkey()) @@ -9423,7 +9583,7 @@ fn do_test_clean_dropped_unrooted_banks(freeze_bank1: FreezeBank1) { } let slot = slot + 1; - let bank2 = Bank::new_from_parent(bank0, &collector, slot); + let bank2 = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank0, &collector, slot); bank2 .transfer(amount * 2, &mint_keypair, &key2.pubkey()) .unwrap(); @@ -9436,6 +9596,7 @@ fn do_test_clean_dropped_unrooted_banks(freeze_bank1: FreezeBank1) { bank2.squash(); add_root_and_flush_write_cache(&bank2); + bank_forks.write().unwrap().remove(1); drop(bank1); bank2.clean_accounts_for_tests(); @@ -9512,7 +9673,9 @@ fn test_compute_budget_program_noop() { &Pubkey::new_unique(), bootstrap_validator_stake_lamports(), ); - let mut bank = Bank::new_for_tests(&genesis_config); + let program_id = solana_sdk::pubkey::new_rand(); + let bank = + Bank::new_with_mockup_builtin_for_tests(&genesis_config, program_id, MockBuiltin::vm).0; declare_process_instruction!(MockBuiltin, 1, |invoke_context| { let compute_budget = invoke_context.get_compute_budget(); @@ -9528,8 +9691,6 @@ fn test_compute_budget_program_noop() { ); Ok(()) }); - let program_id = solana_sdk::pubkey::new_rand(); - bank.add_mockup_builtin(program_id, MockBuiltin::vm); let message = Message::new( &[ @@ -9557,7 +9718,9 @@ fn test_compute_request_instruction() { &Pubkey::new_unique(), bootstrap_validator_stake_lamports(), ); - let mut bank = Bank::new_for_tests(&genesis_config); + let program_id = solana_sdk::pubkey::new_rand(); + let bank = + Bank::new_with_mockup_builtin_for_tests(&genesis_config, program_id, MockBuiltin::vm).0; declare_process_instruction!(MockBuiltin, 1, |invoke_context| { let compute_budget = invoke_context.get_compute_budget(); @@ -9573,8 +9736,6 @@ fn test_compute_request_instruction() { ); Ok(()) }); - let program_id = solana_sdk::pubkey::new_rand(); - bank.add_mockup_builtin(program_id, MockBuiltin::vm); let message = Message::new( &[ @@ -9602,7 +9763,10 @@ fn test_failed_compute_request_instruction() { &Pubkey::new_unique(), bootstrap_validator_stake_lamports(), ); - let mut bank = Bank::new_for_tests(&genesis_config); + + let program_id = solana_sdk::pubkey::new_rand(); + let bank = + Bank::new_with_mockup_builtin_for_tests(&genesis_config, program_id, MockBuiltin::vm).0; let payer0_keypair = Keypair::new(); let payer1_keypair = Keypair::new(); @@ -9625,8 +9789,6 @@ fn test_failed_compute_request_instruction() { ); Ok(()) }); - let program_id = solana_sdk::pubkey::new_rand(); - bank.add_mockup_builtin(program_id, MockBuiltin::vm); // This message will not be executed because the compute budget request is invalid let message0 = Message::new( @@ -9776,7 +9938,7 @@ fn test_call_precomiled_program() { .. 
} = create_genesis_config_with_leader(42, &Pubkey::new_unique(), 42); activate_all_features(&mut genesis_config); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; // libsecp256k1 // Since libsecp256k1 is still using the old version of rand, this test @@ -10126,7 +10288,7 @@ fn test_an_empty_instruction_without_program() { let message = Message::new(&[ix], Some(&mint_keypair.pubkey())); let tx = Transaction::new(&[&mint_keypair], message, genesis_config.hash()); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; assert_eq!( bank.process_transaction(&tx).unwrap_err(), TransactionError::InstructionError(0, InstructionError::UnsupportedProgramId), @@ -10155,6 +10317,7 @@ fn test_accounts_data_size_with_good_transaction() { let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1_000.)); let mut bank = Bank::new_for_tests(&genesis_config); bank.activate_feature(&feature_set::cap_accounts_data_len::id()); + let bank = bank.wrap_with_bank_forks_for_tests().0; let transaction = system_transaction::create_account( &mint_keypair, &Keypair::new(), @@ -10204,6 +10367,8 @@ fn test_accounts_data_size_with_bad_transaction() { &solana_sdk::system_program::id(), ); + let bank = bank.wrap_with_bank_forks_for_tests().0; + let accounts_data_size_before = bank.load_accounts_data_size(); let accounts_data_size_delta_before = bank.load_accounts_data_size_delta(); let accounts_data_size_delta_on_chain_before = bank.load_accounts_data_size_delta_on_chain(); @@ -10311,8 +10476,12 @@ fn test_invalid_rent_state_changes_existing_accounts() { ), ); - let mut bank = Bank::new_for_tests(&genesis_config); - bank.add_mockup_builtin(mock_program_id, MockTransferBuiltin::vm); + let bank = Bank::new_with_mockup_builtin_for_tests( + &genesis_config, + mock_program_id, + MockTransferBuiltin::vm, + ) + .0; let recent_blockhash = bank.last_blockhash(); let check_account_is_rent_exempt = |pubkey: &Pubkey| -> bool { @@ -10394,8 +10563,12 @@ fn test_invalid_rent_state_changes_new_accounts() { let account_data_size = 100; let rent_exempt_minimum = genesis_config.rent.minimum_balance(account_data_size); - let mut bank = Bank::new_for_tests(&genesis_config); - bank.add_mockup_builtin(mock_program_id, MockTransferBuiltin::vm); + let bank = Bank::new_with_mockup_builtin_for_tests( + &genesis_config, + mock_program_id, + MockTransferBuiltin::vm, + ) + .0; let recent_blockhash = bank.last_blockhash(); let check_account_is_rent_exempt = |pubkey: &Pubkey| -> bool { @@ -10453,8 +10626,12 @@ fn test_drained_created_account() { // Create legacy accounts of various kinds let created_keypair = Keypair::new(); - let mut bank = Bank::new_for_tests(&genesis_config); - bank.add_mockup_builtin(mock_program_id, MockTransferBuiltin::vm); + let bank = Bank::new_with_mockup_builtin_for_tests( + &genesis_config, + mock_program_id, + MockTransferBuiltin::vm, + ) + .0; let recent_blockhash = bank.last_blockhash(); // Create and drain a small data size account @@ -10565,7 +10742,7 @@ fn test_rent_state_changes_sysvars() { Account::from(validator_vote_account), ); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; // Ensure transactions with sysvars succeed, even though sysvars appear RentPaying by balance let tx = Transaction::new_signed_with_payer( @@ -10608,7 +10785,7 @@ fn test_invalid_rent_state_changes_fee_payer() { 
Account::new(rent_exempt_minimum, 0, &system_program::id()), ); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let recent_blockhash = bank.last_blockhash(); let check_account_is_rent_exempt = |pubkey: &Pubkey| -> bool { @@ -10838,7 +11015,7 @@ fn test_rent_state_incinerator() { genesis_config.rent = Rent::default(); let rent_exempt_minimum = genesis_config.rent.minimum_balance(0); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; for amount in [rent_exempt_minimum - 1, rent_exempt_minimum] { bank.transfer(amount, &mint_keypair, &solana_sdk::incinerator::id()) @@ -11063,10 +11240,14 @@ fn test_resize_and_rent() { genesis_config.rent = Rent::default(); activate_all_features(&mut genesis_config); - let mut bank = Bank::new_for_tests(&genesis_config); - let mock_program_id = Pubkey::new_unique(); - bank.add_mockup_builtin(mock_program_id, MockReallocBuiltin::vm); + let bank = Bank::new_with_mockup_builtin_for_tests( + &genesis_config, + mock_program_id, + MockReallocBuiltin::vm, + ) + .0; + let recent_blockhash = bank.last_blockhash(); let account_data_size_small = 1024; @@ -11335,9 +11516,13 @@ fn test_accounts_data_size_and_resize_transactions() { mint_keypair, .. } = genesis_utils::create_genesis_config(100 * LAMPORTS_PER_SOL); - let mut bank = Bank::new_for_tests(&genesis_config); let mock_program_id = Pubkey::new_unique(); - bank.add_mockup_builtin(mock_program_id, MockReallocBuiltin::vm); + let bank = Bank::new_with_mockup_builtin_for_tests( + &genesis_config, + mock_program_id, + MockReallocBuiltin::vm, + ) + .0; let recent_blockhash = bank.last_blockhash(); @@ -11474,15 +11659,14 @@ fn test_accounts_data_size_and_rent_collection(should_collect_rent: bool) { mut genesis_config, .. 
} = genesis_utils::create_genesis_config(100 * LAMPORTS_PER_SOL); genesis_config.rent = Rent::default(); - for feature_id in FeatureSet::default().inactive { - if should_collect_rent - || feature_id != solana_sdk::feature_set::disable_rent_fees_collection::id() - { - activate_feature(&mut genesis_config, feature_id); - } + if should_collect_rent { + genesis_config + .accounts + .remove(&solana_sdk::feature_set::disable_rent_fees_collection::id()); } let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let slot = bank.slot() + bank.slot_count_per_normal_epoch(); let bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), slot)); @@ -11502,14 +11686,13 @@ fn test_accounts_data_size_and_rent_collection(should_collect_rent: bool) { let info = bank.rent_collector.collect_from_existing_account( &keypair.pubkey(), &mut account, - None, set_exempt_rent_epoch_max, ); assert_eq!(info.account_data_len_reclaimed, data_size as u64); } // Collect rent for real - let should_collect_rent = bank.should_collect_rent(); + assert_eq!(should_collect_rent, bank.should_collect_rent()); let accounts_data_size_delta_before_collecting_rent = bank.load_accounts_data_size_delta(); bank.collect_rent_eagerly(); let accounts_data_size_delta_after_collecting_rent = bank.load_accounts_data_size_delta(); @@ -11547,7 +11730,7 @@ fn test_accounts_data_size_from_genesis() { genesis_config.rent = Rent::default(); genesis_config.ticks_per_slot = 3; - let mut bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let (mut bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); assert_eq!( bank.load_accounts_data_size() as usize, bank.get_total_accounts_stats().unwrap().data_len @@ -11556,7 +11739,12 @@ fn test_accounts_data_size_from_genesis() { // Create accounts over a number of banks and ensure the accounts data size remains correct for _ in 0..10 { let slot = bank.slot() + 1; - bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), slot)); + bank = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + bank, + &Pubkey::default(), + slot, + ); // Store an account into the bank that is rent-exempt and has data let data_size = rand::thread_rng().gen_range(3333..4444); @@ -11588,7 +11776,7 @@ fn test_cap_accounts_data_allocations_per_transaction() { / MAX_PERMITTED_DATA_LENGTH as usize; let (genesis_config, mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let mut instructions = Vec::new(); let mut keypairs = vec![mint_keypair.insecure_clone()]; @@ -11899,7 +12087,7 @@ fn test_calculate_fee_with_request_heap_frame_flag() { fn test_is_in_slot_hashes_history() { use solana_sdk::slot_hashes::MAX_ENTRIES; - let bank0 = create_simple_test_arc_bank(1); + let bank0 = create_simple_test_arc_bank(1).0; assert!(!bank0.is_in_slot_hashes_history(&0)); assert!(!bank0.is_in_slot_hashes_history(&1)); let mut last_bank = bank0; @@ -11921,8 +12109,7 @@ fn test_feature_activation_loaded_programs_recompilation_phase() { genesis_config .accounts .remove(&feature_set::reject_callx_r10::id()); - let bank_forks = BankForks::new_rw_arc(Bank::new_for_tests(&genesis_config)); - let root_bank = bank_forks.read().unwrap().root_bank(); + let (root_bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); // Test a basic transfer let amount = genesis_config.rent.minimum_balance(0); @@ -11952,7 +12139,7 @@ fn 
test_feature_activation_loaded_programs_recompilation_phase() { // Advance the bank so the next transaction can be submitted. goto_end_of_slot(root_bank.clone()); - let bank = Arc::new(new_from_parent(root_bank)); + let bank = new_from_parent_with_fork_next_slot(root_bank, bank_forks.as_ref()); // Compose second instruction using the same program with a different block hash let instruction2 = Instruction::new_with_bytes(program_keypair.pubkey(), &[], Vec::new()); @@ -11981,7 +12168,7 @@ fn test_feature_activation_loaded_programs_recompilation_phase() { goto_end_of_slot(bank.clone()); // Advance to next epoch, which starts the recompilation phase - let bank = new_from_parent_next_epoch(bank, 1); + let bank = new_from_parent_next_epoch(bank, bank_forks.as_ref(), 1); // Execute after feature is enabled to check it was filtered out and reverified. let result_with_feature_enabled = bank.process_transaction(&transaction2); @@ -12029,12 +12216,17 @@ fn test_bank_verify_accounts_hash_with_base() { bank.fill_bank_with_ticks_for_tests(); }; - let mut bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let (mut bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); // make some banks, do some transactions, ensure there's some zero-lamport accounts for _ in 0..2 { let slot = bank.slot() + 1; - bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::new_unique(), slot)); + bank = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + bank, + &Pubkey::new_unique(), + slot, + ); do_transfers(&bank); } @@ -12048,7 +12240,12 @@ fn test_bank_verify_accounts_hash_with_base() { // make more banks, do more transactions, ensure there's more zero-lamport accounts for _ in 0..2 { let slot = bank.slot() + 1; - bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::new_unique(), slot)); + bank = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + bank, + &Pubkey::new_unique(), + slot, + ); do_transfers(&bank); } @@ -12723,13 +12920,17 @@ fn test_program_execution_restricted_for_stake_account_in_reward_period() { let node_key = &validator_keypairs[0].node_keypair; let stake_key = &validator_keypairs[0].stake_keypair; - let bank0 = Bank::new_for_tests(&genesis_config); - let num_slots_in_epoch = bank0.get_slots_in_epoch(bank0.epoch()); + let (mut previous_bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let num_slots_in_epoch = previous_bank.get_slots_in_epoch(previous_bank.epoch()); assert_eq!(num_slots_in_epoch, 32); - let mut previous_bank = Arc::new(bank0); for slot in 1..=num_slots_in_epoch + 2 { - let bank = Bank::new_from_parent(previous_bank.clone(), &Pubkey::default(), slot); + let bank = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + previous_bank.clone(), + &Pubkey::default(), + slot, + ); // Fill bank_forks with banks with votes landing in the next slot // So that rewards will be paid out at the epoch boundary, i.e. slot = 32 @@ -12765,7 +12966,7 @@ fn test_program_execution_restricted_for_stake_account_in_reward_period() { // iteration are different. Otherwise, all those transactions will be the same, and will not be // executed by the bank except the first one. 
bank.register_unique_recent_blockhash_for_test(); - previous_bank = Arc::new(bank); + previous_bank = bank; } } @@ -12918,8 +13119,8 @@ fn test_store_vote_accounts_partitioned_empty() { #[test] fn test_system_instruction_allocate() { let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.0)); - let bank = Bank::new_for_tests(&genesis_config); - let bank_client = BankClient::new(bank); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; + let bank_client = BankClient::new_shared(bank); let data_len = 2; let amount = genesis_config.rent.minimum_balance(data_len); @@ -12976,7 +13177,7 @@ where // create initial bank and fund the alice account let (genesis_config, mint_keypair) = create_genesis_config(mint_lamports); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let bank_client = BankClient::new_shared(bank.clone()); bank_client .transfer_and_confirm(mint_lamports, &mint_keypair, &alice_pubkey) @@ -12985,12 +13186,12 @@ where // create zero-lamports account to be cleaned let account = AccountSharedData::new(0, len1, &program); let slot = bank.slot() + 1; - let bank = Arc::new(Bank::new_from_parent(bank, &collector, slot)); + let bank = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank, &collector, slot); bank.store_account(&bob_pubkey, &account); // transfer some to bogus pubkey just to make previous bank (=slot) really cleanable let slot = bank.slot() + 1; - let bank = Arc::new(Bank::new_from_parent(bank, &collector, slot)); + let bank = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank, &collector, slot); let bank_client = BankClient::new_shared(bank.clone()); bank_client .transfer_and_confirm( @@ -13002,13 +13203,13 @@ where // super fun time; callback chooses to .clean_accounts(None) or not let slot = bank.slot() + 1; - let bank = Arc::new(Bank::new_from_parent(bank, &collector, slot)); + let bank = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank, &collector, slot); callback(&bank); // create a normal account at the same pubkey as the zero-lamports account let lamports = genesis_config.rent.minimum_balance(len2); let slot = bank.slot() + 1; - let bank = Arc::new(Bank::new_from_parent(bank, &collector, slot)); + let bank = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank, &collector, slot); let bank_client = BankClient::new_shared(bank); let ix = system_instruction::create_account( &alice_pubkey, @@ -13045,8 +13246,8 @@ fn test_create_zero_lamport_without_clean() { #[test] fn test_system_instruction_assign_with_seed() { let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.0)); - let bank = Bank::new_for_tests(&genesis_config); - let bank_client = BankClient::new(bank); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; + let bank_client = BankClient::new_shared(bank); let alice_keypair = Keypair::new(); let alice_pubkey = alice_keypair.pubkey(); @@ -13086,8 +13287,8 @@ fn test_system_instruction_unsigned_transaction() { let amount = genesis_config.rent.minimum_balance(0); // Fund to account to bypass AccountNotFound error - let bank = Bank::new_for_tests(&genesis_config); - let bank_client = BankClient::new(bank); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; + let bank_client = BankClient::new_shared(bank); bank_client .transfer_and_confirm(amount, &alice_keypair, &mallory_pubkey) .unwrap(); @@ -13742,3 +13943,175 @@ fn 
test_filter_executable_program_accounts_invalid_blockhash() { ); assert_eq!(lock_results[1].0, Err(TransactionError::BlockhashNotFound)); } + +/// Test that rehashing works with skipped rewrites +/// +/// Since `bank_to_xxx_snapshot_archive()` calls `Bank::rehash()`, we must ensure that rehashing +/// works properly when also using `test_skip_rewrites_but_include_in_bank_hash`. +#[test] +fn test_rehash_with_skipped_rewrites() { + let accounts_db_config = AccountsDbConfig { + test_skip_rewrites_but_include_in_bank_hash: true, + ..ACCOUNTS_DB_CONFIG_FOR_TESTING + }; + let bank = Arc::new(Bank::new_with_paths( + &GenesisConfig::default(), + Arc::new(RuntimeConfig::default()), + Vec::default(), + None, + None, + AccountSecondaryIndexes::default(), + AccountShrinkThreshold::default(), + false, + Some(accounts_db_config), + None, + Arc::new(AtomicBool::new(false)), + )); + // This test is only meaningful while the bank hash contains rewrites. + // Once this feature is enabled, it may be possible to remove this test entirely. + assert!(!bank.bank_hash_skips_rent_rewrites()); + + // Store an account *in this bank* that will be checked for rent collection *in the next bank* + let pubkey = { + let rent_collection_partition = bank + .variable_cycle_partitions_between_slots(bank.slot(), bank.slot() + 1) + .last() + .copied() + .unwrap(); + let pubkey_range = + accounts_partition::pubkey_range_from_partition(rent_collection_partition); + *pubkey_range.end() + }; + let mut account = AccountSharedData::new(123_456_789, 0, &Pubkey::default()); + // The account's rent epoch must be set to EXEMPT + // in order for its rewrite to be skipped by rent collection. + account.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); + bank.store_account_and_update_capitalization(&pubkey, &account); + + // Create a new bank that will do rent collection on the account stored in the previous slot + let bank = Arc::new(Bank::new_from_parent( + bank.clone(), + &Pubkey::new_unique(), + bank.slot() + 1, + )); + + // Freeze the bank to trigger rent collection and hash calculation + bank.freeze(); + + // Ensure the bank hash is the same before and after rehashing + let bank_hash = bank.hash(); + bank.rehash(); + let bank_rehash = bank.hash(); + assert_eq!(bank_rehash, bank_hash); +} + +/// Test that skipped_rewrites are properly rebuilt when booting from a snapshot +/// that was generated by a node skipping rewrites. +#[test] +fn test_rebuild_skipped_rewrites() { + let genesis_config = GenesisConfig::default(); + let accounts_db_config = AccountsDbConfig { + test_skip_rewrites_but_include_in_bank_hash: true, + ..ACCOUNTS_DB_CONFIG_FOR_TESTING + }; + let bank = Arc::new(Bank::new_with_paths( + &genesis_config, + Arc::new(RuntimeConfig::default()), + Vec::default(), + None, + None, + AccountSecondaryIndexes::default(), + AccountShrinkThreshold::default(), + false, + Some(accounts_db_config.clone()), + None, + Arc::new(AtomicBool::new(false)), + )); + // This test is only meaningful while the bank hash contains rewrites. + // Once this feature is enabled, it may be possible to remove this test entirely. 
+ assert!(!bank.bank_hash_skips_rent_rewrites()); + + // Store an account *in this bank* that will be checked for rent collection *in the next bank* + let pubkey = { + let rent_collection_partition = bank + .variable_cycle_partitions_between_slots(bank.slot(), bank.slot() + 1) + .last() + .copied() + .unwrap(); + let pubkey_range = + accounts_partition::pubkey_range_from_partition(rent_collection_partition); + *pubkey_range.end() + }; + let mut account = AccountSharedData::new(123_456_789, 0, &Pubkey::default()); + // The account's rent epoch must be set to EXEMPT + // in order for its rewrite to be skipped by rent collection. + account.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); + bank.store_account_and_update_capitalization(&pubkey, &account); + + // Create a new bank that will do rent collection on the account stored in the previous slot + let bank = Arc::new(Bank::new_from_parent( + bank.clone(), + &Pubkey::new_unique(), + bank.slot() + 1, + )); + + // This fn is called within freeze(), but freeze() *consumes* Self::skipped_rewrites! + // For testing, we want to know what's in the skipped rewrites, so we perform + // rent collection manually. + bank.collect_rent_eagerly(); + let actual_skipped_rewrites = bank.skipped_rewrites.lock().unwrap().clone(); + // Ensure skipped rewrites now includes the account we stored above + assert!(actual_skipped_rewrites.contains_key(&pubkey)); + // Ensure the calculated skipped rewrites match the actual ones + let calculated_skipped_rewrites = bank.calculate_skipped_rewrites(); + assert_eq!(calculated_skipped_rewrites, actual_skipped_rewrites); + + // required in order to snapshot the bank + bank.fill_bank_with_ticks_for_tests(); + + // Now take a snapshot! + let (_tmp_dir, accounts_dir) = snapshot_utils::create_tmp_accounts_dir_for_tests(); + let bank_snapshots_dir = TempDir::new().unwrap(); + let full_snapshot_archives_dir = TempDir::new().unwrap(); + let incremental_snapshot_archives_dir = TempDir::new().unwrap(); + let full_snapshot_archive = snapshot_bank_utils::bank_to_full_snapshot_archive( + bank_snapshots_dir.path(), + &bank, + None, + full_snapshot_archives_dir.path(), + incremental_snapshot_archives_dir.path(), + snapshot_utils::ArchiveFormat::Tar, + snapshot_utils::DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN, + snapshot_utils::DEFAULT_MAX_INCREMENTAL_SNAPSHOT_ARCHIVES_TO_RETAIN, + ) + .unwrap(); + + // Rebuild the bank and ensure it passes verification + let (snapshot_bank, _) = snapshot_bank_utils::bank_from_snapshot_archives( + &[accounts_dir], + bank_snapshots_dir.path(), + &full_snapshot_archive, + None, + &genesis_config, + &RuntimeConfig::default(), + None, + None, + AccountSecondaryIndexes::default(), + None, + AccountShrinkThreshold::default(), + false, + false, + false, + false, + Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), + None, + Arc::new(AtomicBool::new(false)), + ) + .unwrap(); + snapshot_bank.wait_for_initial_accounts_hash_verification_completed_for_tests(); + assert_eq!(bank.as_ref(), &snapshot_bank); + + // Ensure the snapshot bank's skipped rewrites match the original bank's + let snapshot_skipped_rewrites = snapshot_bank.calculate_skipped_rewrites(); + assert_eq!(snapshot_skipped_rewrites, actual_skipped_rewrites); +} diff --git a/runtime/src/bank_client.rs b/runtime/src/bank_client.rs index e2d7012ecca908..7fe6418d4110b2 100644 --- a/runtime/src/bank_client.rs +++ b/runtime/src/bank_client.rs @@ -4,7 +4,6 @@ use { solana_sdk::{ account::Account, client::{AsyncClient, Client, SyncClient}, - clock, 
commitment_config::CommitmentConfig, epoch_info::EpochInfo, fee_calculator::{FeeCalculator, FeeRateGovernor}, @@ -27,6 +26,8 @@ use { time::{Duration, Instant}, }, }; +#[cfg(feature = "dev-context-only-utils")] +use {crate::bank_forks::BankForks, solana_sdk::clock, std::sync::RwLock}; pub struct BankClient { bank: Arc<Bank>, @@ -330,12 +331,24 @@ impl BankClient { self.bank.set_sysvar_for_tests(sysvar); } - pub fn advance_slot(&mut self, by: u64, collector_id: &Pubkey) -> Option<Arc<Bank>> { - self.bank = Arc::new(Bank::new_from_parent( + #[cfg(feature = "dev-context-only-utils")] + pub fn advance_slot( + &mut self, + by: u64, + bank_forks: &RwLock<BankForks>, + collector_id: &Pubkey, + ) -> Option<Arc<Bank>> { + let new_bank = Bank::new_from_parent( self.bank.clone(), collector_id, self.bank.slot().checked_add(by)?, - )); + ); + self.bank = bank_forks + .write() + .unwrap() + .insert(new_bank) + .clone_without_scheduler(); + self.set_sysvar_for_tests(&clock::Clock { slot: self.bank.slot(), ..clock::Clock::default() @@ -361,8 +374,8 @@ mod tests { let jane_doe_keypair = Keypair::new(); let jane_pubkey = jane_doe_keypair.pubkey(); let doe_keypairs = vec![&john_doe_keypair, &jane_doe_keypair]; - let bank = Bank::new_for_tests(&genesis_config); - let bank_client = BankClient::new(bank); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; + let bank_client = BankClient::new_shared(bank); let amount = genesis_config.rent.minimum_balance(0); // Create 2-2 Multisig Transfer instruction. diff --git a/runtime/src/bank_utils.rs b/runtime/src/bank_utils.rs index 96844da6351257..d8d6144d89d1d7 100644 --- a/runtime/src/bank_utils.rs +++ b/runtime/src/bank_utils.rs @@ -1,13 +1,18 @@ +#[cfg(feature = "dev-context-only-utils")] use { crate::{ bank::Bank, genesis_utils::{self, GenesisConfigInfo, ValidatorVoteKeypairs}, }, + solana_sdk::{pubkey::Pubkey, signature::Signer}, +}; +use { solana_accounts_db::transaction_results::TransactionResults, - solana_sdk::{pubkey::Pubkey, signature::Signer, transaction::SanitizedTransaction}, + solana_sdk::transaction::SanitizedTransaction, solana_vote::{vote_parser, vote_sender_types::ReplayVoteSender}, }; +#[cfg(feature = "dev-context-only-utils")] pub fn setup_bank_and_vote_pubkeys_for_tests( num_vote_accounts: usize, stake: u64, diff --git a/runtime/src/installed_scheduler_pool.rs b/runtime/src/installed_scheduler_pool.rs index dde82f2a63f890..35b46e420f0fd8 100644 --- a/runtime/src/installed_scheduler_pool.rs +++ b/runtime/src/installed_scheduler_pool.rs @@ -259,6 +259,7 @@ impl BankWithScheduler { self.inner.bank.register_tick(hash, &self.inner.scheduler); } + #[cfg(feature = "dev-context-only-utils")] pub fn fill_bank_with_ticks_for_tests(&self) { self.do_fill_bank_with_ticks_for_tests(&self.inner.scheduler); } diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index 078da133979f64..8ee9f60d553549 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -941,8 +941,6 @@ where .set(rent_paying_accounts_by_partition) .unwrap(); - accounts_db.maybe_add_filler_accounts(&genesis_config.epoch_schedule, snapshot_slot); - handle.join().unwrap(); measure_notify.stop(); diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs index 278d6d68da8bc1..f638b7e975a776 100644 --- a/runtime/src/serde_snapshot/tests.rs +++ b/runtime/src/serde_snapshot/tests.rs @@ -446,9 +446,6 @@ mod serde_snapshot_tests { let account2 = AccountSharedData::new(some_lamport + 1, no_data, &owner); let pubkey2 = solana_sdk::pubkey::new_rand(); -
let filler_account = AccountSharedData::new(some_lamport, no_data, &owner); - let filler_account_pubkey = solana_sdk::pubkey::new_rand(); - let accounts = AccountsDb::new_single_for_tests(); let mut current_slot = 1; @@ -459,12 +456,6 @@ mod serde_snapshot_tests { accounts.store_for_tests(current_slot, &[(&pubkey, &zero_lamport_account)]); accounts.store_for_tests(current_slot, &[(&pubkey2, &account2)]); - // Store the account a few times. - // use to be: store enough accounts such that an additional store for slot 2 is created. - // but we use the write cache now - for _ in 0..3 { - accounts.store_for_tests(current_slot, &[(&filler_account_pubkey, &filler_account)]); - } accounts.add_root_and_flush_write_cache(current_slot); accounts.assert_load_account(current_slot, pubkey, zero_lamport); diff --git a/runtime/src/snapshot_bank_utils.rs b/runtime/src/snapshot_bank_utils.rs index 62ac8285b1cea9..ed0a4dab893c12 100644 --- a/runtime/src/snapshot_bank_utils.rs +++ b/runtime/src/snapshot_bank_utils.rs @@ -13,16 +13,16 @@ use { snapshot_hash::SnapshotHash, snapshot_package::{AccountsPackage, AccountsPackageKind, SnapshotKind, SnapshotPackage}, snapshot_utils::{ - self, archive_snapshot_package, build_storage_from_snapshot_dir, - delete_contents_of_path, deserialize_snapshot_data_file, - deserialize_snapshot_data_files, get_bank_snapshot_dir, get_highest_bank_snapshot_post, - get_highest_full_snapshot_archive_info, get_highest_incremental_snapshot_archive_info, - get_snapshot_file_name, get_storages_to_serialize, hard_link_storages_to_snapshot, - serialize_snapshot_data_file, verify_and_unarchive_snapshots, - verify_unpacked_snapshots_dir_and_version, write_snapshot_version_file, - AddBankSnapshotError, ArchiveFormat, BankSnapshotInfo, BankSnapshotType, SnapshotError, - SnapshotRootPaths, SnapshotVersion, StorageAndNextAppendVecId, - UnpackedSnapshotsDirAndVersion, VerifySlotDeltasError, + self, archive_snapshot_package, delete_contents_of_path, + deserialize_snapshot_data_file, deserialize_snapshot_data_files, get_bank_snapshot_dir, + get_highest_bank_snapshot_post, get_highest_full_snapshot_archive_info, + get_highest_incremental_snapshot_archive_info, get_snapshot_file_name, + get_storages_to_serialize, hard_link_storages_to_snapshot, + rebuild_storages_from_snapshot_dir, serialize_snapshot_data_file, + verify_and_unarchive_snapshots, verify_unpacked_snapshots_dir_and_version, + write_snapshot_version_file, AddBankSnapshotError, ArchiveFormat, BankSnapshotInfo, + BankSnapshotType, SnapshotError, SnapshotRootPaths, SnapshotVersion, + StorageAndNextAppendVecId, UnpackedSnapshotsDirAndVersion, VerifySlotDeltasError, }, status_cache, }, @@ -202,18 +202,18 @@ fn serialize_status_cache( }) } -#[derive(Debug, Default)] -pub struct BankFromArchiveTimings { - pub rebuild_bank_from_snapshots_us: u64, - pub full_snapshot_untar_us: u64, - pub incremental_snapshot_untar_us: u64, - pub verify_snapshot_bank_us: u64, +#[derive(Debug)] +pub struct BankFromArchivesTimings { + pub untar_full_snapshot_archive_us: u64, + pub untar_incremental_snapshot_archive_us: u64, + pub rebuild_bank_us: u64, + pub verify_bank_us: u64, } -#[derive(Debug, Default)] +#[derive(Debug)] pub struct BankFromDirTimings { - pub rebuild_bank_from_snapshot_us: u64, - pub build_storage_us: u64, + pub rebuild_storages_us: u64, + pub rebuild_bank_us: u64, } /// Utility for parsing out bank specific information from a snapshot archive. 
This utility can be used @@ -276,7 +276,7 @@ pub fn bank_from_snapshot_archives( accounts_db_config: Option, accounts_update_notifier: Option, exit: Arc, -) -> snapshot_utils::Result<(Bank, BankFromArchiveTimings)> { +) -> snapshot_utils::Result<(Bank, BankFromArchivesTimings)> { info!( "Loading bank from full snapshot archive: {}, and incremental snapshot archive: {:?}", full_snapshot_archive_info.path().display(), @@ -375,37 +375,29 @@ pub fn bank_from_snapshot_archives( } measure_verify.stop(); - let timings = BankFromArchiveTimings { - rebuild_bank_from_snapshots_us: measure_rebuild.as_us(), - full_snapshot_untar_us: unarchived_full_snapshot.measure_untar.as_us(), - incremental_snapshot_untar_us: unarchived_incremental_snapshot + let timings = BankFromArchivesTimings { + untar_full_snapshot_archive_us: unarchived_full_snapshot.measure_untar.as_us(), + untar_incremental_snapshot_archive_us: unarchived_incremental_snapshot .map_or(0, |unarchive_preparation_result| { unarchive_preparation_result.measure_untar.as_us() }), - verify_snapshot_bank_us: measure_verify.as_us(), + rebuild_bank_us: measure_rebuild.as_us(), + verify_bank_us: measure_verify.as_us(), }; datapoint_info!( "bank_from_snapshot_archives", ( - "full_snapshot_untar_us", - timings.full_snapshot_untar_us, + "untar_full_snapshot_archive_us", + timings.untar_full_snapshot_archive_us, i64 ), ( - "incremental_snapshot_untar_us", - timings.incremental_snapshot_untar_us, - i64 - ), - ( - "rebuild_bank_from_snapshots_us", - timings.rebuild_bank_from_snapshots_us, - i64 - ), - ( - "verify_snapshot_bank_us", - timings.verify_snapshot_bank_us, + "untar_incremental_snapshot_archive_us", + timings.untar_incremental_snapshot_archive_us, i64 ), + ("rebuild_bank_us", timings.rebuild_bank_us, i64), + ("verify_bank_us", timings.verify_bank_us, i64), ); Ok((bank, timings)) } @@ -506,11 +498,15 @@ pub fn bank_from_snapshot_dir( let next_append_vec_id = Arc::new(AtomicAppendVecId::new(0)); - let (storage, measure_build_storage) = measure!( - build_storage_from_snapshot_dir(bank_snapshot, account_paths, next_append_vec_id.clone())?, - "build storage from snapshot dir" + let (storage, measure_rebuild_storages) = measure!( + rebuild_storages_from_snapshot_dir( + bank_snapshot, + account_paths, + next_append_vec_id.clone() + )?, + "rebuild storages from snapshot dir" ); - info!("{}", measure_build_storage); + info!("{}", measure_rebuild_storages); let next_append_vec_id = Arc::try_unwrap(next_append_vec_id).expect("this is the only strong reference"); @@ -518,46 +514,39 @@ pub fn bank_from_snapshot_dir( storage, next_append_vec_id, }; - let mut measure_rebuild = Measure::start("rebuild bank from snapshot"); - let bank = rebuild_bank_from_snapshot( - bank_snapshot, - account_paths, - storage_and_next_append_vec_id, - genesis_config, - runtime_config, - debug_keys, - additional_builtins, - account_secondary_indexes, - limit_load_slot_count_from_snapshot, - shrink_ratio, - verify_index, - accounts_db_config, - accounts_update_notifier, - exit, - )?; - measure_rebuild.stop(); - info!("{}", measure_rebuild); + let (bank, measure_rebuild_bank) = measure!( + rebuild_bank_from_snapshot( + bank_snapshot, + account_paths, + storage_and_next_append_vec_id, + genesis_config, + runtime_config, + debug_keys, + additional_builtins, + account_secondary_indexes, + limit_load_slot_count_from_snapshot, + shrink_ratio, + verify_index, + accounts_db_config, + accounts_update_notifier, + exit, + )?, + "rebuild bank from snapshot" + ); + info!("{}", measure_rebuild_bank); 
// Skip bank.verify_snapshot_bank. Subsequent snapshot requests/accounts hash verification requests // will calculate and check the accounts hash, so we will still have safety/correctness there. bank.set_initial_accounts_hash_verification_completed(); let timings = BankFromDirTimings { - rebuild_bank_from_snapshot_us: measure_rebuild.as_us(), - build_storage_us: measure_build_storage.as_us(), + rebuild_storages_us: measure_rebuild_storages.as_us(), + rebuild_bank_us: measure_rebuild_bank.as_us(), }; datapoint_info!( "bank_from_snapshot_dir", - ( - "build_storage_from_snapshot_dir_us", - timings.build_storage_us, - i64 - ), - ( - "rebuild_bank_from_snapshot_us", - timings.rebuild_bank_from_snapshot_us, - i64 - ), + ("rebuild_storages_us", timings.rebuild_storages_us, i64), + ("rebuild_bank_us", timings.rebuild_bank_us, i64), ); Ok((bank, timings)) } @@ -1204,6 +1193,7 @@ pub fn package_and_archive_incremental_snapshot( )) } +#[cfg(feature = "dev-context-only-utils")] pub fn create_snapshot_dirs_for_tests( genesis_config: &GenesisConfig, bank_snapshots_dir: impl AsRef<Path>, @@ -1260,6 +1250,7 @@ mod tests { use { super::*, crate::{ + bank_forks::BankForks, genesis_utils, snapshot_utils::{ clean_orphaned_account_snapshot_dirs, create_all_accounts_run_and_snapshot_dirs, @@ -1283,9 +1274,23 @@ mod tests { system_transaction, transaction::SanitizedTransaction, }, - std::sync::{atomic::Ordering, Arc}, + std::sync::{atomic::Ordering, Arc, RwLock}, }; + fn new_bank_from_parent_with_bank_forks( + bank_forks: &RwLock<BankForks>, + parent: Arc<Bank>, + collector_id: &Pubkey, + slot: Slot, + ) -> Arc<Bank> { + let bank = Bank::new_from_parent(parent, collector_id, slot); + bank_forks + .write() + .unwrap() + .insert(bank) + .clone_without_scheduler() + } + /// Test roundtrip of bank to a full snapshot, then back again. This test creates the simplest /// bank possible, so the contents of the snapshot archive will be quite minimal.
#[test] @@ -1353,7 +1358,7 @@ mod tests { let key5 = Keypair::new(); let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1_000_000.)); - let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); + let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); bank0 .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) .unwrap(); @@ -1368,7 +1373,8 @@ mod tests { } let slot = 1; - let bank1 = Arc::new(Bank::new_from_parent(bank0, &collector, slot)); + let bank1 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank0, &collector, slot); bank1 .transfer(sol_to_lamports(3.), &mint_keypair, &key3.pubkey()) .unwrap(); @@ -1383,7 +1389,8 @@ mod tests { } let slot = slot + 1; - let bank2 = Arc::new(Bank::new_from_parent(bank1, &collector, slot)); + let bank2 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank1, &collector, slot); bank2 .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) .unwrap(); @@ -1392,7 +1399,8 @@ mod tests { } let slot = slot + 1; - let bank3 = Arc::new(Bank::new_from_parent(bank2, &collector, slot)); + let bank3 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank2, &collector, slot); bank3 .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) .unwrap(); @@ -1401,7 +1409,8 @@ mod tests { } let slot = slot + 1; - let bank4 = Arc::new(Bank::new_from_parent(bank3, &collector, slot)); + let bank4 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank3, &collector, slot); bank4 .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) .unwrap(); @@ -1471,7 +1480,7 @@ mod tests { let key5 = Keypair::new(); let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1_000_000.)); - let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); + let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); bank0 .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) .unwrap(); @@ -1486,7 +1495,8 @@ mod tests { } let slot = 1; - let bank1 = Arc::new(Bank::new_from_parent(bank0, &collector, slot)); + let bank1 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank0, &collector, slot); bank1 .transfer(sol_to_lamports(3.), &mint_keypair, &key3.pubkey()) .unwrap(); @@ -1520,7 +1530,8 @@ mod tests { .unwrap(); let slot = slot + 1; - let bank2 = Arc::new(Bank::new_from_parent(bank1, &collector, slot)); + let bank2 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank1, &collector, slot); bank2 .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) .unwrap(); @@ -1529,7 +1540,8 @@ mod tests { } let slot = slot + 1; - let bank3 = Arc::new(Bank::new_from_parent(bank2, &collector, slot)); + let bank3 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank2, &collector, slot); bank3 .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) .unwrap(); @@ -1538,7 +1550,8 @@ mod tests { } let slot = slot + 1; - let bank4 = Arc::new(Bank::new_from_parent(bank3, &collector, slot)); + let bank4 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank3, &collector, slot); bank4 .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) .unwrap(); @@ -1593,7 +1606,7 @@ mod tests { let key3 = Keypair::new(); let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1_000_000.)); - let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); + let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); bank0 .transfer(sol_to_lamports(1.), 
&mint_keypair, &key1.pubkey()) .unwrap(); @@ -1608,7 +1621,8 @@ mod tests { } let slot = 1; - let bank1 = Arc::new(Bank::new_from_parent(bank0, &collector, slot)); + let bank1 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank0, &collector, slot); bank1 .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) .unwrap(); @@ -1642,7 +1656,8 @@ mod tests { .unwrap(); let slot = slot + 1; - let bank2 = Arc::new(Bank::new_from_parent(bank1, &collector, slot)); + let bank2 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank1, &collector, slot); bank2 .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) .unwrap(); @@ -1651,7 +1666,8 @@ mod tests { } let slot = slot + 1; - let bank3 = Arc::new(Bank::new_from_parent(bank2, &collector, slot)); + let bank3 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank2, &collector, slot); bank3 .transfer(sol_to_lamports(2.), &mint_keypair, &key2.pubkey()) .unwrap(); @@ -1660,7 +1676,8 @@ mod tests { } let slot = slot + 1; - let bank4 = Arc::new(Bank::new_from_parent(bank3, &collector, slot)); + let bank4 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank3, &collector, slot); bank4 .transfer(sol_to_lamports(3.), &mint_keypair, &key3.pubkey()) .unwrap(); @@ -1743,13 +1760,14 @@ mod tests { let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1_000_000.)); let lamports_to_transfer = sol_to_lamports(123_456.); - let bank0 = Arc::new(Bank::new_with_paths_for_tests( + let (bank0, bank_forks) = Bank::new_with_paths_for_tests( &genesis_config, Arc::::default(), vec![accounts_dir.clone()], AccountSecondaryIndexes::default(), AccountShrinkThreshold::default(), - )); + ) + .wrap_with_bank_forks_for_tests(); bank0 .transfer(lamports_to_transfer, &mint_keypair, &key2.pubkey()) .unwrap(); @@ -1758,7 +1776,8 @@ mod tests { } let slot = 1; - let bank1 = Arc::new(Bank::new_from_parent(bank0, &collector, slot)); + let bank1 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank0, &collector, slot); bank1 .transfer(lamports_to_transfer, &key2, &key1.pubkey()) .unwrap(); @@ -1780,7 +1799,8 @@ mod tests { .unwrap(); let slot = slot + 1; - let bank2 = Arc::new(Bank::new_from_parent(bank1, &collector, slot)); + let bank2 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank1, &collector, slot); let blockhash = bank2.last_blockhash(); let tx = SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( &key1, @@ -1847,7 +1867,8 @@ mod tests { ); let slot = slot + 1; - let bank3 = Arc::new(Bank::new_from_parent(bank2, &collector, slot)); + let bank3 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank2, &collector, slot); // Update Account2 so that it no longer holds a reference to slot2 bank3 .transfer(lamports_to_transfer, &mint_keypair, &key2.pubkey()) @@ -1857,7 +1878,8 @@ mod tests { } let slot = slot + 1; - let bank4 = Arc::new(Bank::new_from_parent(bank3, &collector, slot)); + let bank4 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank3, &collector, slot); while !bank4.is_complete() { bank4.register_unique_tick(); } @@ -1925,13 +1947,14 @@ mod tests { let key1 = Keypair::new(); let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1_000_000.)); - let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); + let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); while !bank0.is_complete() { bank0.register_unique_tick(); } let slot = 1; - let bank1 = 
Arc::new(Bank::new_from_parent(bank0, &collector, slot)); + let bank1 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank0, &collector, slot); while !bank1.is_complete() { bank1.register_unique_tick(); } @@ -1953,7 +1976,8 @@ mod tests { .unwrap(); let slot = slot + 1; - let bank2 = Arc::new(Bank::new_from_parent(bank1, &collector, slot)); + let bank2 = + new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank1, &collector, slot); bank2 .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) .unwrap(); @@ -2192,12 +2216,18 @@ mod tests { bank.fill_bank_with_ticks_for_tests(); }; - let mut bank = Arc::new(Bank::new_for_tests(&genesis_config_info.genesis_config)); + let (mut bank, bank_forks) = + Bank::new_with_bank_forks_for_tests(&genesis_config_info.genesis_config); // make some banks, do some transactions, ensure there's some zero-lamport accounts for _ in 0..5 { let slot = bank.slot() + 1; - bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::new_unique(), slot)); + bank = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + bank, + &Pubkey::new_unique(), + slot, + ); do_transfers(&bank); } @@ -2223,7 +2253,12 @@ mod tests { // make more banks, do more transactions, ensure there's more zero-lamport accounts for _ in 0..5 { let slot = bank.slot() + 1; - bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::new_unique(), slot)); + bank = new_bank_from_parent_with_bank_forks( + bank_forks.as_ref(), + bank, + &Pubkey::new_unique(), + slot, + ); do_transfers(&bank); } diff --git a/runtime/src/snapshot_package.rs b/runtime/src/snapshot_package.rs index 6685cd269daea6..8053a13d2c137e 100644 --- a/runtime/src/snapshot_package.rs +++ b/runtime/src/snapshot_package.rs @@ -148,7 +148,7 @@ impl AccountsPackage { expected_capitalization: bank.capitalization(), accounts_hash_for_testing, accounts: bank.accounts(), - epoch_schedule: *bank.epoch_schedule(), + epoch_schedule: bank.epoch_schedule().clone(), rent_collector: bank.rent_collector().clone(), is_incremental_accounts_hash_feature_enabled, snapshot_info, diff --git a/runtime/src/snapshot_package/compare.rs b/runtime/src/snapshot_package/compare.rs index 75e75119cf0a6e..d951d818c37975 100644 --- a/runtime/src/snapshot_package/compare.rs +++ b/runtime/src/snapshot_package/compare.rs @@ -33,22 +33,22 @@ pub fn cmp_accounts_package_kinds_by_priority( a: &AccountsPackageKind, b: &AccountsPackageKind, ) -> Ordering { - use AccountsPackageKind::*; + use AccountsPackageKind as Kind; match (a, b) { // Epoch Accounts Hash packages - (EpochAccountsHash, EpochAccountsHash) => Equal, - (EpochAccountsHash, _) => Greater, - (_, EpochAccountsHash) => Less, + (Kind::EpochAccountsHash, Kind::EpochAccountsHash) => Equal, + (Kind::EpochAccountsHash, _) => Greater, + (_, Kind::EpochAccountsHash) => Less, // Snapshot packages - (Snapshot(snapshot_kind_a), Snapshot(snapshot_kind_b)) => { + (Kind::Snapshot(snapshot_kind_a), Kind::Snapshot(snapshot_kind_b)) => { cmp_snapshot_kinds_by_priority(snapshot_kind_a, snapshot_kind_b) } - (Snapshot(_), _) => Greater, - (_, Snapshot(_)) => Less, + (Kind::Snapshot(_), _) => Greater, + (_, Kind::Snapshot(_)) => Less, // Accounts Hash Verifier packages - (AccountsHashVerifier, AccountsHashVerifier) => Equal, + (Kind::AccountsHashVerifier, Kind::AccountsHashVerifier) => Equal, } } @@ -58,12 +58,12 @@ pub fn cmp_accounts_package_kinds_by_priority( /// If two `IncrementalSnapshot`s are compared, their base slots are the tiebreaker. 
#[must_use] pub fn cmp_snapshot_kinds_by_priority(a: &SnapshotKind, b: &SnapshotKind) -> Ordering { - use SnapshotKind::*; + use SnapshotKind as Kind; match (a, b) { - (FullSnapshot, FullSnapshot) => Equal, - (FullSnapshot, IncrementalSnapshot(_)) => Greater, - (IncrementalSnapshot(_), FullSnapshot) => Less, - (IncrementalSnapshot(base_slot_a), IncrementalSnapshot(base_slot_b)) => { + (Kind::FullSnapshot, Kind::FullSnapshot) => Equal, + (Kind::FullSnapshot, Kind::IncrementalSnapshot(_)) => Greater, + (Kind::IncrementalSnapshot(_), Kind::FullSnapshot) => Less, + (Kind::IncrementalSnapshot(base_slot_a), Kind::IncrementalSnapshot(base_slot_b)) => { base_slot_a.cmp(base_slot_b) } } diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index c890c3196f936d..88be65216cbb00 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -1211,20 +1211,6 @@ pub(crate) fn get_storages_to_serialize( .collect::<Vec<_>>() } -#[derive(Debug, Default)] -pub struct BankFromArchiveTimings { - pub rebuild_bank_from_snapshots_us: u64, - pub full_snapshot_untar_us: u64, - pub incremental_snapshot_untar_us: u64, - pub verify_snapshot_bank_us: u64, -} - -#[derive(Debug, Default)] -pub struct BankFromDirTimings { - pub rebuild_bank_from_snapshot_us: u64, - pub build_storage_us: u64, -} - // From testing, 4 seems to be a sweet spot for ranges of 60M-360M accounts and 16-64 cores. This may need to be tuned later. const PARALLEL_UNTAR_READERS_DEFAULT: usize = 4; @@ -1461,9 +1447,11 @@ fn streaming_snapshot_dir_files( Ok(()) } -/// Perform the common tasks when deserialize a snapshot. Handles reading snapshot file, reading the version file, -/// and then returning those fields plus the rebuilt storage -pub fn build_storage_from_snapshot_dir( +/// Performs the common tasks when deserializing a snapshot +/// +/// Handles reading the snapshot file and version file, +/// then returning those fields plus the rebuilt storages.
+pub fn rebuild_storages_from_snapshot_dir( snapshot_info: &BankSnapshotInfo, account_paths: &[PathBuf], next_append_vec_id: Arc<AtomicAppendVecId>, diff --git a/runtime/tests/stake.rs b/runtime/tests/stake.rs index 7088e6438e1c22..2ae9127cef1013 100755 --- a/runtime/tests/stake.rs +++ b/runtime/tests/stake.rs @@ -5,12 +5,14 @@ use { solana_runtime::{ bank::Bank, bank_client::BankClient, + bank_forks::BankForks, genesis_utils::{create_genesis_config_with_leader, GenesisConfigInfo}, }, solana_sdk::{ account::from_account, account_utils::StateMut, client::SyncClient, + clock::Slot, epoch_schedule::{EpochSchedule, MINIMUM_SLOTS_PER_EPOCH}, hash::Hash, message::Message, @@ -28,19 +30,37 @@ use { vote_instruction, vote_state::{Vote, VoteInit, VoteState, VoteStateVersions}, }, - std::sync::Arc, + std::sync::{Arc, RwLock}, }; +fn new_bank_from_parent_with_bank_forks( + bank_forks: &RwLock<BankForks>, + parent: Arc<Bank>, + collector_id: &Pubkey, + slot: Slot, +) -> Arc<Bank> { + let bank = Bank::new_from_parent(parent, collector_id, slot); + bank_forks + .write() + .unwrap() + .insert(bank) + .clone_without_scheduler() +} + /// get bank at next epoch + `n` slots -fn next_epoch_and_n_slots(bank: Arc<Bank>, mut n: usize) -> Arc<Bank> { +fn next_epoch_and_n_slots( + bank: Arc<Bank>, + bank_forks: &RwLock<BankForks>, + mut n: usize, +) -> Arc<Bank> { bank.squash(); let slot = bank.get_slots_in_epoch(bank.epoch()) + bank.slot(); - let mut bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), slot)); + let mut bank = new_bank_from_parent_with_bank_forks(bank_forks, bank, &Pubkey::default(), slot); while n > 0 { bank.squash(); let slot = bank.slot() + 1; - bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), slot)); + bank = new_bank_from_parent_with_bank_forks(bank_forks, bank, &Pubkey::default(), slot); n -= 1; } @@ -49,6 +69,7 @@ fn next_epoch_and_n_slots(bank: Arc<Bank>, mut n: usize) -> Arc<Bank> { fn fill_epoch_with_votes( mut bank: Arc<Bank>, + bank_forks: &RwLock<BankForks>, vote_keypair: &Keypair, mint_keypair: &Keypair, ) -> Arc<Bank> { @@ -58,7 +79,7 @@ fn fill_epoch_with_votes( while bank.epoch() != old_epoch + 1 { bank.squash(); let slot = bank.slot() + 1; - bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), slot)); + bank = new_bank_from_parent_with_bank_forks(bank_forks, bank, &Pubkey::default(), slot); let bank_client = BankClient::new_shared(bank.clone()); let parent = bank.parent().unwrap(); @@ -125,7 +146,7 @@ fn test_stake_create_and_split_single_signature() { let staker_pubkey = staker_keypair.pubkey(); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let bank_client = BankClient::new_shared(bank.clone()); let stake_address = @@ -201,7 +222,7 @@ fn test_stake_create_and_split_to_existing_system_account() { let staker_pubkey = staker_keypair.pubkey(); - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let bank_client = BankClient::new_shared(bank.clone()); let stake_address = @@ -288,9 +309,8 @@ fn test_stake_account_lifetime() { ); genesis_config.epoch_schedule = EpochSchedule::new(MINIMUM_SLOTS_PER_EPOCH); genesis_config.rent = Rent::default(); - let bank = Bank::new_for_tests(&genesis_config); + let (mut bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let mint_pubkey = mint_keypair.pubkey(); - let mut bank = Arc::new(bank); // Need to set the EAH to Valid so that `Bank::new_from_parent()` doesn't panic during freeze // when parent is in the EAH calculation window.
bank.rc @@ -392,12 +412,12 @@ fn test_stake_account_lifetime() { break; } // Cycle thru banks until we're fully warmed up - bank = next_epoch_and_n_slots(bank, 0); + bank = next_epoch_and_n_slots(bank, bank_forks.as_ref(), 0); } // Reward redemption // Submit enough votes to generate rewards - bank = fill_epoch_with_votes(bank, &vote_keypair, &mint_keypair); + bank = fill_epoch_with_votes(bank, bank_forks.as_ref(), &vote_keypair, &mint_keypair); // Test that votes and credits are there let account = bank.get_account(&vote_pubkey).expect("account not found"); @@ -410,13 +430,13 @@ fn test_stake_account_lifetime() { // one vote per slot, might be more slots than 32 in the epoch assert!(vote_state.credits() >= 1); - bank = fill_epoch_with_votes(bank, &vote_keypair, &mint_keypair); + bank = fill_epoch_with_votes(bank, bank_forks.as_ref(), &vote_keypair, &mint_keypair); let pre_staked = get_staked(&bank, &stake_pubkey); let pre_balance = bank.get_balance(&stake_pubkey); // next epoch bank plus one additional slot should pay rewards - bank = next_epoch_and_n_slots(bank, 1); + bank = next_epoch_and_n_slots(bank, bank_forks.as_ref(), 1); // Test that balance increased, and that the balance got staked let staked = get_staked(&bank, &stake_pubkey); @@ -490,7 +510,7 @@ fn test_stake_account_lifetime() { .send_and_confirm_message(&[&mint_keypair, &stake_keypair], message) .is_err()); - let mut bank = next_epoch_and_n_slots(bank, 1); + let mut bank = next_epoch_and_n_slots(bank, bank_forks.as_ref(), 1); let bank_client = BankClient::new_shared(bank.clone()); @@ -536,7 +556,7 @@ fn test_stake_account_lifetime() { if get_staked(&bank, &split_stake_pubkey) == 0 { break; } - bank = next_epoch_and_n_slots(bank, 1); + bank = next_epoch_and_n_slots(bank, bank_forks.as_ref(), 1); } let bank_client = BankClient::new_shared(bank.clone()); @@ -577,9 +597,8 @@ fn test_create_stake_account_from_seed() { &solana_sdk::pubkey::new_rand(), 1_000_000, ); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let mint_pubkey = mint_keypair.pubkey(); - let bank = Arc::new(bank); let bank_client = BankClient::new_shared(bank.clone()); let seed = "test-string"; diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 8142c3012694b1..639f4f17d959b5 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,2 +1,2 @@ [toolchain] -channel = "1.73.0" +channel = "1.74.0" diff --git a/scripts/build-downstream-anchor-projects.sh b/scripts/build-downstream-anchor-projects.sh index 7702c59d93b517..06b5411c196d17 100755 --- a/scripts/build-downstream-anchor-projects.sh +++ b/scripts/build-downstream-anchor-projects.sh @@ -9,8 +9,10 @@ source ci/_ source scripts/patch-crates.sh source scripts/read-cargo-variable.sh +anchor_version=$1 solana_ver=$(readCargoVariable version Cargo.toml) solana_dir=$PWD +cargo="$solana_dir"/cargo cargo_build_sbf="$solana_dir"/cargo-build-sbf cargo_test_sbf="$solana_dir"/cargo-test-sbf @@ -43,15 +45,20 @@ anchor() { set -x rm -rf anchor git clone https://github.com/coral-xyz/anchor.git + cd anchor || exit 1 + + # checkout tag + if [[ -n "$anchor_version" ]]; then + git checkout "$anchor_version" + fi + # copy toolchain file to use solana's rust version - cp "$solana_dir"/rust-toolchain.toml anchor/ - cd anchor + cp "$solana_dir"/rust-toolchain.toml . update_solana_dependencies . 
"$solana_ver" patch_crates_io_solana Cargo.toml "$solana_dir" - cargo build - cargo test + $cargo test anchor_dir=$PWD anchor_ver=$(readCargoVariable version "$anchor_dir"/lang/Cargo.toml) @@ -73,8 +80,9 @@ mango() { patch_crates_io_solana Cargo.toml "$solana_dir" patch_crates_io_anchor Cargo.toml "$anchor_dir" - cargo build - cargo test + cd program + $cargo build + $cargo test $cargo_build_sbf $cargo_test_sbf ) @@ -83,19 +91,17 @@ mango() { metaplex() { ( set -x - rm -rf metaplex-program-library - git clone https://github.com/metaplex-foundation/metaplex-program-library - # copy toolchain file to use solana's rust version - cp "$solana_dir"/rust-toolchain.toml metaplex-program-library/ - cd metaplex-program-library + rm -rf mpl-token-metadata + git clone https://github.com/metaplex-foundation/mpl-token-metadata + # copy toolchain file to use solana's rust version + cp "$solana_dir"/rust-toolchain.toml mpl-token-metadata/ + cd mpl-token-metadata/programs/token-metadata/program update_solana_dependencies . "$solana_ver" - update_anchor_dependencies . "$anchor_ver" patch_crates_io_solana Cargo.toml "$solana_dir" - patch_crates_io_anchor Cargo.toml "$anchor_dir" - cargo build - cargo test + $cargo build + $cargo test $cargo_build_sbf $cargo_test_sbf ) diff --git a/scripts/patch-crates.sh b/scripts/patch-crates.sh index 813a0a32a6175c..91a3010c8a0bd7 100644 --- a/scripts/patch-crates.sh +++ b/scripts/patch-crates.sh @@ -7,11 +7,15 @@ update_solana_dependencies() { while IFS='' read -r line; do tomls+=("$line"); done < <(find "$project_root" -name Cargo.toml) sed -i -e "s#\(solana-program = \"\)[^\"]*\(\"\)#\1=$solana_ver\2#g" "${tomls[@]}" || return $? + sed -i -e "s#\(solana-program = { version = \"\)[^\"]*\(\"\)#\1=$solana_ver\2#g" "${tomls[@]}" || return $? sed -i -e "s#\(solana-program-test = \"\)[^\"]*\(\"\)#\1=$solana_ver\2#g" "${tomls[@]}" || return $? + sed -i -e "s#\(solana-program-test = { version = \"\)[^\"]*\(\"\)#\1=$solana_ver\2#g" "${tomls[@]}" || return $? sed -i -e "s#\(solana-sdk = \"\).*\(\"\)#\1=$solana_ver\2#g" "${tomls[@]}" || return $? sed -i -e "s#\(solana-sdk = { version = \"\)[^\"]*\(\"\)#\1=$solana_ver\2#g" "${tomls[@]}" || return $? sed -i -e "s#\(solana-client = \"\)[^\"]*\(\"\)#\1=$solana_ver\2#g" "${tomls[@]}" || return $? sed -i -e "s#\(solana-client = { version = \"\)[^\"]*\(\"\)#\1=$solana_ver\2#g" "${tomls[@]}" || return $? + sed -i -e "s#\(solana-cli-config = \"\)[^\"]*\(\"\)#\1=$solana_ver\2#g" "${tomls[@]}" || return $? + sed -i -e "s#\(solana-cli-config = { version = \"\)[^\"]*\(\"\)#\1=$solana_ver\2#g" "${tomls[@]}" || return $? sed -i -e "s#\(solana-clap-utils = \"\)[^\"]*\(\"\)#\1=$solana_ver\2#g" "${tomls[@]}" || return $? sed -i -e "s#\(solana-clap-utils = { version = \"\)[^\"]*\(\"\)#\1=$solana_ver\2#g" "${tomls[@]}" || return $? sed -i -e "s#\(solana-account-decoder = \"\)[^\"]*\(\"\)#\1=$solana_ver\2#g" "${tomls[@]}" || return $? 
@@ -30,6 +34,7 @@ patch_crates_io_solana() { solana-account-decoder = { path = "$solana_dir/account-decoder" } solana-clap-utils = { path = "$solana_dir/clap-utils" } solana-client = { path = "$solana_dir/client" } +solana-cli-config = { path = "$solana_dir/cli-config" } solana-program = { path = "$solana_dir/sdk/program" } solana-program-test = { path = "$solana_dir/program-test" } solana-sdk = { path = "$solana_dir/sdk" } diff --git a/sdk/cargo-build-sbf/Cargo.toml b/sdk/cargo-build-sbf/Cargo.toml index 0d96c4f94ebc53..36ce44d0d5c7d3 100644 --- a/sdk/cargo-build-sbf/Cargo.toml +++ b/sdk/cargo-build-sbf/Cargo.toml @@ -27,6 +27,7 @@ tar = { workspace = true } assert_cmd = { workspace = true } predicates = { workspace = true } serial_test = { workspace = true } +solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } [features] program = [] diff --git a/sdk/macro/src/lib.rs b/sdk/macro/src/lib.rs index f72dcdfcf8eb2f..157592dc37bcaa 100644 --- a/sdk/macro/src/lib.rs +++ b/sdk/macro/src/lib.rs @@ -430,7 +430,6 @@ pub fn derive_clone_zeroed(input: proc_macro::TokenStream) -> proc_macro::TokenS // implementations on `Copy` types are simply wrappers of `Copy`. // This is not the case here, and intentionally so because we want to // guarantee zeroed padding. - #[allow(clippy::incorrect_clone_impl_on_copy_type)] fn clone(&self) -> Self { let mut value = std::mem::MaybeUninit::::uninit(); unsafe { diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml index f608ed61943826..ccd18701eefcc4 100644 --- a/sdk/program/Cargo.toml +++ b/sdk/program/Cargo.toml @@ -15,6 +15,7 @@ rust-version = "1.72.0" # solana platform-tools rust version bincode = { workspace = true } blake3 = { workspace = true, features = ["digest", "traits-preview"] } borsh = { workspace = true } +borsh0-10 = { package = "borsh", version = "0.10.3" } borsh0-9 = { package = "borsh", version = "0.9.3" } bs58 = { workspace = true } bv = { workspace = true, features = ["serde"] } diff --git a/sdk/program/src/blake3.rs b/sdk/program/src/blake3.rs index d8351b06c6ad0d..cc50318e336c41 100644 --- a/sdk/program/src/blake3.rs +++ b/sdk/program/src/blake3.rs @@ -31,6 +31,7 @@ const MAX_BASE58_LEN: usize = 44; Hash, AbiExample, )] +#[borsh(crate = "borsh")] #[repr(transparent)] pub struct Hash(pub [u8; HASH_BYTES]); diff --git a/sdk/program/src/borsh.rs b/sdk/program/src/borsh.rs index 90ce42f661f82f..0041aa80602946 100644 --- a/sdk/program/src/borsh.rs +++ b/sdk/program/src/borsh.rs @@ -8,7 +8,7 @@ //! be removed in a future release //! //! [borsh]: https://borsh.io/ -use borsh::{maybestd::io::Error, BorshDeserialize, BorshSchema, BorshSerialize}; +use borsh0_10::{maybestd::io::Error, BorshDeserialize, BorshSchema, BorshSerialize}; /// Get the worst-case packed length for the given BorshSchema /// @@ -19,6 +19,7 @@ use borsh::{maybestd::io::Error, BorshDeserialize, BorshSchema, BorshSerialize}; note = "Please use `borsh0_10::get_packed_len` instead" )] pub fn get_packed_len() -> usize { + #[allow(deprecated)] crate::borsh0_10::get_packed_len::() } @@ -36,6 +37,7 @@ pub fn get_packed_len() -> usize { note = "Please use `borsh0_10::try_from_slice_unchecked` instead" )] pub fn try_from_slice_unchecked(data: &[u8]) -> Result { + #[allow(deprecated)] crate::borsh0_10::try_from_slice_unchecked::(data) } @@ -50,10 +52,11 @@ pub fn try_from_slice_unchecked(data: &[u8]) -> Result(instance: &T) -> Result { + #[allow(deprecated)] crate::borsh0_10::get_instance_packed_len(instance) } -macro_rules! impl_get_packed_len { +macro_rules! 
impl_get_packed_len_v0 { ($borsh:ident $(,#[$meta:meta])?) => { /// Get the worst-case packed length for the given BorshSchema /// @@ -113,10 +116,72 @@ macro_rules! impl_get_packed_len { } } } -pub(crate) use impl_get_packed_len; +pub(crate) use impl_get_packed_len_v0; -macro_rules! impl_try_from_slice_unchecked { +macro_rules! impl_get_packed_len_v1 { ($borsh:ident $(,#[$meta:meta])?) => { + /// Get the worst-case packed length for the given BorshSchema + /// + /// Note: due to the serializer currently used by Borsh, this function cannot + /// be used on-chain in the Solana SBF execution environment. + $(#[$meta])? + pub fn get_packed_len() -> usize { + let container = $borsh::schema_container_of::(); + get_declaration_packed_len(container.declaration(), &container) + } + + /// Get packed length for the given BorshSchema Declaration + fn get_declaration_packed_len( + declaration: &str, + container: &$borsh::schema::BorshSchemaContainer, + ) -> usize { + match container.get_definition(declaration) { + Some($borsh::schema::Definition::Sequence { length_width, length_range, elements }) if *length_width == 0 => { + *length_range.end() as usize * get_declaration_packed_len(elements, container) + } + Some($borsh::schema::Definition::Enum { tag_width, variants }) => { + (*tag_width as usize) + variants + .iter() + .map(|(_, _, declaration)| get_declaration_packed_len(declaration, container)) + .max() + .unwrap_or(0) + } + Some($borsh::schema::Definition::Struct { fields }) => match fields { + $borsh::schema::Fields::NamedFields(named_fields) => named_fields + .iter() + .map(|(_, declaration)| get_declaration_packed_len(declaration, container)) + .sum(), + $borsh::schema::Fields::UnnamedFields(declarations) => declarations + .iter() + .map(|declaration| get_declaration_packed_len(declaration, container)) + .sum(), + $borsh::schema::Fields::Empty => 0, + }, + Some($borsh::schema::Definition::Sequence { + .. + }) => panic!("Missing support for Definition::Sequence"), + Some($borsh::schema::Definition::Tuple { elements }) => elements + .iter() + .map(|element| get_declaration_packed_len(element, container)) + .sum(), + Some($borsh::schema::Definition::Primitive(size)) => *size as usize, + None => match declaration { + "bool" | "u8" | "i8" => 1, + "u16" | "i16" => 2, + "u32" | "i32" => 4, + "u64" | "i64" => 8, + "u128" | "i128" => 16, + "nil" => 0, + _ => panic!("Missing primitive type: {declaration}"), + }, + } + } + } +} +pub(crate) use impl_get_packed_len_v1; + +macro_rules! impl_try_from_slice_unchecked { + ($borsh:ident, $borsh_io:ident $(,#[$meta:meta])?) => { /// Deserializes without checking that the entire slice has been consumed /// /// Normally, `try_from_slice` checks the length of the final slice to ensure @@ -127,7 +192,7 @@ macro_rules! impl_try_from_slice_unchecked { /// user passes a buffer destined for a different type, the error won't get caught /// as easily. $(#[$meta])? - pub fn try_from_slice_unchecked(data: &[u8]) -> Result { + pub fn try_from_slice_unchecked(data: &[u8]) -> Result { let mut data_mut = data; let result = T::deserialize(&mut data_mut)?; Ok(result) @@ -137,21 +202,21 @@ macro_rules! impl_try_from_slice_unchecked { pub(crate) use impl_try_from_slice_unchecked; macro_rules! impl_get_instance_packed_len { - ($borsh:ident $(,#[$meta:meta])?) => { + ($borsh:ident, $borsh_io:ident $(,#[$meta:meta])?) 
=> { /// Helper struct which to count how much data would be written during serialization #[derive(Default)] struct WriteCounter { count: usize, } - impl $borsh::maybestd::io::Write for WriteCounter { - fn write(&mut self, data: &[u8]) -> Result { + impl $borsh_io::Write for WriteCounter { + fn write(&mut self, data: &[u8]) -> Result { let amount = data.len(); self.count += amount; Ok(amount) } - fn flush(&mut self) -> Result<(), $borsh::maybestd::io::Error> { + fn flush(&mut self) -> Result<(), $borsh_io::Error> { Ok(()) } } @@ -163,7 +228,7 @@ macro_rules! impl_get_instance_packed_len { /// length only from the type's schema, this can be used when an instance already /// exists, to figure out how much space to allocate in an account. $(#[$meta])? - pub fn get_instance_packed_len(instance: &T) -> Result { + pub fn get_instance_packed_len(instance: &T) -> Result { let mut counter = WriteCounter::default(); instance.serialize(&mut counter)?; Ok(counter.count) @@ -174,11 +239,13 @@ pub(crate) use impl_get_instance_packed_len; #[cfg(test)] macro_rules! impl_tests { - ($borsh:ident) => { + ($borsh:ident, $borsh_io:ident) => { + extern crate alloc; use { super::*, std::{collections::HashMap, mem::size_of}, - $borsh::{maybestd::io::ErrorKind, BorshDeserialize, BorshSerialize}, + $borsh::{BorshDeserialize, BorshSerialize}, + $borsh_io::ErrorKind, }; type Child = [u8; 64]; diff --git a/sdk/program/src/borsh0_10.rs b/sdk/program/src/borsh0_10.rs index f29640885e14d6..c7d190f820b366 100644 --- a/sdk/program/src/borsh0_10.rs +++ b/sdk/program/src/borsh0_10.rs @@ -2,16 +2,40 @@ //! Utilities for the [borsh] serialization format, version 0.10. //! //! [borsh]: https://borsh.io/ -use crate::borsh::{ - impl_get_instance_packed_len, impl_get_packed_len, impl_try_from_slice_unchecked, +use { + crate::borsh::{ + impl_get_instance_packed_len, impl_get_packed_len_v0, impl_try_from_slice_unchecked, + }, + borsh0_10::maybestd::io, }; -impl_get_packed_len!(borsh); -impl_try_from_slice_unchecked!(borsh); -impl_get_instance_packed_len!(borsh); +impl_get_packed_len_v0!( + borsh0_10, + #[deprecated( + since = "1.18.0", + note = "Please upgrade to Borsh 1.X and use `borsh1::get_packed_len` instead" + )] +); +impl_try_from_slice_unchecked!( + borsh0_10, + io, + #[deprecated( + since = "1.18.0", + note = "Please upgrade to Borsh 1.X and use `borsh1::try_from_slice_unchecked` instead" + )] +); +impl_get_instance_packed_len!( + borsh0_10, + io, + #[deprecated( + since = "1.18.0", + note = "Please upgrade to Borsh 1.X and use `borsh1::get_instance_packed_len` instead" + )] +); #[cfg(test)] +#[allow(deprecated)] mod tests { - use crate::borsh::impl_tests; - impl_tests!(borsh); + use {crate::borsh::impl_tests, borsh0_10::maybestd::io}; + impl_tests!(borsh0_10, io); } diff --git a/sdk/program/src/borsh0_9.rs b/sdk/program/src/borsh0_9.rs index dd9e401db189c9..d7d1e97013f898 100644 --- a/sdk/program/src/borsh0_9.rs +++ b/sdk/program/src/borsh0_9.rs @@ -5,35 +5,40 @@ //! borsh 0.9, even though this crate canonically uses borsh 0.10. //! //! 
[borsh]: https://borsh.io/ -use crate::borsh::{ - impl_get_instance_packed_len, impl_get_packed_len, impl_try_from_slice_unchecked, +use { + crate::borsh::{ + impl_get_instance_packed_len, impl_get_packed_len_v0, impl_try_from_slice_unchecked, + }, + borsh0_9::maybestd::io, }; -impl_get_packed_len!( +impl_get_packed_len_v0!( borsh0_9, #[deprecated( since = "1.17.0", - note = "Please upgrade to Borsh 0.10 and use `borsh0_10::get_packed_len` instead" + note = "Please upgrade to Borsh 1.X and use `borsh1::get_packed_len` instead" )] ); impl_try_from_slice_unchecked!( borsh0_9, + io, #[deprecated( since = "1.17.0", - note = "Please upgrade to Borsh 0.10 and use `borsh0_10::try_from_slice_unchecked` instead" + note = "Please upgrade to Borsh 1.X and use `borsh1::try_from_slice_unchecked` instead" )] ); impl_get_instance_packed_len!( borsh0_9, + io, #[deprecated( since = "1.17.0", - note = "Please upgrade to Borsh 0.10 and use `borsh0_10::get_instance_packed_len` instead" + note = "Please upgrade to Borsh 1.X and use `borsh1::get_instance_packed_len` instead" )] ); #[cfg(test)] #[allow(deprecated)] mod tests { - use crate::borsh::impl_tests; - impl_tests!(borsh0_9); + use {crate::borsh::impl_tests, borsh0_9::maybestd::io}; + impl_tests!(borsh0_9, io); } diff --git a/sdk/program/src/borsh1.rs b/sdk/program/src/borsh1.rs new file mode 100644 index 00000000000000..a44ea522494232 --- /dev/null +++ b/sdk/program/src/borsh1.rs @@ -0,0 +1,20 @@ +#![allow(clippy::arithmetic_side_effects)] +//! Utilities for the [borsh] serialization format, version 1. +//! +//! [borsh]: https://borsh.io/ +use { + crate::borsh::{ + impl_get_instance_packed_len, impl_get_packed_len_v1, impl_try_from_slice_unchecked, + }, + borsh::io, +}; + +impl_get_packed_len_v1!(borsh); +impl_try_from_slice_unchecked!(borsh, io); +impl_get_instance_packed_len!(borsh, io); + +#[cfg(test)] +mod tests { + use {crate::borsh::impl_tests, borsh::io}; + impl_tests!(borsh, io); +} diff --git a/sdk/program/src/epoch_schedule.rs b/sdk/program/src/epoch_schedule.rs index 672b0f15359f6e..b9de3310b76e74 100644 --- a/sdk/program/src/epoch_schedule.rs +++ b/sdk/program/src/epoch_schedule.rs @@ -29,7 +29,7 @@ pub const MAX_LEADER_SCHEDULE_EPOCH_OFFSET: u64 = 3; pub const MINIMUM_SLOTS_PER_EPOCH: u64 = 32; #[repr(C)] -#[derive(Debug, CloneZeroed, Copy, PartialEq, Eq, Deserialize, Serialize, AbiExample)] +#[derive(Debug, CloneZeroed, PartialEq, Eq, Deserialize, Serialize, AbiExample)] #[serde(rename_all = "camelCase")] pub struct EpochSchedule { /// The maximum number of slots in each epoch. 
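The hunks that follow repeatedly apply one borsh 1.x migration pattern: derives gain a `#[borsh(crate = "borsh")]` attribute so the macro resolves the re-exported crate path, and the `T::try_to_vec()` method is replaced by the free function `borsh::to_vec`. A minimal, self-contained sketch of that pattern (illustrative only; the `Payload` type and its values are assumptions, not code from this diff):

use borsh::{BorshDeserialize, BorshSerialize};

// Hypothetical type, used only to show the borsh 1.x derive attribute.
#[derive(BorshSerialize, BorshDeserialize, Debug, PartialEq)]
#[borsh(crate = "borsh")]
struct Payload {
    lamports: u64,
}

fn main() -> Result<(), borsh::io::Error> {
    let payload = Payload { lamports: 42 };
    // borsh 1.x: serialize with the free function instead of the removed `try_to_vec()`.
    let bytes = borsh::to_vec(&payload)?;
    // Round-trip to confirm the attribute does not change the wire format.
    let decoded = Payload::try_from_slice(&bytes)?;
    assert_eq!(payload, decoded);
    Ok(())
}

The attribute is only needed when the derive cannot assume the crate is reachable as `borsh` (for example, when it is re-exported through another crate, as `solana_program` does); with the crate in scope directly it is redundant but harmless.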
diff --git a/sdk/program/src/hash.rs b/sdk/program/src/hash.rs index 27d481b62b5441..288f696df31b93 100644 --- a/sdk/program/src/hash.rs +++ b/sdk/program/src/hash.rs @@ -46,6 +46,7 @@ const MAX_BASE58_LEN: usize = 44; Pod, Zeroable, )] +#[borsh(crate = "borsh")] #[repr(transparent)] pub struct Hash(pub(crate) [u8; HASH_BYTES]); diff --git a/sdk/program/src/instruction.rs b/sdk/program/src/instruction.rs index e68fc198a36642..21b3e774ae0b22 100644 --- a/sdk/program/src/instruction.rs +++ b/sdk/program/src/instruction.rs @@ -364,6 +364,7 @@ impl Instruction { /// # use borsh::{BorshSerialize, BorshDeserialize}; /// # /// #[derive(BorshSerialize, BorshDeserialize)] + /// # #[borsh(crate = "borsh")] /// pub struct MyInstruction { /// pub lamports: u64, /// } @@ -391,7 +392,7 @@ impl Instruction { data: &T, accounts: Vec, ) -> Self { - let data = data.try_to_vec().unwrap(); + let data = borsh::to_vec(data).unwrap(); Self { program_id, accounts, @@ -466,10 +467,10 @@ impl Instruction { /// # pubkey::Pubkey, /// # instruction::{AccountMeta, Instruction}, /// # }; - /// # use borsh::{BorshSerialize, BorshDeserialize}; - /// # use anyhow::Result; + /// # use borsh::{io::Error, BorshSerialize, BorshDeserialize}; /// # /// #[derive(BorshSerialize, BorshDeserialize)] + /// # #[borsh(crate = "borsh")] /// pub struct MyInstruction { /// pub lamports: u64, /// } @@ -479,7 +480,7 @@ impl Instruction { /// from: &Pubkey, /// to: &Pubkey, /// lamports: u64, - /// ) -> Result { + /// ) -> Result { /// let instr = MyInstruction { lamports }; /// /// let mut instr_in_bytes: Vec = Vec::new(); @@ -558,6 +559,7 @@ impl AccountMeta { /// # use borsh::{BorshSerialize, BorshDeserialize}; /// # /// # #[derive(BorshSerialize, BorshDeserialize)] + /// # #[borsh(crate = "borsh")] /// # pub struct MyInstruction; /// # /// # let instruction = MyInstruction; @@ -593,6 +595,7 @@ impl AccountMeta { /// # use borsh::{BorshSerialize, BorshDeserialize}; /// # /// # #[derive(BorshSerialize, BorshDeserialize)] + /// # #[borsh(crate = "borsh")] /// # pub struct MyInstruction; /// # /// # let instruction = MyInstruction; diff --git a/sdk/program/src/keccak.rs b/sdk/program/src/keccak.rs index 17829485c2bdac..6a1cfaf1113b7b 100644 --- a/sdk/program/src/keccak.rs +++ b/sdk/program/src/keccak.rs @@ -29,6 +29,7 @@ const MAX_BASE58_LEN: usize = 44; Hash, AbiExample, )] +#[borsh(crate = "borsh")] #[repr(transparent)] pub struct Hash(pub [u8; HASH_BYTES]); diff --git a/sdk/program/src/lib.rs b/sdk/program/src/lib.rs index 0dfc8c3247cc03..50438337436702 100644 --- a/sdk/program/src/lib.rs +++ b/sdk/program/src/lib.rs @@ -479,6 +479,7 @@ pub mod blake3; pub mod borsh; pub mod borsh0_10; pub mod borsh0_9; +pub mod borsh1; pub mod bpf_loader; pub mod bpf_loader_deprecated; pub mod bpf_loader_upgradeable; diff --git a/sdk/program/src/message/legacy.rs b/sdk/program/src/message/legacy.rs index e81c7c485ff5f6..c472c6ca4dbba1 100644 --- a/sdk/program/src/message/legacy.rs +++ b/sdk/program/src/message/legacy.rs @@ -26,7 +26,7 @@ use { }; lazy_static! { - // Copied keys over since direct references create cyclical dependency. + // This will be deprecated and so this list shouldn't be modified pub static ref BUILTIN_PROGRAMS_KEYS: [Pubkey; 10] = { let parse = |s| Pubkey::from_str(s).unwrap(); [ @@ -193,6 +193,7 @@ impl Message { /// // another crate so it can be shared between the on-chain program and /// // the client. 
/// #[derive(BorshSerialize, BorshDeserialize)] + /// # #[borsh(crate = "borsh")] /// enum BankInstruction { /// Initialize, /// Deposit { lamports: u64 }, @@ -264,6 +265,7 @@ impl Message { /// // another crate so it can be shared between the on-chain program and /// // the client. /// #[derive(BorshSerialize, BorshDeserialize)] + /// # #[borsh(crate = "borsh")] /// enum BankInstruction { /// Initialize, /// Deposit { lamports: u64 }, @@ -363,6 +365,7 @@ impl Message { /// // another crate so it can be shared between the on-chain program and /// // the client. /// #[derive(BorshSerialize, BorshDeserialize)] + /// # #[borsh(crate = "borsh")] /// enum BankInstruction { /// Initialize, /// Deposit { lamports: u64 }, diff --git a/sdk/program/src/program_error.rs b/sdk/program/src/program_error.rs index 6eb7e9ecd71981..0840ee16b901d7 100644 --- a/sdk/program/src/program_error.rs +++ b/sdk/program/src/program_error.rs @@ -3,7 +3,7 @@ #![allow(clippy::arithmetic_side_effects)] use { crate::{decode_error::DecodeError, instruction::InstructionError, msg, pubkey::PubkeyError}, - borsh::maybestd::io::Error as BorshIoError, + borsh::io::Error as BorshIoError, num_traits::{FromPrimitive, ToPrimitive}, std::convert::TryFrom, thiserror::Error, diff --git a/sdk/program/src/pubkey.rs b/sdk/program/src/pubkey.rs index ebbe5295036fc0..122c74dbeffdba 100644 --- a/sdk/program/src/pubkey.rs +++ b/sdk/program/src/pubkey.rs @@ -84,6 +84,7 @@ impl From for PubkeyError { Serialize, Zeroable, )] +#[borsh(crate = "borsh")] pub struct Pubkey(pub(crate) [u8; 32]); impl crate::sanitize::Sanitize for Pubkey {} @@ -328,6 +329,7 @@ impl Pubkey { /// // The computed address of the PDA will be passed to this program via /// // the `accounts` vector of the `Instruction` type. /// #[derive(BorshSerialize, BorshDeserialize, Debug)] + /// # #[borsh(crate = "borsh")] /// pub struct InstructionData { /// pub vault_bump_seed: u8, /// pub lamports: u64, @@ -409,6 +411,7 @@ impl Pubkey { /// # use anyhow::Result; /// # /// # #[derive(BorshSerialize, BorshDeserialize, Debug)] + /// # #[borsh(crate = "borsh")] /// # struct InstructionData { /// # pub vault_bump_seed: u8, /// # pub lamports: u64, @@ -668,47 +671,70 @@ impl fmt::Display for Pubkey { } } +impl borsh0_10::de::BorshDeserialize for Pubkey { + fn deserialize_reader( + reader: &mut R, + ) -> ::core::result::Result { + Ok(Self(borsh0_10::BorshDeserialize::deserialize_reader( + reader, + )?)) + } +} impl borsh0_9::de::BorshDeserialize for Pubkey { fn deserialize(buf: &mut &[u8]) -> ::core::result::Result { Ok(Self(borsh0_9::BorshDeserialize::deserialize(buf)?)) } } -impl borsh0_9::BorshSchema for Pubkey -where - [u8; 32]: borsh0_9::BorshSchema, -{ - fn declaration() -> borsh0_9::schema::Declaration { - "Pubkey".to_string() - } - fn add_definitions_recursively( - definitions: &mut borsh0_9::maybestd::collections::HashMap< - borsh0_9::schema::Declaration, - borsh0_9::schema::Definition, - >, - ) { - let fields = borsh0_9::schema::Fields::UnnamedFields(<[_]>::into_vec( - borsh0_9::maybestd::boxed::Box::new([ - <[u8; 32] as borsh0_9::BorshSchema>::declaration(), - ]), - )); - let definition = borsh0_9::schema::Definition::Struct { fields }; - ::add_definition( - ::declaration(), - definition, - definitions, - ); - <[u8; 32] as borsh0_9::BorshSchema>::add_definitions_recursively(definitions); - } + +macro_rules! 
impl_borsh_schema { + ($borsh:ident) => { + impl $borsh::BorshSchema for Pubkey + where + [u8; 32]: $borsh::BorshSchema, + { + fn declaration() -> $borsh::schema::Declaration { + "Pubkey".to_string() + } + fn add_definitions_recursively( + definitions: &mut $borsh::maybestd::collections::HashMap< + $borsh::schema::Declaration, + $borsh::schema::Definition, + >, + ) { + let fields = $borsh::schema::Fields::UnnamedFields(<[_]>::into_vec( + $borsh::maybestd::boxed::Box::new([ + <[u8; 32] as $borsh::BorshSchema>::declaration(), + ]), + )); + let definition = $borsh::schema::Definition::Struct { fields }; + ::add_definition( + ::declaration(), + definition, + definitions, + ); + <[u8; 32] as $borsh::BorshSchema>::add_definitions_recursively(definitions); + } + } + }; } -impl borsh0_9::ser::BorshSerialize for Pubkey { - fn serialize( - &self, - writer: &mut W, - ) -> ::core::result::Result<(), borsh0_9::maybestd::io::Error> { - borsh0_9::BorshSerialize::serialize(&self.0, writer)?; - Ok(()) - } +impl_borsh_schema!(borsh0_10); +impl_borsh_schema!(borsh0_9); + +macro_rules! impl_borsh_serialize { + ($borsh:ident) => { + impl $borsh::ser::BorshSerialize for Pubkey { + fn serialize( + &self, + writer: &mut W, + ) -> ::core::result::Result<(), $borsh::maybestd::io::Error> { + $borsh::BorshSerialize::serialize(&self.0, writer)?; + Ok(()) + } + } + }; } +impl_borsh_serialize!(borsh0_10); +impl_borsh_serialize!(borsh0_9); #[cfg(test)] mod tests { diff --git a/sdk/program/src/rent.rs b/sdk/program/src/rent.rs index 7257b9a2073ec7..f2c52a4d5a98ee 100644 --- a/sdk/program/src/rent.rs +++ b/sdk/program/src/rent.rs @@ -8,7 +8,7 @@ use {crate::clock::DEFAULT_SLOTS_PER_EPOCH, solana_sdk_macro::CloneZeroed}; /// Configuration of network rent. #[repr(C)] -#[derive(Serialize, Deserialize, PartialEq, CloneZeroed, Copy, Debug, AbiExample)] +#[derive(Serialize, Deserialize, PartialEq, CloneZeroed, Debug, AbiExample)] pub struct Rent { /// Rental rate in lamports/byte-year. pub lamports_per_byte_year: u64, diff --git a/sdk/program/src/secp256k1_recover.rs b/sdk/program/src/secp256k1_recover.rs index 5bca285c2f8849..8e2e3be058536f 100644 --- a/sdk/program/src/secp256k1_recover.rs +++ b/sdk/program/src/secp256k1_recover.rs @@ -78,6 +78,7 @@ pub const SECP256K1_PUBLIC_KEY_LENGTH: usize = 64; Hash, AbiExample, )] +#[borsh(crate = "borsh")] pub struct Secp256k1Pubkey(pub [u8; SECP256K1_PUBLIC_KEY_LENGTH]); impl Secp256k1Pubkey { @@ -254,6 +255,7 @@ impl Secp256k1Pubkey { /// use borsh::{BorshDeserialize, BorshSerialize}; /// /// #[derive(BorshSerialize, BorshDeserialize, Debug)] +/// # #[borsh(crate = "borsh")] /// pub struct DemoSecp256k1RecoverInstruction { /// pub message: Vec, /// pub signature: [u8; 64], @@ -348,6 +350,7 @@ impl Secp256k1Pubkey { /// }; /// # use borsh::{BorshDeserialize, BorshSerialize}; /// # #[derive(BorshSerialize, BorshDeserialize, Debug)] +/// # #[borsh(crate = "borsh")] /// # pub struct DemoSecp256k1RecoverInstruction { /// # pub message: Vec, /// # pub signature: [u8; 64], diff --git a/sdk/program/src/stake/stake_flags.rs b/sdk/program/src/stake/stake_flags.rs index a7d9d828fe2814..aa044ff928acb7 100644 --- a/sdk/program/src/stake/stake_flags.rs +++ b/sdk/program/src/stake/stake_flags.rs @@ -1,7 +1,6 @@ use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; /// Additional flags for stake state. 
-#[allow(dead_code)] #[derive( Serialize, Deserialize, @@ -18,12 +17,55 @@ use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; Hash, Debug, )] +#[borsh(crate = "borsh")] pub struct StakeFlags { bits: u8, } +impl borsh0_10::de::BorshDeserialize for StakeFlags { + fn deserialize_reader( + reader: &mut R, + ) -> ::core::result::Result { + Ok(Self { + bits: borsh0_10::BorshDeserialize::deserialize_reader(reader)?, + }) + } +} +impl borsh0_10::BorshSchema for StakeFlags { + fn declaration() -> borsh0_10::schema::Declaration { + "StakeFlags".to_string() + } + fn add_definitions_recursively( + definitions: &mut borsh0_10::maybestd::collections::HashMap< + borsh0_10::schema::Declaration, + borsh0_10::schema::Definition, + >, + ) { + let fields = borsh0_10::schema::Fields::NamedFields(<[_]>::into_vec( + borsh0_10::maybestd::boxed::Box::new([( + "bits".to_string(), + ::declaration(), + )]), + )); + let definition = borsh0_10::schema::Definition::Struct { fields }; + Self::add_definition( + ::declaration(), + definition, + definitions, + ); + ::add_definitions_recursively(definitions); + } +} +impl borsh0_10::ser::BorshSerialize for StakeFlags { + fn serialize( + &self, + writer: &mut W, + ) -> ::core::result::Result<(), borsh0_10::maybestd::io::Error> { + borsh0_10::BorshSerialize::serialize(&self.bits, writer)?; + Ok(()) + } +} /// Currently, only bit 1 is used. The other 7 bits are reserved for future usage. -#[allow(dead_code)] impl StakeFlags { /// Stake must be fully activated before deactivation is allowed (bit 1). pub const MUST_FULLY_ACTIVATE_BEFORE_DEACTIVATION_IS_PERMITTED: Self = @@ -52,7 +94,6 @@ impl StakeFlags { } } -#[allow(dead_code)] impl Default for StakeFlags { fn default() -> Self { StakeFlags::empty() diff --git a/sdk/program/src/stake/state.rs b/sdk/program/src/stake/state.rs index 4f94f73b3f2dd5..11652446bbd2ee 100644 --- a/sdk/program/src/stake/state.rs +++ b/sdk/program/src/stake/state.rs @@ -14,7 +14,7 @@ use { }, stake_history::{StakeHistory, StakeHistoryEntry}, }, - borsh::{maybestd::io, BorshDeserialize, BorshSchema, BorshSerialize}, + borsh::{io, BorshDeserialize, BorshSchema, BorshSerialize}, std::collections::HashSet, }; @@ -34,6 +34,49 @@ pub fn warmup_cooldown_rate(current_epoch: Epoch, new_rate_activation_epoch: Opt } } +macro_rules! 
impl_borsh_stake_state { + ($borsh:ident) => { + impl $borsh::BorshDeserialize for StakeState { + fn deserialize_reader(reader: &mut R) -> io::Result { + let enum_value: u32 = $borsh::BorshDeserialize::deserialize_reader(reader)?; + match enum_value { + 0 => Ok(StakeState::Uninitialized), + 1 => { + let meta: Meta = $borsh::BorshDeserialize::deserialize_reader(reader)?; + Ok(StakeState::Initialized(meta)) + } + 2 => { + let meta: Meta = $borsh::BorshDeserialize::deserialize_reader(reader)?; + let stake: Stake = $borsh::BorshDeserialize::deserialize_reader(reader)?; + Ok(StakeState::Stake(meta, stake)) + } + 3 => Ok(StakeState::RewardsPool), + _ => Err(io::Error::new( + io::ErrorKind::InvalidData, + "Invalid enum value", + )), + } + } + } + impl $borsh::BorshSerialize for StakeState { + fn serialize(&self, writer: &mut W) -> io::Result<()> { + match self { + StakeState::Uninitialized => writer.write_all(&0u32.to_le_bytes()), + StakeState::Initialized(meta) => { + writer.write_all(&1u32.to_le_bytes())?; + $borsh::BorshSerialize::serialize(&meta, writer) + } + StakeState::Stake(meta, stake) => { + writer.write_all(&2u32.to_le_bytes())?; + $borsh::BorshSerialize::serialize(&meta, writer)?; + $borsh::BorshSerialize::serialize(&stake, writer) + } + StakeState::RewardsPool => writer.write_all(&3u32.to_le_bytes()), + } + } + } + }; +} #[derive(Debug, Default, Serialize, Deserialize, PartialEq, Clone, Copy, AbiExample)] #[allow(clippy::large_enum_variant)] #[deprecated( @@ -47,45 +90,8 @@ pub enum StakeState { Stake(Meta, Stake), RewardsPool, } -impl BorshDeserialize for StakeState { - fn deserialize_reader(reader: &mut R) -> io::Result { - let enum_value = u32::deserialize_reader(reader)?; - match enum_value { - 0 => Ok(StakeState::Uninitialized), - 1 => { - let meta = Meta::deserialize_reader(reader)?; - Ok(StakeState::Initialized(meta)) - } - 2 => { - let meta: Meta = BorshDeserialize::deserialize_reader(reader)?; - let stake: Stake = BorshDeserialize::deserialize_reader(reader)?; - Ok(StakeState::Stake(meta, stake)) - } - 3 => Ok(StakeState::RewardsPool), - _ => Err(io::Error::new( - io::ErrorKind::InvalidData, - "Invalid enum value", - )), - } - } -} -impl BorshSerialize for StakeState { - fn serialize(&self, writer: &mut W) -> io::Result<()> { - match self { - StakeState::Uninitialized => writer.write_all(&0u32.to_le_bytes()), - StakeState::Initialized(meta) => { - writer.write_all(&1u32.to_le_bytes())?; - meta.serialize(writer) - } - StakeState::Stake(meta, stake) => { - writer.write_all(&2u32.to_le_bytes())?; - meta.serialize(writer)?; - stake.serialize(writer) - } - StakeState::RewardsPool => writer.write_all(&3u32.to_le_bytes()), - } - } -} +impl_borsh_stake_state!(borsh); +impl_borsh_stake_state!(borsh0_10); impl StakeState { /// The fixed number of bytes used to serialize each stake account pub const fn size_of() -> usize { @@ -136,49 +142,54 @@ pub enum StakeStateV2 { Stake(Meta, Stake, StakeFlags), RewardsPool, } - -impl BorshDeserialize for StakeStateV2 { - fn deserialize_reader(reader: &mut R) -> io::Result { - let enum_value = u32::deserialize_reader(reader)?; - match enum_value { - 0 => Ok(StakeStateV2::Uninitialized), - 1 => { - let meta = Meta::deserialize_reader(reader)?; - Ok(StakeStateV2::Initialized(meta)) - } - 2 => { - let meta: Meta = BorshDeserialize::deserialize_reader(reader)?; - let stake: Stake = BorshDeserialize::deserialize_reader(reader)?; - let stake_flags: StakeFlags = BorshDeserialize::deserialize_reader(reader)?; - Ok(StakeStateV2::Stake(meta, stake, 
stake_flags)) +macro_rules! impl_borsh_stake_state_v2 { + ($borsh:ident) => { + impl $borsh::BorshDeserialize for StakeStateV2 { + fn deserialize_reader(reader: &mut R) -> io::Result { + let enum_value: u32 = $borsh::BorshDeserialize::deserialize_reader(reader)?; + match enum_value { + 0 => Ok(StakeStateV2::Uninitialized), + 1 => { + let meta: Meta = $borsh::BorshDeserialize::deserialize_reader(reader)?; + Ok(StakeStateV2::Initialized(meta)) + } + 2 => { + let meta: Meta = $borsh::BorshDeserialize::deserialize_reader(reader)?; + let stake: Stake = $borsh::BorshDeserialize::deserialize_reader(reader)?; + let stake_flags: StakeFlags = + $borsh::BorshDeserialize::deserialize_reader(reader)?; + Ok(StakeStateV2::Stake(meta, stake, stake_flags)) + } + 3 => Ok(StakeStateV2::RewardsPool), + _ => Err(io::Error::new( + io::ErrorKind::InvalidData, + "Invalid enum value", + )), + } } - 3 => Ok(StakeStateV2::RewardsPool), - _ => Err(io::Error::new( - io::ErrorKind::InvalidData, - "Invalid enum value", - )), } - } -} - -impl BorshSerialize for StakeStateV2 { - fn serialize(&self, writer: &mut W) -> io::Result<()> { - match self { - StakeStateV2::Uninitialized => writer.write_all(&0u32.to_le_bytes()), - StakeStateV2::Initialized(meta) => { - writer.write_all(&1u32.to_le_bytes())?; - meta.serialize(writer) - } - StakeStateV2::Stake(meta, stake, stake_flags) => { - writer.write_all(&2u32.to_le_bytes())?; - meta.serialize(writer)?; - stake.serialize(writer)?; - stake_flags.serialize(writer) + impl $borsh::BorshSerialize for StakeStateV2 { + fn serialize(&self, writer: &mut W) -> io::Result<()> { + match self { + StakeStateV2::Uninitialized => writer.write_all(&0u32.to_le_bytes()), + StakeStateV2::Initialized(meta) => { + writer.write_all(&1u32.to_le_bytes())?; + $borsh::BorshSerialize::serialize(&meta, writer) + } + StakeStateV2::Stake(meta, stake, stake_flags) => { + writer.write_all(&2u32.to_le_bytes())?; + $borsh::BorshSerialize::serialize(&meta, writer)?; + $borsh::BorshSerialize::serialize(&stake, writer)?; + $borsh::BorshSerialize::serialize(&stake_flags, writer) + } + StakeStateV2::RewardsPool => writer.write_all(&3u32.to_le_bytes()), + } } - StakeStateV2::RewardsPool => writer.write_all(&3u32.to_le_bytes()), } - } + }; } +impl_borsh_stake_state_v2!(borsh); +impl_borsh_stake_state_v2!(borsh0_10); impl StakeStateV2 { /// The fixed number of bytes used to serialize each stake account @@ -241,6 +252,7 @@ pub enum StakeAuthorize { BorshSchema, BorshSerialize, )] +#[borsh(crate = "borsh")] pub struct Lockup { /// UnixTimestamp at which this stake will allow withdrawal, unless the /// transaction is signed by the custodian @@ -252,7 +264,6 @@ pub struct Lockup { /// lockup constraints pub custodian: Pubkey, } - impl Lockup { pub fn is_in_force(&self, clock: &Clock, custodian: Option<&Pubkey>) -> bool { if custodian == Some(&self.custodian) { @@ -261,6 +272,65 @@ impl Lockup { self.unix_timestamp > clock.unix_timestamp || self.epoch > clock.epoch } } +impl borsh0_10::de::BorshDeserialize for Lockup { + fn deserialize_reader( + reader: &mut R, + ) -> ::core::result::Result { + Ok(Self { + unix_timestamp: borsh0_10::BorshDeserialize::deserialize_reader(reader)?, + epoch: borsh0_10::BorshDeserialize::deserialize_reader(reader)?, + custodian: borsh0_10::BorshDeserialize::deserialize_reader(reader)?, + }) + } +} +impl borsh0_10::BorshSchema for Lockup { + fn declaration() -> borsh0_10::schema::Declaration { + "Lockup".to_string() + } + fn add_definitions_recursively( + definitions: &mut 
borsh0_10::maybestd::collections::HashMap< + borsh0_10::schema::Declaration, + borsh0_10::schema::Definition, + >, + ) { + let fields = borsh0_10::schema::Fields::NamedFields(<[_]>::into_vec( + borsh0_10::maybestd::boxed::Box::new([ + ( + "unix_timestamp".to_string(), + ::declaration(), + ), + ( + "epoch".to_string(), + ::declaration(), + ), + ( + "custodian".to_string(), + ::declaration(), + ), + ]), + )); + let definition = borsh0_10::schema::Definition::Struct { fields }; + Self::add_definition( + ::declaration(), + definition, + definitions, + ); + ::add_definitions_recursively(definitions); + ::add_definitions_recursively(definitions); + ::add_definitions_recursively(definitions); + } +} +impl borsh0_10::ser::BorshSerialize for Lockup { + fn serialize( + &self, + writer: &mut W, + ) -> ::core::result::Result<(), borsh0_10::maybestd::io::Error> { + borsh0_10::BorshSerialize::serialize(&self.unix_timestamp, writer)?; + borsh0_10::BorshSerialize::serialize(&self.epoch, writer)?; + borsh0_10::BorshSerialize::serialize(&self.custodian, writer)?; + Ok(()) + } +} #[derive( Default, @@ -276,6 +346,7 @@ impl Lockup { BorshSchema, BorshSerialize, )] +#[borsh(crate = "borsh")] pub struct Authorized { pub staker: Pubkey, pub withdrawer: Pubkey, @@ -341,6 +412,58 @@ impl Authorized { Ok(()) } } +impl borsh0_10::de::BorshDeserialize for Authorized { + fn deserialize_reader( + reader: &mut R, + ) -> ::core::result::Result { + Ok(Self { + staker: borsh0_10::BorshDeserialize::deserialize_reader(reader)?, + withdrawer: borsh0_10::BorshDeserialize::deserialize_reader(reader)?, + }) + } +} +impl borsh0_10::BorshSchema for Authorized { + fn declaration() -> borsh0_10::schema::Declaration { + "Authorized".to_string() + } + fn add_definitions_recursively( + definitions: &mut borsh0_10::maybestd::collections::HashMap< + borsh0_10::schema::Declaration, + borsh0_10::schema::Definition, + >, + ) { + let fields = borsh0_10::schema::Fields::NamedFields(<[_]>::into_vec( + borsh0_10::maybestd::boxed::Box::new([ + ( + "staker".to_string(), + ::declaration(), + ), + ( + "withdrawer".to_string(), + ::declaration(), + ), + ]), + )); + let definition = borsh0_10::schema::Definition::Struct { fields }; + Self::add_definition( + ::declaration(), + definition, + definitions, + ); + ::add_definitions_recursively(definitions); + ::add_definitions_recursively(definitions); + } +} +impl borsh0_10::ser::BorshSerialize for Authorized { + fn serialize( + &self, + writer: &mut W, + ) -> ::core::result::Result<(), borsh0_10::maybestd::io::Error> { + borsh0_10::BorshSerialize::serialize(&self.staker, writer)?; + borsh0_10::BorshSerialize::serialize(&self.withdrawer, writer)?; + Ok(()) + } +} #[derive( Default, @@ -356,6 +479,7 @@ impl Authorized { BorshSchema, BorshSerialize, )] +#[borsh(crate = "borsh")] pub struct Meta { pub rent_exempt_reserve: u64, pub authorized: Authorized, @@ -398,6 +522,65 @@ impl Meta { } } } +impl borsh0_10::de::BorshDeserialize for Meta { + fn deserialize_reader( + reader: &mut R, + ) -> ::core::result::Result { + Ok(Self { + rent_exempt_reserve: borsh0_10::BorshDeserialize::deserialize_reader(reader)?, + authorized: borsh0_10::BorshDeserialize::deserialize_reader(reader)?, + lockup: borsh0_10::BorshDeserialize::deserialize_reader(reader)?, + }) + } +} +impl borsh0_10::BorshSchema for Meta { + fn declaration() -> borsh0_10::schema::Declaration { + "Meta".to_string() + } + fn add_definitions_recursively( + definitions: &mut borsh0_10::maybestd::collections::HashMap< + borsh0_10::schema::Declaration, + 
borsh0_10::schema::Definition, + >, + ) { + let fields = borsh0_10::schema::Fields::NamedFields(<[_]>::into_vec( + borsh0_10::maybestd::boxed::Box::new([ + ( + "rent_exempt_reserve".to_string(), + ::declaration(), + ), + ( + "authorized".to_string(), + ::declaration(), + ), + ( + "lockup".to_string(), + ::declaration(), + ), + ]), + )); + let definition = borsh0_10::schema::Definition::Struct { fields }; + Self::add_definition( + ::declaration(), + definition, + definitions, + ); + ::add_definitions_recursively(definitions); + ::add_definitions_recursively(definitions); + ::add_definitions_recursively(definitions); + } +} +impl borsh0_10::ser::BorshSerialize for Meta { + fn serialize( + &self, + writer: &mut W, + ) -> ::core::result::Result<(), borsh0_10::maybestd::io::Error> { + borsh0_10::BorshSerialize::serialize(&self.rent_exempt_reserve, writer)?; + borsh0_10::BorshSerialize::serialize(&self.authorized, writer)?; + borsh0_10::BorshSerialize::serialize(&self.lockup, writer)?; + Ok(()) + } +} #[derive( Debug, @@ -411,6 +594,7 @@ impl Meta { BorshSchema, BorshSerialize, )] +#[borsh(crate = "borsh")] pub struct Delegation { /// to whom the stake is delegated pub voter_pubkey: Pubkey, @@ -644,6 +828,79 @@ impl Delegation { } } } +impl borsh0_10::de::BorshDeserialize for Delegation { + fn deserialize_reader( + reader: &mut R, + ) -> ::core::result::Result { + Ok(Self { + voter_pubkey: borsh0_10::BorshDeserialize::deserialize_reader(reader)?, + stake: borsh0_10::BorshDeserialize::deserialize_reader(reader)?, + activation_epoch: borsh0_10::BorshDeserialize::deserialize_reader(reader)?, + deactivation_epoch: borsh0_10::BorshDeserialize::deserialize_reader(reader)?, + warmup_cooldown_rate: borsh0_10::BorshDeserialize::deserialize_reader(reader)?, + }) + } +} +impl borsh0_10::BorshSchema for Delegation { + fn declaration() -> borsh0_10::schema::Declaration { + "Delegation".to_string() + } + fn add_definitions_recursively( + definitions: &mut borsh0_10::maybestd::collections::HashMap< + borsh0_10::schema::Declaration, + borsh0_10::schema::Definition, + >, + ) { + let fields = borsh0_10::schema::Fields::NamedFields(<[_]>::into_vec( + borsh0_10::maybestd::boxed::Box::new([ + ( + "voter_pubkey".to_string(), + ::declaration(), + ), + ( + "stake".to_string(), + ::declaration(), + ), + ( + "activation_epoch".to_string(), + ::declaration(), + ), + ( + "deactivation_epoch".to_string(), + ::declaration(), + ), + ( + "warmup_cooldown_rate".to_string(), + ::declaration(), + ), + ]), + )); + let definition = borsh0_10::schema::Definition::Struct { fields }; + Self::add_definition( + ::declaration(), + definition, + definitions, + ); + ::add_definitions_recursively(definitions); + ::add_definitions_recursively(definitions); + ::add_definitions_recursively(definitions); + ::add_definitions_recursively(definitions); + ::add_definitions_recursively(definitions); + } +} +impl borsh0_10::ser::BorshSerialize for Delegation { + fn serialize( + &self, + writer: &mut W, + ) -> ::core::result::Result<(), borsh0_10::maybestd::io::Error> { + borsh0_10::BorshSerialize::serialize(&self.voter_pubkey, writer)?; + borsh0_10::BorshSerialize::serialize(&self.stake, writer)?; + borsh0_10::BorshSerialize::serialize(&self.activation_epoch, writer)?; + borsh0_10::BorshSerialize::serialize(&self.deactivation_epoch, writer)?; + borsh0_10::BorshSerialize::serialize(&self.warmup_cooldown_rate, writer)?; + Ok(()) + } +} #[derive( Debug, @@ -658,6 +915,7 @@ impl Delegation { BorshSchema, BorshSerialize, )] +#[borsh(crate = "borsh")] pub 
struct Stake { pub delegation: Delegation, /// credits observed is credits from vote account state when delegated or redeemed @@ -703,11 +961,63 @@ impl Stake { } } } +impl borsh0_10::de::BorshDeserialize for Stake { + fn deserialize_reader( + reader: &mut R, + ) -> ::core::result::Result { + Ok(Self { + delegation: borsh0_10::BorshDeserialize::deserialize_reader(reader)?, + credits_observed: borsh0_10::BorshDeserialize::deserialize_reader(reader)?, + }) + } +} +impl borsh0_10::BorshSchema for Stake { + fn declaration() -> borsh0_10::schema::Declaration { + "Stake".to_string() + } + fn add_definitions_recursively( + definitions: &mut borsh0_10::maybestd::collections::HashMap< + borsh0_10::schema::Declaration, + borsh0_10::schema::Definition, + >, + ) { + let fields = borsh0_10::schema::Fields::NamedFields(<[_]>::into_vec( + borsh0_10::maybestd::boxed::Box::new([ + ( + "delegation".to_string(), + ::declaration(), + ), + ( + "credits_observed".to_string(), + ::declaration(), + ), + ]), + )); + let definition = borsh0_10::schema::Definition::Struct { fields }; + Self::add_definition( + ::declaration(), + definition, + definitions, + ); + ::add_definitions_recursively(definitions); + ::add_definitions_recursively(definitions); + } +} +impl borsh0_10::ser::BorshSerialize for Stake { + fn serialize( + &self, + writer: &mut W, + ) -> ::core::result::Result<(), borsh0_10::maybestd::io::Error> { + borsh0_10::BorshSerialize::serialize(&self.delegation, writer)?; + borsh0_10::BorshSerialize::serialize(&self.credits_observed, writer)?; + Ok(()) + } +} #[cfg(test)] mod test { use { - super::*, crate::borsh0_10::try_from_slice_unchecked, assert_matches::assert_matches, + super::*, crate::borsh1::try_from_slice_unchecked, assert_matches::assert_matches, bincode::serialize, }; @@ -719,7 +1029,7 @@ mod test { fn check_borsh_serialization(stake: StakeStateV2) { let bincode_serialized = serialize(&stake).unwrap(); - let borsh_serialized = StakeStateV2::try_to_vec(&stake).unwrap(); + let borsh_serialized = borsh::to_vec(&stake).unwrap(); assert_eq!(bincode_serialized, borsh_serialized); } @@ -850,7 +1160,7 @@ mod test { ); let bincode_serialized = serialize(&stake).unwrap(); - let borsh_serialized = StakeStateV2::try_to_vec(&stake).unwrap(); + let borsh_serialized = borsh::to_vec(&stake).unwrap(); assert_eq!(bincode_serialized[FLAG_OFFSET], expected); assert_eq!(borsh_serialized[FLAG_OFFSET], expected); @@ -872,7 +1182,7 @@ mod test { fn check_borsh_serialization(stake: StakeState) { let bincode_serialized = serialize(&stake).unwrap(); - let borsh_serialized = StakeState::try_to_vec(&stake).unwrap(); + let borsh_serialized = borsh::to_vec(&stake).unwrap(); assert_eq!(bincode_serialized, borsh_serialized); } diff --git a/sdk/program/src/system_instruction.rs b/sdk/program/src/system_instruction.rs index 74646f7fb7d331..da2065ccce9d69 100644 --- a/sdk/program/src/system_instruction.rs +++ b/sdk/program/src/system_instruction.rs @@ -378,6 +378,7 @@ pub enum SystemInstruction { /// }; /// /// #[derive(BorshSerialize, BorshDeserialize, Debug)] +/// # #[borsh(crate = "borsh")] /// pub struct CreateAccountInstruction { /// /// The PDA seed used to distinguish the new account from other PDAs /// pub new_account_seed: [u8; 16], @@ -594,6 +595,7 @@ pub fn create_account_with_seed( /// }; /// /// #[derive(BorshSerialize, BorshDeserialize, Debug)] +/// # #[borsh(crate = "borsh")] /// pub struct CreateAccountInstruction { /// /// The PDA seed used to distinguish the new account from other PDAs /// pub new_account_seed: 
[u8; 16], @@ -804,6 +806,7 @@ pub fn assign_with_seed( /// }; /// /// #[derive(BorshSerialize, BorshDeserialize, Debug)] +/// # #[borsh(crate = "borsh")] /// pub struct CreateAccountInstruction { /// /// The PDA seed used to distinguish the new account from other PDAs /// pub new_account_seed: [u8; 16], @@ -1023,6 +1026,7 @@ pub fn transfer_with_seed( /// }; /// /// #[derive(BorshSerialize, BorshDeserialize, Debug)] +/// # #[borsh(crate = "borsh")] /// pub struct CreateAccountInstruction { /// /// The PDA seed used to distinguish the new account from other PDAs /// pub new_account_seed: [u8; 16], @@ -1220,6 +1224,7 @@ pub fn allocate_with_seed( /// /// - 1: system_program - executable /// /// - *: to - writable /// #[derive(BorshSerialize, BorshDeserialize, Debug)] +/// # #[borsh(crate = "borsh")] /// pub struct TransferLamportsToManyInstruction { /// pub bank_pda_bump_seed: u8, /// pub amount_list: Vec, diff --git a/sdk/program/src/sysvar/mod.rs b/sdk/program/src/sysvar/mod.rs index 1bb7c12b33a728..5a5afec8bcf8a3 100644 --- a/sdk/program/src/sysvar/mod.rs +++ b/sdk/program/src/sysvar/mod.rs @@ -100,6 +100,7 @@ pub mod slot_history; pub mod stake_history; lazy_static! { + // This will be deprecated and so this list shouldn't be modified pub static ref ALL_IDS: Vec = vec![ clock::id(), epoch_schedule::id(), @@ -113,8 +114,6 @@ lazy_static! { slot_history::id(), stake_history::id(), instructions::id(), - epoch_rewards::id(), - last_restart_slot::id(), ]; } @@ -138,12 +137,6 @@ macro_rules! declare_sysvar_id( check_id(pubkey) } } - - #[cfg(test)] - #[test] - fn test_sysvar_id() { - assert!($crate::sysvar::is_sysvar_id(&id()), "sysvar::is_sysvar_id() doesn't know about {}", $name); - } ) ); @@ -164,12 +157,6 @@ macro_rules! declare_deprecated_sysvar_id( check_id(pubkey) } } - - #[cfg(test)] - #[test] - fn test_sysvar_id() { - assert!($crate::sysvar::is_sysvar_id(&id()), "sysvar::is_sysvar_id() doesn't know about {}", $name); - } ) ); diff --git a/sdk/src/account.rs b/sdk/src/account.rs index 327560f075ac5c..d94d7dc2c6b6e5 100644 --- a/sdk/src/account.rs +++ b/sdk/src/account.rs @@ -4,8 +4,10 @@ use qualifier_attr::qualifiers; use { crate::{ + bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, clock::{Epoch, INITIAL_RENT_EPOCH}, lamports::LamportsError, + loader_v4, pubkey::Pubkey, }, serde::{ @@ -754,6 +756,14 @@ pub fn create_is_signer_account_infos<'a>( .collect() } +/// Replacement for the executable flag: An account being owned by one of these contains a program. 
+pub const PROGRAM_OWNERS: &[Pubkey] = &[ + bpf_loader_upgradeable::id(), + bpf_loader::id(), + bpf_loader_deprecated::id(), + loader_v4::id(), +]; + #[cfg(test)] pub mod tests { use super::*; diff --git a/sdk/src/compute_budget.rs b/sdk/src/compute_budget.rs index 84d0c3766023c6..cf9ad7d436b929 100644 --- a/sdk/src/compute_budget.rs +++ b/sdk/src/compute_budget.rs @@ -63,9 +63,9 @@ impl ComputeBudgetInstruction { /// Serialize Instruction using borsh, this is only used in runtime::cost_model::tests but compilation /// can't be restricted as it's used across packages - // #[cfg(test)] - pub fn pack(self) -> Result, std::io::Error> { - self.try_to_vec() + #[cfg(feature = "dev-context-only-utils")] + pub fn pack(self) -> Result, borsh::io::Error> { + borsh::to_vec(&self) } /// Create a `ComputeBudgetInstruction::SetLoadedAccountsDataSizeLimit` `Instruction` diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index a8c486e1004f8a..2373eebf7f6d09 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -732,6 +732,10 @@ pub mod enable_zk_transfer_with_fee { solana_sdk::declare_id!("zkNLP7EQALfC1TYeB3biDU7akDckj8iPkvh9y2Mt2K3"); } +pub mod drop_legacy_shreds { + solana_sdk::declare_id!("GV49KKQdBNaiv2pgqhS2Dy3GWYJGXMTVYbYkdk91orRy"); +} + pub mod allow_commission_decrease_at_any_time { solana_sdk::declare_id!("decoMktMcnmiq6t3u7g5BfgcQu91nKZr6RvMYf9z1Jb"); } @@ -914,6 +918,7 @@ lazy_static! { (validate_fee_collector_account::id(), "validate fee collector account #33888"), (disable_rent_fees_collection::id(), "Disable rent fees collection #33945"), (enable_zk_transfer_with_fee::id(), "enable Zk Token proof program transfer with fee"), + (drop_legacy_shreds::id(), "drops legacy shreds #34328"), (allow_commission_decrease_at_any_time::id(), "Allow commission decrease at any time in epoch #33843"), /*************** ADD NEW FEATURES HERE ***************/ ] diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs index eaaea2d481cbd9..1b6763f8ea145f 100644 --- a/sdk/src/lib.rs +++ b/sdk/src/lib.rs @@ -46,15 +46,16 @@ pub use solana_program::address_lookup_table_account; pub use solana_program::program_stubs; pub use solana_program::{ account_info, address_lookup_table, alt_bn128, big_mod_exp, blake3, borsh, borsh0_10, borsh0_9, - bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, clock, config, custom_heap_default, - custom_panic_default, debug_account_data, declare_deprecated_sysvar_id, declare_sysvar_id, - decode_error, ed25519_program, epoch_rewards, epoch_schedule, fee_calculator, impl_sysvar_get, - incinerator, instruction, keccak, lamports, loader_instruction, loader_upgradeable_instruction, - loader_v4, loader_v4_instruction, message, msg, native_token, nonce, poseidon, program, - program_error, program_memory, program_option, program_pack, rent, sanitize, sdk_ids, - secp256k1_program, secp256k1_recover, serde_varint, serialize_utils, short_vec, slot_hashes, - slot_history, stable_layout, stake, stake_history, syscalls, system_instruction, - system_program, sysvar, unchecked_div_by_const, vote, wasm_bindgen, + borsh1, bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, clock, config, + custom_heap_default, custom_panic_default, debug_account_data, declare_deprecated_sysvar_id, + declare_sysvar_id, decode_error, ed25519_program, epoch_rewards, epoch_schedule, + fee_calculator, impl_sysvar_get, incinerator, instruction, keccak, lamports, + loader_instruction, loader_upgradeable_instruction, loader_v4, loader_v4_instruction, message, + msg, native_token, nonce, poseidon, 
program, program_error, program_memory, program_option, + program_pack, rent, sanitize, sdk_ids, secp256k1_program, secp256k1_recover, serde_varint, + serialize_utils, short_vec, slot_hashes, slot_history, stable_layout, stake, stake_history, + syscalls, system_instruction, system_program, sysvar, unchecked_div_by_const, vote, + wasm_bindgen, }; pub mod account; diff --git a/sdk/src/quic.rs b/sdk/src/quic.rs index d304d8fe6c528b..6e9e0a656c3396 100644 --- a/sdk/src/quic.rs +++ b/sdk/src/quic.rs @@ -1,5 +1,6 @@ +#![cfg(feature = "full")] //! Definitions related to Solana over QUIC. -use std::time::Duration; +use {crate::signer::keypair::Keypair, std::time::Duration}; pub const QUIC_PORT_OFFSET: u16 = 6; // Empirically found max number of concurrent streams @@ -35,3 +36,7 @@ pub const QUIC_MIN_STAKED_RECEIVE_WINDOW_RATIO: u64 = 128; /// The receive window for QUIC connection from maximum staked nodes is /// set to this ratio times [`solana_sdk::packet::PACKET_DATA_SIZE`] pub const QUIC_MAX_STAKED_RECEIVE_WINDOW_RATIO: u64 = 512; + +pub trait NotifyKeyUpdate { + fn update_key(&self, key: &Keypair) -> Result<(), Box>; +} diff --git a/sdk/src/transaction_context.rs b/sdk/src/transaction_context.rs index 77cbb831fb0561..7df7fc96d67933 100644 --- a/sdk/src/transaction_context.rs +++ b/sdk/src/transaction_context.rs @@ -489,7 +489,7 @@ impl InstructionContext { self.instruction_accounts.len() as IndexOfAccount } - /// Assert that enough account were supplied to this Instruction + /// Assert that enough accounts were supplied to this Instruction pub fn check_number_of_instruction_accounts( &self, expected_at_least: IndexOfAccount, @@ -1152,7 +1152,7 @@ pub struct ExecutionRecord { impl From for ExecutionRecord { fn from(context: TransactionContext) -> Self { let accounts = Rc::try_unwrap(context.accounts) - .expect("transaction_context.accounts has unexpectd outstanding refs"); + .expect("transaction_context.accounts has unexpected outstanding refs"); let touched_account_count = accounts.touched_count() as u64; let accounts = accounts.into_accounts(); Self { diff --git a/send-transaction-service/Cargo.toml b/send-transaction-service/Cargo.toml index 71431037f57471..35e76524d9017a 100644 --- a/send-transaction-service/Cargo.toml +++ b/send-transaction-service/Cargo.toml @@ -21,6 +21,7 @@ solana-tpu-client = { workspace = true } [dev-dependencies] solana-logger = { workspace = true } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/send-transaction-service/src/send_transaction_service.rs b/send-transaction-service/src/send_transaction_service.rs index 27aa1bea400ac4..896600e93123cb 100644 --- a/send-transaction-service/src/send_transaction_service.rs +++ b/send-transaction-service/src/send_transaction_service.rs @@ -892,28 +892,37 @@ mod test { solana_logger::setup(); let (genesis_config, mint_keypair) = create_genesis_config(4); - let bank = Bank::new_for_tests(&genesis_config); - let bank_forks = BankForks::new_rw_arc(bank); + let (_, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let tpu_address = "127.0.0.1:0".parse().unwrap(); let config = Config { leader_forward_count: 1, ..Config::default() }; - let root_bank = Arc::new(Bank::new_from_parent( + let root_bank = Bank::new_from_parent( bank_forks.read().unwrap().working_bank(), &Pubkey::default(), 1, - )); + ); + let root_bank = bank_forks + .write() + .unwrap() + .insert(root_bank) + .clone_without_scheduler(); + let 
rooted_signature = root_bank .transfer(1, &mint_keypair, &mint_keypair.pubkey()) .unwrap(); - let working_bank = Arc::new(Bank::new_from_parent( - root_bank.clone(), - &Pubkey::default(), - 2, - )); + let working_bank = bank_forks + .write() + .unwrap() + .insert(Bank::new_from_parent( + root_bank.clone(), + &Pubkey::default(), + 2, + )) + .clone_without_scheduler(); let non_rooted_signature = working_bank .transfer(2, &mint_keypair, &mint_keypair.pubkey()) @@ -1158,19 +1167,24 @@ mod test { solana_logger::setup(); let (genesis_config, mint_keypair) = create_genesis_config(4); - let bank = Bank::new_for_tests(&genesis_config); - let bank_forks = BankForks::new_rw_arc(bank); + let (_, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let tpu_address = "127.0.0.1:0".parse().unwrap(); let config = Config { leader_forward_count: 1, ..Config::default() }; - let root_bank = Arc::new(Bank::new_from_parent( + let root_bank = Bank::new_from_parent( bank_forks.read().unwrap().working_bank(), &Pubkey::default(), 1, - )); + ); + let root_bank = bank_forks + .write() + .unwrap() + .insert(root_bank) + .clone_without_scheduler(); + let rooted_signature = root_bank .transfer(1, &mint_keypair, &mint_keypair.pubkey()) .unwrap(); @@ -1184,11 +1198,15 @@ mod test { AccountSharedData::new_data(43, &nonce_state, &system_program::id()).unwrap(); root_bank.store_account(&nonce_address, &nonce_account); - let working_bank = Arc::new(Bank::new_from_parent( - root_bank.clone(), - &Pubkey::default(), - 2, - )); + let working_bank = bank_forks + .write() + .unwrap() + .insert(Bank::new_from_parent( + root_bank.clone(), + &Pubkey::default(), + 2, + )) + .clone_without_scheduler(); let non_rooted_signature = working_bank .transfer(2, &mint_keypair, &mint_keypair.pubkey()) .unwrap(); diff --git a/stake-accounts/Cargo.toml b/stake-accounts/Cargo.toml index 23a84addb4b70b..228be023b68e72 100644 --- a/stake-accounts/Cargo.toml +++ b/stake-accounts/Cargo.toml @@ -21,7 +21,7 @@ solana-stake-program = { workspace = true } solana-version = { workspace = true } [dev-dependencies] -solana-runtime = { workspace = true } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/stake-accounts/src/stake_accounts.rs b/stake-accounts/src/stake_accounts.rs index b40cb1dcec5b30..caf04fc438f8cf 100644 --- a/stake-accounts/src/stake_accounts.rs +++ b/stake-accounts/src/stake_accounts.rs @@ -292,11 +292,12 @@ mod tests { stake::state::StakeStateV2, }, solana_stake_program::stake_state, + std::sync::Arc, }; - fn create_bank(lamports: u64) -> (Bank, Keypair, u64, u64) { + fn create_bank(lamports: u64) -> (Arc, Keypair, u64, u64) { let (genesis_config, mint_keypair) = create_genesis_config(lamports); - let bank = Bank::new_for_tests(&genesis_config); + let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; let stake_rent = bank.get_minimum_balance_for_rent_exemption(StakeStateV2::size_of()); let system_rent = bank.get_minimum_balance_for_rent_exemption(0); (bank, mint_keypair, stake_rent, system_rent) @@ -355,7 +356,7 @@ mod tests { fn test_new_derived_stake_account() { let (bank, funding_keypair, stake_rent, system_rent) = create_bank(10_000_000); let funding_pubkey = funding_keypair.pubkey(); - let bank_client = BankClient::new(bank); + let bank_client = BankClient::new_shared(bank); let fee_payer_keypair = create_account(&bank_client, &funding_keypair, system_rent); let fee_payer_pubkey = 
fee_payer_keypair.pubkey(); @@ -392,7 +393,7 @@ mod tests { fn test_authorize_stake_accounts() { let (bank, funding_keypair, stake_rent, system_rent) = create_bank(10_000_000); let funding_pubkey = funding_keypair.pubkey(); - let bank_client = BankClient::new(bank); + let bank_client = BankClient::new_shared(bank); let fee_payer_keypair = create_account(&bank_client, &funding_keypair, system_rent); let fee_payer_pubkey = fee_payer_keypair.pubkey(); @@ -454,7 +455,7 @@ mod tests { fn test_lockup_stake_accounts() { let (bank, funding_keypair, stake_rent, system_rent) = create_bank(10_000_000); let funding_pubkey = funding_keypair.pubkey(); - let bank_client = BankClient::new(bank); + let bank_client = BankClient::new_shared(bank); let fee_payer_keypair = create_account(&bank_client, &funding_keypair, system_rent); let fee_payer_pubkey = fee_payer_keypair.pubkey(); @@ -545,7 +546,7 @@ mod tests { fn test_rebase_stake_accounts() { let (bank, funding_keypair, stake_rent, system_rent) = create_bank(10_000_000); let funding_pubkey = funding_keypair.pubkey(); - let bank_client = BankClient::new(bank); + let bank_client = BankClient::new_shared(bank); let fee_payer_keypair = create_account(&bank_client, &funding_keypair, system_rent); let fee_payer_pubkey = fee_payer_keypair.pubkey(); @@ -608,7 +609,7 @@ mod tests { fn test_move_stake_accounts() { let (bank, funding_keypair, stake_rent, system_rent) = create_bank(10_000_000); let funding_pubkey = funding_keypair.pubkey(); - let bank_client = BankClient::new(bank); + let bank_client = BankClient::new_shared(bank); let fee_payer_keypair = create_account(&bank_client, &funding_keypair, system_rent); let fee_payer_pubkey = fee_payer_keypair.pubkey(); diff --git a/storage-bigtable/init-bigtable.sh b/storage-bigtable/init-bigtable.sh index 3b988e2ef65f79..43ea293bb793ba 100755 --- a/storage-bigtable/init-bigtable.sh +++ b/storage-bigtable/init-bigtable.sh @@ -16,7 +16,7 @@ if [[ -n $BIGTABLE_EMULATOR_HOST ]]; then cbt+=(-project emulator) fi -for table in blocks tx tx-by-addr; do +for table in blocks entries tx tx-by-addr; do ( set -x "${cbt[@]}" createtable $table diff --git a/storage-bigtable/src/access_token.rs b/storage-bigtable/src/access_token.rs index f4d5e9ade98bee..8881f594acedcd 100644 --- a/storage-bigtable/src/access_token.rs +++ b/storage-bigtable/src/access_token.rs @@ -91,41 +91,49 @@ impl AccessToken { } /// Call this function regularly to ensure the access token does not expire - pub async fn refresh(&self) { + pub fn refresh(&self) { // Check if it's time to try a token refresh - { - let token_r = self.token.read().unwrap(); - if token_r.1.elapsed().as_secs() < token_r.0.expires_in() as u64 / 2 { - return; - } + let token_r = self.token.read().unwrap(); + if token_r.1.elapsed().as_secs() < token_r.0.expires_in() as u64 / 2 { + debug!("Token is not expired yet"); + return; + } + drop(token_r); - #[allow(deprecated)] - if self - .refresh_active - .compare_and_swap(false, true, Ordering::Relaxed) - { - // Refresh already pending - return; - } + // Refresh already is progress + let refresh_progress = + self.refresh_active + .compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed); + if refresh_progress.is_err() { + debug!("Token update is already in progress"); + return; } - info!("Refreshing token"); - match time::timeout( - time::Duration::from_secs(5), - Self::get_token(&self.credentials, &self.scope), - ) - .await - { - Ok(new_token) => match (new_token, self.token.write()) { - (Ok(new_token), Ok(mut token_w)) => *token_w = 
new_token, - (Ok(_new_token), Err(err)) => warn!("{}", err), - (Err(err), _) => warn!("{}", err), - }, - Err(_) => { - warn!("Token refresh timeout") + let credentials = self.credentials.clone(); + let scope = self.scope.clone(); + let refresh_active = Arc::clone(&self.refresh_active); + let token = Arc::clone(&self.token); + tokio::spawn(async move { + match time::timeout( + time::Duration::from_secs(5), + Self::get_token(&credentials, &scope), + ) + .await + { + Ok(new_token) => match new_token { + Ok(new_token) => { + let mut token_w = token.write().unwrap(); + *token_w = new_token; + } + Err(err) => error!("Failed to fetch new token: {}", err), + }, + Err(_timeout) => { + warn!("Token refresh timeout") + } } - } - self.refresh_active.store(false, Ordering::Relaxed); + refresh_active.store(false, Ordering::Relaxed); + info!("Token refreshed"); + }); } /// Return an access token suitable for use in an HTTP authorization header diff --git a/storage-bigtable/src/bigtable.rs b/storage-bigtable/src/bigtable.rs index e6df6d868b5f8e..3eeee6f6eb2cda 100644 --- a/storage-bigtable/src/bigtable.rs +++ b/storage-bigtable/src/bigtable.rs @@ -410,9 +410,9 @@ impl) -> InterceptedRequestResult> BigTable { Ok(rows) } - async fn refresh_access_token(&self) { + fn refresh_access_token(&self) { if let Some(ref access_token) = self.access_token { - access_token.refresh().await; + access_token.refresh(); } } @@ -434,7 +434,7 @@ impl) -> InterceptedRequestResult> BigTable { if rows_limit == 0 { return Ok(vec![]); } - self.refresh_access_token().await; + self.refresh_access_token(); let response = self .client .read_rows(ReadRowsRequest { @@ -479,7 +479,7 @@ impl) -> InterceptedRequestResult> BigTable { /// Check whether a row key exists in a `table` pub async fn row_key_exists(&mut self, table_name: &str, row_key: RowKey) -> Result { - self.refresh_access_token().await; + self.refresh_access_token(); let response = self .client @@ -524,7 +524,7 @@ impl) -> InterceptedRequestResult> BigTable { if rows_limit == 0 { return Ok(vec![]); } - self.refresh_access_token().await; + self.refresh_access_token(); let response = self .client .read_rows(ReadRowsRequest { @@ -558,7 +558,7 @@ impl) -> InterceptedRequestResult> BigTable { table_name: &str, row_keys: &[RowKey], ) -> Result> { - self.refresh_access_token().await; + self.refresh_access_token(); let response = self .client @@ -594,7 +594,7 @@ impl) -> InterceptedRequestResult> BigTable { table_name: &str, row_key: RowKey, ) -> Result { - self.refresh_access_token().await; + self.refresh_access_token(); let response = self .client @@ -623,7 +623,7 @@ impl) -> InterceptedRequestResult> BigTable { /// Delete one or more `table` rows async fn delete_rows(&mut self, table_name: &str, row_keys: &[RowKey]) -> Result<()> { - self.refresh_access_token().await; + self.refresh_access_token(); let mut entries = vec![]; for row_key in row_keys { @@ -669,7 +669,7 @@ impl) -> InterceptedRequestResult> BigTable { family_name: &str, row_data: &[(&RowKey, RowData)], ) -> Result<()> { - self.refresh_access_token().await; + self.refresh_access_token(); let mut entries = vec![]; for (row_key, row_data) in row_data { @@ -746,6 +746,14 @@ impl) -> InterceptedRequestResult> BigTable { .collect()) } + pub async fn get_protobuf_cell
<P>(&mut self, table: &str, key: RowKey) -> Result<P>
+ where + P: prost::Message + Default, + { + let row_data = self.get_single_row_data(table, key.clone()).await?; + deserialize_protobuf_cell_data(&row_data, table, key.to_string()) + } + pub async fn get_protobuf_or_bincode_cell( &mut self, table: &str, diff --git a/storage-bigtable/src/lib.rs b/storage-bigtable/src/lib.rs index 0b8ed4d3a593c3..1feba4d93fbb21 100644 --- a/storage-bigtable/src/lib.rs +++ b/storage-bigtable/src/lib.rs @@ -15,12 +15,13 @@ use { timing::AtomicInterval, transaction::{TransactionError, VersionedTransaction}, }, - solana_storage_proto::convert::{generated, tx_by_addr}, + solana_storage_proto::convert::{entries, generated, tx_by_addr}, solana_transaction_status::{ extract_and_fmt_memos, ConfirmedBlock, ConfirmedTransactionStatusWithSignature, - ConfirmedTransactionWithStatusMeta, Reward, TransactionByAddrInfo, + ConfirmedTransactionWithStatusMeta, EntrySummary, Reward, TransactionByAddrInfo, TransactionConfirmationStatus, TransactionStatus, TransactionStatusMeta, - TransactionWithStatusMeta, VersionedConfirmedBlock, VersionedTransactionWithStatusMeta, + TransactionWithStatusMeta, VersionedConfirmedBlock, VersionedConfirmedBlockWithEntries, + VersionedTransactionWithStatusMeta, }, std::{ collections::{HashMap, HashSet}, @@ -91,6 +92,10 @@ fn slot_to_blocks_key(slot: Slot) -> String { slot_to_key(slot) } +fn slot_to_entries_key(slot: Slot) -> String { + slot_to_key(slot) +} + fn slot_to_tx_by_addr_key(slot: Slot) -> String { slot_to_key(!slot) } @@ -606,6 +611,25 @@ impl LedgerStorage { Ok(block_exists) } + /// Fetches a vector of block entries via a multirow fetch + pub async fn get_entries(&self, slot: Slot) -> Result> { + trace!( + "LedgerStorage::get_block_entries request received: {:?}", + slot + ); + self.stats.increment_num_queries(); + let mut bigtable = self.connection.client(); + let entry_cell_data = bigtable + .get_protobuf_cell::("entries", slot_to_entries_key(slot)) + .await + .map_err(|err| match err { + bigtable::Error::RowNotFound => Error::BlockNotFound(slot), + _ => err.into(), + })?; + let entries = entry_cell_data.entries.into_iter().map(Into::into); + Ok(entries) + } + pub async fn get_signature_status(&self, signature: &Signature) -> Result { trace!( "LedgerStorage::get_signature_status request received: {:?}", @@ -883,9 +907,32 @@ impl LedgerStorage { "LedgerStorage::upload_confirmed_block request received: {:?}", slot ); + self.upload_confirmed_block_with_entries( + slot, + VersionedConfirmedBlockWithEntries { + block: confirmed_block, + entries: vec![], + }, + ) + .await + } + + pub async fn upload_confirmed_block_with_entries( + &self, + slot: Slot, + confirmed_block: VersionedConfirmedBlockWithEntries, + ) -> Result<()> { + trace!( + "LedgerStorage::upload_confirmed_block_with_entries request received: {:?}", + slot + ); let mut by_addr: HashMap<&Pubkey, Vec> = HashMap::new(); + let VersionedConfirmedBlockWithEntries { + block: confirmed_block, + entries, + } = confirmed_block; - let mut tx_cells = vec![]; + let mut tx_cells = Vec::with_capacity(confirmed_block.transactions.len()); for (index, transaction_with_meta) in confirmed_block.transactions.iter().enumerate() { let VersionedTransactionWithStatusMeta { meta, transaction } = transaction_with_meta; let err = meta.status.clone().err(); @@ -934,6 +981,14 @@ impl LedgerStorage { }) .collect(); + let num_entries = entries.len(); + let entry_cell = ( + slot_to_entries_key(slot), + entries::Entries { + entries: entries.into_iter().enumerate().map(Into::into).collect(), + }, + ); + let mut 
tasks = vec![]; if !tx_cells.is_empty() { @@ -955,6 +1010,14 @@ impl LedgerStorage { })); } + if num_entries > 0 { + let conn = self.connection.clone(); + tasks.push(tokio::spawn(async move { + conn.put_protobuf_cells_with_retry::("entries", &[entry_cell]) + .await + })); + } + let mut bytes_written = 0; let mut maybe_first_err: Option = None; @@ -995,6 +1058,7 @@ impl LedgerStorage { "storage-bigtable-upload-block", ("slot", slot, i64), ("transactions", num_transactions, i64), + ("entries", num_entries, i64), ("bytes", bytes_written, i64), ); Ok(()) @@ -1088,6 +1152,13 @@ impl LedgerStorage { vec![] }; + let entries_exist = self + .connection + .client() + .row_key_exists("entries", slot_to_entries_key(slot)) + .await + .is_ok_and(|x| x); + if !dry_run { if !address_slot_rows.is_empty() { self.connection @@ -1101,17 +1172,24 @@ impl LedgerStorage { .await?; } + if entries_exist { + self.connection + .delete_rows_with_retry("entries", &[slot_to_entries_key(slot)]) + .await?; + } + self.connection .delete_rows_with_retry("blocks", &[slot_to_blocks_key(slot)]) .await?; } info!( - "{}deleted ledger data for slot {}: {} transaction rows, {} address slot rows", + "{}deleted ledger data for slot {}: {} transaction rows, {} address slot rows, {} entry row", if dry_run { "[dry run] " } else { "" }, slot, tx_deletion_rows.len(), - address_slot_rows.len() + address_slot_rows.len(), + if entries_exist { "with" } else {"WITHOUT"} ); Ok(()) diff --git a/storage-proto/build.rs b/storage-proto/build.rs index 947f562c1c6f74..583a95650e6f3a 100644 --- a/storage-proto/build.rs +++ b/storage-proto/build.rs @@ -6,7 +6,11 @@ fn main() -> Result<(), std::io::Error> { } let proto_base_path = std::path::PathBuf::from("proto"); - let proto_files = ["confirmed_block.proto", "transaction_by_addr.proto"]; + let proto_files = [ + "confirmed_block.proto", + "entries.proto", + "transaction_by_addr.proto", + ]; let mut protos = Vec::new(); for proto_file in &proto_files { let proto = proto_base_path.join(proto_file); diff --git a/storage-proto/proto/entries.proto b/storage-proto/proto/entries.proto new file mode 100644 index 00000000000000..64108925ad0ca6 --- /dev/null +++ b/storage-proto/proto/entries.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +package solana.storage.Entries; + +message Entries { + repeated Entry entries = 1; +} + +message Entry { + uint32 index = 1; + uint64 num_hashes = 2; + bytes hash = 3; + uint64 num_transactions = 4; + uint32 starting_transaction_index = 5; +} diff --git a/storage-proto/src/convert.rs b/storage-proto/src/convert.rs index 7ca5728d398ec1..e9070951942e2c 100644 --- a/storage-proto/src/convert.rs +++ b/storage-proto/src/convert.rs @@ -15,7 +15,7 @@ use { transaction_context::TransactionReturnData, }, solana_transaction_status::{ - ConfirmedBlock, InnerInstruction, InnerInstructions, Reward, RewardType, + ConfirmedBlock, EntrySummary, InnerInstruction, InnerInstructions, Reward, RewardType, TransactionByAddrInfo, TransactionStatusMeta, TransactionTokenBalance, TransactionWithStatusMeta, VersionedConfirmedBlock, VersionedTransactionWithStatusMeta, }, @@ -41,6 +41,11 @@ pub mod tx_by_addr { )); } +#[allow(clippy::derive_partial_eq_without_eq)] +pub mod entries { + include!(concat!(env!("OUT_DIR"), "/solana.storage.entries.rs")); +} + impl From> for generated::Rewards { fn from(rewards: Vec) -> Self { Self { @@ -1189,6 +1194,29 @@ impl TryFrom for Vec { } } +impl From<(usize, EntrySummary)> for entries::Entry { + fn from((index, entry_summary): (usize, EntrySummary)) -> Self { + 
entries::Entry { + index: index as u32, + num_hashes: entry_summary.num_hashes, + hash: entry_summary.hash.as_ref().into(), + num_transactions: entry_summary.num_transactions, + starting_transaction_index: entry_summary.starting_transaction_index as u32, + } + } +} + +impl From for EntrySummary { + fn from(entry: entries::Entry) -> Self { + EntrySummary { + num_hashes: entry.num_hashes, + hash: Hash::new(&entry.hash), + num_transactions: entry.num_transactions, + starting_transaction_index: entry.starting_transaction_index as usize, + } + } +} + #[cfg(test)] mod test { use {super::*, enum_iterator::all}; diff --git a/streamer/src/quic.rs b/streamer/src/quic.rs index fee0db110f11ec..41a15feaf524b7 100644 --- a/streamer/src/quic.rs +++ b/streamer/src/quic.rs @@ -10,7 +10,7 @@ use { solana_perf::packet::PacketBatch, solana_sdk::{ packet::PACKET_DATA_SIZE, - quic::{QUIC_MAX_TIMEOUT, QUIC_MAX_UNSTAKED_CONCURRENT_STREAMS}, + quic::{NotifyKeyUpdate, QUIC_MAX_TIMEOUT, QUIC_MAX_UNSTAKED_CONCURRENT_STREAMS}, signature::Keypair, }, std::{ @@ -36,6 +36,12 @@ impl SkipClientVerification { } } +pub struct SpawnServerResult { + pub endpoint: Endpoint, + pub thread: thread::JoinHandle<()>, + pub key_updater: Arc, +} + impl rustls::server::ClientCertVerifier for SkipClientVerification { fn client_auth_root_subjects(&self) -> &[DistinguishedName] { &[] @@ -113,6 +119,19 @@ pub enum QuicServerError { TlsError(#[from] rustls::Error), } +pub struct EndpointKeyUpdater { + endpoint: Endpoint, + gossip_host: IpAddr, +} + +impl NotifyKeyUpdate for EndpointKeyUpdater { + fn update_key(&self, key: &Keypair) -> Result<(), Box> { + let (config, _) = configure_server(key, self.gossip_host)?; + self.endpoint.set_server_config(Some(config)); + Ok(()) + } +} + #[derive(Default)] pub struct StreamStats { pub(crate) total_connections: AtomicUsize, @@ -404,7 +423,7 @@ pub fn spawn_server( max_unstaked_connections: usize, wait_for_chunk_timeout: Duration, coalesce: Duration, -) -> Result<(Endpoint, thread::JoinHandle<()>), QuicServerError> { +) -> Result { let runtime = rt(); let (endpoint, _stats, task) = { let _guard = runtime.enter(); @@ -431,7 +450,15 @@ pub fn spawn_server( } }) .unwrap(); - Ok((endpoint, handle)) + let updater = EndpointKeyUpdater { + endpoint: endpoint.clone(), + gossip_host, + }; + Ok(SpawnServerResult { + endpoint, + thread: handle, + key_updater: Arc::new(updater), + }) } #[cfg(test)] @@ -457,7 +484,11 @@ mod test { let ip = "127.0.0.1".parse().unwrap(); let server_address = s.local_addr().unwrap(); let staked_nodes = Arc::new(RwLock::new(StakedNodes::default())); - let (_, t) = spawn_server( + let SpawnServerResult { + endpoint: _, + thread: t, + key_updater: _, + } = spawn_server( "quic_streamer_test", s, &keypair, @@ -513,7 +544,11 @@ mod test { let ip = "127.0.0.1".parse().unwrap(); let server_address = s.local_addr().unwrap(); let staked_nodes = Arc::new(RwLock::new(StakedNodes::default())); - let (_, t) = spawn_server( + let SpawnServerResult { + endpoint: _, + thread: t, + key_updater: _, + } = spawn_server( "quic_streamer_test", s, &keypair, @@ -556,7 +591,11 @@ mod test { let ip = "127.0.0.1".parse().unwrap(); let server_address = s.local_addr().unwrap(); let staked_nodes = Arc::new(RwLock::new(StakedNodes::default())); - let (_, t) = spawn_server( + let SpawnServerResult { + endpoint: _, + thread: t, + key_updater: _, + } = spawn_server( "quic_streamer_test", s, &keypair, diff --git a/test-validator/src/lib.rs b/test-validator/src/lib.rs index e807f80c9692f0..9f994eee9a19df 100644 --- 
a/test-validator/src/lib.rs +++ b/test-validator/src/lib.rs @@ -777,12 +777,14 @@ impl TestValidator { validator_stake_lamports, validator_identity_lamports, config.fee_rate_governor.clone(), - config.rent, + config.rent.clone(), solana_sdk::genesis_config::ClusterType::Development, accounts.into_iter().collect(), ); genesis_config.epoch_schedule = config .epoch_schedule + .as_ref() + .cloned() .unwrap_or_else(EpochSchedule::without_warmup); if let Some(ticks_per_slot) = config.ticks_per_slot { diff --git a/transaction-dos/src/main.rs b/transaction-dos/src/main.rs index 5d69e9e291b6b5..afbae094e9a718 100644 --- a/transaction-dos/src/main.rs +++ b/transaction-dos/src/main.rs @@ -238,6 +238,7 @@ fn run_transactions_dos( config.signers = vec![payer_keypairs[0], &program_keypair]; config.command = CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(program_location), + fee_payer_signer_index: 0, program_signer_index: Some(1), program_pubkey: None, buffer_signer_index: None, diff --git a/transaction-status/Cargo.toml b/transaction-status/Cargo.toml index 3c830f591403fe..a4837acb3e107d 100644 --- a/transaction-status/Cargo.toml +++ b/transaction-status/Cargo.toml @@ -13,7 +13,9 @@ edition = { workspace = true } Inflector = { workspace = true } base64 = { workspace = true } bincode = { workspace = true } -borsh = { workspace = true } +# Update this borsh dependency to the workspace version once +# spl-associated-token-account is upgraded and used in the monorepo. +borsh = { version = "0.10.3" } bs58 = { workspace = true } lazy_static = { workspace = true } log = { workspace = true } diff --git a/turbine/Cargo.toml b/turbine/Cargo.toml index 8562ab6525a069..e205c10bf6608f 100644 --- a/turbine/Cargo.toml +++ b/turbine/Cargo.toml @@ -43,6 +43,7 @@ tokio = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } solana-logger = { workspace = true } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } [[bench]] name = "cluster_info" diff --git a/turbine/src/quic_endpoint.rs b/turbine/src/quic_endpoint.rs index e8a316420b42d8..a947f212296fb7 100644 --- a/turbine/src/quic_endpoint.rs +++ b/turbine/src/quic_endpoint.rs @@ -435,10 +435,21 @@ async fn send_datagram_task( connection: Connection, mut receiver: AsyncReceiver, ) -> Result<(), Error> { - while let Some(bytes) = receiver.recv().await { - connection.send_datagram(bytes)?; + tokio::pin! { + let connection_closed = connection.closed(); + } + loop { + tokio::select! 
{ + biased; + bytes = receiver.recv() => { + match bytes { + None => return Ok(()), + Some(bytes) => connection.send_datagram(bytes)?, + } + } + err = &mut connection_closed => return Err(Error::from(err)), + } } - Ok(()) } async fn make_connection_task( diff --git a/udp-client/src/lib.rs b/udp-client/src/lib.rs index 06eeca00185898..bb0c897a3dbc7a 100644 --- a/udp-client/src/lib.rs +++ b/udp-client/src/lib.rs @@ -15,6 +15,7 @@ use { }, connection_cache_stats::ConnectionCacheStats, }, + solana_sdk::signature::Keypair, std::{ net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket}, sync::Arc, @@ -112,4 +113,8 @@ impl ConnectionManager for UdpConnectionManager { fn new_connection_config(&self) -> Self::NewConnectionConfig { UdpConfig::new().unwrap() } + + fn update_key(&self, _key: &Keypair) -> Result<(), Box> { + Ok(()) + } } diff --git a/validator/Cargo.toml b/validator/Cargo.toml index 845bdda7eeab6b..6c7f691c27b5fa 100644 --- a/validator/Cargo.toml +++ b/validator/Cargo.toml @@ -67,6 +67,7 @@ thiserror = { workspace = true } [dev-dependencies] solana-account-decoder = { workspace = true } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } spl-token-2022 = { workspace = true, features = ["no-entrypoint"] } [target.'cfg(not(target_env = "msvc"))'.dependencies] diff --git a/validator/src/admin_rpc_service.rs b/validator/src/admin_rpc_service.rs index 69584822097c45..67f2309a9c98bc 100644 --- a/validator/src/admin_rpc_service.rs +++ b/validator/src/admin_rpc_service.rs @@ -682,6 +682,12 @@ impl AdminRpcImpl { })?; } + for n in post_init.notifies.iter() { + if let Err(err) = n.update_key(&identity_keypair) { + error!("Error updating network layer keypair: {err}"); + } + } + solana_metrics::set_host_id(identity_keypair.pubkey().to_string()); post_init .cluster_info @@ -888,6 +894,7 @@ mod tests { bank_forks: bank_forks.clone(), vote_account, repair_whitelist, + notifies: Vec::new(), }))), staked_nodes_overrides: Arc::new(RwLock::new(HashMap::new())), rpc_to_plugin_manager_sender: None, diff --git a/validator/src/cli.rs b/validator/src/cli.rs index 34b7359b2f1eef..eb680e8069e9e1 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -1279,23 +1279,6 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { May be specified multiple times. \ [default: [ledger]/accounts_index]"), ) - .arg(Arg::with_name("accounts_filler_count") - .long("accounts-filler-count") - .value_name("COUNT") - .validator(is_parsable::) - .takes_value(true) - .default_value(&default_args.accounts_filler_count) - .help("How many accounts to add to stress the system. 
Accounts are ignored in operations related to correctness.") - .hidden(hidden_unless_forced())) - .arg(Arg::with_name("accounts_filler_size") - .long("accounts-filler-size") - .value_name("BYTES") - .validator(is_parsable::) - .takes_value(true) - .default_value(&default_args.accounts_filler_size) - .requires("accounts_filler_count") - .help("Size per filler account in bytes.") - .hidden(hidden_unless_forced())) .arg( Arg::with_name("accounts_db_test_hash_calculation") .long("accounts-db-test-hash-calculation") @@ -1957,8 +1940,6 @@ pub struct DefaultArgs { pub contact_debug_interval: String, - pub accounts_filler_count: String, - pub accounts_filler_size: String, pub accountsdb_repl_threads: String, pub snapshot_version: SnapshotVersion, @@ -2032,8 +2013,6 @@ impl DefaultArgs { .to_string(), rpc_pubsub_worker_threads: "4".to_string(), accountsdb_repl_threads: num_cpus::get().to_string(), - accounts_filler_count: "0".to_string(), - accounts_filler_size: "0".to_string(), maximum_full_snapshot_archives_to_retain: DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN .to_string(), maximum_incremental_snapshot_archives_to_retain: diff --git a/validator/src/main.rs b/validator/src/main.rs index 0037fea465d50f..fc39971c3008f4 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -8,10 +8,7 @@ use { log::*, rand::{seq::SliceRandom, thread_rng}, solana_accounts_db::{ - accounts_db::{ - AccountShrinkThreshold, AccountsDb, AccountsDbConfig, CreateAncientStorage, - FillerAccountsConfig, - }, + accounts_db::{AccountShrinkThreshold, AccountsDb, AccountsDbConfig, CreateAncientStorage}, accounts_index::{ AccountIndex, AccountSecondaryIndexes, AccountSecondaryIndexesIncludeExclude, AccountsIndexConfig, IndexLimitMb, @@ -1186,16 +1183,10 @@ pub fn main() { .ok() .map(|mb| mb * MB); - let filler_accounts_config = FillerAccountsConfig { - count: value_t_or_exit!(matches, "accounts_filler_count", usize), - size: value_t_or_exit!(matches, "accounts_filler_size", usize), - }; - let accounts_db_config = AccountsDbConfig { index: Some(accounts_index_config), base_working_path: Some(ledger_path.clone()), accounts_hash_cache_path: Some(accounts_hash_cache_path), - filler_accounts_config, write_cache_limit_bytes: value_t!(matches, "accounts_db_cache_limit_mb", u64) .ok() .map(|mb| mb * MB as u64), diff --git a/zk-token-sdk/src/errors.rs b/zk-token-sdk/src/errors.rs index 328a68ca755437..ad43b680dc9b0d 100644 --- a/zk-token-sdk/src/errors.rs +++ b/zk-token-sdk/src/errors.rs @@ -36,6 +36,8 @@ pub enum ProofVerificationError { ElGamal(#[from] ElGamalError), #[error("Invalid proof context")] ProofContext, + #[error("illegal commitment length")] + IllegalCommitmentLength, } #[derive(Clone, Debug, Eq, PartialEq)] diff --git a/zk-token-sdk/src/instruction/batched_range_proof/batched_range_proof_u128.rs b/zk-token-sdk/src/instruction/batched_range_proof/batched_range_proof_u128.rs index 96fbc18a6b612e..4036be9a94c940 100644 --- a/zk-token-sdk/src/instruction/batched_range_proof/batched_range_proof_u128.rs +++ b/zk-token-sdk/src/instruction/batched_range_proof/batched_range_proof_u128.rs @@ -5,6 +5,7 @@ use { crate::{ encryption::pedersen::{PedersenCommitment, PedersenOpening}, errors::{ProofGenerationError, ProofVerificationError}, + instruction::batched_range_proof::MAX_COMMITMENTS, range_proof::RangeProof, }, std::convert::TryInto, @@ -46,7 +47,7 @@ impl BatchedRangeProofU128Data { .try_fold(0_usize, |acc, &x| acc.checked_add(x)) .ok_or(ProofGenerationError::IllegalAmountBitLength)?; - // `u64::BITS` is 128, which fits 
in a single byte and should not overflow to `usize` for + // `u128::BITS` is 128, which fits in a single byte and should not overflow to `usize` for // an overwhelming number of platforms. However, to be extra cautious, use `try_from` and // `unwrap` here. A simple case `u128::BITS as usize` can silently overflow. let expected_bit_length = usize::try_from(u128::BITS).unwrap(); @@ -77,6 +78,12 @@ impl ZkProofData for BatchedRangeProofU128Data { #[cfg(not(target_os = "solana"))] fn verify_proof(&self) -> Result<(), ProofVerificationError> { let (commitments, bit_lengths) = self.context.try_into()?; + let num_commitments = commitments.len(); + + if num_commitments > MAX_COMMITMENTS || num_commitments != bit_lengths.len() { + return Err(ProofVerificationError::IllegalCommitmentLength); + } + let mut transcript = self.context_data().new_transcript(); let proof: RangeProof = self.proof.try_into()?; diff --git a/zk-token-sdk/src/instruction/batched_range_proof/batched_range_proof_u256.rs b/zk-token-sdk/src/instruction/batched_range_proof/batched_range_proof_u256.rs index 1a6dad66656487..1bdba644f3c296 100644 --- a/zk-token-sdk/src/instruction/batched_range_proof/batched_range_proof_u256.rs +++ b/zk-token-sdk/src/instruction/batched_range_proof/batched_range_proof_u256.rs @@ -5,6 +5,7 @@ use { crate::{ encryption::pedersen::{PedersenCommitment, PedersenOpening}, errors::{ProofGenerationError, ProofVerificationError}, + instruction::batched_range_proof::MAX_COMMITMENTS, range_proof::RangeProof, }, std::convert::TryInto, @@ -74,6 +75,12 @@ impl ZkProofData for BatchedRangeProofU256Data { #[cfg(not(target_os = "solana"))] fn verify_proof(&self) -> Result<(), ProofVerificationError> { let (commitments, bit_lengths) = self.context.try_into()?; + let num_commitments = commitments.len(); + + if num_commitments > MAX_COMMITMENTS || num_commitments != bit_lengths.len() { + return Err(ProofVerificationError::IllegalCommitmentLength); + } + let mut transcript = self.context_data().new_transcript(); let proof: RangeProof = self.proof.try_into()?; diff --git a/zk-token-sdk/src/instruction/batched_range_proof/batched_range_proof_u64.rs b/zk-token-sdk/src/instruction/batched_range_proof/batched_range_proof_u64.rs index 2a9fae57e46d6f..94b76b5beff89d 100644 --- a/zk-token-sdk/src/instruction/batched_range_proof/batched_range_proof_u64.rs +++ b/zk-token-sdk/src/instruction/batched_range_proof/batched_range_proof_u64.rs @@ -5,6 +5,7 @@ use { crate::{ encryption::pedersen::{PedersenCommitment, PedersenOpening}, errors::{ProofGenerationError, ProofVerificationError}, + instruction::batched_range_proof::MAX_COMMITMENTS, range_proof::RangeProof, }, std::convert::TryInto, @@ -76,6 +77,12 @@ impl ZkProofData for BatchedRangeProofU64Data { #[cfg(not(target_os = "solana"))] fn verify_proof(&self) -> Result<(), ProofVerificationError> { let (commitments, bit_lengths) = self.context.try_into()?; + let num_commitments = commitments.len(); + + if num_commitments > MAX_COMMITMENTS || num_commitments != bit_lengths.len() { + return Err(ProofVerificationError::IllegalCommitmentLength); + } + let mut transcript = self.context_data().new_transcript(); let proof: RangeProof = self.proof.try_into()?; diff --git a/zk-token-sdk/src/instruction/transfer/with_fee.rs b/zk-token-sdk/src/instruction/transfer/with_fee.rs index a0cc7ae746dab7..4bc9a154376840 100644 --- a/zk-token-sdk/src/instruction/transfer/with_fee.rs +++ b/zk-token-sdk/src/instruction/transfer/with_fee.rs @@ -41,6 +41,8 @@ use { const MAX_FEE_BASIS_POINTS: u64 = 10_000; 
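// [Editor's sketch, not part of the patch] Illustration of the arithmetic that appears to
// motivate the tighter delta range introduced below: if fee = ceil(amount * rate / MAX_FEE_BASIS_POINTS),
// then delta_fee = fee * MAX_FEE_BASIS_POINTS - amount * rate always lies in
// [0, MAX_FEE_BASIS_POINTS - 1], i.e. at most 9_999, so 16 bits (and a bound of
// MAX_FEE_BASIS_POINTS - 1) are sufficient. The function name and signature below are
// illustrative assumptions, not part of the patch.
fn illustrative_delta_fee(amount: u64, rate_basis_points: u64) -> u64 {
    let max_bp: u128 = 10_000;
    let raw = (amount as u128) * (rate_basis_points as u128);
    let fee = (raw + max_bp - 1) / max_bp; // ceiling division
    let delta = fee * max_bp - raw;        // remainder, always in [0, 9_999]
    debug_assert!(delta < max_bp);
    delta as u64
}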
#[cfg(not(target_os = "solana"))] const ONE_IN_BASIS_POINTS: u128 = MAX_FEE_BASIS_POINTS as u128; +#[cfg(not(target_os = "solana"))] +const MAX_DELTA_RANGE: u64 = MAX_FEE_BASIS_POINTS - 1; #[cfg(not(target_os = "solana"))] const TRANSFER_SOURCE_AMOUNT_BITS: usize = 64; @@ -51,7 +53,7 @@ const TRANSFER_AMOUNT_LO_NEGATED_BITS: usize = 16; #[cfg(not(target_os = "solana"))] const TRANSFER_AMOUNT_HI_BITS: usize = 32; #[cfg(not(target_os = "solana"))] -const TRANSFER_DELTA_BITS: usize = 48; +const TRANSFER_DELTA_BITS: usize = 16; #[cfg(not(target_os = "solana"))] const FEE_AMOUNT_LO_BITS: usize = 16; #[cfg(not(target_os = "solana"))] @@ -62,6 +64,7 @@ lazy_static::lazy_static! { pub static ref COMMITMENT_MAX: PedersenCommitment = Pedersen::encode((1_u64 << TRANSFER_AMOUNT_LO_NEGATED_BITS) - 1); pub static ref COMMITMENT_MAX_FEE_BASIS_POINTS: PedersenCommitment = Pedersen::encode(MAX_FEE_BASIS_POINTS); + pub static ref COMMITMENT_MAX_DELTA_RANGE: PedersenCommitment = Pedersen::encode(MAX_DELTA_RANGE); } /// The instruction data that is needed for the `ProofInstruction::TransferWithFee` instruction. @@ -557,24 +560,41 @@ impl TransferWithFeeProof { // generate the range proof let opening_claimed_negated = &PedersenOpening::default() - &opening_claimed; + + let combined_amount = combine_lo_hi_u64( + transfer_amount_lo, + transfer_amount_hi, + TRANSFER_AMOUNT_LO_BITS, + ); + let amount_sub_fee = combined_amount + .checked_sub(combined_fee_amount) + .ok_or(ProofGenerationError::FeeCalculation)?; + let amount_sub_fee_opening = combined_opening - combined_fee_opening; + + let delta_negated = MAX_DELTA_RANGE + .checked_sub(delta_fee) + .ok_or(ProofGenerationError::FeeCalculation)?; + let range_proof = RangeProof::new( vec![ source_new_balance, transfer_amount_lo, transfer_amount_hi, delta_fee, - MAX_FEE_BASIS_POINTS - delta_fee, + delta_negated, fee_amount_lo, fee_amount_hi, + amount_sub_fee, ], vec![ TRANSFER_SOURCE_AMOUNT_BITS, // 64 TRANSFER_AMOUNT_LO_BITS, // 16 TRANSFER_AMOUNT_HI_BITS, // 32 - TRANSFER_DELTA_BITS, // 48 - TRANSFER_DELTA_BITS, // 48 + TRANSFER_DELTA_BITS, // 16 + TRANSFER_DELTA_BITS, // 16 FEE_AMOUNT_LO_BITS, // 16 FEE_AMOUNT_HI_BITS, // 32 + TRANSFER_SOURCE_AMOUNT_BITS, // 64 ], vec![ &opening_source, @@ -584,6 +604,7 @@ impl TransferWithFeeProof { &opening_claimed_negated, opening_fee_lo, opening_fee_hi, + &amount_sub_fee_opening, ], transcript, )?; @@ -708,7 +729,8 @@ impl TransferWithFeeProof { // verify range proof let new_source_commitment = self.new_source_commitment.try_into()?; - let claimed_commitment_negated = &(*COMMITMENT_MAX_FEE_BASIS_POINTS) - &claimed_commitment; + let claimed_commitment_negated = &(*COMMITMENT_MAX_DELTA_RANGE) - &claimed_commitment; + let amount_sub_fee_commitment = combined_commitment - combined_fee_commitment; range_proof.verify( vec![ @@ -719,15 +741,17 @@ impl TransferWithFeeProof { &claimed_commitment_negated, fee_ciphertext_lo.get_commitment(), fee_ciphertext_hi.get_commitment(), + &amount_sub_fee_commitment, ], vec![ TRANSFER_SOURCE_AMOUNT_BITS, // 64 TRANSFER_AMOUNT_LO_BITS, // 16 TRANSFER_AMOUNT_HI_BITS, // 32 - TRANSFER_DELTA_BITS, // 48 - TRANSFER_DELTA_BITS, // 48 + TRANSFER_DELTA_BITS, // 16 + TRANSFER_DELTA_BITS, // 16 FEE_AMOUNT_LO_BITS, // 16 FEE_AMOUNT_HI_BITS, // 32 + TRANSFER_SOURCE_AMOUNT_BITS, // 64 ], transcript, )?; diff --git a/zk-token-sdk/src/range_proof/errors.rs b/zk-token-sdk/src/range_proof/errors.rs index fb08cb7543973c..f0c872f7aa3494 100644 --- a/zk-token-sdk/src/range_proof/errors.rs +++ 
b/zk-token-sdk/src/range_proof/errors.rs @@ -2,7 +2,18 @@ use {crate::errors::TranscriptError, thiserror::Error}; #[derive(Error, Clone, Debug, Eq, PartialEq)] -pub enum RangeProofGenerationError {} +pub enum RangeProofGenerationError { + #[error("maximum generator length exceeded")] + MaximumGeneratorLengthExceeded, + #[error("amounts, commitments, openings, or bit lengths vectors have different lengths")] + VectorLengthMismatch, + #[error("invalid bit size")] + InvalidBitSize, + #[error("insufficient generators for the proof")] + GeneratorLengthMismatch, + #[error("inner product length mismatch")] + InnerProductLengthMismatch, +} #[derive(Error, Clone, Debug, Eq, PartialEq)] pub enum RangeProofVerificationError { @@ -20,4 +31,14 @@ pub enum RangeProofVerificationError { InvalidBitSize, #[error("insufficient generators for the proof")] InvalidGeneratorsLength, + #[error("maximum generator length exceeded")] + MaximumGeneratorLengthExceeded, + #[error("commitments and bit lengths vectors have different lengths")] + VectorLengthMismatch, +} + +#[derive(Error, Clone, Debug, Eq, PartialEq)] +pub enum RangeProofGeneratorError { + #[error("maximum generator length exceeded")] + MaximumGeneratorLengthExceeded, } diff --git a/zk-token-sdk/src/range_proof/generators.rs b/zk-token-sdk/src/range_proof/generators.rs index bc0ce24fc857b1..a993d753dcad0c 100644 --- a/zk-token-sdk/src/range_proof/generators.rs +++ b/zk-token-sdk/src/range_proof/generators.rs @@ -1,4 +1,5 @@ use { + crate::range_proof::errors::RangeProofGeneratorError, curve25519_dalek::{ digest::{ExtendableOutput, Update, XofReader}, ristretto::RistrettoPoint, @@ -6,6 +7,9 @@ use { sha3::{Sha3XofReader, Shake256}, }; +#[cfg(not(target_os = "solana"))] +const MAX_GENERATOR_LENGTH: usize = u32::MAX as usize; + /// Generators for Pedersen vector commitments that are used for inner-product proofs. struct GeneratorsChain { reader: Sha3XofReader, @@ -67,37 +71,44 @@ pub struct BulletproofGens { } impl BulletproofGens { - pub fn new(gens_capacity: usize) -> Self { + pub fn new(gens_capacity: usize) -> Result { let mut gens = BulletproofGens { gens_capacity: 0, G_vec: Vec::new(), H_vec: Vec::new(), }; - gens.increase_capacity(gens_capacity); - gens + gens.increase_capacity(gens_capacity)?; + Ok(gens) } /// Increases the generators' capacity to the amount specified. /// If less than or equal to the current capacity, does nothing. 
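// [Editor's sketch, not part of the patch] With the constructor above made fallible and the
// capacity check that follows, callers are expected to propagate RangeProofGeneratorError
// rather than rely on a panic. The helper name and capacity value are illustrative assumptions.
fn illustrative_build_gens(nm: usize) -> Result<BulletproofGens, RangeProofGeneratorError> {
    // Fails with MaximumGeneratorLengthExceeded when nm exceeds u32::MAX as usize.
    let mut gens = BulletproofGens::new(nm)?;
    // No-op when the requested capacity is not larger than the current one.
    gens.increase_capacity(nm)?;
    Ok(gens)
}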
- pub fn increase_capacity(&mut self, new_capacity: usize) { + pub fn increase_capacity( + &mut self, + new_capacity: usize, + ) -> Result<(), RangeProofGeneratorError> { if self.gens_capacity >= new_capacity { - return; + return Ok(()); + } + + if new_capacity > MAX_GENERATOR_LENGTH { + return Err(RangeProofGeneratorError::MaximumGeneratorLengthExceeded); } - let label = [b'G']; self.G_vec.extend( - &mut GeneratorsChain::new(&[label, [b'G']].concat()) + &mut GeneratorsChain::new(&[b'G']) .fast_forward(self.gens_capacity) .take(new_capacity - self.gens_capacity), ); self.H_vec.extend( - &mut GeneratorsChain::new(&[label, [b'H']].concat()) + &mut GeneratorsChain::new(&[b'H']) .fast_forward(self.gens_capacity) .take(new_capacity - self.gens_capacity), ); self.gens_capacity = new_capacity; + Ok(()) } #[allow(non_snake_case)] diff --git a/zk-token-sdk/src/range_proof/inner_product.rs b/zk-token-sdk/src/range_proof/inner_product.rs index 0a648e3ea38bb5..44e8e0674a3d6a 100644 --- a/zk-token-sdk/src/range_proof/inner_product.rs +++ b/zk-token-sdk/src/range_proof/inner_product.rs @@ -1,6 +1,9 @@ use { crate::{ - range_proof::{errors::RangeProofVerificationError, util}, + range_proof::{ + errors::{RangeProofGenerationError, RangeProofVerificationError}, + util, + }, transcript::TranscriptProtocol, }, core::iter, @@ -45,7 +48,7 @@ impl InnerProductProof { mut a_vec: Vec, mut b_vec: Vec, transcript: &mut Transcript, - ) -> Self { + ) -> Result { // Create slices G, H, a, b backed by their respective // vectors. This lets us reslice as we compress the lengths // of the vectors in the main loop below. @@ -57,15 +60,20 @@ impl InnerProductProof { let mut n = G.len(); // All of the input vectors must have the same length. - assert_eq!(G.len(), n); - assert_eq!(H.len(), n); - assert_eq!(a.len(), n); - assert_eq!(b.len(), n); - assert_eq!(G_factors.len(), n); - assert_eq!(H_factors.len(), n); + if G.len() != n + || H.len() != n + || a.len() != n + || b.len() != n + || G_factors.len() != n + || H_factors.len() != n + { + return Err(RangeProofGenerationError::GeneratorLengthMismatch); + } // All of the input vectors must have a length that is a power of two. - assert!(n.is_power_of_two()); + if !n.is_power_of_two() { + return Err(RangeProofGenerationError::InvalidBitSize); + } transcript.innerproduct_domain_separator(n as u64); @@ -76,18 +84,21 @@ impl InnerProductProof { // If it's the first iteration, unroll the Hprime = H*y_inv scalar mults // into multiscalar muls, for performance. if n != 1 { - n /= 2; + n = n.checked_div(2).unwrap(); let (a_L, a_R) = a.split_at_mut(n); let (b_L, b_R) = b.split_at_mut(n); let (G_L, G_R) = G.split_at_mut(n); let (H_L, H_R) = H.split_at_mut(n); - let c_L = util::inner_product(a_L, b_R); - let c_R = util::inner_product(a_R, b_L); + let c_L = util::inner_product(a_L, b_R) + .ok_or(RangeProofGenerationError::InnerProductLengthMismatch)?; + let c_R = util::inner_product(a_R, b_L) + .ok_or(RangeProofGenerationError::InnerProductLengthMismatch)?; let L = RistrettoPoint::multiscalar_mul( a_L.iter() - .zip(G_factors[n..2 * n].iter()) + // `n` was previously divided in half and therefore, it cannot overflow. 
+ .zip(G_factors[n..n.checked_mul(2).unwrap()].iter()) .map(|(a_L_i, g)| a_L_i * g) .chain( b_R.iter() @@ -105,7 +116,7 @@ impl InnerProductProof { .map(|(a_R_i, g)| a_R_i * g) .chain( b_L.iter() - .zip(H_factors[n..2 * n].iter()) + .zip(H_factors[n..n.checked_mul(2).unwrap()].iter()) .map(|(b_L_i, h)| b_L_i * h), ) .chain(iter::once(c_R)), @@ -126,11 +137,17 @@ impl InnerProductProof { a_L[i] = a_L[i] * u + u_inv * a_R[i]; b_L[i] = b_L[i] * u_inv + u * b_R[i]; G_L[i] = RistrettoPoint::multiscalar_mul( - &[u_inv * G_factors[i], u * G_factors[n + i]], + &[ + u_inv * G_factors[i], + u * G_factors[n.checked_add(i).unwrap()], + ], &[G_L[i], G_R[i]], ); H_L[i] = RistrettoPoint::multiscalar_mul( - &[u * H_factors[i], u_inv * H_factors[n + i]], + &[ + u * H_factors[i], + u_inv * H_factors[n.checked_add(i).unwrap()], + ], &[H_L[i], H_R[i]], ) } @@ -142,14 +159,16 @@ impl InnerProductProof { } while n != 1 { - n /= 2; + n = n.checked_div(2).unwrap(); let (a_L, a_R) = a.split_at_mut(n); let (b_L, b_R) = b.split_at_mut(n); let (G_L, G_R) = G.split_at_mut(n); let (H_L, H_R) = H.split_at_mut(n); - let c_L = util::inner_product(a_L, b_R); - let c_R = util::inner_product(a_R, b_L); + let c_L = util::inner_product(a_L, b_R) + .ok_or(RangeProofGenerationError::InnerProductLengthMismatch)?; + let c_R = util::inner_product(a_R, b_L) + .ok_or(RangeProofGenerationError::InnerProductLengthMismatch)?; let L = RistrettoPoint::multiscalar_mul( a_L.iter().chain(b_R.iter()).chain(iter::once(&c_L)), @@ -185,12 +204,12 @@ impl InnerProductProof { H = H_L; } - InnerProductProof { + Ok(InnerProductProof { L_vec, R_vec, a: a[0], b: b[0], - } + }) } /// Computes three vectors of verification scalars \\([u\_{i}^{2}]\\), \\([u\_{i}^{-2}]\\) and @@ -205,12 +224,12 @@ impl InnerProductProof { transcript: &mut Transcript, ) -> Result<(Vec, Vec, Vec), RangeProofVerificationError> { let lg_n = self.L_vec.len(); - if lg_n >= 32 { + if lg_n == 0 || lg_n >= 32 { // 4 billion multiplications should be enough for anyone // and this check prevents overflow in 1< = bp_gens.G(n).cloned().collect(); let H: Vec = bp_gens.H(n).cloned().collect(); @@ -418,7 +440,7 @@ mod tests { let a: Vec<_> = (0..n).map(|_| Scalar::random(&mut OsRng)).collect(); let b: Vec<_> = (0..n).map(|_| Scalar::random(&mut OsRng)).collect(); - let c = util::inner_product(&a, &b); + let c = util::inner_product(&a, &b).unwrap(); let G_factors: Vec = iter::repeat(Scalar::one()).take(n).collect(); @@ -451,7 +473,8 @@ mod tests { a.clone(), b.clone(), &mut prover_transcript, - ); + ) + .unwrap(); assert!(proof .verify( diff --git a/zk-token-sdk/src/range_proof/mod.rs b/zk-token-sdk/src/range_proof/mod.rs index 9022ccc312e089..6658c350495473 100644 --- a/zk-token-sdk/src/range_proof/mod.rs +++ b/zk-token-sdk/src/range_proof/mod.rs @@ -75,14 +75,26 @@ impl RangeProof { ) -> Result { // amounts, bit-lengths, openings must be same length vectors let m = amounts.len(); - assert_eq!(bit_lengths.len(), m); - assert_eq!(openings.len(), m); + if bit_lengths.len() != m || openings.len() != m { + return Err(RangeProofGenerationError::VectorLengthMismatch); + } + + // each bit length must be greater than 0 for the proof to make sense + if bit_lengths + .iter() + .any(|bit_length| *bit_length == 0 || *bit_length > u64::BITS as usize) + { + return Err(RangeProofGenerationError::InvalidBitSize); + } // total vector dimension to compute the ultimate inner product proof for let nm: usize = bit_lengths.iter().sum(); - assert!(nm.is_power_of_two()); + if !nm.is_power_of_two() { + return 
Err(RangeProofGenerationError::VectorLengthMismatch); + } - let bp_gens = BulletproofGens::new(nm); + let bp_gens = BulletproofGens::new(nm) + .map_err(|_| RangeProofGenerationError::MaximumGeneratorLengthExceeded)?; // bit-decompose values and generate their Pedersen vector commitment let a_blinding = Scalar::random(&mut OsRng); @@ -92,7 +104,10 @@ impl RangeProof { for (amount_i, n_i) in amounts.iter().zip(bit_lengths.iter()) { for j in 0..(*n_i) { let (G_ij, H_ij) = gens_iter.next().unwrap(); - let v_ij = Choice::from(((amount_i >> j) & 1) as u8); + + // `j` is guaranteed to be at most `u64::BITS` (a 6-bit number) and therefore, + // casting is lossless and right shift can be safely unwrapped + let v_ij = Choice::from((amount_i.checked_shr(j as u32).unwrap() & 1) as u8); let mut point = -H_ij; point.conditional_assign(G_ij, v_ij); A += point; @@ -137,7 +152,9 @@ impl RangeProof { let mut exp_2 = Scalar::one(); for j in 0..(*n_i) { - let a_L_j = Scalar::from((amount_i >> j) & 1); + // `j` is guaranteed to be at most `u64::BITS` (a 6-bit number) and therefore, + // casting is lossless and right shift can be safely unwrapped + let a_L_j = Scalar::from(amount_i.checked_shr(j as u32).unwrap() & 1); let a_R_j = a_L_j - Scalar::one(); l_poly.0[i] = a_L_j - z; @@ -147,13 +164,17 @@ impl RangeProof { exp_y *= y; exp_2 = exp_2 + exp_2; - i += 1; + + // `i` is capped by the sum of vectors in `bit_lengths` + i = i.checked_add(1).unwrap(); } exp_z *= z; } // define t(x) = = t_0 + t_1*x + t_2*x - let t_poly = l_poly.inner_product(&r_poly); + let t_poly = l_poly + .inner_product(&r_poly) + .ok_or(RangeProofGenerationError::InnerProductLengthMismatch)?; // generate Pedersen commitment for the coefficients t_1 and t_2 let (T_1, t_1_blinding) = Pedersen::new(t_poly.1); @@ -215,7 +236,7 @@ impl RangeProof { l_vec, r_vec, transcript, - ); + )?; Ok(RangeProof { A, @@ -237,11 +258,14 @@ impl RangeProof { transcript: &mut Transcript, ) -> Result<(), RangeProofVerificationError> { // commitments and bit-lengths must be same length vectors - assert_eq!(comms.len(), bit_lengths.len()); + if comms.len() != bit_lengths.len() { + return Err(RangeProofVerificationError::VectorLengthMismatch); + } let m = bit_lengths.len(); let nm: usize = bit_lengths.iter().sum(); - let bp_gens = BulletproofGens::new(nm); + let bp_gens = BulletproofGens::new(nm) + .map_err(|_| RangeProofVerificationError::MaximumGeneratorLengthExceeded)?; if !nm.is_power_of_two() { return Err(RangeProofVerificationError::InvalidBitSize); diff --git a/zk-token-sdk/src/range_proof/util.rs b/zk-token-sdk/src/range_proof/util.rs index c551abd8f3a15c..4a76543d475bc0 100644 --- a/zk-token-sdk/src/range_proof/util.rs +++ b/zk-token-sdk/src/range_proof/util.rs @@ -11,20 +11,20 @@ impl VecPoly1 { VecPoly1(vec![Scalar::zero(); n], vec![Scalar::zero(); n]) } - pub fn inner_product(&self, rhs: &VecPoly1) -> Poly2 { + pub fn inner_product(&self, rhs: &VecPoly1) -> Option { // Uses Karatsuba's method let l = self; let r = rhs; - let t0 = inner_product(&l.0, &r.0); - let t2 = inner_product(&l.1, &r.1); + let t0 = inner_product(&l.0, &r.0)?; + let t2 = inner_product(&l.1, &r.1)?; let l0_plus_l1 = add_vec(&l.0, &l.1); let r0_plus_r1 = add_vec(&r.0, &r.1); - let t1 = inner_product(&l0_plus_l1, &r0_plus_r1) - t0 - t2; + let t1 = inner_product(&l0_plus_l1, &r0_plus_r1)? 
- t0 - t2; - Poly2(t0, t1, t2) + Some(Poly2(t0, t1, t2)) } pub fn eval(&self, x: Scalar) -> Vec { @@ -98,16 +98,16 @@ pub fn read32(data: &[u8]) -> [u8; 32] { /// \\[ /// {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} = \sum\_{i=0}^{n-1} a\_i \cdot b\_i. /// \\] -/// Panics if the lengths of \\(\mathbf{a}\\) and \\(\mathbf{b}\\) are not equal. -pub fn inner_product(a: &[Scalar], b: &[Scalar]) -> Scalar { +/// Errors if the lengths of \\(\mathbf{a}\\) and \\(\mathbf{b}\\) are not equal. +pub fn inner_product(a: &[Scalar], b: &[Scalar]) -> Option { let mut out = Scalar::zero(); if a.len() != b.len() { - panic!("inner_product(a,b): lengths of vectors do not match"); + return None; } for i in 0..a.len() { out += a[i] * b[i]; } - out + Some(out) } /// Takes the sum of all the powers of `x`, up to `n`
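// [Editor's sketch, not part of the patch] How the Option-returning inner_product above is
// meant to be consumed: a length mismatch becomes an explicit error instead of a panic,
// mirroring the pattern used in range_proof/mod.rs earlier in this patch. The wrapper name
// is an illustrative assumption.
fn illustrative_dot(a: &[Scalar], b: &[Scalar]) -> Result<Scalar, RangeProofGenerationError> {
    inner_product(a, b).ok_or(RangeProofGenerationError::InnerProductLengthMismatch)
}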