-
Notifications
You must be signed in to change notification settings - Fork 835
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
[subsystem-benchmark] Add approval-voting benchmark to CI #4216
Changes from 4 commits
b8e9215
455a0b1
63be961
9081364
b64100a
2c718e7
aaf0c81
704b63c
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,97 @@ | ||
// Copyright (C) Parity Technologies (UK) Ltd. | ||
// This file is part of Polkadot. | ||
|
||
// Polkadot is free software: you can redistribute it and/or modify | ||
// it under the terms of the GNU General Public License as published by | ||
// the Free Software Foundation, either version 3 of the License, or | ||
// (at your option) any later version. | ||
|
||
// Polkadot is distributed in the hope that it will be useful, | ||
// but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
// GNU General Public License for more details. | ||
|
||
// You should have received a copy of the GNU General Public License | ||
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. | ||
|
||
//! approval-voting regression tests
//!
//! Approval-voting benchmark based on Kusama parameters and scale.
//!
//! Subsystems involved:
//! - approval-distribution
//! - approval-voting
|
||
use polkadot_subsystem_bench::{ | ||
self, | ||
approval::{bench_approvals, prepare_test, ApprovalsOptions}, | ||
configuration::TestConfiguration, | ||
usage::BenchmarkUsage, | ||
utils::save_to_file, | ||
}; | ||
use std::io::Write; | ||
|
||
/// Number of benchmark repetitions; usage figures are averaged across all
/// runs to smooth out run-to-run noise before checking for regressions.
const BENCH_COUNT: usize = 10;
||
fn main() -> Result<(), String> { | ||
let mut messages = vec![]; | ||
let mut config = TestConfiguration::default(); | ||
// A single node effort roughly | ||
config.n_cores = 100; | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Are we approving 100 candidates per block ? If so, then the comment above is not correct. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Yes, this would generate assignments and approvals for 100 occupied core each block |
||
config.n_validators = 500; | ||
config.num_blocks = 10; | ||
config.peer_bandwidth = 524288000000; | ||
config.bandwidth = 524288000000; | ||
config.latency = None; | ||
config.connectivity = 100; | ||
config.generate_pov_sizes(); | ||
let options = ApprovalsOptions { | ||
last_considered_tranche: 89, | ||
coalesce_mean: 3.0, | ||
coalesce_std_dev: 1.0, | ||
coalesce_tranche_diff: 12, | ||
enable_assignments_v2: true, | ||
stop_when_approved: false, | ||
workdir_prefix: "/tmp".to_string(), | ||
num_no_shows_per_candidate: 0, | ||
}; | ||
|
||
println!("Benchmarking..."); | ||
let usages: Vec<BenchmarkUsage> = (0..BENCH_COUNT) | ||
.map(|n| { | ||
print!("\r[{}{}]", "#".repeat(n), "_".repeat(BENCH_COUNT - n)); | ||
std::io::stdout().flush().unwrap(); | ||
let (mut env, state) = prepare_test(config.clone(), options.clone(), false); | ||
env.runtime().block_on(bench_approvals("approvals_throughput", &mut env, state)) | ||
}) | ||
.collect(); | ||
println!("\rDone!{}", " ".repeat(BENCH_COUNT)); | ||
|
||
let average_usage = BenchmarkUsage::average(&usages); | ||
save_to_file( | ||
"charts/availability-distribution-regression-bench.json", | ||
average_usage.to_chart_json().map_err(|e| e.to_string())?, | ||
) | ||
.map_err(|e| e.to_string())?; | ||
println!("{}", average_usage); | ||
|
||
// We expect no variance for received and sent | ||
// but use 0.001 because we operate with floats | ||
messages.extend(average_usage.check_network_usage(&[ | ||
("Received from peers", 52944.7000, 0.001), | ||
("Sent to peers", 63532.2000, 0.001), | ||
])); | ||
messages.extend(average_usage.check_cpu_usage(&[ | ||
("approval-distribution", 5.9513, 0.1), | ||
("approval-voting", 7.8114, 0.1), | ||
])); | ||
|
||
if messages.is_empty() { | ||
Ok(()) | ||
} else { | ||
eprintln!("{}", messages.join("\n")); | ||
Err("Regressions found".to_string()) | ||
} | ||
} |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Is there any TODO left ?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
No, it shouldn't be.