Mirror of https://github.com/fluencelabs/aquavm, synced 2024-12-04 23:20:18 +00:00

AquaVM benchmarks (#283)

* Fix stale benchmarks
* Add data (de)serialization and execution benchmarks. There are two kinds of benchmark: a relatively short trace with huge call results, and a long trace of small call results. For each kind there are two cases: the same data merged with comparison, and data from different par branches merged without comparison.

This commit is contained in:
parent c3cea695c8
commit 0eb37800b6
.github/workflows/aquavm.yml (vendored, 4 changes)

@@ -49,7 +49,9 @@ jobs:
       - name: "cargo test"
         run: |
           cargo test --release
-          # The memory sanitizer on cargo test has false positives even on an empty project.
+          # Check that it does compile
+          cargo bench --no-run
+          # The `memory` sanitizer on cargo test has false positives even on an empty project.
           for san in address leak; do
             RUSTFLAGS="-Z sanitizer=$san" cargo test --features test_with_native_code --target x86_64-unknown-linux-gnu
           done
air/Cargo.toml

@@ -48,7 +48,8 @@ wasm-bindgen = "=0.2.65"
 air-test-utils = { path = "../crates/air-lib/test-utils" }
 fluence-app-service = "0.17.2"
 
-criterion = "0.3.3"
+# the feature just silences a warning in criterion 0.3.x.
+criterion = { version = "0.3.3", features = ["html_reports"] }
 csv = "1.1.5"
 once_cell = "1.4.1"
 env_logger = "0.7.1"
@@ -66,3 +67,11 @@ harness = false
 [[bench]]
 name = "create_service_benchmark"
 harness = false
+
+[[bench]]
+name = "data_big_benchmark"
+harness = false
+
+[[bench]]
+name = "data_long_benchmark"
+harness = false
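With `harness = false`, cargo's built-in libtest runner is disabled for these targets, so each bench file must supply its own entry point through criterion's macros. A minimal sketch of the skeleton every target registered above follows (the names here are placeholders, not from the commit):

use criterion::{black_box, criterion_group, criterion_main, Criterion};

// Placeholder body; the real targets registered above bench AquaVM calls.
fn criterion_benchmark(c: &mut Criterion) {
    c.bench_function("noop", |b| b.iter(|| black_box(1 + 1)));
}

// These macros expand into the main() that `harness = false` removed.
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);

An individual target can then be run in isolation, e.g. `cargo bench --bench data_big_benchmark`.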
@@ -15,7 +15,8 @@ thread_local!(static SCRIPT: String = String::from(
 );
 
 fn current_peer_id_call() -> Result<RawAVMOutcome, String> {
-    VM.with(|vm| SCRIPT.with(|script| vm.borrow_mut().call(script, "", "", "")))
+    let run_parameters = TestRunParameters::new("test_peer_id", 0, 1);
+    VM.with(|vm| SCRIPT.with(|script| vm.borrow_mut().call(script, "", "", run_parameters)))
 }
 
 fn criterion_benchmark(c: &mut Criterion) {
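This is the shape of the stale-benchmark fix repeated across every call site in this commit: `call` now takes a `TestRunParameters` value where an empty string used to be passed. A minimal sketch of the new convention, assuming (the diff does not name them) that the `("test_peer_id", 0, 1)` arguments are the init peer id, a timestamp, and a TTL:

use air_test_utils::prelude::*;

use std::cell::RefCell;

thread_local!(static VM: RefCell<TestRunner> =
    RefCell::new(create_avm(unit_call_service(), "test_peer_id")));

fn run_once() -> Result<RawAVMOutcome, String> {
    // Argument meanings (peer id, timestamp, ttl) are an assumption;
    // the diff only ever passes ("<peer id>", 0, 1).
    let run_parameters = TestRunParameters::new("test_peer_id", 0, 1);
    // Empty prev/current data: a fresh, single-peer execution.
    VM.with(|vm| vm.borrow_mut().call("(null)", "", "", run_parameters))
}

The chat benchmark below constructs one `TestRunParameters` and passes `run_parameters.clone()` at every call site, which suggests `call` takes the parameters by value.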
@@ -41,24 +41,27 @@ fn chat_sent_message_benchmark() -> Result<RawAVMOutcome, String> {
         )
     "#;
 
-    let result = CLIENT_1_VM.with(|vm| vm.borrow_mut().call(script, "", "", "")).unwrap();
+    let run_parameters = TestRunParameters::new("A", 0, 1);
+    let result = CLIENT_1_VM
+        .with(|vm| vm.borrow_mut().call(script, "", "", run_parameters.clone()))
+        .unwrap();
     let result = RELAY_1_VM
-        .with(|vm| vm.borrow_mut().call(script, "", result.data, ""))
+        .with(|vm| vm.borrow_mut().call(script, "", result.data, run_parameters.clone()))
         .unwrap();
     let result = REMOTE_VM
-        .with(|vm| vm.borrow_mut().call(script, "", result.data, ""))
+        .with(|vm| vm.borrow_mut().call(script, "", result.data, run_parameters.clone()))
         .unwrap();
     let res_data = result.data.clone();
     let res1 = RELAY_1_VM
-        .with(|vm| vm.borrow_mut().call(script, "", res_data, ""))
+        .with(|vm| vm.borrow_mut().call(script, "", res_data, run_parameters.clone()))
         .unwrap();
     CLIENT_1_VM
-        .with(|vm| vm.borrow_mut().call(script, "", res1.data, ""))
+        .with(|vm| vm.borrow_mut().call(script, "", res1.data, run_parameters.clone()))
         .unwrap();
     let res2 = RELAY_2_VM
-        .with(|vm| vm.borrow_mut().call(script, "", result.data, ""))
+        .with(|vm| vm.borrow_mut().call(script, "", result.data, run_parameters.clone()))
         .unwrap();
-    CLIENT_2_VM.with(|vm| vm.borrow_mut().call(script, "", res2.data, ""))
+    CLIENT_2_VM.with(|vm| vm.borrow_mut().call(script, "", res2.data, run_parameters.clone()))
 }
 
 fn criterion_benchmark(c: &mut Criterion) {
@@ -76,10 +76,12 @@ fn create_service_benchmark() -> Result<RawAVMOutcome, String> {
         )
     )"#;
 
+    let run_parameters1 = TestRunParameters::new("set_variables", 0, 1);
+    let run_parameters2 = run_parameters1.clone();
     let result = SET_VARIABLES_VM
-        .with(|vm| vm.borrow_mut().call(script, "", "", ""))
+        .with(|vm| vm.borrow_mut().call(script, "", "", run_parameters1))
         .unwrap();
-    VM.with(|vm| vm.borrow_mut().call(script, "", result.data, ""))
+    VM.with(|vm| vm.borrow_mut().call(script, "", result.data, run_parameters2))
 }
 
 fn criterion_benchmark(c: &mut Criterion) {
air/benches/data/anomaly_big.json (new file, 1 line)
File diff suppressed because one or more lines are too long.

air/benches/data/anomaly_long.json (new file, 1 line)
File diff suppressed because one or more lines are too long.
air/benches/data_big_benchmark.rs (new file, 71 lines)

@@ -0,0 +1,71 @@
use air_test_utils::prelude::*;

use criterion::criterion_group;
use criterion::criterion_main;
use criterion::Criterion;
use serde_json::Value;

use std::cell::RefCell;

thread_local!(static VM: RefCell<TestRunner> = RefCell::new(create_avm(unit_call_service(), "test_peer_id")));
const SCRIPT: &str = r#"
(par
    (seq
        (seq
            (call "one_peer_id" ("service1" "call1") [] res1)
            (call "two_peer_id" ("service2" "call2") [] res2))
        (seq
            (call "three_peer_id" ("service3" "call3") [] res3)
            (call "test_peer_id" ("service" "call") [res1 res2 res3])))
    (seq
        (seq
            (call "one_peer_id" ("service1" "call1") [] res1)
            (call "two_peer_id" ("service2" "call2") [] res2))
        (seq
            (call "three_peer_id" ("service3" "call3") [] res3)
            (call "test_peer_id" ("service" "call") [res1 res2 res3]))))"#;

// this is the data with a smaller number of huge values; it contains only calls and
// is to be modified in different ways.
const VALUES_DATA: &str = include_str!("data/anomaly_big.json");

fn data_big_calls(prev_data: &str, current_data: &str) -> Result<RawAVMOutcome, String> {
    let run_parameters = TestRunParameters::new("test_peer_id", 0, 1);
    VM.with(|vm| vm.borrow_mut().call(SCRIPT, prev_data, current_data, run_parameters))
}

fn build_par_data(data: &mut Value, hangs_left: bool) {
    let trace = data.get_mut("trace").unwrap().as_array_mut().unwrap();
    let trace_len = trace.len();
    let par = if hangs_left {
        json!({"par": [trace_len, 0]})
    } else {
        json!({"par": [0, trace_len]})
    };
    trace.insert(0, par);
}

fn criterion_benchmark(c: &mut Criterion) {
    let mut data_right = serde_json::from_str::<Value>(VALUES_DATA).unwrap();
    let mut data_left = data_right.clone();

    build_par_data(&mut data_right, false);
    build_par_data(&mut data_left, true);

    let json_data_right = serde_json::to_string(&data_right).unwrap();
    std::mem::drop(data_right);
    let json_data_left = serde_json::to_string(&data_left).unwrap();
    std::mem::drop(data_left);

    // the traces contain different par branches
    c.bench_function("data_big_calls wo merge", |b| {
        b.iter(|| data_big_calls(&json_data_right, &json_data_left).unwrap())
    });
    // the traces contain the same par branch and are merged
    c.bench_function("data_big_calls with merge", |b| {
        b.iter(|| data_big_calls(&json_data_right, &json_data_right).unwrap())
    });
}

criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
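To make `build_par_data` concrete: prepending {"par": [trace_len, 0]} marks the whole existing trace as the left par branch, while {"par": [0, trace_len]} marks it as the right one, so the left-built and right-built copies describe disjoint branches and can be merged without element comparison. A small self-contained check of the resulting shape, reusing the function exactly as written above; the three-element toy trace stands in for anomaly_big.json:

use serde_json::{json, Value};

fn build_par_data(data: &mut Value, hangs_left: bool) {
    let trace = data.get_mut("trace").unwrap().as_array_mut().unwrap();
    let trace_len = trace.len();
    let par = if hangs_left {
        json!({"par": [trace_len, 0]})
    } else {
        json!({"par": [0, trace_len]})
    };
    trace.insert(0, par);
}

fn main() {
    // Toy stand-in for the anomaly data: three call results.
    let mut data = json!({ "trace": ["c1", "c2", "c3"] });
    build_par_data(&mut data, true);
    // The par header now precedes the original trace entries.
    assert_eq!(
        data["trace"],
        json!([{ "par": [3, 0] }, "c1", "c2", "c3"])
    );
}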
air/benches/data_long_benchmark.rs (new file, 56 lines)

@@ -0,0 +1,56 @@
use air_test_utils::prelude::*;

use criterion::criterion_group;
use criterion::criterion_main;
use criterion::Criterion;
use serde_json::Value;

use std::cell::RefCell;

thread_local!(static VM: RefCell<TestRunner> = RefCell::new(create_avm(unit_call_service(), "test_peer_id")));
// effectively, we measure just loading time
const SCRIPT: &str = "(par (null) (null))";

// this is the data with a large number of smaller values
const VALUES_DATA: &str = include_str!("data/anomaly_long.json");

fn data_long_calls(prev_data: &str, current_data: &str) -> Result<RawAVMOutcome, String> {
    let run_parameters = TestRunParameters::new("test_peer_id", 0, 1);
    VM.with(|vm| vm.borrow_mut().call(SCRIPT, prev_data, current_data, run_parameters))
}

fn build_par_data(data: &mut Value, hangs_left: bool) {
    let trace = data.get_mut("trace").unwrap().as_array_mut().unwrap();
    let trace_len = trace.len();
    let par = if hangs_left {
        json!({"par": [trace_len, 0]})
    } else {
        json!({"par": [0, trace_len]})
    };
    trace.insert(0, par);
}

fn criterion_benchmark(c: &mut Criterion) {
    let mut data_right = serde_json::from_str::<Value>(VALUES_DATA).unwrap();
    let mut data_left = data_right.clone();

    build_par_data(&mut data_right, false);
    build_par_data(&mut data_left, true);

    let json_data_right = serde_json::to_string(&data_right).unwrap();
    std::mem::drop(data_right);
    let json_data_left = serde_json::to_string(&data_left).unwrap();
    std::mem::drop(data_left);

    // the traces contain different par branches
    c.bench_function("data_long_calls wo merge", |b| {
        b.iter(|| data_long_calls(&json_data_right, &json_data_left).unwrap())
    });
    // the traces contain the same par branch and are merged
    c.bench_function("data_long_calls with merge", |b| {
        b.iter(|| data_long_calls(&json_data_right, &json_data_right).unwrap())
    });
}

criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
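Since the script here is just (par (null) (null)), this target mostly measures data loading, as its comment says: decoding and re-encoding the long trace dominates over instruction execution. For comparison, a hypothetical baseline bench (not part of this commit) that times only raw serde_json parsing of the same file might look like this:

use criterion::{criterion_group, criterion_main, Criterion};
use serde_json::Value;

const VALUES_DATA: &str = include_str!("data/anomaly_long.json");

// Hypothetical baseline: raw JSON parsing cost of the benchmark data,
// to set a floor under the full data_long_calls figures.
fn parse_baseline(c: &mut Criterion) {
    c.bench_function("anomaly_long raw parse", |b| {
        b.iter(|| serde_json::from_str::<Value>(VALUES_DATA).unwrap())
    });
}

criterion_group!(benches, parse_baseline);
criterion_main!(benches);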
@@ -122,11 +122,11 @@ fn parse_deep(c: &mut Criterion) {
         .map(|(i, code)| (i, code.len()))
         .collect();
 
-    c.bench_function_over_inputs(
-        "parse generated script",
-        move |b, (i, _)| {
+    let mut group = c.benchmark_group("parse generated script");
+    for (i, _) in index {
+        group.bench_function(i.to_string(), |b| {
             let parser = parser.clone();
-            let code = &source_code[*i];
+            let code = &source_code[i];
             b.iter(move || {
                 let mut validator = VariableValidator::new();
                 let lexer = AIRLexer::new(code);
@@ -136,9 +136,8 @@ fn parse_deep(c: &mut Criterion) {
                     .parse("", &mut Vec::new(), &mut validator, lexer)
                     .expect("success")
             });
-        },
-        index,
-    );
+        });
+    }
 }
 
 fn parse_dashboard_script(c: &mut Criterion) {
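The rewrite above moves parse_deep off `bench_function_over_inputs`, deprecated in the criterion 0.3 line, onto the `benchmark_group` API while keeping one benchmark per generated-script size. A minimal standalone sketch of the same pattern, with a toy benchmark body rather than the parser code:

use criterion::{criterion_group, criterion_main, Criterion};

fn sizes_benchmark(c: &mut Criterion) {
    // One named group, one benchmark per input, mirroring the diff above.
    let mut group = c.benchmark_group("vec with_capacity");
    for size in [10usize, 100, 1000] {
        group.bench_function(size.to_string(), |b| {
            b.iter(|| Vec::<u64>::with_capacity(size))
        });
    }
    group.finish();
}

criterion_group!(benches, sizes_benchmark);
criterion_main!(benches);

Dropping the group also finishes it, which is presumably why the diff gets away without an explicit `group.finish()` call.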
@@ -170,7 +169,5 @@ criterion_group!(
     parse_to_fail,
     parse_dashboard_script,
     parse_deep,
-    clone_parser,
-    clone_parser_rc,
 );
 criterion_main!(parser);