diff --git a/presets/benchmarks/example.toml b/presets/benchmarks/example.toml
index 2457793..0843879 100644
--- a/presets/benchmarks/example.toml
+++ b/presets/benchmarks/example.toml
@@ -12,6 +12,7 @@ kmax = 1000
 set_perc = 100
 get_perc = 0
 del_perc = 0
+scan_perc = 0
 repeat = 1
 dist = "incrementp"
 
@@ -20,6 +21,7 @@ timeout = 0.2
 set_perc = 50
 get_perc = 50
 del_perc = 0
+scan_perc = 0
 dist = "zipfian"
 
 [[benchmark]]
@@ -27,4 +29,5 @@ timeout = 0.2
 set_perc = 50
 get_perc = 50
 del_perc = 0
+scan_perc = 0
 dist = "uniform"
diff --git a/presets/benchmarks/example_scan.toml b/presets/benchmarks/example_scan.toml
new file mode 100644
index 0000000..dd4cc9a
--- /dev/null
+++ b/presets/benchmarks/example_scan.toml
@@ -0,0 +1,34 @@
+[global]
+threads = 8
+repeat = 5
+qd = 100
+batch = 10
+scan = 10
+klen = 8
+vlen = 16
+kmin = 0
+kmax = 1000
+
+[[benchmark]]
+set_perc = 100
+get_perc = 0
+del_perc = 0
+scan_perc = 0
+repeat = 1
+dist = "incrementp"
+
+[[benchmark]]
+timeout = 0.2
+set_perc = 50
+get_perc = 25
+del_perc = 0
+scan_perc = 25
+dist = "zipfian"
+
+[[benchmark]]
+timeout = 0.2
+set_perc = 50
+get_perc = 25
+del_perc = 0
+scan_perc = 25
+dist = "uniform"
diff --git a/src/bench.rs b/src/bench.rs
index ec8206e..2f586bc 100644
--- a/src/bench.rs
+++ b/src/bench.rs
@@ -353,6 +353,7 @@ impl Benchmark {
 /// is missing. For the usage of each option, please refer to [`BenchmarkOpt`].
 #[derive(Deserialize, Clone, Debug)]
 pub struct GlobalOpt {
+    // benchmark
     pub threads: Option<usize>,
     pub repeat: Option<usize>,
     pub qd: Option<usize>,
@@ -360,6 +361,8 @@
     pub report: Option<String>,
     pub latency: Option<bool>,
     pub cdf: Option<bool>,
+    // workload
+    pub scan: Option<usize>,
     pub klen: Option<usize>,
     pub vlen: Option<usize>,
     pub kmin: Option<usize>,
@@ -376,6 +379,7 @@ impl Default for GlobalOpt {
             report: None,
             latency: None,
             cdf: None,
+            scan: None,
             klen: None,
             vlen: None,
             kmin: None,
@@ -403,6 +407,12 @@
             .cdf
             .clone()
             .or_else(|| Some(self.cdf.clone().unwrap_or(false)));
+        // the workload options (fall back to defaults)
+        opt.workload.scan = opt
+            .workload
+            .scan
+            .clone()
+            .or_else(|| Some(self.scan.clone().unwrap_or(10)));
         // the workload options (must be specified)
        opt.workload.klen = opt
            .workload
            .klen
@@ -765,6 +775,9 @@ fn bench_worker_regular(
             Operation::Delete { key } => {
                 handle.delete(&key[..]);
             }
+            Operation::Scan { key, n } => {
+                let _ = handle.scan(&key[..], n);
+            }
         }
         let op_end = latency_tick();
         if let Some(ref mut l) = latency {
@@ -1099,6 +1112,7 @@ mod tests {
        report = "finish"
        latency = true
        cdf = true
+       scan = 500
        klen = 8
        vlen = 16
        kmin = 100
@@ -1106,9 +1120,10 @@
 
        [[benchmark]]
        timeout = 10.0
-       set_perc = 100
-       get_perc = 0
-       del_perc = 0
+       set_perc = 50
+       get_perc = 30
+       del_perc = 10
+       scan_perc = 10
        dist = "incrementp"
        "#;
 
@@ -1116,10 +1131,12 @@
         let (_, bg) = init(opt);
         assert_eq!(bg.len(), 1);
         let wopt = WorkloadOpt {
-            set_perc: 100,
-            get_perc: 0,
-            del_perc: 0,
+            set_perc: 50,
+            get_perc: 30,
+            del_perc: 10,
+            scan_perc: 10,
             dist: "incrementp".to_string(),
+            scan: Some(500),
             klen: Some(8),
             vlen: Some(16),
             kmin: Some(100),
@@ -1143,6 +1160,57 @@
         assert_eq!(*bg[0], benchmark)
     }
 
+    #[test]
+    fn global_options_defaults_are_applied() {
+        let opt = r#"
+        [map]
+        name = "nullmap"
+
+        [[benchmark]]
+        set_perc = 50
+        get_perc = 30
+        del_perc = 10
+        scan_perc = 10
+        klen = 8
+        vlen = 16
+        kmin = 1
+        kmax = 1000
+        dist = "shufflep"
+        "#;
+
+        let (_, bg) = init(opt);
+        assert_eq!(bg.len(), 1);
+
+        let wopt = WorkloadOpt {
+            set_perc: 50,
+            get_perc: 30,
+            del_perc: 10,
+            scan_perc: 10,
+            dist: "shufflep".to_string(),
+            scan: Some(10),
+            klen: Some(8),
+            vlen: Some(16),
+            kmin: Some(1),
+            kmax: Some(1000),
+            zipf_theta: None,
+            zipf_hotspot: None,
+        };
+
+        let benchmark = Benchmark {
+            threads: 1,
+            repeat: 1,
+            qd: 1,
+            batch: 1,
+            report: ReportMode::All,
+            latency: false,
+            cdf: false,
+            len: Length::Exhaust,
+            wopt,
+        };
+
+        assert_eq!(*bg[0], benchmark)
+    }
+
     #[test]
     #[should_panic(expected = "should be positive")]
     fn invalid_threads() {
@@ -1162,6 +1230,7 @@
        set_perc = 100
        get_perc = 0
        del_perc = 0
+       scan_perc = 0
        dist = "incrementp"
        "#;
 
@@ -1187,6 +1256,7 @@
        set_perc = 100
        get_perc = 0
        del_perc = 0
+       scan_perc = 0
        dist = "incrementp"
        "#;
 
@@ -1211,6 +1281,7 @@
        set_perc = 100
        get_perc = 0
        del_perc = 0
+       scan_perc = 0
        dist = "incrementp"
        report = "alll"
        "#;
@@ -1237,6 +1308,7 @@
        set_perc = 100
        get_perc = 0
        del_perc = 0
+       scan_perc = 0
        dist = "incrementp"
        "#;
 
@@ -1262,6 +1334,7 @@
        set_perc = 100
        get_perc = 0
        del_perc = 0
+       scan_perc = 0
        dist = "incrementp"
        "#;
 
@@ -1273,6 +1346,11 @@
         "/presets/benchmarks/example.toml"
     ));
 
+    const EXAMPLE_SCAN_BENCH: &str = include_str!(concat!(
+        env!("CARGO_MANIFEST_DIR"),
+        "/presets/benchmarks/example_scan.toml"
+    ));
+
     fn example(map_opt: &str) {
         let _ = env_logger::try_init();
         let opt = map_opt.to_string() + "\n" + EXAMPLE_BENCH;
@@ -1280,6 +1358,13 @@
         map.bench(&phases);
     }
 
+    fn example_scan(map_opt: &str) {
+        let _ = env_logger::try_init();
+        let opt = map_opt.to_string() + "\n" + EXAMPLE_SCAN_BENCH;
+        let (map, phases) = init(&opt);
+        map.bench(&phases);
+    }
+
     #[test]
     fn example_null() {
         const OPT: &str = include_str!(concat!(
@@ -1289,6 +1374,15 @@
            "/presets/stores/null.toml"
         ));
         example(OPT);
     }
 
+    #[test]
+    fn example_scan_null() {
+        const OPT: &str = include_str!(concat!(
+            env!("CARGO_MANIFEST_DIR"),
+            "/presets/stores/null.toml"
+        ));
+        example_scan(OPT);
+    }
+
     #[test]
     fn example_null_async() {
         const OPT: &str = include_str!(concat!(
@@ -1298,6 +1392,15 @@
            "/presets/stores/null_async.toml"
         ));
         example(OPT);
     }
 
+    #[test]
+    fn example_scan_null_async() {
+        const OPT: &str = include_str!(concat!(
+            env!("CARGO_MANIFEST_DIR"),
+            "/presets/stores/null_async.toml"
+        ));
+        example_scan(OPT);
+    }
+
     #[test]
     fn example_mutex_hashmap() {
         const OPT: &str = include_str!(concat!(
@@ -1408,6 +1511,21 @@
         );
         example(&opt);
     }
+
+    #[test]
+    #[cfg(feature = "rocksdb")]
+    fn example_scan_rocksdb() {
+        let tmp_dir = tempfile::tempdir().unwrap();
+        let opt = format!(
+            r#"
+            [map]
+            name = "rocksdb"
+            path = "{}"
+            "#,
+            tmp_dir.path().to_str().unwrap().to_string()
+        );
+        example_scan(&opt);
+    }
 }
 
 // }}} tests
diff --git a/src/lib.rs b/src/lib.rs
index f687000..4f5d33e 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -88,7 +88,10 @@ pub trait KVMapHandle {
     /// Removing a key if it exists.
     fn delete(&mut self, key: &[u8]);
 
-    // fn read_modify_write(&mut self, key: &[u8]);
+    /// Querying a range starting from the first key greater than or equal to the given key.
+    ///
+    /// Note: For simplicity, it returns only the values.
+    fn scan(&mut self, key: &[u8], n: usize) -> Vec<Box<[u8]>>;
 }
 
 /// A single operation that is applied to the key-value store.
@@ -104,6 +107,9 @@ pub enum Operation {
     /// Removing a key if it exists.
     Delete { key: Box<[u8]> },
+
+    /// Querying a range starting from the first key greater than or equal to the given key.
+    Scan { key: Box<[u8]>, n: usize },
 }
 
 /// A request sent by a client to a server.
diff --git a/src/server.rs b/src/server.rs
index 65b35ce..d78bb43 100644
--- a/src/server.rs
+++ b/src/server.rs
@@ -112,6 +112,11 @@ fn serve_requests_regular(
                 handle.delete(key);
                 assert!(write_response(&mut *writer, id, None).is_ok());
             }
+            Operation::Scan { ref key, n } => {
+                for v in handle.scan(key, *n) {
+                    assert!(write_response(&mut *writer, id, Some(&v[..])).is_ok());
+                }
+            }
         }
     }
 }
diff --git a/src/stores.rs b/src/stores.rs
index 8e98fed..81dc798 100644
--- a/src/stores.rs
+++ b/src/stores.rs
@@ -143,6 +143,70 @@ mod tests {
         assert_eq!(handle.get(b"foo"), None);
     }
 
+    fn map_test_scan(map: &impl KVMap) {
+        let mut handle = map.handle();
+        for i in 10000..20000usize {
+            let bytes = i.clone().to_be_bytes();
+            handle.set(&bytes, &bytes);
+        }
+
+        // query 10000 next 10000
+        let v = handle.scan(&10000_usize.to_be_bytes(), 10000);
+        assert_eq!(v.len(), 10000);
+        for i in 10000..20000usize {
+            let bytes = i.clone().to_be_bytes();
+            assert_eq!(*v[i - 10000], bytes);
+        }
+
+        // query 10000 next 20000, should have 10000
+        let v = handle.scan(&10000_usize.to_be_bytes(), 20000);
+        assert_eq!(v.len(), 10000);
+        for i in 10000..20000usize {
+            let bytes = i.clone().to_be_bytes();
+            assert_eq!(*v[i - 10000], bytes);
+        }
+
+        // query 10000 next 5, should have 5
+        let v = handle.scan(&10000_usize.to_be_bytes(), 5);
+        assert_eq!(v.len(), 5);
+        for i in 10000..10005usize {
+            let bytes = i.clone().to_be_bytes();
+            assert_eq!(*v[i - 10000], bytes);
+        }
+
+        // query 13333 next 444, should have 444
+        let v = handle.scan(&13333_usize.to_be_bytes(), 444);
+        assert_eq!(v.len(), 444);
+        for i in 13333..13777usize {
+            let bytes = i.clone().to_be_bytes();
+            assert_eq!(*v[i - 13333], bytes);
+        }
+
+        // query 13333 next 0, should have 0
+        let v = handle.scan(&13333_usize.to_be_bytes(), 0);
+        assert_eq!(v.len(), 0);
+
+        // query 20000 next 10000, should have 0
+        let v = handle.scan(&20000_usize.to_be_bytes(), 10000);
+        assert_eq!(v.len(), 0);
+
+        // query 0 next 5000, should have 5000
+        let v = handle.scan(&0_usize.to_be_bytes(), 5000);
+        assert_eq!(v.len(), 5000);
+        for i in 10000..15000usize {
+            let bytes = i.clone().to_be_bytes();
+            assert_eq!(*v[i - 10000], bytes);
+        }
+
+        // query 8000 next 5000, should have 5000
+        let v = handle.scan(&8000_usize.to_be_bytes(), 5000);
+        assert_eq!(v.len(), 5000);
+        for i in 10000..15000usize {
+            let bytes = i.clone().to_be_bytes();
+            assert_eq!(*v[i - 10000], bytes);
+        }
+    }
+
     #[test]
     fn mutex_btreemap() {
         let mut map = btreemap::MutexBTreeMap::new();
@@ -227,4 +291,15 @@
         let mut map = rocksdb::RocksDB::new(&opt);
         map_test(&mut map);
     }
+
+    #[test]
+    #[cfg(feature = "rocksdb")]
+    fn rocksdb_scan() {
+        let tmp_dir = tempfile::tempdir().unwrap();
+        let opt = rocksdb::RocksDBOpt {
+            path: tmp_dir.path().to_str().unwrap().to_string(),
+        };
+        let mut map = rocksdb::RocksDB::new(&opt);
+        map_test_scan(&mut map);
+    }
 }
diff --git a/src/stores/btreemap.rs b/src/stores/btreemap.rs
index d709f2c..bd3c6f1 100644
--- a/src/stores/btreemap.rs
+++ b/src/stores/btreemap.rs
@@ -60,6 +60,11 @@ impl KVMapHandle for MutexBTreeMap {
     fn delete(&mut self, key: &[u8]) {
         self.0.lock().remove(key);
     }
+
+    fn scan(&mut self, _key: &[u8], _n: usize) -> Vec<Box<[u8]>> {
+        // technically iteration is supported but querying a specific range is not a stable feature
+        unimplemented!("Range query is not supported");
+    }
 }
 
 inventory::submit! {
@@ -102,6 +107,11 @@ impl KVMapHandle for RwLockBTreeMap {
     fn delete(&mut self, key: &[u8]) {
         self.0.write().remove(key);
     }
+
+    fn scan(&mut self, _key: &[u8], _n: usize) -> Vec<Box<[u8]>> {
+        // technically iteration is supported but querying a specific range is not a stable feature
+        unimplemented!("Range query is not supported");
+    }
 }
 
 inventory::submit! {
diff --git a/src/stores/chashmap.rs b/src/stores/chashmap.rs
index 7280b7e..1db689e 100644
--- a/src/stores/chashmap.rs
+++ b/src/stores/chashmap.rs
@@ -45,6 +45,10 @@ impl KVMapHandle for CHashMap {
     fn delete(&mut self, key: &[u8]) {
         self.0.remove(key);
     }
+
+    fn scan(&mut self, _key: &[u8], _n: usize) -> Vec<Box<[u8]>> {
+        unimplemented!("Range query is not supported");
+    }
 }
 
 inventory::submit! {
diff --git a/src/stores/contrie.rs b/src/stores/contrie.rs
index be37bb6..45bdc3e 100644
--- a/src/stores/contrie.rs
+++ b/src/stores/contrie.rs
@@ -46,6 +46,10 @@ impl KVMapHandle for Contrie {
     fn delete(&mut self, key: &[u8]) {
         self.0.remove(key);
     }
+
+    fn scan(&mut self, _key: &[u8], _n: usize) -> Vec<Box<[u8]>> {
+        unimplemented!("Range query is not supported");
+    }
 }
 
 inventory::submit! {
diff --git a/src/stores/dashmap.rs b/src/stores/dashmap.rs
index ffa9940..7ed0e6c 100644
--- a/src/stores/dashmap.rs
+++ b/src/stores/dashmap.rs
@@ -46,6 +46,10 @@ impl KVMapHandle for DashMap {
     fn delete(&mut self, key: &[u8]) {
         self.0.remove(key);
     }
+
+    fn scan(&mut self, _key: &[u8], _n: usize) -> Vec<Box<[u8]>> {
+        unimplemented!("Range query is not supported");
+    }
 }
 
 inventory::submit! {
diff --git a/src/stores/flurry.rs b/src/stores/flurry.rs
index 3b918e8..9a4efdb 100644
--- a/src/stores/flurry.rs
+++ b/src/stores/flurry.rs
@@ -46,6 +46,10 @@ impl KVMapHandle for Flurry {
     fn delete(&mut self, key: &[u8]) {
         self.0.pin().remove(key);
     }
+
+    fn scan(&mut self, _key: &[u8], _n: usize) -> Vec<Box<[u8]>> {
+        unimplemented!("Range query is not supported");
+    }
 }
 
 inventory::submit! {
diff --git a/src/stores/hashmap.rs b/src/stores/hashmap.rs
index 1719fde..cfd4c30 100644
--- a/src/stores/hashmap.rs
+++ b/src/stores/hashmap.rs
@@ -99,6 +99,10 @@
         let sid = shard(key, self.nr_shards);
         self.shards[sid].lock().remove(key);
     }
+
+    fn scan(&mut self, _key: &[u8], _n: usize) -> Vec<Box<[u8]>> {
+        unimplemented!("Range query is not supported");
+    }
 }
 
 inventory::submit! {
@@ -161,6 +165,10 @@
         let sid = shard(key, self.nr_shards);
         self.shards[sid].write().remove(key);
     }
+
+    fn scan(&mut self, _key: &[u8], _n: usize) -> Vec<Box<[u8]>> {
+        unimplemented!("Range query is not supported");
+    }
 }
 
 inventory::submit! {
diff --git a/src/stores/null.rs b/src/stores/null.rs
index 2c150d6..a6deec2 100644
--- a/src/stores/null.rs
+++ b/src/stores/null.rs
@@ -54,6 +54,10 @@
     }
 
     fn delete(&mut self, _key: &[u8]) {}
+
+    fn scan(&mut self, _key: &[u8], _n: usize) -> Vec<Box<[u8]>> {
+        Vec::new()
+    }
 }
 
 inventory::submit! {
diff --git a/src/stores/papaya.rs b/src/stores/papaya.rs
index e08ae2e..fe3e0e5 100644
--- a/src/stores/papaya.rs
+++ b/src/stores/papaya.rs
@@ -46,6 +46,10 @@ impl KVMapHandle for Papaya {
     fn delete(&mut self, key: &[u8]) {
         self.0.pin().remove(key);
     }
+
+    fn scan(&mut self, _key: &[u8], _n: usize) -> Vec<Box<[u8]>> {
+        unimplemented!("Range query is not supported");
+    }
 }
 
 inventory::submit! {
diff --git a/src/stores/rocksdb.rs b/src/stores/rocksdb.rs
index c658686..81f7eab 100644
--- a/src/stores/rocksdb.rs
+++ b/src/stores/rocksdb.rs
@@ -12,7 +12,7 @@
 use crate::stores::{BenchKVMap, Registry};
 use crate::*;
 
-use rocksdb::DB;
+use rocksdb::{Direction, IteratorMode, DB};
 use serde::Deserialize;
 
 #[derive(Deserialize)]
@@ -59,6 +59,22 @@ impl KVMapHandle for RocksDB {
     fn delete(&mut self, key: &[u8]) {
         assert!(self.db.delete(key).is_ok());
     }
+
+    fn scan(&mut self, key: &[u8], n: usize) -> Vec<Box<[u8]>> {
+        let mut kv = Vec::with_capacity(n);
+        let iter = self
+            .db
+            .iterator(IteratorMode::From(key, Direction::Forward));
+        let mut i = 0;
+        for item in iter {
+            if i == n {
+                break;
+            }
+            kv.push(item.unwrap().1);
+            i += 1;
+        }
+        kv
+    }
 }
 
 inventory::submit! {
diff --git a/src/stores/scc.rs b/src/stores/scc.rs
index 15e5493..1ba0a3e 100644
--- a/src/stores/scc.rs
+++ b/src/stores/scc.rs
@@ -49,6 +49,10 @@ impl KVMapHandle for SccHashMap {
     fn delete(&mut self, key: &[u8]) {
         self.0.remove(key);
     }
+
+    fn scan(&mut self, _key: &[u8], _n: usize) -> Vec<Box<[u8]>> {
+        unimplemented!("Range query is not supported");
+    }
 }
 
 inventory::submit! {
diff --git a/src/workload.rs b/src/workload.rs
index a4746e2..36306f4 100644
--- a/src/workload.rs
+++ b/src/workload.rs
@@ -16,6 +16,7 @@ enum OperationType {
     Set,
     Get,
     Delete,
+    Scan,
 }
 
 /// Mix defines the percentages of operations, it consists of multiple supported operations
@@ -27,8 +28,8 @@ struct Mix {
 }
 
 impl Mix {
-    fn new(set: u8, get: u8, delete: u8) -> Self {
-        let dist = WeightedIndex::new(&[set, get, delete]).unwrap();
+    fn new(set: u8, get: u8, delete: u8, scan: u8) -> Self {
+        let dist = WeightedIndex::new(&[set, get, delete, scan]).unwrap();
         Self { dist }
     }
 
@@ -37,6 +38,7 @@
             OperationType::Set,
             OperationType::Get,
             OperationType::Delete,
+            OperationType::Scan,
         ];
         ops[self.dist.sample(rng)].clone()
     }
@@ -149,7 +151,9 @@ impl KeyGenerator {
 /// A set of workload parameters that can be deserialized from a TOML string.
 ///
 /// This struct is used for interacting with workload configuration files and also create new
-/// [`Workload`] instances.
+/// [`Workload`] instances. Some options are wrapped in an `Option` type to ease writing
+/// configuration files. If users would like to create a [`Workload`] instance directly using these
+/// options, all fields must be present.
 #[derive(Deserialize, Clone, Debug, PartialEq)]
 pub struct WorkloadOpt {
     /// Percentage of `SET` operations.
@@ -161,6 +165,12 @@ pub struct WorkloadOpt {
     /// Percentage of `GET` operations.
     pub get_perc: u8,
 
     /// Percentage of `DELETE` operations.
     pub del_perc: u8,
 
+    /// Percentage of `SCAN` operations.
+    pub scan_perc: u8,
+
+    /// The number of iterations per `SCAN` (only used when `scan_perc` is non-zero, default 10).
+    pub scan: Option<usize>,
+
     /// Key length in bytes.
     pub klen: Option<usize>,
 
@@ -200,15 +210,16 @@ pub struct WorkloadOpt {
 /// The minimal unit of workload context with its access pattern (mix and key generator).
 ///
 /// The values generated internally are fixed-sized only for now, similar to the keys. To
-/// pressurize the
-/// memory allocator, it might be a good idea to randomly adding a byte or two at each generated
-/// values.
+/// pressurize the memory allocator, it might be a good idea to randomly add a byte or two to
+/// each generated value.
 #[derive(Debug)]
 pub struct Workload {
     /// Percentage of different operations
     mix: Mix,
     /// Key generator based on distribution
     kgen: KeyGenerator,
+    /// Scan length
+    scan: usize,
     /// Value length for operations that need a value
     vlen: usize,
     /// How many operations have been access so far
@@ -219,14 +230,16 @@ impl Workload {
     pub fn new(opt: &WorkloadOpt, thread_info: Option<(usize, usize)>) -> Self {
         // input sanity checks
         assert_eq!(
-            opt.set_perc + opt.get_perc + opt.del_perc,
+            opt.set_perc + opt.get_perc + opt.del_perc + opt.scan_perc,
             100,
             "sum of ops in a mix should be 100"
         );
+        let scan = opt.scan.expect("scan should be specified");
         let klen = opt.klen.expect("klen should be specified");
         let vlen = opt.vlen.expect("vlen should be specified");
         let kmin = opt.kmin.expect("kmin should be specified");
         let kmax = opt.kmax.expect("kmax should be specified");
+        assert!(scan > 0, "scan size should be positive");
         assert!(klen > 0, "klen should be positive");
         assert!(kmax > kmin, "kmax should be greater than kmin");
@@ -243,7 +256,7 @@
             (kminp, kmaxp)
         };
 
-        let mix = Mix::new(opt.set_perc, opt.get_perc, opt.del_perc);
+        let mix = Mix::new(opt.set_perc, opt.get_perc, opt.del_perc, opt.scan_perc);
         let kgen = match opt.dist.as_str() {
             "increment" => KeyGenerator::new_increment(klen, kmin, kmax),
             "incrementp" => {
@@ -273,6 +286,7 @@
         Self {
             mix,
             kgen,
+            scan,
             vlen,
             count: 0,
         }
@@ -302,6 +316,7 @@
             }
             OperationType::Get => Operation::Get { key },
             OperationType::Delete => Operation::Delete { key },
+            OperationType::Scan => Operation::Scan { key, n: self.scan },
         }
     }
@@ -323,24 +338,28 @@ mod tests {
     #[test]
     fn mix_one_type_only() {
         let mut rng = rand::thread_rng();
-        let mix = Mix::new(100, 0, 0);
+        let mix = Mix::new(100, 0, 0, 0);
         for _ in 0..100 {
             assert!(matches!(mix.next(&mut rng), OperationType::Set));
         }
-        let mix = Mix::new(0, 100, 0);
+        let mix = Mix::new(0, 100, 0, 0);
         for _ in 0..100 {
             assert!(matches!(mix.next(&mut rng), OperationType::Get));
         }
-        let mix = Mix::new(0, 0, 100);
+        let mix = Mix::new(0, 0, 100, 0);
         for _ in 0..100 {
             assert!(matches!(mix.next(&mut rng), OperationType::Delete));
         }
+        let mix = Mix::new(0, 0, 0, 100);
+        for _ in 0..100 {
+            assert!(matches!(mix.next(&mut rng), OperationType::Scan));
+        }
     }
 
     #[test]
     fn mix_small_write() {
         let mut rng = rand::thread_rng();
-        let mix = Mix::new(5, 95, 0);
+        let mix = Mix::new(5, 95, 0, 0);
         let mut set = 0;
         #[allow(unused)]
         let mut get = 0;
@@ -349,6 +368,7 @@
                 OperationType::Set => set += 1,
                 OperationType::Get => get += 1,
                 OperationType::Delete => unreachable!(),
+                OperationType::Scan => unreachable!(),
             };
         }
         assert!(set < 65000 && set > 35000);
@@ -494,7 +514,9 @@ fn workload_toml_correct() {
         let s = r#"set_perc = 70
                    get_perc = 20
-                   del_perc = 10
+                   del_perc = 5
+                   scan_perc = 5
+                   scan = 10
                    klen = 4
                    vlen = 6
                    dist = "uniform"
@@ -509,7 +531,9 @@
         let s = r#"set_perc = 70
                    get_perc = 20
-                   del_perc = 10
+                   del_perc = 5
+                   scan_perc = 5
+                   scan = 10
                    klen = 40
                    vlen = 60
                    dist = "zipfian"
@@ -526,7 +550,9 @@
         let s = r#"set_perc = 60
                    get_perc = 25
-                   del_perc = 15
+                   del_perc = 10
+                   scan_perc = 5
+                   scan = 10
                    klen = 14
                    vlen = 16
                    dist = "shuffle"
@@ -546,6 +572,8 @@
         let s = r#"set_perc = 60
                    get_perc = 40
                    del_perc = 0
+                   scan_perc = 0
+                   scan = 10
                    klen = 0
                    vlen = 6
                    dist = "uniform"
                    kmin = 0
                    kmax = 12345"#;
         let _ = Workload::new_from_toml_str(s, None);
     }
 
+    #[test]
+    #[should_panic(expected = "should be positive")]
+    fn workload_toml_invalid_wrong_scan() {
+        let s = r#"set_perc = 60
+                   get_perc = 40
+                   del_perc = 0
+                   scan_perc = 0
+                   scan = 0
+                   klen = 2
+                   vlen = 6
+                   dist = "uniform"
+                   kmin = 0
+                   kmax = 12345"#;
+        let _ = Workload::new_from_toml_str(s, None);
+    }
+
     #[test]
     #[should_panic(expected = "should be specified")]
     fn workload_toml_invalid_missing_fields() {
         let s = r#"set_perc = 60
                    get_perc = 40
                    del_perc = 0
+                   scan_perc = 0
+                   scan = 10
                    dist = "uniform"
                    kmin = 0
                    kmax = 12345"#;
@@ -572,6 +618,8 @@
         let s = r#"set_perc = 60
                    get_perc = 40
                    del_perc = 0
+                   scan_perc = 0
+                   scan = 10
                    klen = 4
                    vlen = 6
                    dist = "uniform"
@@ -586,6 +634,8 @@
         let s = r#"set_perc = 70
                    get_perc = 40
                    del_perc = 0
+                   scan_perc = 0
+                   scan = 10
                    klen = 4
                    vlen = 6
                    dist = "uniform"
@@ -600,6 +650,8 @@
         let s = r#"set_perc = 70
                    get_perc = 30
                    del_perc = 0
+                   scan_perc = 0
+                   scan = 10
                    klen = 4
                    vlen = 6
                    dist = "uniorm"
@@ -614,6 +666,8 @@
             set_perc: 50,
             get_perc: 50,
             del_perc: 0,
+            scan_perc: 0,
+            scan: Some(10),
             klen: Some(16),
             vlen: Some(100),
             dist: "incrementp".to_string(),
@@ -648,6 +702,8 @@
             set_perc: 100,
             get_perc: 0,
             del_perc: 0,
+            scan_perc: 0,
+            scan: Some(10),
             klen: Some(16),
             vlen: Some(100),
             dist: "incrementp".to_string(),
@@ -685,6 +741,8 @@
             set_perc: 5,
             get_perc: 95,
             del_perc: 0,
+            scan_perc: 0,
+            scan: Some(10),
             klen: Some(16),
             vlen: Some(100),
             dist: "latest".to_string(),
@@ -730,6 +788,8 @@
             set_perc: 50,
             get_perc: 50,
             del_perc: 0,
+            scan_perc: 0,
+            scan: Some(10),
             klen: Some(16),
             vlen: Some(100),
             dist: "uniform".to_string(),
@@ -758,7 +818,7 @@
                     dist.entry(key).and_modify(|c| *c += 1).or_insert(0);
                     get += 1;
                 }
-                Operation::Delete { .. } => {
+                Operation::Delete { .. } | Operation::Scan { .. } => {
                     unreachable!();
                 }
             }