Our Personal Data Server from scratch! tranquil.farm
pds rust database fun oauth atproto
221
fork

Configure Feed

Select the types of activity you want to include in your feed.

test(tranquil-pds): mst fuzz + repo integrity properties

Lewis: May this revision serve well! <lu5a@proton.me>

authored by lu5a.myatproto.social and committed by

Tangled bc8fd66a 180de299

+682
+334
crates/tranquil-pds/tests/mst_integrity.rs
//! Integration tests for repo/MST integrity: CAR export round-trips, swap-commit
//! write serialization, repo import events, and firehose commit CAR contents.
//!
//! NOTE(review): `client()`, `base_url()`, `app_port()`, `get_test_repos()` and
//! `create_account_and_login()` come in via `use common::*` (shared test harness);
//! their exact semantics live in tests/common — confirm there.

mod common;
mod firehose;
mod helpers;

use std::collections::BTreeMap;
use std::io::Cursor;
use std::sync::Arc;
use std::time::Duration;

use bytes::Bytes;
use cid::Cid;
use common::*;
use firehose::FirehoseConsumer;
use helpers::build_car_with_signature;
use iroh_car::CarReader;
use jacquard_repo::commit::Commit;
use jacquard_repo::mst::Mst;
use jacquard_repo::storage::{BlockStore, MemoryBlockStore};
use k256::ecdsa::SigningKey;
use reqwest::StatusCode;
use serde_json::{Value, json};
use tranquil_db_traits::{EventBlocks, RepoEventType, SequenceNumber, SequencedEvent};
use tranquil_scopes::RepoAction;
use tranquil_types::Did;

/// Parse a CAR archive into its header root CIDs and a CID -> block-bytes map.
///
/// Collects blocks until `next_block()` yields `None` or errors; a read error
/// silently ends the loop, which is acceptable for locally produced test CARs.
async fn car_to_blocks(car_bytes: &[u8]) -> (Vec<Cid>, BTreeMap<Cid, Bytes>) {
    let mut reader = CarReader::new(Cursor::new(car_bytes))
        .await
        .expect("parse CAR");
    let roots = reader.header().roots().to_vec();
    let mut blocks = BTreeMap::new();
    while let Ok(Some((cid, data))) = reader.next_block().await {
        blocks.insert(cid, Bytes::from(data));
    }
    (roots, blocks)
}

/// Create an `app.bsky.feed.post` record with the given rkey/text via
/// `com.atproto.repo.createRecord`, asserting the request returns 200 OK.
async fn create_post(client: &reqwest::Client, token: &str, did: &str, rkey: &str, text: &str) {
    let now = chrono::Utc::now().to_rfc3339();
    let res = client
        .post(format!(
            "{}/xrpc/com.atproto.repo.createRecord",
            base_url().await
        ))
        .bearer_auth(token)
        .json(&json!({
            "repo": did,
            "collection": "app.bsky.feed.post",
            "rkey": rkey,
            "record": {
                "$type": "app.bsky.feed.post",
                "text": text,
                "createdAt": now,
            }
        }))
        .send()
        .await
        .expect("createRecord");
    assert_eq!(res.status(), StatusCode::OK);
}

/// A repo exported via `com.atproto.sync.getRepo` must be a CAR whose single
/// root is the signed commit, whose MST root equals `commit.data()`, and which
/// contains a leaf block for every record previously written.
#[tokio::test]
async fn getrepo_car_roundtrips_mst_structure_and_records() {
    let client = client();
    let (token, did) = create_account_and_login(&client).await;

    // Write 20 posts with fixed, zero-padded rkeys so the expected MST paths
    // are fully deterministic.
    let expected_records: Vec<(String, String)> = (0..20)
        .map(|i| {
            let rkey = format!("3krtp{:08}", i);
            let text = format!("roundtrip record {i}");
            (rkey, text)
        })
        .collect();
    for (rkey, text) in &expected_records {
        create_post(&client, &token, &did, rkey, text).await;
    }

    let res = client
        .get(format!(
            "{}/xrpc/com.atproto.sync.getRepo",
            base_url().await
        ))
        .query(&[("did", did.as_str())])
        .send()
        .await
        .expect("getRepo");
    assert_eq!(res.status(), StatusCode::OK);
    let car_bytes = res.bytes().await.unwrap();

    let (roots, block_map) = car_to_blocks(&car_bytes).await;
    assert_eq!(roots.len(), 1, "CAR must have exactly one root");
    let commit_cid = roots[0];
    // Re-hydrate the exported blocks into an in-memory store so the MST can be
    // walked exactly as the server would walk its own storage.
    let storage = Arc::new(MemoryBlockStore::new_from_blocks(block_map));

    let commit_bytes = storage
        .get(&commit_cid)
        .await
        .unwrap()
        .expect("CAR contains commit block");
    let commit = Commit::from_cbor(&commit_bytes).expect("parse commit");
    let data_cid = *commit.data();

    let mst = Mst::load(storage.clone(), data_cid, None);
    let loaded_root = mst.get_pointer().await.expect("load root");
    assert_eq!(loaded_root, data_cid, "loaded MST pointer == commit.data()");

    // Every written record must resolve through the MST to a non-empty block
    // that is physically present in the CAR.
    for (rkey, _) in &expected_records {
        let path = format!("app.bsky.feed.post/{rkey}");
        let leaf = mst
            .get(&path)
            .await
            .expect("mst.get")
            .unwrap_or_else(|| panic!("record {path} missing from exported MST"));
        let leaf_bytes = storage
            .get(&leaf)
            .await
            .unwrap()
            .unwrap_or_else(|| panic!("record block {leaf} missing from CAR"));
        assert!(!leaf_bytes.is_empty(), "record bytes empty");
    }
}

/// Two concurrent `putRecord` calls carrying the same `swapCommit` CID behave
/// as a compare-and-swap: exactly one may succeed (asserted via XOR below).
#[tokio::test]
async fn concurrent_swap_commit_writes_serialize() {
    let client = client();
    let (token, did) = create_account_and_login(&client).await;

    // Anchor commit so getLatestCommit has something to return.
    create_post(&client, &token, &did, "3kswap00000001", "anchor").await;

    let latest_res = client
        .get(format!(
            "{}/xrpc/com.atproto.sync.getLatestCommit",
            base_url().await
        ))
        .query(&[("did", did.as_str())])
        .send()
        .await
        .expect("getLatestCommit");
    assert_eq!(latest_res.status(), StatusCode::OK);
    let latest: Value = latest_res.json().await.unwrap();
    let swap_cid = latest["cid"].as_str().unwrap().to_string();

    // Both writers target distinct rkeys but condition on the same commit CID.
    let now = chrono::Utc::now().to_rfc3339();
    let payload_a = json!({
        "repo": did,
        "collection": "app.bsky.feed.post",
        "rkey": "3kswap00000002",
        "record": {
            "$type": "app.bsky.feed.post",
            "text": "writer A",
            "createdAt": now,
        },
        "swapCommit": swap_cid,
    });
    let payload_b = json!({
        "repo": did,
        "collection": "app.bsky.feed.post",
        "rkey": "3kswap00000003",
        "record": {
            "$type": "app.bsky.feed.post",
            "text": "writer B",
            "createdAt": now,
        },
        "swapCommit": swap_cid,
    });

    // Fire both requests concurrently on the same task.
    let base = base_url().await;
    let (res_a, res_b) = tokio::join!(
        client
            .post(format!("{base}/xrpc/com.atproto.repo.putRecord"))
            .bearer_auth(&token)
            .json(&payload_a)
            .send(),
        client
            .post(format!("{base}/xrpc/com.atproto.repo.putRecord"))
            .bearer_auth(&token)
            .json(&payload_b)
            .send(),
    );
    let status_a = res_a.expect("A send").status();
    let status_b = res_b.expect("B send").status();

    let ok_a = status_a == StatusCode::OK;
    let ok_b = status_b == StatusCode::OK;
    // XOR: one and only one write may win the swap.
    assert!(
        ok_a ^ ok_b,
        "exactly one swap_commit write must succeed: status_a={status_a}, status_b={status_b}"
    );
}

/// Importing a signed CAR via `com.atproto.repo.importRepo` must sequence a
/// Commit event whose inline blocks include the commit block itself.
#[tokio::test]
async fn imported_repo_emits_commit_event_with_valid_car() {
    let client = client();
    let (token, did) = create_account_and_login(&client).await;

    // Build a self-signed repo CAR out-of-band (helper from tests/helpers).
    let signing_key = SigningKey::random(&mut rand::thread_rng());
    let (car_bytes, _car_root_cid) = build_car_with_signature(&did, &signing_key);

    let import_res = client
        .post(format!(
            "{}/xrpc/com.atproto.repo.importRepo",
            base_url().await
        ))
        .bearer_auth(&token)
        .header("Content-Type", "application/vnd.ipld.car")
        .body(car_bytes)
        .send()
        .await
        .expect("importRepo");
    assert_eq!(
        import_res.status(),
        StatusCode::OK,
        "import failed: {:?}",
        import_res.text().await.unwrap_or_default()
    );

    // Inspect the sequencer directly (not the firehose) for this DID's events.
    let repos = get_test_repos().await;
    let typed_did = Did::new(did.clone()).unwrap();
    let events = repos
        .repo
        .get_events_since_seq(SequenceNumber::ZERO, None)
        .await
        .expect("events");
    let our: Vec<&SequencedEvent> = events
        .iter()
        .filter(|e| e.did == typed_did && e.event_type == RepoEventType::Commit)
        .collect();
    let last = our.last().expect("at least one commit event after import");

    let inline = match last.blocks.as_ref().expect("blocks present") {
        EventBlocks::Inline(v) => v,
        _ => panic!("expected inline blocks"),
    };
    assert!(
        !inline.is_empty(),
        "import event inline blocks must not be empty"
    );

    // The event's commit_cid must correspond to one of the inline blocks.
    let have_commit = inline.iter().any(|b| {
        let cid = Cid::read_bytes(b.cid_bytes.as_slice()).unwrap();
        last.commit_cid
            .as_ref()
            .and_then(|c| c.to_cid())
            .map(|commit_cid| cid == commit_cid)
            .unwrap_or(false)
    });
    assert!(have_commit, "import event CAR must include commit block");
}

/// Every block in a firehose commit frame's CAR must hash (sha2-256) back to
/// its own CID — i.e. the emitted bytes are content-addressed correctly.
#[tokio::test]
async fn firehose_commit_block_bytes_roundtrip_to_same_cid() {
    let client = client();
    let (token, did) = create_account_and_login(&client).await;

    // Subscribe from the current max sequence so only our write is observed.
    let repos = get_test_repos().await;
    let cursor = repos.repo.get_max_seq().await.unwrap().as_i64();
    let consumer = FirehoseConsumer::connect_with_cursor(app_port(), cursor).await;
    // Grace period for the websocket subscription to become active.
    tokio::time::sleep(Duration::from_millis(100)).await;

    create_post(&client, &token, &did, "3krt001", "round-trip me").await;
    let frames = consumer
        .wait_for_commits(&did, 1, Duration::from_secs(10))
        .await;
    let frame = frames.last().expect("frame");

    let (_, block_map) = car_to_blocks(&frame.blocks).await;
    use sha2::{Digest, Sha256};
    for (cid, bytes) in &block_map {
        let mut hasher = Sha256::new();
        hasher.update(bytes);
        let hash = hasher.finalize();
        // 0x12 is the multihash code for sha2-256; keep the original codec.
        let mh = multihash::Multihash::wrap(0x12, hash.as_slice()).expect("wrap");
        let recomputed = Cid::new_v1(cid.codec(), mh);
        assert_eq!(
            recomputed, *cid,
            "CAR block {cid} bytes do not hash back to same CID"
        );
    }
}

/// A batch `applyWrites` of creates must produce a firehose frame whose CAR
/// contains the record block for every create op the frame reports.
#[tokio::test]
async fn firehose_commit_car_contains_new_record_bytes_for_every_create() {
    let client = client();
    let (token, did) = create_account_and_login(&client).await;

    let repos = get_test_repos().await;
    let cursor = repos.repo.get_max_seq().await.unwrap().as_i64();
    let consumer = FirehoseConsumer::connect_with_cursor(app_port(), cursor).await;
    tokio::time::sleep(Duration::from_millis(100)).await;

    // Eight creates in one applyWrites batch -> one commit, many ops.
    let now = chrono::Utc::now().to_rfc3339();
    let writes: Vec<Value> = (0..8)
        .map(|i| {
            json!({
                "$type": "com.atproto.repo.applyWrites#create",
                "collection": "app.bsky.feed.post",
                "rkey": format!("3krec{:08}", i),
                "value": {
                    "$type": "app.bsky.feed.post",
                    "text": format!("rec {i}"),
                    "createdAt": now,
                }
            })
        })
        .collect();
    let res = client
        .post(format!(
            "{}/xrpc/com.atproto.repo.applyWrites",
            base_url().await
        ))
        .bearer_auth(&token)
        .json(&json!({ "repo": did, "writes": writes }))
        .send()
        .await
        .expect("applyWrites");
    assert_eq!(res.status(), StatusCode::OK);

    let frames = consumer
        .wait_for_commits(&did, 1, Duration::from_secs(10))
        .await;
    let frame = frames.last().expect("frame");

    let (_, block_map) = car_to_blocks(&frame.blocks).await;
    for op in &frame.ops {
        if op.action == RepoAction::Create {
            let cid = op.cid.expect("create cid");
            assert!(
                block_map.contains_key(&cid),
                "record CID {cid} for path {} missing from CAR",
                op.path
            );
        }
    }
}
+348
crates/tranquil-pds/tests/mst_property_fuzz.rs
//! Seeded fuzz tests for MST commit integrity: drive random create/update/
//! delete traffic against a live PDS, then verify every sequenced commit event
//! both forward (replay ops over prev root -> commit.data()) and inverse
//! (invert ops from new root -> prev root).
//!
//! NOTE(review): `client()`, `base_url()`, `get_test_repos()` and
//! `create_account_and_login()` come in via `use common::*` — see tests/common.

mod common;
mod mst_verify;

use std::collections::HashMap;
use std::str::FromStr;

use cid::Cid;
use common::*;
use jacquard_common::smol_str::SmolStr;
use jacquard_repo::commit::Commit;
use jacquard_repo::mst::{Mst, VerifiedWriteOp};
use jacquard_repo::storage::BlockStore;
use mst_verify::{extract_event_blocks, inline_to_store};
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use reqwest::StatusCode;
use serde_json::{Value, json};
use tranquil_db_traits::{RepoEventType, SequenceNumber, SequencedEvent};
use tranquil_types::Did;

/// Collections the fuzzer writes into.
const COLLECTIONS: &[&str] = &[
    "app.bsky.feed.post",
    "app.bsky.feed.like",
    "app.bsky.graph.follow",
    "app.bsky.feed.repost",
];

/// One randomly chosen repo mutation per fuzz step.
#[derive(Copy, Clone, Debug)]
enum FuzzOp {
    Create,
    Update,
    Delete,
}

/// Pick the next op: 6/10 create, 2/10 update, 2/10 delete; forced to create
/// while no live keys exist yet.
fn pick_op(rng: &mut StdRng, have_keys: bool) -> FuzzOp {
    match (have_keys, rng.gen_range(0..10)) {
        (false, _) => FuzzOp::Create,
        (_, 0..=5) => FuzzOp::Create,
        (_, 6..=7) => FuzzOp::Update,
        _ => FuzzOp::Delete,
    }
}

/// Generate a 13-char rkey from the TID-style base32-sortable alphabet
/// (`234567a-z`), so keys are valid record keys but randomly distributed.
fn random_rkey(rng: &mut StdRng) -> String {
    let tid_char_pool = b"234567abcdefghijklmnopqrstuvwxyz";
    let mut out = Vec::with_capacity(13);
    (0..13).for_each(|_| {
        let c = tid_char_pool[rng.gen_range(0..tid_char_pool.len())];
        out.push(c);
    });
    String::from_utf8(out).unwrap()
}

/// Uniformly pick one of the fuzzed collections.
fn random_collection(rng: &mut StdRng) -> &'static str {
    COLLECTIONS[rng.gen_range(0..COLLECTIONS.len())]
}

/// Build a record body for the given collection.
///
/// NOTE(review): likes and reposts get a post-shaped body (`text`) here rather
/// than a `subject` ref; only `app.bsky.graph.follow` takes the `subject`
/// branch. Presumably the PDS is lexicon-lenient for these tests — confirm.
fn record_for_collection(col: &str, text: &str, now: &str) -> Value {
    match col {
        "app.bsky.feed.post" | "app.bsky.feed.repost" | "app.bsky.feed.like" => json!({
            "$type": col,
            "text": text,
            "createdAt": now,
        }),
        _ => json!({
            "$type": col,
            "subject": format!("did:plc:synthetic{text}"),
            "createdAt": now,
        }),
    }
}

/// Verify one sequenced commit event two ways using only its own inline blocks:
///
/// 1. Forward: load the MST at `prev_data_cid`, replay the event's ops
///    (add for create/update, delete for delete), persist, and require the
///    resulting root to equal `commit.data()`.
/// 2. Inverse: load the MST at the new root, invert each op via `invert_op`,
///    and require the pointer to land back on `prev_data_cid`.
///
/// Returns `Err(message)` describing the first mismatch or decoding failure.
async fn verify_commit_forward_and_inverse(event: &SequencedEvent) -> Result<(), String> {
    let prev_data = event
        .prev_data_cid
        .as_ref()
        .and_then(|c| c.to_cid())
        .ok_or("no prev_data_cid")?;
    let commit_cid = event
        .commit_cid
        .as_ref()
        .and_then(|c| c.to_cid())
        .ok_or("no commit_cid")?;
    let inline = extract_event_blocks(event)?;
    let ops = event
        .ops
        .as_ref()
        .and_then(|v| v.as_array())
        .ok_or("ops not array")?;

    // The event must be self-contained: its inline blocks alone form the store.
    let storage = inline_to_store(inline);
    let commit_bytes = storage
        .get(&commit_cid)
        .await
        .map_err(|e| format!("get commit: {e:?}"))?
        .ok_or("missing commit block")?;
    let commit = Commit::from_cbor(&commit_bytes).map_err(|e| format!("parse commit: {e:?}"))?;
    let new_data = *commit.data();

    // Forward replay: prev root + ops must reproduce the committed data root.
    let mut forward = Mst::load(storage.clone(), prev_data, None);
    for op in ops {
        let action = op["action"].as_str().ok_or("op.action")?;
        let path = op["path"].as_str().ok_or("op.path")?;
        match action {
            "create" | "update" => {
                let cid = Cid::from_str(op["cid"].as_str().ok_or("op.cid")?)
                    .map_err(|e| format!("{e:?}"))?;
                forward = forward
                    .add(path, cid)
                    .await
                    .map_err(|e| format!("fwd add {path}: {e:?}"))?;
            }
            "delete" => {
                forward = forward
                    .delete(path)
                    .await
                    .map_err(|e| format!("fwd delete {path}: {e:?}"))?;
            }
            other => return Err(format!("unknown action {other}")),
        }
    }
    let got = forward
        .persist()
        .await
        .map_err(|e| format!("persist: {e:?}"))?;
    if got != new_data {
        return Err(format!("forward root mismatch exp={new_data} got={got}"));
    }

    // Inverse replay: inverting each op from the new root must restore prev.
    let mut inverse = Mst::load(storage, new_data, None);
    for op in ops {
        let action = op["action"].as_str().ok_or("op.action")?;
        let path = op["path"].as_str().ok_or("op.path")?;
        let key = SmolStr::new(path);
        let verified = match action {
            "create" => {
                let cid = Cid::from_str(op["cid"].as_str().ok_or("op.cid")?)
                    .map_err(|e| format!("{e:?}"))?;
                VerifiedWriteOp::Create { key, cid }
            }
            "update" => {
                let cid = Cid::from_str(op["cid"].as_str().ok_or("op.cid")?)
                    .map_err(|e| format!("{e:?}"))?;
                let prev = Cid::from_str(op["prev"].as_str().ok_or("op.prev")?)
                    .map_err(|e| format!("{e:?}"))?;
                VerifiedWriteOp::Update { key, cid, prev }
            }
            "delete" => {
                let prev = Cid::from_str(op["prev"].as_str().ok_or("op.prev")?)
                    .map_err(|e| format!("{e:?}"))?;
                VerifiedWriteOp::Delete { key, prev }
            }
            other => return Err(format!("unknown action {other}")),
        };
        let inverted = inverse
            .invert_op(verified.clone())
            .await
            .map_err(|e| format!("invert {verified:?}: {e:?}"))?;
        if !inverted {
            return Err(format!("op not invertible: {verified:?}"));
        }
    }
    let got_prev = inverse
        .get_pointer()
        .await
        .map_err(|e| format!("get_pointer: {e:?}"))?;
    if got_prev != prev_data {
        return Err(format!(
            "inverse root mismatch exp={prev_data} got={got_prev}"
        ));
    }
    Ok(())
}

/// Run `steps` random mutations against a fresh account using a deterministic
/// RNG seeded with `seed`, then verify every resulting commit event and return
/// the list of failure descriptions (empty = all commits verified).
///
/// `live_keys` maps record path -> collection so updates/deletes always target
/// records known to exist.
async fn fuzz_run_with_seed(seed: u64, steps: usize) -> Vec<String> {
    let client = client();
    let (token, did) = create_account_and_login(&client).await;
    let mut rng = StdRng::seed_from_u64(seed);

    let mut live_keys: HashMap<String, String> = HashMap::new();

    for step in 0..steps {
        let now = chrono::Utc::now().to_rfc3339();
        let op = pick_op(&mut rng, !live_keys.is_empty());
        match op {
            FuzzOp::Create => {
                let col = random_collection(&mut rng);
                let rkey = random_rkey(&mut rng);
                let path = format!("{col}/{rkey}");
                // Skip (rather than overwrite) on the rare key collision.
                if live_keys.contains_key(&path) {
                    continue;
                }
                let record = record_for_collection(col, &format!("s{seed}-n{step}"), &now);
                let res = client
                    .post(format!(
                        "{}/xrpc/com.atproto.repo.createRecord",
                        base_url().await
                    ))
                    .bearer_auth(&token)
                    .json(&json!({
                        "repo": did,
                        "collection": col,
                        "rkey": rkey,
                        "record": record,
                    }))
                    .send()
                    .await
                    .expect("createRecord");
                // Only track keys the server actually accepted.
                if res.status() == StatusCode::OK {
                    live_keys.insert(path, col.to_string());
                }
            }
            FuzzOp::Update => {
                let keys: Vec<&String> = live_keys.keys().collect();
                if keys.is_empty() {
                    continue;
                }
                let path = keys[rng.gen_range(0..keys.len())].clone();
                let col = live_keys.get(&path).unwrap().clone();
                let rkey = path.split('/').nth(1).unwrap().to_string();
                let record = record_for_collection(&col, &format!("s{seed}-u{step}"), &now);
                let res = client
                    .post(format!(
                        "{}/xrpc/com.atproto.repo.putRecord",
                        base_url().await
                    ))
                    .bearer_auth(&token)
                    .json(&json!({
                        "repo": did,
                        "collection": col,
                        "rkey": rkey,
                        "record": record,
                    }))
                    .send()
                    .await
                    .expect("putRecord");
                // Updates target known-live keys, so failure is a test failure.
                assert_eq!(res.status(), StatusCode::OK, "putRecord failed");
            }
            FuzzOp::Delete => {
                let keys: Vec<String> = live_keys.keys().cloned().collect();
                if keys.is_empty() {
                    continue;
                }
                let path = keys[rng.gen_range(0..keys.len())].clone();
                let col = live_keys.get(&path).unwrap().clone();
                let rkey = path.split('/').nth(1).unwrap().to_string();
                let res = client
                    .post(format!(
                        "{}/xrpc/com.atproto.repo.deleteRecord",
                        base_url().await
                    ))
                    .bearer_auth(&token)
                    .json(&json!({
                        "repo": did,
                        "collection": col,
                        "rkey": rkey,
                    }))
                    .send()
                    .await
                    .expect("deleteRecord");
                if res.status() == StatusCode::OK {
                    live_keys.remove(&path);
                }
            }
        }
    }

    // Pull every sequenced event and keep this DID's verifiable commits:
    // must have a prev_data_cid and a non-empty ops array.
    let repos = get_test_repos().await;
    let typed_did = Did::new(did.clone()).unwrap();
    let events = repos
        .repo
        .get_events_since_seq(SequenceNumber::ZERO, None)
        .await
        .expect("get_events_since_seq");

    let our: Vec<SequencedEvent> = events
        .into_iter()
        .filter(|e| {
            e.did == typed_did
                && e.event_type == RepoEventType::Commit
                && e.prev_data_cid.is_some()
                && e.ops
                    .as_ref()
                    .and_then(|v| v.as_array())
                    .is_some_and(|a| !a.is_empty())
        })
        .collect();

    // Collect all failures rather than stopping at the first, so one run
    // reports every bad commit with its seq and op count.
    let mut failures = Vec::new();
    for event in &our {
        if let Err(msg) = verify_commit_forward_and_inverse(event).await {
            failures.push(format!(
                "seed={seed} seq={} ops={:?}: {msg}",
                event.seq.as_i64(),
                event
                    .ops
                    .as_ref()
                    .and_then(|v| v.as_array())
                    .map(|a| a.len())
            ));
        }
    }
    failures
}

/// 150-step fuzz run, seed 1.
#[tokio::test]
async fn mst_property_fuzz_seed_1() {
    let failures = fuzz_run_with_seed(1, 150).await;
    assert!(
        failures.is_empty(),
        "fuzz seed=1 found {} invalid commits:\n - {}",
        failures.len(),
        failures.join("\n - ")
    );
}

/// 150-step fuzz run, seed 42.
#[tokio::test]
async fn mst_property_fuzz_seed_42() {
    let failures = fuzz_run_with_seed(42, 150).await;
    assert!(
        failures.is_empty(),
        "fuzz seed=42 found {} invalid commits:\n - {}",
        failures.len(),
        failures.join("\n - ")
    );
}

/// 150-step fuzz run, seed 9001.
#[tokio::test]
async fn mst_property_fuzz_seed_9001() {
    let failures = fuzz_run_with_seed(9001, 150).await;
    assert!(
        failures.is_empty(),
        "fuzz seed=9001 found {} invalid commits:\n - {}",
        failures.len(),
        failures.join("\n - ")
    );
}

/// 400-step run to grow a deeper tree (more MST levels, more node splits).
#[tokio::test]
async fn mst_property_fuzz_deep_tree_seed_7() {
    let failures = fuzz_run_with_seed(7, 400).await;
    assert!(
        failures.is_empty(),
        "fuzz deep seed=7 found {} invalid commits:\n - {}",
        failures.len(),
        failures.join("\n - ")
    );
}