Our Personal Data Server from scratch! tranquil.farm
pds rust database fun oauth atproto
213
fork

Configure Feed

Select the types of activity you want to include in your feed.

feat(tranquil-store): soak harness driving the leak gate; signal tweaks

Lewis: May this revision serve well! <lu5a@proton.me>

+511 -45
+1
.config/nextest.toml
··· 45 45 retries = 0 46 46 fail-fast = false 47 47 test-threads = 1 48 + slow-timeout = { period = "5m", terminate-after = 1000 } 48 49 49 50 [test-groups] 50 51 serial-env-tests = { max-threads = 1 }
+3 -3
crates/tranquil-api/src/admin/signal.rs
··· 76 76 let result = tokio::select! { 77 77 biased; 78 78 _ = shutdown.cancelled() => { 79 - tracing::info!("Signal linking aborted due to server shutdown"); 79 + tracing::info!("server shutting down, aborting signal linking"); 80 80 return; 81 81 } 82 82 r = link_result.completion => r, ··· 84 84 match result { 85 85 Ok(Ok(client)) => { 86 86 if slot_for_task.complete_link(generation, client).await { 87 - tracing::info!("Signal device linked successfully"); 87 + tracing::info!("signal device linked"); 88 88 } else { 89 89 tracing::warn!( 90 - "Signal link completed but generation mismatch or already linked; discarding" 90 + "discarding completed signal link, generation mismatch or already linked" 91 91 ); 92 92 } 93 93 }
+1 -1
crates/tranquil-api/src/notification_prefs.rs
··· 282 282 "Invalid Telegram username. Must be 5-32 characters, alphanumeric or underscore".into(), 283 283 ), 284 284 CommsChannel::Signal => ApiError::InvalidRequest( 285 - "Invalid Signal username. Must be 3-32 characters followed by .XX (e.g. username.01)".into(), 285 + "Invalid Signal username. Must be a 3-32 character nickname, a dot, then a 2-20 digit discriminator".into(), 286 286 ), 287 287 CommsChannel::Email => ApiError::InvalidEmail, 288 288 });
+1 -1
crates/tranquil-comms/src/sender.rs
··· 299 299 "type": 1, 300 300 "options": [{ 301 301 "name": "handle", 302 - "description": "Your PDS handle (e.g. alice.example.com)", 302 + "description": "Your PDS handle", 303 303 "type": 3, 304 304 "required": false 305 305 }]
+2 -3
crates/tranquil-config/src/lib.rs
··· 394 394 395 395 #[derive(Debug, Config)] 396 396 pub struct ServerConfig { 397 - /// Public hostname of the PDS (e.g. `pds.example.com`). 397 + /// Public hostname of the PDS, such as `pds.example.com`. 398 398 #[config(env = "PDS_HOSTNAME")] 399 399 pub hostname: String, 400 400 ··· 463 463 format!("https://{}", self.hostname) 464 464 } 465 465 466 - /// Hostname without port suffix (e.g. `pds.example.com` from 467 - /// `pds.example.com:443`). 466 + /// Hostname without port suffix. Returns `pds.example.com` from `pds.example.com:443`. 468 467 pub fn hostname_without_port(&self) -> &str { 469 468 self.hostname.split(':').next().unwrap_or(&self.hostname) 470 469 }
+6 -1
crates/tranquil-pds/tests/security_fixes.rs
··· 88 88 assert!(is_valid_signal_username("bob_smith.99")); 89 89 assert!(is_valid_signal_username("user123.42")); 90 90 assert!(is_valid_signal_username("lu1.01")); 91 - assert!(is_valid_signal_username("abc.00")); 92 91 assert!(is_valid_signal_username("a_very_long_username_here.55")); 92 + assert!(is_valid_signal_username("alice.123")); 93 + assert!(is_valid_signal_username("alice.999999999")); 94 + assert!(is_valid_signal_username("alice.18446744073709551615")); 93 95 94 96 assert!(!is_valid_signal_username("alice")); 95 97 assert!(!is_valid_signal_username("alice.1")); 96 98 assert!(!is_valid_signal_username("alice.001")); 99 + assert!(!is_valid_signal_username("abc.00")); 100 + assert!(!is_valid_signal_username("alice.0")); 101 + assert!(!is_valid_signal_username("alice.999999999999999999999")); 97 102 assert!(!is_valid_signal_username(".01")); 98 103 assert!(!is_valid_signal_username("ab.01")); 99 104 assert!(!is_valid_signal_username(""));
+18 -9
crates/tranquil-signal/src/client.rs
··· 33 33 pub fn parse(username: &str) -> Result<Self, InvalidSignalUsername> { 34 34 let reject = || Err(InvalidSignalUsername(username.to_string())); 35 35 36 - if username.len() < 6 || username.len() > 35 { 37 - return reject(); 38 - } 39 - 40 36 let Some((base, discriminator)) = username.rsplit_once('.') else { 41 37 return reject(); 42 38 }; 43 39 44 - if base.len() < 3 || base.len() > 32 { 40 + if !matches!(base.len(), 3..=32) { 45 41 return reject(); 46 42 } 47 43 ··· 53 49 return reject(); 54 50 } 55 51 56 - if discriminator.len() != 2 || !discriminator.chars().all(|c| c.is_ascii_digit()) { 52 + if !is_valid_discriminator(discriminator) { 57 53 return reject(); 58 54 } 59 55 ··· 63 59 pub fn as_str(&self) -> &str { 64 60 &self.0 65 61 } 62 + } 63 + 64 + fn is_valid_discriminator(s: &str) -> bool { 65 + if !s.chars().all(|c| c.is_ascii_digit()) { 66 + return false; 67 + } 68 + if !matches!(s.len(), 2..=20) { 69 + return false; 70 + } 71 + if s.len() > 2 && s.starts_with('0') { 72 + return false; 73 + } 74 + s.parse::<u64>().is_ok_and(|n| n != 0) 66 75 } 67 76 68 77 impl fmt::Display for SignalUsername { ··· 115 124 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 116 125 write!( 117 126 f, 118 - "message body too long: {} bytes (max {})", 127 + "message body is {} bytes, max {}", 119 128 self.len, self.max 120 129 ) 121 130 } ··· 368 377 let req = tokio::select! { 369 378 biased; 370 379 _ = shutdown.cancelled() => { 371 - tracing::info!("signal worker shutting down (cancellation)"); 380 + tracing::info!("signal worker cancelled, shutting down"); 372 381 break; 373 382 } 374 383 msg = rx.recv() => match msg { 375 384 Some(r) => r, 376 385 None => { 377 - tracing::info!("signal worker shutting down (channel closed)"); 386 + tracing::info!("signal worker channel closed, shutting down"); 378 387 break; 379 388 } 380 389 },
+1 -1
crates/tranquil-signal/src/fjall_store.rs
··· 1050 1050 .and_then(|v| match <[u8; 32]>::try_from(v.as_ref()) { 1051 1051 Ok(arr) => Some(ProfileKey { bytes: arr }), 1052 1052 Err(_) => { 1053 - warn!(%uuid, len = v.len(), "corrupted profile key (expected 32 bytes)"); 1053 + warn!(%uuid, len = v.len(), "corrupted profile key, expected 32 bytes"); 1054 1054 None 1055 1055 } 1056 1056 }))
+1 -1
crates/tranquil-signal/src/store.rs
··· 1117 1117 Some(r) => match <[u8; 32]>::try_from(r.key.as_slice()) { 1118 1118 Ok(arr) => Some(ProfileKey { bytes: arr }), 1119 1119 Err(_) => { 1120 - warn!(%uuid, len = r.key.len(), "corrupted profile key (expected 32 bytes)"); 1120 + warn!(%uuid, len = r.key.len(), "corrupted profile key, expected 32 bytes"); 1121 1121 None 1122 1122 } 1123 1123 },
+4 -6
crates/tranquil-store/src/gauntlet/leak.rs
··· 166 166 #[test] 167 167 fn flat_metrics_no_violation() { 168 168 let cfg = LeakGateConfig::short_for_tests(); 169 - let series: Vec<MetricsSample> = (0..20) 170 - .map(|i| sample(60_000 + i * 60_000, GIB)) 171 - .collect(); 169 + let series: Vec<MetricsSample> = 170 + (0..20).map(|i| sample(60_000 + i * 60_000, GIB)).collect(); 172 171 assert!(evaluate(&series, cfg).is_empty()); 173 172 } 174 173 ··· 221 220 #[test] 222 221 fn missing_metric_samples_skipped() { 223 222 let cfg = LeakGateConfig::short_for_tests(); 224 - let mut series: Vec<MetricsSample> = (0..10) 225 - .map(|i| sample(60_000 + i * 60_000, GIB)) 226 - .collect(); 223 + let mut series: Vec<MetricsSample> = 224 + (0..10).map(|i| sample(60_000 + i * 60_000, GIB)).collect(); 227 225 series[3].rss_bytes = None; 228 226 series[7].rss_bytes = None; 229 227 assert!(evaluate(&series, cfg).is_empty());
+9
crates/tranquil-store/src/gauntlet/mod.rs
··· 1 1 pub mod farm; 2 2 pub mod invariants; 3 + pub mod leak; 4 + pub mod metrics; 3 5 pub mod op; 4 6 pub mod oracle; 5 7 pub mod overrides; ··· 7 9 pub mod runner; 8 10 pub mod scenarios; 9 11 pub mod shrink; 12 + pub mod soak; 10 13 pub mod workload; 11 14 12 15 pub use invariants::{ 13 16 EventLogSnapshot, Invariant, InvariantSet, InvariantViolation, SnapshotEvent, invariants_for, 14 17 }; 18 + pub use leak::{LeakGateBuildError, LeakGateConfig, LeakViolation, evaluate as evaluate_leak_gate}; 19 + pub use metrics::{MetricName, MetricsSample, sample_harness}; 15 20 pub use op::{ 16 21 CollectionName, DidSeed, EventKind, Op, OpStream, PayloadSeed, RecordKey, RetentionSecs, Seed, 17 22 ValueSeed, ··· 26 31 }; 27 32 pub use scenarios::{Scenario, UnknownScenario, config_for}; 28 33 pub use shrink::{ShrinkOutcome, shrink_failure}; 34 + pub use soak::{ 35 + DEFAULT_CHUNK_OPS, DEFAULT_SAMPLE_INTERVAL_MS, InvariantViolationRecord, SoakConfig, SoakError, 36 + SoakEvent, SoakReport, run_soak, 37 + }; 29 38 pub use workload::{ 30 39 ByteRange, DidSpaceSize, KeySpaceSize, OpCount, OpWeights, RetentionMaxSecs, SizeDistribution, 31 40 ValueBytes, WorkloadModel,
+4 -4
crates/tranquil-store/src/gauntlet/regression.rs
··· 97 97 f.sync_all()?; 98 98 } 99 99 std::fs::rename(&tmp, &path)?; 100 - if let Some(parent) = path.parent() { 101 - if let Ok(dir) = std::fs::File::open(parent) { 102 - let _ = dir.sync_all(); 103 - } 100 + if let Some(parent) = path.parent() 101 + && let Ok(dir) = std::fs::File::open(parent) 102 + { 103 + let _ = dir.sync_all(); 104 104 } 105 105 Ok(path) 106 106 }
+9 -9
crates/tranquil-store/src/gauntlet/runner.rs
··· 126 126 } 127 127 128 128 #[derive(Debug, thiserror::Error)] 129 - enum OpError { 129 + pub(super) enum OpError { 130 130 #[error("mst add: {0}")] 131 131 MstAdd(String), 132 132 #[error("mst delete: {0}")] ··· 249 249 } 250 250 } 251 251 252 - fn segments_subdir(root: &Path) -> PathBuf { 252 + pub(super) fn segments_subdir(root: &Path) -> PathBuf { 253 253 root.join("segments") 254 254 } 255 255 ··· 373 373 } 374 374 } 375 375 376 - fn open_eventlog<S: StorageIO + Send + Sync + 'static>( 376 + pub(super) fn open_eventlog<S: StorageIO + Send + Sync + 'static>( 377 377 io: S, 378 378 segments_dir: PathBuf, 379 379 max_segment_size: u64, ··· 586 586 } 587 587 } 588 588 589 - fn eventlog_snapshot<S: StorageIO + Send + Sync + 'static>( 589 + pub(super) fn eventlog_snapshot<S: StorageIO + Send + Sync + 'static>( 590 590 state: Option<&EventLogState<S>>, 591 591 ) -> Option<EventLogSnapshot> { 592 592 let s = state?; ··· 746 746 } 747 747 } 748 748 749 - async fn run_invariants<S: StorageIO + Send + Sync + 'static>( 749 + pub(super) async fn run_invariants<S: StorageIO + Send + Sync + 'static>( 750 750 store: &Arc<TranquilBlockStore<S>>, 751 751 oracle: &Oracle, 752 752 root: Option<Cid>, ··· 828 828 )) 829 829 } 830 830 831 - async fn refresh_oracle_graph<S: StorageIO + Send + Sync + 'static>( 831 + pub(super) async fn refresh_oracle_graph<S: StorageIO + Send + Sync + 'static>( 832 832 store: &Arc<TranquilBlockStore<S>>, 833 833 oracle: &mut Oracle, 834 834 root: Option<Cid>, ··· 889 889 } 890 890 } 891 891 892 - fn blockstore_config(dir: &std::path::Path, s: &StoreConfig) -> BlockStoreConfig { 892 + pub(super) fn blockstore_config(dir: &std::path::Path, s: &StoreConfig) -> BlockStoreConfig { 893 893 BlockStoreConfig { 894 894 data_dir: dir.join("data"), 895 895 index_dir: dir.join("index"), ··· 954 954 DidHash::from_did(&format!("did:plc:gauntlet{:08x}", seed.0)) 955 955 } 956 956 957 - async fn apply_op<S: StorageIO + Send + Sync + 'static>( 957 + pub(super) async 
fn apply_op<S: StorageIO + Send + Sync + 'static>( 958 958 harness: &mut Harness<S>, 959 959 root: &mut Option<Cid>, 960 960 oracle: &mut Oracle, ··· 1202 1202 ) -> Result<Vec<CidBytes>, OpError> { 1203 1203 removed_mst_blocks 1204 1204 .into_iter() 1205 - .chain(removed_cids.into_iter()) 1205 + .chain(removed_cids) 1206 1206 .map(|c| try_cid_to_fixed(&c)) 1207 1207 .collect::<Result<_, _>>() 1208 1208 .map_err(OpError::from)
+326
crates/tranquil-store/src/gauntlet/soak.rs
··· 1 + use std::io::{self, Write}; 2 + use std::path::PathBuf; 3 + use std::sync::Arc; 4 + use std::time::{Duration, Instant}; 5 + 6 + use cid::Cid; 7 + use serde::{Deserialize, Serialize}; 8 + use tracing::warn; 9 + 10 + use super::invariants::{InvariantSet, InvariantViolation}; 11 + use super::leak::{LeakGateConfig, LeakViolation, evaluate as evaluate_leak_gate}; 12 + use super::metrics::{MetricsSample, sample_harness}; 13 + use super::op::{OpStream, Seed}; 14 + use super::oracle::Oracle; 15 + use super::runner::{ 16 + EventLogState, GauntletConfig, Harness, IoBackend, apply_op, blockstore_config, 17 + eventlog_snapshot, open_eventlog, refresh_oracle_graph, run_invariants, segments_subdir, 18 + }; 19 + use super::workload::OpCount; 20 + use crate::blockstore::TranquilBlockStore; 21 + use crate::io::{RealIO, StorageIO}; 22 + 23 + const OP_ERROR_LOG_THROTTLE: u64 = 1024; 24 + 25 + pub const DEFAULT_CHUNK_OPS: usize = 5_000; 26 + pub const DEFAULT_SAMPLE_INTERVAL_MS: u64 = 60_000; 27 + 28 + #[derive(Debug, Clone)] 29 + pub struct SoakConfig { 30 + pub gauntlet: GauntletConfig, 31 + pub total_duration: Duration, 32 + pub sample_interval: Duration, 33 + pub chunk_ops: usize, 34 + pub leak_gate: LeakGateConfig, 35 + } 36 + 37 + impl SoakConfig { 38 + pub fn new(gauntlet: GauntletConfig, total_duration: Duration) -> Self { 39 + Self { 40 + gauntlet, 41 + total_duration, 42 + sample_interval: Duration::from_millis(DEFAULT_SAMPLE_INTERVAL_MS), 43 + chunk_ops: DEFAULT_CHUNK_OPS, 44 + leak_gate: LeakGateConfig::standard(), 45 + } 46 + } 47 + } 48 + 49 + #[derive(Debug, thiserror::Error)] 50 + pub enum SoakError { 51 + #[error(transparent)] 52 + Io(#[from] io::Error), 53 + #[error("soak requires IoBackend::Real; scenario configured Simulated")] 54 + SimulatedBackendRejected, 55 + #[error("open block store: {0}")] 56 + StoreOpen(String), 57 + #[error("open event log: {0}")] 58 + EventLogOpen(String), 59 + } 60 + 61 + #[derive(Debug, Clone, Serialize, Deserialize)] 62 + pub 
struct SoakReport { 63 + pub seed: Seed, 64 + pub ops_executed: u64, 65 + pub op_errors: u64, 66 + pub chunks: u64, 67 + pub samples: Vec<MetricsSample>, 68 + pub invariant_violations: Vec<InvariantViolationRecord>, 69 + pub leak_violations: Vec<LeakViolation>, 70 + pub total_wall_ms: u64, 71 + } 72 + 73 + #[derive(Debug, Clone, Serialize, Deserialize)] 74 + pub struct InvariantViolationRecord { 75 + pub invariant: String, 76 + pub detail: String, 77 + } 78 + 79 + impl SoakReport { 80 + pub fn is_clean(&self) -> bool { 81 + self.invariant_violations.is_empty() && self.leak_violations.is_empty() 82 + } 83 + } 84 + 85 + #[derive(Debug, Serialize, Deserialize)] 86 + #[serde(tag = "type")] 87 + pub enum SoakEvent { 88 + #[serde(rename = "sample")] 89 + Sample { 90 + seed: u64, 91 + chunk: u64, 92 + ops_executed: u64, 93 + sample: MetricsSample, 94 + }, 95 + #[serde(rename = "invariant_violation")] 96 + Invariant { 97 + seed: u64, 98 + invariant: String, 99 + detail: String, 100 + }, 101 + #[serde(rename = "summary")] 102 + Summary { 103 + seed: u64, 104 + total_wall_ms: u64, 105 + ops_executed: u64, 106 + op_errors: u64, 107 + chunks: u64, 108 + clean: bool, 109 + invariant_violations: usize, 110 + leak_violations: Vec<LeakViolation>, 111 + }, 112 + } 113 + 114 + pub async fn run_soak<W: Write + Send>( 115 + cfg: SoakConfig, 116 + mut emitter: W, 117 + ) -> Result<SoakReport, SoakError> { 118 + if !matches!(cfg.gauntlet.io, IoBackend::Real) { 119 + return Err(SoakError::SimulatedBackendRejected); 120 + } 121 + let dir = tempfile::TempDir::new()?; 122 + let store_cfg = blockstore_config(dir.path(), &cfg.gauntlet.store); 123 + let segments_dir: PathBuf = segments_subdir(dir.path()); 124 + let store = TranquilBlockStore::open(store_cfg) 125 + .map(Arc::new) 126 + .map_err(|e| SoakError::StoreOpen(e.to_string()))?; 127 + let eventlog: Option<EventLogState<RealIO>> = match cfg.gauntlet.eventlog { 128 + None => None, 129 + Some(ec) => Some( 130 + open_eventlog(RealIO::new(), 
segments_dir, ec.max_segment_size.0) 131 + .map_err(|e| SoakError::EventLogOpen(e.to_string()))?, 132 + ), 133 + }; 134 + let mut harness = Harness { store, eventlog }; 135 + let outcome = drive_soak(&mut harness, &cfg, &mut emitter).await; 136 + shutdown_harness(&mut harness); 137 + outcome 138 + } 139 + 140 + fn shutdown_harness<S: StorageIO + Send + Sync + 'static>(harness: &mut Harness<S>) { 141 + if let Some(el) = harness.eventlog.as_mut() { 142 + if let Err(e) = el.writer.shutdown() { 143 + warn!(error = %e, "soak: event log writer shutdown failed"); 144 + } 145 + el.manager.shutdown(); 146 + } 147 + } 148 + 149 + async fn drive_soak<S, W>( 150 + harness: &mut Harness<S>, 151 + cfg: &SoakConfig, 152 + emitter: &mut W, 153 + ) -> Result<SoakReport, SoakError> 154 + where 155 + S: StorageIO + Send + Sync + 'static, 156 + W: Write + Send, 157 + { 158 + let mut oracle = Oracle::new(); 159 + let mut root: Option<Cid> = None; 160 + 161 + let start = Instant::now(); 162 + let mut samples: Vec<MetricsSample> = Vec::new(); 163 + let mut invariant_records: Vec<InvariantViolationRecord> = Vec::new(); 164 + let mut ops_executed: u64 = 0; 165 + let mut op_errors: u64 = 0; 166 + let mut chunks: u64 = 0; 167 + let mut last_sample = start; 168 + let mut next_error_log_at: u64 = 1; 169 + 170 + let initial = sample_harness(harness, Duration::ZERO); 171 + emit_event( 172 + emitter, 173 + &SoakEvent::Sample { 174 + seed: cfg.gauntlet.seed.0, 175 + chunk: 0, 176 + ops_executed: 0, 177 + sample: initial, 178 + }, 179 + )?; 180 + samples.push(initial); 181 + 182 + while start.elapsed() < cfg.total_duration { 183 + let chunk_seed = Seed( 184 + cfg.gauntlet 185 + .seed 186 + .0 187 + .wrapping_add(chunks.wrapping_mul(0x9E37_79B9_7F4A_7C15)), 188 + ); 189 + let stream: OpStream = cfg 190 + .gauntlet 191 + .workload 192 + .generate(chunk_seed, OpCount(cfg.chunk_ops)); 193 + for op in stream.iter() { 194 + if start.elapsed() >= cfg.total_duration { 195 + break; 196 + } 197 + match 
apply_op(harness, &mut root, &mut oracle, op, &cfg.gauntlet.workload).await { 198 + Ok(()) => { 199 + ops_executed = ops_executed.saturating_add(1); 200 + } 201 + Err(e) => { 202 + op_errors = op_errors.saturating_add(1); 203 + if op_errors >= next_error_log_at { 204 + warn!( 205 + op_errors, 206 + ops_executed, 207 + elapsed_ms = u64::try_from(start.elapsed().as_millis()) 208 + .unwrap_or(u64::MAX), 209 + error = %e, 210 + "soak: op error milestone" 211 + ); 212 + next_error_log_at = next_error_log_at 213 + .saturating_mul(2) 214 + .max(OP_ERROR_LOG_THROTTLE); 215 + } 216 + } 217 + } 218 + if last_sample.elapsed() >= cfg.sample_interval { 219 + let elapsed = start.elapsed(); 220 + let s = sample_harness(harness, elapsed); 221 + emit_event( 222 + emitter, 223 + &SoakEvent::Sample { 224 + seed: cfg.gauntlet.seed.0, 225 + chunk: chunks, 226 + ops_executed, 227 + sample: s, 228 + }, 229 + )?; 230 + samples.push(s); 231 + last_sample = Instant::now(); 232 + } 233 + } 234 + chunks = chunks.saturating_add(1); 235 + tokio::task::yield_now().await; 236 + } 237 + 238 + let final_elapsed = start.elapsed(); 239 + let final_sample = sample_harness(harness, final_elapsed); 240 + emit_event( 241 + emitter, 242 + &SoakEvent::Sample { 243 + seed: cfg.gauntlet.seed.0, 244 + chunk: chunks, 245 + ops_executed, 246 + sample: final_sample, 247 + }, 248 + )?; 249 + samples.push(final_sample); 250 + 251 + let invariants = match refresh_oracle_graph(&harness.store, &mut oracle, root).await { 252 + Ok(()) => { 253 + let snapshot = eventlog_snapshot(harness.eventlog.as_ref()); 254 + let set = cfg 255 + .gauntlet 256 + .invariants 257 + .without(InvariantSet::RESTART_IDEMPOTENT); 258 + run_invariants(&harness.store, &oracle, root, snapshot, set).await 259 + } 260 + Err(e) => vec![InvariantViolation { 261 + invariant: "MstRootDurability", 262 + detail: format!("refresh: {e}"), 263 + }], 264 + }; 265 + for v in invariants.iter() { 266 + let rec = InvariantViolationRecord { 267 + invariant: 
v.invariant.to_string(), 268 + detail: v.detail.clone(), 269 + }; 270 + emit_event( 271 + emitter, 272 + &SoakEvent::Invariant { 273 + seed: cfg.gauntlet.seed.0, 274 + invariant: rec.invariant.clone(), 275 + detail: rec.detail.clone(), 276 + }, 277 + )?; 278 + invariant_records.push(rec); 279 + } 280 + 281 + let leak_violations = evaluate_leak_gate(&samples, cfg.leak_gate); 282 + let total_wall_ms = u64::try_from(start.elapsed().as_millis()).unwrap_or(u64::MAX); 283 + let clean = invariant_records.is_empty() && leak_violations.is_empty(); 284 + emit_event( 285 + emitter, 286 + &SoakEvent::Summary { 287 + seed: cfg.gauntlet.seed.0, 288 + total_wall_ms, 289 + ops_executed, 290 + op_errors, 291 + chunks, 292 + clean, 293 + invariant_violations: invariant_records.len(), 294 + leak_violations: leak_violations.clone(), 295 + }, 296 + )?; 297 + 298 + Ok(SoakReport { 299 + seed: cfg.gauntlet.seed, 300 + ops_executed, 301 + op_errors, 302 + chunks, 303 + samples, 304 + invariant_violations: invariant_records, 305 + leak_violations, 306 + total_wall_ms, 307 + }) 308 + } 309 + 310 + fn emit_event<W: Write>(emitter: &mut W, event: &SoakEvent) -> io::Result<()> { 311 + let line = serde_json::to_string(event).map_err(io::Error::other)?; 312 + writeln!(emitter, "{line}")?; 313 + emitter.flush() 314 + } 315 + 316 + #[cfg(test)] 317 + mod tests { 318 + use super::*; 319 + 320 + fn send_sync<T: Send + Sync>() {} 321 + 322 + #[test] 323 + fn soak_error_is_send_sync() { 324 + send_sync::<SoakError>(); 325 + } 326 + }
+119
crates/tranquil-store/tests/gauntlet_soak.rs
··· 1 + use std::io::{self, BufWriter, Write}; 2 + use std::time::Duration; 3 + 4 + use tranquil_store::gauntlet::{ 5 + LeakGateConfig, Scenario, Seed, SoakConfig, SoakReport, config_for, run_soak, 6 + }; 7 + 8 + fn soak_hours() -> Option<f64> { 9 + std::env::var("GAUNTLET_SOAK_HOURS") 10 + .ok() 11 + .and_then(|s| s.parse::<f64>().ok()) 12 + .filter(|h| h.is_finite() && *h > 0.0) 13 + } 14 + 15 + fn soak_sample_interval() -> Duration { 16 + std::env::var("GAUNTLET_SOAK_SAMPLE_SECS") 17 + .ok() 18 + .and_then(|s| s.parse::<u64>().ok()) 19 + .filter(|s| *s > 0) 20 + .map(Duration::from_secs) 21 + .unwrap_or_else(|| Duration::from_secs(60)) 22 + } 23 + 24 + fn emitter_stream() -> Box<dyn Write + Send> { 25 + match std::env::var("GAUNTLET_SOAK_OUTPUT").ok().as_deref() { 26 + Some(path) if !path.is_empty() => { 27 + let f = std::fs::OpenOptions::new() 28 + .create(true) 29 + .append(true) 30 + .open(path) 31 + .expect("open GAUNTLET_SOAK_OUTPUT target"); 32 + Box::new(BufWriter::new(f)) 33 + } 34 + _ => Box::new(BufWriter::new(io::stderr())), 35 + } 36 + } 37 + 38 + fn report_summary(report: &SoakReport) -> String { 39 + let leaks: Vec<String> = report 40 + .leak_violations 41 + .iter() 42 + .map(|v| { 43 + format!( 44 + "{}: {} -> {} ({}% over {}ms window, limit {}%)", 45 + v.metric, 46 + v.start_value, 47 + v.end_value, 48 + v.growth_pct.round() as i64, 49 + v.end_ms - v.start_ms, 50 + v.limit_pct 51 + ) 52 + }) 53 + .collect(); 54 + let invariants: Vec<String> = report 55 + .invariant_violations 56 + .iter() 57 + .map(|v| format!("{}: {}", v.invariant, v.detail)) 58 + .collect(); 59 + format!( 60 + "seed={:016x} ops={} chunks={} errors={} wall_ms={} leaks=[{}] invariants=[{}]", 61 + report.seed.0, 62 + report.ops_executed, 63 + report.chunks, 64 + report.op_errors, 65 + report.total_wall_ms, 66 + leaks.join(" ; "), 67 + invariants.join(" ; "), 68 + ) 69 + } 70 + 71 + #[tokio::test] 72 + async fn soak_short_smoke() { 73 + let cfg = SoakConfig { 74 + gauntlet: 
config_for(Scenario::SmokePR, Seed(7)), 75 + total_duration: Duration::from_secs(10), 76 + sample_interval: Duration::from_secs(2), 77 + chunk_ops: 200, 78 + leak_gate: LeakGateConfig::try_new(0, 60_000, 1000.0).expect("valid leak gate"), 79 + }; 80 + let mut buf: Vec<u8> = Vec::new(); 81 + let report = run_soak(cfg, &mut buf).await.expect("soak run"); 82 + assert!( 83 + report.samples.len() >= 3, 84 + "expected at least initial + periodic + final samples, got {}", 85 + report.samples.len() 86 + ); 87 + assert!( 88 + report.ops_executed > 0, 89 + "expected ops executed, got {}", 90 + report.ops_executed 91 + ); 92 + let text = String::from_utf8(buf).expect("utf8 ndjson"); 93 + assert!( 94 + text.contains("\"type\":\"summary\""), 95 + "ndjson must include summary line; got {text}" 96 + ); 97 + } 98 + 99 + #[tokio::test] 100 + #[ignore = "configurable via GAUNTLET_SOAK_HOURS; default 24h leak gate (1h warmup, 4h window, 5% limit)"] 101 + async fn soak_long_leak_gate() { 102 + let hours = soak_hours().unwrap_or(24.0); 103 + let total = Duration::from_secs_f64(hours * 3600.0); 104 + let cfg = SoakConfig { 105 + gauntlet: config_for(Scenario::MstChurn, Seed(0)), 106 + total_duration: total, 107 + sample_interval: soak_sample_interval(), 108 + chunk_ops: 10_000, 109 + leak_gate: LeakGateConfig::standard(), 110 + }; 111 + let mut emitter = emitter_stream(); 112 + let report = run_soak(cfg, &mut emitter).await.expect("soak run"); 113 + let _ = emitter.flush(); 114 + assert!( 115 + report.is_clean(), 116 + "soak failed: {}", 117 + report_summary(&report) 118 + ); 119 + }
+1 -1
crates/tranquil-store/tests/rotation_robustness.rs
··· 203 203 let write_handle = manager.open_for_append(file_id).unwrap(); 204 204 { 205 205 let mut writer = DataFileWriter::new(&*sim, write_handle.fd(), file_id).unwrap(); 206 - let _ = writer.append_block(&test_cid(1), &vec![0x11; 128]).unwrap(); 206 + let _ = writer.append_block(&test_cid(1), &[0x11; 128]).unwrap(); 207 207 writer.sync().unwrap(); 208 208 } 209 209 drop(write_handle);
+2 -2
crates/tranquil-store/tests/sim_blockstore.rs
··· 517 517 return; 518 518 }; 519 519 let start_pos = writer.position(); 520 - drop(writer); 521 - drop(handle); 520 + let _ = writer; 521 + let _ = handle; 522 522 523 523 let mut rng = Rng::new(seed); 524 524 let block_count = (rng.range_u32(15) + 5) as u16;
+2 -2
crates/tranquil-store/tests/verify_rollback_orphan.rs
··· 64 64 io.sync_dir(&data_dir).unwrap(); 65 65 66 66 let _ = io.delete(&hint_file_path(&data_dir, next_id)); 67 - drop(writer); 68 - drop(next_handle); 67 + let _ = writer; 68 + let _ = next_handle; 69 69 manager.rollback_rotation(next_id); 70 70 } 71 71
+1 -1
example.toml
··· 1 1 [server] 2 - # Public hostname of the PDS (e.g. `pds.example.com`). 2 + # Public hostname of the PDS, such as `pds.example.com`. 3 3 # 4 4 # Can also be specified via environment variable `PDS_HOSTNAME`. 5 5 #