very fast AT Protocol indexer with flexible filtering, XRPC queries, a cursor-backed event stream, and more, built on fjall
rust fjall at-protocol atproto indexer


internal relay backed indexer

dawn abace9d6 701120fd

+2424 -2027
+3 -3
Cargo.toml
··· 4 4 edition = "2024" 5 5 6 6 [features] 7 - default = ["events"] 8 - sync_all = [] 7 + default = ["indexer"] 8 + __persist_sync_all = [] 9 9 backlinks = [] 10 10 relay = [] 11 - events = [] 11 + indexer = [] 12 12 13 13 [dependencies] 14 14 tokio = { version = "1.0", features = ["full"] }
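the feature flags are reshaped in this commit: the old `events` flag becomes `indexer` (and is the default), `sync_all` becomes the internal `__persist_sync_all`, and `relay` continues to gate the relay-only paths. a minimal, illustration-only sketch (hypothetical function names, not hydrant's modules) of how code downstream of this Cargo.toml gates on the two roles:

```rust
// illustration only: hypothetical functions showing how the renamed `indexer`
// gate and the unchanged `relay` gate can be compiled into one binary.
#[cfg(feature = "indexer")]
fn spawn_indexer_pipeline() {
    // firehose worker, backfill, ordered event stream
}

#[cfg(feature = "relay")]
fn spawn_relay_endpoint() {
    // com.atproto.sync.subscribeRepos handler, relay_events keyspace
}

fn main() {
    #[cfg(feature = "indexer")]
    spawn_indexer_pipeline();
    #[cfg(feature = "relay")]
    spawn_relay_endpoint();
}
```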
+1 -1
README.md
··· 11 11 `hydrant` is an AT Protocol indexer built on the `fjall` database. it's built to 12 12 be flexible, supporting both full-network indexing and filtered indexing (e.g., 13 13 by DID), allowing querying with XRPCs (not only `com.atproto.*`!), providing an 14 - ordered event stream, etc. 14 + ordered event stream, etc. oh and it can also act as a relay! 15 15 16 16 you can see 17 17 [random.wisp.place](https://tangled.org/did:plc:dfl62fgb7wtjj3fcbb72naae/random.wisp.place)
+12 -7
src/api/debug.rs
··· 205 205 "invalid_u64".to_string() 206 206 } 207 207 } else if partition == "blocks" { 208 - // key is col|cid_bytes — show as "col|<cid_str>" 208 + // key is col|cid_bytes, show as "col|<cid_str>" 209 209 if let Some(sep) = k.iter().position(|&b| b == keys::SEP) { 210 210 let col = String::from_utf8_lossy(&k[..sep]); 211 211 match cid::Cid::read_bytes(&k[sep + 1..]) { ··· 303 303 State(state): State<Arc<AppState>>, 304 304 ) -> Result<StatusCode, StatusCode> { 305 305 tokio::task::spawn_blocking(move || { 306 - crate::db::ephemeral::ephemeral_ttl_tick(&state.db, &state.ephemeral_ttl) 306 + #[cfg(feature = "indexer")] 307 + let res = crate::db::ephemeral::ephemeral_ttl_tick(&state.db, &state.ephemeral_ttl); 308 + #[cfg(feature = "relay")] 309 + let res = crate::db::ephemeral::relay_events_ttl_tick(&state.db, &state.ephemeral_ttl); 310 + res 307 311 }) 308 312 .await 309 313 .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)? ··· 316 320 pub struct DebugSeedWatermarkRequest { 317 321 /// unix timestamp (seconds) to write the watermark at 318 322 pub ts: u64, 319 - /// event_id the watermark points to — all events before this id will be pruned 323 + /// event_id the watermark points to, all events before this id will be pruned 320 324 pub event_id: u64, 321 325 } 322 326 ··· 328 332 Query(req): Query<DebugSeedWatermarkRequest>, 329 333 ) -> Result<StatusCode, StatusCode> { 330 334 tokio::task::spawn_blocking(move || { 335 + #[cfg(feature = "indexer")] 336 + let key = crate::db::keys::event_watermark_key(req.ts); 337 + #[cfg(feature = "relay")] 338 + let key = crate::db::keys::relay_event_watermark_key(req.ts); 331 339 state 332 340 .db 333 341 .cursors 334 - .insert( 335 - crate::db::keys::event_watermark_key(req.ts), 336 - req.event_id.to_be_bytes(), 337 - ) 342 + .insert(key, req.event_id.to_be_bytes()) 338 343 .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR) 339 344 }) 340 345 .await
+5 -1
src/api/filter.rs
··· 16 16 .route("/filter", patch(handle_patch_filter)) 17 17 } 18 18 19 + use tracing::error; 19 20 pub async fn handle_get_filter( 20 21 State(hydrant): State<Hydrant>, 21 22 ) -> Result<Json<FilterSnapshot>, (StatusCode, String)> { ··· 46 47 p.excludes = body.excludes; 47 48 p.apply() 48 49 .await 49 - .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string())) 50 + .map_err(|e| { 51 + error!(err = %e, "failed to patch filter"); 52 + (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()) 53 + }) 50 54 .map(Json) 51 55 }
+2 -2
src/api/mod.rs
··· 13 13 mod ingestion; 14 14 mod repos; 15 15 mod stats; 16 - #[cfg(feature = "events")] 16 + #[cfg(feature = "indexer")] 17 17 mod stream; 18 18 mod xrpc; 19 19 ··· 22 22 let mut app = Router::new() 23 23 .route("/health", get(|| async { "OK" })) 24 24 .route("/stats", get(stats::get_stats)); 25 - #[cfg(feature = "events")] 25 + #[cfg(feature = "indexer")] 26 26 let app = app.nest("/stream", stream::router()); 27 27 let app = app 28 28 .merge(xrpc::router())
+3
src/api/xrpc/get_latest_commit.rs
··· 31 31 RepoStatus::Takendown => Some(GetLatestCommitError::RepoTakendown(None)), 32 32 RepoStatus::Suspended => Some(GetLatestCommitError::RepoSuspended(None)), 33 33 RepoStatus::Deactivated => Some(GetLatestCommitError::RepoDeactivated(None)), 34 + RepoStatus::Deleted => Some(GetLatestCommitError::RepoNotFound(Some(CowStr::Borrowed( 35 + "deleted", 36 + )))), 34 37 _ => None, 35 38 }; 36 39 if let Some(err) = xrpc_err {
+3
src/api/xrpc/get_repo.rs
··· 32 32 RepoStatus::Takendown => Some(GetRepoError::RepoTakendown(None)), 33 33 RepoStatus::Suspended => Some(GetRepoError::RepoSuspended(None)), 34 34 RepoStatus::Deactivated => Some(GetRepoError::RepoDeactivated(None)), 35 + RepoStatus::Deleted => Some(GetRepoError::RepoNotFound(Some(CowStr::Borrowed( 36 + "deleted", 37 + )))), 35 38 _ => None, 36 39 }; 37 40 if let Some(err) = xrpc_err {
+16 -15
src/api/xrpc/get_repo_status.rs
··· 29 29 }); 30 30 }; 31 31 32 - let (active, status) = repo_status_to_api(state.status); 32 + let status = repo_status_to_api(state.status); 33 33 34 34 // rev is only meaningful when the repo is active and has been synced at least once 35 - let rev = active.then(|| state.root.map(|c| c.rev.to_tid())).flatten(); 35 + let rev = state 36 + .active 37 + .then(|| state.root.map(|c| c.rev.to_tid())) 38 + .flatten(); 36 39 37 40 Ok(Json(GetRepoStatusOutput { 38 - active, 41 + active: state.active, 39 42 did: req.did, 40 43 rev, 41 44 status: status.map(|s| match s { ··· 51 54 })) 52 55 } 53 56 54 - pub(super) fn repo_status_to_api(status: RepoStatus) -> (bool, Option<ApiRepoStatus<'static>>) { 57 + pub(super) fn repo_status_to_api(status: RepoStatus) -> Option<ApiRepoStatus<'static>> { 55 58 match status { 56 - RepoStatus::Synced => (true, None), 57 - RepoStatus::Deactivated => (false, Some(ApiRepoStatus::Deactivated)), 58 - RepoStatus::Takendown => (false, Some(ApiRepoStatus::Takendown)), 59 - RepoStatus::Suspended => (false, Some(ApiRepoStatus::Suspended)), 60 - // we lost sync with this repo! report desynchronized 61 - // technicalllyyyy backfilling can mean the repo is active 62 - // because we are syncing it from the pds, but like also it is currently 63 - // desync'ed so... 64 - RepoStatus::Backfilling | RepoStatus::Error(_) => { 65 - (false, Some(ApiRepoStatus::Desynchronized)) 66 - } 59 + RepoStatus::Synced => None, 60 + RepoStatus::Deactivated => Some(ApiRepoStatus::Deactivated), 61 + RepoStatus::Takendown => Some(ApiRepoStatus::Takendown), 62 + RepoStatus::Suspended => Some(ApiRepoStatus::Suspended), 63 + RepoStatus::Deleted => Some(ApiRepoStatus::Deleted), 64 + // per spec, desynchronized and throttled have active=may-be-true 65 + RepoStatus::Desynchronized => Some(ApiRepoStatus::Desynchronized), 66 + RepoStatus::Throttled => Some(ApiRepoStatus::Throttled), 67 + RepoStatus::Error(_) => Some(ApiRepoStatus::Desynchronized), 67 68 } 68 69 }
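the shape change here is that `active` is no longer derived from the status: it lives on the repo state, and `repo_status_to_api` only maps the status, so `desynchronized` and `throttled` can be reported while `active` stays true, as the comment about the sync spec notes. a standalone sketch with stand-in types (not hydrant's real enums) of that decoupling:

```rust
// stand-in types for illustration; hydrant's real RepoStatus/ApiRepoStatus differ.
enum Status { Synced, Desynchronized, Throttled, Takendown }

fn status_to_api(status: &Status) -> Option<&'static str> {
    match status {
        Status::Synced => None,
        Status::Desynchronized => Some("desynchronized"),
        Status::Throttled => Some("throttled"),
        Status::Takendown => Some("takendown"),
    }
}

fn main() {
    // a repo hydrant lost sync with can still be active on its PDS
    let (active, status) = (true, Status::Desynchronized);
    assert_eq!((active, status_to_api(&status)), (true, Some("desynchronized")));
}
```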
+3 -3
src/api/xrpc/list_repos.rs
··· 31 31 let mut next_cursor: Option<Did<'static>> = None; 32 32 33 33 for item in hydrant.repos.iter_states(cursor.as_ref()) { 34 - let (did, state) = item?; 34 + let (did, state, _metadata) = item?; 35 35 36 36 // skip repos that haven't been synced at least once 37 37 let Some(commit) = state.root else { ··· 48 48 continue; 49 49 }; 50 50 51 - let (active, status) = repo_status_to_api(state.status); 51 + let status = repo_status_to_api(state.status); 52 52 repos.push(Repo { 53 - active: Some(active), 53 + active: Some(state.active), 54 54 did: did.clone(), 55 55 head: Cid::from(commit_cid), 56 56 rev: atp_commit.rev,
+10 -8
src/api/xrpc/mod.rs
··· 27 27 use serde::{Deserialize, Serialize}; 28 28 use smol_str::ToSmolStr; 29 29 use std::fmt::Display; 30 + #[cfg(feature = "relay")] 31 + use { 32 + jacquard_api::com_atproto::sync::subscribe_repos::SubscribeReposEndpoint, 33 + jacquard_common::xrpc::SubscriptionEndpoint, 34 + }; 30 35 31 36 mod com_atproto_describe_repo; 32 37 mod count_records; ··· 43 48 mod subscribe_repos; 44 49 45 50 pub fn router() -> Router<Hydrant> { 46 - #[allow(unused_mut)] 47 - let mut r = Router::new() 51 + let r = Router::new() 48 52 .route(GetRecordRequest::PATH, get(get_record::handle)) 49 53 .route(ListRecordsRequest::PATH, get(list_records::handle)) 50 54 .route(CountRecords::PATH, get(count_records::handle)) ··· 61 65 .route(ListReposRequest::PATH, get(list_repos::handle)); 62 66 63 67 #[cfg(feature = "relay")] 64 - { 65 - r = r.route( 66 - "/xrpc/com.atproto.sync.subscribeRepos", 67 - axum::routing::get(subscribe_repos::handle), 68 - ); 69 - } 68 + let r = r.route( 69 + SubscribeReposEndpoint::PATH, 70 + axum::routing::get(subscribe_repos::handle), 71 + ); 70 72 71 73 r 72 74 }
+66 -35
src/backfill/manager.rs
··· 1 1 use crate::db::types::TrimmedDid; 2 - use crate::db::{self, deser_repo_state}; 3 - use crate::ops; 2 + use crate::db::{self, keys}; 4 3 use crate::state::AppState; 5 - use crate::types::{GaugeState, RepoStatus, ResyncState}; 4 + use crate::types::{GaugeState, ResyncState}; 6 5 use miette::{IntoDiagnostic, Result}; 6 + 7 7 use std::sync::Arc; 8 8 use std::time::Duration; 9 - use tracing::{debug, error, info, warn}; 9 + use tracing::{debug, error, info}; 10 10 11 11 pub fn queue_gone_backfills(state: &Arc<AppState>) -> Result<()> { 12 12 debug!("scanning for deactivated/takendown repos to retry..."); ··· 28 28 if matches!(resync_state, ResyncState::Gone { .. }) { 29 29 debug!(did = %did, "queuing retry for gone repo"); 30 30 31 - let Some(state_bytes) = state.db.repos.get(&key).into_diagnostic()? else { 32 - warn!(did = %did, "repo state not found"); 33 - continue; 31 + let metadata_key = keys::repo_metadata_key(&did); 32 + let metadata_bytes = match state 33 + .db 34 + .repo_metadata 35 + .get(&metadata_key) 36 + .map(|b| b.ok_or_else(|| miette::miette!("repo metadata not found"))) 37 + .into_diagnostic() 38 + .flatten() 39 + { 40 + Ok(b) => b, 41 + Err(e) => { 42 + error!(did = %did, err = %e, "failed to get repo metadata"); 43 + continue; 44 + } 34 45 }; 46 + let mut metadata = crate::db::deser_repo_metadata(&metadata_bytes)?; 35 47 36 - // update repo state back to backfilling 37 - let repo_state = deser_repo_state(&state_bytes)?; 38 - ops::update_repo_status( 39 - &mut batch, 40 - &state.db, 41 - &did, 42 - repo_state, 43 - RepoStatus::Backfilling, 44 - )?; 48 + // move from resync back into pending 49 + batch.remove(&state.db.resync, key.clone()); 50 + let old_pending = keys::pending_key(metadata.index_id); 51 + batch.remove(&state.db.pending, old_pending); 52 + metadata.index_id = rand::random::<u64>(); 53 + batch.insert( 54 + &state.db.pending, 55 + keys::pending_key(metadata.index_id), 56 + key.clone(), 57 + ); 58 + batch.insert( 59 + &state.db.repo_metadata, 60 + &metadata_key, 61 + crate::db::ser_repo_metadata(&metadata)?, 62 + ); 45 63 46 64 transitions.push((GaugeState::Resync(None), GaugeState::Pending)); 47 65 } ··· 100 118 if next_retry <= now { 101 119 debug!(did = %did, "retrying backfill"); 102 120 103 - let state_bytes = match state.db.repos.get(&key).into_diagnostic() { 121 + let metadata_key = keys::repo_metadata_key(&did); 122 + let metadata_bytes = match state 123 + .db 124 + .repo_metadata 125 + .get(&metadata_key) 126 + .map(|b| b.ok_or_else(|| miette::miette!("repo metadata not found"))) 127 + .into_diagnostic() 128 + .flatten() 129 + { 104 130 Ok(b) => b, 105 - Err(err) => { 106 - error!(did = %did, err = %err, "failed to get repo state"); 131 + Err(e) => { 132 + error!(did = %did, err = %e, "failed to get repo metadata"); 107 133 continue; 108 134 } 109 135 }; 110 - let Some(state_bytes) = state_bytes else { 111 - error!(did = %did, "repo state not found"); 112 - continue; 136 + let mut metadata = match crate::db::deser_repo_metadata( 137 + metadata_bytes.as_ref(), 138 + ) { 139 + Ok(m) => m, 140 + Err(e) => { 141 + error!(did = %did, err = %e, "failed to deserialize repo metadata"); 142 + continue; 143 + } 113 144 }; 114 145 115 - let repo_state = match deser_repo_state(&state_bytes) { 146 + // move from resync back into pending 147 + batch.remove(&state.db.resync, key.clone()); 148 + let old_pending = keys::pending_key(metadata.index_id); 149 + batch.remove(&state.db.pending, old_pending); 150 + metadata.index_id = rand::random::<u64>(); 151 + 
batch.insert( 152 + &state.db.pending, 153 + keys::pending_key(metadata.index_id), 154 + key.clone(), 155 + ); 156 + let serialized_metadata = match crate::db::ser_repo_metadata(&metadata) { 116 157 Ok(s) => s, 117 158 Err(e) => { 118 - error!(did = %did, err = %e, "failed to deserialize repo state"); 159 + error!(did = %did, err = %e, "failed to serialize repo metadata"); 119 160 continue; 120 161 } 121 162 }; 122 - let res = ops::update_repo_status( 123 - &mut batch, 124 - &state.db, 125 - &did, 126 - repo_state, 127 - RepoStatus::Backfilling, 128 - ); 129 - if let Err(e) = res { 130 - error!(did = %did, err = %e, "failed to update repo status"); 131 - continue; 132 - } 163 + batch.insert(&state.db.repo_metadata, &metadata_key, serialized_metadata); 133 164 134 165 transitions.push((GaugeState::Resync(Some(kind)), GaugeState::Pending)); 135 166 }
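both retry paths in the manager now use the same requeue shape: drop the resync entry, drop the pending entry keyed by the old `index_id`, mint a fresh random `index_id`, then write the new pending entry and the updated metadata in one batch. a condensed stand-in sketch of that shape over in-memory maps (the real code above works on fjall keyspaces):

```rust
// condensed stand-in sketch of the requeue-with-fresh-index_id shape; not hydrant's real API.
use std::collections::BTreeMap;

fn requeue(
    pending: &mut BTreeMap<u64, String>,    // index_id -> did key
    resync: &mut BTreeMap<String, Vec<u8>>, // did key -> resync state
    index_id: &mut u64,
    did_key: &str,
    new_id: u64, // the real code draws this from rand::random::<u64>()
) {
    // leave the resync queue
    resync.remove(did_key);
    // drop the stale pending slot keyed by the old index_id
    pending.remove(&*index_id);
    // mint a fresh slot so the backfill worker picks the repo up again
    *index_id = new_id;
    pending.insert(*index_id, did_key.to_string());
}

fn main() {
    let mut pending = BTreeMap::from([(7u64, "did:plc:example".to_string())]);
    let mut resync = BTreeMap::from([("did:plc:example".to_string(), Vec::new())]);
    let mut index_id = 7u64;
    requeue(&mut pending, &mut resync, &mut index_id, "did:plc:example", 42);
    assert!(resync.is_empty());
    assert_eq!(pending.get(&42).map(String::as_str), Some("did:plc:example"));
}
```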
+66 -38
src/backfill/mod.rs
··· 31 31 32 32 pub mod manager; 33 33 34 - use crate::ingest::{BufferTx, IngestMessage}; 34 + use crate::ingest::indexer::{IndexerMessage, IndexerTx}; 35 35 use crate::util::{WatchEnabledExt, url_to_fluent_uri}; 36 36 37 37 pub struct BackfillWorker { 38 38 state: Arc<AppState>, 39 - buffer_tx: BufferTx, 39 + buffer_tx: IndexerTx, 40 40 http: reqwest::Client, 41 41 semaphore: Arc<Semaphore>, 42 42 verify_signatures: bool, ··· 48 48 impl BackfillWorker { 49 49 pub fn new( 50 50 state: Arc<AppState>, 51 - buffer_tx: BufferTx, 51 + buffer_tx: IndexerTx, 52 52 timeout: Duration, 53 53 concurrency_limit: usize, 54 54 verify_signatures: bool, ··· 182 182 async fn did_task( 183 183 state: &Arc<AppState>, 184 184 http: reqwest::Client, 185 - buffer_tx: BufferTx, 185 + buffer_tx: IndexerTx, 186 186 did: &Did<'static>, 187 187 pending_key: Slice, 188 188 _permit: tokio::sync::OwnedSemaphorePermit, ··· 192 192 let db = &state.db; 193 193 194 194 match process_did(&state, &http, &did, verify_signatures, ephemeral).await { 195 - Ok(Some(repo_state)) => { 195 + Ok(Some(_repo_state)) => { 196 196 let did_key = keys::repo_key(&did); 197 197 198 198 // determine old gauge state 199 199 // if it was error/suspended etc, we need to know which error kind it was to decrement correctly. 200 - // we have to peek at the resync state. 201 - let old_gauge = state.db.repo_gauge_state_async(&repo_state, &did_key).await; 202 - 203 200 let mut batch = db.inner.batch(); 204 - // remove from pending 205 - if old_gauge == GaugeState::Pending { 206 - batch.remove(&db.pending, pending_key); 207 - } 208 - // remove from resync 209 - if old_gauge.is_resync() { 210 - batch.remove(&db.resync, &did_key); 211 - } 201 + // unconditionally remove from pending 202 + batch.remove(&db.pending, pending_key); 203 + // remove from resync, just in case 204 + batch.remove(&db.resync, &did_key); 205 + 212 206 tokio::task::spawn_blocking(move || batch.commit().into_diagnostic()) 213 207 .await 214 208 .into_diagnostic()??; 215 209 216 210 state 217 211 .db 218 - .update_gauge_diff_async(&old_gauge, &GaugeState::Synced) 212 + .update_gauge_diff_async(&GaugeState::Pending, &GaugeState::Synced) 219 213 .await; 220 214 221 215 let state = state.clone(); ··· 229 223 .await 230 224 .into_diagnostic()??; 231 225 232 - if let Err(e) = buffer_tx.send(IngestMessage::BackfillFinished(did.clone())) { 226 + if let Err(e) = buffer_tx 227 + .send(IndexerMessage::BackfillFinished(did.clone())) 228 + .await 229 + { 233 230 error!(err = %e, "failed to send BackfillFinished"); 234 231 } 235 232 Ok(()) ··· 310 307 { 311 308 let mut state: RepoState = 312 309 rmp_serde::from_slice(&state_bytes).into_diagnostic()?; 310 + state.active = true; 313 311 state.status = RepoStatus::Error(error_string.into()); 314 312 Some(rmp_serde::to_vec(&state).into_diagnostic()?) 
315 313 } else { ··· 422 420 ); 423 421 state.update_from_doc(doc); 424 422 425 - let emit_identity = |status: &RepoStatus| { 423 + let emit_identity = |status: &RepoStatus, active: bool| { 424 + let status = match status { 425 + RepoStatus::Deactivated => "deactivated", 426 + RepoStatus::Takendown => "takendown", 427 + RepoStatus::Suspended => "suspended", 428 + RepoStatus::Deleted => "deleted", 429 + RepoStatus::Desynchronized => "desynchronized", 430 + RepoStatus::Throttled => "throttled", 431 + _ => "", 432 + }; 426 433 let evt = AccountEvt { 427 434 did: did.clone(), 428 - active: !matches!( 429 - status, 430 - RepoStatus::Deactivated | RepoStatus::Takendown | RepoStatus::Suspended 431 - ), 432 - status: Some( 433 - match status { 434 - RepoStatus::Deactivated => "deactivated", 435 - RepoStatus::Takendown => "takendown", 436 - RepoStatus::Suspended => "suspended", 437 - _ => "active", 438 - } 439 - .into(), 440 - ), 435 + active, 436 + status: status 437 + .is_empty() 438 + .then_some(None) 439 + .unwrap_or_else(|| Some(status.into())), 441 440 }; 442 441 let _ = app_state.db.event_tx.send(ops::make_account_event(db, evt)); 443 442 }; ··· 472 471 if let Some(status) = inactive_status { 473 472 warn!(?status, "repo is inactive, stopping backfill"); 474 473 475 - emit_identity(&status); 474 + emit_identity(&status, false); 476 475 477 476 let resync_state = ResyncState::Gone { 478 477 status: status.clone(), ··· 483 482 app_state 484 483 .db 485 484 .update_repo_state_async(did, move |state, (key, batch)| { 485 + state.active = false; 486 486 state.status = status; 487 487 batch.insert(&app_state_clone.db.resync, key, resync_bytes); 488 488 Ok((true, ())) ··· 498 498 Err(e) => Err(e).into_diagnostic()?, 499 499 }; 500 500 501 - // emit identity event so any consumers know 502 - emit_identity(&state.status); 501 + // emit identity event so any consumers know, but only if something changed 502 + if state.active != previous_state.active 503 + || state.status != previous_state.status 504 + || previous_state.pds.is_none() 505 + { 506 + emit_identity(&state.status, state.active); 507 + } 503 508 504 509 trace!( 505 510 bytes = car_bytes.body.len(), ··· 721 726 } 722 727 723 728 // 6. update data, status is updated in worker shard 724 - state.tracked = true; 725 729 state.root = Some(root_commit); 726 730 state.touch(); 727 731 ··· 731 735 ser_repo_state(&state)?, 732 736 ); 733 737 738 + let metadata_key = keys::repo_metadata_key(&did); 739 + let metadata_bytes = app_state 740 + .db 741 + .repo_metadata 742 + .get(&metadata_key) 743 + .into_diagnostic()? 744 + .ok_or_else(|| miette::miette!("repo metadata not found for {}", did))?; 745 + let mut metadata = crate::db::deser_repo_metadata(&metadata_bytes)?; 746 + metadata.tracked = true; 747 + batch.insert( 748 + &app_state.db.repo_metadata, 749 + &metadata_key, 750 + crate::db::ser_repo_metadata(&metadata)?, 751 + ); 752 + 734 753 // add the counts 735 754 if !ephemeral { 736 755 for (col, cnt) in collection_counts { ··· 746 765 .into_diagnostic()?? 747 766 }; 748 767 768 + let metadata_key = keys::repo_metadata_key(did); 769 + let metadata_bytes = db 770 + .repo_metadata 771 + .get(&metadata_key) 772 + .into_diagnostic()? 
773 + .ok_or_else(|| miette::miette!("repo metadata not found for {}", did))?; 774 + let metadata = crate::db::deser_repo_metadata(metadata_bytes.as_ref())?; 775 + 749 776 let Some((_state, records_cnt_delta, added_blocks, count)) = result else { 750 - // signal mode: no signal-matching records found — clean up the optimistically-added repo 777 + // signal mode: no signal-matching records found, clean up the optimistically-added repo 751 778 let did_key = keys::repo_key(did); 752 - let backfill_pending_key = keys::pending_key(previous_state.index_id); 779 + let backfill_pending_key = keys::pending_key(metadata.index_id); 753 780 let app_state = app_state.clone(); 754 781 tokio::task::spawn_blocking(move || { 755 782 let mut batch = app_state.db.inner.batch(); 756 783 batch.remove(&app_state.db.repos, &did_key); 784 + batch.remove(&app_state.db.repo_metadata, &metadata_key); 757 785 batch.remove(&app_state.db.pending, backfill_pending_key); 758 786 batch.commit().into_diagnostic() 759 787 })
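backfill now emits an identity event only when the account's `active` flag or status actually changed, or when the PDS was previously unknown, instead of on every pass. a small stand-in sketch of that change-detection guard:

```rust
// stand-in sketch of the emit-only-on-change guard used by backfill above.
#[derive(Clone)]
struct Snapshot {
    active: bool,
    status: &'static str,
    pds: Option<String>,
}

fn should_emit(prev: &Snapshot, cur: &Snapshot) -> bool {
    cur.active != prev.active || cur.status != prev.status || prev.pds.is_none()
}

fn main() {
    // hypothetical values for illustration
    let prev = Snapshot { active: true, status: "synced", pds: Some("https://pds.example".into()) };
    let same = prev.clone();
    let deactivated = Snapshot { active: false, status: "deactivated", ..prev.clone() };
    assert!(!should_emit(&prev, &same));
    assert!(should_emit(&prev, &deactivated));
}
```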
+6 -1
src/control/filter.rs
··· 1 1 use std::sync::Arc; 2 + use tracing::error; 2 3 3 4 use miette::{IntoDiagnostic, Result}; 4 5 ··· 284 285 db_filter::load(&filter_ks) 285 286 }) 286 287 .await 287 - .into_diagnostic()??; 288 + .into_diagnostic()? 289 + .map_err(|e| { 290 + error!(err = %e, "failed to apply filter patch"); 291 + e 292 + })?; 288 293 289 294 let exclude_list = { 290 295 let filter_ks = self.state.db.filter.clone();
+81 -34
src/control/mod.rs
··· 1 + #![allow(unused_imports)] 2 + 1 3 pub(crate) mod crawler; 2 4 pub(crate) mod filter; 3 5 pub(crate) mod firehose; ··· 22 24 use tokio::sync::{mpsc, watch}; 23 25 use tracing::{debug, error, info}; 24 26 25 - #[cfg(feature = "events")] 27 + #[cfg(feature = "indexer")] 26 28 use crate::backfill::BackfillWorker; 27 29 use crate::config::{Config, SignatureVerification}; 28 30 use crate::db::{ ··· 30 32 load_persisted_firehose_sources, 31 33 }; 32 34 use crate::filter::FilterMode; 33 - #[cfg(feature = "events")] 34 - use crate::ingest::worker::FirehoseWorker; 35 + #[cfg(feature = "indexer")] 36 + use crate::ingest::indexer::FirehoseWorker; 35 37 use crate::state::AppState; 36 38 use crate::types::MarshallableEvt; 37 39 38 40 use crawler::{CrawlerShared, spawn_crawler_producer}; 39 41 use firehose::{FirehoseShared, spawn_firehose_ingestor}; 40 - #[cfg(feature = "events")] 42 + #[cfg(feature = "indexer")] 41 43 use stream::event_stream_thread; 42 44 #[cfg(feature = "relay")] 43 45 use stream::relay_stream_thread; ··· 215 217 } 216 218 217 219 let fut = async move { 218 - // internal buffered channel between ingestors / backfill and the firehose worker 219 - let (buffer_tx, buffer_rx) = mpsc::unbounded_channel(); 220 + // raw firehose events from pds/relay to RelayWorker 221 + let (buffer_tx, buffer_rx) = mpsc::channel::<crate::ingest::IngestMessage>(500); 222 + 223 + // validated IndexerMessages from RelayWorker/backfill to FirehoseWorker 224 + #[cfg(feature = "indexer")] 225 + let (indexer_tx, indexer_rx) = 226 + mpsc::channel::<crate::ingest::indexer::IndexerMessage>(500); 220 227 221 228 // 5. spawn the backfill worker (not used in relay mode) 222 - #[cfg(feature = "events")] 229 + #[cfg(feature = "indexer")] 223 230 tokio::spawn({ 224 231 let state = state.clone(); 225 232 BackfillWorker::new( 226 233 state.clone(), 227 - buffer_tx.clone(), 234 + indexer_tx.clone(), 228 235 config.repo_fetch_timeout, 229 236 config.backfill_concurrency_limit, 230 237 matches!( ··· 238 245 }); 239 246 240 247 // 6. re-queue any repos that lost their backfill state, then start the retry worker 241 - #[cfg(feature = "events")] 248 + #[cfg(feature = "indexer")] 242 249 { 243 250 if let Err(e) = tokio::task::spawn_blocking({ 244 251 let state = state.clone(); ··· 258 265 } 259 266 260 267 // 7. ephemeral GC thread (not used in relay mode) 261 - #[cfg(feature = "events")] 268 + #[cfg(feature = "indexer")] 262 269 if config.ephemeral { 263 270 let state = state.clone(); 264 271 std::thread::Builder::new() ··· 267 274 .into_diagnostic()?; 268 275 } 269 276 277 + // relay events TTL: relay_events keyspace grows unbounded without pruning 278 + #[cfg(feature = "relay")] 279 + { 280 + let state = state.clone(); 281 + std::thread::Builder::new() 282 + .name("relay-events-gc".into()) 283 + .spawn(move || crate::db::ephemeral::relay_events_ttl_worker(state)) 284 + .into_diagnostic()?; 285 + } 286 + 270 287 // 8. cursor / counts persist thread 271 288 std::thread::spawn({ 272 289 let state = state.clone(); ··· 298 315 }); 299 316 300 317 // 9. 
events/sec stats ticker 301 - #[cfg(feature = "events")] 302 318 tokio::spawn({ 303 319 let state = state.clone(); 304 - let mut last_id = state.db.next_event_id.load(Ordering::Relaxed); 320 + let get_id = |state: &AppState| { 321 + #[cfg(feature = "indexer")] 322 + let id = state.db.next_event_id.load(Ordering::Relaxed); 323 + #[cfg(feature = "relay")] 324 + let id = state.db.next_relay_seq.load(Ordering::Relaxed); 325 + id 326 + }; 327 + let mut last_id = get_id(&state); 305 328 let mut last_time = std::time::Instant::now(); 306 329 let mut interval = tokio::time::interval(std::time::Duration::from_secs(60)); 307 330 async move { 308 331 loop { 309 332 interval.tick().await; 310 333 311 - let current_id = state.db.next_event_id.load(Ordering::Relaxed); 334 + let current_id = get_id(&state); 312 335 let current_time = std::time::Instant::now(); 313 336 let delta = current_id.saturating_sub(last_id); 314 337 ··· 410 433 .await; 411 434 } 412 435 413 - // 11. spawn crawler infrastructure (always, to support dynamic source management) 436 + // 11. spawn crawler infrastructure 437 + #[cfg(feature = "indexer")] 414 438 { 415 439 use crate::crawler::throttle::Throttler; 416 440 use crate::crawler::{ ··· 536 560 } 537 561 } 538 562 539 - // 12. spawn the firehose worker on a blocking thread (fatal task) 540 - let handle = tokio::runtime::Handle::current(); 541 - let firehose_worker = std::thread::spawn({ 563 + // 12. spawn the relay worker 564 + let relay_worker = std::thread::spawn({ 542 565 let state = state.clone(); 543 - let handle = handle.clone(); 566 + let handle = tokio::runtime::Handle::current(); 567 + let config = config.clone(); 568 + 569 + #[cfg(feature = "indexer")] 570 + let hook = indexer_tx.clone(); 571 + 544 572 move || { 545 - #[cfg(feature = "relay")] 546 - return crate::ingest::relay_worker::RelayWorker::new( 573 + crate::ingest::relay::RelayWorker::new( 547 574 state, 548 575 buffer_rx, 576 + #[cfg(feature = "indexer")] 577 + hook, 549 578 matches!(config.verify_signatures, SignatureVerification::Full), 550 579 config.firehose_workers, 551 580 crate::ingest::validation::ValidationOptions { ··· 553 582 rev_clock_skew_secs: config.rev_clock_skew_secs, 554 583 }, 555 584 ) 556 - .run(handle); 557 - #[cfg(feature = "events")] 558 - return FirehoseWorker::new( 585 + .run(handle) 586 + } 587 + }); 588 + 589 + let tx = Arc::clone(&fatal_tx); 590 + tokio::spawn( 591 + tokio::task::spawn_blocking(move || { 592 + relay_worker 593 + .join() 594 + .map_err(|e| miette::miette!("relay worker died: {e:?}")) 595 + }) 596 + .map(move |r| { 597 + let result = r.into_diagnostic().flatten().flatten(); 598 + let _ = tx.send(Some(result.map_err(|e| e.to_string()))); 599 + }), 600 + ); 601 + 602 + // 13. 
spawn the firehose worker (if enabled) 603 + #[cfg(feature = "indexer")] 604 + let firehose_worker = std::thread::spawn({ 605 + let state = state.clone(); 606 + let handle = tokio::runtime::Handle::current(); 607 + let config = config.clone(); 608 + move || { 609 + FirehoseWorker::new( 559 610 state, 560 - buffer_rx, 561 - matches!(config.verify_signatures, SignatureVerification::Full), 611 + indexer_rx, 562 612 config.ephemeral, 563 613 config.firehose_workers, 564 - crate::ingest::validation::ValidationOptions { 565 - verify_mst: config.verify_mst, 566 - rev_clock_skew_secs: config.rev_clock_skew_secs, 567 - }, 568 614 ) 569 - .run(handle); 615 + .run(handle) 570 616 } 571 617 }); 572 618 619 + #[cfg(feature = "indexer")] 573 620 { 574 621 let tx = Arc::clone(&fatal_tx); 575 622 tokio::spawn( 576 623 tokio::task::spawn_blocking(move || { 577 624 firehose_worker 578 625 .join() 579 - .map_err(|e| miette::miette!("buffer processor died: {e:?}")) 626 + .map_err(|e| miette::miette!("firehose worker died: {e:?}")) 580 627 }) 581 628 .map(move |r| { 582 629 let result = r.into_diagnostic().flatten().flatten(); ··· 619 666 /// 620 667 /// multiple concurrent subscribers each receive a full independent copy of the stream. 621 668 /// the stream ends when the `EventStream` is dropped. 622 - #[cfg(feature = "events")] 669 + #[cfg(feature = "indexer")] 623 670 pub fn subscribe(&self, cursor: Option<u64>) -> EventStream { 624 671 let (tx, rx) = mpsc::channel(500); 625 672 let state = self.state.clone(); ··· 835 882 /// implements [`futures::Stream`] and can be used with `StreamExt::next`, 836 883 /// `while let Some(evt) = stream.next().await`, `forward`, etc. 837 884 /// the stream terminates when the underlying channel closes (i.e. hydrant shuts down). 838 - #[cfg(feature = "events")] 885 + #[cfg(feature = "indexer")] 839 886 pub struct EventStream(mpsc::Receiver<Event>); 840 887 841 - #[cfg(feature = "events")] 888 + #[cfg(feature = "indexer")] 842 889 impl Stream for EventStream { 843 890 type Item = Event; 844 891
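startup wiring is now a two-stage pipeline: raw firehose frames flow over a bounded `IngestMessage` channel into the `RelayWorker`, and validated `IndexerMessage`s flow from the relay worker (and from backfill) into the `FirehoseWorker`, which only exists under the `indexer` feature. a standalone sketch of that two-stage layout with bounded tokio channels and stand-in message types:

```rust
// stand-in two-stage pipeline: raw -> relay stage -> validated -> indexer stage.
// bounded channels (capacity 500 in the real code) provide backpressure.
use tokio::sync::mpsc;

struct Ingest(String);  // stand-in for IngestMessage
struct Indexer(String); // stand-in for IndexerMessage

#[tokio::main]
async fn main() {
    let (ingest_tx, mut ingest_rx) = mpsc::channel::<Ingest>(500);
    let (indexer_tx, mut indexer_rx) = mpsc::channel::<Indexer>(500);

    // relay stage: validate/sequence frames, then forward to the indexer stage
    let relay = tokio::spawn(async move {
        while let Some(Ingest(frame)) = ingest_rx.recv().await {
            let _ = indexer_tx.send(Indexer(format!("validated:{frame}"))).await;
        }
    });

    // indexer stage: apply validated events to the database / event stream
    let indexer = tokio::spawn(async move {
        while let Some(Indexer(evt)) = indexer_rx.recv().await {
            println!("indexing {evt}");
        }
    });

    ingest_tx.send(Ingest("commit".into())).await.unwrap();
    drop(ingest_tx);
    relay.await.unwrap();
    indexer.await.unwrap();
}
```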
+164 -54
src/control/repos.rs
··· 17 17 use url::Url; 18 18 19 19 use crate::db::types::{DbRkey, DidKey, TrimmedDid}; 20 - use crate::db::{self, Db, keys, ser_repo_state}; 20 + use crate::db::{self, Db, keys}; 21 21 use crate::state::AppState; 22 - use crate::types::{GaugeState, RepoState, RepoStatus}; 22 + use crate::types::{GaugeState, RepoMetadata, RepoState, RepoStatus}; 23 23 use crate::util::invalid_handle; 24 24 25 25 /// information about a tracked or known repository. returned by [`ReposControl`] methods. ··· 77 77 pub(crate) fn iter_states( 78 78 &self, 79 79 cursor: Option<&Did<'_>>, 80 - ) -> impl Iterator<Item = Result<(Did<'static>, RepoState<'static>)>> { 80 + ) -> impl Iterator<Item = Result<(Did<'static>, RepoState<'static>, crate::types::RepoMetadata)>> 81 + { 81 82 let start_bound = if let Some(cursor) = cursor { 82 83 let did_key = keys::repo_key(cursor); 83 84 std::ops::Bound::Excluded(did_key) ··· 85 86 std::ops::Bound::Unbounded 86 87 }; 87 88 89 + let db = self.0.db.clone(); 88 90 self.0 89 91 .db 90 92 .repos 91 93 .range((start_bound, std::ops::Bound::Unbounded)) 92 - .map(|g| { 94 + .map(move |g| { 93 95 let (k, v) = g.into_inner().into_diagnostic()?; 94 96 let repo_state = crate::db::deser_repo_state(&v)?.into_static(); 95 97 let did = TrimmedDid::try_from(k.as_ref())?.to_did(); 96 - Ok((did, repo_state)) 98 + let metadata_key = keys::repo_metadata_key(&did); 99 + let metadata = db 100 + .repo_metadata 101 + .get(&metadata_key) 102 + .into_diagnostic()? 103 + .ok_or_else(|| miette::miette!("repo metadata not found for {}", did))?; 104 + let metadata = crate::db::deser_repo_metadata(metadata.as_ref())?; 105 + Ok((did, repo_state, metadata)) 97 106 }) 98 107 } 99 108 100 109 /// iterates through all repositories, returning their state. 101 110 pub fn iter(&self, cursor: Option<&Did<'_>>) -> impl Iterator<Item = Result<RepoInfo>> { 102 111 self.iter_states(cursor) 103 - .map(|r| r.map(|(did, s)| repo_state_to_info(did, s))) 112 + .map(|r| r.map(|(did, s, m)| repo_state_to_info(did, s, m.tracked))) 104 113 } 105 114 106 115 #[allow(dead_code)] ··· 113 122 }; 114 123 115 124 let repos = self.0.db.repos.clone(); 125 + let db = self.0.db.clone(); 116 126 self.0 117 127 .db 118 128 .pending ··· 131 141 tracing::warn!(id, did = ?did_key, "stale pending???"); 132 142 return Ok(None); 133 143 }; 134 - let repo_state = crate::db::deser_repo_state(&bytes)?; 144 + let repo_state = crate::db::deser_repo_state(bytes.as_ref())?; 135 145 let did = TrimmedDid::try_from(did_key.as_ref())?.to_did(); 136 - Ok(Some((id, repo_state_to_info(did, repo_state)))) 146 + let metadata_key = keys::repo_metadata_key(&did); 147 + let metadata = db 148 + .repo_metadata 149 + .get(&metadata_key) 150 + .into_diagnostic()? 
151 + .ok_or_else(|| miette::miette!("repo metadata not found for {}", did))?; 152 + let metadata = crate::db::deser_repo_metadata(metadata.as_ref())?; 153 + Ok(Some(( 154 + id, 155 + repo_state_to_info(did, repo_state.into_static(), metadata.tracked), 156 + ))) 137 157 }) 138 158 .map(|b| b.transpose()) 139 159 .flatten() ··· 149 169 }; 150 170 151 171 let repos = self.0.db.repos.clone(); 172 + let db = self.0.db.clone(); 152 173 self.0 153 174 .db 154 175 .resync ··· 160 181 tracing::warn!(did = ?did_key, "stale resync???"); 161 182 return Ok(None); 162 183 }; 163 - let repo_state = crate::db::deser_repo_state(&bytes)?; 184 + let repo_state = crate::db::deser_repo_state(bytes.as_ref())?; 164 185 let did = TrimmedDid::try_from(did_key.as_ref())?.to_did(); 165 - Ok(Some(repo_state_to_info(did, repo_state))) 186 + let metadata_key = keys::repo_metadata_key(&did); 187 + let metadata = db 188 + .repo_metadata 189 + .get(&metadata_key) 190 + .into_diagnostic()? 191 + .ok_or_else(|| miette::miette!("repo metadata not found for {}", did))?; 192 + let metadata = crate::db::deser_repo_metadata(metadata.as_ref())?; 193 + Ok(Some(repo_state_to_info( 194 + did, 195 + repo_state.into_static(), 196 + metadata.tracked, 197 + ))) 166 198 }) 167 199 .map(|b| b.transpose()) 168 200 .flatten() ··· 199 231 transitions: &mut Vec<(GaugeState, GaugeState)>, 200 232 ) -> Result<bool> { 201 233 let did_key = keys::repo_key(did); 234 + let metadata_key = keys::repo_metadata_key(did); 235 + 202 236 let repo_bytes = db.repos.get(&did_key).into_diagnostic()?; 203 237 let existing = repo_bytes 204 238 .as_deref() 205 239 .map(db::deser_repo_state) 206 240 .transpose()?; 207 241 208 - if let Some(mut repo_state) = existing 209 - && repo_state.status != RepoStatus::Backfilling 210 - { 211 - let resync = db.resync.get(&did_key).into_diagnostic()?; 212 - let old = db::Db::repo_gauge_state(&repo_state, resync.as_deref()); 213 - repo_state.tracked = true; 214 - repo_state.status = RepoStatus::Backfilling; 215 - batch.insert(&db.repos, &did_key, ser_repo_state(&repo_state)?); 216 - batch.insert( 217 - &db.pending, 218 - keys::pending_key(repo_state.index_id), 219 - &did_key, 220 - ); 221 - batch.remove(&db.resync, &did_key); 222 - transitions.push((old, GaugeState::Pending)); 223 - return Ok(true); 242 + if let Some(repo_state) = existing { 243 + let metadata_bytes = db 244 + .repo_metadata 245 + .get(&metadata_key) 246 + .into_diagnostic()? 247 + .ok_or_else(|| miette::miette!("repo metadata not found for {}", did))?; 248 + let mut metadata = crate::db::deser_repo_metadata(&metadata_bytes)?; 249 + 250 + // skip if already in pending queue 251 + let is_pending = db 252 + .pending 253 + .get(keys::pending_key(metadata.index_id)) 254 + .into_diagnostic()? 
255 + .is_some(); 256 + if !is_pending { 257 + let resync = db.resync.get(&did_key).into_diagnostic()?; 258 + let old = db::Db::repo_gauge_state(&repo_state, resync.as_deref()); 259 + metadata.tracked = true; 260 + // insert into pending with new index_id 261 + let old_pending = keys::pending_key(metadata.index_id); 262 + batch.remove(&db.pending, &old_pending); 263 + metadata.index_id = rand::Rng::next_u64(&mut rand::rng()); 264 + batch.insert(&db.pending, keys::pending_key(metadata.index_id), &did_key); 265 + batch.remove(&db.resync, &did_key); 266 + batch.insert( 267 + &db.repo_metadata, 268 + &metadata_key, 269 + crate::db::ser_repo_metadata(&metadata)?, 270 + ); 271 + transitions.push((old, GaugeState::Pending)); 272 + return Ok(true); 273 + } 224 274 } 225 275 226 276 Ok(false) ··· 291 341 292 342 for did in dids { 293 343 let did_key = keys::repo_key(&did); 294 - let repo_bytes = db.repos.get(&did_key).into_diagnostic()?; 295 - let existing = repo_bytes 296 - .as_deref() 297 - .map(db::deser_repo_state) 344 + let metadata_key = keys::repo_metadata_key(&did); 345 + 346 + let metadata_bytes = db.repo_metadata.get(&metadata_key).into_diagnostic()?; 347 + let existing_metadata = metadata_bytes 348 + .map(|b| crate::db::deser_repo_metadata(&b)) 298 349 .transpose()?; 299 350 300 - if let Some(repo_state) = existing { 301 - // the double read here is an ok tradeoff, the block will be in read-cache anyway 302 - if !repo_state.tracked && Self::_resync(db, &did, &mut batch, &mut transitions)? 303 - { 351 + if let Some(metadata) = existing_metadata { 352 + if !metadata.tracked && Self::_resync(db, &did, &mut batch, &mut transitions)? { 304 353 queued.push(did); 305 354 } 306 355 } else { 307 - let repo_state = RepoState::backfilling(rng.next_u64()); 308 - batch.insert(&db.repos, &did_key, ser_repo_state(&repo_state)?); 356 + let repo_state = RepoState::backfilling(); 357 + let metadata = RepoMetadata::backfilling(rng.next_u64()); 358 + batch.insert(&db.repos, &did_key, crate::db::ser_repo_state(&repo_state)?); 309 359 batch.insert( 310 - &db.pending, 311 - keys::pending_key(repo_state.index_id), 312 - &did_key, 360 + &db.repo_metadata, 361 + &metadata_key, 362 + crate::db::ser_repo_metadata(&metadata)?, 313 363 ); 364 + batch.insert(&db.pending, keys::pending_key(metadata.index_id), &did_key); 314 365 added += 1; 315 366 queued.push(did); 316 367 transitions.push((GaugeState::Synced, GaugeState::Pending)); ··· 351 402 352 403 for did in dids { 353 404 let did_key = keys::repo_key(&did); 405 + let metadata_key = keys::repo_metadata_key(&did); 406 + 354 407 let repo_bytes = db.repos.get(&did_key).into_diagnostic()?; 355 408 let existing = repo_bytes 356 409 .as_deref() ··· 358 411 .transpose()?; 359 412 360 413 if let Some(repo_state) = existing { 361 - if repo_state.tracked { 362 - let resync = db.resync.get(&did_key).into_diagnostic()?; 363 - let old = db::Db::repo_gauge_state(&repo_state, resync.as_deref()); 364 - let mut repo_state = repo_state.into_static(); 365 - repo_state.tracked = false; 366 - batch.insert(&db.repos, &did_key, ser_repo_state(&repo_state)?); 367 - batch.remove(&db.pending, keys::pending_key(repo_state.index_id)); 368 - batch.remove(&db.resync, &did_key); 369 - if old != GaugeState::Synced { 370 - gauge_decrements.push(old); 414 + let metadata_bytes = db.repo_metadata.get(&metadata_key).into_diagnostic()?; 415 + let existing_metadata = metadata_bytes 416 + .map(|b| crate::db::deser_repo_metadata(&b)) 417 + .transpose()?; 418 + 419 + if let Some(mut metadata) = 
existing_metadata { 420 + if metadata.tracked { 421 + let resync = db.resync.get(&did_key).into_diagnostic()?; 422 + let old = db::Db::repo_gauge_state(&repo_state, resync.as_deref()); 423 + metadata.tracked = false; 424 + batch.insert( 425 + &db.repo_metadata, 426 + &metadata_key, 427 + crate::db::ser_repo_metadata(&metadata)?, 428 + ); 429 + batch.remove(&db.pending, keys::pending_key(metadata.index_id)); 430 + batch.remove(&db.resync, &did_key); 431 + if old != GaugeState::Synced { 432 + gauge_decrements.push(old); 433 + } 434 + untracked.push(did); 371 435 } 372 - untracked.push(did); 373 436 } 374 437 } 375 438 } ··· 390 453 } 391 454 } 392 455 393 - pub(crate) fn repo_state_to_info(did: Did<'static>, s: RepoState<'_>) -> RepoInfo { 456 + pub(crate) fn repo_state_to_info(did: Did<'static>, s: RepoState<'_>, tracked: bool) -> RepoInfo { 394 457 let (rev, data) = s 395 458 .root 396 459 .map(|c| (Some(c.rev.to_tid()), Some(c.data))) ··· 398 461 RepoInfo { 399 462 did, 400 463 status: s.status, 401 - tracked: s.tracked, 464 + tracked, 402 465 rev, 403 466 data, 404 467 handle: s.handle.map(|h| h.into_static()), ··· 479 542 /// returns `None` if hydrant has never seen this repository. 480 543 pub async fn info(&self) -> Result<Option<RepoInfo>> { 481 544 let did = self.did.clone().into_static(); 482 - Ok(self.state().await?.map(|s| repo_state_to_info(did, s))) 545 + let did_key = keys::repo_key(&did); 546 + let metadata_key = keys::repo_metadata_key(&did); 547 + let app_state = self.state.clone(); 548 + 549 + tokio::task::spawn_blocking(move || { 550 + let state_bytes = app_state.db.repos.get(&did_key).into_diagnostic()?; 551 + let Some(state_bytes) = state_bytes else { 552 + return Ok(None); 553 + }; 554 + let repo_state = crate::db::deser_repo_state(&state_bytes)?; 555 + 556 + let metadata_bytes = app_state 557 + .db 558 + .repo_metadata 559 + .get(&metadata_key) 560 + .into_diagnostic()? 561 + .ok_or_else(|| miette::miette!("repo metadata not found for {}", did))?; 562 + let metadata = crate::db::deser_repo_metadata(&metadata_bytes)?; 563 + 564 + Ok(Some(repo_state_to_info(did, repo_state, metadata.tracked))) 565 + }) 566 + .await 567 + .into_diagnostic()? 483 568 } 484 569 485 570 /// returns the collections of this repository and the number of records it has in each. ··· 518 603 return Err(MiniDocError::RepoNotFound); 519 604 }; 520 605 521 - if info.status == RepoStatus::Backfilling { 606 + // check if repo is still backfilling (in pending) 607 + let metadata_key = keys::repo_metadata_key(&self.did); 608 + let app_state = self.state.clone(); 609 + 610 + let is_pending = tokio::task::spawn_blocking(move || { 611 + let metadata_bytes = app_state 612 + .db 613 + .repo_metadata 614 + .get(&metadata_key) 615 + .into_diagnostic()?; 616 + let Some(metadata_bytes) = metadata_bytes else { 617 + return Ok::<_, miette::Report>(false); 618 + }; 619 + let metadata = crate::db::deser_repo_metadata(metadata_bytes.as_ref())?; 620 + Ok(app_state 621 + .db 622 + .pending 623 + .get(crate::db::keys::pending_key(metadata.index_id)) 624 + .into_diagnostic()? 625 + .is_some()) 626 + }) 627 + .await 628 + .map_err(|e| MiniDocError::Other(miette::miette!(e)))? 629 + .map_err(MiniDocError::Other)?; 630 + 631 + if is_pending { 522 632 return Err(MiniDocError::NotSynced); 523 633 } 524 634
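with tracking state split into its own keyspace, "is this repo still backfilling" is no longer a `RepoStatus` variant: it's answered by checking whether the metadata's `index_id` is still queued in `pending`. a small stand-in sketch of that check:

```rust
// stand-in sketch: "still backfilling" means the metadata's index_id is still queued in pending.
use std::collections::BTreeMap;

struct RepoMetadata { tracked: bool, index_id: u64 } // stand-in, not hydrant's real type

fn is_pending(pending: &BTreeMap<u64, String>, meta: &RepoMetadata) -> bool {
    pending.contains_key(&meta.index_id)
}

fn main() {
    let pending = BTreeMap::from([(42u64, "did:plc:example".to_string())]);
    let meta = RepoMetadata { tracked: true, index_id: 42 };
    // tracked and still pending: treated as "not synced yet", like the mini-doc path above
    assert!(meta.tracked && is_pending(&pending, &meta));
}
```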
+3 -3
src/control/stream.rs
··· 7 7 use crate::state::AppState; 8 8 use std::sync::atomic::Ordering; 9 9 10 - #[cfg(feature = "events")] 10 + #[cfg(feature = "indexer")] 11 11 use { 12 12 super::Event, 13 13 crate::db, ··· 20 20 sha2::{Digest, Sha256}, 21 21 }; 22 22 23 - #[cfg(feature = "events")] 23 + #[cfg(feature = "indexer")] 24 24 pub(super) fn event_stream_thread( 25 25 state: Arc<AppState>, 26 26 tx: mpsc::Sender<Event>, ··· 156 156 } 157 157 } 158 158 159 - #[cfg(feature = "events")] 159 + #[cfg(feature = "indexer")] 160 160 fn stored_to_event(state: &AppState, id: u64, stored: StoredEvent<'_>) -> Option<Event> { 161 161 let StoredEvent { 162 162 live,
+3 -1
src/crawler/mod.rs
··· 1 + #![allow(dead_code, unused_imports)] 2 + 1 3 use crate::state::AppState; 2 4 use futures::future::join_all; 3 5 use jacquard_common::types::string::Did; ··· 121 123 if delta_processed == 0 && delta_crawled == 0 { 122 124 if is_throttled { 123 125 info!("throttled: pending queue full"); 124 - } else { 126 + } else if *self.0.state.crawler_enabled.borrow() { 125 127 info!("idle: no repos crawled or processed in 60s"); 126 128 } 127 129 continue;
+16 -8
src/crawler/worker.rs
··· 1 1 use crate::db::{keys, ser_repo_state}; 2 2 use crate::state::AppState; 3 - use crate::types::RepoState; 3 + use crate::types::{RepoMetadata, RepoState}; 4 4 use miette::{IntoDiagnostic, Result}; 5 5 use rand::Rng; 6 6 use rand::rngs::SmallRng; ··· 142 142 BLOCKING_TASK_TIMEOUT, 143 143 tokio::task::spawn_blocking(move || -> Result<Vec<InFlightGuard>> { 144 144 let mut rng: SmallRng = rand::make_rng(); 145 - let mut write_batch = db.inner.batch(); 145 + let mut batch = db.inner.batch(); 146 146 let mut surviving = Vec::new(); 147 147 for guard in guards { 148 148 let did_key = keys::repo_key(&*guard); 149 + let metadata_key = keys::repo_metadata_key(&*guard); 149 150 if db.repos.contains_key(&did_key).into_diagnostic()? { 150 151 continue; 151 152 } 152 - let state = RepoState::untracked(rng.next_u64()); 153 - write_batch.insert(&db.repos, &did_key, ser_repo_state(&state)?); 154 - write_batch.insert(&db.pending, keys::pending_key(state.index_id), &did_key); 153 + let state = RepoState::backfilling(); 154 + let metadata = RepoMetadata::backfilling(rng.next_u64()); 155 + batch.insert(&db.repos, &did_key, ser_repo_state(&state)?); 156 + batch.insert( 157 + &db.repo_metadata, 158 + &metadata_key, 159 + crate::db::ser_repo_metadata(&metadata)?, 160 + ); 161 + batch.insert(&db.pending, keys::pending_key(metadata.index_id), &did_key); 155 162 // clear any stale retry entry, this DID is confirmed and being enqueued 156 - write_batch.remove(&db.crawler, keys::crawler_retry_key(&*guard)); 163 + batch.remove(&db.crawler, keys::crawler_retry_key(&*guard)); 157 164 trace!(did = %*guard, "enqueuing repo"); 158 165 surviving.push(guard); 159 166 } 160 167 if let Some(cursor) = cursor_update { 161 - write_batch.insert(&db.cursors, cursor.key, cursor.value); 168 + batch.insert(&db.cursors, cursor.key, cursor.value); 162 169 } 163 - write_batch.commit().into_diagnostic()?; 170 + // todo: repo state overwrites here are acceptable? 171 + batch.commit().into_diagnostic()?; 164 172 Ok(surviving) 165 173 }), 166 174 )
+56 -17
src/db/ephemeral.rs
··· 1 1 use crate::db::{Db, keys}; 2 + use fjall::Keyspace; 2 3 use miette::{IntoDiagnostic, WrapErr}; 3 4 use std::sync::Arc; 4 5 use std::sync::atomic::Ordering; 5 6 use std::time::Duration; 6 7 use tracing::{debug, error, info}; 7 8 9 + #[cfg(feature = "indexer")] 8 10 pub fn ephemeral_ttl_worker(state: Arc<crate::state::AppState>) { 9 11 info!("ephemeral TTL worker started"); 10 12 loop { ··· 15 17 } 16 18 } 17 19 20 + #[cfg(feature = "relay")] 21 + pub fn relay_events_ttl_worker(state: Arc<crate::state::AppState>) { 22 + info!("relay events TTL worker started"); 23 + loop { 24 + std::thread::sleep(Duration::from_secs(60)); 25 + if let Err(e) = relay_events_ttl_tick(&state.db, &state.ephemeral_ttl) { 26 + error!(err = %e, "relay events TTL tick failed"); 27 + } 28 + } 29 + } 30 + 31 + #[cfg(feature = "indexer")] 18 32 pub fn ephemeral_ttl_tick(db: &Db, ttl: &Duration) -> miette::Result<()> { 33 + let current_seq = db.next_event_id.load(Ordering::SeqCst); 34 + ttl_tick_inner( 35 + db, 36 + ttl, 37 + keys::EVENT_WATERMARK_PREFIX, 38 + keys::event_watermark_key, 39 + &db.events, 40 + current_seq, 41 + ) 42 + } 43 + 44 + #[cfg(feature = "relay")] 45 + pub fn relay_events_ttl_tick(db: &Db, ttl: &Duration) -> miette::Result<()> { 46 + let current_seq = db.next_relay_seq.load(Ordering::SeqCst); 47 + ttl_tick_inner( 48 + db, 49 + ttl, 50 + keys::RELAY_EVENT_WATERMARK_PREFIX, 51 + keys::relay_event_watermark_key, 52 + &db.relay_events, 53 + current_seq, 54 + ) 55 + } 56 + 57 + fn ttl_tick_inner( 58 + db: &Db, 59 + ttl: &Duration, 60 + watermark_prefix: &'static [u8], 61 + watermark_key: fn(u64) -> Vec<u8>, 62 + events_ks: &Keyspace, 63 + current_seq: u64, 64 + ) -> miette::Result<()> { 19 65 let now = chrono::Utc::now().timestamp() as u64; 20 66 let cutoff_ts = now.saturating_sub(ttl.as_secs()); 21 67 22 68 // write current watermark 23 - #[cfg(feature = "events")] 24 - let current_event_id = db.next_event_id.load(Ordering::SeqCst); 25 - #[cfg(not(feature = "events"))] 26 - let current_event_id = 0u64; 27 69 db.cursors 28 - .insert( 29 - keys::event_watermark_key(now), 30 - current_event_id.to_be_bytes(), 31 - ) 70 + .insert(watermark_key(now), current_seq.to_be_bytes()) 32 71 .into_diagnostic()?; 33 72 34 73 // find the watermark entry closest to and <= cutoff_ts 35 - let cutoff_key = keys::event_watermark_key(cutoff_ts); 36 - let cutoff_event_id = db 74 + let cutoff_key = watermark_key(cutoff_ts); 75 + let cutoff_seq = db 37 76 .cursors 38 77 .range(..=cutoff_key.as_slice()) 39 78 .next_back() 40 79 .map(|g| g.into_inner().into_diagnostic()) 41 80 .transpose()? 42 - .filter(|(k, _)| k.starts_with(keys::EVENT_WATERMARK_PREFIX)) 81 + .filter(|(k, _)| k.starts_with(watermark_prefix)) 43 82 .map(|(_, v)| { 44 83 v.as_ref() 45 84 .try_into() 46 85 .into_diagnostic() 47 - .wrap_err("expected cutoff event id to be u64") 86 + .wrap_err("expected cutoff seq to be u64") 48 87 }) 49 88 .transpose()? 
50 89 .map(u64::from_be_bytes); 51 90 52 - let Some(cutoff_event_id) = cutoff_event_id else { 91 + let Some(cutoff_seq) = cutoff_seq else { 53 92 // no watermark old enough yet, nothing to prune 54 93 return Ok(()); 55 94 }; 56 95 57 - let cutoff_key_events = keys::event_key(cutoff_event_id); 96 + let cutoff_key_events = keys::event_key(cutoff_seq); 58 97 let mut batch = db.inner.batch(); 59 98 let mut pruned = 0usize; 60 99 61 - for guard in db.events.range(..cutoff_key_events) { 100 + for guard in events_ks.range(..cutoff_key_events) { 62 101 let k = guard.key().into_diagnostic()?; 63 - batch.remove(&db.events, k); 102 + batch.remove(events_ks, k); 64 103 pruned += 1; 65 104 } 66 105 67 106 // clean up consumed watermark entries (everything up to and including cutoff_ts) 68 107 for guard in db.cursors.range(..=cutoff_key) { 69 108 let k = guard.key().into_diagnostic()?; 70 - if k.starts_with(keys::EVENT_WATERMARK_PREFIX) { 109 + if k.starts_with(watermark_prefix) { 71 110 batch.remove(&db.cursors, k); 72 111 } 73 112 }
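the pruning logic is now shared between the indexer and relay event logs: each tick writes a watermark mapping the current timestamp to the current sequence number, looks up the newest watermark at or before `now - ttl`, deletes every event below that sequence, and drops the consumed watermarks. a standalone sketch of the same idea over in-memory maps:

```rust
// standalone sketch of watermark-based TTL pruning, over BTreeMaps instead of fjall keyspaces.
use std::collections::BTreeMap;

fn ttl_tick(
    watermarks: &mut BTreeMap<u64, u64>, // timestamp -> sequence number at that time
    events: &mut BTreeMap<u64, String>,  // sequence number -> event payload
    now: u64,
    ttl_secs: u64,
    current_seq: u64,
) {
    // write the current watermark
    watermarks.insert(now, current_seq);
    let cutoff_ts = now.saturating_sub(ttl_secs);

    // the newest watermark at or before the cutoff tells us which sequences are old enough
    let cutoff = watermarks
        .range(..=cutoff_ts)
        .next_back()
        .map(|(&ts, &seq)| (ts, seq));

    if let Some((ts, cutoff_seq)) = cutoff {
        // prune everything strictly below the cutoff sequence
        events.retain(|&seq, _| seq >= cutoff_seq);
        // drop the consumed watermark entries
        let keep = watermarks.split_off(&(ts + 1));
        *watermarks = keep;
    }
}

fn main() {
    let mut wm = BTreeMap::from([(100u64, 10u64)]);
    let mut events = BTreeMap::from([(5u64, "old".to_string()), (15u64, "new".to_string())]);
    ttl_tick(&mut wm, &mut events, 200, 60, 20);
    assert!(!events.contains_key(&5) && events.contains_key(&15));
}
```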
+23 -1
src/db/keys/mod.rs
··· 10 10 /// separator used for composite keys 11 11 pub const SEP: u8 = b'|'; 12 12 13 + #[cfg(feature = "indexer")] 13 14 pub const EVENT_WATERMARK_PREFIX: &[u8] = b"ewm|"; 15 + 16 + #[cfg(feature = "relay")] 17 + pub const RELAY_EVENT_WATERMARK_PREFIX: &[u8] = b"rwm|"; 14 18 15 19 /// THIS SHOULD ALWAYS BE STABLE. DO NOT CHANGE 16 20 pub const VERSIONING_KEY: &[u8] = b"db_version"; ··· 22 26 vec 23 27 } 24 28 29 + pub const REPO_METADATA_PREFIX: &[u8] = b"rm|"; 30 + 31 + pub fn repo_metadata_key<'a>(did: &'a Did) -> Vec<u8> { 32 + let mut vec = Vec::with_capacity(REPO_METADATA_PREFIX.len() + 32); 33 + vec.extend_from_slice(REPO_METADATA_PREFIX); 34 + TrimmedDid::from(did).write_to_vec(&mut vec); 35 + vec 36 + } 37 + 25 38 pub fn pending_key(id: u64) -> [u8; 8] { 26 39 id.to_be_bytes() 27 40 } 28 41 42 + #[cfg(feature = "indexer")] 29 43 pub fn event_watermark_key(timestamp_secs: u64) -> Vec<u8> { 30 44 let mut key = Vec::with_capacity(EVENT_WATERMARK_PREFIX.len() + 8); 31 45 key.extend_from_slice(EVENT_WATERMARK_PREFIX); 46 + key.extend_from_slice(&timestamp_secs.to_be_bytes()); 47 + key 48 + } 49 + 50 + #[cfg(feature = "relay")] 51 + pub fn relay_event_watermark_key(timestamp_secs: u64) -> Vec<u8> { 52 + let mut key = Vec::with_capacity(RELAY_EVENT_WATERMARK_PREFIX.len() + 8); 53 + key.extend_from_slice(RELAY_EVENT_WATERMARK_PREFIX); 32 54 key.extend_from_slice(&timestamp_secs.to_be_bytes()); 33 55 key 34 56 } ··· 209 231 key 210 232 } 211 233 212 - /// key format: {SEQ} (u64 big-endian), mirroring event_key 213 234 #[cfg(feature = "relay")] 235 + /// key format: {SEQ} (u64 big-endian), mirroring event_key 214 236 pub fn relay_event_key(seq: u64) -> [u8; 8] { 215 237 seq.to_be_bytes() 216 238 }
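both watermark key builders use the same layout, a short ASCII prefix followed by the timestamp as big-endian bytes, so byte-wise key order matches numeric timestamp order and range scans like `range(..=cutoff_key)` behave correctly. a quick standalone check of that property:

```rust
// big-endian encoding preserves numeric order under byte-wise comparison,
// which is what the watermark range scans rely on.
fn watermark_key(prefix: &[u8], ts: u64) -> Vec<u8> {
    let mut key = Vec::with_capacity(prefix.len() + 8);
    key.extend_from_slice(prefix);
    key.extend_from_slice(&ts.to_be_bytes());
    key
}

fn main() {
    let a = watermark_key(b"ewm|", 1_700_000_000);
    let b = watermark_key(b"ewm|", 1_700_000_060);
    assert!(a < b); // the later timestamp sorts after the earlier one, byte-wise
}
```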
+2
src/db/migration/mod.rs
··· 7 7 mod v1; 8 8 mod v2; 9 9 mod v3; 10 + mod v4; 10 11 11 12 type MigrationFn = fn(&Db, &mut OwnedWriteBatch) -> Result<()>; 12 13 ··· 15 16 ("stable_firehose_cursors", v1::stable_firehose_cursors), 16 17 ("repo_state_root_commit", v2::repo_state_root_commit), 17 18 ("firehose_source_is_pds", v3::firehose_source_is_pds), 19 + ("repo_state_active", v4::repo_state_active), 18 20 ]; 19 21 20 22 fn read_version(db: &Db) -> Result<u64> {
+1
src/db/migration/v2.rs
··· 9 9 Db, 10 10 types::{DbTid, DidKey}, 11 11 }; 12 + use crate::types::Commit; 12 13 use crate::types::v2::*; 13 14 14 15 #[derive(Debug, Clone, Serialize, Deserialize)]
+89
src/db/migration/v4.rs
··· 1 + use fjall::OwnedWriteBatch; 2 + use miette::{Context, IntoDiagnostic, Result}; 3 + 4 + use crate::db::Db; 5 + use crate::types::v4; 6 + 7 + #[derive(serde::Deserialize)] 8 + #[serde(bound(deserialize = "'i: 'de"))] 9 + pub(crate) struct OldRepoState<'i> { 10 + pub status: crate::types::v2::RepoStatus, 11 + pub root: Option<crate::types::v2::Commit>, 12 + pub last_message_time: Option<i64>, 13 + pub last_updated_at: i64, 14 + pub tracked: bool, 15 + pub index_id: u64, 16 + #[serde(borrow)] 17 + pub signing_key: Option<crate::db::types::DidKey<'i>>, 18 + #[serde(borrow)] 19 + pub pds: Option<jacquard_common::CowStr<'i>>, 20 + #[serde(borrow)] 21 + pub handle: Option<jacquard_common::types::string::Handle<'i>>, 22 + } 23 + 24 + pub(super) fn repo_state_active(db: &Db, batch: &mut OwnedWriteBatch) -> Result<()> { 25 + for item in db.repos.iter() { 26 + let (k, v) = item.into_inner().into_diagnostic()?; 27 + let old: OldRepoState = rmp_serde::from_slice(&v) 28 + .into_diagnostic() 29 + .wrap_err("invalid repo state")?; 30 + 31 + // derive active from the old status: accounts in any inactive state had active=false; 32 + // everything else (synced, backfilling, error) was active from the upstream's perspective. 33 + let active = !matches!( 34 + old.status, 35 + crate::types::v2::RepoStatus::Deactivated 36 + | crate::types::v2::RepoStatus::Takendown 37 + | crate::types::v2::RepoStatus::Suspended 38 + ); 39 + 40 + let status = match old.status { 41 + crate::types::v2::RepoStatus::Backfilling => v4::RepoStatus::Desynchronized, 42 + crate::types::v2::RepoStatus::Synced => v4::RepoStatus::Synced, 43 + crate::types::v2::RepoStatus::Error(s) => match s.as_str() { 44 + "desynchronized" => v4::RepoStatus::Desynchronized, 45 + "throttled" => v4::RepoStatus::Throttled, 46 + _ => v4::RepoStatus::Error(s), 47 + }, 48 + crate::types::v2::RepoStatus::Deactivated => v4::RepoStatus::Deactivated, 49 + crate::types::v2::RepoStatus::Takendown => v4::RepoStatus::Takendown, 50 + crate::types::v2::RepoStatus::Suspended => v4::RepoStatus::Suspended, 51 + }; 52 + 53 + let new_state = v4::RepoState { 54 + active, 55 + status, 56 + root: old.root, 57 + last_message_time: old.last_message_time, 58 + last_updated_at: old.last_updated_at, 59 + signing_key: old.signing_key, 60 + pds: old.pds, 61 + handle: old.handle, 62 + }; 63 + 64 + let new_metadata = v4::RepoMetadata { 65 + tracked: old.tracked, 66 + index_id: old.index_id, 67 + }; 68 + 69 + batch.insert( 70 + &db.repos, 71 + k.clone(), 72 + rmp_serde::to_vec(&new_state) 73 + .into_diagnostic() 74 + .wrap_err("cant serialize new repo state")?, 75 + ); 76 + 77 + let did = crate::db::types::TrimmedDid::try_from(k.as_ref())?.to_did(); 78 + let metadata_key = crate::db::keys::repo_metadata_key(&did); 79 + batch.insert( 80 + &db.repo_metadata, 81 + metadata_key, 82 + rmp_serde::to_vec(&new_metadata) 83 + .into_diagnostic() 84 + .wrap_err("cant serialize new repo metadata")?, 85 + ); 86 + } 87 + 88 + Ok(()) 89 + }
+41 -36
src/db/mod.rs
··· 1 1 use crate::config::Compression; 2 2 use crate::db::compaction::DropPrefixFilterFactory; 3 - #[cfg(feature = "events")] 3 + #[cfg(feature = "indexer")] 4 4 use crate::types::BroadcastEvent; 5 5 #[cfg(feature = "relay")] 6 6 use crate::types::RelayBroadcast; 7 - use crate::types::RepoState; 7 + use crate::types::{RepoMetadata, RepoState}; 8 8 9 9 use fjall::config::{BlockSizePolicy, CompressionPolicy, RestartIntervalPolicy}; 10 10 use fjall::{ ··· 49 49 pub pending: Keyspace, 50 50 pub resync: Keyspace, 51 51 pub resync_buffer: Keyspace, 52 + pub repo_metadata: Keyspace, 52 53 pub events: Keyspace, 53 54 pub counts: Keyspace, 54 55 pub filter: Keyspace, 55 56 pub crawler: Keyspace, 56 57 #[cfg(feature = "backlinks")] 57 58 pub backlinks: Keyspace, 58 - #[cfg(feature = "events")] 59 + #[cfg(feature = "indexer")] 59 60 pub(crate) event_tx: broadcast::Sender<BroadcastEvent>, 60 - #[cfg(feature = "events")] 61 + #[cfg(feature = "indexer")] 61 62 pub next_event_id: Arc<AtomicU64>, 62 63 #[cfg(feature = "relay")] 63 64 pub(crate) relay_events: Keyspace, ··· 297 298 .data_block_compression_policy(CompressionPolicy::disabled()) 298 299 .data_block_restart_interval_policy(RestartIntervalPolicy::all(16)), 299 300 )?; 301 + let repo_metadata = open_ks( 302 + "repo_metadata", 303 + opts() 304 + // point reads for tracking check 305 + .expect_point_read_hits(true) 306 + .max_memtable_size(mb(8)) 307 + .data_block_size_policy(BlockSizePolicy::all(kb(4))) 308 + .data_block_compression_policy(CompressionPolicy::disabled()) 309 + .data_block_restart_interval_policy(RestartIntervalPolicy::all(4)), 310 + )?; 300 311 let events = open_ks( 301 312 "events", 302 313 opts() ··· 404 415 // when adding new keyspaces, make sure to add them to the /stats endpoint 405 416 // and also update any relevant /debug/* endpoints 406 417 407 - #[cfg(feature = "events")] 418 + #[cfg(feature = "indexer")] 408 419 let (event_tx, _) = broadcast::channel(10000); 420 + 421 + #[cfg(feature = "relay")] 422 + let (relay_broadcast_tx, _) = broadcast::channel(10000); 409 423 410 424 let this = Self { 411 425 inner: db, ··· 417 431 pending, 418 432 resync, 419 433 resync_buffer, 434 + repo_metadata, 420 435 events, 421 436 counts, 422 437 filter, 423 438 crawler, 424 439 #[cfg(feature = "backlinks")] 425 440 backlinks, 426 - #[cfg(feature = "events")] 441 + #[cfg(feature = "indexer")] 427 442 event_tx, 428 443 counts_map: HashMap::new(), 429 - #[cfg(feature = "events")] 444 + #[cfg(feature = "indexer")] 430 445 next_event_id: Arc::new(AtomicU64::new(0)), 431 446 #[cfg(feature = "relay")] 432 447 relay_events, 433 448 #[cfg(feature = "relay")] 434 449 next_relay_seq: Arc::new(AtomicU64::new(0)), 435 450 #[cfg(feature = "relay")] 436 - relay_broadcast_tx: { 437 - let (tx, _) = broadcast::channel(10000); 438 - tx 439 - }, 451 + relay_broadcast_tx, 440 452 }; 441 453 442 454 migration::run(&this)?; ··· 457 469 .store(last_relay_seq + 1, std::sync::atomic::Ordering::Relaxed); 458 470 } 459 471 460 - #[cfg(feature = "events")] 472 + #[cfg(feature = "indexer")] 461 473 { 462 474 let mut last_id = 0; 463 475 if let Some(guard) = this.events.iter().next_back() { ··· 572 584 } 573 585 574 586 pub fn persist(&self) -> Result<()> { 575 - #[cfg(not(feature = "sync_all"))] 587 + #[cfg(not(feature = "__persist_sync_all"))] 576 588 const MODE: PersistMode = PersistMode::Buffer; 577 - #[cfg(feature = "sync_all")] 589 + #[cfg(feature = "__persist_sync_all")] 578 590 const MODE: PersistMode = PersistMode::SyncAll; 579 591 
self.inner.persist(MODE).into_diagnostic()?; 580 592 Ok(()) ··· 594 606 compact(self.pending.clone()), 595 607 compact(self.resync.clone()), 596 608 compact(self.resync_buffer.clone()), 609 + compact(self.repo_metadata.clone()), 597 610 compact(self.events.clone()), 598 611 compact(self.counts.clone()), 599 612 compact(self.filter.clone()), 600 613 compact(self.crawler.clone()), 601 614 )?; 615 + #[cfg(feature = "relay")] 616 + compact(self.relay_events.clone()).await?; 602 617 #[cfg(feature = "backlinks")] 603 618 compact(self.backlinks.clone()).await?; 604 - #[cfg(feature = "relay")] 605 - compact(self.relay_events.clone()).await?; 606 619 Ok(()) 607 620 } 608 621 ··· 762 775 ) -> crate::types::GaugeState { 763 776 match repo_state.status { 764 777 crate::types::RepoStatus::Synced => crate::types::GaugeState::Synced, 765 - crate::types::RepoStatus::Backfilling => crate::types::GaugeState::Pending, 766 778 crate::types::RepoStatus::Error(_) 767 779 | crate::types::RepoStatus::Deactivated 768 780 | crate::types::RepoStatus::Takendown 769 - | crate::types::RepoStatus::Suspended => { 781 + | crate::types::RepoStatus::Suspended 782 + | crate::types::RepoStatus::Deleted 783 + | crate::types::RepoStatus::Desynchronized 784 + | crate::types::RepoStatus::Throttled => { 770 785 if let Some(resync_bytes) = resync_bytes { 771 786 if let Ok(crate::types::ResyncState::Error { kind, .. }) = 772 787 rmp_serde::from_slice::<crate::types::ResyncState>(resync_bytes) ··· 781 796 } 782 797 } 783 798 } 784 - 785 - pub(crate) async fn repo_gauge_state_async( 786 - &self, 787 - repo_state: &RepoState<'_>, 788 - did_key: &[u8], 789 - ) -> crate::types::GaugeState { 790 - let repo_state = repo_state.clone().into_static(); 791 - let did_key = did_key.to_vec(); 792 - 793 - let db_resync = self.resync.clone(); 794 - 795 - tokio::task::spawn_blocking(move || { 796 - let resync_bytes_opt = db_resync.get(&did_key).ok().flatten(); 797 - Self::repo_gauge_state(&repo_state, resync_bytes_opt.as_deref()) 798 - }) 799 - .await 800 - .unwrap_or(crate::types::GaugeState::Resync(None)) 801 - } 802 799 } 803 800 804 801 pub fn set_firehose_cursor(db: &Db, relay: &Url, cursor: i64) -> Result<()> { ··· 823 820 )) 824 821 }) 825 822 .transpose() 823 + } 824 + 825 + pub fn ser_repo_metadata(state: &RepoMetadata) -> Result<Vec<u8>> { 826 + rmp_serde::to_vec(&state).into_diagnostic() 827 + } 828 + 829 + pub fn deser_repo_metadata(bytes: &[u8]) -> Result<RepoMetadata> { 830 + rmp_serde::from_slice(bytes).into_diagnostic() 826 831 } 827 832 828 833 pub fn ser_repo_state(state: &RepoState) -> Result<Vec<u8>> {
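The new `ser_repo_metadata` / `deser_repo_metadata` helpers are plain `rmp_serde` round-trips, mirroring the existing `RepoState` helpers. A quick sketch of that round-trip, assuming `RepoMetadata` keeps the two fields shown in the migration (`tracked`, `index_id`); `roundtrip` is a hypothetical test-style helper:

```rust
use crate::db::{deser_repo_metadata, ser_repo_metadata};
use crate::types::RepoMetadata;
use miette::Result;

// sketch: RepoMetadata is serialized as MessagePack via rmp_serde, exactly
// like RepoState; the helpers are thin wrappers over to_vec / from_slice.
fn roundtrip() -> Result<()> {
    let meta = RepoMetadata { tracked: true, index_id: 42 };
    let bytes = ser_repo_metadata(&meta)?;
    let back = deser_repo_metadata(&bytes)?;
    assert!(back.tracked && back.index_id == 42);
    Ok(())
}
```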
+5 -6
src/filter.rs
··· 57 57 } 58 58 } 59 59 60 - fn nsid_matches(pattern: &str, collection: &str) -> bool { 61 - if let Some(prefix) = pattern.strip_suffix(".*") { 62 - collection == prefix || collection.starts_with(prefix) 63 - } else { 64 - collection == pattern 65 - } 60 + fn nsid_matches(pattern: &str, col: &str) -> bool { 61 + pattern 62 + .strip_suffix(".*") 63 + .map(|prefix| col == prefix || col.starts_with(prefix)) 64 + .unwrap_or_else(|| col == pattern) 66 65 }
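The refactored `nsid_matches` keeps the same behavior as the `if/else` version it replaces: a pattern ending in `.*` matches the bare prefix itself as well as anything extending it, while a pattern without the wildcard must match exactly. A few illustrative cases with example NSIDs:

```rust
// wildcard patterns match the prefix itself and anything extending it
assert!(nsid_matches("app.bsky.feed.*", "app.bsky.feed"));
assert!(nsid_matches("app.bsky.feed.*", "app.bsky.feed.post"));
// exact patterns only match the identical collection
assert!(nsid_matches("app.bsky.feed.post", "app.bsky.feed.post"));
assert!(!nsid_matches("app.bsky.feed.post", "app.bsky.feed.like"));
```

Note the wildcard arm is a plain string-prefix check, so a collection such as `app.bsky.feedgen` would also match `app.bsky.feed.*`.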
+18 -10
src/ingest/firehose.rs
··· 1 - use crate::db::deser_repo_state; 2 1 use crate::filter::{FilterHandle, FilterMode}; 3 2 use crate::ingest::stream::{FirehoseError, FirehoseStream, SubscribeReposMessage, decode_frame}; 4 3 use crate::ingest::{BufferTx, IngestMessage}; ··· 147 146 } 148 147 trace!(did = %did, "forwarding message to ingest buffer"); 149 148 150 - if let Err(e) = self.buffer_tx.send(IngestMessage::Firehose { 151 - relay: self.relay_host.clone(), 152 - is_pds: self.is_pds, 153 - msg: msg.into_static(), 154 - }) { 149 + if let Err(e) = self 150 + .buffer_tx 151 + .send(IngestMessage::Firehose { 152 + url: self.relay_host.clone(), 153 + is_pds: self.is_pds, 154 + msg: msg.into_static(), 155 + }) 156 + .await 157 + { 155 158 error!(err = %e, "failed to send message to buffer processor"); 156 159 } 157 160 } ··· 174 177 match filter.mode { 175 178 FilterMode::Full => Ok(true), 176 179 FilterMode::Filter => { 177 - let repo_key = crate::db::keys::repo_key(&did); 178 - if let Some(bytes) = state.db.repos.get(&repo_key).into_diagnostic()? { 179 - let repo_state = deser_repo_state(&bytes)?; 180 + let metadata_key = crate::db::keys::repo_metadata_key(&did); 181 + if let Some(bytes) = state 182 + .db 183 + .repo_metadata 184 + .get(&metadata_key) 185 + .into_diagnostic()? 186 + { 187 + let metadata = crate::db::deser_repo_metadata(bytes.as_ref())?; 180 188 181 - if repo_state.tracked { 189 + if metadata.tracked { 182 190 trace!(did = %did, "tracked repo, processing"); 183 191 return Ok(true); 184 192 } else {
+653
src/ingest/indexer.rs
··· 1 + use super::*; 2 + use crate::db::{self, keys, ser_repo_metadata}; 3 + use crate::ingest::stream::{Account, Commit, Identity}; 4 + use crate::ingest::validation; 5 + use crate::ops; 6 + use crate::resolver::{NoSigningKeyError, ResolverError}; 7 + use crate::state::AppState; 8 + use crate::types::{ 9 + AccountEvt, BroadcastEvent, GaugeState, IdentityEvt, RepoMetadata, RepoState, RepoStatus, 10 + }; 11 + 12 + use fjall::OwnedWriteBatch; 13 + 14 + use jacquard_common::IntoStatic; 15 + use jacquard_common::cowstr::ToCowStr; 16 + use jacquard_common::types::did::Did; 17 + use jacquard_repo::error::CommitError; 18 + use miette::{Diagnostic, IntoDiagnostic, Result}; 19 + use std::collections::hash_map::DefaultHasher; 20 + use std::hash::{Hash, Hasher}; 21 + use std::sync::Arc; 22 + use std::sync::atomic::Ordering::SeqCst; 23 + use thiserror::Error; 24 + use tokio::runtime::Handle as TokioHandle; 25 + use tokio::sync::mpsc; 26 + use tracing::{debug, error, info, warn}; 27 + 28 + #[derive(Debug)] 29 + pub struct IndexerCommitData { 30 + pub commit: stream::Commit<'static>, 31 + /// true if the relay detected a gap (missing seq) before this commit 32 + /// and the indexer should trigger a backfill. 33 + pub chain_break: bool, 34 + /// result of parse_car_bytes, already done by relay so indexer does not re-parse. 35 + pub parsed_blocks: jacquard_repo::car::reader::ParsedCar, 36 + } 37 + 38 + #[derive(Debug)] 39 + pub struct IndexerIdentityData { 40 + pub identity: stream::Identity<'static>, 41 + /// whether the identity actually changed (handle or key). 42 + pub changed: bool, 43 + } 44 + 45 + #[derive(Debug)] 46 + pub struct IndexerAccountData { 47 + pub account: stream::Account<'static>, 48 + /// whether the repo was active prior to this event. 49 + pub was_active: bool, 50 + /// whether any state actually changed (active or status). 51 + pub changed: bool, 52 + } 53 + 54 + #[derive(Debug)] 55 + pub enum IndexerEventData { 56 + Commit(IndexerCommitData), 57 + Identity(IndexerIdentityData), 58 + Account(IndexerAccountData), 59 + Sync(Did<'static>), 60 + } 61 + 62 + #[derive(Debug)] 63 + pub struct IndexerEvent { 64 + pub seq: i64, 65 + pub firehose: Url, 66 + pub data: IndexerEventData, 67 + } 68 + 69 + /// message sent from `relay_worker` to the indexer (`FirehoseWorker`) after 70 + /// validation and repo-state management are done. 71 + #[derive(Debug)] 72 + pub enum IndexerMessage { 73 + /// a firehose event that passed relay-side validation. 74 + Event(Box<IndexerEvent>), 75 + /// a new repo was discovered and needs backfill. 76 + NewRepo(Did<'static>), 77 + /// backfill for this DID has completed; drain the resync buffer. 
78 + BackfillFinished(Did<'static>), 79 + } 80 + 81 + pub type IndexerTx = mpsc::Sender<IndexerMessage>; 82 + pub type IndexerRx = mpsc::Receiver<IndexerMessage>; 83 + 84 + #[derive(Debug, Diagnostic, Error)] 85 + enum IngestError { 86 + #[error("{0}")] 87 + Generic(miette::Report), 88 + 89 + #[error(transparent)] 90 + #[diagnostic(transparent)] 91 + Resolver(#[from] ResolverError), 92 + 93 + #[error(transparent)] 94 + #[diagnostic(transparent)] 95 + Commit(#[from] CommitError), 96 + 97 + #[error(transparent)] 98 + #[diagnostic(transparent)] 99 + NoSigningKey(#[from] NoSigningKeyError), 100 + } 101 + 102 + impl From<miette::Report> for IngestError { 103 + fn from(report: miette::Report) -> Self { 104 + IngestError::Generic(report) 105 + } 106 + } 107 + 108 + #[derive(Debug)] 109 + enum RepoProcessResult<'s, 'c> { 110 + // message processed successfully, here is the (possibly updated) state 111 + Ok(RepoState<'s>), 112 + // repo was deleted as part of processing 113 + Deleted, 114 + // needs backfill; carries the triggering commit to buffer (None when already in the buffer) 115 + NeedsBackfill(Option<&'c Commit<'c>>), 116 + } 117 + 118 + pub struct FirehoseWorker { 119 + state: Arc<AppState>, 120 + rx: IndexerRx, 121 + ephemeral: bool, 122 + num_shards: usize, 123 + } 124 + 125 + struct WorkerContext<'a> { 126 + ephemeral: bool, 127 + state: &'a AppState, 128 + batch: OwnedWriteBatch, 129 + added_blocks: &'a mut i64, 130 + records_delta: &'a mut i64, 131 + broadcast_events: &'a mut Vec<BroadcastEvent>, 132 + } 133 + 134 + impl FirehoseWorker { 135 + pub fn new(state: Arc<AppState>, rx: IndexerRx, ephemeral: bool, num_shards: usize) -> Self { 136 + Self { 137 + state, 138 + rx, 139 + ephemeral, 140 + num_shards, 141 + } 142 + } 143 + 144 + // starts the worker threads and the main dispatch loop 145 + // the dispatch loop reads from the firehose channel and 146 + // distributes messages to shards based on the hash of the DID 147 + pub fn run(mut self, handle: TokioHandle) -> Result<()> { 148 + let mut shards = Vec::with_capacity(self.num_shards); 149 + 150 + for i in 0..self.num_shards { 151 + // unbounded here so we dont block other shards potentially 152 + // if one has a small lag or something 153 + let (tx, rx) = mpsc::unbounded_channel(); 154 + shards.push(tx); 155 + 156 + let state = self.state.clone(); 157 + let ephemeral = self.ephemeral; 158 + let handle = handle.clone(); 159 + std::thread::Builder::new() 160 + .name(format!("ingest-shard-{i}")) 161 + .spawn(move || { 162 + Self::shard(i, rx, state, ephemeral, handle); 163 + }) 164 + .into_diagnostic()?; 165 + } 166 + 167 + info!(num = self.num_shards, "started shards"); 168 + 169 + while let Some(msg) = self.rx.blocking_recv() { 170 + let did = match &msg { 171 + IndexerMessage::Event(e) => match &e.data { 172 + IndexerEventData::Commit(m) => &m.commit.repo, 173 + IndexerEventData::Identity(m) => &m.identity.did, 174 + IndexerEventData::Account(m) => &m.account.did, 175 + IndexerEventData::Sync(did) => did, 176 + }, 177 + IndexerMessage::NewRepo(did) => did, 178 + IndexerMessage::BackfillFinished(did) => did, 179 + }; 180 + 181 + let mut hasher = DefaultHasher::new(); 182 + did.hash(&mut hasher); 183 + let hash = hasher.finish(); 184 + let shard_idx = (hash as usize) % self.num_shards; 185 + 186 + if let Err(e) = shards[shard_idx].send(msg) { 187 + error!(shard = shard_idx, err = %e, "failed to send message to shard, shard panicked?"); 188 + break; 189 + } 190 + } 191 + 192 + Err(miette::miette!( 193 + "firehose worker dispatcher 
shutting down, shard died?" 194 + )) 195 + } 196 + 197 + #[inline(always)] 198 + fn shard( 199 + id: usize, 200 + mut rx: mpsc::UnboundedReceiver<IndexerMessage>, 201 + state: Arc<AppState>, 202 + ephemeral: bool, 203 + handle: TokioHandle, 204 + ) { 205 + let _guard = handle.enter(); 206 + debug!(shard = id, "shard started"); 207 + 208 + let mut broadcast_events = Vec::new(); 209 + 210 + while let Some(msg) = rx.blocking_recv() { 211 + let batch = state.db.inner.batch(); 212 + broadcast_events.clear(); 213 + 214 + let mut added_blocks = 0; 215 + let mut records_delta = 0; 216 + 217 + let mut ctx = WorkerContext { 218 + state: &state, 219 + batch, 220 + added_blocks: &mut added_blocks, 221 + records_delta: &mut records_delta, 222 + broadcast_events: &mut broadcast_events, 223 + ephemeral, 224 + }; 225 + 226 + match msg { 227 + IndexerMessage::BackfillFinished(did) => { 228 + let _span = tracing::info_span!("ingest", did = %did).entered(); 229 + debug!("backfill finished, verifying state and draining buffer"); 230 + 231 + let repo_key = keys::repo_key(&did); 232 + if let Ok(Some(state_bytes)) = state.db.repos.get(&repo_key).into_diagnostic() { 233 + match crate::db::deser_repo_state(&state_bytes) { 234 + Ok(repo_state) => { 235 + let repo_state = repo_state.into_static(); 236 + 237 + match Self::drain_resync_buffer(&mut ctx, &did, repo_state) { 238 + Ok(RepoProcessResult::Ok(s)) => { 239 + let res = ops::transition_repo( 240 + &mut ctx.batch, 241 + &state.db, 242 + &did, 243 + s, 244 + RepoStatus::Synced, 245 + ); 246 + if let Err(e) = res { 247 + error!(err = %e, "failed to transition to synced"); 248 + } 249 + } 250 + Ok(RepoProcessResult::NeedsBackfill(_)) => {} 251 + Ok(RepoProcessResult::Deleted) => {} 252 + Err(e) => { 253 + error!(err = %e, "failed to drain resync buffer") 254 + } 255 + }; 256 + } 257 + Err(e) => error!(err = %e, "failed to deser repo state"), 258 + } 259 + } 260 + } 261 + IndexerMessage::NewRepo(did) => { 262 + let _span = tracing::info_span!("ingest", did = %did).entered(); 263 + debug!("new repo discovered, triggering backfill"); 264 + 265 + let repo_key = keys::repo_key(&did); 266 + if let Ok(Some(state_bytes)) = state.db.repos.get(&repo_key).into_diagnostic() { 267 + match crate::db::deser_repo_state(&state_bytes) { 268 + Ok(repo_state) => { 269 + if let Err(e) = Self::trigger_backfill(&mut ctx, &did, repo_state) { 270 + error!(err = %e, "failed to trigger backfill for new repo"); 271 + } 272 + } 273 + Err(e) => error!(err = %e, "failed to deser repo state"), 274 + } 275 + } 276 + } 277 + IndexerMessage::Event(e) => { 278 + let IndexerEvent { 279 + seq, 280 + firehose, 281 + data, 282 + } = *e; 283 + let _span = tracing::info_span!("ingest", hose = %firehose, did = tracing::field::Empty).entered(); 284 + 285 + let repo_bytes = { 286 + let did = match &data { 287 + IndexerEventData::Commit(m) => &m.commit.repo, 288 + IndexerEventData::Identity(m) => &m.identity.did, 289 + IndexerEventData::Account(m) => &m.account.did, 290 + IndexerEventData::Sync(did) => did, 291 + }; 292 + _span.record("did", did.as_ref()); 293 + 294 + let repo_key = keys::repo_key(did); 295 + match state.db.repos.get(&repo_key).into_diagnostic() { 296 + Ok(Some(b)) => b, 297 + Ok(None) => { 298 + state 299 + .firehose_cursors 300 + .peek_with(&firehose, |_, c| c.store(seq, SeqCst)); 301 + continue; 302 + } 303 + Err(e) => { 304 + error!(err = %e, "failed to get repo state"); 305 + state 306 + .firehose_cursors 307 + .peek_with(&firehose, |_, c| c.store(seq, SeqCst)); 308 + continue; 309 + } 
310 + } 311 + }; 312 + let repo_state = match crate::db::deser_repo_state(&repo_bytes) { 313 + Ok(s) => s, 314 + Err(e) => { 315 + error!(err = %e, "failed to deser repo state"); 316 + state 317 + .firehose_cursors 318 + .peek_with(&firehose, |_, c| c.store(seq, SeqCst)); 319 + continue; 320 + } 321 + }; 322 + 323 + match data { 324 + IndexerEventData::Commit(msg) => { 325 + let IndexerCommitData { 326 + commit, 327 + chain_break, 328 + parsed_blocks, 329 + } = msg; 330 + 331 + let try_persist = |commit: &Commit| { 332 + if let Err(e) = 333 + ops::persist_to_resync_buffer(&state.db, &commit.repo, commit) 334 + { 335 + error!(err = %e, "failed to persist commit to resync_buffer"); 336 + } 337 + }; 338 + 339 + match Self::handle_commit( 340 + &mut ctx, 341 + repo_state, 342 + &commit, 343 + chain_break, 344 + parsed_blocks, 345 + ) { 346 + Ok(RepoProcessResult::Ok(_)) => {} 347 + Ok(RepoProcessResult::Deleted) => { 348 + state.db.update_count("repos", -1); 349 + } 350 + Ok(RepoProcessResult::NeedsBackfill(Some(commit))) => { 351 + try_persist(commit); 352 + } 353 + Ok(RepoProcessResult::NeedsBackfill(None)) => {} 354 + Err(e) => { 355 + if let IngestError::Generic(ref r) = e { 356 + db::check_poisoned_report(r); 357 + } 358 + error!(err = %e, "error processing commit"); 359 + try_persist(&commit); 360 + } 361 + } 362 + } 363 + IndexerEventData::Identity(msg) => { 364 + let IndexerIdentityData { identity, changed } = msg; 365 + 366 + if let Err(e) = 367 + Self::handle_identity(&mut ctx, repo_state, &identity, changed) 368 + { 369 + error!(err = %e, "error processing identity"); 370 + } 371 + } 372 + IndexerEventData::Account(msg) => { 373 + let IndexerAccountData { 374 + account, 375 + was_active, 376 + changed, 377 + } = msg; 378 + 379 + if let Err(e) = Self::handle_account( 380 + &mut ctx, repo_state, changed, &account, was_active, 381 + ) { 382 + error!(err = %e, "error processing account"); 383 + } 384 + } 385 + IndexerEventData::Sync(did) => { 386 + warn!("sync event, triggering backfill"); 387 + if let Err(e) = Self::trigger_backfill(&mut ctx, &did, repo_state) { 388 + error!(err = %e, "failed to trigger backfill on sync"); 389 + } 390 + } 391 + } 392 + state 393 + .firehose_cursors 394 + .peek_with(&firehose, |_, c| c.store(seq, SeqCst)); 395 + } 396 + } 397 + 398 + if let Err(e) = ctx.batch.commit() { 399 + error!(shard = id, err = %e, "failed to commit batch"); 400 + } 401 + 402 + if added_blocks > 0 { 403 + state.db.update_count("blocks", added_blocks); 404 + } 405 + if records_delta != 0 { 406 + state.db.update_count("records", records_delta); 407 + } 408 + for evt in broadcast_events.drain(..) 
{ 409 + let _ = state.db.event_tx.send(evt); 410 + } 411 + 412 + // state.db.inner.persist(fjall::PersistMode::Buffer).ok(); 413 + } 414 + } 415 + 416 + // don't retry commit or sync on key fetch errors 417 + // since we'll just try again later if we get commit or sync again 418 + fn check_if_retriable_failure(e: &IngestError) -> bool { 419 + matches!( 420 + e, 421 + IngestError::Generic(_) 422 + | IngestError::Resolver(ResolverError::Ratelimited) 423 + | IngestError::Resolver(ResolverError::Transport(_)) 424 + ) 425 + } 426 + } 427 + 428 + impl FirehoseWorker { 429 + fn handle_commit<'s, 'c>( 430 + ctx: &mut WorkerContext, 431 + mut repo_state: RepoState<'s>, 432 + commit: &'c Commit<'c>, 433 + chain_break: bool, 434 + parsed_blocks: jacquard_repo::car::reader::ParsedCar, 435 + ) -> Result<RepoProcessResult<'s, 'c>, IngestError> { 436 + let db = &ctx.state.db; 437 + let did = &commit.repo; 438 + repo_state.advance_message_time(commit.time.0.timestamp_millis()); 439 + 440 + let metadata_key = keys::repo_metadata_key(did); 441 + let metadata_bytes = db.repo_metadata.get(&metadata_key).into_diagnostic()?; 442 + let is_backfilling = if let Some(metadata_bytes) = metadata_bytes { 443 + let metadata = crate::db::deser_repo_metadata(metadata_bytes.as_ref())?; 444 + db.pending 445 + .get(keys::pending_key(metadata.index_id)) 446 + .into_diagnostic()? 447 + .is_some() 448 + } else { 449 + false 450 + }; 451 + 452 + if chain_break { 453 + warn!("chain break detected, triggering backfill"); 454 + Self::trigger_backfill(ctx, did, repo_state)?; 455 + return Ok(RepoProcessResult::NeedsBackfill(Some(commit))); 456 + } 457 + 458 + if is_backfilling { 459 + return Ok(RepoProcessResult::NeedsBackfill(Some(commit))); 460 + } 461 + 462 + let root_bytes = parsed_blocks 463 + .blocks 464 + .get(&parsed_blocks.root) 465 + .ok_or_else(|| IngestError::Generic(miette::miette!("root block missing from CAR")))?; 466 + 467 + let commit_obj = jacquard_repo::commit::Commit::from_cbor(root_bytes) 468 + .map_err(|e| IngestError::Generic(miette::miette!("invalid commit object: {e}")))? 
469 + .into_static(); 470 + 471 + let validated = validation::ValidatedCommit { 472 + commit, 473 + parsed_blocks, 474 + commit_obj, 475 + chain_break: validation::ChainBreak::default(), // not used by apply_commit 476 + }; 477 + 478 + let res = ops::apply_commit( 479 + &mut ctx.batch, 480 + db, 481 + repo_state, 482 + validated, 483 + &ctx.state.filter.load(), 484 + ctx.ephemeral, 485 + )?; 486 + let repo_state = res.repo_state; 487 + *ctx.added_blocks += res.blocks_count; 488 + *ctx.records_delta += res.records_delta; 489 + ctx.broadcast_events 490 + .push(BroadcastEvent::Persisted(db.next_event_id.load(SeqCst) - 1)); 491 + 492 + Ok(RepoProcessResult::Ok(repo_state)) 493 + } 494 + 495 + fn handle_identity<'s>( 496 + ctx: &mut WorkerContext, 497 + repo_state: RepoState<'s>, 498 + identity: &Identity<'_>, 499 + changed: bool, 500 + ) -> Result<RepoProcessResult<'s, 'static>, IngestError> { 501 + let db = &ctx.state.db; 502 + let did = &identity.did; 503 + if changed { 504 + let evt = IdentityEvt { 505 + did: did.clone().into_static(), 506 + handle: repo_state.handle.clone().map(IntoStatic::into_static), 507 + }; 508 + ctx.broadcast_events.push(ops::make_identity_event(db, evt)); 509 + } 510 + 511 + Ok(RepoProcessResult::Ok(repo_state)) 512 + } 513 + 514 + fn handle_account<'s, 'c>( 515 + ctx: &mut WorkerContext, 516 + repo_state: RepoState<'s>, 517 + changed: bool, 518 + account: &'c Account<'c>, 519 + was_active: bool, 520 + ) -> Result<RepoProcessResult<'s, 'c>, IngestError> { 521 + let db = &ctx.state.db; 522 + let did = &account.did; 523 + let is_inactive = !account.active; 524 + let evt = AccountEvt { 525 + did: did.clone().into_static(), 526 + active: account.active, 527 + status: account.status.as_ref().map(|s| s.to_cowstr().into_static()), 528 + }; 529 + 530 + if is_inactive { 531 + use crate::ingest::stream::AccountStatus; 532 + match &account.status { 533 + Some(AccountStatus::Deleted) => { 534 + debug!("account deleted, wiping data"); 535 + crate::ops::delete_repo(&mut ctx.batch, db, did, &repo_state)?; 536 + return Ok(RepoProcessResult::Deleted); 537 + } 538 + _ => { 539 + // status update logic is now handled in RelayWorker; 540 + // FirehoseWorker just needs to update gauges if status changed. 
541 + if changed && was_active { 542 + db.update_gauge_diff(&GaugeState::Synced, &GaugeState::Resync(None)); 543 + } 544 + } 545 + } 546 + } else { 547 + // if account became active, update gauges 548 + if !was_active { 549 + db.update_gauge_diff(&GaugeState::Resync(None), &GaugeState::Synced); 550 + } 551 + } 552 + 553 + if changed { 554 + ctx.broadcast_events.push(ops::make_account_event(db, evt)); 555 + } 556 + 557 + Ok(RepoProcessResult::Ok(repo_state)) 558 + } 559 + 560 + fn drain_resync_buffer<'s>( 561 + ctx: &mut WorkerContext, 562 + did: &Did, 563 + mut repo_state: RepoState<'s>, 564 + ) -> Result<RepoProcessResult<'s, 'static>, IngestError> { 565 + let db = &ctx.state.db; 566 + let prefix = keys::resync_buffer_prefix(did); 567 + 568 + for guard in db.resync_buffer.prefix(&prefix) { 569 + let (key, value) = guard.into_inner().into_diagnostic()?; 570 + let commit: Commit = rmp_serde::from_slice(&value).into_diagnostic()?; 571 + 572 + let parsed_blocks = TokioHandle::current() 573 + .block_on(jacquard_repo::car::reader::parse_car_bytes( 574 + commit.blocks.as_ref(), 575 + )) 576 + .map_err(|e| IngestError::Generic(miette::miette!("malformed CAR: {e}")))?; 577 + 578 + // buffered commits have already been source-checked on arrival; skip host check 579 + let res = Self::handle_commit(ctx, repo_state, &commit, false, parsed_blocks); 580 + let res = match res { 581 + Ok(r) => r, 582 + Err(e) => { 583 + if !Self::check_if_retriable_failure(&e) { 584 + ctx.batch.remove(&db.resync_buffer, key); 585 + } 586 + return Err(e); 587 + } 588 + }; 589 + match res { 590 + RepoProcessResult::Ok(rs) => { 591 + ctx.batch.remove(&db.resync_buffer, key); 592 + repo_state = rs; 593 + } 594 + RepoProcessResult::NeedsBackfill(_) => { 595 + // commit is already in the buffer, leave it there for the next backfill 596 + return Ok(RepoProcessResult::NeedsBackfill(None)); 597 + } 598 + RepoProcessResult::Deleted => { 599 + ctx.batch.remove(&db.resync_buffer, key); 600 + return Ok(RepoProcessResult::Deleted); 601 + } 602 + } 603 + } 604 + 605 + Ok(RepoProcessResult::Ok(repo_state)) 606 + } 607 + 608 + fn trigger_backfill<'s>( 609 + ctx: &mut WorkerContext, 610 + did: &Did, 611 + repo_state: RepoState<'s>, 612 + ) -> Result<RepoState<'s>, IngestError> { 613 + let db = &ctx.state.db; 614 + let mut batch = db.inner.batch(); 615 + let repo_key = keys::repo_key(did); 616 + let meta_key = keys::repo_metadata_key(did); 617 + 618 + let resync_bytes = db.resync.get(&repo_key).into_diagnostic()?; 619 + let old_gauge = crate::db::Db::repo_gauge_state(&repo_state, resync_bytes.as_deref()); 620 + 621 + let existing_metadata = db 622 + .repo_metadata 623 + .get(&meta_key) 624 + .into_diagnostic()? 
625 + .map(|b| crate::db::deser_repo_metadata(&b)) 626 + .transpose()?; 627 + let had_metadata = existing_metadata.is_some(); 628 + let mut metadata = existing_metadata.unwrap_or_else(|| RepoMetadata { 629 + index_id: 0, // this is set later 630 + tracked: true, 631 + }); 632 + 633 + let old_pkey = keys::pending_key(metadata.index_id); 634 + let was_pending = had_metadata && db.pending.get(&old_pkey).into_diagnostic()?.is_some(); 635 + // remove old pending entry and insert new one with fresh index_id 636 + if had_metadata { 637 + // only remove if we had one so we dont delete a random entry 638 + batch.remove(&db.pending, old_pkey); 639 + } 640 + 641 + metadata.index_id = rand::random::<u64>(); 642 + batch.insert(&db.pending, keys::pending_key(metadata.index_id), &repo_key); 643 + batch.insert(&db.repo_metadata, &meta_key, ser_repo_metadata(&metadata)?); 644 + batch.commit().into_diagnostic()?; 645 + 646 + if !was_pending { 647 + db.update_gauge_diff(&old_gauge, &crate::types::GaugeState::Pending); 648 + ctx.state.notify_backfill(); 649 + } 650 + 651 + Ok(repo_state) 652 + } 653 + }
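Both the indexer dispatcher above and the relay dispatcher later in this change route every message to a worker shard by hashing its DID, so all events for a given repo are handled in order on one shard while unrelated repos proceed in parallel. A standalone sketch of that routing (`shard_for` is a hypothetical name; the hashing scheme is the same `DefaultHasher` one used above):

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// sketch: map a DID (or anything hashable) to a shard index; the same input
// always lands on the same shard, which preserves per-repo event ordering.
fn shard_for<T: Hash + ?Sized>(did: &T, num_shards: usize) -> usize {
    let mut hasher = DefaultHasher::new();
    did.hash(&mut hasher);
    (hasher.finish() as usize) % num_shards
}

fn main() {
    // e.g. two events for the same repo pick the same shard
    assert_eq!(shard_for("did:plc:example", 8), shard_for("did:plc:example", 8));
}
```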
+7 -118
src/ingest/mod.rs
··· 1 1 use tokio::sync::mpsc; 2 - use tracing::warn; 3 2 4 3 pub mod firehose; 5 - #[cfg(feature = "relay")] 6 - pub mod relay_worker; 4 + #[cfg(feature = "indexer")] 5 + pub mod indexer; 6 + pub mod relay; 7 7 pub mod stream; 8 8 pub mod validation; 9 - #[cfg(feature = "events")] 10 - pub mod worker; 11 - 12 - use jacquard_common::types::crypto::PublicKey; 13 - use jacquard_common::types::did::Did; 14 - use miette::Result; 15 - use smol_str::{SmolStr, ToSmolStr}; 16 9 use url::Url; 17 10 18 - use crate::ingest::stream::{AccountStatus, SubscribeReposMessage}; 19 - use crate::resolver::Resolver; 20 - use crate::types::{RepoState, RepoStatus}; 11 + use crate::ingest::stream::SubscribeReposMessage; 21 12 22 13 #[derive(Debug)] 23 14 pub enum IngestMessage { 24 15 Firehose { 25 - relay: Url, 16 + url: Url, 26 17 /// true when `relay` is a direct PDS connection (not an aggregating relay). 27 18 /// enables host authority enforcement in the worker. 28 19 is_pds: bool, 29 20 msg: SubscribeReposMessage<'static>, 30 21 }, 31 - BackfillFinished(Did<'static>), 32 22 } 33 23 34 - pub type BufferTx = mpsc::UnboundedSender<IngestMessage>; 35 - pub type BufferRx = mpsc::UnboundedReceiver<IngestMessage>; 36 - 37 - /// outcome of a host authority check. 38 - enum AuthorityOutcome { 39 - /// stored pds matched the source host immediately. 40 - Authorized, 41 - /// pds migrated: doc now points to this host, but our stored state was stale. 42 - WasStale, 43 - /// host did not match even after doc resolution. 44 - WrongHost { expected: SmolStr }, 45 - } 46 - 47 - fn pds_host(pds: Option<&str>) -> Option<SmolStr> { 48 - // todo: add faster host parsing since we only need that 49 - pds.and_then(|pds| Url::parse(pds).ok()).map(|u| { 50 - u.host_str() 51 - .map(SmolStr::new) 52 - .expect("that there is host in pds url") 53 - }) 54 - } 55 - 56 - /// invalidates the resolver cache for `did`, fetches a fresh document, and updates `repo_state`. 57 - /// 58 - /// panics if called outside a tokio runtime context. 59 - fn refresh_doc(resolver: &Resolver, did: &Did, repo_state: &mut RepoState) -> Result<()> { 60 - resolver.invalidate_sync(did); 61 - let doc = tokio::runtime::Handle::current() 62 - .block_on(resolver.resolve_doc(did)) 63 - .map_err(|e| miette::miette!("{e}"))?; 64 - repo_state.update_from_doc(doc); 65 - repo_state.touch(); 66 - Ok(()) 67 - } 68 - 69 - /// checks that `source_host` is the authoritative PDS for `did`. 70 - /// 71 - /// updates `repo_state` in place when a doc refresh is performed (i.e. on any outcome other than 72 - /// `Authorized`). callers that persist state (e.g. the indexer worker) should write `repo_state` 73 - /// to their batch after this call when the outcome is not `Authorized`. 74 - /// 75 - /// panics if called outside a tokio runtime context. 
76 - fn check_host_authority( 77 - resolver: &Resolver, 78 - did: &Did, 79 - repo_state: &mut RepoState, 80 - source_host: &str, 81 - ) -> Result<AuthorityOutcome> { 82 - let expected = pds_host(repo_state.pds.as_deref()); 83 - if expected.as_deref() == Some(source_host) { 84 - return Ok(AuthorityOutcome::Authorized); 85 - } 86 - 87 - // try again once 88 - refresh_doc(resolver, did, repo_state)?; 89 - let Some(expected) = pds_host(repo_state.pds.as_deref()) else { 90 - miette::bail!("can't get pds host???"); 91 - }; 92 - if expected.as_str() == source_host { 93 - Ok(AuthorityOutcome::WasStale) 94 - } else { 95 - Ok(AuthorityOutcome::WrongHost { expected }) 96 - } 97 - } 98 - 99 - /// resolves the signing key for `did` if `verify_signatures` is true. 100 - /// 101 - /// panics if called outside a tokio runtime context. 102 - fn fetch_key( 103 - resolver: &Resolver, 104 - verify_signatures: bool, 105 - did: &Did, 106 - ) -> Result<Option<PublicKey<'static>>> { 107 - if verify_signatures { 108 - let key = tokio::runtime::Handle::current() 109 - .block_on(resolver.resolve_signing_key(did)) 110 - .map_err(|e| miette::miette!("{e}"))?; 111 - Ok(Some(key)) 112 - } else { 113 - Ok(None) 114 - } 115 - } 116 - 117 - /// maps an inactive account status to the corresponding `RepoStatus`. 118 - /// panics on `AccountStatus::Deleted`, caller must handle that 119 - fn inactive_account_repo_status(did: &Did, status: &Option<AccountStatus<'_>>) -> RepoStatus { 120 - match status { 121 - Some(AccountStatus::Takendown) => RepoStatus::Takendown, 122 - Some(AccountStatus::Suspended) => RepoStatus::Suspended, 123 - Some(AccountStatus::Deactivated) => RepoStatus::Deactivated, 124 - Some(AccountStatus::Throttled) => RepoStatus::Error("throttled".into()), 125 - Some(AccountStatus::Desynchronized) => RepoStatus::Error("desynchronized".into()), 126 - Some(AccountStatus::Other(s)) => { 127 - warn!(did = %did, status = %s, "unknown account status"); 128 - RepoStatus::Error(s.to_smolstr()) 129 - } 130 - Some(AccountStatus::Deleted) => unreachable!("deleted is handled before status mapping"), 131 - None => { 132 - warn!(did = %did, "account inactive but no status provided"); 133 - RepoStatus::Error("unknown".into()) 134 - } 135 - } 136 - } 24 + pub type BufferTx = mpsc::Sender<IngestMessage>; 25 + pub type BufferRx = mpsc::Receiver<IngestMessage>;
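`BufferTx` also changes from an unbounded sender to a bounded `mpsc::Sender`, which is why the firehose consumer above now `.await`s its sends: when the relay worker falls behind, producers stall instead of letting the buffer grow without limit. A minimal construction sketch; the capacity and the `make_ingest_channel` name are assumptions for illustration, not values taken from this change:

```rust
use crate::ingest::{BufferRx, BufferTx, IngestMessage};
use tokio::sync::mpsc;

// sketch: a bounded ingest channel; 4096 is an assumed capacity, the real one
// is chosen wherever hydrant actually constructs this channel.
fn make_ingest_channel() -> (BufferTx, BufferRx) {
    mpsc::channel::<IngestMessage>(4096)
}
```

Producers then call `buffer_tx.send(msg).await`, which is exactly the shape of the change visible in `src/ingest/firehose.rs` above.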
+872
src/ingest/relay.rs
··· 1 + use std::collections::hash_map::DefaultHasher; 2 + use std::hash::{Hash, Hasher}; 3 + use std::sync::Arc; 4 + #[cfg(feature = "relay")] 5 + use std::sync::atomic::Ordering; 6 + 7 + use fjall::OwnedWriteBatch; 8 + 9 + use jacquard_api::com_atproto::sync::get_repo_status::{ 10 + GetRepoStatus, GetRepoStatusError, GetRepoStatusOutputStatus, 11 + }; 12 + use jacquard_common::types::crypto::PublicKey; 13 + use jacquard_common::types::did::Did; 14 + use jacquard_common::xrpc::{XrpcError, XrpcExt}; 15 + use jacquard_common::{CowStr, IntoStatic}; 16 + use miette::{IntoDiagnostic, Result}; 17 + use tokio::runtime::Handle; 18 + use tokio::sync::mpsc; 19 + use tracing::{debug, error, info, info_span, trace, warn}; 20 + use url::Url; 21 + 22 + use crate::db::{self, keys}; 23 + use crate::ingest::stream::AccountStatus; 24 + #[cfg(feature = "relay")] 25 + use crate::ingest::stream::encode_frame; 26 + use crate::ingest::stream::{Account, Commit, Identity, InfoName, SubscribeReposMessage, Sync}; 27 + use crate::ingest::validation::{ 28 + CommitValidationError, SyncValidationError, ValidatedCommit, ValidatedSync, ValidationContext, 29 + ValidationOptions, 30 + }; 31 + use crate::ingest::{BufferRx, IngestMessage}; 32 + use crate::state::AppState; 33 + #[cfg(feature = "relay")] 34 + use crate::types::RelayBroadcast; 35 + use crate::types::{RepoState, RepoStatus}; 36 + use smol_str::{SmolStr, ToSmolStr}; 37 + 38 + struct WorkerContext<'a> { 39 + verify_signatures: bool, 40 + state: &'a AppState, 41 + vctx: ValidationContext<'a>, 42 + batch: OwnedWriteBatch, 43 + #[cfg(feature = "relay")] 44 + pending_broadcasts: Vec<RelayBroadcast>, 45 + #[cfg(feature = "indexer")] 46 + pending_hook_messages: Vec<crate::ingest::indexer::IndexerMessage>, 47 + #[cfg(feature = "indexer")] 48 + hook: crate::ingest::indexer::IndexerTx, 49 + http: reqwest::Client, 50 + } 51 + 52 + struct WorkerMessage { 53 + is_pds: bool, 54 + firehose: Url, 55 + msg: SubscribeReposMessage<'static>, 56 + } 57 + 58 + pub struct RelayWorker { 59 + state: Arc<AppState>, 60 + rx: BufferRx, 61 + #[cfg(feature = "indexer")] 62 + hook: crate::ingest::indexer::IndexerTx, 63 + verify_signatures: bool, 64 + num_shards: usize, 65 + validation_opts: Arc<ValidationOptions>, 66 + http: reqwest::Client, 67 + } 68 + 69 + impl RelayWorker { 70 + pub fn new( 71 + state: Arc<AppState>, 72 + rx: BufferRx, 73 + #[cfg(feature = "indexer")] hook: crate::ingest::indexer::IndexerTx, 74 + verify_signatures: bool, 75 + num_shards: usize, 76 + validation_opts: ValidationOptions, 77 + ) -> Self { 78 + Self { 79 + state, 80 + rx, 81 + #[cfg(feature = "indexer")] 82 + hook, 83 + verify_signatures, 84 + num_shards, 85 + validation_opts: Arc::new(validation_opts), 86 + http: reqwest::Client::new(), 87 + } 88 + } 89 + 90 + pub fn run(mut self, handle: Handle) -> Result<()> { 91 + let mut shards = Vec::with_capacity(self.num_shards); 92 + 93 + for i in 0..self.num_shards { 94 + let (tx, rx) = mpsc::unbounded_channel(); 95 + shards.push(tx); 96 + 97 + let state = self.state.clone(); 98 + #[cfg(feature = "indexer")] 99 + let hook = self.hook.clone(); 100 + let verify = self.verify_signatures; 101 + let h = handle.clone(); 102 + let opts = self.validation_opts.clone(); 103 + let http = self.http.clone(); 104 + 105 + std::thread::Builder::new() 106 + .name(format!("relay-shard-{i}")) 107 + .spawn(move || { 108 + Self::shard( 109 + i, 110 + rx, 111 + state, 112 + #[cfg(feature = "indexer")] 113 + hook, 114 + verify, 115 + h, 116 + opts, 117 + http, 118 + ); 119 + }) 120 + 
.into_diagnostic()?; 121 + } 122 + 123 + info!(num = self.num_shards, "relay worker: started shards"); 124 + 125 + let _g = handle.enter(); 126 + 127 + while let Some(msg) = self.rx.blocking_recv() { 128 + let IngestMessage::Firehose { url, is_pds, msg } = msg; 129 + 130 + // #info only pertains to us, the direct consumer 131 + if let SubscribeReposMessage::Info(inf) = msg { 132 + match inf.name { 133 + InfoName::OutdatedCursor => { 134 + // todo: handle 135 + } 136 + InfoName::Other(name) => { 137 + let message = inf 138 + .message 139 + .unwrap_or_else(|| CowStr::Borrowed("<no message>")); 140 + info!(name = %name, "relay sent info: {message}"); 141 + } 142 + } 143 + continue; 144 + } 145 + 146 + let shard_idx = { 147 + let did = match &msg { 148 + SubscribeReposMessage::Commit(c) => &c.repo, 149 + SubscribeReposMessage::Identity(i) => &i.did, 150 + SubscribeReposMessage::Account(a) => &a.did, 151 + SubscribeReposMessage::Sync(s) => &s.did, 152 + _ => continue, 153 + }; 154 + let mut hasher = DefaultHasher::new(); 155 + did.hash(&mut hasher); 156 + let idx = (hasher.finish() as usize) % self.num_shards; 157 + idx 158 + }; 159 + 160 + if let Err(e) = shards[shard_idx].send(WorkerMessage { 161 + firehose: url, 162 + is_pds, 163 + msg, 164 + }) { 165 + error!(shard = shard_idx, err = %e, "relay worker: failed to send to shard"); 166 + break; 167 + } 168 + } 169 + 170 + Err(miette::miette!("relay worker dispatcher shutting down")) 171 + } 172 + 173 + fn shard( 174 + id: usize, 175 + mut rx: mpsc::UnboundedReceiver<WorkerMessage>, 176 + state: Arc<AppState>, 177 + #[cfg(feature = "indexer")] hook: crate::ingest::indexer::IndexerTx, 178 + verify_signatures: bool, 179 + handle: Handle, 180 + validation_opts: Arc<ValidationOptions>, 181 + http: reqwest::Client, 182 + ) { 183 + let _guard = handle.enter(); 184 + let span = info_span!("worker_shard", shard = id); 185 + let _entered = span.clone().entered(); 186 + debug!("relay shard started"); 187 + 188 + let mut ctx = WorkerContext { 189 + verify_signatures, 190 + state: &state, 191 + vctx: ValidationContext { 192 + opts: &validation_opts, 193 + }, 194 + batch: state.db.inner.batch(), 195 + #[cfg(feature = "relay")] 196 + pending_broadcasts: Vec::with_capacity(2), 197 + #[cfg(feature = "indexer")] 198 + pending_hook_messages: Vec::with_capacity(2), 199 + #[cfg(feature = "indexer")] 200 + hook, 201 + http, 202 + }; 203 + 204 + while let Some(msg) = rx.blocking_recv() { 205 + let (did, seq) = match &msg.msg { 206 + SubscribeReposMessage::Commit(c) => (c.repo.clone(), c.seq), 207 + SubscribeReposMessage::Identity(i) => (i.did.clone(), i.seq), 208 + SubscribeReposMessage::Account(a) => (a.did.clone(), a.seq), 209 + SubscribeReposMessage::Sync(s) => (s.did.clone(), s.seq), 210 + _ => continue, 211 + }; 212 + 213 + let firehose = msg.firehose.clone(); 214 + let _span = info_span!("relay", did = %did, firehose = %firehose, seq = %seq).entered(); 215 + 216 + if let Err(e) = Self::process_message(&mut ctx, msg) { 217 + error!(did = %did, err = %e, "relay shard: error processing message"); 218 + } 219 + 220 + let res = std::mem::replace(&mut ctx.batch, ctx.state.db.inner.batch()).commit(); 221 + if let Err(e) = res { 222 + error!(shard = id, err = %e, "relay shard: failed to commit batch"); 223 + continue; 224 + } 225 + 226 + #[cfg(feature = "relay")] 227 + for broadcast in ctx.pending_broadcasts.drain(..) 
{ 228 + let _ = state.db.relay_broadcast_tx.send(broadcast); 229 + } 230 + #[cfg(feature = "indexer")] 231 + for msg in ctx.pending_hook_messages.drain(..) { 232 + let _ = ctx.hook.blocking_send(msg); 233 + } 234 + 235 + // advance cursor for this firehose only if we are the terminal consumer (relay mode) 236 + // in events mode, FirehoseWorker will advance the cursor after processing 237 + #[cfg(feature = "relay")] 238 + { 239 + ctx.state 240 + .firehose_cursors 241 + .peek_with(&firehose, |_, c| c.store(seq, Ordering::SeqCst)); 242 + } 243 + } 244 + } 245 + 246 + fn process_message(ctx: &mut WorkerContext, msg: WorkerMessage) -> Result<()> { 247 + let Some(mut repo_state) = ctx.load_repo_state(&msg)? else { 248 + return Ok(()); 249 + }; 250 + let did = msg.msg.did().expect("already checked for did"); 251 + 252 + if let Some(host) = msg.firehose.host_str() 253 + && msg.is_pds 254 + { 255 + let outcome = ctx.check_host_authority(did, &mut repo_state, host)?; 256 + if let AuthorityOutcome::WrongHost { expected } = outcome { 257 + warn!(got = host, expected = %expected, "message rejected: wrong host"); 258 + return Ok(()); 259 + } 260 + } 261 + 262 + match msg.msg { 263 + SubscribeReposMessage::Commit(commit) => { 264 + trace!("processing commit"); 265 + Self::handle_commit(ctx, &mut repo_state, &msg.firehose, *commit) 266 + } 267 + SubscribeReposMessage::Sync(sync) => { 268 + debug!("processing sync"); 269 + Self::handle_sync(ctx, &mut repo_state, &msg.firehose, *sync) 270 + } 271 + SubscribeReposMessage::Identity(identity) => { 272 + debug!("processing identity"); 273 + Self::handle_identity(ctx, &mut repo_state, &msg.firehose, *identity, msg.is_pds) 274 + } 275 + SubscribeReposMessage::Account(account) => { 276 + debug!("processing account"); 277 + Self::handle_account(ctx, &mut repo_state, &msg.firehose, *account) 278 + } 279 + _ => Ok(()), 280 + } 281 + } 282 + 283 + fn handle_commit( 284 + ctx: &mut WorkerContext, 285 + repo_state: &mut RepoState, 286 + #[allow(unused_variables)] firehose: &Url, 287 + #[allow(unused_mut)] mut commit: Commit<'static>, 288 + ) -> Result<()> { 289 + if !repo_state.active { 290 + return Ok(()); 291 + } 292 + 293 + repo_state.advance_message_time(commit.time.0.timestamp_millis()); 294 + 295 + let Some(validated) = ctx.validate_commit(repo_state, &commit)? else { 296 + return Ok(()); 297 + }; 298 + let ValidatedCommit { 299 + chain_break, 300 + commit_obj, 301 + parsed_blocks, 302 + .. 
303 + } = validated; 304 + 305 + if chain_break.is_broken() { 306 + // chain breaks are not grounds for blocking in relay mode 307 + warn!(broken = ?chain_break, "relay: chain break, forwarding anyway"); 308 + } 309 + 310 + let repo_key = keys::repo_key(&commit.repo); 311 + 312 + #[cfg(feature = "indexer")] 313 + { 314 + ctx.pending_hook_messages 315 + .push(crate::ingest::indexer::IndexerMessage::Event(Box::new( 316 + crate::ingest::indexer::IndexerEvent { 317 + seq: commit.seq, 318 + firehose: firehose.clone(), 319 + data: crate::ingest::indexer::IndexerEventData::Commit( 320 + crate::ingest::indexer::IndexerCommitData { 321 + commit, 322 + chain_break: chain_break.is_broken(), 323 + parsed_blocks, 324 + }, 325 + ), 326 + }, 327 + ))); 328 + } 329 + #[cfg(feature = "relay")] 330 + { 331 + ctx.queue_emit(|seq| { 332 + commit.seq = seq; 333 + encode_frame("#commit", &commit) 334 + })?; 335 + } 336 + 337 + repo_state.root = Some(commit_obj.into()); 338 + repo_state.touch(); 339 + ctx.batch.insert( 340 + &ctx.state.db.repos, 341 + repo_key, 342 + db::ser_repo_state(repo_state)?, 343 + ); 344 + 345 + Ok(()) 346 + } 347 + 348 + fn handle_sync( 349 + ctx: &mut WorkerContext, 350 + repo_state: &mut RepoState, 351 + #[allow(unused_variables)] firehose: &Url, 352 + #[allow(unused_mut)] mut sync: Sync<'static>, 353 + ) -> Result<()> { 354 + if !repo_state.active { 355 + return Ok(()); 356 + } 357 + 358 + repo_state.advance_message_time(sync.time.0.timestamp_millis()); 359 + 360 + let Some(validated) = ctx.validate_sync(repo_state, &sync)? else { 361 + return Ok(()); 362 + }; 363 + 364 + let repo_key = keys::repo_key(&sync.did); 365 + 366 + #[cfg(feature = "indexer")] 367 + { 368 + ctx.pending_hook_messages 369 + .push(crate::ingest::indexer::IndexerMessage::Event(Box::new( 370 + crate::ingest::indexer::IndexerEvent { 371 + seq: sync.seq, 372 + firehose: firehose.clone(), 373 + data: crate::ingest::indexer::IndexerEventData::Sync( 374 + sync.did.into_static(), 375 + ), 376 + }, 377 + ))); 378 + } 379 + #[cfg(feature = "relay")] 380 + { 381 + ctx.queue_emit(|seq| { 382 + sync.seq = seq; 383 + encode_frame("#sync", &sync) 384 + })?; 385 + } 386 + 387 + repo_state.root = Some(validated.commit_obj.into()); 388 + repo_state.touch(); 389 + ctx.batch.insert( 390 + &ctx.state.db.repos, 391 + repo_key, 392 + db::ser_repo_state(repo_state)?, 393 + ); 394 + 395 + Ok(()) 396 + } 397 + 398 + fn handle_identity( 399 + ctx: &mut WorkerContext, 400 + repo_state: &mut RepoState, 401 + #[allow(unused_variables)] firehose: &Url, 402 + mut identity: Identity<'static>, 403 + is_pds: bool, 404 + ) -> Result<()> { 405 + let event_ms = identity.time.0.timestamp_millis(); 406 + if repo_state.last_message_time.is_some_and(|t| event_ms <= t) { 407 + debug!("skipping stale/duplicate identity event"); 408 + return Ok(()); 409 + } 410 + repo_state.advance_message_time(event_ms); 411 + 412 + #[cfg(feature = "indexer")] 413 + let (was_handle, was_signing_key) = ( 414 + repo_state.handle.clone().map(IntoStatic::into_static), 415 + repo_state.signing_key.clone().map(IntoStatic::into_static), 416 + ); 417 + 418 + // refresh did doc if a pds sent this event 419 + // or if there is no handle specified 420 + if is_pds || identity.handle.is_none() { 421 + ctx.state.resolver.invalidate_sync(&identity.did); 422 + let doc = Handle::current().block_on(ctx.state.resolver.resolve_doc(&identity.did)); 423 + match doc { 424 + Ok(doc) => { 425 + repo_state.update_from_doc(doc); 426 + } 427 + Err(err) => { 428 + warn!(err = %err, "couldnt fetch 
identity"); 429 + } 430 + } 431 + } 432 + 433 + // don't pass handle through if it doesnt match ours for pds events 434 + if is_pds && repo_state.handle != identity.handle { 435 + identity.handle = None; 436 + } 437 + 438 + let repo_key = keys::repo_key(&identity.did); 439 + 440 + #[cfg(feature = "indexer")] 441 + { 442 + let changed = 443 + repo_state.handle != was_handle || repo_state.signing_key != was_signing_key; 444 + ctx.pending_hook_messages 445 + .push(crate::ingest::indexer::IndexerMessage::Event(Box::new( 446 + crate::ingest::indexer::IndexerEvent { 447 + seq: identity.seq, 448 + firehose: firehose.clone(), 449 + data: crate::ingest::indexer::IndexerEventData::Identity( 450 + crate::ingest::indexer::IndexerIdentityData { identity, changed }, 451 + ), 452 + }, 453 + ))); 454 + } 455 + #[cfg(feature = "relay")] 456 + { 457 + ctx.queue_emit(|seq| { 458 + identity.seq = seq; 459 + encode_frame("#identity", &identity) 460 + })?; 461 + } 462 + 463 + ctx.batch.insert( 464 + &ctx.state.db.repos, 465 + repo_key, 466 + db::ser_repo_state(repo_state)?, 467 + ); 468 + 469 + Ok(()) 470 + } 471 + 472 + fn handle_account( 473 + ctx: &mut WorkerContext, 474 + repo_state: &mut RepoState, 475 + #[allow(unused_variables)] firehose: &Url, 476 + #[allow(unused_mut)] mut account: Account<'static>, 477 + ) -> Result<()> { 478 + let event_ms = account.time.0.timestamp_millis(); 479 + if repo_state.last_message_time.is_some_and(|t| event_ms <= t) { 480 + debug!("skipping stale/duplicate account event"); 481 + return Ok(()); 482 + } 483 + 484 + repo_state.advance_message_time(event_ms); 485 + 486 + #[cfg(feature = "indexer")] 487 + let (was_active, was_status) = (repo_state.active, repo_state.status.clone()); 488 + 489 + repo_state.active = account.active; 490 + if !account.active { 491 + use crate::ingest::stream::AccountStatus; 492 + match &account.status { 493 + Some(AccountStatus::Deleted) => { 494 + // keep a Deleted tombstone so any stale commits that arrive later 495 + // (e.g. from the upstream backfill window) are not forwarded. 496 + // per spec: "if any further #commit messages are emitted for the repo, 497 + // all downstream services should ignore the event and not pass it through." 498 + repo_state.status = RepoStatus::Deleted; 499 + } 500 + status => { 501 + repo_state.status = ctx.inactive_account_repo_status(&account.did, status); 502 + } 503 + } 504 + } else { 505 + // active=true: desynchronized/throttled may still carry active=true per spec. 506 + // anything else (including unknown statuses) is treated as synced. 
507 + use crate::ingest::stream::AccountStatus; 508 + repo_state.status = match &account.status { 509 + Some(AccountStatus::Desynchronized) => RepoStatus::Desynchronized, 510 + Some(AccountStatus::Throttled) => RepoStatus::Throttled, 511 + _ => RepoStatus::Synced, 512 + }; 513 + } 514 + 515 + let repo_key = keys::repo_key(&account.did); 516 + 517 + #[cfg(feature = "indexer")] 518 + { 519 + let changed = repo_state.active != was_active || repo_state.status != was_status; 520 + ctx.pending_hook_messages 521 + .push(crate::ingest::indexer::IndexerMessage::Event(Box::new( 522 + crate::ingest::indexer::IndexerEvent { 523 + seq: account.seq, 524 + firehose: firehose.clone(), 525 + data: crate::ingest::indexer::IndexerEventData::Account( 526 + crate::ingest::indexer::IndexerAccountData { 527 + account, 528 + was_active, 529 + changed, 530 + }, 531 + ), 532 + }, 533 + ))); 534 + } 535 + #[cfg(feature = "relay")] 536 + { 537 + ctx.queue_emit(|seq| { 538 + account.seq = seq; 539 + encode_frame("#account", &account) 540 + })?; 541 + } 542 + 543 + repo_state.touch(); 544 + ctx.batch.insert( 545 + &ctx.state.db.repos, 546 + repo_key, 547 + db::ser_repo_state(repo_state)?, 548 + ); 549 + 550 + Ok(()) 551 + } 552 + } 553 + 554 + impl WorkerContext<'_> { 555 + fn check_host_authority( 556 + &mut self, 557 + did: &Did, 558 + repo_state: &mut RepoState, 559 + source_host: &str, 560 + ) -> Result<AuthorityOutcome> { 561 + let expected = pds_host(repo_state.pds.as_deref()); 562 + if expected.as_deref() == Some(source_host) { 563 + return Ok(AuthorityOutcome::Authorized); 564 + } 565 + 566 + // try again once 567 + self.refresh_doc(did, repo_state)?; 568 + let Some(expected) = pds_host(repo_state.pds.as_deref()) else { 569 + miette::bail!("can't get pds host???"); 570 + }; 571 + 572 + if expected.as_str() == source_host { 573 + Ok(AuthorityOutcome::WasStale) 574 + } else { 575 + Ok(AuthorityOutcome::WrongHost { expected }) 576 + } 577 + } 578 + 579 + fn refresh_doc(&mut self, did: &Did, repo_state: &mut RepoState) -> Result<()> { 580 + let db = &self.state.db; 581 + self.state.resolver.invalidate_sync(did); 582 + let doc = Handle::current() 583 + .block_on(self.state.resolver.resolve_doc(did)) 584 + .map_err(|e| miette::miette!("{e}"))?; 585 + repo_state.update_from_doc(doc); 586 + repo_state.touch(); 587 + 588 + self.batch.insert( 589 + &db.repos, 590 + keys::repo_key(did), 591 + db::ser_repo_state(repo_state)?, 592 + ); 593 + Ok(()) 594 + } 595 + 596 + fn validate_commit<'c>( 597 + &mut self, 598 + repo_state: &mut RepoState, 599 + commit: &'c Commit<'c>, 600 + ) -> Result<Option<ValidatedCommit<'c>>> { 601 + let did = &commit.repo; 602 + let key = self.fetch_key(did)?; 603 + match self.vctx.validate_commit(commit, repo_state, key.as_ref()) { 604 + Ok(v) => return Ok(Some(v)), 605 + Err(CommitValidationError::StaleRev) => { 606 + trace!("skipping replayed commit"); 607 + return Ok(None); 608 + } 609 + Err(CommitValidationError::SigFailure) => {} 610 + Err(e) => { 611 + warn!(err = %e, "commit rejected"); 612 + return Ok(None); 613 + } 614 + } 615 + 616 + self.refresh_doc(did, repo_state)?; 617 + let key = self.fetch_key(did)?; 618 + match self.vctx.validate_commit(commit, repo_state, key.as_ref()) { 619 + Ok(v) => Ok(Some(v)), 620 + Err(e) => { 621 + warn!(err = %e, "commit rejected after key refresh"); 622 + Ok(None) 623 + } 624 + } 625 + } 626 + 627 + fn validate_sync( 628 + &mut self, 629 + repo_state: &mut RepoState, 630 + sync: &Sync<'_>, 631 + ) -> Result<Option<ValidatedSync>> { 632 + let did = 
&sync.did; 633 + let key = self.fetch_key(did)?; 634 + match self.vctx.validate_sync(sync, key.as_ref()) { 635 + Ok(v) => return Ok(Some(v)), 636 + Err(SyncValidationError::SigFailure) => {} 637 + Err(e) => { 638 + warn!(err = %e, "sync rejected"); 639 + return Ok(None); 640 + } 641 + } 642 + 643 + self.refresh_doc(did, repo_state)?; 644 + let key = self.fetch_key(did)?; 645 + match self.vctx.validate_sync(sync, key.as_ref()) { 646 + Ok(v) => Ok(Some(v)), 647 + Err(e) => { 648 + warn!(err = %e, "sync rejected after key refresh"); 649 + Ok(None) 650 + } 651 + } 652 + } 653 + 654 + fn fetch_key(&self, did: &Did) -> Result<Option<PublicKey<'static>>> { 655 + if self.verify_signatures { 656 + let key = Handle::current() 657 + .block_on(self.state.resolver.resolve_signing_key(did)) 658 + .map_err(|e| miette::miette!("{e}"))?; 659 + Ok(Some(key)) 660 + } else { 661 + Ok(None) 662 + } 663 + } 664 + 665 + /// maps an inactive account status to the corresponding `RepoStatus`. 666 + /// panics on `AccountStatus::Deleted`, caller must handle that 667 + fn inactive_account_repo_status( 668 + &self, 669 + did: &Did, 670 + status: &Option<AccountStatus<'_>>, 671 + ) -> RepoStatus { 672 + match status { 673 + Some(AccountStatus::Takendown) => RepoStatus::Takendown, 674 + Some(AccountStatus::Suspended) => RepoStatus::Suspended, 675 + Some(AccountStatus::Deactivated) => RepoStatus::Deactivated, 676 + Some(AccountStatus::Throttled) => RepoStatus::Throttled, 677 + Some(AccountStatus::Desynchronized) => RepoStatus::Desynchronized, 678 + Some(AccountStatus::Other(s)) => { 679 + warn!(did = %did, status = %s, "unknown account status"); 680 + RepoStatus::Error(s.to_smolstr()) 681 + } 682 + Some(AccountStatus::Deleted) => { 683 + unreachable!("deleted is handled before status mapping") 684 + } 685 + None => { 686 + warn!(did = %did, "account inactive but no status provided"); 687 + RepoStatus::Error("unknown".into()) 688 + } 689 + } 690 + } 691 + 692 + async fn check_repo_status( 693 + &self, 694 + did: &Did<'_>, 695 + pds: &Url, 696 + ) -> Result<Option<RepoState<'static>>> { 697 + let req = GetRepoStatus::new().did(did.clone().into_static()).build(); 698 + let resp = self 699 + .http 700 + .xrpc(crate::util::url_to_fluent_uri(pds)) 701 + .send(&req) 702 + .await; 703 + 704 + let output = match resp { 705 + Err(_) => return Ok(None), 706 + Ok(r) => match r.into_output() { 707 + Ok(o) => o, 708 + Err(XrpcError::Xrpc(GetRepoStatusError::RepoNotFound(_))) => { 709 + // pds explicitly says it doesn't have this repo 710 + // we shouldnt really get here unless the pds is buggy? 
711 + // or somehow the repo gets gon right after we receive the event 712 + let mut repo_state = RepoState::backfilling(); 713 + repo_state.active = false; 714 + repo_state.status = RepoStatus::Error("not_found".into()); 715 + return Ok(Some(repo_state)); 716 + } 717 + Err(_) => return Ok(None), 718 + }, 719 + }; 720 + 721 + let mut repo_state = RepoState::backfilling(); 722 + repo_state.active = output.active; 723 + repo_state.status = match output.status { 724 + Some(GetRepoStatusOutputStatus::Takendown) => RepoStatus::Takendown, 725 + Some(GetRepoStatusOutputStatus::Suspended) => RepoStatus::Suspended, 726 + Some(GetRepoStatusOutputStatus::Deactivated) => RepoStatus::Deactivated, 727 + Some(GetRepoStatusOutputStatus::Deleted) => RepoStatus::Deleted, 728 + Some(GetRepoStatusOutputStatus::Desynchronized) => RepoStatus::Desynchronized, 729 + Some(GetRepoStatusOutputStatus::Throttled) => RepoStatus::Throttled, 730 + Some(GetRepoStatusOutputStatus::Other(s)) => RepoStatus::Error(s.into()), 731 + None => output 732 + .active 733 + .then_some(RepoStatus::Synced) 734 + .unwrap_or_else(|| RepoStatus::Error("unknown".into())), 735 + }; 736 + 737 + Ok(Some(repo_state)) 738 + } 739 + 740 + fn load_repo_state(&mut self, msg: &WorkerMessage) -> Result<Option<RepoState<'static>>> { 741 + let db = &self.state.db; 742 + let did = msg.msg.did().expect("we checked if valid"); 743 + let repo_key = keys::repo_key(did); 744 + let metadata_key = keys::repo_metadata_key(did); 745 + 746 + let metadata = db 747 + .repo_metadata 748 + .get(&metadata_key) 749 + .into_diagnostic()? 750 + .map(|bytes| db::deser_repo_metadata(&bytes)) 751 + .transpose()?; 752 + 753 + if metadata.map_or(false, |m| !m.tracked) { 754 + trace!(did = %did, "ignoring message, repo is explicitly untracked"); 755 + return Ok(None); 756 + } 757 + 758 + let repo_state_opt = db 759 + .repos 760 + .get(&repo_key) 761 + .into_diagnostic()? 
762 + .map(|bytes| db::deser_repo_state(bytes.as_ref()).map(|s| s.into_static())) 763 + .transpose()?; 764 + 765 + if let Some(repo_state) = repo_state_opt { 766 + return Ok(Some(repo_state)); 767 + } 768 + 769 + #[cfg(feature = "indexer")] 770 + { 771 + let filter = self.state.filter.load(); 772 + if filter.mode == crate::filter::FilterMode::Filter && !filter.signals.is_empty() { 773 + let commit = match &msg.msg { 774 + SubscribeReposMessage::Commit(c) => c, 775 + _ => return Ok(None), 776 + }; 777 + let touches_signal = commit.ops.iter().any(|op| { 778 + op.path 779 + .split_once('/') 780 + .map(|(col, _)| { 781 + let m = filter.matches_signal(col); 782 + debug!( 783 + did = %did, path = %op.path, col = %col, 784 + signals = ?filter.signals, matched = m, 785 + "signal check" 786 + ); 787 + m 788 + }) 789 + .unwrap_or(false) 790 + }); 791 + if !touches_signal { 792 + trace!(did = %did, "dropping commit, no signal-matching ops"); 793 + return Ok(None); 794 + } 795 + } 796 + } 797 + 798 + debug!(did = %did, "discovered new account from firehose, queueing backfill"); 799 + 800 + // resolve doc to initialize repo state 801 + self.state.resolver.invalidate_sync(did); 802 + let doc = tokio::runtime::Handle::current() 803 + .block_on(self.state.resolver.resolve_doc(did)) 804 + .into_diagnostic()?; 805 + 806 + // if it's a PDS, verify it's the authoritative one 807 + if msg.is_pds { 808 + let pds_host = doc.pds.host_str().map(|h| h.to_string()); 809 + if pds_host.as_deref() != msg.firehose.host_str() { 810 + warn!(did = %did, got = ?pds_host, expected = ?msg.firehose.host_str(), "message rejected: wrong host for new account"); 811 + return Ok(None); 812 + } 813 + } 814 + 815 + // try to get upstream status 816 + let mut repo_state = tokio::runtime::Handle::current() 817 + .block_on(self.check_repo_status(did, &doc.pds)) 818 + .ok() 819 + .flatten() 820 + .unwrap_or_else(RepoState::backfilling); 821 + 822 + repo_state.update_from_doc(doc); 823 + 824 + self.batch.insert( 825 + &db.repos, 826 + &repo_key, 827 + crate::db::ser_repo_state(&repo_state)?, 828 + ); 829 + 830 + #[cfg(feature = "indexer")] 831 + { 832 + self.pending_hook_messages 833 + .push(crate::ingest::indexer::IndexerMessage::NewRepo( 834 + did.clone().into_static(), 835 + )); 836 + } 837 + 838 + db.update_count("repos", 1); 839 + 840 + Ok(Some(repo_state)) 841 + } 842 + 843 + #[cfg(feature = "relay")] 844 + fn queue_emit(&mut self, make_frame: impl FnOnce(i64) -> Result<bytes::Bytes>) -> Result<()> { 845 + let db = &self.state.db; 846 + let seq = db.next_relay_seq.fetch_add(1, Ordering::SeqCst); 847 + let frame = make_frame(seq as i64)?; 848 + self.batch 849 + .insert(&db.relay_events, keys::relay_event_key(seq), frame.as_ref()); 850 + self.pending_broadcasts.push(RelayBroadcast::Persisted(seq)); 851 + Ok(()) 852 + } 853 + } 854 + 855 + /// outcome of a host authority check. 856 + enum AuthorityOutcome { 857 + /// stored pds matched the source host immediately. 858 + Authorized, 859 + /// pds migrated: doc now points to this host, but our stored state was stale. 860 + WasStale, 861 + /// host did not match even after doc resolution. 862 + WrongHost { expected: SmolStr }, 863 + } 864 + 865 + fn pds_host(pds: Option<&str>) -> Option<SmolStr> { 866 + // todo: add faster host parsing since we only need that 867 + pds.and_then(|pds| Url::parse(pds).ok()).map(|u| { 868 + u.host_str() 869 + .map(SmolStr::new) 870 + .expect("that there is host in pds url") 871 + }) 872 + }
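One detail the signal check in `load_repo_state` relies on: a commit op's `path` has the shape `<collection>/<rkey>`, and only the collection half is compared against the configured signals. A small sketch of that split (`op_collection` is a hypothetical helper; the `split_once` call is the same one used above):

```rust
// sketch: pull the collection NSID out of a firehose op path
fn op_collection(path: &str) -> Option<&str> {
    path.split_once('/').map(|(collection, _rkey)| collection)
}

fn main() {
    assert_eq!(
        op_collection("app.bsky.feed.post/3kabcxyzrkey22"),
        Some("app.bsky.feed.post")
    );
    // paths without a '/' separator never match a signal
    assert_eq!(op_collection("not-a-record-path"), None);
}
```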
-529
src/ingest/relay_worker.rs
··· 1 - use std::collections::hash_map::DefaultHasher; 2 - use std::hash::{Hash, Hasher}; 3 - use std::sync::Arc; 4 - use std::sync::atomic::Ordering; 5 - 6 - use fjall::OwnedWriteBatch; 7 - 8 - use jacquard_common::types::crypto::PublicKey; 9 - use jacquard_common::types::did::Did; 10 - use jacquard_common::{CowStr, IntoStatic}; 11 - use miette::{IntoDiagnostic, Result}; 12 - use tokio::runtime::Handle; 13 - use tokio::sync::mpsc; 14 - use tracing::{debug, error, info, info_span, trace, warn}; 15 - use url::Url; 16 - 17 - use crate::db::{self, keys}; 18 - use crate::ingest::stream::{ 19 - Account, Commit, Identity, InfoName, SubscribeReposMessage, Sync, encode_frame, 20 - }; 21 - use crate::ingest::validation::{ 22 - CommitValidationError, SyncValidationError, ValidatedCommit, ValidatedSync, ValidationContext, 23 - ValidationOptions, 24 - }; 25 - use crate::ingest::{BufferRx, IngestMessage}; 26 - use crate::state::AppState; 27 - use crate::types::{RelayBroadcast, RepoState, RepoStatus}; 28 - 29 - struct WorkerContext<'a> { 30 - verify_signatures: bool, 31 - state: &'a AppState, 32 - vctx: ValidationContext<'a>, 33 - batch: OwnedWriteBatch, 34 - pending_broadcasts: Vec<RelayBroadcast>, 35 - } 36 - 37 - struct WorkerMessage { 38 - is_pds: bool, 39 - firehose: Url, 40 - msg: SubscribeReposMessage<'static>, 41 - } 42 - 43 - pub struct RelayWorker { 44 - state: Arc<AppState>, 45 - rx: BufferRx, 46 - verify_signatures: bool, 47 - num_shards: usize, 48 - validation_opts: Arc<ValidationOptions>, 49 - } 50 - 51 - impl RelayWorker { 52 - pub fn new( 53 - state: Arc<AppState>, 54 - rx: BufferRx, 55 - verify_signatures: bool, 56 - num_shards: usize, 57 - validation_opts: ValidationOptions, 58 - ) -> Self { 59 - Self { 60 - state, 61 - rx, 62 - verify_signatures, 63 - num_shards, 64 - validation_opts: Arc::new(validation_opts), 65 - } 66 - } 67 - 68 - pub fn run(mut self, handle: Handle) -> Result<()> { 69 - let mut shards = Vec::with_capacity(self.num_shards); 70 - 71 - for i in 0..self.num_shards { 72 - let (tx, rx) = mpsc::unbounded_channel(); 73 - shards.push(tx); 74 - 75 - let state = self.state.clone(); 76 - let verify = self.verify_signatures; 77 - let h = handle.clone(); 78 - let opts = self.validation_opts.clone(); 79 - 80 - std::thread::Builder::new() 81 - .name(format!("relay-shard-{i}")) 82 - .spawn(move || { 83 - Self::shard(i, rx, state, verify, h, opts); 84 - }) 85 - .into_diagnostic()?; 86 - } 87 - 88 - info!(num = self.num_shards, "relay worker: started shards"); 89 - 90 - let _g = handle.enter(); 91 - 92 - while let Some(msg) = self.rx.blocking_recv() { 93 - let IngestMessage::Firehose { 94 - relay: firehose, 95 - is_pds, 96 - msg, 97 - } = msg 98 - else { 99 - continue; 100 - }; 101 - 102 - // #info only pertains to us, the direct consumer 103 - if let SubscribeReposMessage::Info(inf) = msg { 104 - match inf.name { 105 - InfoName::OutdatedCursor => { 106 - // todo: handle 107 - } 108 - InfoName::Other(name) => { 109 - let message = inf 110 - .message 111 - .unwrap_or_else(|| CowStr::Borrowed("<no message>")); 112 - info!(name = %name, "relay sent info: {message}"); 113 - } 114 - } 115 - continue; 116 - } 117 - 118 - let shard_idx = { 119 - let did = match &msg { 120 - SubscribeReposMessage::Commit(c) => &c.repo, 121 - SubscribeReposMessage::Identity(i) => &i.did, 122 - SubscribeReposMessage::Account(a) => &a.did, 123 - SubscribeReposMessage::Sync(s) => &s.did, 124 - _ => continue, 125 - }; 126 - let mut hasher = DefaultHasher::new(); 127 - did.hash(&mut hasher); 128 - let idx = 
(hasher.finish() as usize) % self.num_shards; 129 - idx 130 - }; 131 - 132 - if let Err(e) = shards[shard_idx].send(WorkerMessage { 133 - firehose, 134 - is_pds, 135 - msg, 136 - }) { 137 - error!(shard = shard_idx, err = %e, "relay worker: failed to send to shard"); 138 - break; 139 - } 140 - } 141 - 142 - Err(miette::miette!("relay worker dispatcher shutting down")) 143 - } 144 - 145 - fn shard( 146 - id: usize, 147 - mut rx: mpsc::UnboundedReceiver<WorkerMessage>, 148 - state: Arc<AppState>, 149 - verify_signatures: bool, 150 - handle: Handle, 151 - validation_opts: Arc<ValidationOptions>, 152 - ) { 153 - let _guard = handle.enter(); 154 - let span = info_span!("worker_shard", shard = id, did = tracing::field::Empty); 155 - let _entered = span.clone().entered(); 156 - debug!("relay shard started"); 157 - 158 - let mut ctx = WorkerContext { 159 - verify_signatures, 160 - state: &state, 161 - vctx: ValidationContext { 162 - opts: &validation_opts, 163 - }, 164 - batch: state.db.inner.batch(), 165 - pending_broadcasts: Vec::with_capacity(1), 166 - }; 167 - 168 - while let Some(msg) = rx.blocking_recv() { 169 - let (did, seq) = match &msg.msg { 170 - SubscribeReposMessage::Commit(c) => (&c.repo, c.seq), 171 - SubscribeReposMessage::Identity(i) => (&i.did, i.seq), 172 - SubscribeReposMessage::Account(a) => (&a.did, a.seq), 173 - SubscribeReposMessage::Sync(s) => (&s.did, s.seq), 174 - _ => continue, 175 - }; 176 - 177 - span.record("did", &**did); 178 - 179 - let firehose = msg.firehose.clone(); 180 - if let Err(e) = Self::process_message(&mut ctx, msg) { 181 - error!(err = %e, "relay shard: error processing message"); 182 - } 183 - 184 - let res = std::mem::replace(&mut ctx.batch, ctx.state.db.inner.batch()).commit(); 185 - if let Err(e) = res { 186 - error!(shard = id, err = %e, "relay shard: failed to commit batch"); 187 - continue; 188 - } 189 - 190 - for broadcast in ctx.pending_broadcasts.drain(..) 
{ 191 - let _ = state.db.relay_broadcast_tx.send(broadcast); 192 - } 193 - 194 - // advance cursor for this firehose 195 - ctx.state 196 - .firehose_cursors 197 - .peek_with(&firehose, |_, c| c.store(seq, Ordering::SeqCst)); 198 - } 199 - } 200 - 201 - fn process_message(ctx: &mut WorkerContext, msg: WorkerMessage) -> Result<()> { 202 - let did = msg 203 - .msg 204 - .did() 205 - .expect("that we checked if we are in valid commit"); 206 - let mut repo_state = ctx.load_repo_state(did)?; 207 - 208 - if let Some(host) = msg.firehose.host_str() 209 - && msg.is_pds 210 - { 211 - let outcome = ctx.check_host_authority(did, &mut repo_state, host)?; 212 - if let super::AuthorityOutcome::WrongHost { expected } = outcome { 213 - warn!(got = host, expected = %expected, "message rejected: wrong host"); 214 - return Ok(()); 215 - } 216 - } 217 - 218 - match msg.msg { 219 - SubscribeReposMessage::Commit(commit) => { 220 - trace!("processing commit"); 221 - Self::handle_commit(ctx, &mut repo_state, *commit) 222 - } 223 - SubscribeReposMessage::Sync(sync) => { 224 - debug!("processing sync"); 225 - Self::handle_sync(ctx, &mut repo_state, *sync) 226 - } 227 - SubscribeReposMessage::Identity(identity) => { 228 - debug!("processing identity"); 229 - Self::handle_identity(ctx, &mut repo_state, *identity, msg.is_pds) 230 - } 231 - SubscribeReposMessage::Account(account) => { 232 - debug!("processing account"); 233 - Self::handle_account(ctx, &mut repo_state, *account) 234 - } 235 - _ => Ok(()), 236 - } 237 - } 238 - 239 - fn handle_commit( 240 - ctx: &mut WorkerContext, 241 - repo_state: &mut RepoState, 242 - mut commit: Commit<'static>, 243 - ) -> Result<()> { 244 - if repo_state.status != RepoStatus::Synced { 245 - return Ok(()); 246 - } 247 - 248 - let Some(validated) = ctx.validate_commit(repo_state, &commit)? else { 249 - return Ok(()); 250 - }; 251 - let ValidatedCommit { 252 - chain_break, 253 - commit_obj, 254 - .. 255 - } = validated; 256 - 257 - if chain_break.is_broken() { 258 - warn!(broken = ?chain_break, "out of sync"); 259 - // todo: we need Desynchronized on RepoStatus (and Throttled) 260 - repo_state.status = RepoStatus::Error("desynchronized".into()); 261 - } 262 - 263 - let repo_key = keys::repo_key(&commit.repo); 264 - ctx.queue_emit(|seq| { 265 - commit.seq = seq; 266 - encode_frame("#commit", &commit) 267 - })?; 268 - 269 - repo_state.root = Some(commit_obj.into()); 270 - repo_state.touch(); 271 - ctx.batch.insert( 272 - &ctx.state.db.repos, 273 - repo_key, 274 - db::ser_repo_state(repo_state)?, 275 - ); 276 - 277 - Ok(()) 278 - } 279 - 280 - fn handle_sync( 281 - ctx: &mut WorkerContext, 282 - repo_state: &mut RepoState, 283 - mut sync: Sync<'static>, 284 - ) -> Result<()> { 285 - if repo_state.status != RepoStatus::Synced { 286 - return Ok(()); 287 - } 288 - 289 - let Some(validated) = ctx.validate_sync(repo_state, &sync)? 
else { 290 - return Ok(()); 291 - }; 292 - 293 - let repo_key = keys::repo_key(&sync.did); 294 - ctx.queue_emit(|seq| { 295 - sync.seq = seq; 296 - encode_frame("#sync", &sync) 297 - })?; 298 - 299 - repo_state.root = Some(validated.commit_obj.into()); 300 - repo_state.touch(); 301 - ctx.batch.insert( 302 - &ctx.state.db.repos, 303 - repo_key, 304 - db::ser_repo_state(repo_state)?, 305 - ); 306 - 307 - Ok(()) 308 - } 309 - 310 - fn handle_identity( 311 - ctx: &mut WorkerContext, 312 - repo_state: &mut RepoState, 313 - mut identity: Identity<'static>, 314 - is_pds: bool, 315 - ) -> Result<()> { 316 - let event_ms = identity.time.0.timestamp_millis(); 317 - if repo_state.last_message_time.is_some_and(|t| event_ms <= t) { 318 - debug!("skipping stale/duplicate identity event"); 319 - return Ok(()); 320 - } 321 - repo_state.advance_message_time(event_ms); 322 - 323 - // refresh did doc if a pds sent this event 324 - // or if there is no handle specified 325 - if is_pds || identity.handle.is_none() { 326 - ctx.state.resolver.invalidate_sync(&identity.did); 327 - let doc = Handle::current().block_on(ctx.state.resolver.resolve_doc(&identity.did)); 328 - match doc { 329 - Ok(doc) => { 330 - repo_state.update_from_doc(doc); 331 - } 332 - Err(err) => { 333 - warn!(err = %err, "couldnt fetch identity"); 334 - } 335 - } 336 - } 337 - 338 - // don't pass handle through if it doesnt match ours for pds events 339 - if is_pds && repo_state.handle != identity.handle { 340 - identity.handle = None; 341 - } 342 - 343 - let repo_key = keys::repo_key(&identity.did); 344 - ctx.queue_emit(|seq| { 345 - identity.seq = seq; 346 - encode_frame("#identity", &identity) 347 - })?; 348 - 349 - ctx.batch.insert( 350 - &ctx.state.db.repos, 351 - repo_key, 352 - db::ser_repo_state(repo_state)?, 353 - ); 354 - 355 - Ok(()) 356 - } 357 - 358 - fn handle_account( 359 - ctx: &mut WorkerContext, 360 - repo_state: &mut RepoState, 361 - mut account: Account<'static>, 362 - ) -> Result<()> { 363 - let event_ms = account.time.0.timestamp_millis(); 364 - if repo_state.last_message_time.is_some_and(|t| event_ms <= t) { 365 - debug!("skipping stale/duplicate account event"); 366 - return Ok(()); 367 - } 368 - repo_state.advance_message_time(event_ms); 369 - 370 - if !account.active { 371 - use crate::ingest::stream::AccountStatus; 372 - match &account.status { 373 - Some(AccountStatus::Deleted) => { 374 - // todo: dont remove repo state? 
375 - // forward the event and remove repo state 376 - let repo_key = keys::repo_key(&account.did); 377 - ctx.queue_emit(|seq| { 378 - account.seq = seq; 379 - encode_frame("#account", &account) 380 - })?; 381 - ctx.batch.remove(&ctx.state.db.repos, repo_key); 382 - return Ok(()); 383 - } 384 - status => { 385 - repo_state.status = super::inactive_account_repo_status(&account.did, status); 386 - } 387 - } 388 - } else { 389 - repo_state.status = RepoStatus::Synced; 390 - } 391 - 392 - let repo_key = keys::repo_key(&account.did); 393 - ctx.queue_emit(|seq| { 394 - account.seq = seq; 395 - encode_frame("#account", &account) 396 - })?; 397 - 398 - repo_state.touch(); 399 - ctx.batch.insert( 400 - &ctx.state.db.repos, 401 - repo_key, 402 - db::ser_repo_state(repo_state)?, 403 - ); 404 - 405 - Ok(()) 406 - } 407 - } 408 - 409 - impl WorkerContext<'_> { 410 - fn check_host_authority( 411 - &mut self, 412 - did: &Did, 413 - repo_state: &mut RepoState, 414 - source_host: &str, 415 - ) -> Result<super::AuthorityOutcome> { 416 - let outcome = 417 - super::check_host_authority(&self.state.resolver, did, repo_state, source_host)?; 418 - if !matches!(outcome, super::AuthorityOutcome::Authorized) { 419 - self.batch.insert( 420 - &self.state.db.repos, 421 - keys::repo_key(did), 422 - db::ser_repo_state(repo_state)?, 423 - ); 424 - } 425 - Ok(outcome) 426 - } 427 - 428 - fn refresh_doc(&mut self, did: &Did, repo_state: &mut RepoState) -> Result<()> { 429 - super::refresh_doc(&self.state.resolver, did, repo_state)?; 430 - self.batch.insert( 431 - &self.state.db.repos, 432 - keys::repo_key(did), 433 - db::ser_repo_state(repo_state)?, 434 - ); 435 - Ok(()) 436 - } 437 - 438 - fn validate_commit<'c>( 439 - &mut self, 440 - repo_state: &mut RepoState, 441 - commit: &'c Commit<'c>, 442 - ) -> Result<Option<ValidatedCommit<'c>>> { 443 - let did = &commit.repo; 444 - let key = self.fetch_key(did)?; 445 - match self.vctx.validate_commit(commit, repo_state, key.as_ref()) { 446 - Ok(v) => return Ok(Some(v)), 447 - Err(CommitValidationError::StaleRev) => { 448 - trace!("skipping replayed commit"); 449 - return Ok(None); 450 - } 451 - Err(CommitValidationError::SigFailure) => {} 452 - Err(e) => { 453 - warn!(err = %e, "commit rejected"); 454 - return Ok(None); 455 - } 456 - } 457 - 458 - self.refresh_doc(did, repo_state)?; 459 - let key = self.fetch_key(did)?; 460 - match self.vctx.validate_commit(commit, repo_state, key.as_ref()) { 461 - Ok(v) => Ok(Some(v)), 462 - Err(e) => { 463 - warn!(err = %e, "commit rejected after key refresh"); 464 - Ok(None) 465 - } 466 - } 467 - } 468 - 469 - fn validate_sync( 470 - &mut self, 471 - repo_state: &mut RepoState, 472 - sync: &Sync<'_>, 473 - ) -> Result<Option<ValidatedSync>> { 474 - let did = &sync.did; 475 - let key = self.fetch_key(did)?; 476 - match self.vctx.validate_sync(sync, key.as_ref()) { 477 - Ok(v) => return Ok(Some(v)), 478 - Err(SyncValidationError::SigFailure) => {} 479 - Err(e) => { 480 - warn!(err = %e, "sync rejected"); 481 - return Ok(None); 482 - } 483 - } 484 - 485 - self.refresh_doc(did, repo_state)?; 486 - let key = self.fetch_key(did)?; 487 - match self.vctx.validate_sync(sync, key.as_ref()) { 488 - Ok(v) => Ok(Some(v)), 489 - Err(e) => { 490 - warn!(err = %e, "sync rejected after key refresh"); 491 - Ok(None) 492 - } 493 - } 494 - } 495 - 496 - fn fetch_key(&self, did: &Did) -> Result<Option<PublicKey<'static>>> { 497 - super::fetch_key(&self.state.resolver, self.verify_signatures, did) 498 - } 499 - 500 - fn load_repo_state(&self, did: &Did) -> 
Result<RepoState<'static>> { 501 - let key = keys::repo_key(did); 502 - let Some(bytes) = self.state.db.repos.get(&key).into_diagnostic()? else { 503 - return Ok(RepoState { 504 - status: RepoStatus::Synced, 505 - root: None, 506 - last_updated_at: chrono::Utc::now().timestamp(), 507 - index_id: 0, 508 - tracked: true, 509 - handle: None, 510 - pds: None, 511 - signing_key: None, 512 - last_message_time: None, 513 - }); 514 - }; 515 - Ok(db::deser_repo_state(&bytes)?.into_static()) 516 - } 517 - 518 - fn queue_emit(&mut self, make_frame: impl FnOnce(i64) -> Result<bytes::Bytes>) -> Result<()> { 519 - let seq = self.state.db.next_relay_seq.fetch_add(1, Ordering::SeqCst); 520 - let frame = make_frame(seq as i64)?; 521 - self.batch.insert( 522 - &self.state.db.relay_events, 523 - keys::relay_event_key(seq), 524 - frame.as_ref(), 525 - ); 526 - self.pending_broadcasts.push(RelayBroadcast::Persisted(seq)); 527 - Ok(()) 528 - } 529 - }
+8 -10
src/ingest/stream.rs
··· 12 12 }, 13 13 }; 14 14 use miette::Diagnostic; 15 + use serde::{Deserialize, Serialize}; 15 16 use smol_str::format_smolstr; 16 17 use thiserror::Error; 17 18 use tokio::net::TcpStream; ··· 388 389 pub time: Datetime, 389 390 } 390 391 391 - #[derive(serde::Deserialize, serde::Serialize, Debug, Clone, jacquard_derive::IntoStatic)] 392 + #[derive(Deserialize, Serialize, Debug, Clone, jacquard_derive::IntoStatic)] 392 393 #[serde(rename_all = "camelCase")] 393 394 pub struct Sync<'a> { 394 395 #[serde(with = "jacquard_common::serde_bytes_helper")] ··· 515 516 } 516 517 } 517 518 518 - use serde::Deserialize; 519 - use serde_ipld_dagcbor::de::Deserializer; 520 - 521 519 // some relays send `""` for `since` when there is no previous revision instead of null 522 520 fn deserialize_tid_or_empty<'de, D>(deserializer: D) -> Result<Option<Tid>, D::Error> 523 521 where ··· 530 528 tracing::warn!("received since with empty string instead of null"); 531 529 Ok(None) 532 530 } 533 - Some(s) => s.parse::<Tid>().map(Some).map_err(serde::de::Error::custom), 531 + Some(s) => s.parse().map(Some).map_err(serde::de::Error::custom), 534 532 } 535 533 } 536 534 537 - #[derive(Debug, Deserialize, serde::Serialize)] 535 + #[derive(Debug, Deserialize, Serialize)] 538 536 struct EventHeader { 539 537 op: i64, 540 538 t: Option<String>, ··· 547 545 } 548 546 549 547 pub fn decode_frame<'i>(bytes: &'i [u8]) -> Result<SubscribeReposMessage<'i>, FirehoseError> { 550 - let mut de = Deserializer::from_slice(bytes); 548 + let mut de = serde_ipld_dagcbor::de::Deserializer::from_slice(bytes); 551 549 let header = EventHeader::deserialize(&mut de)?; 552 550 553 551 match header.op { ··· 579 577 } 580 578 581 579 #[cfg(feature = "relay")] 582 - #[derive(serde::Serialize)] 580 + #[derive(Serialize)] 583 581 struct EncodeHeader<'a> { 584 582 op: i64, 585 583 t: &'a str, 586 584 } 587 585 588 586 #[cfg(feature = "relay")] 589 - pub fn encode_frame<T: serde::Serialize>(t: &str, body: &T) -> miette::Result<bytes::Bytes> { 587 + pub fn encode_frame<T: serde::Serialize>(t: &str, msg: &T) -> miette::Result<bytes::Bytes> { 590 588 let mut buf = serde_ipld_dagcbor::to_vec(&EncodeHeader { op: 1, t }) 591 589 .map_err(|e| miette::miette!("encode_frame header: {e}"))?; 592 590 buf.extend_from_slice( 593 - &serde_ipld_dagcbor::to_vec(body).map_err(|e| miette::miette!("encode_frame body: {e}"))?, 591 + &serde_ipld_dagcbor::to_vec(msg).map_err(|e| miette::miette!("encode_frame body: {e}"))?, 594 592 ); 595 593 Ok(bytes::Bytes::from(buf)) 596 594 }
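`encode_frame` writes two dag-cbor values back to back, a `{ op, t }` header followed by the message body, and `decode_frame` reads them sequentially from one slice. a hedged round-trip sketch of that layout with made-up header/body types (not hydrant's real frame structs), assuming the `serde` and `serde_ipld_dagcbor` crates used in the diff:

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug)]
struct Header {
    op: i64,
    t: String,
}

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct Body {
    seq: i64,
    did: String,
}

fn main() {
    // encode: header CBOR bytes immediately followed by body CBOR bytes
    let mut frame = serde_ipld_dagcbor::to_vec(&Header { op: 1, t: "#commit".into() })
        .expect("encode header");
    frame.extend_from_slice(
        &serde_ipld_dagcbor::to_vec(&Body { seq: 42, did: "did:plc:example".into() })
            .expect("encode body"),
    );

    // decode: read the header, then keep deserializing the body from the same slice
    let mut de = serde_ipld_dagcbor::de::Deserializer::from_slice(&frame);
    let header = Header::deserialize(&mut de).expect("decode header");
    let body = Body::deserialize(&mut de).expect("decode body");
    assert_eq!(header.t, "#commit");
    assert_eq!(body, Body { seq: 42, did: "did:plc:example".into() });
}
```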
-962
src/ingest/worker.rs
··· 1 - use super::*; 2 - use crate::db::{self, keys}; 3 - use crate::filter::FilterMode; 4 - use crate::ingest::stream::{Account, Commit, Identity, SubscribeReposMessage, Sync}; 5 - use crate::ingest::validation::{ 6 - CommitValidationError, SyncValidationError, ValidatedCommit, ValidatedSync, ValidationContext, 7 - ValidationOptions, 8 - }; 9 - use crate::ops; 10 - use crate::resolver::{NoSigningKeyError, ResolverError}; 11 - use crate::state::AppState; 12 - use crate::types::{AccountEvt, BroadcastEvent, GaugeState, IdentityEvt, RepoState, RepoStatus}; 13 - 14 - use fjall::OwnedWriteBatch; 15 - 16 - use jacquard_common::IntoStatic; 17 - use jacquard_common::cowstr::ToCowStr; 18 - use jacquard_common::types::did::Did; 19 - use jacquard_repo::error::CommitError; 20 - use miette::{Diagnostic, IntoDiagnostic, Result}; 21 - use rand::Rng; 22 - use std::collections::hash_map::DefaultHasher; 23 - use std::hash::{Hash, Hasher}; 24 - use std::sync::Arc; 25 - use std::sync::atomic::Ordering::SeqCst; 26 - use thiserror::Error; 27 - use tokio::runtime::Handle; 28 - use tokio::sync::mpsc; 29 - use tracing::{debug, error, info, trace, warn}; 30 - 31 - #[derive(Debug, Diagnostic, Error)] 32 - enum IngestError { 33 - #[error("{0}")] 34 - Generic(miette::Report), 35 - 36 - #[error(transparent)] 37 - #[diagnostic(transparent)] 38 - Resolver(#[from] ResolverError), 39 - 40 - #[error(transparent)] 41 - #[diagnostic(transparent)] 42 - Commit(#[from] CommitError), 43 - 44 - #[error(transparent)] 45 - #[diagnostic(transparent)] 46 - NoSigningKey(#[from] NoSigningKeyError), 47 - } 48 - 49 - impl From<miette::Report> for IngestError { 50 - fn from(report: miette::Report) -> Self { 51 - IngestError::Generic(report) 52 - } 53 - } 54 - 55 - // gate returned by check_repo_state, tells the shard loop what to do with the message 56 - enum ProcessGate<'s, 'c> { 57 - // did not exist in db, newly queued for backfill, drop 58 - NewRepo, 59 - // explicitly untracked, backfilling, or in error, drop 60 - Drop, 61 - // inactive repo receiving a non-account message, buffer the commit if present, drop otherwise 62 - Buffer(Option<&'c Commit<'c>>), 63 - // ready to process with the latest state 64 - Ready(RepoState<'s>), 65 - } 66 - 67 - // result returned by a message handler after the gate has been resolved 68 - #[derive(Debug)] 69 - enum RepoProcessResult<'s, 'c> { 70 - // message processed successfully, here is the (possibly updated) state 71 - Ok(RepoState<'s>), 72 - // repo was deleted as part of processing 73 - Deleted, 74 - // needs backfill; carries the triggering commit to buffer (None when already in the buffer) 75 - NeedsBackfill(Option<&'c Commit<'c>>), 76 - } 77 - 78 - pub struct FirehoseWorker { 79 - state: Arc<AppState>, 80 - rx: BufferRx, 81 - verify_signatures: bool, 82 - ephemeral: bool, 83 - num_shards: usize, 84 - validation_opts: Arc<ValidationOptions>, 85 - } 86 - 87 - struct WorkerContext<'a> { 88 - verify_signatures: bool, 89 - ephemeral: bool, 90 - state: &'a AppState, 91 - batch: OwnedWriteBatch, 92 - added_blocks: &'a mut i64, 93 - records_delta: &'a mut i64, 94 - broadcast_events: &'a mut Vec<BroadcastEvent>, 95 - vctx: ValidationContext<'a>, 96 - } 97 - 98 - impl FirehoseWorker { 99 - pub fn new( 100 - state: Arc<AppState>, 101 - rx: BufferRx, 102 - verify_signatures: bool, 103 - ephemeral: bool, 104 - num_shards: usize, 105 - validation_opts: ValidationOptions, 106 - ) -> Self { 107 - Self { 108 - state, 109 - rx, 110 - verify_signatures, 111 - ephemeral, 112 - num_shards, 113 - validation_opts: 
Arc::new(validation_opts), 114 - } 115 - } 116 - 117 - // starts the worker threads and the main dispatch loop 118 - // the dispatch loop reads from the firehose channel and 119 - // distributes messages to shards based on the hash of the DID 120 - pub fn run(mut self, handle: Handle) -> Result<()> { 121 - let mut shards = Vec::with_capacity(self.num_shards); 122 - 123 - for i in 0..self.num_shards { 124 - // unbounded here so we dont block other shards potentially 125 - // if one has a small lag or something 126 - let (tx, rx) = mpsc::unbounded_channel(); 127 - shards.push(tx); 128 - 129 - let state = self.state.clone(); 130 - let verify = self.verify_signatures; 131 - let ephemeral = self.ephemeral; 132 - let handle = handle.clone(); 133 - let validation_opts = self.validation_opts.clone(); 134 - 135 - std::thread::Builder::new() 136 - .name(format!("ingest-shard-{i}")) 137 - .spawn(move || { 138 - Self::shard(i, rx, state, verify, ephemeral, handle, validation_opts); 139 - }) 140 - .into_diagnostic()?; 141 - } 142 - 143 - info!(num = self.num_shards, "started shards"); 144 - 145 - let _g = handle.enter(); 146 - 147 - // dispatch loop 148 - while let Some(msg) = self.rx.blocking_recv() { 149 - let did = match &msg { 150 - IngestMessage::Firehose { msg: m, .. } => match m { 151 - SubscribeReposMessage::Commit(c) => &c.repo, 152 - SubscribeReposMessage::Identity(i) => &i.did, 153 - SubscribeReposMessage::Account(a) => &a.did, 154 - SubscribeReposMessage::Sync(s) => &s.did, 155 - _ => continue, 156 - }, 157 - IngestMessage::BackfillFinished(did) => did, 158 - }; 159 - 160 - // todo: consider using a different hasher? 161 - let mut hasher = DefaultHasher::new(); 162 - did.hash(&mut hasher); 163 - let hash = hasher.finish(); 164 - let shard_idx = (hash as usize) % self.num_shards; 165 - 166 - if let Err(e) = shards[shard_idx].send(msg) { 167 - error!(shard = shard_idx, err = %e, "failed to send message to shard, shard panicked?"); 168 - break; 169 - } 170 - } 171 - 172 - Err(miette::miette!( 173 - "firehose worker dispatcher shutting down, shard died?" 
174 - )) 175 - } 176 - 177 - #[inline(always)] 178 - fn shard( 179 - id: usize, 180 - mut rx: mpsc::UnboundedReceiver<IngestMessage>, 181 - state: Arc<AppState>, 182 - verify_signatures: bool, 183 - ephemeral: bool, 184 - handle: Handle, 185 - validation_opts: Arc<ValidationOptions>, 186 - ) { 187 - let _guard = handle.enter(); 188 - debug!(shard = id, "shard started"); 189 - 190 - let mut broadcast_events = Vec::new(); 191 - 192 - while let Some(msg) = rx.blocking_recv() { 193 - let batch = state.db.inner.batch(); 194 - broadcast_events.clear(); 195 - 196 - let mut added_blocks = 0; 197 - let mut records_delta = 0; 198 - 199 - let mut ctx = WorkerContext { 200 - state: &state, 201 - batch, 202 - added_blocks: &mut added_blocks, 203 - records_delta: &mut records_delta, 204 - broadcast_events: &mut broadcast_events, 205 - vctx: ValidationContext { 206 - opts: &validation_opts, 207 - }, 208 - verify_signatures, 209 - ephemeral, 210 - }; 211 - 212 - match msg { 213 - IngestMessage::BackfillFinished(did) => { 214 - debug!(did = %did, "backfill finished, verifying state and draining buffer"); 215 - 216 - let repo_key = keys::repo_key(&did); 217 - if let Ok(Some(state_bytes)) = state.db.repos.get(&repo_key).into_diagnostic() { 218 - match crate::db::deser_repo_state(&state_bytes) { 219 - Ok(repo_state) => { 220 - let repo_state = repo_state.into_static(); 221 - 222 - match Self::drain_resync_buffer(&mut ctx, &did, repo_state) { 223 - Ok(RepoProcessResult::Ok(s)) => { 224 - // TODO: there might be a race condition here where we get a new commit 225 - // while the resync buffer is being drained, we should handle that probably 226 - // but also it should still be fine since we'll sync eventually anyway 227 - let res = ops::update_repo_status( 228 - &mut ctx.batch, 229 - &state.db, 230 - &did, 231 - s, 232 - RepoStatus::Synced, 233 - ); 234 - if let Err(e) = res { 235 - // this can only fail if serde retry fails which would be really weird 236 - error!(did = %did, err = %e, "failed to transition to synced"); 237 - } 238 - } 239 - // we don't have to handle this since drain_resync_buffer doesn't delete 240 - // the commits from the resync buffer so they will get retried later 241 - Ok(RepoProcessResult::NeedsBackfill(_)) => {} 242 - Ok(RepoProcessResult::Deleted) => {} 243 - Err(e) => { 244 - error!(did = %did, err = %e, "failed to drain resync buffer") 245 - } 246 - }; 247 - } 248 - Err(e) => error!(did = %did, err = %e, "failed to deser repo state"), 249 - } 250 - } 251 - } 252 - IngestMessage::Firehose { 253 - relay: firehose, 254 - is_pds, 255 - msg, 256 - } => { 257 - let _span = tracing::info_span!("firehose", relay = %firehose).entered(); 258 - let (did, seq) = match &msg { 259 - SubscribeReposMessage::Commit(c) => (&c.repo, c.seq), 260 - SubscribeReposMessage::Identity(i) => (&i.did, i.seq), 261 - SubscribeReposMessage::Account(a) => (&a.did, a.seq), 262 - SubscribeReposMessage::Sync(s) => (&s.did, s.seq), 263 - _ => continue, 264 - }; 265 - 266 - let gate = match Self::check_repo_state(&mut ctx, did, &msg) { 267 - Ok(g) => g, 268 - Err(e) => { 269 - if let IngestError::Generic(ref r) = e { 270 - db::check_poisoned_report(r); 271 - } 272 - error!(did = %did, err = %e, "error in check_repo_state"); 273 - state 274 - .firehose_cursors 275 - .peek_with(&firehose, |_, c| c.store(seq, SeqCst)); 276 - continue; 277 - } 278 - }; 279 - 280 - match gate { 281 - ProcessGate::NewRepo | ProcessGate::Drop => {} 282 - ProcessGate::Buffer(commit) => { 283 - if let Some(commit) = commit { 284 - if let Err(e) 
= 285 - ops::persist_to_resync_buffer(&state.db, did, commit) 286 - { 287 - error!( 288 - did = %did, err = %e, 289 - "failed to persist commit to resync_buffer" 290 - ); 291 - } 292 - } 293 - } 294 - ProcessGate::Ready(mut repo_state) => { 295 - // first validate the pds host 296 - if let Some(host) = firehose.host_str() 297 - && is_pds 298 - { 299 - let authority = match Self::check_host_authority( 300 - &mut ctx, 301 - did, 302 - &mut repo_state, 303 - host, 304 - ) { 305 - Ok(a) => a, 306 - Err(e) => { 307 - error!(did = %did, err = %e, "failed to check host authority"); 308 - state 309 - .firehose_cursors 310 - .peek_with(&firehose, |_, c| c.store(seq, SeqCst)); 311 - continue; 312 - } 313 - }; 314 - match authority { 315 - AuthorityOutcome::Authorized => {} 316 - AuthorityOutcome::WasStale => { 317 - // pds migrated: our data may be stale, backfill from the new host 318 - warn!(did = %did, source_host = host, "pds migration detected, triggering backfill"); 319 - if let Err(e) = 320 - Self::trigger_backfill(&mut ctx, did, repo_state) 321 - { 322 - error!(did = %did, err = %e, "failed to trigger backfill"); 323 - } else if let SubscribeReposMessage::Commit(commit) = &msg { 324 - if let Err(e) = ops::persist_to_resync_buffer( 325 - &state.db, did, commit, 326 - ) { 327 - error!( 328 - did = %did, err = %e, 329 - "failed to persist commit to resync_buffer" 330 - ); 331 - } 332 - } 333 - state 334 - .firehose_cursors 335 - .peek_with(&firehose, |_, c| c.store(seq, SeqCst)); 336 - continue; 337 - } 338 - // todo: ideally ban pds 339 - AuthorityOutcome::WrongHost { expected } => { 340 - warn!(did = %did, got = host, expected = %expected, "commit rejected: wrong host"); 341 - state 342 - .firehose_cursors 343 - .peek_with(&firehose, |_, c| c.store(seq, SeqCst)); 344 - continue; 345 - } 346 - } 347 - } 348 - 349 - let pre_status = repo_state.status.clone(); 350 - 351 - // if it was in deactivated/takendown/suspended state, we can mark it 352 - // as synced because we are receiving an active=true account event now. 
353 - // we do this before dispatching so handle_account sees pre_status correctly 354 - if matches!( 355 - pre_status, 356 - RepoStatus::Deactivated 357 - | RepoStatus::Suspended 358 - | RepoStatus::Takendown 359 - ) { 360 - if let SubscribeReposMessage::Account(acc) = &msg { 361 - if acc.active { 362 - match ops::update_repo_status( 363 - &mut ctx.batch, 364 - &ctx.state.db, 365 - did, 366 - repo_state, 367 - RepoStatus::Synced, 368 - ) { 369 - Ok(rs) => { 370 - repo_state = rs; 371 - ctx.state.db.update_gauge_diff( 372 - &GaugeState::Resync(None), 373 - &GaugeState::Synced, 374 - ); 375 - } 376 - Err(e) => { 377 - error!( 378 - did = %did, err = %e, 379 - "failed to transition inactive repo to synced" 380 - ); 381 - state 382 - .firehose_cursors 383 - .peek_with(&firehose, |_, c| { 384 - c.store(seq, SeqCst) 385 - }); 386 - continue; 387 - } 388 - } 389 - } 390 - } 391 - } 392 - 393 - match Self::process_message(&mut ctx, &msg, did, repo_state, pre_status) 394 - { 395 - Ok(RepoProcessResult::Ok(_)) => {} 396 - Ok(RepoProcessResult::Deleted) => { 397 - state.db.update_count("repos", -1); 398 - } 399 - Ok(RepoProcessResult::NeedsBackfill(Some(commit))) => { 400 - if let Err(e) = 401 - ops::persist_to_resync_buffer(&state.db, did, commit) 402 - { 403 - error!( 404 - did = %did, err = %e, 405 - "failed to persist commit to resync_buffer" 406 - ); 407 - } 408 - } 409 - Ok(RepoProcessResult::NeedsBackfill(None)) => {} 410 - Err(e) => { 411 - if let IngestError::Generic(ref r) = e { 412 - db::check_poisoned_report(r); 413 - } 414 - error!(did = %did, err = %e, "error processing message"); 415 - if Self::check_if_retriable_failure(&e) { 416 - if let SubscribeReposMessage::Commit(commit) = &msg { 417 - if let Err(e) = ops::persist_to_resync_buffer( 418 - &state.db, did, commit, 419 - ) { 420 - error!( 421 - did = %did, err = %e, 422 - "failed to persist commit to resync_buffer" 423 - ); 424 - } 425 - } 426 - } 427 - } 428 - } 429 - } 430 - } 431 - 432 - state 433 - .firehose_cursors 434 - .peek_with(&firehose, |_, c| c.store(seq, SeqCst)); 435 - } 436 - } 437 - 438 - if let Err(e) = ctx.batch.commit() { 439 - error!(shard = id, err = %e, "failed to commit batch"); 440 - } 441 - 442 - if added_blocks > 0 { 443 - state.db.update_count("blocks", added_blocks); 444 - } 445 - if records_delta != 0 { 446 - state.db.update_count("records", records_delta); 447 - } 448 - for evt in broadcast_events.drain(..) 
{ 449 - let _ = state.db.event_tx.send(evt); 450 - } 451 - 452 - // state.db.inner.persist(fjall::PersistMode::Buffer).ok(); 453 - } 454 - } 455 - 456 - // don't retry commit or sync on key fetch errors 457 - // since we'll just try again later if we get commit or sync again 458 - fn check_if_retriable_failure(e: &IngestError) -> bool { 459 - matches!( 460 - e, 461 - IngestError::Generic(_) 462 - | IngestError::Resolver(ResolverError::Ratelimited) 463 - | IngestError::Resolver(ResolverError::Transport(_)) 464 - ) 465 - } 466 - 467 - fn process_message<'s, 'c>( 468 - ctx: &mut WorkerContext, 469 - msg: &'c SubscribeReposMessage<'static>, 470 - did: &Did, 471 - repo_state: RepoState<'s>, 472 - pre_status: RepoStatus, 473 - ) -> Result<RepoProcessResult<'s, 'c>, IngestError> { 474 - match msg { 475 - SubscribeReposMessage::Commit(commit) => { 476 - trace!(did = %did, "processing commit"); 477 - Self::handle_commit(ctx, did, repo_state, commit) 478 - } 479 - SubscribeReposMessage::Sync(sync) => { 480 - debug!(did = %did, "processing sync"); 481 - Self::handle_sync(ctx, did, repo_state, sync) 482 - } 483 - SubscribeReposMessage::Identity(identity) => { 484 - debug!(did = %did, "processing identity"); 485 - Self::handle_identity(ctx, did, repo_state, identity) 486 - } 487 - SubscribeReposMessage::Account(account) => { 488 - debug!(did = %did, "processing account"); 489 - Self::handle_account(ctx, did, repo_state, pre_status, account) 490 - } 491 - _ => { 492 - warn!(did = %did, "unknown message type in buffer"); 493 - Ok(RepoProcessResult::Ok(repo_state)) 494 - } 495 - } 496 - } 497 - 498 - fn handle_commit<'s, 'c>( 499 - ctx: &mut WorkerContext, 500 - did: &Did, 501 - mut repo_state: RepoState<'s>, 502 - commit: &'c Commit<'c>, 503 - ) -> Result<RepoProcessResult<'s, 'c>, IngestError> { 504 - repo_state.advance_message_time(commit.time.0.timestamp_millis()); 505 - 506 - let Some(validated) = ctx.validate_commit(did, &mut repo_state, commit)? else { 507 - return Ok(RepoProcessResult::Ok(repo_state)); 508 - }; 509 - 510 - if validated.chain_break.is_broken() { 511 - warn!( 512 - did = %did, 513 - broken = ?validated.chain_break, 514 - "chain break detected, triggering backfill" 515 - ); 516 - Self::trigger_backfill(ctx, did, repo_state)?; 517 - // not updating repo state root commit since we are backfilling anyway 518 - return Ok(RepoProcessResult::NeedsBackfill(Some(commit))); 519 - } 520 - 521 - let res = ops::apply_commit( 522 - &mut ctx.batch, 523 - &ctx.state.db, 524 - repo_state, 525 - validated, 526 - &ctx.state.filter.load(), 527 - ctx.ephemeral, 528 - )?; 529 - let repo_state = res.repo_state; 530 - *ctx.added_blocks += res.blocks_count; 531 - *ctx.records_delta += res.records_delta; 532 - ctx.broadcast_events.push(BroadcastEvent::Persisted( 533 - ctx.state.db.next_event_id.load(SeqCst) - 1, 534 - )); 535 - 536 - Ok(RepoProcessResult::Ok(repo_state)) 537 - } 538 - 539 - fn handle_sync<'s, 'c>( 540 - ctx: &mut WorkerContext, 541 - did: &Did, 542 - mut repo_state: RepoState<'s>, 543 - sync: &'c Sync<'c>, 544 - ) -> Result<RepoProcessResult<'s, 'c>, IngestError> { 545 - repo_state.advance_message_time(sync.time.0.timestamp_millis()); 546 - 547 - let Some(validated) = ctx.validate_sync(did, &mut repo_state, sync)? 
else { 548 - return Ok(RepoProcessResult::Ok(repo_state)); 549 - }; 550 - 551 - // skip noop syncs (data CID unchanged) 552 - if let Some(current_commit) = &repo_state.root { 553 - if current_commit.data == validated.commit_obj.data { 554 - debug!(did = %did, "skipping noop sync"); 555 - return Ok(RepoProcessResult::Ok(repo_state)); 556 - } 557 - 558 - if validated.commit_obj.rev.as_str() <= current_commit.rev.to_tid().as_str() { 559 - debug!(did = %did, "skipping replayed sync"); 560 - return Ok(RepoProcessResult::Ok(repo_state)); 561 - } 562 - } 563 - // not updating repo state root commit since we are backfilling anyway 564 - 565 - warn!(did = %did, "sync event, triggering backfill"); 566 - let repo_state = Self::trigger_backfill(ctx, did, repo_state)?; 567 - Ok(RepoProcessResult::Ok(repo_state)) 568 - } 569 - 570 - fn handle_identity<'s>( 571 - ctx: &mut WorkerContext, 572 - did: &Did, 573 - mut repo_state: RepoState<'s>, 574 - identity: &Identity<'_>, 575 - ) -> Result<RepoProcessResult<'s, 'static>, IngestError> { 576 - let event_ms = identity.time.0.timestamp_millis(); 577 - if repo_state.last_message_time.is_some_and(|t| event_ms <= t) { 578 - debug!(did = %did, "skipping stale/duplicate identity event"); 579 - return Ok(RepoProcessResult::Ok(repo_state)); 580 - } 581 - repo_state.advance_message_time(event_ms); 582 - 583 - // todo: make this match relay sync behaviour 584 - let changed = if identity.handle.is_none() { 585 - // no handle sent is basically "invalidate your caches" 586 - ctx.state.resolver.invalidate_sync(did); 587 - let doc = Handle::current().block_on(ctx.state.resolver.resolve_doc(did))?; 588 - repo_state.update_from_doc(doc) 589 - } else { 590 - let old_handle = repo_state.handle.clone(); 591 - repo_state.handle = identity 592 - .handle 593 - .clone() 594 - .map(IntoStatic::into_static) 595 - .or(repo_state.handle); 596 - repo_state.handle != old_handle 597 - }; 598 - 599 - repo_state.touch(); 600 - ctx.batch.insert( 601 - &ctx.state.db.repos, 602 - keys::repo_key(did), 603 - crate::db::ser_repo_state(&repo_state)?, 604 - ); 605 - 606 - if changed { 607 - let evt = IdentityEvt { 608 - did: did.clone().into_static(), 609 - handle: repo_state.handle.clone().map(IntoStatic::into_static), 610 - }; 611 - ctx.broadcast_events 612 - .push(ops::make_identity_event(&ctx.state.db, evt)); 613 - } 614 - 615 - Ok(RepoProcessResult::Ok(repo_state)) 616 - } 617 - 618 - fn handle_account<'s, 'c>( 619 - ctx: &mut WorkerContext, 620 - did: &Did, 621 - mut repo_state: RepoState<'s>, 622 - pre_status: RepoStatus, 623 - account: &'c Account<'c>, 624 - ) -> Result<RepoProcessResult<'s, 'c>, IngestError> { 625 - let event_ms = account.time.0.timestamp_millis(); 626 - if repo_state.last_message_time.is_some_and(|t| event_ms <= t) { 627 - debug!(did = %did, "skipping stale/duplicate account event"); 628 - return Ok(RepoProcessResult::Ok(repo_state)); 629 - } 630 - repo_state.advance_message_time(event_ms); 631 - 632 - // get active before we do any mutations 633 - let was_inactive = matches!( 634 - pre_status, 635 - RepoStatus::Deactivated | RepoStatus::Takendown | RepoStatus::Suspended 636 - ); 637 - let is_inactive = !account.active; 638 - let evt = AccountEvt { 639 - did: did.clone().into_static(), 640 - active: account.active, 641 - status: account.status.as_ref().map(|s| s.to_cowstr().into_static()), 642 - }; 643 - 644 - ctx.refresh_doc(&mut repo_state, did)?; 645 - 646 - if !account.active { 647 - use crate::ingest::stream::AccountStatus; 648 - match &account.status { 649 - 
Some(AccountStatus::Deleted) => { 650 - debug!(did = %did, "account deleted, wiping data"); 651 - crate::ops::delete_repo(&mut ctx.batch, &ctx.state.db, did, &repo_state)?; 652 - return Ok(RepoProcessResult::Deleted); 653 - } 654 - status => { 655 - let target_status = inactive_account_repo_status(did, status); 656 - 657 - if repo_state.status == target_status { 658 - debug!(did = %did, ?target_status, "account status unchanged"); 659 - ctx.batch.insert( 660 - &ctx.state.db.repos, 661 - keys::repo_key(did), 662 - crate::db::ser_repo_state(&repo_state)?, 663 - ); 664 - return Ok(RepoProcessResult::Ok(repo_state)); 665 - } 666 - 667 - repo_state = ops::update_repo_status( 668 - &mut ctx.batch, 669 - &ctx.state.db, 670 - did, 671 - repo_state, 672 - target_status, 673 - )?; 674 - ctx.state 675 - .db 676 - .update_gauge_diff(&GaugeState::Synced, &GaugeState::Resync(None)); 677 - } 678 - } 679 - } else { 680 - // active=true: transition to synced is handled in the shard dispatch before calling this 681 - } 682 - 683 - if was_inactive != is_inactive || repo_state.status != pre_status { 684 - ctx.broadcast_events 685 - .push(ops::make_account_event(&ctx.state.db, evt)); 686 - } 687 - 688 - // persist last_message_time for paths that don't go through update_repo_status 689 - // (active=true and already synced). harmless double-write for the status-changed path 690 - ctx.batch.insert( 691 - &ctx.state.db.repos, 692 - keys::repo_key(did), 693 - crate::db::ser_repo_state(&repo_state)?, 694 - ); 695 - 696 - Ok(RepoProcessResult::Ok(repo_state)) 697 - } 698 - 699 - // checks the current state of the repo in the database and returns a gate 700 - // indicating what the shard loop should do with the message. 701 - // if the repo is new, creates initial state and triggers backfill 702 - // for synced repos with buffered commits, drains the buffer first 703 - // so events are applied in order 704 - fn check_repo_state<'s, 'c>( 705 - ctx: &mut WorkerContext, 706 - did: &Did<'_>, 707 - msg: &'c SubscribeReposMessage<'static>, 708 - ) -> Result<ProcessGate<'s, 'c>, IngestError> { 709 - let repo_key = keys::repo_key(&did); 710 - let Some(state_bytes) = ctx.state.db.repos.get(&repo_key).into_diagnostic()? 
else { 711 - let filter = ctx.state.filter.load(); 712 - 713 - if filter.mode == FilterMode::Filter && !filter.signals.is_empty() { 714 - let commit = match msg { 715 - SubscribeReposMessage::Commit(c) => c, 716 - _ => return Ok(ProcessGate::NewRepo), 717 - }; 718 - let touches_signal = commit.ops.iter().any(|op| { 719 - op.path 720 - .split_once('/') 721 - .map(|(col, _)| { 722 - let m = filter.matches_signal(col); 723 - debug!( 724 - did = %did, path = %op.path, col = %col, signals = ?filter.signals, matched = m, 725 - "signal check" 726 - ); 727 - m 728 - }) 729 - .unwrap_or(false) 730 - }); 731 - if !touches_signal { 732 - trace!(did = %did, "dropping commit, no signal-matching ops"); 733 - return Ok(ProcessGate::NewRepo); 734 - } 735 - } 736 - 737 - debug!(did = %did, "discovered new account from firehose, queueing backfill"); 738 - 739 - let repo_state = RepoState::untracked(rand::rng().next_u64()); 740 - let mut batch = ctx.state.db.inner.batch(); 741 - batch.insert( 742 - &ctx.state.db.repos, 743 - &repo_key, 744 - crate::db::ser_repo_state(&repo_state)?, 745 - ); 746 - batch.insert( 747 - &ctx.state.db.pending, 748 - keys::pending_key(repo_state.index_id), 749 - &repo_key, 750 - ); 751 - batch.commit().into_diagnostic()?; 752 - 753 - ctx.state.db.update_count("repos", 1); 754 - ctx.state 755 - .db 756 - .update_gauge_diff(&GaugeState::Synced, &GaugeState::Pending); 757 - 758 - ctx.state.notify_backfill(); 759 - 760 - return Ok(ProcessGate::NewRepo); 761 - }; 762 - 763 - let repo_state = crate::db::deser_repo_state(&state_bytes)?.into_static(); 764 - 765 - if !repo_state.tracked && repo_state.status != RepoStatus::Backfilling { 766 - trace!(did = %did, "ignoring message, repo is explicitly untracked"); 767 - return Ok(ProcessGate::Drop); 768 - } 769 - 770 - match &repo_state.status { 771 - RepoStatus::Synced => { 772 - // lazy drain: if there are buffered commits, drain them now before 773 - // applying the live message so events are applied in order 774 - if ops::has_buffered_commits(&ctx.state.db, did) { 775 - return match Self::drain_resync_buffer(ctx, did, repo_state)? 
{ 776 - RepoProcessResult::Ok(rs) => Ok(ProcessGate::Ready(rs)), 777 - // gap triggered during drain, so drop the live message 778 - RepoProcessResult::NeedsBackfill(_) => Ok(ProcessGate::Drop), 779 - RepoProcessResult::Deleted => Ok(ProcessGate::Drop), 780 - }; 781 - } 782 - Ok(ProcessGate::Ready(repo_state)) 783 - } 784 - RepoStatus::Backfilling | RepoStatus::Error(_) => { 785 - debug!( 786 - did = %did, status = ?repo_state.status, 787 - "ignoring message, repo is backfilling or in error state" 788 - ); 789 - Ok(ProcessGate::Drop) 790 - } 791 - RepoStatus::Deactivated | RepoStatus::Suspended | RepoStatus::Takendown => { 792 - // account events always pass through because the 793 - // shard dispatch handles the active=true transition 794 - if let SubscribeReposMessage::Account(_) = msg { 795 - return Ok(ProcessGate::Ready(repo_state)); 796 - } 797 - // buffer commits and drop everything else until we get an active=true message 798 - let commit = match msg { 799 - SubscribeReposMessage::Commit(c) => Some(c.as_ref()), 800 - _ => None, 801 - }; 802 - Ok(ProcessGate::Buffer(commit)) 803 - } 804 - } 805 - } 806 - 807 - fn drain_resync_buffer<'s>( 808 - ctx: &mut WorkerContext, 809 - did: &Did, 810 - mut repo_state: RepoState<'s>, 811 - ) -> Result<RepoProcessResult<'s, 'static>, IngestError> { 812 - let prefix = keys::resync_buffer_prefix(did); 813 - 814 - for guard in ctx.state.db.resync_buffer.prefix(&prefix) { 815 - let (key, value) = guard.into_inner().into_diagnostic()?; 816 - let commit: Commit = rmp_serde::from_slice(&value).into_diagnostic()?; 817 - 818 - // buffered commits have already been source-checked on arrival; skip host check 819 - let res = Self::handle_commit(ctx, did, repo_state, &commit); 820 - let res = match res { 821 - Ok(r) => r, 822 - Err(e) => { 823 - if !Self::check_if_retriable_failure(&e) { 824 - ctx.batch.remove(&ctx.state.db.resync_buffer, key); 825 - } 826 - return Err(e); 827 - } 828 - }; 829 - match res { 830 - RepoProcessResult::Ok(rs) => { 831 - ctx.batch.remove(&ctx.state.db.resync_buffer, key); 832 - repo_state = rs; 833 - } 834 - RepoProcessResult::NeedsBackfill(_) => { 835 - // commit is already in the buffer, leave it there for the next backfill 836 - return Ok(RepoProcessResult::NeedsBackfill(None)); 837 - } 838 - RepoProcessResult::Deleted => { 839 - ctx.batch.remove(&ctx.state.db.resync_buffer, key); 840 - return Ok(RepoProcessResult::Deleted); 841 - } 842 - } 843 - } 844 - 845 - Ok(RepoProcessResult::Ok(repo_state)) 846 - } 847 - 848 - // transitions repo to Backfilling, commits the status change immediately (separate from 849 - // ctx.batch), updates the gauge, and pings the backfill worker. returns the updated state. 
850 - fn trigger_backfill<'s>( 851 - ctx: &mut WorkerContext, 852 - did: &Did, 853 - repo_state: RepoState<'s>, 854 - ) -> Result<RepoState<'s>, IngestError> { 855 - let mut batch = ctx.state.db.inner.batch(); 856 - let repo_state = ops::update_repo_status( 857 - &mut batch, 858 - &ctx.state.db, 859 - did, 860 - repo_state, 861 - RepoStatus::Backfilling, 862 - )?; 863 - batch.commit().into_diagnostic()?; 864 - ctx.state 865 - .db 866 - .update_gauge_diff(&GaugeState::Synced, &GaugeState::Pending); 867 - ctx.state.notify_backfill(); 868 - Ok(repo_state) 869 - } 870 - 871 - fn check_host_authority( 872 - ctx: &mut WorkerContext, 873 - did: &Did, 874 - repo_state: &mut RepoState, 875 - source_host: &str, 876 - ) -> Result<AuthorityOutcome, IngestError> { 877 - let outcome = 878 - super::check_host_authority(&ctx.state.resolver, did, repo_state, source_host)?; 879 - if !matches!(outcome, AuthorityOutcome::Authorized) { 880 - ctx.batch.insert( 881 - &ctx.state.db.repos, 882 - keys::repo_key(did), 883 - crate::db::ser_repo_state(repo_state)?, 884 - ); 885 - } 886 - Ok(outcome) 887 - } 888 - } 889 - 890 - impl WorkerContext<'_> { 891 - fn refresh_doc(&mut self, repo_state: &mut RepoState, did: &Did) -> Result<(), IngestError> { 892 - super::refresh_doc(&self.state.resolver, did, repo_state)?; 893 - self.batch.insert( 894 - &self.state.db.repos, 895 - keys::repo_key(did), 896 - crate::db::ser_repo_state(repo_state)?, 897 - ); 898 - Ok(()) 899 - } 900 - 901 - fn fetch_key(&self, did: &Did) -> Result<Option<PublicKey<'static>>> { 902 - super::fetch_key(&self.state.resolver, self.verify_signatures, did) 903 - } 904 - 905 - fn validate_commit<'s, 'c>( 906 - &mut self, 907 - did: &Did, 908 - repo_state: &mut RepoState<'s>, 909 - commit: &'c Commit<'c>, 910 - ) -> Result<Option<ValidatedCommit<'c>>, IngestError> { 911 - let key = self.fetch_key(did)?; 912 - match self.vctx.validate_commit(commit, repo_state, key.as_ref()) { 913 - Ok(v) => return Ok(Some(v)), 914 - Err(CommitValidationError::StaleRev) => { 915 - debug!(did = %did, commit_rev = %commit.rev, "skipping replayed commit"); 916 - return Ok(None); 917 - } 918 - Err(CommitValidationError::SigFailure) => {} 919 - Err(e) => { 920 - warn!(did = %did, err = %e, "commit rejected"); 921 - return Ok(None); 922 - } 923 - } 924 - 925 - self.refresh_doc(repo_state, did)?; 926 - let key = self.fetch_key(did)?; 927 - match self.vctx.validate_commit(commit, repo_state, key.as_ref()) { 928 - Ok(v) => Ok(Some(v)), 929 - Err(e) => { 930 - warn!(did = %did, err = %e, "commit rejected after key refresh"); 931 - Ok(None) 932 - } 933 - } 934 - } 935 - 936 - fn validate_sync<'s>( 937 - &mut self, 938 - did: &Did, 939 - repo_state: &mut RepoState<'s>, 940 - sync: &Sync<'_>, 941 - ) -> Result<Option<ValidatedSync>, IngestError> { 942 - let key = self.fetch_key(did)?; 943 - match self.vctx.validate_sync(sync, key.as_ref()) { 944 - Ok(v) => return Ok(Some(v)), 945 - Err(SyncValidationError::SigFailure) => {} 946 - Err(e) => { 947 - warn!(did = %did, err = %e, "sync rejected"); 948 - return Ok(None); 949 - } 950 - } 951 - 952 - self.refresh_doc(repo_state, did)?; 953 - let key = self.fetch_key(did)?; 954 - match self.vctx.validate_sync(sync, key.as_ref()) { 955 - Ok(v) => Ok(Some(v)), 956 - Err(e) => { 957 - warn!(did = %did, err = %e, "sync rejected after key refresh"); 958 - Ok(None) 959 - } 960 - } 961 - } 962 - }
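both removed workers pinned each repo to a shard by hashing its DID, so every event for a given repo is handled in order on a single thread. a standalone sketch of that dispatch rule, using only the standard library:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// hash the DID and take the result modulo the shard count,
// so the same repo always lands on the same shard
fn shard_for(did: &str, num_shards: usize) -> usize {
    let mut hasher = DefaultHasher::new();
    did.hash(&mut hasher);
    (hasher.finish() as usize) % num_shards
}

fn main() {
    let shards = 8;
    let a = shard_for("did:plc:alice", shards);
    let b = shard_for("did:plc:alice", shards);
    // same DID always maps to the same shard, preserving per-repo ordering
    assert_eq!(a, b);
    println!("did:plc:alice -> shard {a}");
}
```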
+6 -7
src/lib.rs
··· 3 3 pub mod filter; 4 4 pub mod types; 5 5 6 - #[cfg(all(feature = "relay", feature = "events", not(debug_assertions)))] 7 - compile_error!("`relay` and `events` features are mutually exclusive"); 8 - 9 - #[cfg(all(feature = "relay", feature = "backlinks", not(debug_assertions)))] 10 - compile_error!("`relay` and `backlinks` features are mutually exclusive"); 6 + #[cfg(all(feature = "relay", feature = "indexer"))] 7 + compile_error!("can't be relay and indexer at the same time"); 8 + #[cfg(all(feature = "relay", feature = "backlinks"))] 9 + compile_error!("can't index backlinks while running as a relay"); 11 10 12 11 pub(crate) mod api; 13 - #[cfg(feature = "events")] 12 + #[cfg(feature = "indexer")] 14 13 pub(crate) mod backfill; 15 14 #[cfg(feature = "backlinks")] 16 15 pub(crate) mod backlinks; 17 16 pub(crate) mod crawler; 18 17 pub(crate) mod db; 19 18 pub(crate) mod ingest; 20 - #[cfg(feature = "events")] 19 + #[cfg(feature = "indexer")] 21 20 pub(crate) mod ops; 22 21 pub(crate) mod resolver; 23 22 pub(crate) mod state;
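with the rename, `indexer` and `relay` are mutually exclusive build roles, and the `compile_error!` guard now fires in debug builds too. a hypothetical sketch of branching on the two roles; hydrant itself gates whole modules with `#[cfg(...)]`, this only illustrates the split:

```rust
// illustrative only: cfg! keeps the sketch compiling whichever features are set
fn role() -> &'static str {
    if cfg!(feature = "relay") {
        "relay: persist and re-emit firehose frames"
    } else if cfg!(feature = "indexer") {
        "indexer: index records and serve xrpc queries"
    } else {
        "no role feature enabled"
    }
}

fn main() {
    println!("running as: {}", role());
}
```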
+72 -70
src/ops.rs
··· 6 6 use jacquard_common::Data; 7 7 use jacquard_common::types::did::Did; 8 8 use miette::{Context, IntoDiagnostic, Result}; 9 - use rand::{Rng, rng}; 10 9 use std::collections::HashMap; 11 10 use std::sync::atomic::Ordering; 12 11 use tracing::debug; 13 12 14 13 use crate::db::types::{DbAction, DbRkey, DbTid, TrimmedDid}; 15 - use crate::db::{self, Db, keys, ser_repo_state}; 14 + use crate::db::{self, Db, keys}; 16 15 use crate::filter::FilterConfig; 17 16 use crate::ingest::stream::Commit; 18 17 use crate::ingest::validation::ValidatedCommit; ··· 32 31 "buffered commit to resync_buffer" 33 32 ); 34 33 Ok(()) 35 - } 36 - 37 - pub fn has_buffered_commits(db: &Db, did: &Did) -> bool { 38 - let prefix = keys::resync_buffer_prefix(did); 39 - db.resync_buffer.prefix(&prefix).next().is_some() 40 34 } 41 35 42 36 // emitting identity is ephemeral ··· 69 63 batch: &mut OwnedWriteBatch, 70 64 db: &Db, 71 65 did: &Did, 72 - repo_state: &RepoState, 66 + _repo_state: &RepoState, 73 67 ) -> Result<()> { 74 68 debug!(did = %did, "deleting repo"); 75 69 76 70 let repo_key = keys::repo_key(did); 77 - let pending_key = keys::pending_key(repo_state.index_id); 71 + let metadata_key = keys::repo_metadata_key(did); 78 72 79 - // 1. delete from repos, pending, resync 80 - batch.remove(&db.repos, &repo_key); 81 - match repo_state.status { 82 - RepoStatus::Synced => {} 83 - RepoStatus::Backfilling => { 84 - batch.remove(&db.pending, &pending_key); 85 - } 86 - _ => { 87 - batch.remove(&db.resync, &repo_key); 88 - } 73 + let metadata_bytes = db.repo_metadata.get(&metadata_key).into_diagnostic()?; 74 + if let Some(metadata_bytes) = metadata_bytes { 75 + let metadata = db::deser_repo_metadata(&metadata_bytes)?; 76 + batch.remove(&db.pending, keys::pending_key(metadata.index_id)); 89 77 } 78 + 79 + // 1. delete from resync, and metadata 80 + // we don't delete from repos, relay uses it as a tombstone 81 + // todo: we should still delete it after some time 82 + batch.remove(&db.resync, &repo_key); 83 + batch.remove(&db.repo_metadata, &metadata_key); 90 84 91 85 // 2. 
delete from resync buffer 92 86 let resync_prefix = keys::resync_buffer_prefix(did); ··· 123 117 Ok(()) 124 118 } 125 119 126 - pub fn update_repo_status<'batch, 's>( 120 + pub fn transition_repo<'batch, 's>( 127 121 batch: &'batch mut OwnedWriteBatch, 128 122 db: &Db, 129 123 did: &Did, ··· 133 127 debug!(did = %did, status = ?new_status, "updating repo status"); 134 128 135 129 let repo_key = keys::repo_key(did); 136 - let pending_key = keys::pending_key(repo_state.index_id); 130 + let metadata_key = keys::repo_metadata_key(did); 131 + 132 + let metadata_bytes = db.repo_metadata.get(&metadata_key).into_diagnostic()?; 133 + if let Some(metadata_bytes) = metadata_bytes { 134 + let metadata = db::deser_repo_metadata(&metadata_bytes)?; 135 + let pending_key = keys::pending_key(metadata.index_id); 137 136 138 - // manage queues 139 - match &new_status { 140 - RepoStatus::Synced => { 141 - batch.remove(&db.pending, &pending_key); 142 - // we dont have to remove from resync here because it has to transition resync -> pending first 143 - } 144 - RepoStatus::Backfilling => { 145 - // if we are coming from an error state, remove from resync 146 - if !matches!(repo_state.status, RepoStatus::Synced) { 137 + // manage queues 138 + match &new_status { 139 + RepoStatus::Synced => { 140 + batch.remove(&db.pending, &pending_key); 141 + // we dont have to remove from resync here because it has to transition resync -> pending first 142 + } 143 + RepoStatus::Error(msg) => { 144 + tracing::warn!("transitioning to error: {msg}"); 145 + batch.remove(&db.pending, &pending_key); 146 + // TODO: we need to make errors have kind instead of "message" in repo status 147 + // and then pass it to resync error kind 148 + let resync_state = crate::types::ResyncState::Error { 149 + kind: crate::types::ResyncErrorKind::Generic, 150 + retry_count: 0, 151 + next_retry: chrono::Utc::now().timestamp(), 152 + }; 153 + batch.insert( 154 + &db.resync, 155 + &repo_key, 156 + rmp_serde::to_vec(&resync_state).into_diagnostic()?, 157 + ); 158 + } 159 + RepoStatus::Deactivated | RepoStatus::Takendown | RepoStatus::Suspended => { 160 + // this shouldnt be needed since a repo wont be in a pending state when it gets to any of these states 161 + // batch.remove(&db.pending, &pending_key); 162 + let resync_state = ResyncState::Gone { 163 + status: new_status.clone(), 164 + }; 165 + batch.insert( 166 + &db.resync, 167 + &repo_key, 168 + rmp_serde::to_vec(&resync_state).into_diagnostic()?, 169 + ); 170 + } 171 + RepoStatus::Deleted => { 172 + // terminal state: remove from queues, no resync entry needed 173 + batch.remove(&db.pending, &pending_key); 147 174 batch.remove(&db.resync, &repo_key); 148 175 } 149 - // remove the old entry 150 - batch.remove(&db.pending, &pending_key); 151 - // add as new entry 152 - repo_state.index_id = rng().next_u64(); 153 - batch.insert( 154 - &db.pending, 155 - keys::pending_key(repo_state.index_id), 156 - &repo_key, 157 - ); 158 - } 159 - RepoStatus::Error(_msg) => { 160 - batch.remove(&db.pending, &pending_key); 161 - // TODO: we need to make errors have kind instead of "message" in repo status 162 - // and then pass it to resync error kind 163 - let resync_state = crate::types::ResyncState::Error { 164 - kind: crate::types::ResyncErrorKind::Generic, 165 - retry_count: 0, 166 - next_retry: chrono::Utc::now().timestamp(), 167 - }; 168 - batch.insert( 169 - &db.resync, 170 - &repo_key, 171 - rmp_serde::to_vec(&resync_state).into_diagnostic()?, 172 - ); 173 - } 174 - RepoStatus::Deactivated | 
RepoStatus::Takendown | RepoStatus::Suspended => { 175 - // this shouldnt be needed since a repo wont be in a pending state when it gets to any of these states 176 - // batch.remove(&db.pending, &pending_key); 177 - let resync_state = ResyncState::Gone { 178 - status: new_status.clone(), 179 - }; 180 - batch.insert( 181 - &db.resync, 182 - &repo_key, 183 - rmp_serde::to_vec(&resync_state).into_diagnostic()?, 184 - ); 176 + RepoStatus::Desynchronized | RepoStatus::Throttled => { 177 + // like an error: remove from pending and schedule a resync attempt 178 + batch.remove(&db.pending, &pending_key); 179 + let resync_state = crate::types::ResyncState::Error { 180 + kind: crate::types::ResyncErrorKind::Generic, 181 + retry_count: 0, 182 + next_retry: chrono::Utc::now().timestamp(), 183 + }; 184 + batch.insert( 185 + &db.resync, 186 + &repo_key, 187 + rmp_serde::to_vec(&resync_state).into_diagnostic()?, 188 + ); 189 + } 185 190 } 186 191 } 187 192 193 + repo_state.active = matches!(new_status, RepoStatus::Synced | RepoStatus::Error(_)); 188 194 repo_state.status = new_status; 189 195 repo_state.touch(); 190 - 191 - batch.insert(&db.repos, &repo_key, ser_repo_state(&repo_state)?); 192 196 193 197 Ok(repo_state) 194 198 } ··· 214 218 215 219 repo_state.root = Some(validated.commit_obj.into()); 216 220 repo_state.touch(); 217 - 218 - batch.insert(&db.repos, keys::repo_key(did), ser_repo_state(&repo_state)?); 219 221 220 222 // 2. iterate ops and update records index 221 223 let mut records_delta = 0;
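`transition_repo` (formerly `update_repo_status`) now reads `index_id` from the separate `repo_metadata` partition and does the queue bookkeeping per target status. a condensed sketch of that mapping with hypothetical enum names, mirroring the branches added above:

```rust
#[derive(Debug, PartialEq)]
enum Status { Synced, Error, Desynchronized, Throttled, Deactivated, Takendown, Suspended, Deleted }

#[derive(Debug, PartialEq)]
enum QueueOps {
    RemovePending,              // Synced: drop the pending entry, keep only the repo row
    RemovePendingScheduleRetry, // Error / Desynchronized / Throttled: move to resync with a retry entry
    MarkResyncGone,             // Deactivated / Takendown / Suspended: park in resync as Gone
    RemovePendingAndResync,     // Deleted: terminal, clear both queues
}

fn queue_ops(status: &Status) -> QueueOps {
    match status {
        Status::Synced => QueueOps::RemovePending,
        Status::Error | Status::Desynchronized | Status::Throttled => QueueOps::RemovePendingScheduleRetry,
        Status::Deactivated | Status::Takendown | Status::Suspended => QueueOps::MarkResyncGone,
        Status::Deleted => QueueOps::RemovePendingAndResync,
    }
}

fn main() {
    assert_eq!(queue_ops(&Status::Throttled), QueueOps::RemovePendingScheduleRetry);
    assert_eq!(queue_ops(&Status::Deleted), QueueOps::RemovePendingAndResync);
    // the `active` flag follows the same transition: only Synced and Error count as active
    let active = |s: &Status| matches!(s, Status::Synced | Status::Error);
    assert!(active(&Status::Synced) && !active(&Status::Takendown));
}
```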
+97 -39
src/types.rs
··· 1 1 use std::fmt::{Debug, Display}; 2 2 3 + use bytes::Bytes; 3 4 use jacquard_common::types::cid::IpldCid; 4 5 use jacquard_common::types::nsid::Nsid; 5 6 use jacquard_common::types::string::{Did, Rkey}; ··· 16 17 pub(crate) mod v2 { 17 18 use super::*; 18 19 19 - // todo: add desynchronized and throttled fields 20 20 #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] 21 21 pub enum RepoStatus { 22 22 Backfilling, ··· 40 40 #[derive(Debug, Clone, Serialize, Deserialize)] 41 41 #[serde(bound(deserialize = "'i: 'de"))] 42 42 pub(crate) struct RepoState<'i> { 43 - // todo: add active field 44 43 pub status: RepoStatus, 45 44 pub root: Option<Commit>, 46 - // todo: is this actually valid? the spec says this is informal and intermadiate 47 - // services may change it. we should probably document it. if we cant use this 48 - // then how do we dedup account / identity ops? 45 + pub last_message_time: Option<i64>, 46 + pub last_updated_at: i64, 47 + pub tracked: bool, 48 + pub index_id: u64, 49 + #[serde(borrow)] 50 + pub signing_key: Option<DidKey<'i>>, 51 + #[serde(borrow)] 52 + pub pds: Option<CowStr<'i>>, 53 + #[serde(borrow)] 54 + pub handle: Option<Handle<'i>>, 55 + } 56 + } 57 + 58 + pub(crate) mod v4 { 59 + use super::*; 60 + pub(crate) use v2::Commit; 61 + 62 + #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] 63 + pub enum RepoStatus { 64 + /// repo is synced to latest commit from what we know of 65 + Synced, 66 + /// some unclassified fatal error 67 + Error(SmolStr), 68 + /// user has temporarily paused their overall account. content should 69 + /// not be displayed or redistributed, but does not need to be deleted 70 + /// from infrastructure. implied time-limited. also the initial state 71 + /// for an account after migrating to another pds instance. 72 + Deactivated, 73 + /// host or service has takendown the account. implied permanent or 74 + /// long-term, though may be reverted. 75 + Takendown, 76 + /// host or service has temporarily paused the account. implied 77 + /// time-limited. 78 + Suspended, 79 + /// user or host has deleted the account, and content should be removed 80 + /// from the network. implied permanent or long-term, though may be 81 + /// reverted (deleted accounts may reactivate on the same or another 82 + /// host). 83 + /// 84 + /// account is deleted; kept as a tombstone so stale commits arriving from the upstream 85 + /// backfill window are not forwarded. active=false per spec. 86 + Deleted, 87 + /// host detected a repo sync problem. active may be true or false per spec; 88 + /// the `active` field on `RepoState` is authoritative. 89 + Desynchronized, 90 + /// resource rate-limit exceeded. active may be true or false per spec; 91 + /// the `active` field on `RepoState` is authoritative. 92 + Throttled, 93 + } 94 + 95 + #[derive(Debug, Clone, Serialize, Deserialize)] 96 + #[serde(bound(deserialize = "'i: 'de"))] 97 + pub(crate) struct RepoState<'i> { 98 + /// whether the upstream considers this account active. 99 + /// services should use the `active` flag to control overall account visibility 100 + pub active: bool, 101 + pub status: RepoStatus, 102 + pub root: Option<Commit>, 49 103 /// ms since epoch of the last firehose message we processed for this repo. 50 104 /// used to deduplicate identity / account events that can arrive from multiple relays at 51 105 /// different wall-clock times but represent the same underlying PDS event. 
52 106 pub last_message_time: Option<i64>, 53 107 /// this is when we *ingested* any last updates 54 108 pub last_updated_at: i64, // unix timestamp 55 - /// whether we are ingesting events for this repo 56 - pub tracked: bool, 57 - /// index id in pending keyspace 58 - pub index_id: u64, 59 109 #[serde(borrow)] 60 110 pub signing_key: Option<DidKey<'i>>, 61 111 #[serde(borrow)] ··· 63 113 #[serde(borrow)] 64 114 pub handle: Option<Handle<'i>>, 65 115 } 66 - } 67 - pub(crate) use v2::*; 68 116 69 - impl Display for RepoStatus { 70 - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 71 - match self { 72 - RepoStatus::Backfilling => write!(f, "backfilling"), 73 - RepoStatus::Synced => write!(f, "synced"), 74 - RepoStatus::Error(e) => write!(f, "error({e})"), 75 - RepoStatus::Deactivated => write!(f, "deactivated"), 76 - RepoStatus::Takendown => write!(f, "takendown"), 77 - RepoStatus::Suspended => write!(f, "suspended"), 78 - } 117 + #[derive(Debug, Clone, Serialize, Deserialize)] 118 + pub(crate) struct RepoMetadata { 119 + /// whether we are ingesting events for this repo 120 + pub tracked: bool, 121 + /// index id in pending keyspace (if backfilling) 122 + pub index_id: u64, 79 123 } 80 124 } 125 + 126 + pub(crate) use v4::*; 81 127 82 128 impl<'c> From<AtpCommit<'c>> for Commit { 83 129 fn from(value: AtpCommit<'c>) -> Self { ··· 93 139 94 140 impl Commit { 95 141 pub(crate) fn into_atp_commit<'i>(self, did: Did<'i>) -> Option<AtpCommit<'i>> { 96 - // from a migration 142 + // version < 0 is a sentinel used in v2 migration for repos with no commit data 97 143 if self.version < 0 { 98 144 return None; 99 145 } ··· 108 154 } 109 155 } 110 156 111 - impl<'i> RepoState<'i> { 157 + impl Display for RepoStatus { 158 + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 159 + match self { 160 + RepoStatus::Synced => write!(f, "synced"), 161 + RepoStatus::Error(e) => write!(f, "error({e})"), 162 + RepoStatus::Deactivated => write!(f, "deactivated"), 163 + RepoStatus::Takendown => write!(f, "takendown"), 164 + RepoStatus::Suspended => write!(f, "suspended"), 165 + RepoStatus::Deleted => write!(f, "deleted"), 166 + RepoStatus::Desynchronized => write!(f, "desynchronized"), 167 + RepoStatus::Throttled => write!(f, "throttled"), 168 + } 169 + } 170 + } 171 + 172 + impl RepoMetadata { 112 173 pub fn backfilling(index_id: u64) -> Self { 113 174 Self { 114 - status: RepoStatus::Backfilling, 115 - root: None, 116 - last_updated_at: chrono::Utc::now().timestamp(), 117 175 index_id, 118 176 tracked: true, 119 - handle: None, 120 - pds: None, 121 - signing_key: None, 122 - last_message_time: None, 123 177 } 124 178 } 179 + } 125 180 126 - /// backfilling, but not tracked yet 127 - pub fn untracked(index_id: u64) -> Self { 181 + impl<'i> RepoState<'i> { 182 + pub fn backfilling() -> Self { 128 183 Self { 129 - tracked: false, 130 - ..Self::backfilling(index_id) 184 + active: true, 185 + status: RepoStatus::Desynchronized, 186 + root: None, 187 + last_updated_at: chrono::Utc::now().timestamp(), 188 + handle: None, 189 + pds: None, 190 + signing_key: None, 191 + last_message_time: None, 131 192 } 132 193 } 133 194 ··· 158 219 159 220 fn into_static(self) -> Self::Output { 160 221 RepoState { 222 + active: self.active, 161 223 status: self.status, 162 224 root: self.root, 163 225 last_updated_at: self.last_updated_at, 164 - index_id: self.index_id, 165 - tracked: self.tracked, 166 226 handle: self.handle.map(IntoStatic::into_static), 167 227 pds: 
self.pds.map(IntoStatic::into_static), 168 228 signing_key: self.signing_key.map(IntoStatic::into_static), ··· 244 304 pub account: Option<AccountEvt<'i>>, 245 305 } 246 306 247 - #[cfg(feature = "events")] 307 + #[cfg(feature = "indexer")] 248 308 #[derive(Clone, Debug)] 249 309 pub(crate) enum BroadcastEvent { 250 310 #[allow(dead_code)] ··· 284 344 #[serde(skip_serializing_if = "Option::is_none")] 285 345 pub status: Option<CowStr<'i>>, 286 346 } 287 - 288 - use bytes::Bytes; 289 347 290 348 #[derive(Serialize, Deserialize, Clone)] 291 349 pub(crate) enum StoredData {
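since `tracked` and `index_id` move out of `RepoState` into the new `RepoMetadata` struct (and an `active` flag is added), upgrading a stored v2 entry means splitting it in two. a rough sketch of that split, assuming the actual migration lives elsewhere in this change; the `Backfilling` -> `Desynchronized` mapping here mirrors the new `RepoState::backfilling()` constructor and is an assumption:

```rust
// hypothetical per-repo v2 -> v4 upgrade; variant and field names are taken from the
// definitions above, but this is a sketch, not the migration shipped in this commit.
fn migrate_repo_state<'i>(old: v2::RepoState<'i>) -> (v4::RepoState<'i>, v4::RepoMetadata) {
    let status = match old.status {
        // v4 drops Backfilling; new repos now start out as Desynchronized
        v2::RepoStatus::Backfilling => v4::RepoStatus::Desynchronized,
        v2::RepoStatus::Synced => v4::RepoStatus::Synced,
        v2::RepoStatus::Error(e) => v4::RepoStatus::Error(e),
        v2::RepoStatus::Deactivated => v4::RepoStatus::Deactivated,
        v2::RepoStatus::Takendown => v4::RepoStatus::Takendown,
        v2::RepoStatus::Suspended => v4::RepoStatus::Suspended,
    };
    let state = v4::RepoState {
        // mirrors transition_repo: only synced / generic-error repos count as active
        active: matches!(status, v4::RepoStatus::Synced | v4::RepoStatus::Error(_)),
        status,
        root: old.root,
        last_message_time: old.last_message_time,
        last_updated_at: old.last_updated_at,
        signing_key: old.signing_key,
        pds: old.pds,
        handle: old.handle,
    };
    // queue bookkeeping now lives in its own partition next to the repo state
    let metadata = v4::RepoMetadata {
        tracked: old.tracked,
        index_id: old.index_id,
    };
    (state, metadata)
}
```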
+1 -1
tests/authenticated_stream.nu
··· 106 106 $e | select id type | insert value $value 107 107 }) 108 108 print $"captured ($events | length) events" 109 - $display_events | table -e | print 109 + $display_events | to text | print 110 110 111 111 # filter live events for the relevant entities 112 112 let relevant_events = ($events | where { |it|
+1 -1
tests/backlinks.nu
··· 157 157 # verify that reverse=true actually inverts the order using a subject with 2+ backlinks. 158 158 # returns an error string on failure, or empty string on success. 159 159 def check-reverse-ordering [url: string, subject: string, expected_count: int] { 160 - print $"checking reverse ordering — subject has ($expected_count) backlinks..." 160 + print $"checking reverse ordering... subject has ($expected_count) backlinks..." 161 161 print $" subject: ($subject)" 162 162 163 163 let fwd = (http get $"($url)/xrpc/blue.microcosm.links.getBacklinks?subject=($subject | url encode)&limit=50")
+2 -2
tests/by_collection.nu
··· 57 57 let filter = (http get $"($url)/filter") 58 58 print $"filter state: ($filter | to json)" 59 59 if not ($filter.signals | any { |s| $s == $collection }) { 60 - print $"FAILED: ($collection) not in signals — filter not configured" 60 + print $"FAILED: ($collection) not in signals, filter not configured" 61 61 try { kill -9 $instance.pid } 62 62 rm -rf $db_path 63 63 exit 1 ··· 99 99 print $"FAILED: ($did) not found in repos API" 100 100 $all_found = false 101 101 } else { 102 - print $"ok: ($did) — status: ($repo.status)" 102 + print $"ok: ($did), status: ($repo.status)" 103 103 } 104 104 } 105 105
+6 -3
tests/run_all.nu
··· 37 37 } 38 38 } 39 39 40 - def main [--only: list<string> = []] { 40 + def main [--only: list<string> = [], --skip-creds] { 41 41 print "building hydrant..." 42 - # build defaults features 42 + # build default features 43 43 cargo build 44 44 # build backlinks 45 45 cargo build --features backlinks 46 46 print "" 47 47 48 48 # discover all test scripts, excluding infrastructure files 49 - let excluded = ["common", "mock_relay", "run_all"] 49 + mut excluded = ["common", "mock_relay", "run_all"] 50 + if $skip_creds { 51 + $excluded = ($excluded | append ["authenticated_stream", "repo_sync_integrity"]) 52 + } 50 53 let discovered = ( 51 54 ls tests/*.nu 52 55 | get name
+1 -1
tests/signal_filter.nu
··· 15 15 let url = $"http://localhost:($port)" 16 16 let db_path = (mktemp -d -t hydrant_signal_test.XXXXXX) 17 17 18 - let random_str = (random chars -l 6) 18 + let random_str = ("a" + (random chars -l 5)) 19 19 let collection = $"systems.hydrant.test.($random_str)" 20 20 21 21 print $"database path: ($db_path)"