Constellation, Spacedust, Slingshot, UFOs: atproto crates and services for microcosm
75
fork

Configure Feed

Select the types of activity you want to include in your feed.

implement top collections over arbitrary time ranges

phil 91baf420 31ac134e

+225 -235
+1 -76
ufos/src/server.rs
··· 284 284 method = GET, 285 285 path = "/collections" 286 286 }] 287 - /// Get a list of collection NSIDs with statistics 287 + /// Get collection with statistics 288 288 /// 289 289 /// ## To fetch a full list: 290 290 /// ··· 356 356 }) 357 357 } 358 358 359 - #[derive(Debug, Deserialize, JsonSchema)] 360 - struct TopByQuery { 361 - /// The maximum number of collections to return in one request. 362 - /// 363 - /// Default: 32 364 - #[schemars(range(min = 1, max = 200), default = "top_collections_default_limit")] 365 - #[serde(default = "top_collections_default_limit")] 366 - limit: usize, 367 - /// Limit collections and statistics to those seen after this UTC datetime 368 - since: Option<DateTime<Utc>>, 369 - /// Limit collections and statistics to those seen before this UTC datetime 370 - until: Option<DateTime<Utc>>, 371 - } 372 - fn top_collections_default_limit() -> usize { 373 - 32 374 - } 375 - 376 - /// Get top collections by record count 377 - #[endpoint { 378 - method = GET, 379 - path = "/collections/by-count" 380 - }] 381 - async fn get_top_collections_by_count( 382 - ctx: RequestContext<Context>, 383 - query: Query<TopByQuery>, 384 - ) -> OkCorsResponse<Vec<NsidCount>> { 385 - let Context { storage, .. 
} = ctx.context(); 386 - let q = query.into_inner(); 387 - 388 - if !(1..=200).contains(&q.limit) { 389 - let msg = format!("limit not in 1..=200: {}", q.limit); 390 - return Err(HttpError::for_bad_request(None, msg)); 391 - } 392 - 393 - let since = q.since.map(dt_to_cursor).transpose()?; 394 - let until = q.until.map(dt_to_cursor).transpose()?; 395 - 396 - let collections = storage 397 - .get_top_collections_by_count(100, since, until) 398 - .await 399 - .map_err(|e| HttpError::for_internal_error(format!("oh shoot: {e:?}")))?; 400 - 401 - ok_cors(collections) 402 - } 403 - 404 - /// Get top collections by estimated unique DIDs 405 - #[endpoint { 406 - method = GET, 407 - path = "/collections/by-dids" 408 - }] 409 - async fn get_top_collections_by_dids( 410 - ctx: RequestContext<Context>, 411 - query: Query<TopByQuery>, 412 - ) -> OkCorsResponse<Vec<NsidCount>> { 413 - let Context { storage, .. } = ctx.context(); 414 - let q = query.into_inner(); 415 - 416 - if !(1..=200).contains(&q.limit) { 417 - let msg = format!("limit not in 1..=200: {}", q.limit); 418 - return Err(HttpError::for_bad_request(None, msg)); 419 - } 420 - 421 - let since = q.since.map(dt_to_cursor).transpose()?; 422 - let until = q.until.map(dt_to_cursor).transpose()?; 423 - 424 - let collections = storage 425 - .get_top_collections_by_dids(100, since, until) 426 - .await 427 - .map_err(|e| HttpError::for_internal_error(format!("oh shoot: {e:?}")))?; 428 - 429 - ok_cors(collections) 430 - } 431 - 432 359 pub async fn serve(storage: impl StoreReader + 'static) -> Result<(), String> { 433 360 let log = ConfigLogging::StderrTerminal { 434 361 level: ConfigLoggingLevel::Info, ··· 444 371 api.register(get_records_by_collections).unwrap(); 445 372 api.register(get_records_total_seen).unwrap(); 446 373 api.register(get_collections).unwrap(); 447 - api.register(get_top_collections_by_count).unwrap(); 448 - api.register(get_top_collections_by_dids).unwrap(); 449 374 450 375 let context = Context { 451 376 
spec: Arc::new(
-14
ufos/src/storage.rs
··· 84 84 until: Option<HourTruncatedCursor>, 85 85 ) -> StorageResult<(Vec<NsidCount>, Option<Vec<u8>>)>; 86 86 87 - async fn get_top_collections_by_count( 88 - &self, 89 - limit: usize, 90 - since: Option<HourTruncatedCursor>, 91 - until: Option<HourTruncatedCursor>, 92 - ) -> StorageResult<Vec<NsidCount>>; 93 - 94 - async fn get_top_collections_by_dids( 95 - &self, 96 - limit: usize, 97 - since: Option<HourTruncatedCursor>, 98 - until: Option<HourTruncatedCursor>, 99 - ) -> StorageResult<Vec<NsidCount>>; 100 - 101 87 async fn get_counts_by_collection(&self, collection: &Nsid) -> StorageResult<(u64, u64)>; 102 88 103 89 async fn get_records_by_collections(
+187 -129
ufos/src/storage_fjall.rs
··· 1 - use crate::db_types::{db_complete, DbBytes, DbStaticStr, StaticStr}; 1 + use crate::db_types::{db_complete, DbBytes, DbStaticStr, EncodingResult, StaticStr}; 2 2 use crate::error::StorageError; 3 3 use crate::storage::{StorageResult, StorageWhatever, StoreBackground, StoreReader, StoreWriter}; 4 4 use crate::store_types::{ ··· 9 9 NewRollupCursorKey, NewRollupCursorValue, NsidRecordFeedKey, NsidRecordFeedVal, 10 10 RecordLocationKey, RecordLocationMeta, RecordLocationVal, RecordRawValue, SketchSecretKey, 11 11 SketchSecretPrefix, TakeoffKey, TakeoffValue, TrimCollectionCursorKey, WeekTruncatedCursor, 12 - WeeklyDidsKey, WeeklyRecordsKey, WeeklyRollupKey, WithCollection, 12 + WeeklyDidsKey, WeeklyRecordsKey, WeeklyRollupKey, WithCollection, WithRank, 13 13 }; 14 14 use crate::{ 15 15 CommitAction, ConsumerInfo, Did, EventBatch, Nsid, NsidCount, OrderCollectionsBy, UFOsRecord, ··· 346 346 Ok((nsid, get_counts)) 347 347 }))) 348 348 } 349 + type GetRollupKey = Arc<dyn Fn(&Nsid) -> EncodingResult<Vec<u8>>>; 350 + fn get_lookup_iter<T: WithCollection + WithRank + DbBytes + 'static>( 351 + snapshot: lsm_tree::Snapshot, 352 + start: Bound<Vec<u8>>, 353 + end: Bound<Vec<u8>>, 354 + get_rollup_key: GetRollupKey, 355 + ) -> StorageResult<NsidCounter> { 356 + Ok(Box::new(snapshot.range((start, end)).rev().map( 357 + move |kv| { 358 + let (k_bytes, _) = kv?; 359 + let key = db_complete::<T>(&k_bytes)?; 360 + let nsid = key.collection().clone(); 361 + let get_counts: GetCounts = Box::new({ 362 + let nsid = nsid.clone(); 363 + let snapshot = snapshot.clone(); 364 + let get_rollup_key = get_rollup_key.clone(); 365 + move || { 366 + let db_count_bytes = snapshot.get(get_rollup_key(&nsid)?)?.expect( 367 + "integrity: all-time rank rollup must have corresponding all-time count rollup", 368 + ); 369 + Ok(db_complete::<CountsValue>(&db_count_bytes)?) 
370 + } 371 + }); 372 + Ok((nsid, get_counts)) 373 + }, 374 + ))) 375 + } 349 376 350 377 impl FjallReader { 351 378 fn get_storage_stats(&self) -> StorageResult<serde_json::Value> { ··· 406 433 Ok(cursor) 407 434 } 408 435 409 - fn get_collections( 436 + fn get_lexi_collections( 410 437 &self, 438 + snapshot: Snapshot, 411 439 limit: usize, 412 - order: OrderCollectionsBy, 413 - since: Option<HourTruncatedCursor>, 414 - until: Option<HourTruncatedCursor>, 440 + cursor: Option<Vec<u8>>, 441 + buckets: Vec<CursorBucket>, 415 442 ) -> StorageResult<(Vec<NsidCount>, Option<Vec<u8>>)> { 416 - let snapshot = self.rollups.snapshot(); 417 - 418 - let buckets = if let (None, None) = (since, until) { 419 - vec![CursorBucket::AllTime] 420 - } else { 421 - let mut lower = self.get_earliest_hour(Some(&snapshot))?; 422 - if let Some(specified) = since { 423 - if specified > lower { 424 - lower = specified; 425 - } 426 - } 427 - let upper = until.unwrap_or_else(|| Cursor::at(SystemTime::now()).into()); 428 - CursorBucket::buckets_spanning(lower, upper) 429 - }; 430 - 443 + let cursor_nsid = cursor.as_deref().map(db_complete::<Nsid>).transpose()?; 431 444 let mut iters: Vec<Peekable<NsidCounter>> = Vec::with_capacity(buckets.len()); 432 - 433 - match order { 434 - OrderCollectionsBy::Lexi { cursor } => { 435 - let cursor_nsid = cursor.as_deref().map(db_complete::<Nsid>).transpose()?; 436 - for bucket in &buckets { 437 - let it: NsidCounter = match bucket { 438 - CursorBucket::Hour(t) => { 439 - let start = cursor_nsid 440 - .as_ref() 441 - .map(|nsid| HourlyRollupKey::after_nsid(*t, nsid)) 442 - .unwrap_or_else(|| HourlyRollupKey::start(*t))?; 443 - let end = HourlyRollupKey::end(*t)?; 444 - get_lexi_iter::<HourlyRollupKey>(&snapshot, start, end)? 
445 - } 446 - CursorBucket::Week(t) => { 447 - let start = cursor_nsid 448 - .as_ref() 449 - .map(|nsid| WeeklyRollupKey::after_nsid(*t, nsid)) 450 - .unwrap_or_else(|| WeeklyRollupKey::start(*t))?; 451 - let end = WeeklyRollupKey::end(*t)?; 452 - get_lexi_iter::<WeeklyRollupKey>(&snapshot, start, end)? 453 - } 454 - CursorBucket::AllTime => { 455 - let start = cursor_nsid 456 - .as_ref() 457 - .map(AllTimeRollupKey::after_nsid) 458 - .unwrap_or_else(AllTimeRollupKey::start)?; 459 - let end = AllTimeRollupKey::end()?; 460 - get_lexi_iter::<AllTimeRollupKey>(&snapshot, start, end)? 461 - } 462 - }; 463 - iters.push(it.peekable()); 445 + for bucket in &buckets { 446 + let it: NsidCounter = match bucket { 447 + CursorBucket::Hour(t) => { 448 + let start = cursor_nsid 449 + .as_ref() 450 + .map(|nsid| HourlyRollupKey::after_nsid(*t, nsid)) 451 + .unwrap_or_else(|| HourlyRollupKey::start(*t))?; 452 + let end = HourlyRollupKey::end(*t)?; 453 + get_lexi_iter::<HourlyRollupKey>(&snapshot, start, end)? 464 454 } 465 - } 466 - OrderCollectionsBy::RecordsCreated => todo!(), 467 - OrderCollectionsBy::DidsEstimate => todo!(), 455 + CursorBucket::Week(t) => { 456 + let start = cursor_nsid 457 + .as_ref() 458 + .map(|nsid| WeeklyRollupKey::after_nsid(*t, nsid)) 459 + .unwrap_or_else(|| WeeklyRollupKey::start(*t))?; 460 + let end = WeeklyRollupKey::end(*t)?; 461 + get_lexi_iter::<WeeklyRollupKey>(&snapshot, start, end)? 462 + } 463 + CursorBucket::AllTime => { 464 + let start = cursor_nsid 465 + .as_ref() 466 + .map(AllTimeRollupKey::after_nsid) 467 + .unwrap_or_else(AllTimeRollupKey::start)?; 468 + let end = AllTimeRollupKey::end()?; 469 + get_lexi_iter::<AllTimeRollupKey>(&snapshot, start, end)? 
470 + } 471 + }; 472 + iters.push(it.peekable()); 468 473 } 469 474 470 475 let mut out = Vec::new(); ··· 508 513 Ok((out, next_cursor)) 509 514 } 510 515 511 - fn get_top_collections_by_count( 516 + fn get_ordered_collections( 512 517 &self, 518 + snapshot: Snapshot, 513 519 limit: usize, 514 - since: Option<HourTruncatedCursor>, 515 - until: Option<HourTruncatedCursor>, 520 + order: OrderCollectionsBy, 521 + buckets: Vec<CursorBucket>, 516 522 ) -> StorageResult<Vec<NsidCount>> { 517 - Ok(if since.is_none() && until.is_none() { 518 - let snapshot = self.rollups.snapshot(); 519 - let mut out = Vec::with_capacity(limit); 520 - let prefix = AllTimeRecordsKey::from_prefix_to_db_bytes(&Default::default())?; 521 - for kv in snapshot.prefix(prefix).rev().take(limit) { 522 - let (key_bytes, _) = kv?; 523 - let key = db_complete::<AllTimeRecordsKey>(&key_bytes)?; 524 - let rollup_key = AllTimeRollupKey::new(key.collection()); 525 - let db_count_bytes = snapshot.get(rollup_key.to_db_bytes()?)?.expect( 526 - "integrity: all-time rank rollup must have corresponding all-time count rollup", 527 - ); 528 - let db_counts = db_complete::<CountsValue>(&db_count_bytes)?; 529 - assert_eq!(db_counts.records(), key.count()); 530 - out.push(NsidCount { 531 - nsid: key.collection().to_string(), 532 - records: db_counts.records(), 533 - dids_estimate: db_counts.dids().estimate() as u64, 534 - }); 523 + let mut iters: Vec<NsidCounter> = Vec::with_capacity(buckets.len()); 524 + 525 + for bucket in buckets { 526 + let it: NsidCounter = match (&order, bucket) { 527 + (OrderCollectionsBy::RecordsCreated, CursorBucket::Hour(t)) => { 528 + get_lookup_iter::<HourlyRecordsKey>( 529 + snapshot.clone(), 530 + HourlyRecordsKey::start(t)?, 531 + HourlyRecordsKey::end(t)?, 532 + Arc::new({ 533 + move |collection| HourlyRollupKey::new(t, collection).to_db_bytes() 534 + }), 535 + )? 
536 + } 537 + (OrderCollectionsBy::DidsEstimate, CursorBucket::Hour(t)) => { 538 + get_lookup_iter::<HourlyDidsKey>( 539 + snapshot.clone(), 540 + HourlyDidsKey::start(t)?, 541 + HourlyDidsKey::end(t)?, 542 + Arc::new({ 543 + move |collection| HourlyRollupKey::new(t, collection).to_db_bytes() 544 + }), 545 + )? 546 + } 547 + (OrderCollectionsBy::RecordsCreated, CursorBucket::Week(t)) => { 548 + get_lookup_iter::<WeeklyRecordsKey>( 549 + snapshot.clone(), 550 + WeeklyRecordsKey::start(t)?, 551 + WeeklyRecordsKey::end(t)?, 552 + Arc::new({ 553 + move |collection| WeeklyRollupKey::new(t, collection).to_db_bytes() 554 + }), 555 + )? 556 + } 557 + (OrderCollectionsBy::DidsEstimate, CursorBucket::Week(t)) => { 558 + get_lookup_iter::<WeeklyDidsKey>( 559 + snapshot.clone(), 560 + WeeklyDidsKey::start(t)?, 561 + WeeklyDidsKey::end(t)?, 562 + Arc::new({ 563 + move |collection| WeeklyRollupKey::new(t, collection).to_db_bytes() 564 + }), 565 + )? 566 + } 567 + (OrderCollectionsBy::RecordsCreated, CursorBucket::AllTime) => { 568 + get_lookup_iter::<AllTimeRecordsKey>( 569 + snapshot.clone(), 570 + AllTimeRecordsKey::start()?, 571 + AllTimeRecordsKey::end()?, 572 + Arc::new(|collection| AllTimeRollupKey::new(collection).to_db_bytes()), 573 + )? 574 + } 575 + (OrderCollectionsBy::DidsEstimate, CursorBucket::AllTime) => { 576 + get_lookup_iter::<AllTimeDidsKey>( 577 + snapshot.clone(), 578 + AllTimeDidsKey::start()?, 579 + AllTimeDidsKey::end()?, 580 + Arc::new(|collection| AllTimeRollupKey::new(collection).to_db_bytes()), 581 + )? 582 + } 583 + (OrderCollectionsBy::Lexi { .. 
}, _) => unreachable!(), 584 + }; 585 + iters.push(it); 586 + } 587 + 588 + // overfetch by taking a bit more than the limit 589 + // merge by collection 590 + // sort by requested order, take limit, discard all remaining 591 + // 592 + // this isn't guaranteed to be correct, but it will hopefully be close most of the time: 593 + // - it's possible that some NSIDs might score low during some time-buckets, and miss being merged 594 + // - overfetching hopefully helps a bit by catching nsids near the threshold more often, but. yeah. 595 + // 596 + // this thing is heavy, there's probably a better way 597 + let mut ranked: HashMap<Nsid, CountsValue> = HashMap::with_capacity(limit * 2); 598 + for iter in iters { 599 + for pair in iter.take((limit as f64 * 1.3).ceil() as usize) { 600 + let (nsid, get_counts) = pair?; 601 + let counts = get_counts()?; 602 + ranked.entry(nsid).or_default().merge(&counts); 535 603 } 536 - out 537 - } else { 538 - todo!() 539 - }) 604 + } 605 + let mut ranked: Vec<(Nsid, CountsValue)> = ranked.into_iter().collect(); 606 + match order { 607 + OrderCollectionsBy::RecordsCreated => ranked.sort_by_key(|(_, c)| c.records()), 608 + OrderCollectionsBy::DidsEstimate => ranked.sort_by_key(|(_, c)| c.dids().estimate()), 609 + OrderCollectionsBy::Lexi { .. 
} => unreachable!(), 610 + } 611 + let counts = ranked 612 + .into_iter() 613 + .rev() 614 + .take(limit) 615 + .map(|(nsid, cv)| NsidCount { 616 + nsid: nsid.to_string(), 617 + records: cv.records(), 618 + dids_estimate: cv.dids().estimate() as u64, 619 + }) 620 + .collect(); 621 + Ok(counts) 540 622 } 541 623 542 - fn get_top_collections_by_dids( 624 + fn get_collections( 543 625 &self, 544 626 limit: usize, 627 + order: OrderCollectionsBy, 545 628 since: Option<HourTruncatedCursor>, 546 629 until: Option<HourTruncatedCursor>, 547 - ) -> StorageResult<Vec<NsidCount>> { 548 - Ok(if since.is_none() && until.is_none() { 549 - let snapshot = self.rollups.snapshot(); 550 - let mut out = Vec::with_capacity(limit); 551 - let prefix = AllTimeDidsKey::from_prefix_to_db_bytes(&Default::default())?; 552 - for kv in snapshot.prefix(prefix).rev().take(limit) { 553 - let (key_bytes, _) = kv?; 554 - let key = db_complete::<AllTimeDidsKey>(&key_bytes)?; 555 - let rollup_key = AllTimeRollupKey::new(key.collection()); 556 - let db_count_bytes = snapshot.get(rollup_key.to_db_bytes()?)?.expect( 557 - "integrity: all-time rank rollup must have corresponding all-time count rollup", 558 - ); 559 - let db_counts = db_complete::<CountsValue>(&db_count_bytes)?; 560 - assert_eq!(db_counts.dids().estimate() as u64, key.count()); 561 - out.push(NsidCount { 562 - nsid: key.collection().to_string(), 563 - records: db_counts.records(), 564 - dids_estimate: db_counts.dids().estimate() as u64, 565 - }); 630 + ) -> StorageResult<(Vec<NsidCount>, Option<Vec<u8>>)> { 631 + let snapshot = self.rollups.snapshot(); 632 + let buckets = if let (None, None) = (since, until) { 633 + vec![CursorBucket::AllTime] 634 + } else { 635 + let mut lower = self.get_earliest_hour(Some(&snapshot))?; 636 + if let Some(specified) = since { 637 + if specified > lower { 638 + lower = specified; 639 + } 640 + } 641 + let upper = until.unwrap_or_else(|| Cursor::at(SystemTime::now()).into()); 642 + 
CursorBucket::buckets_spanning(lower, upper) 643 + }; 644 + match order { 645 + OrderCollectionsBy::Lexi { cursor } => { 646 + self.get_lexi_collections(snapshot, limit, cursor, buckets) 566 647 } 567 - out 568 - } else { 569 - todo!() 570 - }) 648 + _ => Ok(( 649 + self.get_ordered_collections(snapshot, limit, order, buckets)?, 650 + None, 651 + )), 652 + } 571 653 } 572 654 573 655 fn get_counts_by_collection(&self, collection: &Nsid) -> StorageResult<(u64, u64)> { ··· 679 761 let s = self.clone(); 680 762 tokio::task::spawn_blocking(move || { 681 763 FjallReader::get_collections(&s, limit, order, since, until) 682 - }) 683 - .await? 684 - } 685 - async fn get_top_collections_by_count( 686 - &self, 687 - limit: usize, 688 - since: Option<HourTruncatedCursor>, 689 - until: Option<HourTruncatedCursor>, 690 - ) -> StorageResult<Vec<NsidCount>> { 691 - let s = self.clone(); 692 - tokio::task::spawn_blocking(move || { 693 - FjallReader::get_top_collections_by_count(&s, limit, since, until) 694 - }) 695 - .await? 696 - } 697 - async fn get_top_collections_by_dids( 698 - &self, 699 - limit: usize, 700 - since: Option<HourTruncatedCursor>, 701 - until: Option<HourTruncatedCursor>, 702 - ) -> StorageResult<Vec<NsidCount>> { 703 - let s = self.clone(); 704 - tokio::task::spawn_blocking(move || { 705 - FjallReader::get_top_collections_by_dids(&s, limit, since, until) 706 764 }) 707 765 .await? 708 766 }
-16
ufos/src/storage_mem.rs
··· 556 556 ) -> StorageResult<(Vec<NsidCount>, Option<Vec<u8>>)> { 557 557 todo!() 558 558 } 559 - async fn get_top_collections_by_count( 560 - &self, 561 - _: usize, 562 - _: Option<HourTruncatedCursor>, 563 - _: Option<HourTruncatedCursor>, 564 - ) -> StorageResult<Vec<NsidCount>> { 565 - todo!() 566 - } 567 - async fn get_top_collections_by_dids( 568 - &self, 569 - _: usize, 570 - _: Option<HourTruncatedCursor>, 571 - _: Option<HourTruncatedCursor>, 572 - ) -> StorageResult<Vec<NsidCount>> { 573 - todo!() 574 - } 575 559 async fn get_counts_by_collection(&self, collection: &Nsid) -> StorageResult<(u64, u64)> { 576 560 let s = self.clone(); 577 561 let collection = collection.clone();
+37
ufos/src/store_types.rs
··· 69 69 fn collection(&self) -> &Nsid; 70 70 } 71 71 72 + pub trait WithRank { 73 + fn rank(&self) -> u64; 74 + } 75 + 72 76 pub type NsidRecordFeedKey = DbConcat<Nsid, Cursor>; 73 77 impl NsidRecordFeedKey { 74 78 pub fn collection(&self) -> &Nsid { ··· 313 317 pub fn with_rank(&self, new_rank: KeyRank) -> Self { 314 318 Self::new(self.prefix.suffix.clone(), new_rank, &self.suffix.suffix) 315 319 } 320 + pub fn start(cursor: C) -> EncodingResult<Bound<Vec<u8>>> { 321 + let prefix: DbConcat<DbStaticStr<P>, C> = DbConcat::from_pair(Default::default(), cursor); 322 + Ok(Bound::Included(Self::from_prefix_to_db_bytes(&prefix)?)) 323 + } 324 + pub fn end(cursor: C) -> EncodingResult<Bound<Vec<u8>>> { 325 + let prefix: DbConcat<DbStaticStr<P>, C> = DbConcat::from_pair(Default::default(), cursor); 326 + Ok(Bound::Excluded(Self::prefix_range_end(&prefix)?)) 327 + } 328 + } 329 + impl<P: StaticStr, C: DbBytes> WithCollection for BucketedRankRecordsKey<P, C> { 330 + fn collection(&self) -> &Nsid { 331 + &self.suffix.suffix 332 + } 333 + } 334 + impl<P: StaticStr, C: DbBytes> WithRank for BucketedRankRecordsKey<P, C> { 335 + fn rank(&self) -> u64 { 336 + self.suffix.prefix.into() 337 + } 316 338 } 317 339 318 340 static_str!("hourly_counts", _HourlyRollupStaticStr); ··· 437 459 pub fn count(&self) -> u64 { 438 460 self.suffix.prefix.0 439 461 } 462 + pub fn start() -> EncodingResult<Bound<Vec<u8>>> { 463 + Ok(Bound::Included(Self::from_prefix_to_db_bytes( 464 + &Default::default(), 465 + )?)) 466 + } 467 + pub fn end() -> EncodingResult<Bound<Vec<u8>>> { 468 + Ok(Bound::Excluded( 469 + Self::prefix_range_end(&Default::default())?, 470 + )) 471 + } 440 472 } 441 473 impl<P: StaticStr> WithCollection for AllTimeRankRecordsKey<P> { 442 474 fn collection(&self) -> &Nsid { 443 475 &self.suffix.suffix 476 + } 477 + } 478 + impl<P: StaticStr> WithRank for AllTimeRankRecordsKey<P> { 479 + fn rank(&self) -> u64 { 480 + self.suffix.prefix.into() 444 481 } 445 482 } 446 483