···11-use allegedly::{
22- Db, Dt, ExportPage, FolderSource, HttpSource, ListenConf, PageBoundaryState, backfill,
33- backfill_to_pg, bin_init, pages_to_pg, pages_to_weeks, poll_upstream, serve,
44-};
11+use allegedly::{Dt, bin::GlobalArgs, bin_init, pages_to_stdout, pages_to_weeks, poll_upstream};
52use clap::{CommandFactory, Parser, Subcommand};
66-use reqwest::Url;
77-use std::{net::SocketAddr, path::PathBuf, time::Instant};
88-use tokio::sync::{mpsc, oneshot};
33+use std::{path::PathBuf, time::Instant};
44+use tokio::sync::mpsc;
55+
66+mod backfill;
77+mod mirror;
98109#[derive(Debug, Parser)]
1110struct Cli {
1212- /// Upstream PLC server
1313- #[arg(short, long, global = true, env = "ALLEGEDLY_UPSTREAM")]
1414- #[clap(default_value = "https://plc.directory")]
1515- upstream: Url,
1111+ #[command(flatten)]
1212+ globals: GlobalArgs,
1313+
1614    #[command(subcommand)]
1715 command: Commands,
1816}
···2119enum Commands {
2220 /// Use weekly bundled ops to get a complete directory mirror FAST
2321 Backfill {
2424- /// Remote URL prefix to fetch bundles from
2525- #[arg(long)]
2626- #[clap(default_value = "https://plc.t3.storage.dev/plc.directory/")]
2727- http: Url,
2828- /// Local folder to fetch bundles from (overrides `http`)
2929- #[arg(long)]
3030- dir: Option<PathBuf>,
3131- /// Parallel bundle fetchers
3232- ///
3333- /// Default: 4 for http fetches, 1 for local folder
3434- #[arg(long)]
3535- source_workers: Option<usize>,
3636- /// Bulk load into did-method-plc-compatible postgres instead of stdout
3737- ///
3838- /// Pass a postgres connection url like "postgresql://localhost:5432"
3939- #[arg(long, env = "ALLEGEDLY_TO_POSTGRES")]
4040- to_postgres: Option<Url>,
4141- /// Cert for postgres (if needed)
4242- #[arg(long)]
4343- postgres_cert: Option<PathBuf>,
4444- /// Delete all operations from the postgres db before starting
4545- ///
4646- /// only used if `--to-postgres` is present
4747- #[arg(long, action)]
4848- postgres_reset: bool,
4949- /// Stop at the week ending before this date
5050- #[arg(long)]
5151- until: Option<Dt>,
5252- /// After the weekly imports, poll upstream until we're caught up
5353- #[arg(long, action)]
5454- catch_up: bool,
2222+ #[command(flatten)]
2323+ args: backfill::Args,
5524 },
5625 /// Scrape a PLC server, collecting ops into weekly bundles
5726 ///
···7645 },
7746 /// Wrap a did-method-plc server, syncing upstream and blocking op submits
7847 Mirror {
7979- /// the wrapped did-method-plc server
8080- #[arg(long, env = "ALLEGEDLY_WRAP")]
8181- wrap: Url,
8282- /// the wrapped did-method-plc server's database (write access required)
8383- #[arg(long, env = "ALLEGEDLY_WRAP_PG")]
8484- wrap_pg: Url,
8585- /// path to tls cert for the wrapped postgres db, if needed
8686- #[arg(long, env = "ALLEGEDLY_WRAP_PG_CERT")]
8787- wrap_pg_cert: Option<PathBuf>,
8888- /// wrapping server listen address
8989- #[arg(short, long, env = "ALLEGEDLY_BIND")]
9090- #[clap(default_value = "127.0.0.1:8000")]
9191- bind: SocketAddr,
9292- /// obtain a certificate from letsencrypt
9393- ///
9494- /// for now this will force listening on all interfaces at :80 and :443
9595- /// (:80 will serve an "https required" error, *will not* redirect)
9696- #[arg(
9797- long,
9898- conflicts_with("bind"),
9999- requires("acme_cache_path"),
100100- env = "ALLEGEDLY_ACME_DOMAIN"
101101- )]
102102- acme_domain: Vec<String>,
103103- /// which local directory to keep the letsencrypt certs in
104104- #[arg(long, requires("acme_domain"), env = "ALLEGEDLY_ACME_CACHE_PATH")]
105105- acme_cache_path: Option<PathBuf>,
106106- /// which public acme directory to use
107107- ///
108108- /// eg. letsencrypt staging: "https://acme-staging-v02.api.letsencrypt.org/directory"
109109- #[arg(long, requires("acme_domain"), env = "ALLEGEDLY_ACME_DIRECTORY_URL")]
110110- #[clap(default_value = "https://acme-v02.api.letsencrypt.org/directory")]
111111- acme_directory_url: Url,
4848+ #[command(flatten)]
4949+ args: mirror::Args,
11250 },
11351 /// Poll an upstream PLC server and log new ops to stdout
11452 Tail {
···11856 },
11957}
12058
121121-async fn pages_to_stdout(
122122- mut rx: mpsc::Receiver<ExportPage>,
123123- notify_last_at: Option<oneshot::Sender<Option<Dt>>>,
124124-) -> anyhow::Result<()> {
125125- let mut last_at = None;
126126- while let Some(page) = rx.recv().await {
127127- for op in &page.ops {
128128- println!("{}", serde_json::to_string(op)?);
129129- }
130130- if notify_last_at.is_some()
131131- && let Some(s) = PageBoundaryState::new(&page)
132132- {
133133- last_at = last_at.filter(|&l| l >= s.last_at).or(Some(s.last_at));
134134- }
135135- }
136136- if let Some(notify) = notify_last_at {
137137- log::trace!("notifying last_at: {last_at:?}");
138138- if notify.send(last_at).is_err() {
139139- log::error!("receiver for last_at dropped, can't notify");
140140- };
141141- }
142142- Ok(())
143143-}
144144-
145145-/// page forwarder who drops its channels on receipt of a small page
146146-///
147147-/// PLC will return up to 1000 ops on a page, and returns full pages until it
148148-/// has caught up, so this is a (hacky?) way to stop polling once we're up.
149149-fn full_pages(mut rx: mpsc::Receiver<ExportPage>) -> mpsc::Receiver<ExportPage> {
150150- let (tx, fwd) = mpsc::channel(1);
151151- tokio::task::spawn(async move {
152152- while let Some(page) = rx.recv().await
153153- && page.ops.len() > 900
154154- {
155155- tx.send(page).await.expect("to be able to forward a page");
156156- }
157157- });
158158- fwd
159159-}
160160-
16159#[tokio::main]
162162-async fn main() {
6060+async fn main() -> anyhow::Result<()> {
16361 let args = Cli::parse();
16462 let matches = Cli::command().get_matches();
16563 let name = matches.subcommand().map(|(name, _)| name).unwrap_or("???");
16664 bin_init(name);
16765
6666+    let globals = args.globals.clone();
6767+
16868    let t0 = Instant::now();
16969 match args.command {
170170- Commands::Backfill {
171171- http,
172172- dir,
173173- source_workers,
174174- to_postgres,
175175- postgres_cert,
176176- postgres_reset,
177177- until,
178178- catch_up,
179179- } => {
180180- let (tx, rx) = mpsc::channel(32); // these are big pages
181181- tokio::task::spawn(async move {
182182- if let Some(dir) = dir {
183183- log::info!("Reading weekly bundles from local folder {dir:?}");
184184- backfill(FolderSource(dir), tx, source_workers.unwrap_or(1), until)
185185- .await
186186- .inspect_err(|e| log::error!("backfill from folder problem: {e}"))
187187- .expect("to source bundles from a folder");
188188- } else {
189189- log::info!("Fetching weekly bundles from from {http}");
190190- backfill(HttpSource(http), tx, source_workers.unwrap_or(4), until)
191191- .await
192192- .expect("to source bundles from http");
193193- }
194194- });
195195-196196- // postgres writer will notify us as soon as the very last op's time is known
197197- // so we can start catching up while pg is restoring indexes and stuff
198198- let (notify_last_at, rx_last) = if catch_up {
199199- let (tx, rx) = oneshot::channel();
200200- (Some(tx), Some(rx))
201201- } else {
202202- (None, None)
203203- };
204204-205205- let to_postgres_url_bulk = to_postgres.clone();
206206- let pg_cert = postgres_cert.clone();
207207- let bulk_out_write = tokio::task::spawn(async move {
208208- if let Some(ref url) = to_postgres_url_bulk {
209209- let db = Db::new(url.as_str(), pg_cert)
210210- .await
211211- .expect("to get db for bulk out write");
212212- backfill_to_pg(db, postgres_reset, rx, notify_last_at)
213213- .await
214214- .expect("to backfill to pg");
215215- } else {
216216- pages_to_stdout(rx, notify_last_at)
217217- .await
218218- .expect("to backfill to stdout");
219219- }
220220- });
221221-222222- if let Some(rx_last) = rx_last {
223223- let mut upstream = args.upstream;
224224- upstream.set_path("/export");
225225- // wait until the time for `after` is known
226226- let last_at = rx_last.await.expect("to get the last log's createdAt");
227227- log::info!("beginning catch-up from {last_at:?} while the writer finalizes stuff");
228228- let (tx, rx) = mpsc::channel(256); // these are small pages
229229- tokio::task::spawn(async move {
230230- poll_upstream(last_at, upstream, tx)
231231- .await
232232- .expect("polling upstream to work")
233233- });
234234- bulk_out_write.await.expect("to wait for bulk_out_write");
235235- log::info!("writing catch-up pages");
236236- let full_pages = full_pages(rx);
237237- if let Some(url) = to_postgres {
238238- let db = Db::new(url.as_str(), postgres_cert)
239239- .await
240240- .expect("to connect pg for catchup");
241241- pages_to_pg(db, full_pages)
242242- .await
243243- .expect("to write catch-up pages to pg");
244244- } else {
245245- pages_to_stdout(full_pages, None)
246246- .await
247247- .expect("to write catch-up pages to stdout");
248248- }
249249- }
250250- }
7070+ Commands::Backfill { args } => backfill::run(globals, args).await?,
25171 Commands::Bundle {
25272 dest,
25373 after,
25474 clobber,
25575 } => {
256256- let mut url = args.upstream;
7676+ let mut url = globals.upstream;
25777 url.set_path("/export");
25878 let (tx, rx) = mpsc::channel(32); // read ahead if gzip stalls for some reason
25979 tokio::task::spawn(async move {
···26787 .await
26888 .expect("to write bundles to output files");
26989 }
270270- Commands::Mirror {
271271- wrap,
272272- wrap_pg,
273273- wrap_pg_cert,
274274- bind,
275275- acme_domain,
276276- acme_cache_path,
277277- acme_directory_url,
278278- } => {
279279- let db = Db::new(wrap_pg.as_str(), wrap_pg_cert)
280280- .await
281281- .expect("to connect to pg for mirroring");
282282- let latest = db
283283- .get_latest()
284284- .await
285285- .expect("to query for last createdAt")
286286- .expect("there to be at least one op in the db. did you backfill?");
287287-288288- let (tx, rx) = mpsc::channel(2);
289289- // upstream poller
290290- let mut url = args.upstream.clone();
291291- tokio::task::spawn(async move {
292292- log::info!("starting poll reader...");
293293- url.set_path("/export");
294294- tokio::task::spawn(async move {
295295- poll_upstream(Some(latest), url, tx)
296296- .await
297297- .expect("to poll upstream for mirror sync")
298298- });
299299- });
300300- // db writer
301301- let poll_db = db.clone();
302302- tokio::task::spawn(async move {
303303- log::info!("starting db writer...");
304304- pages_to_pg(poll_db, rx)
305305- .await
306306- .expect("to write to pg for mirror");
307307- });
308308-309309- let listen_conf = match (bind, acme_domain.is_empty(), acme_cache_path) {
310310- (_, false, Some(cache_path)) => ListenConf::Acme {
311311- domains: acme_domain,
312312- cache_path,
313313- directory_url: acme_directory_url.to_string(),
314314- },
315315- (bind, true, None) => ListenConf::Bind(bind),
316316- (_, _, _) => unreachable!(),
317317- };
318318-319319- serve(&args.upstream, wrap, listen_conf)
320320- .await
321321- .expect("to be able to serve the mirror proxy app");
322322- }
9090+ Commands::Mirror { args } => mirror::run(globals, args).await?,
32391 Commands::Tail { after } => {
324324- let mut url = args.upstream;
9292+ let mut url = globals.upstream;
32593 url.set_path("/export");
32694 let start_at = after.or_else(|| Some(chrono::Utc::now()));
32795 let (tx, rx) = mpsc::channel(1);
···336104 }
337105 }
338106 log::info!("whew, {:?}. goodbye!", t0.elapsed());
107107+ Ok(())
339108}
+146
src/bin/backfill.rs
···11+use allegedly::{
22+ Db, Dt, FolderSource, HttpSource, backfill, backfill_to_pg, bin::GlobalArgs, bin_init,
33+ full_pages, pages_to_pg, pages_to_stdout, poll_upstream,
44+};
55+use clap::Parser;
66+use reqwest::Url;
77+use std::path::PathBuf;
88+use tokio::sync::{mpsc, oneshot};
99+1010+#[derive(Debug, clap::Args)]
1111+pub struct Args {
1212+ /// Remote URL prefix to fetch bundles from
1313+ #[arg(long)]
1414+ #[clap(default_value = "https://plc.t3.storage.dev/plc.directory/")]
1515+ http: Url,
1616+ /// Local folder to fetch bundles from (overrides `http`)
1717+ #[arg(long)]
1818+ dir: Option<PathBuf>,
1919+ /// Parallel bundle fetchers
2020+ ///
2121+ /// Default: 4 for http fetches, 1 for local folder
2222+ #[arg(long)]
2323+ source_workers: Option<usize>,
2424+ /// Bulk load into did-method-plc-compatible postgres instead of stdout
2525+ ///
2626+ /// Pass a postgres connection url like "postgresql://localhost:5432"
2727+ #[arg(long, env = "ALLEGEDLY_TO_POSTGRES")]
2828+ to_postgres: Option<Url>,
2929+ /// Cert for postgres (if needed)
3030+ #[arg(long)]
3131+ postgres_cert: Option<PathBuf>,
3232+ /// Delete all operations from the postgres db before starting
3333+ ///
3434+ /// only used if `--to-postgres` is present
3535+ #[arg(long, action)]
3636+ postgres_reset: bool,
3737+ /// Stop at the week ending before this date
3838+ #[arg(long)]
3939+ until: Option<Dt>,
4040+ /// After the weekly imports, poll upstream until we're caught up
4141+ #[arg(long, action)]
4242+ catch_up: bool,
4343+}
4444+4545+pub async fn run(
4646+ GlobalArgs { upstream }: GlobalArgs,
4747+ Args {
4848+ http,
4949+ dir,
5050+ source_workers,
5151+ to_postgres,
5252+ postgres_cert,
5353+ postgres_reset,
5454+ until,
5555+ catch_up,
5656+ }: Args,
5757+) -> anyhow::Result<()> {
5858+ let (tx, rx) = mpsc::channel(32); // these are big pages
5959+ tokio::task::spawn(async move {
6060+ if let Some(dir) = dir {
6161+ log::info!("Reading weekly bundles from local folder {dir:?}");
6262+ backfill(FolderSource(dir), tx, source_workers.unwrap_or(1), until)
6363+ .await
6464+ .inspect_err(|e| log::error!("backfill from folder problem: {e}"))
6565+ .expect("to source bundles from a folder");
6666+ } else {
6767+            log::info!("Fetching weekly bundles from {http}");
6868+ backfill(HttpSource(http), tx, source_workers.unwrap_or(4), until)
6969+ .await
7070+ .expect("to source bundles from http");
7171+ }
7272+ });
7373+7474+ // postgres writer will notify us as soon as the very last op's time is known
7575+ // so we can start catching up while pg is restoring indexes and stuff
7676+ let (notify_last_at, rx_last) = if catch_up {
7777+ let (tx, rx) = oneshot::channel();
7878+ (Some(tx), Some(rx))
7979+ } else {
8080+ (None, None)
8181+ };
8282+8383+ let to_postgres_url_bulk = to_postgres.clone();
8484+ let pg_cert = postgres_cert.clone();
8585+ let bulk_out_write = tokio::task::spawn(async move {
8686+ if let Some(ref url) = to_postgres_url_bulk {
8787+ let db = Db::new(url.as_str(), pg_cert)
8888+ .await
8989+ .expect("to get db for bulk out write");
9090+ backfill_to_pg(db, postgres_reset, rx, notify_last_at)
9191+ .await
9292+ .expect("to backfill to pg");
9393+ } else {
9494+ pages_to_stdout(rx, notify_last_at)
9595+ .await
9696+ .expect("to backfill to stdout");
9797+ }
9898+ });
9999+100100+ if let Some(rx_last) = rx_last {
101101+ let mut upstream = upstream;
102102+ upstream.set_path("/export");
103103+ // wait until the time for `after` is known
104104+ let last_at = rx_last.await.expect("to get the last log's createdAt");
105105+ log::info!("beginning catch-up from {last_at:?} while the writer finalizes stuff");
106106+ let (tx, rx) = mpsc::channel(256); // these are small pages
107107+ tokio::task::spawn(async move {
108108+ poll_upstream(last_at, upstream, tx)
109109+ .await
110110+ .expect("polling upstream to work")
111111+ });
112112+ bulk_out_write.await.expect("to wait for bulk_out_write");
113113+ log::info!("writing catch-up pages");
114114+ let full_pages = full_pages(rx);
115115+ if let Some(url) = to_postgres {
116116+ let db = Db::new(url.as_str(), postgres_cert)
117117+ .await
118118+ .expect("to connect pg for catchup");
119119+ pages_to_pg(db, full_pages)
120120+ .await
121121+ .expect("to write catch-up pages to pg");
122122+ } else {
123123+ pages_to_stdout(full_pages, None)
124124+ .await
125125+ .expect("to write catch-up pages to stdout");
126126+ }
127127+ }
128128+ Ok(())
129129+}
130130+131131+#[derive(Debug, Parser)]
132132+struct CliArgs {
133133+ #[command(flatten)]
134134+ globals: GlobalArgs,
135135+ #[command(flatten)]
136136+ args: Args,
137137+}
138138+139139+#[allow(dead_code)]
140140+#[tokio::main]
141141+async fn main() -> anyhow::Result<()> {
142142+ let args = CliArgs::parse();
143143+ bin_init("backfill");
144144+ run(args.globals, args.args).await?;
145145+ Ok(())
146146+}
+117
src/bin/mirror.rs
···11+use allegedly::{Db, ListenConf, bin::GlobalArgs, bin_init, pages_to_pg, poll_upstream, serve};
22+use clap::Parser;
33+use reqwest::Url;
44+use std::{net::SocketAddr, path::PathBuf};
55+use tokio::sync::mpsc;
66+77+#[derive(Debug, clap::Args)]
88+pub struct Args {
99+ /// the wrapped did-method-plc server
1010+ #[arg(long, env = "ALLEGEDLY_WRAP")]
1111+ wrap: Url,
1212+ /// the wrapped did-method-plc server's database (write access required)
1313+ #[arg(long, env = "ALLEGEDLY_WRAP_PG")]
1414+ wrap_pg: Url,
1515+ /// path to tls cert for the wrapped postgres db, if needed
1616+ #[arg(long, env = "ALLEGEDLY_WRAP_PG_CERT")]
1717+ wrap_pg_cert: Option<PathBuf>,
1818+ /// wrapping server listen address
1919+ #[arg(short, long, env = "ALLEGEDLY_BIND")]
2020+ #[clap(default_value = "127.0.0.1:8000")]
2121+ bind: SocketAddr,
2222+ /// obtain a certificate from letsencrypt
2323+ ///
2424+ /// for now this will force listening on all interfaces at :80 and :443
2525+ /// (:80 will serve an "https required" error, *will not* redirect)
2626+ #[arg(
2727+ long,
2828+ conflicts_with("bind"),
2929+ requires("acme_cache_path"),
3030+ env = "ALLEGEDLY_ACME_DOMAIN"
3131+ )]
3232+ acme_domain: Vec<String>,
3333+ /// which local directory to keep the letsencrypt certs in
3434+ #[arg(long, requires("acme_domain"), env = "ALLEGEDLY_ACME_CACHE_PATH")]
3535+ acme_cache_path: Option<PathBuf>,
3636+ /// which public acme directory to use
3737+ ///
3838+ /// eg. letsencrypt staging: "https://acme-staging-v02.api.letsencrypt.org/directory"
3939+ #[arg(long, requires("acme_domain"), env = "ALLEGEDLY_ACME_DIRECTORY_URL")]
4040+ #[clap(default_value = "https://acme-v02.api.letsencrypt.org/directory")]
4141+ acme_directory_url: Url,
4242+}
4343+4444+pub async fn run(
4545+ GlobalArgs { upstream }: GlobalArgs,
4646+ Args {
4747+ wrap,
4848+ wrap_pg,
4949+ wrap_pg_cert,
5050+ bind,
5151+ acme_domain,
5252+ acme_cache_path,
5353+ acme_directory_url,
5454+ }: Args,
5555+) -> anyhow::Result<()> {
5656+ let db = Db::new(wrap_pg.as_str(), wrap_pg_cert)
5757+ .await
5858+ .expect("to connect to pg for mirroring");
5959+ let latest = db
6060+ .get_latest()
6161+ .await
6262+ .expect("to query for last createdAt")
6363+ .expect("there to be at least one op in the db. did you backfill?");
6464+6565+ let (tx, rx) = mpsc::channel(2);
6666+ // upstream poller
6767+ let mut url = upstream.clone();
6868+ tokio::task::spawn(async move {
6969+ log::info!("starting poll reader...");
7070+ url.set_path("/export");
7171+ tokio::task::spawn(async move {
7272+ poll_upstream(Some(latest), url, tx)
7373+ .await
7474+ .expect("to poll upstream for mirror sync")
7575+ });
7676+ });
7777+ // db writer
7878+ let poll_db = db.clone();
7979+ tokio::task::spawn(async move {
8080+ log::info!("starting db writer...");
8181+ pages_to_pg(poll_db, rx)
8282+ .await
8383+ .expect("to write to pg for mirror");
8484+ });
8585+8686+ let listen_conf = match (bind, acme_domain.is_empty(), acme_cache_path) {
8787+ (_, false, Some(cache_path)) => ListenConf::Acme {
8888+ domains: acme_domain,
8989+ cache_path,
9090+ directory_url: acme_directory_url.to_string(),
9191+ },
9292+ (bind, true, None) => ListenConf::Bind(bind),
9393+ (_, _, _) => unreachable!(),
9494+ };
9595+9696+ serve(&upstream, wrap, listen_conf)
9797+ .await
9898+ .expect("to be able to serve the mirror proxy app");
9999+ Ok(())
100100+}
101101+102102+#[derive(Debug, Parser)]
103103+struct CliArgs {
104104+ #[command(flatten)]
105105+ globals: GlobalArgs,
106106+ #[command(flatten)]
107107+ args: Args,
108108+}
109109+110110+#[allow(dead_code)]
111111+#[tokio::main]
112112+async fn main() -> anyhow::Result<()> {
113113+ let args = CliArgs::parse();
114114+ bin_init("mirror");
115115+ run(args.globals, args.args).await?;
116116+ Ok(())
117117+}
+14
src/bin/mod.rs
···11+use reqwest::Url;
22+33+#[derive(Debug, Clone, clap::Args)]
44+pub struct GlobalArgs {
55+ /// Upstream PLC server
66+ #[arg(short, long, global = true, env = "ALLEGEDLY_UPSTREAM")]
77+ #[clap(default_value = "https://plc.directory")]
88+ pub upstream: Url,
99+}
1010+1111+#[allow(dead_code)]
1212+fn main() {
1313+ panic!("this is not actually a module")
1414+}
+43
src/lib.rs
···11use serde::{Deserialize, Serialize};
22+use tokio::sync::{mpsc, oneshot};
2334mod backfill;
45mod client;
···78mod poll;
89mod ratelimit;
910mod weekly;
1111+1212+pub mod bin;
10131114pub use backfill::backfill;
1215pub use client::{CLIENT, UA};
···7174 cid: cid.to_string(),
7275 }
7376 }
7777+}
7878+7979+/// page forwarder who drops its channels on receipt of a small page
8080+///
8181+/// PLC will return up to 1000 ops on a page, and returns full pages until it
8282+/// has caught up, so this is a (hacky?) way to stop polling once we're up.
8383+pub fn full_pages(mut rx: mpsc::Receiver<ExportPage>) -> mpsc::Receiver<ExportPage> {
8484+ let (tx, fwd) = mpsc::channel(1);
8585+ tokio::task::spawn(async move {
8686+ while let Some(page) = rx.recv().await
8787+ && page.ops.len() > 900
8888+ {
8989+ tx.send(page).await.expect("to be able to forward a page");
9090+ }
9191+ });
9292+ fwd
9393+}
9494+9595+pub async fn pages_to_stdout(
9696+ mut rx: mpsc::Receiver<ExportPage>,
9797+ notify_last_at: Option<oneshot::Sender<Option<Dt>>>,
9898+) -> anyhow::Result<()> {
9999+ let mut last_at = None;
100100+ while let Some(page) = rx.recv().await {
101101+ for op in &page.ops {
102102+ println!("{}", serde_json::to_string(op)?);
103103+ }
104104+ if notify_last_at.is_some()
105105+ && let Some(s) = PageBoundaryState::new(&page)
106106+ {
107107+ last_at = last_at.filter(|&l| l >= s.last_at).or(Some(s.last_at));
108108+ }
109109+ }
110110+ if let Some(notify) = notify_last_at {
111111+ log::trace!("notifying last_at: {last_at:?}");
112112+ if notify.send(last_at).is_err() {
113113+ log::error!("receiver for last_at dropped, can't notify");
114114+ };
115115+ }
116116+ Ok(())
74117}
7511876119pub fn logo(name: &str) -> String {