···11use anyhow::Context as _;22-use auth::jwt::{Algorithm, Curve, Header};32use data_encoding::{BASE32HEX_NOPAD, BASE64URL_NOPAD};33+use gordian_auth::jwt::{Algorithm, Curve, Header};44use owo_colors::{OwoColorize, Stream::Stderr};55use ssh_agent_client_rs::{Client, Identity};66use ssh_key::public::{EcdsaPublicKey, KeyData};···9999 account_did.if_supports_color(Stderr, |text| text.green())100100 );101101102102- assert!(self.method.contains(&"ssh".to_string()), "unsupported method");102102+ assert!(103103+ self.method.contains(&"ssh".to_string()),104104+ "unsupported method"105105+ );103106104107 // Build a list of the public keys associated with the current active DID.105108 let empty = HashSet::new();···146143 let jti: [u8; 16] = rand::random();147144 let jti: Box<str> = BASE32HEX_NOPAD.encode(&jti).to_ascii_lowercase().into();148145149149- let claims = auth::jwt::Claims {146146+ let claims = gordian_auth::jwt::Claims {150147 iss,151148 aud,152149 iat,
···11+use std::collections::HashSet;22+33+use gordian_types::OwnedDid;44+use serde::{Deserialize, Serialize};55+66+use crate::{Did, Nsid};77+88+pub const MAX_WANTED_COLLECTIONS: usize = 100;99+1010+pub const MAX_WANTED_DIDS: usize = 10_000;1111+1212+// @TODO Review1313+pub const MAX_URL_LENGTH: usize = 4000;1414+1515+/// Jetstream subscription options.1616+///1717+/// Can either be appended to the `/subscribe` URL on connection to the Jetstream instance1818+/// or sent as an options update message after connection.1919+///2020+/// Ref: <https://github.com/bluesky-social/jetstream?tab=readme-ov-file#options-updates>2121+///2222+#[derive(Clone, Debug, Default, Deserialize, Serialize)]2323+#[serde(rename_all = "camelCase")]2424+pub struct SubscriberOptions {2525+ /// Collection NSIDs to filter which records are received.2626+ ///2727+ /// Maximum: 1002828+ pub wanted_collections: HashSet<Box<Nsid>>,2929+3030+ /// Repository DIDs to filter which records are received.3131+ ///3232+ /// Maximum: `10_000`3333+ pub wanted_dids: HashSet<OwnedDid>,3434+3535+ /// Maximum message size in bytes the subscriber wants to receive.3636+ ///3737+ /// Zero means no limit, negative values are treated as zero by Jetstream, and3838+ /// will be normalized to zero when serialized.3939+ #[serde(with = "max_message_size")]4040+ pub max_message_size_bytes: i64,4141+4242+ pub cursor: Option<u128>,4343+}4444+4545+impl SubscriberOptions {4646+ /// Add a collection NSID to the subscription options.4747+ ///4848+ /// Returns an error if the maximum number of subscribed collections has been reached; `Ok(true)`4949+ /// if the collection was newly added to the set, or `Ok(false)` if the colletion was already in the5050+ /// the set.5151+ pub fn add_collection(&mut self, collection: Box<Nsid>) -> Result<bool, Box<Nsid>> {5252+ if self.wanted_collections.len() == MAX_WANTED_COLLECTIONS5353+ && !self.wanted_collections.contains(&collection)5454+ {5555+ return Err(collection);5656+ }5757+5858+ 
Ok(self.wanted_collections.insert(collection))5959+ }6060+6161+ pub fn remove_collection(&mut self, collection: &Nsid) -> bool {6262+ self.wanted_collections.remove(collection)6363+ }6464+6565+ /// Add a DID to the subscription options.6666+ ///6767+ /// Returns an error if the maximum number of subscribed DIDs has been reached; `Ok(true)`6868+ /// if the DID was newly added to the set, or `Ok(false)` if the DID was already in the6969+ /// the set.7070+ pub fn add_did(&mut self, did: OwnedDid) -> Result<bool, OwnedDid> {7171+ if self.wanted_dids.len() == MAX_WANTED_DIDS && !self.wanted_dids.contains(&did) {7272+ return Err(did);7373+ }7474+7575+ Ok(self.wanted_dids.insert(did))7676+ }7777+7878+ pub fn remove_did(&mut self, did: &Did) -> bool {7979+ self.wanted_dids.remove(did)8080+ }8181+8282+ /// Get the normalized maximum message size.8383+ #[must_use]8484+ pub fn max_message_size(&self) -> i64 {8585+ normalize_max_message_size(self.max_message_size_bytes)8686+ }8787+8888+ /// Construct the Jetstream subscribe URL, returning a tuple of the URL and a boolean8989+ /// indicating whether the client should send an options update message on connect.9090+ #[must_use]9191+ pub fn subscribe_url(&self, url: &url::Url) -> (url::Url, bool) {9292+ let mut url = url.to_owned();9393+ url.set_path("/subscribe");9494+ url.set_query(None);9595+9696+ if let Some(cursor) = self.cursor {9797+ url.query_pairs_mut()9898+ .append_pair("cursor", &cursor.to_string());9999+ }100100+101101+ if self.subscribe_url_len(&url) > MAX_URL_LENGTH {102102+ url.query_pairs_mut().append_pair("requireHello", "true");103103+ return (url, true);104104+ }105105+106106+ if !self.wanted_dids.is_empty() || !self.wanted_collections.is_empty() {107107+ let mut query = url.query_pairs_mut();108108+ for collection in &self.wanted_collections {109109+ query.append_pair("wantedCollections", collection);110110+ }111111+ for did in &self.wanted_dids {112112+ query.append_pair("wantedDids", did.as_str());113113+ 
}114114+ }115115+116116+ if self.max_message_size() > 0 {117117+ url.query_pairs_mut().append_pair(118118+ "maxMessageSizeBytes",119119+ &self.max_message_size_bytes.to_string(),120120+ );121121+ }122122+123123+ (url, false)124124+ }125125+126126+ /// Present the `SubscriberOptions` as a [`SubscriberSourcedMessage`] for serialization.127127+ #[must_use]128128+ pub fn as_subscriber_sourced_message(&self) -> SubscriberSourcedMessage<'_> {129129+ SubscriberSourcedMessage::OptionsUpdate(self.into())130130+ }131131+132132+ fn subscribe_url_len(&self, base: &url::Url) -> usize {133133+ const WANTED_DIDS_LEN: usize = "wantedDids=".len();134134+ const WANTED_COLLECTIONS_LEN: usize = "wantedCollections=".len();135135+136136+ let (wanted_did_len, wanted_dids_count) =137137+ self.wanted_dids.iter().fold((0, 0), |(len, count), val| {138138+ (len + WANTED_DIDS_LEN + val.len(), count + 1)139139+ });140140+141141+ let (wanted_col_len, wanted_col_count) = self142142+ .wanted_collections143143+ .iter()144144+ .fold((0, 0), |(len, count), val| {145145+ (len + WANTED_COLLECTIONS_LEN + val.len(), count + 1)146146+ });147147+148148+ let (message_size_len, message_size_count) = match self.max_message_size() {149149+ 0 => (0, 0),150150+ n => (n.to_string().len() + "maxMessageSizeBytes=".len(), 1),151151+ };152152+153153+ let param_count = wanted_dids_count + wanted_col_count + message_size_count;154154+ base.as_str().len() + message_size_len + wanted_did_len + wanted_col_len + param_count155155+ }156156+}157157+158158+mod max_message_size {159159+ use serde::{Deserialize, Deserializer, Serializer};160160+161161+ pub fn deserialize<'de, D>(deserializer: D) -> Result<i64, D::Error>162162+ where163163+ D: Deserializer<'de>,164164+ {165165+ let value = <i64 as Deserialize>::deserialize(deserializer)?;166166+ Ok(super::normalize_max_message_size(value))167167+ }168168+169169+ pub fn serialize<S>(value: &i64, serializer: S) -> Result<S::Ok, S::Error>170170+ where171171+ S: Serializer,172172+ 
{173173+ serializer.serialize_i64(super::normalize_max_message_size(*value))174174+ }175175+}176176+177177+const fn normalize_max_message_size(value: i64) -> i64 {178178+ value.abs()179179+}180180+181181+/// Subscriber sourced message.182182+///183183+/// Ref: <https://github.com/bluesky-social/jetstream?tab=readme-ov-file#subscriber-sourced-messages>184184+///185185+#[derive(Debug, Serialize)]186186+#[serde(tag = "type", content = "payload", rename_all = "snake_case")]187187+pub enum SubscriberSourcedMessage<'a> {188188+ OptionsUpdate(OptionsUpdate<'a>),189189+}190190+191191+impl SubscriberSourcedMessage<'_> {192192+ /// Serialize the [`SubscriberSourcedMessage`] to JSON.193193+ #[must_use]194194+ pub fn to_json(&self) -> String {195195+ serde_json::to_string(self).expect("SubscriberSourcedMessage should be serializable")196196+ }197197+}198198+199199+#[derive(Debug, Serialize)]200200+#[serde(rename_all = "camelCase")]201201+pub struct OptionsUpdate<'a> {202202+ wanted_collections: &'a HashSet<Box<Nsid>>,203203+ wanted_dids: &'a HashSet<OwnedDid>,204204+ #[serde(with = "max_message_size")]205205+ max_message_size_bytes: &'a i64,206206+}207207+208208+impl<'a> From<&'a SubscriberOptions> for OptionsUpdate<'a> {209209+ fn from(value: &'a SubscriberOptions) -> Self {210210+ let SubscriberOptions {211211+ wanted_collections,212212+ wanted_dids,213213+ max_message_size_bytes,214214+ cursor: _,215215+ } = value;216216+ Self {217217+ wanted_collections,218218+ wanted_dids,219219+ max_message_size_bytes,220220+ }221221+ }222222+}223223+224224+#[cfg(test)]225225+mod tests {226226+ use std::collections::HashSet;227227+228228+ use gordian_types::{Did, Nsid};229229+230230+ use super::SubscriberOptions;231231+232232+ #[test]233233+ fn default() {234234+ let base = "wss://jetstream1.us-east.bsky.network".parse().unwrap();235235+ let options = SubscriberOptions::default();236236+ let (url, _) = options.subscribe_url(&base);237237+ assert_eq!(238238+ url.as_str(),239239+ 
"wss://jetstream1.us-east.bsky.network/subscribe"240240+ );241241+ }242242+243243+ #[test]244244+ fn one_collection() {245245+ let base = "wss://jetstream1.us-east.bsky.network".parse().unwrap();246246+ let mut options = SubscriberOptions::default();247247+ options248248+ .add_collection(Nsid::from_static("app.bsky.feed.like").into_boxed())249249+ .unwrap();250250+ let (url, _) = options.subscribe_url(&base);251251+ assert_eq!(252252+ url.as_str(),253253+ "wss://jetstream1.us-east.bsky.network/subscribe?wantedCollections=app.bsky.feed.like"254254+ );255255+ }256256+257257+ #[test]258258+ fn query_len() {259259+ let url: url::Url = "wss://example.url/subscribe".parse().unwrap();260260+ let mut options = SubscriberOptions::default();261261+ assert_eq!(262262+ options.subscribe_url_len(&url),263263+ "wss://example.url/subscribe".len()264264+ );265265+266266+ options267267+ .add_collection(Nsid::from_static("sh.tangled.*").into_boxed())268268+ .unwrap();269269+270270+ assert_eq!(271271+ options.subscribe_url_len(&url),272272+ "wss://example.url/subscribe?wantedCollections=sh.tangled.*".len()273273+ );274274+275275+ options276276+ .add_collection(Nsid::from_static("app.bsky.*").into_boxed())277277+ .unwrap();278278+ assert_eq!(279279+ options.subscribe_url_len(&url),280280+ "wss://example.url/subscribe?wantedCollections=sh.tangled.*&wantedCollections=app.bsky.*".len()281281+ );282282+283283+ options.max_message_size_bytes = 1_000_000;284284+ assert_eq!(285285+ options.subscribe_url_len(&url),286286+ "wss://example.url/subscribe?wantedCollections=sh.tangled.*&wantedCollections=app.bsky.*&maxMessageSizeBytes=1000000".len()287287+ );288288+ }289289+290290+ #[test]291291+ fn serialize_default_options() {292292+ let options = SubscriberOptions::default();293293+ let serialized = options.as_subscriber_sourced_message().to_json();294294+ assert_eq!(295295+ serialized,296296+ 
r#"{"type":"options_update","payload":{"wantedCollections":[],"wantedDids":[],"maxMessageSizeBytes":0}}"#297297+ );298298+ }299299+300300+ #[test]301301+ fn serialize_example_options() {302302+ let options = SubscriberOptions {303303+ wanted_collections: HashSet::from_iter([304304+ Nsid::from_static("app.bsky.feed.post").into_boxed()305305+ ]),306306+ wanted_dids: HashSet::from_iter([307307+ Did::from_static("did:plc:q6gjnaw2blty4crticxkmujt").to_owned()308308+ ]),309309+ max_message_size_bytes: 1000000,310310+ ..Default::default()311311+ };312312+ let serialized = options.as_subscriber_sourced_message().to_json();313313+ assert_eq!(314314+ serialized,315315+ r#"{"type":"options_update","payload":{"wantedCollections":["app.bsky.feed.post"],"wantedDids":["did:plc:q6gjnaw2blty4crticxkmujt"],"maxMessageSizeBytes":1000000}}"#316316+ )317317+ }318318+}
+65
crates/gordian-knot/Cargo.toml
···11+[package]22+name = "gordian-knot"33+description = "An alternative Tangled knot-server"44+version.workspace = true55+authors.workspace = true66+repository.workspace = true77+license.workspace = true88+edition.workspace = true99+publish.workspace = true1010+1111+[dependencies]1212+gordian-types = { workspace = true, features = ["sqlx", "time"] }1313+gordian-auth.workspace = true1414+gordian-identity.workspace = true1515+gordian-jetstream.workspace = true1616+gordian-lexicon.workspace = true1717+git-service.workspace = true1818+1919+anyhow.workspace = true2020+gix.workspace = true2121+reqwest.workspace = true2222+serde.workspace = true2323+serde_json.workspace = true2424+thiserror.workspace = true2525+tracing.workspace = true2626+url.workspace = true2727+2828+aws-lc-rs = { version = "1.14.1", default-features = false, features = ["alloc", "aws-lc-sys"] }2929+axum = { workspace = true, features = ["ws"] }3030+axum-extra = { version = "0.12.1", features = ["async-read-body"] }3131+bytes = "1.10.1"3232+clap = { version = "4.5.47", features = ["derive", "env", "string"] }3333+data-encoding.workspace = true3434+futures-util = "0.3.31"3535+hyper-util = { version = "0.1.17", features = ["client"] }3636+mimetype-detector = "0.3.4"3737+moka = { version = "0.12.12", features = ["future"] }3838+rand = "0.9.2"3939+rayon = "1.11.0"4040+rustc-hash = "2.1.1"4141+time.workspace = true4242+sqlx = { version = "0.8.6", features = ["runtime-tokio", "sqlite", "time", "json", "macros", "derive"] }4343+tempfile = "3.24.0"4444+tokio = { version = "1.47.1", features = ["io-util", "macros", "net", "process", "signal", "rt-multi-thread"] }4545+tokio-rayon = "2.1.0"4646+tokio-stream = { version = "0.1.17", features = ["time"] }4747+tokio-tungstenite = "0.28.0"4848+tokio-util = "0.7.18"4949+tower = { version = "0.5.2", features = ["buffer", "filter", "limit"] }5050+tower-http = { version = "0.6.6", features = ["decompression-gzip", "request-id", "trace", "tracing", "util"] 
}5151+tracing-subscriber = { version = "0.3.20", features = ["env-filter"] }5252+dashmap = "6.1.0"5353+mock-pds = { version = "0.0.0", path = "../mock-pds" }5454+clap_complete = "4.5.65"5555+5656+[dev-dependencies]5757+http-body-util = "0.1.3"5858+multibase = "0.9.1"5959+6060+[target.'cfg(not(target_env = "msvc"))'.dependencies]6161+tikv-jemallocator = { version = "0.6.1", optional = true }6262+6363+[features]6464+default = ["jemalloc"]6565+jemalloc = ["dep:tikv-jemallocator"]
+197
crates/gordian-knot/README.md
···11+# gordian-knot22+33+A blazingly fast 🚀 and memory-efficient [knot server](https://tangled.org/tangled.org/core/tree/master/knotserver).44+55+Work in progress.66+77+## Status88+99+### XRPC API1010+1111+| Status | Lexicon Method | Notes |1212+| :----: | :--------------------------------- | :-- |1313+| ✅ | `sh.tangled.owner` | |1414+| ✅ | `sh.tangled.knot.version` | |1515+| ✅ | `sh.tangled.repo.archive` | |1616+| ✅ | `sh.tangled.repo.blob` | |1717+| ✅ | `sh.tangled.repo.branch` | Ignores `shortHash` parameter |1818+| ✅ | `sh.tangled.repo.branches` | |1919+| ✅ | `sh.tangled.repo.create` | |2020+| ✅ | `sh.tangled.repo.delete` | |2121+| ✅ | `sh.tangled.repo.diff` | |2222+| ✅ | `sh.tangled.repo.getDefaultBranch` | |2323+| ✅ | `sh.tangled.repo.mergeCheck` | |2424+| ✅ | `sh.tangled.repo.log` | |2525+| ✅ | `sh.tangled.repo.setDefaultBranch` | |2626+| ✅ | `sh.tangled.repo.tags` | |2727+| 🔶 | `sh.tangled.repo.compare` | Seems to work, but only computes `patch` field 😅 |2828+| 🔶 | `sh.tangled.repo.tree` | Cheats by not computing most recent commit 🚀 |2929+| ❌ | `sh.tangled.repo.deleteBranch` | |3030+| ❌ | `sh.tangled.repo.forkStatus` | |3131+| ❌ | `sh.tangled.repo.forkSync` | |3232+| ❌ | `sh.tangled.repo.hiddenRef` | |3333+| ❌ | `sh.tangled.repo.merge` | |3434+| ❌ | `sh.tangled.repo.languages` | |3535+| ⚫️ | ~~sh.tangled.knot.listKeys~~ | |3636+3737+### Jetstream Ingest3838+3939+Complete create/update/delete ingest for collections:4040+4141+- [x] `sh.tangled.knot.member`4242+- [x] `sh.tangled.publicKey`4343+- [x] `sh.tangled.repo`4444+- [x] `sh.tangled.repo.collaborator`4545+4646+### `git` Services4747+4848+- [x] `git-archive`4949+- [x] `git-receive-pack`5050+- [x] `git-upload-pack`5151+5252+5353+### `/events`5454+5555+`sh.tangled.repo.refUpdate` events are generated, but pipelines won’t run because i don’t emit any `sh.tangled.repo.pipeline` events.5656+5757+## Compile5858+5959+`cargo` is your friend:6060+6161+```sh6262+; cargo build --release --package 
gordian-knot6363+```6464+6565+## Running6666+6767+Serve the knot-server with the `serve` subcommand. All options are configured from the CLI:6868+6969+```sh7070+; gordian-knot serve --help7171+Serve the tangled knot7272+7373+Usage: gordian-knot serve [OPTIONS] --name <NAME> --owner <OWNER>7474+7575+Options:7676+ -n, --name <NAME>7777+ FQDN of the knot7878+7979+ [env: KNOT_NAME=]8080+8181+ -o, --owner <OWNER>8282+ Handle or DID of the knot owner8383+8484+ [env: KNOT_OWNER=]8585+8686+ -r, --repos <REPOS>8787+ Base path for repositories8888+8989+ [env: KNOT_REPO_BASE=]9090+ [default: /home/tjh/gordian]9191+9292+ -H, --hooks <HOOKS>9393+ Path to knot-level git hooks9494+9595+ [env: KNOT_HOOKS_PATH=]9696+9797+ --git-config <GIT_CONFIG>9898+ Path to knot-level git config9999+100100+ [env: KNOT_GIT_CONFIG_PATH=]101101+ [default: /home/tjh/gordian/git_config]102102+103103+ --bind <BIND>104104+ Address to bind the the public knot API105105+106106+ [env: KNOT_ADDR=]107107+ [default: localhost:5555]108108+109109+ --db <DB>110110+ Path to the knot sqlite database111111+112112+ [env: KNOT_DATABASE_PATH=]113113+ [default: knot.db]114114+115115+ --plc-directory <PLC_DIRECTORY>116116+ PLC directory for DID resolution117117+118118+ [env: KNOT_PLC_DIRECTORY=]119119+ [default: https://plc.directory]120120+121121+ -j, --jetstream <JETSTREAM>122122+ [env: KNOT_JETSTREAM=]123123+ [default: wss://jetstream1.us-east.bsky.network,wss://jetstream2.us-east.bsky.network,wss://jetstream1.us-west.bsky.network,wss://jetstream2.us-west.bsky.network]124124+125125+ --require-signed-push=<REQUIRE_SIGNED_PUSH>126126+ Require git pushes to be signed by a public key from a 'sh.tangled.publicKey'.127127+128128+ See: <https://git-scm.com/docs/git-push#Documentation/git-push.txt---signed>129129+130130+ [default: true]131131+ [possible values: true, false]132132+133133+ --repo-cache-size <REPO_CACHE_SIZE>134134+ Number of open repository handles to cache.135135+136136+ Keeping open handles reduces the 
overhead of opening a repository at the expense of increased memory usage.137137+138138+ [env: KNOT_REPO_CACHE_SIZE=]139139+ [default: 0]140140+141141+ --repo-cache-idle <REPO_CACHE_IDLE>142142+ Seconds to retain an idle repository handle in cache143143+144144+ [env: KNOT_REPO_CACHE_IDLE=]145145+ [default: 60]146146+147147+ --repo-cache-live <REPO_CACHE_LIVE>148148+ Seconds to retain a repository handle in cache149149+150150+ [env: KNOT_REPO_CACHE_LIVE=]151151+ [default: 600]152152+153153+ --archive-bz2-command <ARCHIVE_BZ2_COMMAND>154154+ Command to use to compress bzip2 archives155155+156156+ [env: KNOT_ARCHIVE_BZ2=]157157+ [default: /usr/bin/bzip2]158158+159159+ --archive-xz-command <ARCHIVE_XZ_COMMAND>160160+ Command to use to compress xz archives161161+162162+ [env: KNOT_ARCHIVE_XZ=]163163+ [default: /usr/bin/xz]164164+165165+ -h, --help166166+ Print help (see a summary with '-h')167167+```168168+169169+The only required options are `--owner` and `--name`. By default the public API is bound to 'localhost:5555' and repositories are stored170170+in the current working directory.171171+172172+## Differences to the real knot server173173+174174+### Transport175175+176176+`ssh` transport for `git` operations is currently unsupported.177177+178178+`http` transport is supported for all `git` services (`git fetch`, `git pull`, `git push`, and `git archive`).179179+180180+`git push` operations are authorized using signed [service-auth](https://atproto.com/specs/xrpc#inter-service-authentication-jwt)181181+tokens, which may be validated against an atproto signing-key or a supported `sh.tangled.publicKey` for the corresponding identity.182182+183183+A `git` credential helper is required to generate such tokens. 
a slightly dodgy one may be found in [crates/credential-helper](../credential-helper),184184+which uses `ssh-agent` to create a signed jwt.185185+186186+### Signed `git` Pushes187187+188188+`gordian-knot` requires pushes to be accompanied by push certificate signed by a key from a `sh.tangled.publicKey` record associated with the189189+authorized identity.190190+191191+See:192192+- <https://git-scm.com/docs/git-push#documentation/git-push.txt---signed>193193+- <https://git-scm.com/docs/githooks/2.27.0#pre-receive>194194+- <https://git-scm.com/docs/git-receive-pack#_pre_receive_hook>195195+196196+This requirement can be disabled by the knot operator with the `--require-signed-push=false` argument.197197+
+285
crates/gordian-knot/src/cli.rs
use clap::{ArgAction, Args, CommandFactory, Parser, Subcommand, ValueEnum, ValueHint};
use clap_complete::Shell;
use core::fmt;
use gix::bstr::BString;
use gordian_identity::HttpClient;
use gordian_knot::model::config::{DEFAULT_READMES, KnotConfiguration, RepoCacheConfig};
use gordian_types::OwnedDid;
use std::{env, path::PathBuf, time::Duration};
use url::Url;

/// Parse the command line, handling `generate` eagerly (prints completions and
/// exits) and normalizing empty archive-command overrides to `None` for `serve`.
pub fn parse() -> KnotCommand {
    match Arguments::parse().command {
        KnotCommand::Generate(arguments) => {
            let mut command = Arguments::command();
            let name = command.get_name().to_string();
            clap_complete::generate(arguments.shell, &mut command, name, &mut std::io::stdout());
            std::process::exit(0);
        }
        KnotCommand::Serve(mut arguments) => {
            // An explicitly empty value (e.g. KNOT_ARCHIVE_BZ2="") means
            // "disable this compressor", so collapse it to None.
            if let Some("") = arguments.archive_bz2_command.as_deref() {
                arguments.archive_bz2_command = None;
            }

            if let Some("") = arguments.archive_xz_command.as_deref() {
                arguments.archive_xz_command = None;
            }

            KnotCommand::Serve(arguments)
        }
        hook @ KnotCommand::Hook(_) => hook,
    }
}

#[derive(Debug, Parser)]
#[command(about, author, version)]
pub struct Arguments {
    #[clap(subcommand)]
    command: KnotCommand,
}

#[derive(Debug, Subcommand, Clone)]
pub enum KnotCommand {
    Generate(GenerateArguments),
    Serve(ServeArguments),
    Hook(HookArguments),
}

/// Generate shell completions.
#[derive(Clone, Debug, Args)]
pub struct GenerateArguments {
    shell: Shell,
}

/// Serve the tangled knot.
#[derive(Clone, Debug, Args)]
pub struct ServeArguments {
    /// FQDN of the knot.
    #[arg(long, short, value_hint = ValueHint::Hostname, env = "KNOT_NAME")]
    #[cfg_attr(debug_assertions, arg(default_value = "localhost:5555"))]
    pub name: String,

    /// Handle or DID of the knot owner.
    // NOTE(review): the field type is OwnedDid, so a handle would fail to
    // parse here despite the help text — confirm intended behavior.
    #[arg(long, short, env = "KNOT_OWNER")]
    pub owner: OwnedDid,

    /// Base path for repositories.
    #[arg(long, short, value_hint = ValueHint::DirPath, env = "KNOT_REPO_BASE")]
    #[arg(default_value = default_repository_base().into_os_string())]
    pub repos: PathBuf,

    /// Path to knot-level git hooks.
    #[arg(long, short = 'H', value_hint = ValueHint::DirPath, env = "KNOT_HOOKS_PATH")]
    pub hooks: Option<PathBuf>,

    /// Path to knot-level git config.
    #[arg(long, value_hint = ValueHint::FilePath, env = "KNOT_GIT_CONFIG_PATH")]
    #[arg(default_value = default_repository_base().join("git_config").into_os_string())]
    pub git_config: PathBuf,

    /// Address to bind the public knot API.
    #[arg(long, value_delimiter = ',', env = "KNOT_ADDR")]
    #[arg(default_value = "localhost:5555")]
    pub bind: Vec<String>,

    /// Path to the knot sqlite database.
    #[arg(long, env = "KNOT_DATABASE_PATH", default_value = "knot.db")]
    pub db: PathBuf,

    /// PLC directory for DID resolution.
    #[arg(long, value_hint = ValueHint::Url, env = "KNOT_PLC_DIRECTORY")]
    #[arg(default_value = "https://plc.directory")]
    pub plc_directory: String,

    #[arg(long, short, value_delimiter = ',', value_hint = ValueHint::Url, env = "KNOT_JETSTREAM")]
    #[arg(default_value = default_jetstream_instances())]
    pub jetstream: Vec<String>,

    /// Acceptable authorization methods for git pushes over http.
    #[arg(hide = true, long, require_equals = true, value_delimiter = ',')]
    #[arg(env = "KNOT_AUTH_METHODS")]
    #[arg(default_value = "service-auth,public-key")]
    pub auth_methods: Vec<AuthenticationMethods>,

    /// Require git pushes to be signed by a public key from a 'sh.tangled.publicKey'.
    ///
    /// See: <https://git-scm.com/docs/git-push#Documentation/git-push.txt---signed>
    #[arg(long, action = ArgAction::Set, require_equals = true)]
    #[arg(default_value_t = true)]
    pub require_signed_push: bool,

    /// Number of open repository handles to cache.
    ///
    /// Keeping open handles reduces the overhead of opening a repository at the
    /// expense of increased memory usage.
    #[arg(long, env = "KNOT_REPO_CACHE_SIZE", default_value_t = 0)]
    pub repo_cache_size: u64,

    /// Seconds to retain an idle repository handle in cache.
    #[arg(long, env = "KNOT_REPO_CACHE_IDLE", default_value_t = 60)]
    pub repo_cache_idle: u64,

    /// Seconds to retain a repository handle in cache.
    #[arg(long, env = "KNOT_REPO_CACHE_LIVE", default_value_t = 600)]
    pub repo_cache_live: u64,

    /// Command to use to compress bzip2 archives.
    #[arg(long, env = "KNOT_ARCHIVE_BZ2", default_value = find_command("bzip2").unwrap_or_default())]
    pub archive_bz2_command: Option<String>,

    /// Command to use to compress xz archives.
    #[arg(long, env = "KNOT_ARCHIVE_XZ", default_value = find_command("xz").unwrap_or_default())]
    pub archive_xz_command: Option<String>,
}

/// Locate `name` on PATH via the external `which` utility, returning the
/// trimmed absolute path on success.
// NOTE(review): shells out to `which`, which may be absent on minimal
// systems; the defaults above degrade to empty (treated as "disabled").
fn find_command(name: &str) -> Option<String> {
    use std::process::Command;

    let output = Command::new("which").arg(name).output().ok()?;
    if !output.status.success() {
        return None;
    }

    let full_path = String::from_utf8(output.stdout).ok()?;
    Some(full_path.trim().to_string())
}

impl ServeArguments {
    /// Build the knot runtime configuration from the serve arguments.
    ///
    /// The instance identity is derived from the FQDN as `did:web:<name>`;
    /// parsing that DID is the only fallible step (see [`Error::Name`]).
    pub fn to_knot_config(&self) -> Result<KnotConfiguration, Error> {
        // Exhaustive destructure so adding a ServeArguments field forces a
        // decision about whether it feeds the knot configuration.
        let Self {
            name,
            owner,
            repos: repo_path,
            hooks: _,
            git_config,
            bind: _,
            db: _,
            plc_directory: _,
            jetstream: _,
            auth_methods: _,
            require_signed_push: _,
            repo_cache_size,
            repo_cache_idle,
            repo_cache_live,
            archive_bz2_command: _,
            archive_xz_command: _,
        } = self.clone();

        // @TODO Validate?

        let instance = format!("did:web:{name}").parse()?;

        Ok(KnotConfiguration {
            owner,
            instance,
            repo_path,
            git_config,
            readmes: DEFAULT_READMES
                .iter()
                .map(|v| BString::new(v.to_vec()))
                .collect(),
            repo_cache: RepoCacheConfig {
                size: repo_cache_size,
                idle: Duration::from_secs(repo_cache_idle),
                live: Duration::from_secs(repo_cache_live),
            },
        })
    }

    /// Build an identity resolver over `http`, panicking (by design, at
    /// startup) if the configured PLC directory is not an http(s) URL.
    pub fn init_resolver(&self, http: HttpClient) -> gordian_identity::Resolver {
        let plc_url = Url::parse(&self.plc_directory).expect("PLC directory should be a valid URL");
        assert!(["http", "https"].contains(&plc_url.scheme()));

        gordian_identity::Resolver::builder()
            .plc_directory(self.plc_directory.clone())
            .build_with(http)
    }
}

#[derive(Debug, thiserror::Error)]
pub enum Error {
    #[error("unable to build 'did:web:{{name}}' from knot fqdn: {0}")]
    Name(#[from] gordian_types::did::Error),
}

#[derive(Clone, Debug, ValueEnum)]
pub enum AuthenticationMethods {
    ServiceAuth,
    PublicKey,
}

// Default repository base: the process's current working directory.
fn default_repository_base() -> PathBuf {
    env::current_dir().expect("current working directory should be readable")
}

// Comma-joined default Jetstream endpoints, matching --jetstream's delimiter.
fn default_jetstream_instances() -> String {
    gordian_jetstream::PUBLIC_JETSTREAM_INSTANCES.join(",")
}

/// Forward a git hook to the internal API.
///
/// This command is expected to be invoked by git during operations via
/// the global hook shims.
#[derive(Clone, Args)]
pub struct HookArguments {
    /// Internal API endpoints.
    #[arg(long, value_delimiter = ',', env = gordian_knot::private::ENV_PRIVATE_ENDPOINTS)]
    pub api: Vec<Url>,

    /// DID of the repository owner.
    #[arg(long, env = gordian_knot::private::ENV_REPO_DID)]
    pub repo_did: OwnedDid,

    /// Record key of the repository.
    #[arg(long, env = gordian_knot::private::ENV_REPO_RKEY)]
    pub repo_rkey: String,

    /// Name of the hook to forward.
    pub hook: HookName,
}

impl fmt::Debug for HookArguments {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("HookArguments")
            // Suppress `url::Url`'s god-awful debug output.
            .field("api", &self.api.iter().map(Url::as_str).collect::<Vec<_>>())
            .field("repo_did", &self.repo_did)
            .field("repo_rkey", &self.repo_rkey)
            .field("hook", &self.hook)
            .finish()
    }
}

/// The git hooks the knot installs shims for.
#[derive(Clone, Copy, Debug, ValueEnum)]
#[clap(rename_all = "kebab-case")]
pub enum HookName {
    PreReceive,
    PostReceive,
    PostUpdate,
}

impl fmt::Display for HookName {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(match self {
            Self::PreReceive => "pre-receive",
            Self::PostReceive => "post-receive",
            Self::PostUpdate => "post-update",
        })
    }
}

// Lets a HookName be used directly as a hook-file name in Path::join.
impl AsRef<std::path::Path> for HookName {
    fn as_ref(&self) -> &std::path::Path {
        std::path::Path::new(match self {
            Self::PreReceive => "pre-receive",
            Self::PostReceive => "post-receive",
            Self::PostUpdate => "post-update",
        })
    }
}

impl HookName {
    /// Iterate every hook variant (used when installing the shim scripts).
    pub fn iter_variants() -> impl Iterator<Item = HookName> {
        [Self::PreReceive, Self::PostReceive, Self::PostUpdate].into_iter()
    }
}
+145
crates/gordian-knot/src/hooks.rs
···11+use std::{22+ collections::HashMap,33+ env,44+ fs::{self, Permissions},55+ io::{self, Write},66+ os::unix::fs::PermissionsExt,77+ path::Path,88+};99+1010+use axum::http::{HeaderMap, HeaderName, HeaderValue, header::InvalidHeaderName};1111+use bytes::Bytes;1212+use gordian_knot::private;1313+1414+use crate::cli::{HookArguments, HookName};1515+1616+/// Setup the global hooks directory at `path`.1717+pub fn setup_global_hooks<P: AsRef<Path>>(path: P) -> io::Result<()> {1818+ let executable = env::current_exe()1919+ .map(|path| path.to_str().map(ToOwned::to_owned))2020+ .expect("Current executable must be defined")2121+ .expect("Current executable must be valid utf8");2222+2323+ let _ = fs::create_dir_all(&path);2424+ for hook_name in HookName::iter_variants() {2525+ let hook_path = path.as_ref().join(hook_name);2626+ let script = format!(2727+ "#!/usr/bin/sh\n# This file is generated by gordian-knot. Do not modify.\n{executable} hook {hook_name}\n"2828+ );2929+ std::fs::write(&hook_path, script)?;3030+3131+ let permissions = Permissions::from_mode(0o755);3232+ std::fs::set_permissions(&hook_path, permissions)?;3333+ tracing::info!(?executable, ?hook_path, "git hook installed");3434+ }3535+ Ok(())3636+}3737+3838+/// [`core::fmt::Debug`] an [`url::Url`] without causing eye-cancer.3939+#[repr(transparent)]4040+struct DebugUrl(url::Url);4141+4242+impl core::fmt::Debug for DebugUrl {4343+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {4444+ core::fmt::Display::fmt(&self.0, f)4545+ }4646+}4747+4848+/// [`core::fmt::Debug`] a slice [`url::Url`] without causing eye-cancer.4949+pub struct DebugUrls<'a>(pub &'a [url::Url]);5050+5151+impl<'a> core::fmt::Debug for DebugUrls<'a> {5252+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {5353+ let urls = unsafe {5454+ // SAFETY: Close your eyes an pray!5555+ &*(self.0 as *const [url::Url] as *const [DebugUrl])5656+ };5757+ core::fmt::Debug::fmt(&urls, f)5858+ 
}5959+}6060+6161+#[tracing::instrument(fields(api = ?DebugUrls(&api)))]6262+pub async fn run_hook(6363+ HookArguments {6464+ api,6565+ repo_did,6666+ repo_rkey,6767+ hook,6868+ }: HookArguments,6969+) -> anyhow::Result<()> {7070+ if api.is_empty() {7171+ tracing::warn!("internal API not specified, skipping hook");7272+ return Ok(());7373+ };7474+7575+ let mut environment_vars: HashMap<_, _> = env::vars()7676+ .filter(|(key, _)| !key.trim().is_empty())7777+ .collect();7878+7979+ let request_id = take_var(&mut environment_vars, "X_REQUEST_ID").ok();8080+8181+ // Build a header map with the remaining environment variables.8282+ let mut headers = HeaderMap::with_capacity(environment_vars.len());8383+ if let Some(request_id) = request_id {8484+ headers.insert("X-Request-ID", HeaderValue::from_str(&request_id)?);8585+ }8686+8787+ for (key, value) in environment_vars {8888+ match (variable_to_header_name(&key), HeaderValue::try_from(&value)) {8989+ (Ok(key), Ok(value)) => _ = headers.insert(key, value),9090+ (Err(error), _) => tracing::warn!(?error, ?key, ?value, "ignoring header"),9191+ (_, Err(error)) => tracing::warn!(?error, ?key, ?value, "ignoring header"),9292+ }9393+ }9494+9595+ let stdin = Bytes::from(io::read_to_string(io::stdin())?);9696+9797+ let client = reqwest::Client::new();9898+ let url_path = format!("/hook/{repo_did}/{repo_rkey}/{hook}");9999+ for mut hook_url in api {100100+ hook_url.set_path(&url_path);101101+ let response = client102102+ .post(hook_url)103103+ .headers(headers.clone())104104+ .body(stdin.clone())105105+ .send()106106+ .await;107107+108108+ match response {109109+ Ok(response) if response.status().is_success() => {110110+ let body = response.bytes().await?;111111+ io::stdout().write_all(&body)?;112112+ return Ok(());113113+ }114114+ Ok(response) => {115115+ let status = response.status();116116+ let body = response.bytes().await?;117117+ io::stdout().write_all(&body)?;118118+ return Err(anyhow::anyhow!("Knot returned error status 
{status}"));119119+ }120120+ Err(error) => {121121+ tracing::error!(?error, "failed to post hook to internal API");122122+ continue;123123+ }124124+ }125125+ }126126+127127+ Err(anyhow::anyhow!("Failed to find a valid internal endpoint"))128128+}129129+130130+fn take_var(vars: &mut HashMap<String, String>, name: &str) -> anyhow::Result<String> {131131+ vars.remove(name).ok_or(anyhow::anyhow!(132132+ "Expected environment variable {name:?} to be set",133133+ ))134134+}135135+136136+fn variable_to_header_name(name: &str) -> Result<HeaderName, InvalidHeaderName> {137137+ format!(138138+ "{}-{}",139139+ private::ENV_HEADER_PREFIX,140140+ name.trim_start_matches("GORDIAN_")141141+ )142142+ .replace('_', "-")143143+ .to_lowercase()144144+ .try_into()145145+}
+687
crates/gordian-knot/src/lib.rs
use std::io;

use axum::Router;
use gordian_types::Nsid;
use tokio::{net::TcpListener, task::JoinSet};
use tokio_util::sync::CancellationToken;

pub mod extractors;
pub mod model;
pub mod private;
pub mod public;
pub mod services;
pub mod sync;
pub mod types;
mod util;

#[cfg(test)]
pub(crate) mod mock;

/// Compile-time constants for the NSIDs this crate cares about.
pub mod nsid {
    use gordian_types::Nsid;

    // Builds a `&'static Nsid` from a literal without runtime validation.
    // SAFETY relies on every literal below being a well-formed NSID.
    macro_rules! nsid {
        ($nsid:literal) => {
            unsafe { Nsid::from_static_unchecked($nsid) }
        };
    }

    pub const SH_TANGLED_KNOT_MEMBER: &Nsid = nsid!("sh.tangled.knot.member");
    pub const SH_TANGLED_PUBLICKEY: &Nsid = nsid!("sh.tangled.publicKey");
    pub const SH_TANGLED_REPO: &Nsid = nsid!("sh.tangled.repo");
    pub const SH_TANGLED_REPO_COLLABORATOR: &Nsid = nsid!("sh.tangled.repo.collaborator");
    pub const SH_TANGLED_REPO_CREATE: &Nsid = nsid!("sh.tangled.repo.create");
    pub const SH_TANGLED_REPO_DELETE: &Nsid = nsid!("sh.tangled.repo.delete");
    pub const SH_TANGLED_REPO_GITRECEIVEPACK: &Nsid = nsid!("sh.tangled.repo.gitReceivePack");
    pub const SH_TANGLED_REPO_SETDEFAULTBRANCH: &Nsid = nsid!("sh.tangled.repo.setDefaultBranch");
}

pub use gordian_lexicon as lexicon;

/// NSIDs of interest to a knot server.
pub const NSIDS: &[&Nsid] = {
    &[
        nsid::SH_TANGLED_KNOT_MEMBER,
        nsid::SH_TANGLED_PUBLICKEY,
        nsid::SH_TANGLED_REPO,
        nsid::SH_TANGLED_REPO_COLLABORATOR,
    ]
};

/// Serve `router` on every listener, shutting all of them down gracefully when
/// `shutdown` is cancelled.
///
/// Each listener gets a clone of the router and a child cancellation token.
/// Returns the first I/O error from any server task, or `Ok(())` once all
/// servers have exited.
pub async fn serve_all(
    router: Router,
    listeners: impl IntoIterator<Item = TcpListener>,
    shutdown: CancellationToken,
) -> io::Result<()> {
    let mut service = JoinSet::new();
    for listener in listeners {
        let router = router.clone();
        let addr = listener.local_addr()?;
        tracing::info!(?addr, "listening on socket");

        let shutdown = shutdown.child_token();
        service.spawn(async move {
            axum::serve(listener, router)
                .with_graceful_shutdown(async move { shutdown.cancelled().await })
                .await
        });
    }

    for task in service.join_all().await {
        task?;
    }

    Ok(())
}

#[cfg(test)]
mod tests {
    use gordian_auth::jwt::Claims;
    use gordian_lexicon::sh_tangled;
    use gordian_types::{Did, Tid};

    use axum::{
        body::Body,
        http::{Request, StatusCode},
    };
    use time::{OffsetDateTime, format_description::well_known::Rfc3339};
    use tower::ServiceExt;

    use crate::model::Knot;

    const TEST_DID: &str = "did:plc:65gha4t3avpfpzmvpbwovss7";
    const TEST_INSTANCE: &str = "lib-knot-test";

    /// Build a GET request with an empty body for `uri`.
    fn get(uri: &str) -> Request<Body> {
        Request::builder().uri(uri).body(Body::empty()).unwrap()
    }

    #[tokio::test]
    async fn can_query_knot_owner() {
        let (_, _, knot) = crate::mock::setup(TEST_DID, TEST_INSTANCE).await;
        let response = super::public::router()
            .with_state(knot)
            .oneshot(get("/xrpc/sh.tangled.owner"))
            .await
            .unwrap();

        assert_eq!(response.status(), StatusCode::OK);
        let body = axum::body::to_bytes(response.into_body(), 1000)
            .await
            .unwrap();

        // Check the raw JSON bytes as well as the typed deserialization.
        assert_eq!(
            body.as_ref(),
            format!("{{\"owner\":\"{TEST_DID}\"}}").as_bytes()
        );

        let resp: sh_tangled::owner::Output = serde_json::from_slice(&body).unwrap();
        assert_eq!(resp.owner.as_str(), TEST_DID);
    }

    #[tokio::test]
    async fn xrpc_sh_tangled_repo_missing_repo() {
        let (_, _, knot) = crate::mock::setup(TEST_DID, TEST_INSTANCE).await;
        for particle in ["tree", "log", "tags", "branches"] {
            // No `repo` query parameter at all.
            let response = super::public::router()
                .with_state(knot.clone())
                .oneshot(get(&format!("/xrpc/sh.tangled.repo.{particle}")))
                .await
                .unwrap();

            assert_eq!(response.status(), StatusCode::BAD_REQUEST);
        }
    }

    #[tokio::test]
    async fn xrpc_sh_tangled_repo_bad_repo_format() {
        let (_, _, knot) = crate::mock::setup(TEST_DID, TEST_INSTANCE).await;
        for particle in ["tree", "log", "tags", "branches"] {
            // Missing repo name
            let response = super::public::router()
                .with_state(knot.clone())
                .oneshot(get(&format!(
                    "/xrpc/sh.tangled.repo.{particle}?repo=did:web:example.com"
                )))
                .await
                .unwrap();

            assert_eq!(response.status(), StatusCode::BAD_REQUEST);

            // Reject empty, hidden, path-traversal, and absolute-path repo names.

            for repo_name in ["", "..", "../../secret-data", ".hidden", "/etc/passwd"] {
                let response = super::public::router()
                    .with_state(knot.clone())
                    .oneshot(get(&format!(
                        "/xrpc/sh.tangled.repo.{particle}?repo=did:web:example.com/{repo_name}"
                    )))
                    .await
                    .unwrap();

                assert_eq!(response.status(), StatusCode::BAD_REQUEST);
            }
        }
    }

    #[tokio::test]
    async fn xrpc_sh_tangled_repo_not_found() {
        let (_, _, knot) = crate::mock::setup(TEST_DID, TEST_INSTANCE).await;
        for particle in ["tree", "log", "tags", "branches"] {
            let response = super::public::router()
                .with_state(knot.clone())
                .oneshot(get(&format!(
                    "/xrpc/sh.tangled.repo.{particle}?repo=did:web:example.com/non-existent-repo"
                )))
                .await
                .unwrap();

            assert_eq!(response.status(), StatusCode::NOT_FOUND);
        }
    }

    /// Tests for the `sh.tangled.repo.create` / `.delete` XRPC endpoints,
    /// including service-auth (JWT claims) validation.
    mod sh_tangled_repo_create {
        use crate::nsid::{SH_TANGLED_REPO_CREATE, SH_TANGLED_REPO_DELETE};

        use super::super::public;
        use super::*;
        use axum::http::{HeaderValue, Method, Response, header};

        /// Build a fresh set of service-auth claims (10 s validity, random jti),
        /// then let `modify_claims` tweak them.
        fn make_claims<F>(iss: &Did, aud: &Did, modify_claims: F) -> Claims
        where
            F: FnOnce(&mut Claims),
        {
            let jti: [u8; 16] = rand::random();
            let jti = data_encoding::BASE32_NOPAD_VISUAL
                .encode(&jti)
                .to_lowercase();

            let mut claims = Claims {
                iss: iss.into(),
                aud: aud.into(),
                iat: OffsetDateTime::now_utc().unix_timestamp(),
                exp: OffsetDateTime::now_utc().unix_timestamp() + 10,
                lxm: None,
                jti: jti.into(),
            };

            modify_claims(&mut claims);
            claims
        }

        /// Produce an `Authorization` header value signed by the mock PDS for
        /// the given (possibly modified) claims.
        async fn service_auth_with<F>(
            pds: &mock_pds::Pds,
            iss: &Did,
            aud: &Did,
            modify_claims: F,
        ) -> HeaderValue
        where
            F: FnOnce(&mut Claims),
        {
            let claims = make_claims(iss, aud, modify_claims);
            let authorization = pds.service_auth(&claims).await;
            HeaderValue::from_str(&authorization).unwrap()
        }

        #[tokio::test]
        async fn reject_wrong_method() {
            // `repo.create` is POST-only; a GET must be rejected.
            let (_, _, knot) = crate::mock::setup(TEST_DID, TEST_INSTANCE).await;
            let response = public::router()
                .with_state(knot.clone())
                .oneshot(get("/xrpc/sh.tangled.repo.create"))
                .await
                .unwrap();

            assert_eq!(response.status(), StatusCode::METHOD_NOT_ALLOWED);
        }

        /// Seed a `sh.tangled.repo` record on the mock PDS, then POST a
        /// `sh.tangled.repo.create` request authorized with (modified) claims.
        ///
        /// NOTE(review): `source` only seeds the PDS record; the create request
        /// body itself always sends `source: None` — confirm this is intentional.
        async fn create_repo_with<F>(
            knot: &Knot,
            pds: mock_pds::Pds,
            did: &Did,
            rkey: &str,
            repo_name: &str,
            source: Option<&str>,
            modify_claims: F,
        ) -> Response<Body>
        where
            F: Fn(&mut Claims) + Copy,
        {
            // Create fake PDS record for our new repository.
            pds.insert_record(
                did,
                "sh.tangled.repo",
                rkey,
                &serde_json::json!({
                    "name": repo_name,
                    "knot": knot.instance_ident(),
                    "source": source,
                    "createdAt": OffsetDateTime::now_utc().format(&Rfc3339).unwrap()
                }),
            )
            .await;

            // Generate the body of the 'sh.tangled.repo.create' request.
            let create = sh_tangled::repo::create::Input {
                rkey: rkey.to_string(),
                default_branch: Some("main".into()),
                source: None,
            };

            let auth = service_auth_with(&pds, &did, &knot.instance, |claims| {
                claims.lxm = Some(SH_TANGLED_REPO_CREATE.into_boxed());
                modify_claims(claims);
            })
            .await;

            let response = public::router()
                .with_state(knot.clone())
                .oneshot(
                    Request::post("/xrpc/sh.tangled.repo.create")
                        .header(header::AUTHORIZATION, auth)
                        .header(header::CONTENT_TYPE, "application/json")
                        .body(Body::new(serde_json::to_string(&create).unwrap()))
                        .expect("sh.tangled.repo.create request"),
                )
                .await
                .expect("xrpc response");

            response
        }

        /// [`create_repo_with`] with unmodified claims.
        async fn create_repo(
            knot: &Knot,
            pds: mock_pds::Pds,
            did: &Did,
            rkey: &str,
            repo_name: &str,
            source: Option<&str>,
        ) -> Response<Body> {
            create_repo_with(knot, pds, did, rkey, repo_name, source, |_| {}).await
        }

        /// True if the knot's database can resolve the repository key.
        async fn repo_exists_in_db(knot: &Knot, did: &Did, rkey: &str) -> bool {
            knot.resolve_repo_key(&crate::types::repository_path::RepositoryPath {
                owner: did.into_boxed().into(),
                name: rkey.into(),
            })
            .await
            .is_ok()
        }

        #[tokio::test]
        async fn can_create_repo() {
            let (_base, pds, knot) = crate::mock::setup(TEST_DID, TEST_INSTANCE).await;

            let did = Did::from_static(TEST_DID);
            pds.insert_identity(did, "tjh.dev").await;
            knot.add_member(
                "",
                "",
                "",
                &sh_tangled::knot::Member::new(
                    &did,
                    knot.instance_ident(),
                    OffsetDateTime::now_utc(),
                ),
            )
            .await
            .unwrap();

            let rkey = Tid::from_datetime(OffsetDateTime::now_utc(), 0).to_string();
            assert_eq!(
                create_repo(&knot, pds, did, &rkey, "test-repo", None)
                    .await
                    .status(),
                StatusCode::OK
            );

            assert!(repo_exists_in_db(&knot, &did, &rkey).await);
        }

        #[tokio::test]
        async fn can_create_fork_from_at() {
            let (_base, pds, knot) = crate::mock::setup(TEST_DID, TEST_INSTANCE).await;

            let did = Did::from_static(TEST_DID);
            pds.insert_identity(did, "tjh.dev").await;
            knot.add_member(
                "",
                "",
                "",
                &sh_tangled::knot::Member::new(
                    &did,
                    knot.instance_ident(),
                    OffsetDateTime::now_utc(),
                ),
            )
            .await
            .unwrap();

            // Create a record for the repository to fork from.
            // <https://pdsls.dev/at://did:plc:65gha4t3avpfpzmvpbwovss7/sh.tangled.repo/3m24udbjajf22#record>
            let aturi = pds
                .insert_record(
                    did,
                    "sh.tangled.repo",
                    "3m24udbjajf22",
                    &serde_json::json!({
                        "name": "gordian",
                        "knot": "gordian.tjh.dev",
                        "createdAt": "2025-10-01T10:45:52Z"
                    }),
                )
                .await;

            let rkey = Tid::from_datetime(OffsetDateTime::now_utc(), 0).to_string();
            assert_eq!(
                create_repo(&knot, pds, did, &rkey, "test-repo", Some(&aturi))
                    .await
                    .status(),
                StatusCode::OK
            );

            assert!(repo_exists_in_db(&knot, &did, &rkey).await);
        }

        #[tokio::test]
        async fn can_create_fork_from_http() {
            let (_base, pds, knot) = crate::mock::setup(TEST_DID, TEST_INSTANCE).await;

            let did = Did::from_static(TEST_DID);
            pds.insert_identity(did, "tjh.dev").await;
            knot.add_member(
                "",
                "",
                "",
                &sh_tangled::knot::Member::new(
                    &did,
                    knot.instance_ident(),
                    OffsetDateTime::now_utc(),
                ),
            )
            .await
            .unwrap();

            let rkey = Tid::from_datetime(OffsetDateTime::now_utc(), 0).to_string();
            let source =
                Some("https://gordian.tjh.dev/did:plc:65gha4t3avpfpzmvpbwovss7/3m24udbjajf22");
            assert_eq!(
                create_repo(&knot, pds, did, &rkey, "test-repo", source)
                    .await
                    .status(),
                StatusCode::OK
            );

            assert!(repo_exists_in_db(&knot, &did, &rkey).await);
        }

        #[tokio::test]
        async fn can_create_fork_from_http_fail() {
            let (base, pds, knot) = crate::mock::setup(TEST_DID, TEST_INSTANCE).await;

            let did = Did::from_static(TEST_DID);
            pds.insert_identity(did, "tjh.dev").await;
            knot.add_member(
                "",
                "",
                "",
                &sh_tangled::knot::Member::new(
                    &did,
                    knot.instance_ident(),
                    OffsetDateTime::now_utc(),
                ),
            )
            .await
            .unwrap();

            let rkey = Tid::from_datetime(OffsetDateTime::now_utc(), 0).to_string();
            // Note: the DID in this source URL is deliberately mangled
            // (missing a character) so the fork source cannot resolve.
            let source =
                Some("https://gordian.tjh.dev/did:plc:65gha4t3avpfpmvpbwovss7/3m24udbjajf22");

            assert_ne!(
                create_repo(&knot, pds, did, &rkey, "test-repo", source)
                    .await
                    .status(),
                StatusCode::OK
            );

            // Verify the repository wasn't created on disk.
            assert!(
                std::fs::exists(base.path().join(did.as_str()).join(&rkey)).is_ok_and(|val| !val),
            );

            assert!(!repo_exists_in_db(&knot, &did, &rkey).await);
        }

        #[tokio::test]
        async fn rejects_if_owner_is_not_a_member() {
            // No `add_member` call here — creation must fail for non-members.
            let (_base, pds, knot) = crate::mock::setup(TEST_DID, TEST_INSTANCE).await;

            let did = Did::from_static(TEST_DID);
            pds.insert_identity(did, "tjh.dev").await;

            let rkey = Tid::from_datetime(OffsetDateTime::now_utc(), 0).to_string();
            assert_ne!(
                create_repo_with(&knot, pds, did, &rkey, "test-repo", None, |_| {})
                    .await
                    .status(),
                StatusCode::OK,
            );

            assert!(!repo_exists_in_db(&knot, &did, &rkey).await);
        }

        #[tokio::test]
        async fn rejects_auth_issued_in_future() {
            let (_base, pds, knot) = crate::mock::setup(TEST_DID, TEST_INSTANCE).await;

            let did = Did::from_static(TEST_DID);
            pds.insert_identity(did, "tjh.dev").await;
            knot.add_member(
                "",
                "",
                "",
                &sh_tangled::knot::Member::new(
                    &did,
                    knot.instance_ident(),
                    OffsetDateTime::now_utc(),
                ),
            )
            .await
            .unwrap();

            let rkey = Tid::from_datetime(OffsetDateTime::now_utc(), 0).to_string();
            assert_eq!(
                create_repo_with(&knot, pds, did, &rkey, "test-repo", None, |claims| {
                    claims.iat = OffsetDateTime::now_utc().unix_timestamp() + 60;
                })
                .await
                .status(),
                StatusCode::FORBIDDEN,
                "iat > now => should be 403 Forbidden"
            );

            assert!(!repo_exists_in_db(&knot, &did, &rkey).await);
        }

        #[tokio::test]
        async fn rejects_auth_expired() {
            let (_base, pds, knot) = crate::mock::setup(TEST_DID, TEST_INSTANCE).await;

            let did = Did::from_static(TEST_DID);
            pds.insert_identity(did, "tjh.dev").await;
            knot.add_member(
                "",
                "",
                "",
                &sh_tangled::knot::Member::new(
                    &did,
                    knot.instance_ident(),
                    OffsetDateTime::now_utc(),
                ),
            )
            .await
            .unwrap();

            let rkey = Tid::from_datetime(OffsetDateTime::now_utc(), 0).to_string();
            assert_eq!(
                create_repo_with(&knot, pds, did, &rkey, "test-repo", None, |claims| {
                    claims.exp = OffsetDateTime::now_utc().unix_timestamp() - 1;
                })
                .await
                .status(),
                StatusCode::FORBIDDEN,
                "exp < now => should be 403 Forbidden"
            );
        }

        #[tokio::test]
        async fn can_delete_repo() {
            let (base, pds, knot) = crate::mock::setup(TEST_DID, TEST_INSTANCE).await;

            let did = Did::from_static(TEST_DID);
            pds.insert_identity(did, "tjh.dev").await;
            knot.add_member(
                "",
                "",
                "",
                &sh_tangled::knot::Member::new(
                    &did,
                    knot.instance_ident(),
                    OffsetDateTime::now_utc(),
                ),
            )
            .await
            .unwrap();

            let rkey = Tid::from_datetime(OffsetDateTime::now_utc(), 0).to_string();
            let name = "another-test-repo";
            assert_eq!(
                create_repo(&knot, pds.clone(), did, &rkey, name, None)
                    .await
                    .status(),
                StatusCode::OK
            );

            gix::open(base.path().join(did.as_str()).join(&rkey))
                .expect("new repository should exist");
            assert!(repo_exists_in_db(&knot, &did, &rkey).await);

            let delete = sh_tangled::repo::delete::Input {
                did: did.to_owned(),
                rkey: rkey.clone(),
                name: "another-test-repo".to_string(),
            };

            // First check we cannot delete without auth.
            assert_eq!(
                public::router()
                    .with_state(knot.clone())
                    .oneshot(
                        Request::builder()
                            .method(Method::POST)
                            .uri("/xrpc/sh.tangled.repo.delete")
                            .header(header::CONTENT_TYPE, "application/json")
                            .body(Body::new(serde_json::to_string(&delete).unwrap()))
                            .expect("sh.tangled.repo.delete request"),
                    )
                    .await
                    .expect("xrpc response")
                    .status(),
                StatusCode::UNAUTHORIZED
            );

            // Check repository has not been deleted.
            gix::open(base.path().join(did.as_str()).join(&rkey)).expect("repository should exist");
            assert!(repo_exists_in_db(&knot, &did, &rkey).await);

            // Or with the wrong lxm.
            let auth = service_auth_with(&pds, &did, &knot.instance(), |claims| {
                claims.lxm = Some(SH_TANGLED_REPO_CREATE.into_boxed());
            })
            .await;

            assert_eq!(
                public::router()
                    .with_state(knot.clone())
                    .oneshot(
                        Request::builder()
                            .method(Method::POST)
                            .uri("/xrpc/sh.tangled.repo.delete")
                            .header(header::CONTENT_TYPE, "application/json")
                            .header(header::AUTHORIZATION, auth)
                            .body(Body::new(serde_json::to_string(&delete).unwrap()))
                            .expect("sh.tangled.repo.delete request"),
                    )
                    .await
                    .expect("xrpc response")
                    .status(),
                StatusCode::FORBIDDEN
            );

            // Check repository has not been deleted.
            gix::open(base.path().join(did.as_str()).join(&rkey)).expect("repository should exist");
            assert!(repo_exists_in_db(&knot, &did, &rkey).await);

            // Valid auth (correct lxm), but an empty request body.
            let auth = service_auth_with(&pds, &did, &knot.instance(), |claims| {
                claims.lxm = Some(SH_TANGLED_REPO_DELETE.into_boxed());
            })
            .await;
            assert_eq!(
                public::router()
                    .with_state(knot.clone())
                    .oneshot(
                        Request::builder()
                            .method(Method::POST)
                            .uri("/xrpc/sh.tangled.repo.delete")
                            .header(header::CONTENT_TYPE, "application/json")
                            .header(header::AUTHORIZATION, auth)
                            .body(Body::empty())
                            .expect("sh.tangled.repo.delete request"),
                    )
                    .await
                    .expect("xrpc response")
                    .status(),
                StatusCode::BAD_REQUEST
            );

            // Check repository has not been deleted.
            gix::open(base.path().join(did.as_str()).join(&rkey)).expect("repository should exist");
            assert!(repo_exists_in_db(&knot, &did, &rkey).await);

            // Finally: correct lxm and a valid body — deletion should succeed.
            let auth = service_auth_with(&pds, &did, &knot.instance(), |claims| {
                claims.lxm = Some("sh.tangled.repo.delete".try_into().unwrap());
            })
            .await;

            assert_eq!(
                public::router()
                    .with_state(knot.clone())
                    .oneshot(
                        Request::builder()
                            .method(Method::POST)
                            .uri("/xrpc/sh.tangled.repo.delete")
                            .header(header::CONTENT_TYPE, "application/json")
                            .header(header::AUTHORIZATION, auth)
                            .body(Body::new(serde_json::to_string(&delete).unwrap()))
                            .expect("sh.tangled.repo.delete request"),
                    )
                    .await
                    .expect("xrpc response")
                    .status(),
                StatusCode::OK
            );

            // Check repository has been deleted.
            gix::open(base.path().join(did.as_str()).join(&rkey))
                .expect_err("deleted repository should not exist");
            assert!(!repo_exists_in_db(&knot, &did, &rkey).await);
        }
    }
}
+290
crates/gordian-knot/src/main.rs
mod cli;
mod hooks;

use anyhow::Context as _;
use axum::http::{Request, Response};
use futures_util::FutureExt as _;
use gordian_knot::{
    model::{Knot, KnotState, config::KnotConfiguration},
    services::database::DataStore,
};
use sqlx::sqlite::{SqliteConnectOptions, SqlitePoolOptions};
use std::{env, ffi::OsStr, net::ToSocketAddrs as _, time::Duration};
use tokio::{net::TcpListener, signal::unix::SignalKind, task::JoinSet};
use tokio::{runtime::Builder, signal};
use tokio_util::sync::CancellationToken;
use tower::ServiceBuilder;
use tower_http::{
    ServiceBuilderExt as _,
    decompression::RequestDecompressionLayer,
    request_id::{MakeRequestUuid, RequestId},
    trace::{MakeSpan, OnResponse, TraceLayer},
};
use tracing::{Span, field::Empty, level_filters::LevelFilter};
use tracing_subscriber::{EnvFilter, layer::SubscriberExt as _, util::SubscriberInitExt as _};

#[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))]
use tikv_jemallocator::Jemalloc;

#[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))]
#[global_allocator]
static GLOBAL: Jemalloc = Jemalloc;

// User-agent for outbound HTTP, e.g. "gordian-knot/0.1.0".
const USER_AGENT: &str = concat!(env!("CARGO_PKG_NAME"), "/", env!("CARGO_PKG_VERSION"));

/// Process entry point: initializes stderr tracing, builds a current-thread
/// Tokio runtime, and dispatches on the parsed CLI subcommand.
fn main() -> anyhow::Result<()> {
    tracing_subscriber::registry()
        .with(
            EnvFilter::builder()
                .with_default_directive(LevelFilter::INFO.into())
                .from_env_lossy(),
        )
        .with(
            tracing_subscriber::fmt::layer()
                .with_writer(std::io::stderr)
                .without_time(),
        )
        .init();

    let runtime = Builder::new_current_thread()
        .enable_all()
        .build()
        .expect("Failed to build runtime");

    match cli::parse() {
        // `Generate` is fully processed inside `cli::parse`.
        cli::KnotCommand::Generate(_) => unreachable!("Handled by cli module"),
        cli::KnotCommand::Serve(arguments) => runtime.block_on(knot_main(arguments)),
        cli::KnotCommand::Hook(arguments) => runtime.block_on(hooks::run_hook(arguments)),
    }
}

/// Run the knot server: configure git, open the database, bind public and
/// private listeners, and spawn the API servers, jetstream consumer, and
/// shutdown watcher until all tasks finish.
pub async fn knot_main(arguments: cli::ServeArguments) -> anyhow::Result<()> {
    // SAFETY: presumably called before any other thread reads the environment
    // (the runtime is current-thread and no tasks are spawned yet) — TODO confirm.
    unsafe { env::set_var("GIT_CONFIG_GLOBAL", &arguments.git_config) };

    // Tempdir holds generated hook stubs for the lifetime of the process.
    let tempdir = tempfile::TempDir::with_prefix("gordian-knot-")?;
    let hooks_path = if let Some(path) = &arguments.hooks {
        // @TODO Verify hooks exist in the specified path.
        tracing::warn!(?path, "assuming existence of hooks at path");
        path.to_path_buf()
    } else {
        let path = tempdir.path().join("hooks");
        hooks::setup_global_hooks(&path)?;
        path
    };

    // Required git configuration; a failed `git config` is a startup bug.
    assert!(git_config_global("core.hooksPath", &hooks_path)?);
    assert!(git_config_global("receive.advertisePushOptions", "true")?);
    if let Some(command) = &arguments.archive_bz2_command {
        assert!(git_config_global("tar.tar.bz2.command", command)?);
    }
    if let Some(command) = &arguments.archive_xz_command {
        assert!(git_config_global("tar.tar.xz.command", command)?);
    }

    // Open (creating if necessary) the SQLite database and run migrations.
    let database = {
        let pool = {
            let connect_options = SqliteConnectOptions::new()
                .filename(&arguments.db)
                .create_if_missing(true)
                .foreign_keys(true)
                .journal_mode(sqlx::sqlite::SqliteJournalMode::Wal);

            SqlitePoolOptions::new()
                .connect_with(connect_options)
                .await?
        };

        sqlx::migrate!().run(&pool).await?;
        DataStore::new(pool)
    };

    // HTTPS-only client for outbound requests (identity resolution etc.).
    let public_http = reqwest::ClientBuilder::new()
        .timeout(Duration::from_secs(2))
        .user_agent(USER_AGENT)
        .http2_keep_alive_while_idle(true)
        .https_only(true)
        .build()
        .context("Failed to build public HTTP client")?;

    let resolver = arguments.init_resolver(public_http.clone());

    // Bind listeners for the public API.
    let mut public_listeners = Vec::with_capacity(arguments.bind.len());
    for addr in &arguments.bind {
        for socket in addr.to_socket_addrs()? {
            let listener = TcpListener::bind(socket).await?;
            public_listeners.push(listener);
        }
    }

    // Bind listeners for the private API (ephemeral ports on localhost).
    let mut private_listeners = Vec::with_capacity(2);
    for socket in "localhost:0".to_socket_addrs()? {
        let listener = TcpListener::bind(socket).await?;
        private_listeners.push(listener);
    }

    // The knot needs to know the sockets we've bound the private API.
    let private_addrs = private_listeners
        .iter()
        .map(tokio::net::TcpListener::local_addr)
        .collect::<Result<Vec<_>, std::io::Error>>()?;

    tracing::info!(?private_addrs, "bound internal API");

    let config: KnotConfiguration = arguments.to_knot_config()?;
    let knot_state = KnotState::new(config, resolver, public_http, database, &private_addrs)?;
    let knot = Knot::from(knot_state);

    // Ensure the knot owner's records are seeded.
    knot.seed_owner()
        .await
        .context("seeding knot owner's records")?;

    let mut tasks = JoinSet::new();
    let shutdown = CancellationToken::new();

    // Spawn the internal API.
    tasks.spawn(gordian_knot::serve_all(
        gordian_knot::private::router()
            .layer(
                ServiceBuilder::new()
                    .set_x_request_id(MakeRequestUuid)
                    .layer(
                        TraceLayer::new_for_http()
                            .make_span_with(PrivateHttpSpan)
                            .on_request(|_: &Request<_>, _: &Span| {})
                            .on_response(TraceResponse),
                    )
                    .propagate_x_request_id(),
            )
            .with_state(knot.clone()),
        private_listeners,
        shutdown.child_token(),
    ));

    // Spawn the jetstream consumer.
    tasks.spawn(
        gordian_knot::services::jetstream::init_consumer(
            &knot,
            arguments.jetstream.as_slice(),
            shutdown.child_token(),
        )
        .map(|_| Ok(())),
    );

    // Build the public API.
    let router = gordian_knot::public::router()
        .layer(RequestDecompressionLayer::new())
        .layer(
            ServiceBuilder::new()
                .set_x_request_id(MakeRequestUuid)
                .layer(
                    TraceLayer::new_for_http()
                        .make_span_with(PublicHttpSpan)
                        .on_request(|_: &Request<_>, _: &Span| {})
                        .on_response(TraceResponse),
                )
                .propagate_x_request_id(),
        )
        .with_state(knot);

    tasks.spawn(gordian_knot::serve_all(
        router,
        public_listeners,
        shutdown.child_token(),
    ));

    tasks.spawn(wait_for_shutdown(shutdown));

    // Log (but do not abort on) individual task failures so shutdown is orderly.
    for task in tasks.join_all().await {
        if let Err(error) = task {
            tracing::error!(?error, "knot task completed with error");
        }
    }

    Ok(())
}

/// Wait for Ctrl+C or SIGTERM, then cancel the shutdown token so every
/// server task drains gracefully.
async fn wait_for_shutdown(shutdown: CancellationToken) -> std::io::Result<()> {
    let mut sigterm = signal::unix::signal(SignalKind::terminate())?;

    tokio::select! {
        Ok(()) = signal::ctrl_c() => {
            eprintln!();
            tracing::info!("ctrl+c received, shutting down ...");
        },
        Some(()) = sigterm.recv() => {
            tracing::info!("SIGTERM received, shutting down ...");
        }
    }

    shutdown.cancel();

    Ok(())
}

/// Set a global git configuration value via `git config set --global`.
///
/// Returns `Ok(true)` if git exited successfully, `Ok(false)` otherwise,
/// and an I/O error if git could not be spawned.
///
/// NOTE(review): the absolute path `/usr/bin/git` fails on systems where git
/// is installed elsewhere (e.g. Nix, macOS Homebrew) — confirm intentional.
fn git_config_global<K, V>(key: K, value: V) -> std::io::Result<bool>
where
    K: AsRef<OsStr>,
    V: AsRef<OsStr>,
{
    use std::process::Stdio;

    let success = std::process::Command::new("/usr/bin/git")
        .args(["config", "set", "--global"])
        .arg(key)
        .arg(value)
        .stdout(Stdio::inherit())
        .stderr(Stdio::inherit())
        .spawn()?
        .wait()?
        .success();

    Ok(success)
}

// Generates a `MakeSpan` implementation that opens an error-level span labeled
// `$label` and records the request id, method, and path.
macro_rules! make_span {
    ($name:ident, $label:literal) => {
        #[derive(Clone)]
        struct $name;

        impl<B> MakeSpan<B> for $name {
            fn make_span(&mut self, request: &axum::http::Request<B>) -> tracing::Span {
                let method = request.method();
                let path = request.uri().path();

                let span = tracing::error_span!($label, id = Empty, method = Empty, path = Empty);
                if let Some(id) = request
                    .extensions()
                    .get::<RequestId>()
                    .and_then(|request_id| request_id.header_value().to_str().ok())
                {
                    span.record("id", &id);
                }

                span.record("method", tracing::field::debug(&method));
                span.record("path", tracing::field::debug(&path));

                span
            }
        }
    };
}

make_span!(PublicHttpSpan, "public");
make_span!(PrivateHttpSpan, "private");

/// Logs each response's status and latency at a level matching the status class
/// (trace for 2xx, warn for 4xx, error for 5xx, info otherwise).
#[derive(Clone)]
pub struct TraceResponse;

impl<B> OnResponse<B> for TraceResponse {
    fn on_response(self, response: &Response<B>, latency: Duration, _: &Span) {
        match response.status() {
            status if status.is_success() => tracing::trace!(?status, ?latency),
            status if status.is_client_error() => tracing::warn!(?status, ?latency),
            status if status.is_server_error() => tracing::error!(?status, ?latency),
            status => tracing::info!(?status, ?latency),
        }
    }
}
+35
crates/gordian-knot/src/mock.rs
···11+use crate::{22+ model::{Knot, config::KnotConfiguration},33+ services::database::DataStore,44+};55+use gordian_identity::Resolver;66+use gordian_types::OwnedDid;77+88+pub async fn setup(99+ owner_did: &str,1010+ instance_name: &str,1111+) -> (tempfile::TempDir, mock_pds::Pds, Knot) {1212+ let base = tempfile::tempdir().expect("temporary directory");1313+ let pool = sqlx::SqlitePool::connect("sqlite://:memory:")1414+ .await1515+ .unwrap();1616+1717+ sqlx::migrate!().run(&pool).await.unwrap();1818+1919+ let (pds, listener) = mock_pds::init().await;2020+ let pds_api = mock_pds::router(pds.clone());2121+ tokio::spawn(async move {2222+ axum::serve(listener, pds_api).await.unwrap();2323+ });2424+2525+ let owner_did = OwnedDid::parse(owner_did).expect("owner DID must be valid");2626+ let instance = OwnedDid::parse(format!("did:web:{instance_name}"))2727+ .expect("instance name should form a valid DID");2828+2929+ let database = DataStore::new(pool);3030+ let resolver = Resolver::new(pds.clone());3131+ let config = KnotConfiguration::new(owner_did.clone(), instance, base.path());3232+ let knot = Knot::new(config, resolver, reqwest::Client::new(), database, []).unwrap();3333+3434+ (base, pds, knot)3535+}
+260
crates/gordian-knot/src/model.rs
//! The central [`Knot`] server-state handle and its integrations:
//! service-auth claim storage and the git smart-HTTP service entry points.

pub mod config;
pub mod convert;
pub mod errors;
pub mod knot_state;
pub mod nicediff;
pub mod repository;

use core::ops;
use std::{borrow::Cow, ffi::OsString, net::SocketAddr, sync::Arc};

use axum::{
    extract::{FromRef, FromRequestParts, OptionalFromRequestParts},
    http::request::Parts,
};
use futures_util::future::BoxFuture;
use git_service::{state::GitServiceState, util::SetOptionEnv as _};
use gordian_auth::jwt;
use gordian_identity::{HttpClient, Resolver};
use gordian_lexicon::sh_tangled::knot::Member;
use gordian_types::Tid;
use time::OffsetDateTime;
use tokio::process::Command;

use crate::{
    extractors::request_id::RequestId,
    model::{config::KnotConfiguration, repository::TangledRepository},
    private,
    public::git::{Error, GitAuthorization},
    services::{
        authorization::{AuthorizationClaimsStore, AuthorizationClaimsStoreError},
        database::DataStore,
    },
};

pub use knot_state::KnotState;

/// Cheaply clonable handle to the shared server state.
///
/// A transparent wrapper around [`Arc<KnotState>`]; cloning only bumps the
/// reference count, and [`ops::Deref`] exposes all of [`KnotState`]'s API.
#[derive(Debug, Clone)]
#[repr(transparent)]
pub struct Knot {
    // Shared, immutable-from-here state; interior details live in `KnotState`.
    inner: Arc<KnotState>,
}

// Allow wrapping already-shared state directly.
impl From<Arc<KnotState>> for Knot {
    #[inline]
    fn from(inner: Arc<KnotState>) -> Self {
        Self { inner }
    }
}

// Lets axum extractors that need only a `Resolver` borrow it from `Knot` state.
impl FromRef<Knot> for Resolver {
    #[inline]
    fn from_ref(input: &Knot) -> Self {
        input.resolver().clone()
    }
}

impl ops::Deref for Knot {
    type Target = KnotState;
    #[inline]
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

impl Knot {
    /// Construct the server state.
    ///
    /// `private_binds` are the socket addresses the private (internal) API
    /// listens on. Errors are I/O failures from [`KnotState::new`].
    pub fn new<'a>(
        config: KnotConfiguration,
        resolver: Resolver,
        http: HttpClient,
        database: DataStore,
        private_binds: impl IntoIterator<Item = &'a SocketAddr>,
    ) -> std::io::Result<Self> {
        let inner = KnotState::new(config, resolver, http, database, private_binds)?;
        Ok(Self { inner })
    }

    /// Upsert a `sh.tangled.knot.member` record.
    ///
    /// When the member is seen for the first time (the upsert reports a new
    /// row), seed their public keys and repositories into the local database.
    pub async fn add_member(
        &self,
        rkey: &str,
        rev: &str,
        cid: &str,
        member: &Member<'_>,
    ) -> anyhow::Result<()> {
        let new_member = self
            .database()
            .upsert_knot_member(rkey, rev, cid, member)
            .await?;

        if new_member {
            tracing::info!(member = %member.subject, "new knot member");
            crate::services::seed::public_keys(self, &member.subject).await?;
            crate::services::seed::repositories(self, &member.subject).await?;
        }

        Ok(())
    }

    /// Ensure the configured owner exists as a knot member.
    ///
    /// Uses empty `rkey`/`cid` sentinels and `Tid::MAX` as the revision —
    /// presumably so this synthetic row never out-ranks a real record from
    /// the network; TODO confirm against `upsert_knot_member` semantics.
    pub async fn seed_owner(&self) -> anyhow::Result<()> {
        self.add_member(
            "",
            &Tid::MAX.to_string(),
            "",
            &Member {
                subject: Cow::Borrowed(self.owner()),
                domain: Cow::Borrowed(self.instance_ident()),
                created_at: OffsetDateTime::now_utc(),
            },
        )
        .await
    }
}

// Claim persistence (JWT anti-replay bookkeeping) is delegated verbatim to
// the inner `KnotState`.
impl AuthorizationClaimsStore<jwt::Claims> for Knot {
    fn get_unexpired_claims<'a: 'b, 'b>(
        &'a self,
        jti: &'b str,
        now: i64,
    ) -> BoxFuture<'b, Result<Option<jwt::Claims>, AuthorizationClaimsStoreError>> {
        self.inner.get_unexpired_claims(jti, now)
    }

    fn store_claims(
        &self,
        claims: jwt::Claims,
        now: i64,
    ) -> BoxFuture<'_, Result<(), AuthorizationClaimsStoreError>> {
        self.inner.store_claims(claims, now)
    }
}

/// Builds the `git` child-process invocations for each smart-HTTP git
/// service. Upload (fetch/archive) endpoints are unauthenticated; both
/// receive-pack (push) endpoints require a verified [`GitAuthorization`]
/// and a `can_push` permission check.
impl GitServiceState for Knot {
    type Rejection = Error;

    async fn init_upload_archive(&self, parts: &mut Parts) -> Result<Command, Self::Rejection> {
        // NOTE(review): RequestId extraction is unwrapped — assumed infallible
        // because middleware always sets it; confirm.
        let request_id = RequestId::from_request_parts(parts, self).await.unwrap();
        let repository = TangledRepository::from_git_request(parts, self).await?;
        let mut command = repository.git();
        command
            // Forwards the request id into the child's environment (via
            // `SetOptionEnv`; presumably skipped when absent — TODO confirm).
            .option_env("X_REQUEST_ID", request_id)
            .args(["upload-archive"])
            .arg(repository.path());

        Ok(command.into())
    }

    async fn init_upload_pack_advertisement(
        &self,
        parts: &mut Parts,
    ) -> Result<tokio::process::Command, Self::Rejection> {
        let request_id = RequestId::from_request_parts(parts, self).await.unwrap();
        let repository = TangledRepository::from_git_request(parts, self).await?;
        let mut command = repository.git();
        command
            .option_env("X_REQUEST_ID", request_id)
            // Ref advertisement phase of the fetch protocol.
            .args([
                "upload-pack",
                "--http-backend-info-refs",
                "--stateless-rpc",
                "--strict",
                "--timeout=10",
            ])
            .arg(repository.path());

        Ok(command.into())
    }

    async fn init_upload_pack(
        &self,
        parts: &mut Parts,
    ) -> Result<tokio::process::Command, Self::Rejection> {
        let request_id = RequestId::from_request_parts(parts, self).await.unwrap();
        let repository = TangledRepository::from_git_request(parts, self).await?;
        let mut command = repository.git();
        command
            .option_env("X_REQUEST_ID", request_id)
            // Pack-transfer phase of the fetch protocol.
            .args(["upload-pack", "--strict", "--stateless-rpc"])
            .arg(repository.path());

        Ok(command.into())
    }

    async fn init_receive_pack_advertisement(
        &self,
        parts: &mut Parts,
    ) -> Result<tokio::process::Command, Self::Rejection> {
        // Push endpoints require a verified service-auth token …
        let GitAuthorization(auth) = GitAuthorization::from_request_parts(parts, self).await?;
        let request_id = RequestId::from_request_parts(parts, self).await.unwrap();
        let repository = TangledRepository::from_git_request(parts, self).await?;

        // … and explicit push permission for the token's issuer.
        if !self.can_push(repository.repository_key(), &auth.iss).await {
            tracing::error!(did = %auth.iss, "push denied");
            return Err(Error::forbidden(
                self,
                format!(
                    "'{}' does not have permission to push to this repository",
                    auth.iss
                ),
            ))?;
        }

        // Passed as a `git -c <key>=<value>` option; `generate_push_seed`
        // presumably returns the full `key=value` string — TODO confirm.
        let nonce_seed = self.generate_push_seed(repository.repository_key());
        let mut command = repository.git();
        command
            // Expose the pushing identity to hooks via the environment.
            .env(private::ENV_USER_DID, auth.iss.as_str())
            .option_env("X_REQUEST_ID", request_id)
            .args([
                "-c",
                &nonce_seed,
                "receive-pack",
                "--http-backend-info-refs",
                "--stateless-rpc",
            ])
            .arg(repository.path());

        Ok(command.into())
    }

    async fn init_receive_pack(
        &self,
        parts: &mut Parts,
    ) -> Result<tokio::process::Command, Self::Rejection> {
        let GitAuthorization(auth) = GitAuthorization::from_request_parts(parts, self).await?;
        let request_id = RequestId::from_request_parts(parts, self).await.unwrap();
        let repository = TangledRepository::from_git_request(parts, self).await?;

        if !self.can_push(repository.repository_key(), &auth.iss).await {
            tracing::error!(did = %auth.iss, "push denied");
            return Err(Error::forbidden(
                self,
                format!(
                    "'{}' does not have permission to push to this repository",
                    auth.iss
                ),
            ))?;
        }

        // Per-DID allowed-signers file used by git to validate signed pushes
        // (`gpg.ssh.allowedSignersFile`). Resolved relative to the process
        // working directory.
        let allowed_signers_path = std::env::current_dir()
            .unwrap()
            .join("allowed_signers")
            .join(auth.iss.as_str());

        // Assemble "gpg.ssh.allowedSignersFile=<path>" as an OsString (the
        // path may not be valid UTF-8); capacity is precomputed to avoid a
        // reallocation.
        let mut allowed_signers_option = OsString::with_capacity(
            "gpg.ssh.allowedSignersFile=".len() + allowed_signers_path.as_os_str().len(),
        );
        allowed_signers_option.push("gpg.ssh.allowedSignersFile=");
        allowed_signers_option.push(&allowed_signers_path);

        let nonce_seed = self.generate_push_seed(repository.repository_key());
        let mut command = repository.git();
        command
            .env(private::ENV_USER_DID, auth.iss.as_str())
            .option_env("X_REQUEST_ID", request_id)
            // Two `-c` options: the push nonce seed and the allowed-signers file.
            .args(["-c", &nonce_seed, "-c"])
            .arg(&allowed_signers_option)
            .args(["receive-pack", "--stateless-rpc"])
            .arg(repository.path());

        Ok(command.into())
    }
}
···11+use axum::{22+ extract::{FromRef, FromRequestParts},33+ http::{header::AUTHORIZATION, request::Parts},44+};55+use gordian_auth::{66+ IntoVerificationKey, OpenSshKey,77+ jwt::{Claims, Token, decode},88+};99+use gordian_identity::Resolver;1010+use gordian_types::Nsid;1111+use time::OffsetDateTime;1212+1313+use crate::{1414+ model::Knot,1515+ nsid::SH_TANGLED_REPO_GITRECEIVEPACK,1616+ services::authorization::{1717+ AuthorizationClaimsStore as _, Verification, VerificationError, extract_token,1818+ },1919+};2020+2121+use super::Error;2222+2323+#[derive(Debug)]2424+struct GitVerification;2525+2626+impl Verification for GitVerification {2727+ const LEXICON_METHOD: &'static Nsid = SH_TANGLED_REPO_GITRECEIVEPACK;2828+}2929+3030+#[derive(Clone, Debug)]3131+pub struct GitAuthorization(pub Claims);3232+3333+impl<S: Sync> FromRequestParts<S> for GitAuthorization3434+where3535+ Knot: FromRef<S>,3636+ Resolver: FromRef<S>,3737+{3838+ type Rejection = Error;3939+4040+ async fn from_request_parts(parts: &mut Parts, state: &S) -> Result<Self, Self::Rejection> {4141+ let knot = Knot::from_ref(state);4242+ let resolver = Resolver::from_ref(state);4343+ let now = OffsetDateTime::now_utc().unix_timestamp();4444+4545+ let credential = extract_token(parts, AUTHORIZATION, "bearer").ok_or(4646+ Error::unauthorized(&knot, "inter-service authorization required"),4747+ )?;4848+4949+ let unverified_token = Token::decode_unverified(credential)5050+ .map_err(|_| Error::unauthorized(&knot, "inter-service authorization required"))?;5151+5252+ // Before performing a relatively expensive DID look-up, ensure the token5353+ // claims are valid.5454+ let unverified_claims = unverified_token.claims;5555+ GitVerification::verify(&knot, now, knot.instance(), &unverified_claims)5656+ .await5757+ .map_err(|error| match error {5858+ // Git re-uses the token from the credential helper for each request in a single push.5959+ //6060+ // Returning 'Forbidden' here will make git abort. 
Instead, we return an Unauthorized6161+ // which will force git to get a new token from the credential helper.6262+ VerificationError::Reused => Error::unauthorized(&knot, "authorization re-used"),6363+ error => Error::forbidden(&knot, error.to_string()),6464+ })?;6565+6666+ // Resolve the DID document for the claimed issuer, extract and parse6767+ // the verification methods into public keys.6868+6969+ let (resolved_did, doc) = resolver7070+ .resolve(unverified_claims.iss.as_str())7171+ .await7272+ .map_err(|error| Error::forbidden(&knot, error.to_string()))?;7373+7474+ assert_eq!(unverified_claims.iss, resolved_did);7575+7676+ let verification_keys = doc7777+ .verification_method7878+ .into_iter()7979+ .filter_map(|vm| vm.into_verification_key().ok());8080+8181+ // Try to decode and verify the JWT using any one of the verification keys8282+ // we have for the DID.8383+ for verification_key in verification_keys {8484+ if let Ok(token) = decode::<Claims>(credential, &verification_key) {8585+ // Store the JWT so it cannot be re-used within the claim period.8686+ knot.store_claims(token.claims.clone(), now).await?;8787+ return Ok(Self(token.claims));8888+ }8989+ }9090+9191+ // Read the 'sh.tangled.publicKey' records the knot has associated9292+ // with claimed issuer.9393+ let public_keys = knot9494+ .database()9595+ .public_keys_for_did(&unverified_claims.iss)9696+ .await9797+ .unwrap_or_default()9898+ .into_iter()9999+ .filter_map(|public_key| OpenSshKey(public_key.key).into_verification_key().ok());100100+101101+ // Try to decode and verify the JWT using any one of the public keys102102+ // we have for the DID.103103+ for verification_key in public_keys {104104+ if let Ok(token) = decode::<Claims>(credential, &verification_key) {105105+ // Store the JWT so it cannot be re-used within the claim period.106106+ knot.store_claims(token.claims.clone(), now).await?;107107+ return Ok(Self(token.claims));108108+ }109109+ }110110+111111+ Err(Error::forbidden(&knot, "No 
valid authorization found"))?112112+ }113113+}
pub mod list_records {
    //!
    //! List a range of records in a repository, matching a specific
    //! collection. Does not require auth.
    //!
    //! <https://docs.bsky.app/docs/api/com-atproto-repo-list-records>
    //!
    use gordian_types::RecordUri;

    /// Query parameters for `com.atproto.repo.listRecords`.
    #[derive(Debug, serde::Deserialize, serde::Serialize)]
    pub struct Input {
        /// The handle or DID of the repo.
        pub repo: String,

        /// The NSID of the record type.
        pub collection: String,

        /// The number of records to return.
        ///
        /// Possible values: 0..=100.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub limit: Option<usize>,

        // Pagination cursor from a previous response; omitted on the first page.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub cursor: Option<String>,

        /// Flag to reverse the order of the returned records.
        #[serde(default)]
        pub reverse: bool,
    }

    /// One page of results.
    #[derive(Debug, serde::Deserialize, serde::Serialize)]
    pub struct Output {
        /// Cursor to feed back as [`Input::cursor`] for the next page;
        /// absent when there are no further records.
        pub cursor: Option<String>,

        /// The records in this page.
        pub records: Vec<Record>,
    }

    /// A single record as returned by the PDS.
    #[derive(Debug, serde::Deserialize, serde::Serialize)]
    pub struct Record {
        /// The `at://` URI identifying this record.
        pub uri: RecordUri,

        /// CID of this version of the record.
        pub cid: String,

        /// The record payload, kept as raw JSON so it round-trips without
        /// needing schema knowledge for every collection.
        pub value: Box<serde_json::value::RawValue>,
    }
}
···11-[package]22-name = "knot"33-description = "An alternative Tangled knot-server"44-version.workspace = true55-authors.workspace = true66-repository.workspace = true77-license.workspace = true88-edition.workspace = true99-publish.workspace = true1010-1111-[dependencies]1212-atproto = { workspace = true, features = ["sqlx", "time"] }1313-auth.workspace = true1414-identity.workspace = true1515-jetstream.workspace = true1616-lexicon.workspace = true1717-git-service.workspace = true1818-1919-anyhow.workspace = true2020-gix.workspace = true2121-reqwest.workspace = true2222-serde.workspace = true2323-serde_json.workspace = true2424-thiserror.workspace = true2525-tracing.workspace = true2626-url.workspace = true2727-2828-aws-lc-rs = { version = "1.14.1", default-features = false, features = ["alloc", "aws-lc-sys"] }2929-axum = { workspace = true, features = ["ws"] }3030-axum-extra = { version = "0.12.1", features = ["async-read-body"] }3131-bytes = "1.10.1"3232-clap = { version = "4.5.47", features = ["derive", "env", "string"] }3333-data-encoding.workspace = true3434-futures-util = "0.3.31"3535-hyper-util = { version = "0.1.17", features = ["client"] }3636-mimetype-detector = "0.3.4"3737-moka = { version = "0.12.12", features = ["future"] }3838-rand = "0.9.2"3939-rayon = "1.11.0"4040-rustc-hash = "2.1.1"4141-time.workspace = true4242-sqlx = { version = "0.8.6", features = ["runtime-tokio", "sqlite", "time", "json", "macros", "derive"] }4343-tempfile = "3.24.0"4444-tokio = { version = "1.47.1", features = ["io-util", "macros", "net", "process", "signal", "rt-multi-thread"] }4545-tokio-rayon = "2.1.0"4646-tokio-stream = { version = "0.1.17", features = ["time"] }4747-tokio-tungstenite = "0.28.0"4848-tokio-util = "0.7.18"4949-tower = { version = "0.5.2", features = ["buffer", "filter", "limit"] }5050-tower-http = { version = "0.6.6", features = ["decompression-gzip", "request-id", "trace", "tracing", "util"] }5151-tracing-subscriber = { version = "0.3.20", features = 
["env-filter"] }5252-dashmap = "6.1.0"5353-mock-pds = { version = "0.0.0", path = "../mock-pds" }5454-clap_complete = "4.5.65"5555-5656-[dev-dependencies]5757-http-body-util = "0.1.3"5858-multibase = "0.9.1"5959-6060-[target.'cfg(not(target_env = "msvc"))'.dependencies]6161-tikv-jemallocator = { version = "0.6.1", optional = true }6262-6363-[features]6464-default = ["jemalloc"]6565-jemalloc = ["dep:tikv-jemallocator"]
-84
crates/knot/README.md
···11-# gordian-knot22-33-a blazingly fast 🚀 and memory-efficient [knot server](https://tangled.org/tangled.org/core/tree/master/knotserver).44-55-⚠️ work in progress. this code is full of jank.66-77-## progress88-99-xrpc api:1010-1111-| status | lexicon method | notes |1212-| :----: | :--------------------------------- | :-- |1313-| ✅ | `sh.tangled.owner` | |1414-| ⚫️ | ~~sh.tangled.knot.listKeys~~ | i have no intention of implementing this. |1515-| ✅ | `sh.tangled.knot.version` | |1616-| ✅ | `sh.tangled.repo.archive` | |1717-| ✅ | `sh.tangled.repo.blob` | |1818-| ✅ | `sh.tangled.repo.branch` | ignores `shortHash` parameter |1919-| ✅ | `sh.tangled.repo.branches` | |2020-| 🔶 | `sh.tangled.repo.compare` | seems to work, but only computes `patch` field 😅 |2121-| ✅ | `sh.tangled.repo.create` | |2222-| ✅ | `sh.tangled.repo.delete` | |2323-| ❌ | `sh.tangled.repo.deleteBranch` | |2424-| ✅ | `sh.tangled.repo.diff` | |2525-| ❌ | `sh.tangled.repo.forkStatus` | |2626-| ❌ | `sh.tangled.repo.forkSync` | |2727-| ✅ | `sh.tangled.repo.getDefaultBranch` | |2828-| ❌ | `sh.tangled.repo.hiddenRef` | |2929-| ✅ | `sh.tangled.repo.mergeCheck` | |3030-| ❌ | `sh.tangled.repo.merge` | |3131-| ❌ | `sh.tangled.repo.languages` | |3232-| ✅ | `sh.tangled.repo.log` | |3333-| ✅ | `sh.tangled.repo.setDefaultBranch` | |3434-| ✅ | `sh.tangled.repo.tags` | |3535-| 🔶 | `sh.tangled.repo.tree` | cheats by not computing most recent commit 🚀 |3636-3737-jetstream ingest:3838-3939-- [x] `sh.tangled.knot.member`4040-- [x] `sh.tangled.publicKey`4141-- [x] `sh.tangled.repo`4242-- [x] `sh.tangled.repo.collaborator`4343-4444-`git` services:4545-4646-- [x] `git-archive`4747-- [x] `git-receive-pack`4848-- [x] `git-upload-pack`4949-5050-5151-`/events`:5252-5353-`sh.tangled.repo.refUpdate` events are generated, but pipelines won’t run because i don’t emit any `sh.tangled.repo.pipeline` events.5454-5555-`sh.tangled.repo.refUpdate` does not compute language breakdown.5656-5757-## differences to the real knot 
server5858-5959-### `git` transport6060-6161-`ssh` transport for `git` operations is currently unsupported.6262-6363-`http` transport is supported for all `git` services (`git fetch`, `git pull`, `git push`, and `git archive`).6464-6565-`git push` operations are authorized using signed [service-auth](https://atproto.com/specs/xrpc#inter-service-authentication-jwt) tokens, which may be validated against an atproto signing-key or a supported `sh.tangled.publicKey` for the corresponding identity.6666-6767-a `git` credential helper is required to generate such tokens. a slightly dodgy one may be found in [crates/credential-helper](../credential-helper), which uses `ssh-agent` to create a signed jwt.6868-6969-### signed git pushes7070-7171-`knot` expects all `git push` operations to be cryptographically signed.7272-7373-with [tangled](https://tangled.org), `git` operations over `http` are expected to be proxied through an appview to the knot. a sufficiently malicious appview could theoretically modify the content of the push inflight. to prevent inflight modification, `knot` requires pushes to be accompanied by push certificate signed by a key from a `sh.tangled.publicKey` record associated with the authorized identity.7474-7575-cf:7676-- <https://git-scm.com/docs/git-push#documentation/git-push.txt---signed>7777-- <https://git-scm.com/docs/githooks/2.27.0#pre-receive>7878-- <https://git-scm.com/docs/git-receive-pack#_pre_receive_hook>7979-8080-this requirement can be disabled by the knot operator with the `--require-signed-push=false` argument.8181-8282-### `sh.tangled.repo.delete`8383-8484-`knot` doesn’t delete repositories, but instead archives them in a `deleted/` directory.
···11-use std::{22- collections::HashMap,33- env,44- fs::{self, Permissions},55- io::{self, Write},66- os::unix::fs::PermissionsExt,77- path::Path,88-};99-1010-use axum::http::{HeaderMap, HeaderName, HeaderValue, header::InvalidHeaderName};1111-use bytes::Bytes;1212-use knot::private;1313-1414-use crate::cli::{HookArguments, HookName};1515-1616-/// Setup the global hooks directory at `path`.1717-pub fn setup_global_hooks<P: AsRef<Path>>(path: P) -> io::Result<()> {1818- let executable = env::current_exe()1919- .map(|path| path.to_str().map(ToOwned::to_owned))2020- .expect("Current executable must be defined")2121- .expect("Current executable must be valid utf8");2222-2323- let _ = fs::create_dir_all(&path);2424- for hook_name in HookName::iter_variants() {2525- let hook_path = path.as_ref().join(hook_name);2626- let script = format!(2727- "#!/usr/bin/sh\n# This file is generated by gordian-knot. Do not modify.\n{executable} hook {hook_name}\n"2828- );2929- std::fs::write(&hook_path, script)?;3030-3131- let permissions = Permissions::from_mode(0o755);3232- std::fs::set_permissions(&hook_path, permissions)?;3333- tracing::info!(?executable, ?hook_path, "git hook installed");3434- }3535- Ok(())3636-}3737-3838-/// [`core::fmt::Debug`] an [`url::Url`] without causing eye-cancer.3939-#[repr(transparent)]4040-struct DebugUrl(url::Url);4141-4242-impl core::fmt::Debug for DebugUrl {4343- fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {4444- core::fmt::Display::fmt(&self.0, f)4545- }4646-}4747-4848-/// [`core::fmt::Debug`] a slice [`url::Url`] without causing eye-cancer.4949-pub struct DebugUrls<'a>(pub &'a [url::Url]);5050-5151-impl<'a> core::fmt::Debug for DebugUrls<'a> {5252- fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {5353- let urls = unsafe {5454- // SAFETY: Close your eyes an pray!5555- &*(self.0 as *const [url::Url] as *const [DebugUrl])5656- };5757- core::fmt::Debug::fmt(&urls, f)5858- 
}5959-}6060-6161-#[tracing::instrument(fields(api = ?DebugUrls(&api)))]6262-pub async fn run_hook(6363- HookArguments {6464- api,6565- repo_did,6666- repo_rkey,6767- hook,6868- }: HookArguments,6969-) -> anyhow::Result<()> {7070- if api.is_empty() {7171- tracing::warn!("internal API not specified, skipping hook");7272- return Ok(());7373- };7474-7575- let mut environment_vars: HashMap<_, _> = env::vars()7676- .filter(|(key, _)| !key.trim().is_empty())7777- .collect();7878-7979- let request_id = take_var(&mut environment_vars, "X_REQUEST_ID").ok();8080-8181- // Build a header map with the remaining environment variables.8282- let mut headers = HeaderMap::with_capacity(environment_vars.len());8383- if let Some(request_id) = request_id {8484- headers.insert("X-Request-ID", HeaderValue::from_str(&request_id)?);8585- }8686-8787- for (key, value) in environment_vars {8888- match (variable_to_header_name(&key), HeaderValue::try_from(&value)) {8989- (Ok(key), Ok(value)) => _ = headers.insert(key, value),9090- (Err(error), _) => tracing::warn!(?error, ?key, ?value, "ignoring header"),9191- (_, Err(error)) => tracing::warn!(?error, ?key, ?value, "ignoring header"),9292- }9393- }9494-9595- let stdin = Bytes::from(io::read_to_string(io::stdin())?);9696-9797- let client = reqwest::Client::new();9898- let url_path = format!("/hook/{repo_did}/{repo_rkey}/{hook}");9999- for mut hook_url in api {100100- hook_url.set_path(&url_path);101101- let response = client102102- .post(hook_url)103103- .headers(headers.clone())104104- .body(stdin.clone())105105- .send()106106- .await;107107-108108- match response {109109- Ok(response) if response.status().is_success() => {110110- let body = response.bytes().await?;111111- io::stdout().write_all(&body)?;112112- return Ok(());113113- }114114- Ok(response) => {115115- let status = response.status();116116- let body = response.bytes().await?;117117- io::stdout().write_all(&body)?;118118- return Err(anyhow::anyhow!("Knot returned error status 
{status}"));119119- }120120- Err(error) => {121121- tracing::error!(?error, "failed to post hook to internal API");122122- continue;123123- }124124- }125125- }126126-127127- Err(anyhow::anyhow!("Failed to find a valid internal endpoint"))128128-}129129-130130-fn take_var(vars: &mut HashMap<String, String>, name: &str) -> anyhow::Result<String> {131131- vars.remove(name).ok_or(anyhow::anyhow!(132132- "Expected environment variable {name:?} to be set",133133- ))134134-}135135-136136-fn variable_to_header_name(name: &str) -> Result<HeaderName, InvalidHeaderName> {137137- format!(138138- "{}-{}",139139- private::ENV_HEADER_PREFIX,140140- name.trim_start_matches("GORDIAN_")141141- )142142- .replace('_', "-")143143- .to_lowercase()144144- .try_into()145145-}
-684
crates/knot/src/lib.rs
···11-use std::io;22-33-use atproto::Nsid;44-use axum::Router;55-use tokio::{net::TcpListener, task::JoinSet};66-use tokio_util::sync::CancellationToken;77-88-pub mod extractors;99-pub mod model;1010-pub mod private;1111-pub mod public;1212-pub mod services;1313-pub mod sync;1414-pub mod types;1515-mod util;1616-1717-#[cfg(test)]1818-pub(crate) mod mock;1919-2020-pub mod nsid {2121- use atproto::Nsid;2222-2323- macro_rules! nsid {2424- ($nsid:literal) => {2525- unsafe { Nsid::from_static_unchecked($nsid) }2626- };2727- }2828-2929- pub const SH_TANGLED_KNOT_MEMBER: &Nsid = nsid!("sh.tangled.knot.member");3030- pub const SH_TANGLED_PUBLICKEY: &Nsid = nsid!("sh.tangled.publicKey");3131- pub const SH_TANGLED_REPO: &Nsid = nsid!("sh.tangled.repo");3232- pub const SH_TANGLED_REPO_COLLABORATOR: &Nsid = nsid!("sh.tangled.repo.collaborator");3333- pub const SH_TANGLED_REPO_CREATE: &Nsid = nsid!("sh.tangled.repo.create");3434- pub const SH_TANGLED_REPO_DELETE: &Nsid = nsid!("sh.tangled.repo.delete");3535- pub const SH_TANGLED_REPO_GITRECEIVEPACK: &Nsid = nsid!("sh.tangled.repo.gitReceivePack");3636- pub const SH_TANGLED_REPO_SETDEFAULTBRANCH: &Nsid = nsid!("sh.tangled.repo.setDefaultBranch");3737-}3838-3939-/// NSIDs of interest to a knot server.4040-pub const NSIDS: &[&Nsid] = {4141- &[4242- nsid::SH_TANGLED_KNOT_MEMBER,4343- nsid::SH_TANGLED_PUBLICKEY,4444- nsid::SH_TANGLED_REPO,4545- nsid::SH_TANGLED_REPO_COLLABORATOR,4646- ]4747-};4848-4949-pub async fn serve_all(5050- router: Router,5151- listeners: impl IntoIterator<Item = TcpListener>,5252- shutdown: CancellationToken,5353-) -> io::Result<()> {5454- let mut service = JoinSet::new();5555- for listener in listeners {5656- let router = router.clone();5757- let addr = listener.local_addr()?;5858- tracing::info!(?addr, "listening on socket");5959-6060- let shutdown = shutdown.child_token();6161- service.spawn(async move {6262- axum::serve(listener, router)6363- .with_graceful_shutdown(async move { 
shutdown.cancelled().await })6464- .await6565- });6666- }6767-6868- for task in service.join_all().await {6969- task?;7070- }7171-7272- Ok(())7373-}7474-7575-#[cfg(test)]7676-mod tests {7777- use atproto::{Did, tid::Tid};7878- use auth::jwt::Claims;7979-8080- use axum::{8181- body::Body,8282- http::{Request, StatusCode},8383- };8484- use time::{OffsetDateTime, format_description::well_known::Rfc3339};8585- use tower::ServiceExt;8686-8787- use crate::model::Knot;8888-8989- const TEST_DID: &str = "did:plc:65gha4t3avpfpzmvpbwovss7";9090- const TEST_INSTANCE: &str = "lib-knot-test";9191-9292- fn get(uri: &str) -> Request<Body> {9393- Request::builder().uri(uri).body(Body::empty()).unwrap()9494- }9595-9696- #[tokio::test]9797- async fn can_query_knot_owner() {9898- let (_, _, knot) = crate::mock::setup(TEST_DID, TEST_INSTANCE).await;9999- let response = super::public::router()100100- .with_state(knot)101101- .oneshot(get("/xrpc/sh.tangled.owner"))102102- .await103103- .unwrap();104104-105105- assert_eq!(response.status(), StatusCode::OK);106106- let body = axum::body::to_bytes(response.into_body(), 1000)107107- .await108108- .unwrap();109109-110110- assert_eq!(111111- body.as_ref(),112112- format!("{{\"owner\":\"{TEST_DID}\"}}").as_bytes()113113- );114114-115115- let resp: lexicon::sh_tangled::owner::Output = serde_json::from_slice(&body).unwrap();116116- assert_eq!(resp.owner.as_str(), TEST_DID);117117- }118118-119119- #[tokio::test]120120- async fn xrpc_sh_tangled_repo_missing_repo() {121121- let (_, _, knot) = crate::mock::setup(TEST_DID, TEST_INSTANCE).await;122122- for particle in ["tree", "log", "tags", "branches"] {123123- let response = super::public::router()124124- .with_state(knot.clone())125125- .oneshot(get(&format!("/xrpc/sh.tangled.repo.{particle}")))126126- .await127127- .unwrap();128128-129129- assert_eq!(response.status(), StatusCode::BAD_REQUEST);130130- }131131- }132132-133133- #[tokio::test]134134- async fn xrpc_sh_tangled_repo_bad_repo_format() 
{135135- let (_, _, knot) = crate::mock::setup(TEST_DID, TEST_INSTANCE).await;136136- for particle in ["tree", "log", "tags", "branches"] {137137- // Missing repo name138138- let response = super::public::router()139139- .with_state(knot.clone())140140- .oneshot(get(&format!(141141- "/xrpc/sh.tangled.repo.{particle}?repo=did:web:example.com"142142- )))143143- .await144144- .unwrap();145145-146146- assert_eq!(response.status(), StatusCode::BAD_REQUEST);147147-148148- // Bad repo names '..'149149-150150- for repo_name in ["", "..", "../../secret-data", ".hidden", "/etc/passwd"] {151151- let response = super::public::router()152152- .with_state(knot.clone())153153- .oneshot(get(&format!(154154- "/xrpc/sh.tangled.repo.{particle}?repo=did:web:example.com/{repo_name}"155155- )))156156- .await157157- .unwrap();158158-159159- assert_eq!(response.status(), StatusCode::BAD_REQUEST);160160- }161161- }162162- }163163-164164- #[tokio::test]165165- async fn xrpc_sh_tangled_repo_not_found() {166166- let (_, _, knot) = crate::mock::setup(TEST_DID, TEST_INSTANCE).await;167167- for particle in ["tree", "log", "tags", "branches"] {168168- let response = super::public::router()169169- .with_state(knot.clone())170170- .oneshot(get(&format!(171171- "/xrpc/sh.tangled.repo.{particle}?repo=did:web:example.com/non-existent-repo"172172- )))173173- .await174174- .unwrap();175175-176176- assert_eq!(response.status(), StatusCode::NOT_FOUND);177177- }178178- }179179-180180- mod sh_tangled_repo_create {181181- use crate::nsid::{SH_TANGLED_REPO_CREATE, SH_TANGLED_REPO_DELETE};182182-183183- use super::super::public;184184- use super::*;185185- use axum::http::{HeaderValue, Method, Response, header};186186-187187- fn make_claims<F>(iss: &Did, aud: &Did, modify_claims: F) -> Claims188188- where189189- F: FnOnce(&mut Claims),190190- {191191- let jti: [u8; 16] = rand::random();192192- let jti = data_encoding::BASE32_NOPAD_VISUAL193193- .encode(&jti)194194- .to_lowercase();195195-196196- let mut claims 
= Claims {197197- iss: iss.into(),198198- aud: aud.into(),199199- iat: OffsetDateTime::now_utc().unix_timestamp(),200200- exp: OffsetDateTime::now_utc().unix_timestamp() + 10,201201- lxm: None,202202- jti: jti.into(),203203- };204204-205205- modify_claims(&mut claims);206206- claims207207- }208208-209209- async fn service_auth_with<F>(210210- pds: &mock_pds::Pds,211211- iss: &Did,212212- aud: &Did,213213- modify_claims: F,214214- ) -> HeaderValue215215- where216216- F: FnOnce(&mut Claims),217217- {218218- let claims = make_claims(iss, aud, modify_claims);219219- let authorization = pds.service_auth(&claims).await;220220- HeaderValue::from_str(&authorization).unwrap()221221- }222222-223223- #[tokio::test]224224- async fn reject_wrong_method() {225225- let (_, _, knot) = crate::mock::setup(TEST_DID, TEST_INSTANCE).await;226226- let response = public::router()227227- .with_state(knot.clone())228228- .oneshot(get("/xrpc/sh.tangled.repo.create"))229229- .await230230- .unwrap();231231-232232- assert_eq!(response.status(), StatusCode::METHOD_NOT_ALLOWED);233233- }234234-235235- async fn create_repo_with<F>(236236- knot: &Knot,237237- pds: mock_pds::Pds,238238- did: &Did,239239- rkey: &str,240240- repo_name: &str,241241- source: Option<&str>,242242- modify_claims: F,243243- ) -> Response<Body>244244- where245245- F: Fn(&mut Claims) + Copy,246246- {247247- // Create fake PDS record for our new repository.248248- pds.insert_record(249249- did,250250- "sh.tangled.repo",251251- rkey,252252- &serde_json::json!({253253- "name": repo_name,254254- "knot": knot.instance_ident(),255255- "source": source,256256- "createdAt": OffsetDateTime::now_utc().format(&Rfc3339).unwrap()257257- }),258258- )259259- .await;260260-261261- // Generate the body of the 'sh.tangled.repo.create' request.262262- let create = lexicon::sh_tangled::repo::create::Input {263263- rkey: rkey.to_string(),264264- default_branch: Some("main".into()),265265- source: None,266266- };267267-268268- let auth = 
service_auth_with(&pds, &did, &knot.instance, |claims| {269269- claims.lxm = Some(SH_TANGLED_REPO_CREATE.into_boxed());270270- modify_claims(claims);271271- })272272- .await;273273-274274- let response = public::router()275275- .with_state(knot.clone())276276- .oneshot(277277- Request::post("/xrpc/sh.tangled.repo.create")278278- .header(header::AUTHORIZATION, auth)279279- .header(header::CONTENT_TYPE, "application/json")280280- .body(Body::new(serde_json::to_string(&create).unwrap()))281281- .expect("sh.tangled.repo.create request"),282282- )283283- .await284284- .expect("xrpc response");285285-286286- response287287- }288288-289289- async fn create_repo(290290- knot: &Knot,291291- pds: mock_pds::Pds,292292- did: &Did,293293- rkey: &str,294294- repo_name: &str,295295- source: Option<&str>,296296- ) -> Response<Body> {297297- create_repo_with(knot, pds, did, rkey, repo_name, source, |_| {}).await298298- }299299-300300- async fn repo_exists_in_db(knot: &Knot, did: &Did, rkey: &str) -> bool {301301- knot.resolve_repo_key(&crate::types::repository_path::RepositoryPath {302302- owner: did.into_boxed().into(),303303- name: rkey.into(),304304- })305305- .await306306- .is_ok()307307- }308308-309309- #[tokio::test]310310- async fn can_create_repo() {311311- let (_base, pds, knot) = crate::mock::setup(TEST_DID, TEST_INSTANCE).await;312312-313313- let did = Did::from_static(TEST_DID);314314- pds.insert_identity(did, "tjh.dev").await;315315- knot.add_member(316316- "",317317- "",318318- "",319319- &lexicon::sh_tangled::knot::Member::new(320320- &did,321321- knot.instance_ident(),322322- OffsetDateTime::now_utc(),323323- ),324324- )325325- .await326326- .unwrap();327327-328328- let rkey = Tid::from_datetime(OffsetDateTime::now_utc(), 0).to_string();329329- assert_eq!(330330- create_repo(&knot, pds, did, &rkey, "test-repo", None)331331- .await332332- .status(),333333- StatusCode::OK334334- );335335-336336- assert!(repo_exists_in_db(&knot, &did, &rkey).await);337337- 
}338338-339339- #[tokio::test]340340- async fn can_create_fork_from_at() {341341- let (_base, pds, knot) = crate::mock::setup(TEST_DID, TEST_INSTANCE).await;342342-343343- let did = Did::from_static(TEST_DID);344344- pds.insert_identity(did, "tjh.dev").await;345345- knot.add_member(346346- "",347347- "",348348- "",349349- &lexicon::sh_tangled::knot::Member::new(350350- &did,351351- knot.instance_ident(),352352- OffsetDateTime::now_utc(),353353- ),354354- )355355- .await356356- .unwrap();357357-358358- // Create a record for the repository to fork from.359359- // <https://pdsls.dev/at://did:plc:65gha4t3avpfpzmvpbwovss7/sh.tangled.repo/3m24udbjajf22#record>360360- let aturi = pds361361- .insert_record(362362- did,363363- "sh.tangled.repo",364364- "3m24udbjajf22",365365- &serde_json::json!({366366- "name": "gordian",367367- "knot": "gordian.tjh.dev",368368- "createdAt": "2025-10-01T10:45:52Z"369369- }),370370- )371371- .await;372372-373373- let rkey = Tid::from_datetime(OffsetDateTime::now_utc(), 0).to_string();374374- assert_eq!(375375- create_repo(&knot, pds, did, &rkey, "test-repo", Some(&aturi))376376- .await377377- .status(),378378- StatusCode::OK379379- );380380-381381- assert!(repo_exists_in_db(&knot, &did, &rkey).await);382382- }383383-384384- #[tokio::test]385385- async fn can_create_fork_from_http() {386386- let (_base, pds, knot) = crate::mock::setup(TEST_DID, TEST_INSTANCE).await;387387-388388- let did = Did::from_static(TEST_DID);389389- pds.insert_identity(did, "tjh.dev").await;390390- knot.add_member(391391- "",392392- "",393393- "",394394- &lexicon::sh_tangled::knot::Member::new(395395- &did,396396- knot.instance_ident(),397397- OffsetDateTime::now_utc(),398398- ),399399- )400400- .await401401- .unwrap();402402-403403- let rkey = Tid::from_datetime(OffsetDateTime::now_utc(), 0).to_string();404404- let source =405405- Some("https://gordian.tjh.dev/did:plc:65gha4t3avpfpzmvpbwovss7/3m24udbjajf22");406406- assert_eq!(407407- create_repo(&knot, pds, did, 
&rkey, "test-repo", source)408408- .await409409- .status(),410410- StatusCode::OK411411- );412412-413413- assert!(repo_exists_in_db(&knot, &did, &rkey).await);414414- }415415-416416- #[tokio::test]417417- async fn can_create_fork_from_http_fail() {418418- let (base, pds, knot) = crate::mock::setup(TEST_DID, TEST_INSTANCE).await;419419-420420- let did = Did::from_static(TEST_DID);421421- pds.insert_identity(did, "tjh.dev").await;422422- knot.add_member(423423- "",424424- "",425425- "",426426- &lexicon::sh_tangled::knot::Member::new(427427- &did,428428- knot.instance_ident(),429429- OffsetDateTime::now_utc(),430430- ),431431- )432432- .await433433- .unwrap();434434-435435- let rkey = Tid::from_datetime(OffsetDateTime::now_utc(), 0).to_string();436436- let source =437437- Some("https://gordian.tjh.dev/did:plc:65gha4t3avpfpmvpbwovss7/3m24udbjajf22");438438-439439- assert_ne!(440440- create_repo(&knot, pds, did, &rkey, "test-repo", source)441441- .await442442- .status(),443443- StatusCode::OK444444- );445445-446446- // Verifiy the repository wasn't created on disk.447447- assert!(448448- std::fs::exists(base.path().join(did.as_str()).join(&rkey)).is_ok_and(|val| !val),449449- );450450-451451- assert!(!repo_exists_in_db(&knot, &did, &rkey).await);452452- }453453-454454- #[tokio::test]455455- async fn rejects_if_owner_is_not_a_member() {456456- let (_base, pds, knot) = crate::mock::setup(TEST_DID, TEST_INSTANCE).await;457457-458458- let did = Did::from_static(TEST_DID);459459- pds.insert_identity(did, "tjh.dev").await;460460-461461- let rkey = Tid::from_datetime(OffsetDateTime::now_utc(), 0).to_string();462462- assert_ne!(463463- create_repo_with(&knot, pds, did, &rkey, "test-repo", None, |_| {})464464- .await465465- .status(),466466- StatusCode::OK,467467- );468468-469469- assert!(!repo_exists_in_db(&knot, &did, &rkey).await);470470- }471471-472472- #[tokio::test]473473- async fn rejects_auth_issued_in_future() {474474- let (_base, pds, knot) = 
crate::mock::setup(TEST_DID, TEST_INSTANCE).await;475475-476476- let did = Did::from_static(TEST_DID);477477- pds.insert_identity(did, "tjh.dev").await;478478- knot.add_member(479479- "",480480- "",481481- "",482482- &lexicon::sh_tangled::knot::Member::new(483483- &did,484484- knot.instance_ident(),485485- OffsetDateTime::now_utc(),486486- ),487487- )488488- .await489489- .unwrap();490490-491491- let rkey = Tid::from_datetime(OffsetDateTime::now_utc(), 0).to_string();492492- assert_eq!(493493- create_repo_with(&knot, pds, did, &rkey, "test-repo", None, |claims| {494494- //495495- claims.iat = OffsetDateTime::now_utc().unix_timestamp() + 60;496496- })497497- .await498498- .status(),499499- StatusCode::FORBIDDEN,500500- "iat > now => should be 403 Forbidden"501501- );502502-503503- assert!(!repo_exists_in_db(&knot, &did, &rkey).await);504504- }505505-506506- #[tokio::test]507507- async fn rejects_auth_expired() {508508- let (_base, pds, knot) = crate::mock::setup(TEST_DID, TEST_INSTANCE).await;509509-510510- let did = Did::from_static(TEST_DID);511511- pds.insert_identity(did, "tjh.dev").await;512512- knot.add_member(513513- "",514514- "",515515- "",516516- &lexicon::sh_tangled::knot::Member::new(517517- &did,518518- knot.instance_ident(),519519- OffsetDateTime::now_utc(),520520- ),521521- )522522- .await523523- .unwrap();524524-525525- let rkey = Tid::from_datetime(OffsetDateTime::now_utc(), 0).to_string();526526- assert_eq!(527527- create_repo_with(&knot, pds, did, &rkey, "test-repo", None, |claims| {528528- //529529- claims.exp = OffsetDateTime::now_utc().unix_timestamp() - 1;530530- })531531- .await532532- .status(),533533- StatusCode::FORBIDDEN,534534- "exp < now => should be 403 Forbidden"535535- );536536- }537537-538538- #[tokio::test]539539- async fn can_delete_repo() {540540- let (base, pds, knot) = crate::mock::setup(TEST_DID, TEST_INSTANCE).await;541541-542542- let did = Did::from_static(TEST_DID);543543- pds.insert_identity(did, "tjh.dev").await;544544- 
knot.add_member(545545- "",546546- "",547547- "",548548- &lexicon::sh_tangled::knot::Member::new(549549- &did,550550- knot.instance_ident(),551551- OffsetDateTime::now_utc(),552552- ),553553- )554554- .await555555- .unwrap();556556-557557- let rkey = Tid::from_datetime(OffsetDateTime::now_utc(), 0).to_string();558558- let name = "another-test-repo";559559- assert_eq!(560560- create_repo(&knot, pds.clone(), did, &rkey, name, None)561561- .await562562- .status(),563563- StatusCode::OK564564- );565565-566566- gix::open(base.path().join(did.as_str()).join(&rkey))567567- .expect("new repository should exist");568568- assert!(repo_exists_in_db(&knot, &did, &rkey).await);569569-570570- let delete = lexicon::sh_tangled::repo::delete::Input {571571- did: did.to_owned(),572572- rkey: rkey.clone(),573573- name: "another-test-repo".to_string(),574574- };575575-576576- // First check we cannot delete without auth.577577- assert_eq!(578578- public::router()579579- .with_state(knot.clone())580580- .oneshot(581581- Request::builder()582582- .method(Method::POST)583583- .uri("/xrpc/sh.tangled.repo.delete")584584- .header(header::CONTENT_TYPE, "application/json")585585- .body(Body::new(serde_json::to_string(&delete).unwrap()))586586- .expect("sh.tangled.repo.delete request"),587587- )588588- .await589589- .expect("xrpc response")590590- .status(),591591- StatusCode::UNAUTHORIZED592592- );593593-594594- // Check repository has not been deleted.595595- gix::open(base.path().join(did.as_str()).join(&rkey)).expect("repository should exist");596596- assert!(repo_exists_in_db(&knot, &did, &rkey).await);597597-598598- // Or with the wrong lxm.599599- let auth = service_auth_with(&pds, &did, &knot.instance(), |claims| {600600- claims.lxm = Some(SH_TANGLED_REPO_CREATE.into_boxed());601601- })602602- .await;603603-604604- assert_eq!(605605- public::router()606606- .with_state(knot.clone())607607- .oneshot(608608- Request::builder()609609- .method(Method::POST)610610- 
.uri("/xrpc/sh.tangled.repo.delete")611611- .header(header::CONTENT_TYPE, "application/json")612612- .header(header::AUTHORIZATION, auth)613613- .body(Body::new(serde_json::to_string(&delete).unwrap()))614614- .expect("sh.tangled.repo.delete request"),615615- )616616- .await617617- .expect("xrpc response")618618- .status(),619619- StatusCode::FORBIDDEN620620- );621621-622622- // Check repository has not been deleted.623623- gix::open(base.path().join(did.as_str()).join(&rkey)).expect("repository should exist");624624- assert!(repo_exists_in_db(&knot, &did, &rkey).await);625625-626626- // Valid auth, empty request body.627627- // Or with the wrong auth.628628- let auth = service_auth_with(&pds, &did, &knot.instance(), |claims| {629629- claims.lxm = Some(SH_TANGLED_REPO_DELETE.into_boxed());630630- })631631- .await;632632- assert_eq!(633633- public::router()634634- .with_state(knot.clone())635635- .oneshot(636636- Request::builder()637637- .method(Method::POST)638638- .uri("/xrpc/sh.tangled.repo.delete")639639- .header(header::CONTENT_TYPE, "application/json")640640- .header(header::AUTHORIZATION, auth)641641- .body(Body::empty())642642- .expect("sh.tangled.repo.delete request"),643643- )644644- .await645645- .expect("xrpc response")646646- .status(),647647- StatusCode::BAD_REQUEST648648- );649649-650650- // Check repository has not been deleted.651651- gix::open(base.path().join(did.as_str()).join(&rkey)).expect("repository should exist");652652- assert!(repo_exists_in_db(&knot, &did, &rkey).await);653653-654654- // Or with the wrong auth.655655- let auth = service_auth_with(&pds, &did, &knot.instance(), |claims| {656656- claims.lxm = Some("sh.tangled.repo.delete".try_into().unwrap());657657- })658658- .await;659659-660660- assert_eq!(661661- public::router()662662- .with_state(knot.clone())663663- .oneshot(664664- Request::builder()665665- .method(Method::POST)666666- .uri("/xrpc/sh.tangled.repo.delete")667667- .header(header::CONTENT_TYPE, 
"application/json")668668- .header(header::AUTHORIZATION, auth)669669- .body(Body::new(serde_json::to_string(&delete).unwrap()))670670- .expect("sh.tangled.repo.delete request"),671671- )672672- .await673673- .expect("xrpc response")674674- .status(),675675- StatusCode::OK676676- );677677-678678- // Check repository has been deleted.679679- gix::open(base.path().join(did.as_str()).join(&rkey))680680- .expect_err("deleted repository should not exist");681681- assert!(!repo_exists_in_db(&knot, &did, &rkey).await);682682- }683683- }684684-}
-290
crates/knot/src/main.rs
···11-mod cli;22-mod hooks;33-44-use anyhow::Context as _;55-use axum::http::{Request, Response};66-use futures_util::FutureExt as _;77-use knot::{88- model::{Knot, KnotState, config::KnotConfiguration},99- services::database::DataStore,1010-};1111-use sqlx::sqlite::{SqliteConnectOptions, SqlitePoolOptions};1212-use std::{env, ffi::OsStr, net::ToSocketAddrs as _, time::Duration};1313-use tokio::{net::TcpListener, signal::unix::SignalKind, task::JoinSet};1414-use tokio::{runtime::Builder, signal};1515-use tokio_util::sync::CancellationToken;1616-use tower::ServiceBuilder;1717-use tower_http::{1818- ServiceBuilderExt as _,1919- decompression::RequestDecompressionLayer,2020- request_id::{MakeRequestUuid, RequestId},2121- trace::{MakeSpan, OnResponse, TraceLayer},2222-};2323-use tracing::{Span, field::Empty, level_filters::LevelFilter};2424-use tracing_subscriber::{EnvFilter, layer::SubscriberExt as _, util::SubscriberInitExt as _};2525-2626-#[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))]2727-use tikv_jemallocator::Jemalloc;2828-2929-#[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))]3030-#[global_allocator]3131-static GLOBAL: Jemalloc = Jemalloc;3232-3333-const USER_AGENT: &str = concat!(env!("CARGO_PKG_NAME"), "/", env!("CARGO_PKG_VERSION"));3434-3535-fn main() -> anyhow::Result<()> {3636- tracing_subscriber::registry()3737- .with(3838- EnvFilter::builder()3939- .with_default_directive(LevelFilter::INFO.into())4040- .from_env_lossy(),4141- )4242- .with(4343- tracing_subscriber::fmt::layer()4444- .with_writer(std::io::stderr)4545- .without_time(),4646- )4747- .init();4848-4949- let runtime = Builder::new_current_thread()5050- .enable_all()5151- .build()5252- .expect("Failed to build runtime");5353-5454- match cli::parse() {5555- cli::KnotCommand::Generate(_) => unreachable!("Handled by cli module"),5656- cli::KnotCommand::Serve(arguments) => runtime.block_on(knot_main(arguments)),5757- cli::KnotCommand::Hook(arguments) => 
runtime.block_on(hooks::run_hook(arguments)),5858- }5959-}6060-6161-pub async fn knot_main(arguments: cli::ServeArguments) -> anyhow::Result<()> {6262- unsafe { env::set_var("GIT_CONFIG_GLOBAL", &arguments.git_config) };6363-6464- let tempdir = tempfile::TempDir::with_prefix("gordian-knot-")?;6565- let hooks_path = if let Some(path) = &arguments.hooks {6666- // @TODO Verify hooks exist in the specified path.6767- tracing::warn!(?path, "assuming existence of hooks at path");6868- path.to_path_buf()6969- } else {7070- let path = tempdir.path().join("hooks");7171- hooks::setup_global_hooks(&path)?;7272- path7373- };7474-7575- assert!(git_config_global("core.hooksPath", &hooks_path)?);7676- assert!(git_config_global("receive.advertisePushOptions", "true")?);7777- if let Some(command) = &arguments.archive_bz2_command {7878- assert!(git_config_global("tar.tar.bz2.command", command)?);7979- }8080- if let Some(command) = &arguments.archive_xz_command {8181- assert!(git_config_global("tar.tar.xz.command", command)?);8282- }8383-8484- let database = {8585- let pool = {8686- let connect_options = SqliteConnectOptions::new()8787- .filename(&arguments.db)8888- .create_if_missing(true)8989- .foreign_keys(true)9090- .journal_mode(sqlx::sqlite::SqliteJournalMode::Wal);9191-9292- SqlitePoolOptions::new()9393- .connect_with(connect_options)9494- .await?9595- };9696-9797- sqlx::migrate!().run(&pool).await?;9898- DataStore::new(pool)9999- };100100-101101- let public_http = reqwest::ClientBuilder::new()102102- .timeout(Duration::from_secs(2))103103- .user_agent(USER_AGENT)104104- .http2_keep_alive_while_idle(true)105105- .https_only(true)106106- .build()107107- .context("Failed to build public HTTP client")?;108108-109109- let resolver = arguments.init_resolver(public_http.clone());110110-111111- // Bind listeners for the public API.112112- let mut public_listeners = Vec::with_capacity(arguments.bind.len());113113- for addr in &arguments.bind {114114- for socket in 
addr.to_socket_addrs()? {115115- let listener = TcpListener::bind(socket).await?;116116- public_listeners.push(listener);117117- }118118- }119119-120120- // Bind listeners for the private API.121121- let mut private_listeners = Vec::with_capacity(2);122122- for socket in "localhost:0".to_socket_addrs()? {123123- let listener = TcpListener::bind(socket).await?;124124- private_listeners.push(listener);125125- }126126-127127- // The knot needs to know the sockets we've bound the private API.128128- let private_addrs = private_listeners129129- .iter()130130- .map(tokio::net::TcpListener::local_addr)131131- .collect::<Result<Vec<_>, std::io::Error>>()?;132132-133133- tracing::info!(?private_addrs, "bound internal API");134134-135135- let config: KnotConfiguration = arguments.to_knot_config()?;136136- let knot_state = KnotState::new(config, resolver, public_http, database, &private_addrs)?;137137- let knot = Knot::from(knot_state);138138-139139- // Ensure the knot owner's records are seeded.140140- knot.seed_owner()141141- .await142142- .context("seeding knot owner's records")?;143143-144144- let mut tasks = JoinSet::new();145145- let shutdown = CancellationToken::new();146146-147147- // Spawn the internal API.148148- tasks.spawn(knot::serve_all(149149- knot::private::router()150150- .layer(151151- ServiceBuilder::new()152152- .set_x_request_id(MakeRequestUuid)153153- .layer(154154- TraceLayer::new_for_http()155155- .make_span_with(PrivateHttpSpan)156156- .on_request(|_: &Request<_>, _: &Span| {})157157- .on_response(TraceResponse),158158- )159159- .propagate_x_request_id(),160160- )161161- .with_state(knot.clone()),162162- private_listeners,163163- shutdown.child_token(),164164- ));165165-166166- // Spawn the jetstream consumer.167167- tasks.spawn(168168- knot::services::jetstream::init_consumer(169169- &knot,170170- arguments.jetstream.as_slice(),171171- shutdown.child_token(),172172- )173173- .map(|_| Ok(())),174174- );175175-176176- // Build the public API.177177- 
let router = knot::public::router()178178- .layer(RequestDecompressionLayer::new())179179- .layer(180180- ServiceBuilder::new()181181- .set_x_request_id(MakeRequestUuid)182182- .layer(183183- TraceLayer::new_for_http()184184- .make_span_with(PublicHttpSpan)185185- .on_request(|_: &Request<_>, _: &Span| {})186186- .on_response(TraceResponse),187187- )188188- .propagate_x_request_id(),189189- )190190- .with_state(knot);191191-192192- tasks.spawn(knot::serve_all(193193- router,194194- public_listeners,195195- shutdown.child_token(),196196- ));197197-198198- tasks.spawn(wait_for_shutdown(shutdown));199199-200200- for task in tasks.join_all().await {201201- if let Err(error) = task {202202- tracing::error!(?error, "knot task completed with error");203203- }204204- }205205-206206- Ok(())207207-}208208-209209-async fn wait_for_shutdown(shutdown: CancellationToken) -> std::io::Result<()> {210210- let mut sigterm = signal::unix::signal(SignalKind::terminate())?;211211-212212- tokio::select! {213213- Ok(()) = signal::ctrl_c() => {214214- eprintln!();215215- tracing::info!("ctrl+c received, shutting down ...");216216- },217217- Some(()) = sigterm.recv() => {218218- tracing::info!("SIGTERM received, shutting down ...");219219- }220220- }221221-222222- shutdown.cancel();223223-224224- Ok(())225225-}226226-227227-fn git_config_global<K, V>(key: K, value: V) -> std::io::Result<bool>228228-where229229- K: AsRef<OsStr>,230230- V: AsRef<OsStr>,231231-{232232- use std::process::Stdio;233233-234234- let success = std::process::Command::new("/usr/bin/git")235235- .args(["config", "set", "--global"])236236- .arg(key)237237- .arg(value)238238- .stdout(Stdio::inherit())239239- .stderr(Stdio::inherit())240240- .spawn()?241241- .wait()?242242- .success();243243-244244- Ok(success)245245-}246246-247247-macro_rules! 
make_span {248248- ($name:ident, $label:literal) => {249249- #[derive(Clone)]250250- struct $name;251251-252252- impl<B> MakeSpan<B> for $name {253253- fn make_span(&mut self, request: &axum::http::Request<B>) -> tracing::Span {254254- let method = request.method();255255- let path = request.uri().path();256256-257257- let span = tracing::error_span!($label, id = Empty, method = Empty, path = Empty);258258- if let Some(id) = request259259- .extensions()260260- .get::<RequestId>()261261- .and_then(|request_id| request_id.header_value().to_str().ok())262262- {263263- span.record("id", &id);264264- }265265-266266- span.record("method", tracing::field::debug(&method));267267- span.record("path", tracing::field::debug(&path));268268-269269- span270270- }271271- }272272- };273273-}274274-275275-make_span!(PublicHttpSpan, "public");276276-make_span!(PrivateHttpSpan, "private");277277-278278-#[derive(Clone)]279279-pub struct TraceResponse;280280-281281-impl<B> OnResponse<B> for TraceResponse {282282- fn on_response(self, response: &Response<B>, latency: Duration, _: &Span) {283283- match response.status() {284284- status if status.is_success() => tracing::trace!(?status, ?latency),285285- status if status.is_client_error() => tracing::warn!(?status, ?latency),286286- status if status.is_server_error() => tracing::error!(?status, ?latency),287287- status => tracing::info!(?status, ?latency),288288- }289289- }290290-}
-35
crates/knot/src/mock.rs
···11-use crate::{22- model::{Knot, config::KnotConfiguration},33- services::database::DataStore,44-};55-use atproto::did::OwnedDid;66-use identity::Resolver;77-88-pub async fn setup(99- owner_did: &str,1010- instance_name: &str,1111-) -> (tempfile::TempDir, mock_pds::Pds, Knot) {1212- let base = tempfile::tempdir().expect("temporary directory");1313- let pool = sqlx::SqlitePool::connect("sqlite://:memory:")1414- .await1515- .unwrap();1616-1717- sqlx::migrate!().run(&pool).await.unwrap();1818-1919- let (pds, listener) = mock_pds::init().await;2020- let pds_api = mock_pds::router(pds.clone());2121- tokio::spawn(async move {2222- axum::serve(listener, pds_api).await.unwrap();2323- });2424-2525- let owner_did = OwnedDid::parse(owner_did).expect("owner DID must be valid");2626- let instance = OwnedDid::parse(format!("did:web:{instance_name}"))2727- .expect("instance name should form a valid DID");2828-2929- let database = DataStore::new(pool);3030- let resolver = Resolver::new(pds.clone());3131- let config = KnotConfiguration::new(owner_did.clone(), instance, base.path());3232- let knot = Knot::new(config, resolver, reqwest::Client::new(), database, []).unwrap();3333-3434- (base, pds, knot)3535-}
-259
crates/knot/src/model.rs
···11-pub mod config;22-pub mod convert;33-pub mod errors;44-pub mod knot_state;55-pub mod nicediff;66-pub mod repository;77-88-use core::ops;99-use std::{borrow::Cow, ffi::OsString, net::SocketAddr, sync::Arc};1010-1111-use atproto::tid::Tid;1212-use axum::{1313- extract::{FromRef, FromRequestParts, OptionalFromRequestParts},1414- http::request::Parts,1515-};1616-use futures_util::future::BoxFuture;1717-use git_service::{state::GitServiceState, util::SetOptionEnv as _};1818-use identity::{HttpClient, Resolver};1919-use lexicon::sh_tangled::knot::Member;2020-use time::OffsetDateTime;2121-use tokio::process::Command;2222-2323-use crate::{2424- extractors::request_id::RequestId,2525- model::{config::KnotConfiguration, repository::TangledRepository},2626- private,2727- public::git::{Error, GitAuthorization},2828- services::{2929- authorization::{AuthorizationClaimsStore, AuthorizationClaimsStoreError},3030- database::DataStore,3131- },3232-};3333-3434-pub use knot_state::KnotState;3535-3636-#[derive(Debug, Clone)]3737-#[repr(transparent)]3838-pub struct Knot {3939- inner: Arc<KnotState>,4040-}4141-4242-impl From<Arc<KnotState>> for Knot {4343- #[inline]4444- fn from(inner: Arc<KnotState>) -> Self {4545- Self { inner }4646- }4747-}4848-4949-impl FromRef<Knot> for Resolver {5050- #[inline]5151- fn from_ref(input: &Knot) -> Self {5252- input.resolver().clone()5353- }5454-}5555-5656-impl ops::Deref for Knot {5757- type Target = KnotState;5858- #[inline]5959- fn deref(&self) -> &Self::Target {6060- &self.inner6161- }6262-}6363-6464-impl Knot {6565- pub fn new<'a>(6666- config: KnotConfiguration,6767- resolver: Resolver,6868- http: HttpClient,6969- database: DataStore,7070- private_binds: impl IntoIterator<Item = &'a SocketAddr>,7171- ) -> std::io::Result<Self> {7272- let inner = KnotState::new(config, resolver, http, database, private_binds)?;7373- Ok(Self { inner })7474- }7575-7676- pub async fn add_member(7777- &self,7878- rkey: &str,7979- rev: &str,8080- cid: &str,8181- 
member: &Member<'_>,8282- ) -> anyhow::Result<()> {8383- let new_member = self8484- .database()8585- .upsert_knot_member(rkey, rev, cid, member)8686- .await?;8787-8888- if new_member {8989- tracing::info!(member = %member.subject, "new knot member");9090- crate::services::seed::public_keys(self, &member.subject).await?;9191- crate::services::seed::repositories(self, &member.subject).await?;9292- }9393-9494- Ok(())9595- }9696-9797- pub async fn seed_owner(&self) -> anyhow::Result<()> {9898- self.add_member(9999- "",100100- &Tid::MAX.to_string(),101101- "",102102- &Member {103103- subject: Cow::Borrowed(self.owner()),104104- domain: Cow::Borrowed(self.instance_ident()),105105- created_at: OffsetDateTime::now_utc(),106106- },107107- )108108- .await109109- }110110-}111111-112112-impl AuthorizationClaimsStore<auth::jwt::Claims> for Knot {113113- fn get_unexpired_claims<'a: 'b, 'b>(114114- &'a self,115115- jti: &'b str,116116- now: i64,117117- ) -> BoxFuture<'b, Result<Option<auth::jwt::Claims>, AuthorizationClaimsStoreError>> {118118- self.inner.get_unexpired_claims(jti, now)119119- }120120-121121- fn store_claims(122122- &self,123123- claims: auth::jwt::Claims,124124- now: i64,125125- ) -> BoxFuture<'_, Result<(), AuthorizationClaimsStoreError>> {126126- self.inner.store_claims(claims, now)127127- }128128-}129129-130130-impl GitServiceState for Knot {131131- type Rejection = Error;132132-133133- async fn init_upload_archive(&self, parts: &mut Parts) -> Result<Command, Self::Rejection> {134134- let request_id = RequestId::from_request_parts(parts, self).await.unwrap();135135- let repository = TangledRepository::from_git_request(parts, self).await?;136136- let mut command = repository.git();137137- command138138- .option_env("X_REQUEST_ID", request_id)139139- .args(["upload-archive"])140140- .arg(repository.path());141141-142142- Ok(command.into())143143- }144144-145145- async fn init_upload_pack_advertisement(146146- &self,147147- parts: &mut Parts,148148- ) -> 
Result<tokio::process::Command, Self::Rejection> {149149- let request_id = RequestId::from_request_parts(parts, self).await.unwrap();150150- let repository = TangledRepository::from_git_request(parts, self).await?;151151- let mut command = repository.git();152152- command153153- .option_env("X_REQUEST_ID", request_id)154154- .args([155155- "upload-pack",156156- "--http-backend-info-refs",157157- "--stateless-rpc",158158- "--strict",159159- "--timeout=10",160160- ])161161- .arg(repository.path());162162-163163- Ok(command.into())164164- }165165-166166- async fn init_upload_pack(167167- &self,168168- parts: &mut Parts,169169- ) -> Result<tokio::process::Command, Self::Rejection> {170170- let request_id = RequestId::from_request_parts(parts, self).await.unwrap();171171- let repository = TangledRepository::from_git_request(parts, self).await?;172172- let mut command = repository.git();173173- command174174- .option_env("X_REQUEST_ID", request_id)175175- .args(["upload-pack", "--strict", "--stateless-rpc"])176176- .arg(repository.path());177177-178178- Ok(command.into())179179- }180180-181181- async fn init_receive_pack_advertisement(182182- &self,183183- parts: &mut Parts,184184- ) -> Result<tokio::process::Command, Self::Rejection> {185185- let GitAuthorization(auth) = GitAuthorization::from_request_parts(parts, self).await?;186186- let request_id = RequestId::from_request_parts(parts, self).await.unwrap();187187- let repository = TangledRepository::from_git_request(parts, self).await?;188188-189189- if !self.can_push(repository.repository_key(), &auth.iss).await {190190- tracing::error!(did = %auth.iss, "push denied");191191- return Err(Error::forbidden(192192- self,193193- format!(194194- "'{}' does not have permission to push to this repository",195195- auth.iss196196- ),197197- ))?;198198- }199199-200200- let nonce_seed = self.generate_push_seed(repository.repository_key());201201- let mut command = repository.git();202202- command203203- 
.env(private::ENV_USER_DID, auth.iss.as_str())204204- .option_env("X_REQUEST_ID", request_id)205205- .args([206206- "-c",207207- &nonce_seed,208208- "receive-pack",209209- "--http-backend-info-refs",210210- "--stateless-rpc",211211- ])212212- .arg(repository.path());213213-214214- Ok(command.into())215215- }216216-217217- async fn init_receive_pack(218218- &self,219219- parts: &mut Parts,220220- ) -> Result<tokio::process::Command, Self::Rejection> {221221- let GitAuthorization(auth) = GitAuthorization::from_request_parts(parts, self).await?;222222- let request_id = RequestId::from_request_parts(parts, self).await.unwrap();223223- let repository = TangledRepository::from_git_request(parts, self).await?;224224-225225- if !self.can_push(repository.repository_key(), &auth.iss).await {226226- tracing::error!(did = %auth.iss, "push denied");227227- return Err(Error::forbidden(228228- self,229229- format!(230230- "'{}' does not have permission to push to this repository",231231- auth.iss232232- ),233233- ))?;234234- }235235-236236- let allowed_signers_path = std::env::current_dir()237237- .unwrap()238238- .join("allowed_signers")239239- .join(auth.iss.as_str());240240-241241- let mut allowed_signers_option = OsString::with_capacity(242242- "gpg.ssh.allowedSignersFile=".len() + allowed_signers_path.as_os_str().len(),243243- );244244- allowed_signers_option.push("gpg.ssh.allowedSignersFile=");245245- allowed_signers_option.push(&allowed_signers_path);246246-247247- let nonce_seed = self.generate_push_seed(repository.repository_key());248248- let mut command = repository.git();249249- command250250- .env(private::ENV_USER_DID, auth.iss.as_str())251251- .option_env("X_REQUEST_ID", request_id)252252- .args(["-c", &nonce_seed, "-c"])253253- .arg(&allowed_signers_option)254254- .args(["receive-pack", "--stateless-rpc"])255255- .arg(repository.path());256256-257257- Ok(command.into())258258- }259259-}
···11-use atproto::Nsid;22-use auth::{33- IntoVerificationKey, OpenSshKey,44- jwt::{Claims, Token, decode},55-};66-use axum::{77- extract::{FromRef, FromRequestParts},88- http::{header::AUTHORIZATION, request::Parts},99-};1010-use identity::Resolver;1111-use time::OffsetDateTime;1212-1313-use crate::{1414- model::Knot,1515- nsid::SH_TANGLED_REPO_GITRECEIVEPACK,1616- services::authorization::{1717- AuthorizationClaimsStore as _, Verification, VerificationError, extract_token,1818- },1919-};2020-2121-use super::Error;2222-2323-#[derive(Debug)]2424-struct GitVerification;2525-2626-impl Verification for GitVerification {2727- const LEXICON_METHOD: &'static Nsid = SH_TANGLED_REPO_GITRECEIVEPACK;2828-}2929-3030-#[derive(Clone, Debug)]3131-pub struct GitAuthorization(pub Claims);3232-3333-impl<S: Sync> FromRequestParts<S> for GitAuthorization3434-where3535- Knot: FromRef<S>,3636- Resolver: FromRef<S>,3737-{3838- type Rejection = Error;3939-4040- async fn from_request_parts(parts: &mut Parts, state: &S) -> Result<Self, Self::Rejection> {4141- let knot = Knot::from_ref(state);4242- let resolver = Resolver::from_ref(state);4343- let now = OffsetDateTime::now_utc().unix_timestamp();4444-4545- let credential = extract_token(parts, AUTHORIZATION, "bearer").ok_or(4646- Error::unauthorized(&knot, "inter-service authorization required"),4747- )?;4848-4949- let unverified_token = Token::decode_unverified(credential)5050- .map_err(|_| Error::unauthorized(&knot, "inter-service authorization required"))?;5151-5252- // Before performing a relatively expensive DID look-up, ensure the token5353- // claims are valid.5454- let unverified_claims = unverified_token.claims;5555- GitVerification::verify(&knot, now, knot.instance(), &unverified_claims)5656- .await5757- .map_err(|error| match error {5858- // Git re-uses the token from the credential helper for each request in a single push.5959- //6060- // Returning 'Forbidden' here will make git abort. 
Instead, we return an Unauthorized6161- // which will force git to get a new token from the credential helper.6262- VerificationError::Reused => Error::unauthorized(&knot, "authorization re-used"),6363- error => Error::forbidden(&knot, error.to_string()),6464- })?;6565-6666- // Resolve the DID document for the claimed issuer, extract and parse6767- // the verification methods into public keys.6868-6969- let (resolved_did, doc) = resolver7070- .resolve(unverified_claims.iss.as_str())7171- .await7272- .map_err(|error| Error::forbidden(&knot, error.to_string()))?;7373-7474- assert_eq!(unverified_claims.iss, resolved_did);7575-7676- let verification_keys = doc7777- .verification_method7878- .into_iter()7979- .filter_map(|vm| vm.into_verification_key().ok());8080-8181- // Try to decode and verify the JWT using any one of the verification keys8282- // we have for the DID.8383- for verification_key in verification_keys {8484- if let Ok(token) = decode::<Claims>(credential, &verification_key) {8585- // Store the JWT so it cannot be re-used within the claim period.8686- knot.store_claims(token.claims.clone(), now).await?;8787- return Ok(Self(token.claims));8888- }8989- }9090-9191- // Read the 'sh.tangled.publicKey' records the knot has associated9292- // with claimed issuer.9393- let public_keys = knot9494- .database()9595- .public_keys_for_did(&unverified_claims.iss)9696- .await9797- .unwrap_or_default()9898- .into_iter()9999- .filter_map(|public_key| OpenSshKey(public_key.key).into_verification_key().ok());100100-101101- // Try to decode and verify the JWT using any one of the public keys102102- // we have for the DID.103103- for verification_key in public_keys {104104- if let Ok(token) = decode::<Claims>(credential, &verification_key) {105105- // Store the JWT so it cannot be re-used within the claim period.106106- knot.store_claims(token.claims.clone(), now).await?;107107- return Ok(Self(token.claims));108108- }109109- }110110-111111- Err(Error::forbidden(&knot, "No 
valid authorization found"))?112112- }113113-}
···11-pub mod list_records {22- //!33- //! List a range of records in a repository, matching a specific44- //! collection. Does not require auth.55- //!66- //! <https://docs.bsky.app/docs/api/com-atproto-repo-list-records>77- //!88- use atproto::RecordUri;99-1010- #[derive(Debug, serde::Deserialize, serde::Serialize)]1111- pub struct Input {1212- /// The handle or DID of the repo.1313- pub repo: String,1414-1515- /// The NSID of the record type.1616- pub collection: String,1717-1818- /// The number of records to return.1919- ///2020- /// Possible values: 0..=100.2121- #[serde(skip_serializing_if = "Option::is_none")]2222- pub limit: Option<usize>,2323-2424- #[serde(skip_serializing_if = "Option::is_none")]2525- pub cursor: Option<String>,2626-2727- /// Flag to reverse the order of the returned records.2828- #[serde(default)]2929- pub reverse: bool,3030- }3131-3232- #[derive(Debug, serde::Deserialize, serde::Serialize)]3333- pub struct Output {3434- pub cursor: Option<String>,3535-3636- pub records: Vec<Record>,3737- }3838-3939- #[derive(Debug, serde::Deserialize, serde::Serialize)]4040- pub struct Record {4141- pub uri: RecordUri,4242-4343- pub cid: String,4444-4545- pub value: Box<serde_json::value::RawValue>,4646- }4747-}
···991010pub mod com_atproto {1111 pub mod repo {1212- use atproto::did::OwnedDid;1312 use axum::{1413 Json, Router,1514 extract::{FromRef, Query, State},1615 http::StatusCode,1716 response::IntoResponse,1817 };1818+ use gordian_types::OwnedDid;1919 use serde_json::Value;2020 use sqlx::Row as _;2121
+21-19
crates/mock-pds/src/state.rs
···11use std::{fmt::Debug, net::SocketAddr, sync::Arc};2233-use atproto::{did::OwnedDid, tid::Tid};44-use auth::jwt::{Claims, Header};53use aws_lc_rs::{64 encoding::{AsBigEndian as _, EcPublicKeyCompressedBin},75 rand::SystemRandom,86 signature::{ECDSA_P256K1_SHA256_FIXED_SIGNING, EcdsaKeyPair, KeyPair as _},97};108use futures_util::FutureExt as _;1111-use identity::DidDocument;99+use gordian_auth::jwt;1010+use gordian_identity::DidDocument;1111+use gordian_types::{OwnedDid, Tid};1212use sqlx::{1313 SqlitePool,1414 sqlite::{SqliteConnectOptions, SqlitePoolOptions},···4747 /// The internal address of the mock PDS will be set as the "#atproto_pds" service for4848 /// the new identity.4949 ///5050- pub async fn insert_identity(&self, did: &atproto::Did, handle: &str) {5050+ pub async fn insert_identity(&self, did: &gordian_types::Did, handle: &str) {5151 let mut doc = DidDocument::new(did, handle).expect("valid did for did document");5252- doc.service5353- .push(identity::Service::atproto_pds(self.service_endpoint()));5252+ doc.service.push(gordian_identity::Service::atproto_pds(5353+ self.service_endpoint(),5454+ ));54555556 // Generate a key pair and encode the public key as verification method for5657 // the mock user.···6160 key_data.extend_from_slice(public_key.as_ref());6261 let public_key_multibase = multibase::encode(multibase::Base::Base58Btc, key_data);6362 doc.verification_method6464- .push(identity::VerificationMethod::Multikey {6363+ .push(gordian_identity::VerificationMethod::Multikey {6564 id: format!("{}#atproto", doc.id),6665 controller: doc.id.clone(),6766 public_key_multibase,···84838584 pub async fn insert_record<T>(8685 &self,8787- repo: &atproto::Did,8686+ repo: &gordian_types::Did,8887 collection: &str,8988 rkey: &str,9089 value: &T,···120119 }121120122121 // Create an inter-service auth header for an account in the fake PDS.123123- pub async fn service_auth(&self, claims: &Claims) -> String {122122+ pub async fn service_auth(&self, claims: 
&jwt::Claims) -> String {124123 use data_encoding::BASE64URL_NOPAD as Encoding;125124 use sqlx::Row as _;126125127126 let mut token = String::new();128127 let header = Encoding.encode(129129- &serde_json::to_vec(&Header {130130- typ: auth::jwt::Type::JWT,131131- alg: auth::jwt::Algorithm::ES256K,128128+ &serde_json::to_vec(&jwt::Header {129129+ typ: jwt::Type::JWT,130130+ alg: jwt::Algorithm::ES256K,132131 crv: None,133132 })134133 .unwrap(),···169168 }170169}171170172172-impl identity::ResolveIdentity for Pds {171171+impl gordian_identity::ResolveIdentity for Pds {173172 fn resolve_handle<'s: 'h, 'h>(174173 &'s self,175174 handle: &'h str,176176- ) -> futures_util::future::BoxFuture<'h, Result<OwnedDid, identity::ResolveError>> {175175+ ) -> futures_util::future::BoxFuture<'h, Result<OwnedDid, gordian_identity::ResolveError>> {177176 use sqlx::Row as _;178177 async move {179178 let result = sqlx::query("SELECT did FROM identity WHERE handle = ?")···181180 .fetch_one(self.db())182181 .await183182 .inspect_err(|error| eprintln!("{error:?}"))184184- .map_err(|_| identity::ResolveError::UnresolvedHandle)?;183183+ .map_err(|_| gordian_identity::ResolveError::UnresolvedHandle)?;185184186186- let did: &atproto::Did = result.get("did");185185+ let did: &gordian_types::Did = result.get("did");187186 Ok(did.to_owned())188187 }189188 .boxed()···191190192191 fn resolve_did<'s: 'd, 'd>(193192 &'s self,194194- did: &'d atproto::Did,195195- ) -> futures_util::future::BoxFuture<'d, Result<DidDocument, identity::ResolveError>> {193193+ did: &'d gordian_types::Did,194194+ ) -> futures_util::future::BoxFuture<'d, Result<DidDocument, gordian_identity::ResolveError>>195195+ {196196 use sqlx::Row as _;197197 async move {198198 let result = sqlx::query("SELECT doc FROM identity WHERE did = ?")···201199 .fetch_one(self.db())202200 .await203201 .inspect_err(|error| eprintln!("{error:?}"))204204- .map_err(|_| identity::ResolveError::UnresolvedHandle)?;202202+ .map_err(|_| 
gordian_identity::ResolveError::UnresolvedHandle)?;205203206204 let doc: &str = result.get("doc");207205 let doc = serde_json::from_str(doc).unwrap();