atproto user agency toolkit for individuals and groups
8
fork

Configure Feed

Select the types of activity you want to include in your feed.

Drop node identity, add lexicon definitions and Tauri desktop app

Remove the did:web node identity layer — p2pds is now infrastructure
that acts on behalf of authenticated atproto users, not an entity with
its own identity. Records (peer, offer) publish to the user's own repo.

- Delete src/node-identity.ts and its tests
- Remove NODE_DID, NODE_MANAGERS, NODE_NAME from Config
- Simplify server.ts (single repo), index.ts (no NodeIdentityOpts),
auth.ts (config.DID only), replication-manager.ts (no peer publishing)
- Update all xrpc handlers and 13 test files
- Add lexicon JSON schemas for org.p2pds.peer and org.p2pds.replication.offer
- Add src/lexicons.ts loader/validator, wire into RecordValidator
- Add Tauri v2 desktop app skeleton at apps/desktop/ with sidecar pattern
- Add npm workspaces config

+998 -618
+15
apps/desktop/package.json
··· 1 + { 2 + "name": "p2pds-desktop", 3 + "version": "0.1.0", 4 + "private": true, 5 + "scripts": { 6 + "dev": "tauri dev", 7 + "build": "tauri build", 8 + "build:sidecar": "node scripts/build-sidecar.js" 9 + }, 10 + "devDependencies": { 11 + "@tauri-apps/cli": "^2.0.0", 12 + "@tauri-apps/api": "^2.0.0", 13 + "@tauri-apps/plugin-shell": "^2.0.0" 14 + } 15 + }
+98
apps/desktop/scripts/build-sidecar.js
··· 1 + #!/usr/bin/env node 2 + 3 + /** 4 + * Build p2pds as a standalone binary for Tauri sidecar. 5 + * 6 + * Requires: npm install -g @yao-pkg/pkg 7 + * 8 + * This script compiles the p2pds Node.js server into a standalone 9 + * executable and places it in src-tauri/binaries/ with the 10 + * platform-specific naming convention that Tauri expects: 11 + * 12 + * binaries/p2pds-<target-triple> 13 + * 14 + * Target triples: 15 + * - x86_64-apple-darwin (macOS Intel) 16 + * - aarch64-apple-darwin (macOS Apple Silicon) 17 + * - x86_64-unknown-linux-gnu (Linux x86_64) 18 + * - x86_64-pc-windows-msvc (Windows x86_64) 19 + * 20 + * Usage: 21 + * node scripts/build-sidecar.js 22 + * 23 + * Steps to build manually: 24 + * 1. Build the p2pds TypeScript source: 25 + * cd ../../ && npm run build 26 + * 2. Install pkg globally: 27 + * npm install -g @yao-pkg/pkg 28 + * 3. Package the built JS into a binary: 29 + * pkg ../../dist/server.js --targets node20-<platform>-<arch> --output src-tauri/binaries/p2pds-<target-triple> 30 + */ 31 + 32 + import { execSync } from "node:child_process"; 33 + import { existsSync, mkdirSync } from "node:fs"; 34 + import { resolve, dirname } from "node:path"; 35 + import { fileURLToPath } from "node:url"; 36 + 37 + const __dirname = dirname(fileURLToPath(import.meta.url)); 38 + const ROOT = resolve(__dirname, "..", "..", ".."); 39 + const BINARIES_DIR = resolve(__dirname, "..", "src-tauri", "binaries"); 40 + 41 + // Map Node.js platform/arch to Tauri target triples and pkg targets 42 + const TARGET_MAP = { 43 + "darwin-arm64": { 44 + triple: "aarch64-apple-darwin", 45 + pkg: "node20-macos-arm64", 46 + }, 47 + "darwin-x64": { 48 + triple: "x86_64-apple-darwin", 49 + pkg: "node20-macos-x64", 50 + }, 51 + "linux-x64": { 52 + triple: "x86_64-unknown-linux-gnu", 53 + pkg: "node20-linux-x64", 54 + }, 55 + "win32-x64": { 56 + triple: "x86_64-pc-windows-msvc", 57 + pkg: "node20-win-x64", 58 + }, 59 + }; 60 + 61 + const platformKey = 
`${process.platform}-${process.arch}`; 62 + const target = TARGET_MAP[platformKey]; 63 + 64 + if (!target) { 65 + console.error(`Unsupported platform: ${platformKey}`); 66 + console.error(`Supported: ${Object.keys(TARGET_MAP).join(", ")}`); 67 + process.exit(1); 68 + } 69 + 70 + if (!existsSync(BINARIES_DIR)) { 71 + mkdirSync(BINARIES_DIR, { recursive: true }); 72 + } 73 + 74 + const ext = process.platform === "win32" ? ".exe" : ""; 75 + const outputPath = resolve(BINARIES_DIR, `p2pds-${target.triple}${ext}`); 76 + const entryPoint = resolve(ROOT, "dist", "server.js"); 77 + 78 + console.log(`Building p2pds sidecar for ${platformKey} (${target.triple})`); 79 + console.log(` Entry: ${entryPoint}`); 80 + console.log(` Output: ${outputPath}`); 81 + 82 + // Step 1: Build TypeScript 83 + console.log("\n[1/2] Building TypeScript..."); 84 + execSync("npm run build", { cwd: ROOT, stdio: "inherit" }); 85 + 86 + // Step 2: Package with pkg 87 + console.log("\n[2/2] Packaging with pkg..."); 88 + try { 89 + execSync( 90 + `npx @yao-pkg/pkg "${entryPoint}" --targets ${target.pkg} --output "${outputPath}"`, 91 + { cwd: ROOT, stdio: "inherit" } 92 + ); 93 + console.log(`\nSidecar built successfully: ${outputPath}`); 94 + } catch (err) { 95 + console.error("\nFailed to build sidecar. Make sure @yao-pkg/pkg is available:"); 96 + console.error(" npm install -g @yao-pkg/pkg"); 97 + process.exit(1); 98 + }
+13
apps/desktop/src-tauri/Cargo.toml
··· 1 + [package] 2 + name = "p2pds-desktop" 3 + version = "0.1.0" 4 + edition = "2021" 5 + 6 + [dependencies] 7 + tauri = { version = "2", features = [] } 8 + tauri-plugin-shell = "2" 9 + serde = { version = "1", features = ["derive"] } 10 + serde_json = "1" 11 + 12 + [build-dependencies] 13 + tauri-build = { version = "2", features = [] }
+3
apps/desktop/src-tauri/build.rs
··· 1 + fn main() { 2 + tauri_build::build() 3 + }
+10
apps/desktop/src-tauri/capabilities/default.json
··· 1 + { 2 + "identifier": "default", 3 + "description": "Default capabilities for the desktop app", 4 + "windows": ["main"], 5 + "permissions": [ 6 + "shell:allow-spawn", 7 + "shell:allow-execute", 8 + "shell:allow-kill" 9 + ] 10 + }
+6
apps/desktop/src-tauri/src/lib.rs
··· 1 + pub fn run() { 2 + tauri::Builder::default() 3 + .plugin(tauri_plugin_shell::init()) 4 + .run(tauri::generate_context!()) 5 + .expect("error while running tauri application"); 6 + }
+6
apps/desktop/src-tauri/src/main.rs
··· 1 + // Prevents additional console window on Windows in release 2 + #![cfg_attr(not(debug_assertions), windows_subsystem = "windows")] 3 + 4 + fn main() { 5 + p2pds_desktop::run() 6 + }
+35
apps/desktop/src-tauri/tauri.conf.json
··· 1 + { 2 + "$schema": "https://schema.tauri.app/config/2", 3 + "productName": "P2PDS", 4 + "version": "0.1.0", 5 + "identifier": "org.p2pds.desktop", 6 + "build": { 7 + "frontendDist": "../src", 8 + "devUrl": "http://localhost:1420" 9 + }, 10 + "app": { 11 + "windows": [ 12 + { 13 + "title": "P2PDS", 14 + "width": 1200, 15 + "height": 800, 16 + "center": true 17 + } 18 + ], 19 + "security": { 20 + "csp": "default-src 'self'; connect-src 'self' http://127.0.0.1:3000; script-src 'self'" 21 + } 22 + }, 23 + "bundle": { 24 + "active": true, 25 + "targets": "all", 26 + "icon": [ 27 + "icons/32x32.png", 28 + "icons/128x128.png", 29 + "icons/128x128@2x.png", 30 + "icons/icon.icns", 31 + "icons/icon.ico" 32 + ], 33 + "externalBin": ["binaries/p2pds"] 34 + } 35 + }
+21
apps/desktop/src/index.html
··· 1 + <!DOCTYPE html> 2 + <html lang="en"> 3 + <head> 4 + <meta charset="UTF-8"> 5 + <meta name="viewport" content="width=device-width, initial-scale=1.0"> 6 + <title>P2PDS</title> 7 + <link rel="stylesheet" href="styles.css"> 8 + </head> 9 + <body> 10 + <div id="loading"> 11 + <div class="spinner"></div> 12 + <p id="status">Starting p2pds...</p> 13 + </div> 14 + <div id="error" hidden> 15 + <h2>Failed to start p2pds</h2> 16 + <p id="error-message"></p> 17 + <button id="retry-btn">Retry</button> 18 + </div> 19 + <script type="module" src="main.ts"></script> 20 + </body> 21 + </html>
+104
apps/desktop/src/main.ts
··· 1 + import { Command, type Child } from "@tauri-apps/plugin-shell"; 2 + 3 + const HEALTH_URL = "http://127.0.0.1:3000/xrpc/_health"; 4 + const DASHBOARD_URL = "http://127.0.0.1:3000/xrpc/org.p2pds.admin.dashboard"; 5 + const POLL_INTERVAL_MS = 500; 6 + const STARTUP_TIMEOUT_MS = 30_000; 7 + 8 + let sidecarProcess: Child | null = null; 9 + 10 + function setStatus(text: string): void { 11 + const el = document.getElementById("status"); 12 + if (el) el.textContent = text; 13 + } 14 + 15 + function showError(message: string): void { 16 + const loading = document.getElementById("loading"); 17 + const error = document.getElementById("error"); 18 + const errorMsg = document.getElementById("error-message"); 19 + if (loading) loading.hidden = true; 20 + if (error) error.hidden = false; 21 + if (errorMsg) errorMsg.textContent = message; 22 + } 23 + 24 + async function waitForServer(): Promise<void> { 25 + const deadline = Date.now() + STARTUP_TIMEOUT_MS; 26 + 27 + while (Date.now() < deadline) { 28 + try { 29 + const res = await fetch(HEALTH_URL); 30 + if (res.ok) { 31 + return; 32 + } 33 + } catch { 34 + // Server not ready yet 35 + } 36 + setStatus("Waiting for p2pds to start..."); 37 + await new Promise((resolve) => setTimeout(resolve, POLL_INTERVAL_MS)); 38 + } 39 + 40 + throw new Error( 41 + `p2pds did not become healthy within ${STARTUP_TIMEOUT_MS / 1000}s` 42 + ); 43 + } 44 + 45 + async function spawnSidecar(): Promise<Child> { 46 + const command = Command.sidecar("binaries/p2pds"); 47 + 48 + command.stdout.on("data", (line: string) => { 49 + console.log(`[p2pds] ${line}`); 50 + }); 51 + 52 + command.stderr.on("data", (line: string) => { 53 + console.error(`[p2pds] ${line}`); 54 + }); 55 + 56 + command.on("error", (error: string) => { 57 + console.error(`[p2pds] process error: ${error}`); 58 + showError(`Sidecar process error: ${error}`); 59 + }); 60 + 61 + command.on("close", (data: { code: number | null; signal: string | null }) => { 62 + console.log(`[p2pds] 
process exited with code ${data.code}`); 63 + if (data.code !== 0 && data.code !== null) { 64 + showError(`p2pds exited with code ${data.code}`); 65 + } 66 + }); 67 + 68 + const child = await command.spawn(); 69 + return child; 70 + } 71 + 72 + async function start(): Promise<void> { 73 + try { 74 + setStatus("Starting p2pds..."); 75 + sidecarProcess = await spawnSidecar(); 76 + 77 + setStatus("Waiting for server..."); 78 + await waitForServer(); 79 + 80 + setStatus("Redirecting to dashboard..."); 81 + sidecarProcess = null; window.location.href = DASHBOARD_URL; 82 + } catch (err) { 83 + const message = err instanceof Error ? err.message : String(err); 84 + showError(message); 85 + } 86 + } 87 + 88 + // Clean up sidecar on window close; the dashboard handoff clears sidecarProcess first, since navigating fires beforeunload and the dashboard is served by the sidecar itself 89 + window.addEventListener("beforeunload", () => { 90 + if (sidecarProcess) { 91 + sidecarProcess.kill().catch(console.error); 92 + } 93 + }); 94 + 95 + // Retry button handler 96 + document.getElementById("retry-btn")?.addEventListener("click", () => { 97 + const loading = document.getElementById("loading"); 98 + const error = document.getElementById("error"); 99 + if (loading) loading.hidden = false; 100 + if (error) error.hidden = true; 101 + start(); 102 + }); 103 + 104 + start();
+69
apps/desktop/src/styles.css
··· 1 + * { 2 + margin: 0; 3 + padding: 0; 4 + box-sizing: border-box; 5 + } 6 + 7 + body { 8 + font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, 9 + "Helvetica Neue", Arial, sans-serif; 10 + background: #1a1a2e; 11 + color: #e0e0e0; 12 + display: flex; 13 + align-items: center; 14 + justify-content: center; 15 + min-height: 100vh; 16 + } 17 + 18 + #loading, 19 + #error { 20 + text-align: center; 21 + padding: 2rem; 22 + } 23 + 24 + .spinner { 25 + width: 48px; 26 + height: 48px; 27 + border: 4px solid #333; 28 + border-top-color: #6c63ff; 29 + border-radius: 50%; 30 + animation: spin 0.8s linear infinite; 31 + margin: 0 auto 1.5rem; 32 + } 33 + 34 + @keyframes spin { 35 + to { 36 + transform: rotate(360deg); 37 + } 38 + } 39 + 40 + #status { 41 + font-size: 1.1rem; 42 + color: #aaa; 43 + } 44 + 45 + #error h2 { 46 + color: #ff6b6b; 47 + margin-bottom: 0.75rem; 48 + } 49 + 50 + #error-message { 51 + color: #ccc; 52 + margin-bottom: 1.5rem; 53 + max-width: 400px; 54 + } 55 + 56 + #retry-btn { 57 + background: #6c63ff; 58 + color: #fff; 59 + border: none; 60 + padding: 0.6rem 1.5rem; 61 + border-radius: 6px; 62 + font-size: 1rem; 63 + cursor: pointer; 64 + transition: background 0.2s; 65 + } 66 + 67 + #retry-btn:hover { 68 + background: #5a52d5; 69 + }
+12
apps/desktop/tsconfig.json
··· 1 + { 2 + "compilerOptions": { 3 + "target": "ES2022", 4 + "module": "ESNext", 5 + "moduleResolution": "bundler", 6 + "lib": ["ES2022", "DOM"], 7 + "strict": true, 8 + "esModuleInterop": true, 9 + "skipLibCheck": true 10 + }, 11 + "include": ["src"] 12 + }
+33
lexicons/org/p2pds/peer.json
··· 1 + { 2 + "lexicon": 1, 3 + "id": "org.p2pds.peer", 4 + "defs": { 5 + "main": { 6 + "type": "record", 7 + "description": "Binds an atproto DID to a libp2p PeerID and its multiaddrs. Published in the user's own repo at rkey 'self'.", 8 + "key": "literal:self", 9 + "record": { 10 + "type": "object", 11 + "required": ["peerId", "multiaddrs", "createdAt"], 12 + "properties": { 13 + "peerId": { 14 + "type": "string", 15 + "description": "The libp2p PeerID of the node running p2pds for this account." 16 + }, 17 + "multiaddrs": { 18 + "type": "array", 19 + "description": "Multiaddr strings where this peer can be reached.", 20 + "items": { 21 + "type": "string" 22 + } 23 + }, 24 + "createdAt": { 25 + "type": "string", 26 + "format": "datetime", 27 + "description": "Timestamp when this record was created or last updated." 28 + } 29 + } 30 + } 31 + } 32 + } 33 + }
+43
lexicons/org/p2pds/replication/offer.json
··· 1 + { 2 + "lexicon": 1, 3 + "id": "org.p2pds.replication.offer", 4 + "defs": { 5 + "main": { 6 + "type": "record", 7 + "description": "Declares willingness to replicate a specific DID's data. Published in the user's own repo. Mutual offers between two accounts form a replication agreement.", 8 + "key": "any", 9 + "record": { 10 + "type": "object", 11 + "required": ["subject", "minCopies", "intervalSec", "priority", "createdAt"], 12 + "properties": { 13 + "subject": { 14 + "type": "string", 15 + "format": "did", 16 + "description": "The DID whose data this account is willing to replicate." 17 + }, 18 + "minCopies": { 19 + "type": "integer", 20 + "minimum": 1, 21 + "description": "Desired minimum number of redundant copies (default 2)." 22 + }, 23 + "intervalSec": { 24 + "type": "integer", 25 + "minimum": 1, 26 + "description": "Desired sync frequency in seconds (default 600)." 27 + }, 28 + "priority": { 29 + "type": "integer", 30 + "minimum": 0, 31 + "maximum": 100, 32 + "description": "Priority level from 0-100 (default 50). Higher values indicate higher importance." 33 + }, 34 + "createdAt": { 35 + "type": "string", 36 + "format": "datetime", 37 + "description": "Timestamp when this offer was created." 38 + } 39 + } 40 + } 41 + } 42 + } 43 + }
+2 -1
package.json
··· 45 45 "typescript": "^5.9.3", 46 46 "vitest": "^3.0.0" 47 47 }, 48 - "license": "MIT" 48 + "license": "MIT", 49 + "workspaces": ["apps/*"] 49 50 }
+1 -10
src/config.ts
··· 24 24 POLICY_FILE?: string; 25 25 FIREHOSE_URL: string; 26 26 FIREHOSE_ENABLED: boolean; 27 - /** Node's did:web identity, derived from PDS_HOSTNAME. Always present. */ 28 - NODE_DID: string; 29 - /** DIDs authorized to manage this node (parsed from comma-separated env var). */ 30 - NODE_MANAGERS: string[]; 31 - /** Optional human-readable node name. */ 32 - NODE_NAME?: string; 33 27 /** Whether rate limiting is enabled (default true). */ 34 28 RATE_LIMIT_ENABLED: boolean; 35 29 /** Per-pool rate limit overrides (requests per minute). */ ··· 86 80 * Optionally loads a .env file first. 87 81 * 88 82 * Social account fields (DID, HANDLE, SIGNING_KEY, SIGNING_KEY_PUBLIC) are optional. 89 - * When omitted, the node runs as replication-only with its own did:web identity. 83 + * When omitted, the node runs as replication-only. 90 84 */ 91 85 export function loadConfig(envPath?: string): Config { 92 86 // Load .env file if it exists ··· 126 120 POLICY_FILE: process.env.POLICY_FILE || undefined, 127 121 FIREHOSE_URL: process.env.FIREHOSE_URL ?? "wss://bsky.network/xrpc/com.atproto.sync.subscribeRepos", 128 122 FIREHOSE_ENABLED: process.env.FIREHOSE_ENABLED !== "false", 129 - NODE_DID: `did:web:${pdsHostname.replace(/:/g, "%3A")}`, 130 - NODE_MANAGERS: (process.env.NODE_MANAGERS ?? "").split(",").map(s => s.trim()).filter(Boolean), 131 - NODE_NAME: process.env.NODE_NAME || undefined, 132 123 RATE_LIMIT_ENABLED: process.env.RATE_LIMIT_ENABLED !== "false", 133 124 RATE_LIMIT_READ_PER_MIN: parseInt(process.env.RATE_LIMIT_READ_PER_MIN ?? "300", 10), 134 125 RATE_LIMIT_SYNC_PER_MIN: parseInt(process.env.RATE_LIMIT_SYNC_PER_MIN ?? "30", 10),
+29 -58
src/index.ts
··· 25 25 import type { StorageChallenge } from "./replication/challenge-response/types.js"; 26 26 import { MAX_RECORD_PATHS, MAX_BLOCK_CIDS } from "./replication/challenge-response/types.js"; 27 27 import { generateMstProof } from "./replication/mst-proof.js"; 28 - import { generateNodeDidDocument } from "./node-identity.js"; 29 28 30 29 const VERSION = "0.1.0"; 31 - 32 - /** Node identity options passed from server.ts */ 33 - export interface NodeIdentityOpts { 34 - nodeDid: string; 35 - nodePublicKeyMultibase: string; 36 - nodeRepoManager: RepoManager; 37 - /** Social account repo manager (undefined when no social account configured). */ 38 - repoManager?: RepoManager; 39 - } 40 30 41 31 /** 42 32 * Create the Hono app with all routes. ··· 50 40 blobStore?: BlobStore, 51 41 replicationManager?: ReplicationManager, 52 42 replicatedRepoReader?: ReplicatedRepoReader, 53 - nodeOpts?: NodeIdentityOpts, 43 + repoManager?: RepoManager, 54 44 rateLimiter?: RateLimiter, 55 45 ) { 56 - const nodeDid = nodeOpts?.nodeDid ?? config.NODE_DID; 57 - const nodePublicKeyMultibase = nodeOpts?.nodePublicKeyMultibase ?? ""; 58 - const nodeRepoManager = nodeOpts?.nodeRepoManager; 59 - // Social account repo manager (may be undefined for replication-only nodes) 60 - const socialRepoManager = nodeOpts?.repoManager; 61 - // Primary repo manager: social if available, else node 62 - const primaryRepoManager = socialRepoManager ?? nodeRepoManager; 46 + const configDid = config.DID ?? 
""; 63 47 64 48 const app = new Hono<{ Bindings: Config }>(); 65 49 ··· 177 161 // Default 1MB JSON body limit for all other POST endpoints 178 162 app.post("/xrpc/*", jsonBodyLimit()); 179 163 180 - // DID document for did:web resolution — serves the node's DID document 181 - app.get("/.well-known/did.json", (c) => { 182 - const didDocument = generateNodeDidDocument( 183 - nodeDid, 184 - nodePublicKeyMultibase, 185 - config.PDS_HOSTNAME, 186 - ); 187 - return c.json(didDocument); 188 - }); 189 - 190 164 // Handle verification for AT Protocol 191 - // If the hostname matches the handle, return the social DID (for did:web handle resolution). 192 - // Also resolves the hostname itself to the node DID. 193 165 app.get("/.well-known/atproto-did", (c) => { 194 - if (config.HANDLE && config.DID && config.HANDLE === config.PDS_HOSTNAME) { 166 + if (config.DID) { 195 167 return new Response(config.DID, { 196 168 headers: { "Content-Type": "text/plain" }, 197 169 }); 198 170 } 199 - // Resolve hostname to node DID 200 - return new Response(nodeDid, { 201 - headers: { "Content-Type": "text/plain" }, 202 - }); 171 + return c.json( 172 + { error: "NotConfigured", message: "No DID configured" }, 173 + 404, 174 + ); 203 175 }); 204 176 205 177 // ============================================ ··· 216 188 } 217 189 218 190 // 2. 
Fallback: SQLite blocks table 219 - if (!bytes && primaryRepoManager) { 191 + if (!bytes && repoManager) { 220 192 try { 221 193 const { CID } = await import("@atproto/lex-data"); 222 194 const cid = CID.parse(cidStr); 223 - bytes = await primaryRepoManager.storage.getBytes(cid); 195 + bytes = await repoManager.storage.getBytes(cid); 224 196 } catch { 225 197 // CID parse failure or storage error 226 198 } ··· 253 225 // Health check 254 226 app.get("/xrpc/_health", (c) => { 255 227 try { 256 - if (primaryRepoManager) { 257 - primaryRepoManager.healthCheck(); 228 + if (repoManager) { 229 + repoManager.healthCheck(); 258 230 } 259 231 const health: Record<string, unknown> = { 260 232 status: "ok", ··· 277 249 app.get("/", (c) => { 278 250 const handleHtml = config.HANDLE 279 251 ? `<div class="handle"><a href="https://bsky.app/profile/${config.HANDLE}" target="_blank">@${config.HANDLE}</a></div>` 280 - : `<div class="handle">${nodeDid}</div>`; 252 + : config.DID 253 + ? `<div class="handle">${config.DID}</div>` 254 + : ""; 281 255 const html = `<!DOCTYPE html> 282 256 <html lang="en"> 283 257 <head> ··· 320 294 // ============================================ 321 295 322 296 // Only register sync endpoints if we have a repo manager 323 - if (primaryRepoManager) { 324 - const rm = primaryRepoManager; 297 + if (repoManager) { 298 + const rm = repoManager; 325 299 app.get("/xrpc/com.atproto.sync.getRepo", (c) => 326 300 sync.getRepo(c, rm, blockStore, replicationManager?.getSyncStorage()), 327 301 ); ··· 351 325 // ============================================ 352 326 // Repository operations 353 327 // ============================================ 354 - if (primaryRepoManager) { 355 - const rm = primaryRepoManager; 328 + if (repoManager) { 329 + const rm = repoManager; 356 330 357 331 app.use("/xrpc/com.atproto.repo.describeRepo", async (c, next) => { 358 332 const requestedRepo = c.req.query("repo"); 359 - if (!requestedRepo || requestedRepo === config.DID || 
requestedRepo === nodeDid) { 333 + if (!requestedRepo || requestedRepo === config.DID) { 360 334 return repo.describeRepo(c, rm); 361 335 } 362 336 if ( ··· 371 345 372 346 app.use("/xrpc/com.atproto.repo.getRecord", async (c, next) => { 373 347 const requestedRepo = c.req.query("repo"); 374 - if (!requestedRepo || requestedRepo === config.DID || requestedRepo === nodeDid) { 348 + if (!requestedRepo || requestedRepo === config.DID) { 375 349 return repo.getRecord(c, rm); 376 350 } 377 351 if ( ··· 386 360 387 361 app.use("/xrpc/com.atproto.repo.listRecords", async (c, next) => { 388 362 const requestedRepo = c.req.query("repo"); 389 - if (!requestedRepo || requestedRepo === config.DID || requestedRepo === nodeDid) { 363 + if (!requestedRepo || requestedRepo === config.DID) { 390 364 return repo.listRecords(c, rm); 391 365 } 392 366 if ( ··· 434 408 if (config.HANDLE && config.DID && handle === config.HANDLE) { 435 409 return c.json({ did: config.DID }); 436 410 } 437 - if (handle === config.PDS_HOSTNAME) { 438 - return c.json({ did: nodeDid }); 439 - } 440 411 return c.json( 441 412 { error: "HandleNotFound", message: `Handle not found: ${handle}` }, 442 413 404, ··· 444 415 }); 445 416 446 417 // ============================================ 447 - // Session management (requires social account) 418 + // Session management (requires repo manager) 448 419 // ============================================ 449 - if (primaryRepoManager) { 450 - const rm = primaryRepoManager; 420 + if (repoManager) { 421 + const rm = repoManager; 451 422 app.post("/xrpc/com.atproto.server.createSession", (c) => 452 423 server.createSession(c, rm), 453 424 ); ··· 569 540 570 541 const challenge = (await c.req.json()) as StorageChallenge; 571 542 572 - // Validate challenge is targeted at this node 573 - if (challenge.targetDid !== nodeDid) { 543 + // Validate challenge is targeted at this node's configured DID 544 + if (challenge.targetDid !== configDid) { 574 545 return c.json( 575 546 { 
error: "InvalidChallenge", message: "Challenge is not targeted at this node" }, 576 547 400, ··· 600 571 const response = await respondToChallenge( 601 572 challenge, 602 573 blockStore, 603 - nodeDid, 574 + configDid, 604 575 ); 605 576 return c.json(serializeResponse(response)); 606 577 }); ··· 710 681 // Admin monitoring 711 682 // ============================================ 712 683 app.get("/xrpc/org.p2pds.admin.getOverview", requireAuth, (c) => 713 - admin.getOverview(c, nodeDid, networkService, replicationManager), 684 + admin.getOverview(c, configDid, networkService, replicationManager), 714 685 ); 715 686 app.get("/xrpc/org.p2pds.admin.getDidStatus", requireAuth, (c) => 716 687 admin.getDidStatus(c, replicationManager), ··· 725 696 admin.getSyncHistory(c, replicationManager), 726 697 ); 727 698 app.post("/xrpc/org.p2pds.admin.addDid", requireAuth, (c) => 728 - admin.addDid(c, nodeDid, replicationManager), 699 + admin.addDid(c, configDid, replicationManager), 729 700 ); 730 701 app.post("/xrpc/org.p2pds.admin.removeDid", requireAuth, (c) => 731 702 admin.removeDid(c, replicationManager),
+1 -3
src/ipfs.test.ts
··· 43 43 REPLICATE_DIDS: [], 44 44 FIREHOSE_URL: "wss://localhost/xrpc/com.atproto.sync.subscribeRepos", 45 45 FIREHOSE_ENABLED: false, 46 - NODE_DID: "did:web:test.example.com", 47 - NODE_MANAGERS: [], 48 46 RATE_LIMIT_ENABLED: false, 49 47 RATE_LIMIT_READ_PER_MIN: 300, 50 48 RATE_LIMIT_SYNC_PER_MIN: 30, ··· 259 257 260 258 blobStore = new BlobStore(tmpDir, config.DID!); 261 259 262 - app = createApp(config, firehose, ipfsService, ipfsService, blobStore, undefined, undefined, { nodeDid: config.NODE_DID, nodePublicKeyMultibase: "", nodeRepoManager: repoManager, repoManager }); 260 + app = createApp(config, firehose, ipfsService, ipfsService, blobStore, undefined, undefined, repoManager); 263 261 }); 264 262 265 263 afterEach(async () => {
+250
src/lexicons.test.ts
··· 1 + import { describe, it, expect, beforeAll } from "vitest"; 2 + import { 3 + loadP2pdsLexicons, 4 + getLexicon, 5 + validateP2pdsRecord, 6 + } from "./lexicons.js"; 7 + 8 + describe("lexicons", () => { 9 + beforeAll(() => { 10 + loadP2pdsLexicons(); 11 + }); 12 + 13 + describe("loadP2pdsLexicons", () => { 14 + it("loads peer and offer lexicons", () => { 15 + const peer = getLexicon("org.p2pds.peer"); 16 + const offer = getLexicon("org.p2pds.replication.offer"); 17 + 18 + expect(peer).toBeDefined(); 19 + expect(peer!.id).toBe("org.p2pds.peer"); 20 + expect(peer!.lexicon).toBe(1); 21 + expect(peer!.defs.main.type).toBe("record"); 22 + 23 + expect(offer).toBeDefined(); 24 + expect(offer!.id).toBe("org.p2pds.replication.offer"); 25 + expect(offer!.defs.main.type).toBe("record"); 26 + }); 27 + 28 + it("peer lexicon has correct schema", () => { 29 + const peer = getLexicon("org.p2pds.peer")!; 30 + const props = peer.defs.main.record.properties; 31 + const required = peer.defs.main.record.required; 32 + 33 + expect(required).toContain("peerId"); 34 + expect(required).toContain("multiaddrs"); 35 + expect(required).toContain("createdAt"); 36 + 37 + expect(props.peerId.type).toBe("string"); 38 + expect(props.multiaddrs.type).toBe("array"); 39 + expect(props.createdAt.type).toBe("string"); 40 + expect(props.createdAt.format).toBe("datetime"); 41 + }); 42 + 43 + it("offer lexicon has correct schema", () => { 44 + const offer = getLexicon("org.p2pds.replication.offer")!; 45 + const props = offer.defs.main.record.properties; 46 + const required = offer.defs.main.record.required; 47 + 48 + expect(required).toContain("subject"); 49 + expect(required).toContain("minCopies"); 50 + expect(required).toContain("intervalSec"); 51 + expect(required).toContain("priority"); 52 + expect(required).toContain("createdAt"); 53 + 54 + expect(props.subject.type).toBe("string"); 55 + expect(props.subject.format).toBe("did"); 56 + expect(props.minCopies.type).toBe("integer"); 57 + 
expect(props.priority.maximum).toBe(100); 58 + }); 59 + }); 60 + 61 + describe("validateP2pdsRecord — org.p2pds.peer", () => { 62 + const validPeer = { 63 + $type: "org.p2pds.peer", 64 + peerId: "12D3KooWNvSZnPi3RrhrTwEY4LuuBeB6K6facKUCJcyWG1aoDd2p", 65 + multiaddrs: ["/ip4/127.0.0.1/tcp/4001"], 66 + createdAt: "2026-01-15T10:30:00.000Z", 67 + }; 68 + 69 + it("accepts a valid peer record", () => { 70 + expect(() => 71 + validateP2pdsRecord("org.p2pds.peer", validPeer), 72 + ).not.toThrow(); 73 + }); 74 + 75 + it("rejects missing peerId", () => { 76 + const { peerId: _, ...bad } = validPeer; 77 + expect(() => 78 + validateP2pdsRecord("org.p2pds.peer", bad), 79 + ).toThrow("Missing required field: peerId"); 80 + }); 81 + 82 + it("rejects missing multiaddrs", () => { 83 + const { multiaddrs: _, ...bad } = validPeer; 84 + expect(() => 85 + validateP2pdsRecord("org.p2pds.peer", bad), 86 + ).toThrow("Missing required field: multiaddrs"); 87 + }); 88 + 89 + it("rejects missing createdAt", () => { 90 + const { createdAt: _, ...bad } = validPeer; 91 + expect(() => 92 + validateP2pdsRecord("org.p2pds.peer", bad), 93 + ).toThrow("Missing required field: createdAt"); 94 + }); 95 + 96 + it("rejects non-string peerId", () => { 97 + expect(() => 98 + validateP2pdsRecord("org.p2pds.peer", { 99 + ...validPeer, 100 + peerId: 123, 101 + }), 102 + ).toThrow("must be a string"); 103 + }); 104 + 105 + it("rejects non-array multiaddrs", () => { 106 + expect(() => 107 + validateP2pdsRecord("org.p2pds.peer", { 108 + ...validPeer, 109 + multiaddrs: "not-an-array", 110 + }), 111 + ).toThrow("must be an array"); 112 + }); 113 + 114 + it("rejects invalid datetime", () => { 115 + expect(() => 116 + validateP2pdsRecord("org.p2pds.peer", { 117 + ...validPeer, 118 + createdAt: "not-a-date", 119 + }), 120 + ).toThrow("valid datetime"); 121 + }); 122 + 123 + it("accepts empty multiaddrs array", () => { 124 + expect(() => 125 + validateP2pdsRecord("org.p2pds.peer", { 126 + ...validPeer, 127 + 
multiaddrs: [], 128 + }), 129 + ).not.toThrow(); 130 + }); 131 + 132 + it("rejects non-string items in multiaddrs", () => { 133 + expect(() => 134 + validateP2pdsRecord("org.p2pds.peer", { 135 + ...validPeer, 136 + multiaddrs: [123], 137 + }), 138 + ).toThrow("must be a string"); 139 + }); 140 + }); 141 + 142 + describe("validateP2pdsRecord — org.p2pds.replication.offer", () => { 143 + const validOffer = { 144 + $type: "org.p2pds.replication.offer", 145 + subject: "did:plc:abc123", 146 + minCopies: 2, 147 + intervalSec: 600, 148 + priority: 50, 149 + createdAt: "2026-01-15T10:30:00.000Z", 150 + }; 151 + 152 + it("accepts a valid offer record", () => { 153 + expect(() => 154 + validateP2pdsRecord("org.p2pds.replication.offer", validOffer), 155 + ).not.toThrow(); 156 + }); 157 + 158 + it("rejects missing subject", () => { 159 + const { subject: _, ...bad } = validOffer; 160 + expect(() => 161 + validateP2pdsRecord("org.p2pds.replication.offer", bad), 162 + ).toThrow("Missing required field: subject"); 163 + }); 164 + 165 + it("rejects non-DID subject", () => { 166 + expect(() => 167 + validateP2pdsRecord("org.p2pds.replication.offer", { 168 + ...validOffer, 169 + subject: "not-a-did", 170 + }), 171 + ).toThrow("must be a valid DID"); 172 + }); 173 + 174 + it("rejects non-integer minCopies", () => { 175 + expect(() => 176 + validateP2pdsRecord("org.p2pds.replication.offer", { 177 + ...validOffer, 178 + minCopies: 1.5, 179 + }), 180 + ).toThrow("must be an integer"); 181 + }); 182 + 183 + it("rejects minCopies below minimum", () => { 184 + expect(() => 185 + validateP2pdsRecord("org.p2pds.replication.offer", { 186 + ...validOffer, 187 + minCopies: 0, 188 + }), 189 + ).toThrow(">= 1"); 190 + }); 191 + 192 + it("rejects priority above maximum", () => { 193 + expect(() => 194 + validateP2pdsRecord("org.p2pds.replication.offer", { 195 + ...validOffer, 196 + priority: 101, 197 + }), 198 + ).toThrow("<= 100"); 199 + }); 200 + 201 + it("rejects priority below minimum", () => 
{ 202 + expect(() => 203 + validateP2pdsRecord("org.p2pds.replication.offer", { 204 + ...validOffer, 205 + priority: -1, 206 + }), 207 + ).toThrow(">= 0"); 208 + }); 209 + 210 + it("accepts boundary values", () => { 211 + expect(() => 212 + validateP2pdsRecord("org.p2pds.replication.offer", { 213 + ...validOffer, 214 + minCopies: 1, 215 + priority: 0, 216 + intervalSec: 1, 217 + }), 218 + ).not.toThrow(); 219 + 220 + expect(() => 221 + validateP2pdsRecord("org.p2pds.replication.offer", { 222 + ...validOffer, 223 + priority: 100, 224 + }), 225 + ).not.toThrow(); 226 + }); 227 + }); 228 + 229 + describe("validateP2pdsRecord — unknown NSID", () => { 230 + it("throws for unknown NSID", () => { 231 + expect(() => 232 + validateP2pdsRecord("org.p2pds.unknown", {}), 233 + ).toThrow("No lexicon loaded for NSID"); 234 + }); 235 + }); 236 + 237 + describe("validateP2pdsRecord — non-object", () => { 238 + it("rejects null", () => { 239 + expect(() => 240 + validateP2pdsRecord("org.p2pds.peer", null), 241 + ).toThrow("must be an object"); 242 + }); 243 + 244 + it("rejects string", () => { 245 + expect(() => 246 + validateP2pdsRecord("org.p2pds.peer", "string"), 247 + ).toThrow("must be an object"); 248 + }); 249 + }); 250 + });
+168
src/lexicons.ts
··· 1 + /** 2 + * Lexicon loader and validator for org.p2pds.* record types. 3 + * 4 + * Loads lexicon JSON schemas from the lexicons/ directory and provides 5 + * validation for custom p2pds record types (peer, offer). 6 + */ 7 + 8 + import { readFileSync } from "node:fs"; 9 + import { resolve, dirname } from "node:path"; 10 + import { fileURLToPath } from "node:url"; 11 + 12 + /** A parsed lexicon document. */ 13 + export interface LexiconDoc { 14 + lexicon: number; 15 + id: string; 16 + defs: { 17 + main: { 18 + type: string; 19 + description?: string; 20 + key?: string; 21 + record: { 22 + type: "object"; 23 + required: string[]; 24 + properties: Record<string, LexiconProperty>; 25 + }; 26 + }; 27 + }; 28 + } 29 + 30 + /** A property definition within a lexicon record schema. */ 31 + export interface LexiconProperty { 32 + type: string; 33 + description?: string; 34 + format?: string; 35 + minimum?: number; 36 + maximum?: number; 37 + items?: { type: string }; 38 + } 39 + 40 + /** Loaded lexicons keyed by NSID. */ 41 + const loadedLexicons = new Map<string, LexiconDoc>(); 42 + 43 + /** 44 + * Get the path to the lexicons directory relative to this module. 45 + */ 46 + function getLexiconsDir(): string { 47 + const thisFile = fileURLToPath(import.meta.url); 48 + return resolve(dirname(thisFile), "..", "lexicons"); 49 + } 50 + 51 + /** 52 + * Load a lexicon JSON file and register it. 53 + */ 54 + function loadLexicon(relativePath: string): LexiconDoc { 55 + const fullPath = resolve(getLexiconsDir(), relativePath); 56 + const content = readFileSync(fullPath, "utf-8"); 57 + const doc = JSON.parse(content) as LexiconDoc; 58 + 59 + if (doc.lexicon !== 1) { 60 + throw new Error(`Unsupported lexicon version: ${doc.lexicon}`); 61 + } 62 + 63 + loadedLexicons.set(doc.id, doc); 64 + return doc; 65 + } 66 + 67 + /** 68 + * Load all p2pds lexicon schemas. 
69 + */ 70 + export function loadP2pdsLexicons(): Map<string, LexiconDoc> { 71 + loadLexicon("org/p2pds/peer.json"); 72 + loadLexicon("org/p2pds/replication/offer.json"); 73 + return loadedLexicons; 74 + } 75 + 76 + /** 77 + * Get a loaded lexicon by NSID. 78 + */ 79 + export function getLexicon(nsid: string): LexiconDoc | undefined { 80 + return loadedLexicons.get(nsid); 81 + } 82 + 83 + /** 84 + * Validate a record against its lexicon schema. 85 + * Throws if the record is invalid. 86 + */ 87 + export function validateP2pdsRecord(nsid: string, record: unknown): void { 88 + const lexicon = loadedLexicons.get(nsid); 89 + if (!lexicon) { 90 + throw new Error(`No lexicon loaded for NSID: ${nsid}`); 91 + } 92 + 93 + if (typeof record !== "object" || record === null) { 94 + throw new Error(`Record must be an object`); 95 + } 96 + 97 + const rec = record as Record<string, unknown>; 98 + const schema = lexicon.defs.main.record; 99 + 100 + // Check required fields 101 + for (const field of schema.required) { 102 + if (!(field in rec) || rec[field] === undefined || rec[field] === null) { 103 + throw new Error(`Missing required field: ${field}`); 104 + } 105 + } 106 + 107 + // Validate property types 108 + for (const [key, prop] of Object.entries(schema.properties)) { 109 + const value = rec[key]; 110 + if (value === undefined || value === null) continue; 111 + 112 + validateProperty(key, value, prop); 113 + } 114 + } 115 + 116 + /** 117 + * Validate a single property value against its schema definition. 
118 + */ 119 + function validateProperty( 120 + key: string, 121 + value: unknown, 122 + prop: LexiconProperty, 123 + ): void { 124 + switch (prop.type) { 125 + case "string": { 126 + if (typeof value !== "string") { 127 + throw new Error(`Field '${key}' must be a string, got ${typeof value}`); 128 + } 129 + if (prop.format === "datetime") { 130 + const d = new Date(value); 131 + if (isNaN(d.getTime())) { 132 + throw new Error(`Field '${key}' must be a valid datetime string`); 133 + } 134 + } 135 + if (prop.format === "did") { 136 + if (!value.startsWith("did:")) { 137 + throw new Error(`Field '${key}' must be a valid DID`); 138 + } 139 + } 140 + break; 141 + } 142 + case "integer": { 143 + if (typeof value !== "number" || !Number.isInteger(value)) { 144 + throw new Error(`Field '${key}' must be an integer, got ${typeof value}`); 145 + } 146 + if (prop.minimum !== undefined && value < prop.minimum) { 147 + throw new Error(`Field '${key}' must be >= ${prop.minimum}`); 148 + } 149 + if (prop.maximum !== undefined && value > prop.maximum) { 150 + throw new Error(`Field '${key}' must be <= ${prop.maximum}`); 151 + } 152 + break; 153 + } 154 + case "array": { 155 + if (!Array.isArray(value)) { 156 + throw new Error(`Field '${key}' must be an array, got ${typeof value}`); 157 + } 158 + if (prop.items?.type === "string") { 159 + for (let i = 0; i < value.length; i++) { 160 + if (typeof value[i] !== "string") { 161 + throw new Error(`Field '${key}[${i}]' must be a string`); 162 + } 163 + } 164 + } 165 + break; 166 + } 167 + } 168 + }
+5 -22
src/middleware/auth.ts
··· 12 12 auth: AuthInfo; 13 13 }; 14 14 15 - /** 16 - * Build the set of DIDs accepted for authentication. 17 - * Includes: node DID, social DID (if configured), and any manager DIDs. 18 - */ 19 - function getAcceptedDids(env: Config): Set<string> { 20 - const accepted = new Set<string>(); 21 - accepted.add(env.NODE_DID); 22 - if (env.DID) { 23 - accepted.add(env.DID); 24 - } 25 - for (const managerDid of env.NODE_MANAGERS) { 26 - accepted.add(managerDid); 27 - } 28 - return accepted; 29 - } 30 - 31 15 export async function requireAuth( 32 16 c: Context<{ Bindings: Config; Variables: AuthVariables }>, 33 17 next: Next, ··· 59 43 60 44 // Try static token first 61 45 if (token === c.env.AUTH_TOKEN) { 62 - c.set("auth", { did: c.env.NODE_DID, scope: "com.atproto.access" }); 46 + c.set("auth", { did: c.env.DID ?? "", scope: "com.atproto.access" }); 63 47 return next(); 64 48 } 65 49 66 50 const serviceDid = `did:web:${c.env.PDS_HOSTNAME}`; 67 - const acceptedDids = getAcceptedDids(c.env); 68 51 69 52 // Try session JWT verification (HS256, signed with JWT_SECRET) 70 53 try { ··· 74 57 serviceDid, 75 58 ); 76 59 77 - if (!payload.sub || !acceptedDids.has(payload.sub)) { 60 + if (!payload.sub || payload.sub !== c.env.DID) { 78 61 return c.json( 79 62 { 80 63 error: "AuthenticationRequired", ··· 100 83 } 101 84 102 85 // Try service JWT verification (ES256K, signed with our signing key) 103 - if (c.env.SIGNING_KEY) { 86 + if (c.env.SIGNING_KEY && c.env.DID) { 104 87 try { 105 88 const payload = await verifyServiceJwt( 106 89 token, 107 90 c.env.SIGNING_KEY, 108 91 serviceDid, 109 - c.env.DID ?? c.env.NODE_DID, 92 + c.env.DID, 110 93 ); 111 94 112 - if (acceptedDids.has(payload.iss)) { 95 + if (payload.iss === c.env.DID) { 113 96 c.set("auth", { did: payload.iss, scope: payload.lxm || "atproto" }); 114 97 return next(); 115 98 }
-124
src/node-identity.test.ts
··· 1 - import { describe, it, expect, beforeEach, afterEach } from "vitest"; 2 - import { mkdtempSync, rmSync, readFileSync, existsSync } from "node:fs"; 3 - import { join } from "node:path"; 4 - import { tmpdir } from "node:os"; 5 - import { 6 - loadOrCreateNodeIdentity, 7 - getNodeDid, 8 - getPublicKeyMultibase, 9 - generateNodeDidDocument, 10 - } from "./node-identity.js"; 11 - 12 - describe("node-identity", () => { 13 - let tmpDir: string; 14 - 15 - beforeEach(() => { 16 - tmpDir = mkdtempSync(join(tmpdir(), "node-identity-test-")); 17 - }); 18 - 19 - afterEach(() => { 20 - try { 21 - rmSync(tmpDir, { recursive: true, force: true }); 22 - } catch {} 23 - }); 24 - 25 - describe("loadOrCreateNodeIdentity", () => { 26 - it("creates a new keypair when none exists", async () => { 27 - const { keypair, exported } = await loadOrCreateNodeIdentity(tmpDir); 28 - 29 - expect(keypair).toBeDefined(); 30 - expect(exported).toMatch(/^[0-9a-f]{64}$/); 31 - 32 - // Key file should exist 33 - const keyPath = join(tmpDir, "node-signing.key"); 34 - expect(existsSync(keyPath)).toBe(true); 35 - 36 - // File content should match exported hex 37 - const fileContent = readFileSync(keyPath, "utf-8").trim(); 38 - expect(fileContent).toBe(exported); 39 - }); 40 - 41 - it("loads existing keypair from file", async () => { 42 - // Create first 43 - const { exported: hex1 } = await loadOrCreateNodeIdentity(tmpDir); 44 - 45 - // Load again — should get same key 46 - const { exported: hex2 } = await loadOrCreateNodeIdentity(tmpDir); 47 - 48 - expect(hex2).toBe(hex1); 49 - }); 50 - 51 - it("produces a keypair that can sign and verify", async () => { 52 - const { keypair } = await loadOrCreateNodeIdentity(tmpDir); 53 - 54 - expect(keypair.jwtAlg).toBe("ES256K"); 55 - expect(keypair.publicKeyBytes()).toBeInstanceOf(Uint8Array); 56 - expect(keypair.publicKeyBytes().length).toBeGreaterThan(0); 57 - }); 58 - }); 59 - 60 - describe("getNodeDid", () => { 61 - it("returns did:web for simple hostname", 
() => { 62 - expect(getNodeDid("example.com")).toBe("did:web:example.com"); 63 - }); 64 - 65 - it("encodes port colons as %3A", () => { 66 - expect(getNodeDid("localhost:3000")).toBe("did:web:localhost%3A3000"); 67 - }); 68 - 69 - it("handles hostname with multiple colons", () => { 70 - expect(getNodeDid("host:1234:extra")).toBe( 71 - "did:web:host%3A1234%3Aextra", 72 - ); 73 - }); 74 - }); 75 - 76 - describe("getPublicKeyMultibase", () => { 77 - it("returns a multibase-encoded public key", async () => { 78 - const { keypair } = await loadOrCreateNodeIdentity(tmpDir); 79 - const multibase = getPublicKeyMultibase(keypair); 80 - 81 - expect(multibase).toMatch(/^z/); // multibase prefix for base58btc 82 - expect(multibase.length).toBeGreaterThan(10); 83 - }); 84 - }); 85 - 86 - describe("generateNodeDidDocument", () => { 87 - it("generates a valid DID document structure", () => { 88 - const did = "did:web:example.com"; 89 - const publicKeyMultibase = "zQ3shP2mWsZYWgvZM9GJ3EvMfRXQJwuTh6BdXLvJB9gFhT3Lr"; 90 - const hostname = "example.com"; 91 - 92 - const doc = generateNodeDidDocument( 93 - did, 94 - publicKeyMultibase, 95 - hostname, 96 - ) as Record<string, unknown>; 97 - 98 - expect(doc["@context"]).toContain("https://www.w3.org/ns/did/v1"); 99 - expect(doc.id).toBe(did); 100 - 101 - const vm = (doc.verificationMethod as Array<Record<string, unknown>>)[0]!; 102 - expect(vm.id).toBe(`${did}#atproto`); 103 - expect(vm.type).toBe("Multikey"); 104 - expect(vm.controller).toBe(did); 105 - expect(vm.publicKeyMultibase).toBe(publicKeyMultibase); 106 - 107 - const svc = (doc.service as Array<Record<string, unknown>>)[0]!; 108 - expect(svc.id).toBe("#atproto_pds"); 109 - expect(svc.type).toBe("AtprotoPersonalDataServer"); 110 - expect(svc.serviceEndpoint).toBe("https://example.com"); 111 - }); 112 - 113 - it("uses hostname with port in service endpoint", () => { 114 - const doc = generateNodeDidDocument( 115 - "did:web:localhost%3A3000", 116 - "zTest", 117 - 
"localhost:3000", 118 - ) as Record<string, unknown>; 119 - 120 - const svc = (doc.service as Array<Record<string, unknown>>)[0]!; 121 - expect(svc.serviceEndpoint).toBe("https://localhost:3000"); 122 - }); 123 - }); 124 - });
-84
src/node-identity.ts
··· 1 - /** 2 - * Node identity management: keypair lifecycle, did:web derivation, DID document generation. 3 - * 4 - * The node has its own did:web identity, separate from any social account it may host. 5 - * The keypair is stored as a hex-encoded private key file in the data directory. 6 - */ 7 - 8 - import { readFileSync, writeFileSync, existsSync } from "node:fs"; 9 - import { resolve } from "node:path"; 10 - import { Secp256k1Keypair, formatMultikey } from "@atproto/crypto"; 11 - 12 - const KEY_FILENAME = "node-signing.key"; 13 - 14 - /** 15 - * Load or create the node's signing keypair. 16 - * Checks for `{dataDir}/node-signing.key` (hex string). 17 - * If missing, generates a new keypair, exports to hex, writes to file with mode 0o600. 18 - */ 19 - export async function loadOrCreateNodeIdentity( 20 - dataDir: string, 21 - ): Promise<{ keypair: Secp256k1Keypair; exported: string }> { 22 - const keyPath = resolve(dataDir, KEY_FILENAME); 23 - 24 - if (existsSync(keyPath)) { 25 - const hex = readFileSync(keyPath, "utf-8").trim(); 26 - const keypair = await Secp256k1Keypair.import(hex, { 27 - exportable: true, 28 - }); 29 - return { keypair, exported: hex }; 30 - } 31 - 32 - const keypair = await Secp256k1Keypair.create({ exportable: true }); 33 - const exported = Buffer.from(await keypair.export()).toString("hex"); 34 - writeFileSync(keyPath, exported + "\n", { mode: 0o600 }); 35 - return { keypair, exported }; 36 - } 37 - 38 - /** 39 - * Derive the node's did:web from its hostname. 40 - * Encodes `:` as `%3A` per the did:web spec (for ports). 41 - */ 42 - export function getNodeDid(hostname: string): string { 43 - return `did:web:${hostname.replace(/:/g, "%3A")}`; 44 - } 45 - 46 - /** 47 - * Get the public key multibase string for a keypair. 
48 - */ 49 - export function getPublicKeyMultibase(keypair: Secp256k1Keypair): string { 50 - return formatMultikey(keypair.jwtAlg, keypair.publicKeyBytes()); 51 - } 52 - 53 - /** 54 - * Generate a DID document for the node's did:web identity. 55 - */ 56 - export function generateNodeDidDocument( 57 - did: string, 58 - publicKeyMultibase: string, 59 - hostname: string, 60 - ): object { 61 - return { 62 - "@context": [ 63 - "https://www.w3.org/ns/did/v1", 64 - "https://w3id.org/security/multikey/v1", 65 - "https://w3id.org/security/suites/secp256k1-2019/v1", 66 - ], 67 - id: did, 68 - verificationMethod: [ 69 - { 70 - id: `${did}#atproto`, 71 - type: "Multikey", 72 - controller: did, 73 - publicKeyMultibase, 74 - }, 75 - ], 76 - service: [ 77 - { 78 - id: "#atproto_pds", 79 - type: "AtprotoPersonalDataServer", 80 - serviceEndpoint: `https://${hostname}`, 81 - }, 82 - ], 83 - }; 84 - }
-2
src/replication/challenge-response/challenge-response.test.ts
··· 35 35 REPLICATE_DIDS: [], 36 36 FIREHOSE_URL: "wss://localhost/xrpc/com.atproto.sync.subscribeRepos", 37 37 FIREHOSE_ENABLED: false, 38 - NODE_DID: "did:web:test.example.com", 39 - NODE_MANAGERS: [], 40 38 RATE_LIMIT_ENABLED: false, 41 39 RATE_LIMIT_READ_PER_MIN: 300, 42 40 RATE_LIMIT_SYNC_PER_MIN: 30,
-2
src/replication/challenge-response/e2e-challenge.test.ts
··· 43 43 REPLICATE_DIDS: [], 44 44 FIREHOSE_URL: "wss://localhost/xrpc/com.atproto.sync.subscribeRepos", 45 45 FIREHOSE_ENABLED: false, 46 - NODE_DID: "did:web:test.example.com", 47 - NODE_MANAGERS: [], 48 46 RATE_LIMIT_ENABLED: false, 49 47 RATE_LIMIT_READ_PER_MIN: 300, 50 48 RATE_LIMIT_SYNC_PER_MIN: 30,
-2
src/replication/e2e-multi-node.test.ts
··· 48 48 REPLICATE_DIDS: [], 49 49 FIREHOSE_URL: "wss://localhost/xrpc/com.atproto.sync.subscribeRepos", 50 50 FIREHOSE_ENABLED: false, 51 - NODE_DID: "did:web:test.example.com", 52 - NODE_MANAGERS: [], 53 51 RATE_LIMIT_ENABLED: false, 54 52 RATE_LIMIT_READ_PER_MIN: 300, 55 53 RATE_LIMIT_SYNC_PER_MIN: 30,
-2
src/replication/firehose-incremental.test.ts
··· 62 62 REPLICATE_DIDS: replicateDids, 63 63 FIREHOSE_URL: "wss://localhost/xrpc/com.atproto.sync.subscribeRepos", 64 64 FIREHOSE_ENABLED: false, 65 - NODE_DID: "did:web:local.example.com", 66 - NODE_MANAGERS: [], 67 65 RATE_LIMIT_ENABLED: false, 68 66 RATE_LIMIT_READ_PER_MIN: 300, 69 67 RATE_LIMIT_SYNC_PER_MIN: 30,
-6
src/replication/gossipsub-notifications.test.ts
··· 297 297 REPLICATE_DIDS: ["did:plc:remote1", "did:plc:remote2"], 298 298 FIREHOSE_URL: "wss://localhost/xrpc/com.atproto.sync.subscribeRepos", 299 299 FIREHOSE_ENABLED: false, 300 - NODE_DID: "did:web:test.example.com", 301 - NODE_MANAGERS: [], 302 300 RATE_LIMIT_ENABLED: false, 303 301 RATE_LIMIT_READ_PER_MIN: 300, 304 302 RATE_LIMIT_SYNC_PER_MIN: 30, ··· 365 363 REPLICATE_DIDS: ["did:plc:remote1"], 366 364 FIREHOSE_URL: "wss://localhost/xrpc/com.atproto.sync.subscribeRepos", 367 365 FIREHOSE_ENABLED: false, 368 - NODE_DID: "did:web:test.example.com", 369 - NODE_MANAGERS: [], 370 366 RATE_LIMIT_ENABLED: false, 371 367 RATE_LIMIT_READ_PER_MIN: 300, 372 368 RATE_LIMIT_SYNC_PER_MIN: 30, ··· 455 451 REPLICATE_DIDS: ["did:plc:remote1"], 456 452 FIREHOSE_URL: "wss://localhost/xrpc/com.atproto.sync.subscribeRepos", 457 453 FIREHOSE_ENABLED: false, 458 - NODE_DID: "did:web:test.example.com", 459 - NODE_MANAGERS: [], 460 454 RATE_LIMIT_ENABLED: false, 461 455 RATE_LIMIT_READ_PER_MIN: 300, 462 456 RATE_LIMIT_SYNC_PER_MIN: 30,
-2
src/replication/mst-proof.test.ts
··· 28 28 REPLICATE_DIDS: [], 29 29 FIREHOSE_URL: "wss://localhost/xrpc/com.atproto.sync.subscribeRepos", 30 30 FIREHOSE_ENABLED: false, 31 - NODE_DID: "did:web:test.example.com", 32 - NODE_MANAGERS: [], 33 31 RATE_LIMIT_ENABLED: false, 34 32 RATE_LIMIT_READ_PER_MIN: 300, 35 33 RATE_LIMIT_SYNC_PER_MIN: 30,
-2
src/replication/offer-manager.test.ts
··· 30 30 REPLICATE_DIDS: [], 31 31 FIREHOSE_URL: "wss://localhost/xrpc/com.atproto.sync.subscribeRepos", 32 32 FIREHOSE_ENABLED: false, 33 - NODE_DID: "did:web:test.example.com", 34 - NODE_MANAGERS: [], 35 33 RATE_LIMIT_ENABLED: false, 36 34 RATE_LIMIT_READ_PER_MIN: 300, 37 35 RATE_LIMIT_SYNC_PER_MIN: 30,
-143
src/replication/peer-freshness.test.ts
··· 44 44 REPLICATE_DIDS: replicateDids, 45 45 FIREHOSE_URL: "wss://localhost/xrpc/com.atproto.sync.subscribeRepos", 46 46 FIREHOSE_ENABLED: false, 47 - NODE_DID: "did:web:test.example.com", 48 - NODE_MANAGERS: [], 49 47 RATE_LIMIT_ENABLED: false, 50 48 RATE_LIMIT_READ_PER_MIN: 300, 51 49 RATE_LIMIT_SYNC_PER_MIN: 30, ··· 326 324 }); 327 325 328 326 // ============================================ 329 - // Republish on multiaddr change 330 - // ============================================ 331 - 332 - describe("republish on multiaddr change", () => { 333 - let tmpDir: string; 334 - 335 - beforeEach(() => { 336 - tmpDir = mkdtempSync(join(tmpdir(), "peer-republish-test-")); 337 - }); 338 - 339 - afterEach(() => { 340 - rmSync(tmpDir, { recursive: true, force: true }); 341 - }); 342 - 343 - it("republishes peer identity when multiaddrs change", async () => { 344 - const db = new Database(join(tmpDir, "test.db")); 345 - const config = testConfig(tmpDir, ["did:plc:remote1"]); 346 - 347 - const { RepoManager } = await import("../repo-manager.js"); 348 - const { ReplicationManager } = await import("./replication-manager.js"); 349 - const { DidResolver } = await import("../did-resolver.js"); 350 - 351 - const repoManager = new RepoManager(db, config); 352 - repoManager.init(); 353 - 354 - const mockNet = createMockNetworkService(); 355 - const mockBlocks = createMockBlockStore(); 356 - const didResolver = new DidResolver(); 357 - 358 - const manager = new ReplicationManager( 359 - db, config, repoManager, mockBlocks, mockNet, didResolver, 360 - ); 361 - 362 - try { 363 - await manager.init(); 364 - 365 - // After init(), publishPeerIdentity was called, storing lastPublishedMultiaddrs 366 - const putRecordSpy = vi.spyOn(repoManager, "putRecord"); 367 - putRecordSpy.mockClear(); 368 - 369 - // Change multiaddrs 370 - mockNet._setMultiaddrs(["/ip4/127.0.0.1/tcp/4001", "/ip4/192.168.1.1/tcp/4002"]); 371 - 372 - // Mock syncDid to avoid actual repo fetch 373 - vi.spyOn(manager, 
"syncDid").mockResolvedValue(undefined); 374 - 375 - await manager.syncAll(); 376 - 377 - // putRecord should have been called for republish (org.p2pds.peer/self) 378 - const peerRecordCalls = putRecordSpy.mock.calls.filter( 379 - (call) => call[0] === "org.p2pds.peer" && call[1] === "self", 380 - ); 381 - expect(peerRecordCalls.length).toBe(1); 382 - 383 - vi.restoreAllMocks(); 384 - } finally { 385 - manager.stop(); 386 - db.close(); 387 - } 388 - }); 389 - 390 - it("skips republish when multiaddrs are unchanged", async () => { 391 - const db = new Database(join(tmpDir, "test-skip.db")); 392 - const config = testConfig(tmpDir, ["did:plc:remote1"]); 393 - 394 - const { RepoManager } = await import("../repo-manager.js"); 395 - const { ReplicationManager } = await import("./replication-manager.js"); 396 - const { DidResolver } = await import("../did-resolver.js"); 397 - 398 - const repoManager = new RepoManager(db, config); 399 - repoManager.init(); 400 - 401 - const mockNet = createMockNetworkService(); 402 - const mockBlocks = createMockBlockStore(); 403 - const didResolver = new DidResolver(); 404 - 405 - const manager = new ReplicationManager( 406 - db, config, repoManager, mockBlocks, mockNet, didResolver, 407 - ); 408 - 409 - try { 410 - await manager.init(); 411 - 412 - const putRecordSpy = vi.spyOn(repoManager, "putRecord"); 413 - putRecordSpy.mockClear(); 414 - 415 - // Don't change multiaddrs — keep same as init() 416 - 417 - // Mock syncDid 418 - vi.spyOn(manager, "syncDid").mockResolvedValue(undefined); 419 - 420 - await manager.syncAll(); 421 - 422 - // No republish should occur 423 - const peerRecordCalls = putRecordSpy.mock.calls.filter( 424 - (call) => call[0] === "org.p2pds.peer" && call[1] === "self", 425 - ); 426 - expect(peerRecordCalls.length).toBe(0); 427 - 428 - vi.restoreAllMocks(); 429 - } finally { 430 - manager.stop(); 431 - db.close(); 432 - } 433 - }); 434 - }); 435 - 436 - // ============================================ 437 327 // 
getRemoteAddrs 438 328 // ============================================ 439 329 ··· 700 590 } 701 591 }); 702 592 703 - it("publishPeerIdentity broadcasts identity notification via gossipsub", async () => { 704 - const db = new Database(join(tmpDir, "test-publish.db")); 705 - const config = testConfig(tmpDir, []); 706 - 707 - const { RepoManager } = await import("../repo-manager.js"); 708 - const { ReplicationManager } = await import("./replication-manager.js"); 709 - const { DidResolver } = await import("../did-resolver.js"); 710 - 711 - const repoManager = new RepoManager(db, config); 712 - repoManager.init(); 713 - 714 - const mockNet = createMockNetworkService(); 715 - const mockBlocks = createMockBlockStore(); 716 - const didResolver = new DidResolver(); 717 - 718 - const manager = new ReplicationManager( 719 - db, config, repoManager, mockBlocks, mockNet, didResolver, 720 - ); 721 - 722 - try { 723 - await manager.init(); 724 - 725 - // publishPeerIdentity was called during init 726 - expect(mockNet.publishIdentityNotification).toHaveBeenCalledWith( 727 - "did:web:test.example.com", 728 - "12D3KooWMockPeer", 729 - ["/ip4/127.0.0.1/tcp/4001"], 730 - ); 731 - } finally { 732 - manager.stop(); 733 - db.close(); 734 - } 735 - }); 736 593 });
-2
src/replication/policy-integration.test.ts
··· 52 52 REPLICATE_DIDS: replicateDids, 53 53 FIREHOSE_URL: "wss://localhost/xrpc/com.atproto.sync.subscribeRepos", 54 54 FIREHOSE_ENABLED: false, 55 - NODE_DID: "did:web:test.example.com", 56 - NODE_MANAGERS: [], 57 55 RATE_LIMIT_ENABLED: false, 58 56 RATE_LIMIT_READ_PER_MIN: 300, 59 57 RATE_LIMIT_SYNC_PER_MIN: 30,
+4 -61
src/replication/replication-manager.ts
··· 1 1 /** 2 2 * Main replication orchestrator. 3 - * Publishes peer identity + manifest records, syncs remote repos to IPFS. 3 + * Publishes manifest records, syncs remote repos to IPFS. 4 4 * Optionally driven by a PolicyEngine for per-DID intervals, priority, and filtering. 5 5 */ 6 6 ··· 17 17 import type { PolicyEngine } from "../policy/engine.js"; 18 18 19 19 import { 20 - PEER_NSID, 21 20 MANIFEST_NSID, 22 21 didToRkey, 23 - type PeerIdentityRecord, 24 22 type ManifestRecord, 25 23 type SyncState, 26 24 type VerificationConfig, ··· 71 69 private offerManager: OfferManager | null = null; 72 70 /** Per-DID last-sync timestamps (epoch ms) for policy-driven interval tracking. */ 73 71 private lastSyncTimestamps: Map<string, number> = new Map(); 74 - /** Multiaddrs at last publishPeerIdentity() call, for change detection. */ 75 - private lastPublishedMultiaddrs: string[] = []; 76 72 /** Dedup set for gossipsub notifications, keyed by `${did}:${rev}`. */ 77 73 private recentNotifications: Set<string> = new Set(); 78 74 private notificationCleanupTimer: ReturnType<typeof setInterval> | null = null; 79 75 80 - /** The node's own DID (did:web), used for identity in replication/challenges. */ 81 - private nodeDid: string; 82 - 83 76 constructor( 84 77 db: Database.Database, 85 78 private config: Config, ··· 90 83 verificationConfig?: Partial<VerificationConfig>, 91 84 private replicatedRepoReader?: ReplicatedRepoReader, 92 85 policyEngine?: PolicyEngine, 93 - nodeOpts?: { nodeDid: string }, 94 86 ) { 95 - this.nodeDid = nodeOpts?.nodeDid ?? config.NODE_DID; 96 87 this.syncStorage = new SyncStorage(db); 97 88 this.challengeStorage = new ChallengeStorage(db); 98 89 this.repoFetcher = new RepoFetcher(didResolver); ··· 112 103 repoManager, 113 104 this.peerDiscovery, 114 105 policyEngine, 115 - this.nodeDid, 106 + config.DID ?? "", 116 107 ); 117 108 } 118 109 } 119 110 120 111 /** 121 - * Get the node's DID (did:web identity). 
122 - */ 123 - getNodeDid(): string { 124 - return this.nodeDid; 125 - } 126 - 127 - /** 128 112 * Get the PolicyEngine, if one is configured. 129 113 */ 130 114 getPolicyEngine(): PolicyEngine | null { ··· 139 123 } 140 124 141 125 /** 142 - * Initialize replication: create tables, publish identity, sync manifests. 126 + * Initialize replication: create tables, sync manifests. 143 127 */ 144 128 async init(): Promise<void> { 145 129 this.syncStorage.initSchema(); 146 130 this.challengeStorage.initSchema(); 147 - await this.publishPeerIdentity(); 148 131 await this.syncManifests(); 149 132 await this.discoverPeerEndpoints(); 150 133 await this.runOfferDiscovery(); 151 134 this.setupGossipsubSubscription(); 152 - } 153 - 154 - /** 155 - * Publish (or update) the org.p2pds.peer/self record with our IPFS PeerID. 156 - * No-op if networking is disabled (getPeerId returns null). 157 - */ 158 - async publishPeerIdentity(): Promise<void> { 159 - const peerId = this.networkService.getPeerId(); 160 - if (!peerId) return; // networking disabled 161 - 162 - const multiaddrs = this.networkService.getMultiaddrs(); 163 - const record: PeerIdentityRecord = { 164 - $type: PEER_NSID, 165 - peerId, 166 - multiaddrs, 167 - createdAt: new Date().toISOString(), 168 - }; 169 - 170 - await this.repoManager.putRecord(PEER_NSID, "self", record); 171 - this.lastPublishedMultiaddrs = multiaddrs; 172 - 173 - // Broadcast identity change via gossipsub (fire-and-forget) 174 - this.networkService.publishIdentityNotification(this.nodeDid, peerId, multiaddrs).catch(() => {}); 175 - } 176 - 177 - /** 178 - * Republish peer identity if our multiaddrs have changed since the last publish. 179 - * Called at the start of syncAll() to piggyback on the existing sync loop. 
180 - */ 181 - private async republishIfMultiaddrsChanged(): Promise<void> { 182 - const current = this.networkService.getMultiaddrs(); 183 - const changed = 184 - current.length !== this.lastPublishedMultiaddrs.length || 185 - current.some((addr, i) => addr !== this.lastPublishedMultiaddrs[i]); 186 - if (changed) { 187 - await this.publishPeerIdentity(); 188 - } 189 135 } 190 136 191 137 /** ··· 471 417 * - Respects per-DID sync intervals (skips DIDs not yet due) 472 418 */ 473 419 async syncAll(): Promise<void> { 474 - // Republish our identity if multiaddrs changed (e.g. after network reconnect) 475 - await this.republishIfMultiaddrsChanged(); 476 - 477 420 const dids = this.getReplicateDids(); 478 421 479 422 // Sort by priority (highest first) when policy engine is present ··· 1677 1620 if (this.challengeScheduler) return; 1678 1621 1679 1622 this.challengeScheduler = new ChallengeScheduler( 1680 - this.nodeDid, 1623 + this.config.DID ?? "", 1681 1624 this.policyEngine, 1682 1625 this.syncStorage, 1683 1626 this.challengeStorage,
+4 -6
src/replication/replication.test.ts
··· 55 55 REPLICATE_DIDS: replicateDids, 56 56 FIREHOSE_URL: "wss://localhost/xrpc/com.atproto.sync.subscribeRepos", 57 57 FIREHOSE_ENABLED: false, 58 - NODE_DID: "did:web:test.example.com", 59 - NODE_MANAGERS: [], 60 58 RATE_LIMIT_ENABLED: false, 61 59 RATE_LIMIT_READ_PER_MIN: 300, 62 60 RATE_LIMIT_SYNC_PER_MIN: 30, ··· 307 305 try { rmSync(tmpDir, { recursive: true, force: true }); } catch {} 308 306 }); 309 307 310 - it("publishPeerIdentity is no-op when networking is off", async () => { 308 + it("peer identity record not created when networking is off", async () => { 311 309 // getPeerId() returns null when networking=false 312 310 expect(ipfsService.getPeerId()).toBeNull(); 313 311 314 - // Manually call what ReplicationManager.publishPeerIdentity does 312 + // Simulate peer identity publish: no record created because getPeerId() is null 315 313 const peerId = ipfsService.getPeerId(); 316 314 if (peerId) { 317 315 await repoManager.putRecord(PEER_NSID, "self", { ··· 1571 1569 undefined, 1572 1570 undefined, 1573 1571 reader, 1574 - { nodeDid: replicaConfig.NODE_DID, nodePublicKeyMultibase: "", nodeRepoManager: replicaRepo, repoManager: replicaRepo }, 1572 + replicaRepo, 1575 1573 ); 1576 1574 }); 1577 1575 ··· 1993 1991 undefined, // blobStore 1994 1992 mockReplicationManager, 1995 1993 undefined, // replicatedRepoReader 1996 - { nodeDid: replicaConfig.NODE_DID, nodePublicKeyMultibase: "", nodeRepoManager: replicaRepo, repoManager: replicaRepo }, 1994 + replicaRepo, 1997 1995 ); 1998 1996 }); 1999 1997
+6 -36
src/server.ts
··· 22 22 import { FailoverChallengeTransport } from "./replication/challenge-response/failover-transport.js"; 23 23 import type { ChallengeTransport } from "./replication/challenge-response/transport.js"; 24 24 import type { Libp2p } from "@libp2p/interface"; 25 - import { loadOrCreateNodeIdentity, getPublicKeyMultibase } from "./node-identity.js"; 26 25 import { RateLimiter } from "./rate-limiter.js"; 27 26 28 27 // Load configuration ··· 60 59 }); 61 60 } 62 61 63 - // Load node identity (keypair + did:web) 64 - const { keypair: nodeKeypair } = await loadOrCreateNodeIdentity(dataDir); 65 - const nodeDid = config.NODE_DID; 66 - const nodePublicKeyMultibase = getPublicKeyMultibase(nodeKeypair); 67 - 68 - // Initialize node repo (separate DB for node's own records: peer identity, manifests, offers) 69 - const nodeDbPath = resolve(dataDir, "node-repo.db"); 70 - const nodeDb = new Database(nodeDbPath); 71 - nodeDb.pragma("journal_mode = WAL"); 72 - nodeDb.pragma("synchronous = NORMAL"); 73 - 74 - const nodeKeyHex = Buffer.from(await nodeKeypair.export()).toString("hex"); 75 - const nodeRepoConfig = { 76 - ...config, 77 - DID: nodeDid, 78 - SIGNING_KEY: nodeKeyHex, 79 - SIGNING_KEY_PUBLIC: nodePublicKeyMultibase, 80 - }; 81 - const nodeRepoManager = new RepoManager(nodeDb, nodeRepoConfig); 82 - nodeRepoManager.init(); 83 - 84 - // Initialize social account repo (optional — only if DID is configured) 62 + // Initialize repo manager (requires DID + signing key) 85 63 let repoManager: RepoManager | undefined; 86 64 let blobStore: BlobStore | undefined; 87 65 if (config.DID && config.SIGNING_KEY) { ··· 90 68 repoManager.init(blobStore, ipfsService, ipfsService); 91 69 } 92 70 93 - // Initialize firehose (uses social repo if available, else node repo) 94 - const firehose = new Firehose(repoManager ?? 
nodeRepoManager); 71 + // Initialize firehose 72 + const firehose = new Firehose(repoManager!); 95 73 96 74 // Initialize DID resolver 97 75 const didResolver = new DidResolver({ ··· 116 94 // Initialize replication manager and replicated repo reader (if IPFS enabled and DIDs configured) 117 95 let replicationManager: ReplicationManager | undefined; 118 96 let replicatedRepoReader: ReplicatedRepoReader | undefined; 119 - if (ipfsService && hasReplicateDids) { 97 + if (ipfsService && hasReplicateDids && repoManager) { 120 98 replicationManager = new ReplicationManager( 121 99 db, 122 100 config, 123 - nodeRepoManager, 101 + repoManager, 124 102 ipfsService, 125 103 ipfsService, 126 104 didResolver, 127 105 undefined, 128 106 undefined, 129 107 policyEngine, 130 - { nodeDid }, 131 108 ); 132 109 replicatedRepoReader = new ReplicatedRepoReader( 133 110 ipfsService, ··· 150 127 blobStore, 151 128 replicationManager, 152 129 replicatedRepoReader, 153 - { 154 - nodeDid, 155 - nodePublicKeyMultibase, 156 - nodeRepoManager, 157 - repoManager, 158 - }, 130 + repoManager, 159 131 rateLimiter, 160 132 ); 161 133 ··· 230 202 pc.bold(`\nP2PDS running at `) + 231 203 pc.cyan(`http://localhost:${config.PORT}`), 232 204 ); 233 - console.log(pc.dim(` Node: ${nodeDid}`)); 234 205 if (config.DID) { 235 206 console.log(pc.dim(` DID: ${config.DID}`)); 236 207 } ··· 312 283 if (ipfsService) { 313 284 await ipfsService.stop(); 314 285 } 315 - nodeDb.close(); 316 286 db.close(); 317 287 }; 318 288 cleanup().finally(() => process.exit(0));
+14 -2
src/validation.ts
··· 1 1 import { parse, ValidationError, type BaseSchema } from "@atcute/lexicons/validations"; 2 + import { validateP2pdsRecord, loadP2pdsLexicons, getLexicon } from "./lexicons.js"; 2 3 3 4 import { 4 5 AppBskyActorProfile, ··· 39 40 "app.bsky.labeler.service": AppBskyLabelerService.mainSchema, 40 41 }; 41 42 43 + // Load p2pds lexicons at module init so they're available for validation. 44 + loadP2pdsLexicons(); 45 + 42 46 /** 43 47 * Record validator for AT Protocol records. 44 48 * Uses optimistic validation: known schemas are validated, unknown are allowed. 49 + * Delegates org.p2pds.* collections to the custom lexicon validator. 45 50 */ 46 51 export class RecordValidator { 47 52 private strictMode: boolean; ··· 51 56 } 52 57 53 58 validateRecord(collection: string, record: unknown): void { 59 + // Delegate p2pds collections to the custom lexicon validator 60 + if (collection.startsWith("org.p2pds.")) { 61 + validateP2pdsRecord(collection, record); 62 + return; 63 + } 64 + 54 65 const schema = recordSchemas[collection]; 55 66 56 67 if (!schema) { ··· 75 86 } 76 87 77 88 hasSchema(collection: string): boolean { 78 - return collection in recordSchemas; 89 + return collection in recordSchemas || getLexicon(collection) !== undefined; 79 90 } 80 91 81 92 getLoadedSchemas(): string[] { 82 - return Object.keys(recordSchemas); 93 + const p2pdsSchemas = Array.from(loadP2pdsLexicons().keys()); 94 + return [...Object.keys(recordSchemas), ...p2pdsSchemas]; 83 95 } 84 96 } 85 97
+2 -4
src/xrpc/admin-e2e.test.ts
··· 45 45 REPLICATE_DIDS: replicateDids, 46 46 FIREHOSE_URL: "wss://localhost/xrpc/com.atproto.sync.subscribeRepos", 47 47 FIREHOSE_ENABLED: false, 48 - NODE_DID: "did:web:test.example.com", 49 - NODE_MANAGERS: [], 50 48 RATE_LIMIT_ENABLED: false, 51 49 RATE_LIMIT_READ_PER_MIN: 300, 52 50 RATE_LIMIT_SYNC_PER_MIN: 30, ··· 123 121 } 124 122 125 123 const firehoseA = new Firehose(repoManagerA); 126 - const appA = createApp(configA, firehoseA, undefined, undefined, undefined, undefined, undefined, { nodeDid: configA.NODE_DID, nodePublicKeyMultibase: "", nodeRepoManager: repoManagerA, repoManager: repoManagerA }); 124 + const appA = createApp(configA, firehoseA, undefined, undefined, undefined, undefined, undefined, repoManagerA); 127 125 ({ server: serverA, port: portA } = await startServer(appA)); 128 126 129 127 // ---- Node B: replicator with admin dashboard ---- ··· 180 178 undefined, 181 179 replicationManager, 182 180 undefined, 183 - { nodeDid: configB.NODE_DID, nodePublicKeyMultibase: "", nodeRepoManager: repoManagerB, repoManager: repoManagerB }, 181 + repoManagerB, 184 182 ); 185 183 ({ server: serverB, port: portB } = await startServer(appB)); 186 184
+11 -13
src/xrpc/admin.test.ts
··· 31 31 REPLICATE_DIDS: replicateDids, 32 32 FIREHOSE_URL: "wss://localhost/xrpc/com.atproto.sync.subscribeRepos", 33 33 FIREHOSE_ENABLED: false, 34 - NODE_DID: "did:web:test.example.com", 35 - NODE_MANAGERS: [], 36 34 RATE_LIMIT_ENABLED: false, 37 35 RATE_LIMIT_READ_PER_MIN: 300, 38 36 RATE_LIMIT_SYNC_PER_MIN: 30, ··· 88 86 const repoManager = new RepoManager(db, config); 89 87 repoManager.init(); 90 88 const firehose = new Firehose(repoManager); 91 - app = createApp(config, firehose, undefined, undefined, undefined, undefined, undefined, { nodeDid: config.NODE_DID, nodePublicKeyMultibase: "", nodeRepoManager: repoManager, repoManager }); 89 + app = createApp(config, firehose, undefined, undefined, undefined, undefined, undefined, repoManager); 92 90 }); 93 91 94 92 afterEach(() => { ··· 145 143 const repoManager = new RepoManager(db, config); 146 144 repoManager.init(); 147 145 const firehose = new Firehose(repoManager); 148 - const app = createApp(config, firehose, undefined, undefined, undefined, undefined, undefined, { nodeDid: config.NODE_DID, nodePublicKeyMultibase: "", nodeRepoManager: repoManager, repoManager }); 146 + const app = createApp(config, firehose, undefined, undefined, undefined, undefined, undefined, repoManager); 149 147 150 148 const res = await authGet(app, "/xrpc/org.p2pds.admin.getOverview"); 151 149 expect(res.status).toBe(200); ··· 209 207 undefined, 210 208 replicationManager, 211 209 undefined, 212 - { nodeDid: config.NODE_DID, nodePublicKeyMultibase: "", nodeRepoManager: repoManager, repoManager }, 210 + repoManager, 213 211 ); 214 212 215 213 const res = await authGet(app, "/xrpc/org.p2pds.admin.getOverview"); ··· 294 292 undefined, 295 293 replicationManager, 296 294 undefined, 297 - { nodeDid: config.NODE_DID, nodePublicKeyMultibase: "", nodeRepoManager: repoManager, repoManager }, 295 + repoManager, 298 296 ); 299 297 }); 300 298 ··· 377 375 const repoManager = new RepoManager(db, config); 378 376 repoManager.init(); 379 377 
const firehose = new Firehose(repoManager); 380 - const app = createApp(config, firehose, undefined, undefined, undefined, undefined, undefined, { nodeDid: config.NODE_DID, nodePublicKeyMultibase: "", nodeRepoManager: repoManager, repoManager }); 378 + const app = createApp(config, firehose, undefined, undefined, undefined, undefined, undefined, repoManager); 381 379 382 380 const res = await authGet(app, "/xrpc/org.p2pds.admin.getNetworkStatus"); 383 381 expect(res.status).toBe(200); ··· 418 416 undefined, 419 417 undefined, 420 418 undefined, 421 - { nodeDid: config.NODE_DID, nodePublicKeyMultibase: "", nodeRepoManager: repoManager, repoManager }, 419 + repoManager, 422 420 ); 423 421 424 422 const res = await authGet(app, "/xrpc/org.p2pds.admin.getNetworkStatus"); ··· 454 452 const repoManager = new RepoManager(db, config); 455 453 repoManager.init(); 456 454 const firehose = new Firehose(repoManager); 457 - const app = createApp(config, firehose, undefined, undefined, undefined, undefined, undefined, { nodeDid: config.NODE_DID, nodePublicKeyMultibase: "", nodeRepoManager: repoManager, repoManager }); 455 + const app = createApp(config, firehose, undefined, undefined, undefined, undefined, undefined, repoManager); 458 456 459 457 const res = await authGet(app, "/xrpc/org.p2pds.admin.getPolicies"); 460 458 expect(res.status).toBe(200); ··· 505 503 undefined, 506 504 replicationManager, 507 505 undefined, 508 - { nodeDid: config.NODE_DID, nodePublicKeyMultibase: "", nodeRepoManager: repoManager, repoManager }, 506 + repoManager, 509 507 ); 510 508 511 509 const res = await authGet(app, "/xrpc/org.p2pds.admin.getPolicies"); ··· 575 573 undefined, 576 574 replicationManager, 577 575 undefined, 578 - { nodeDid: config.NODE_DID, nodePublicKeyMultibase: "", nodeRepoManager: repoManager, repoManager }, 576 + repoManager, 579 577 ); 580 578 581 579 const res = await authGet(app, "/xrpc/org.p2pds.admin.getPolicies"); ··· 614 612 const repoManager = new RepoManager(db, 
config); 615 613 repoManager.init(); 616 614 const firehose = new Firehose(repoManager); 617 - const app = createApp(config, firehose, undefined, undefined, undefined, undefined, undefined, { nodeDid: config.NODE_DID, nodePublicKeyMultibase: "", nodeRepoManager: repoManager, repoManager }); 615 + const app = createApp(config, firehose, undefined, undefined, undefined, undefined, undefined, repoManager); 618 616 619 617 const res = await noAuthGet(app, "/xrpc/org.p2pds.admin.dashboard"); 620 618 expect(res.status).toBe(200); ··· 696 694 undefined, 697 695 replicationManager, 698 696 undefined, 699 - { nodeDid: config.NODE_DID, nodePublicKeyMultibase: "", nodeRepoManager: repoManager, repoManager }, 697 + repoManager, 700 698 ); 701 699 }); 702 700
+11 -11
src/xrpc/repo.ts
··· 59 59 ); 60 60 } 61 61 62 - if (repo !== (c.env.DID ?? c.env.NODE_DID)) { 62 + if (repo !== c.env.DID) { 63 63 return c.json( 64 64 { error: "RepoNotFound", message: `Repository not found: ${repo}` }, 65 65 404, ··· 69 69 const data = await repoManager.describeRepo(); 70 70 71 71 return c.json({ 72 - did: c.env.DID ?? c.env.NODE_DID, 72 + did: c.env.DID, 73 73 handle: c.env.HANDLE ?? c.env.PDS_HOSTNAME, 74 74 didDoc: { 75 75 "@context": ["https://www.w3.org/ns/did/v1"], 76 - id: c.env.DID ?? c.env.NODE_DID, 76 + id: c.env.DID, 77 77 alsoKnownAs: [`at://${c.env.HANDLE ?? c.env.PDS_HOSTNAME}`], 78 78 verificationMethod: [ 79 79 { 80 - id: `${c.env.DID ?? c.env.NODE_DID}#atproto`, 80 + id: `${c.env.DID}#atproto`, 81 81 type: "Multikey", 82 - controller: c.env.DID ?? c.env.NODE_DID, 82 + controller: c.env.DID, 83 83 publicKeyMultibase: c.env.SIGNING_KEY_PUBLIC ?? "", 84 84 }, 85 85 ], ··· 114 114 ); 115 115 } 116 116 117 - if (repo !== (c.env.DID ?? c.env.NODE_DID)) { 117 + if (repo !== c.env.DID) { 118 118 return c.json( 119 119 { error: "RepoNotFound", message: `Repository not found: ${repo}` }, 120 120 404, ··· 167 167 ); 168 168 } 169 169 170 - if (repo !== (c.env.DID ?? c.env.NODE_DID)) { 170 + if (repo !== c.env.DID) { 171 171 return c.json( 172 172 { error: "RepoNotFound", message: `Repository not found: ${repo}` }, 173 173 404, ··· 203 203 ); 204 204 } 205 205 206 - if (repo !== (c.env.DID ?? c.env.NODE_DID)) { 206 + if (repo !== c.env.DID) { 207 207 return c.json( 208 208 { error: "InvalidRepo", message: `Invalid repository: ${repo}` }, 209 209 400, ··· 243 243 ); 244 244 } 245 245 246 - if (repo !== (c.env.DID ?? c.env.NODE_DID)) { 246 + if (repo !== c.env.DID) { 247 247 return c.json( 248 248 { error: "InvalidRepo", message: `Invalid repository: ${repo}` }, 249 249 400, ··· 288 288 ); 289 289 } 290 290 291 - if (repo !== (c.env.DID ?? 
c.env.NODE_DID)) { 291 + if (repo !== c.env.DID) { 292 292 return c.json( 293 293 { error: "InvalidRepo", message: `Invalid repository: ${repo}` }, 294 294 400, ··· 334 334 ); 335 335 } 336 336 337 - if (repo !== (c.env.DID ?? c.env.NODE_DID)) { 337 + if (repo !== c.env.DID) { 338 338 return c.json( 339 339 { error: "InvalidRepo", message: `Invalid repository: ${repo}` }, 340 340 400,
+15 -13
src/xrpc/server.ts
··· 13 13 14 14 export async function describeServer(c: Context<AppEnv>): Promise<Response> { 15 15 return c.json({ 16 - did: c.env.DID ?? c.env.NODE_DID, 16 + did: c.env.DID, 17 17 availableUserDomains: [], 18 18 inviteCodeRequired: false, 19 19 }); ··· 37 37 ); 38 38 } 39 39 40 - if (identifier !== c.env.HANDLE && identifier !== (c.env.DID ?? c.env.NODE_DID)) { 40 + if (identifier !== c.env.HANDLE && identifier !== c.env.DID) { 41 41 return c.json( 42 42 { 43 43 error: "AuthenticationRequired", ··· 59 59 } 60 60 61 61 const serviceDid = `did:web:${c.env.PDS_HOSTNAME}`; 62 + const did = c.env.DID ?? ""; 62 63 const accessJwt = await createAccessToken( 63 64 c.env.JWT_SECRET, 64 - c.env.DID ?? c.env.NODE_DID, 65 + did, 65 66 serviceDid, 66 67 ); 67 68 const refreshJwt = await createRefreshToken( 68 69 c.env.JWT_SECRET, 69 - c.env.DID ?? c.env.NODE_DID, 70 + did, 70 71 serviceDid, 71 72 ); 72 73 ··· 77 78 accessJwt, 78 79 refreshJwt, 79 80 handle: c.env.HANDLE ?? c.env.PDS_HOSTNAME, 80 - did: c.env.DID ?? c.env.NODE_DID, 81 + did: c.env.DID, 81 82 ...(email ? { email } : {}), 82 83 emailConfirmed: true, 83 84 active: true, ··· 107 108 serviceDid, 108 109 ); 109 110 110 - if (payload.sub !== (c.env.DID ?? c.env.NODE_DID)) { 111 + if (payload.sub !== c.env.DID) { 111 112 return c.json( 112 113 { 113 114 error: "AuthenticationRequired", ··· 117 118 ); 118 119 } 119 120 121 + const did = c.env.DID ?? ""; 120 122 const accessJwt = await createAccessToken( 121 123 c.env.JWT_SECRET, 122 - c.env.DID ?? c.env.NODE_DID, 124 + did, 123 125 serviceDid, 124 126 ); 125 127 const refreshJwt = await createRefreshToken( 126 128 c.env.JWT_SECRET, 127 - c.env.DID ?? c.env.NODE_DID, 129 + did, 128 130 serviceDid, 129 131 ); 130 132 ··· 135 137 accessJwt, 136 138 refreshJwt, 137 139 handle: c.env.HANDLE ?? c.env.PDS_HOSTNAME, 138 - did: c.env.DID ?? c.env.NODE_DID, 140 + did: c.env.DID, 139 141 ...(email ? 
{ email } : {}), 140 142 emailConfirmed: true, 141 143 active: true, ··· 178 180 const email = storedEmail || c.env.EMAIL; 179 181 return c.json({ 180 182 handle: c.env.HANDLE ?? c.env.PDS_HOSTNAME, 181 - did: c.env.DID ?? c.env.NODE_DID, 183 + did: c.env.DID, 182 184 ...(email ? { email } : {}), 183 185 emailConfirmed: true, 184 186 active: true, ··· 192 194 serviceDid, 193 195 ); 194 196 195 - if (payload.sub !== (c.env.DID ?? c.env.NODE_DID)) { 197 + if (payload.sub !== c.env.DID) { 196 198 return c.json( 197 199 { 198 200 error: "AuthenticationRequired", ··· 206 208 const email = storedEmail || c.env.EMAIL; 207 209 return c.json({ 208 210 handle: c.env.HANDLE ?? c.env.PDS_HOSTNAME, 209 - did: c.env.DID ?? c.env.NODE_DID, 211 + did: c.env.DID, 210 212 ...(email ? { email } : {}), 211 213 emailConfirmed: true, 212 214 active: true, ··· 300 302 301 303 const keypair = await getSigningKeypair(c.env.SIGNING_KEY); 302 304 const token = await createServiceJwt({ 303 - iss: c.env.DID ?? c.env.NODE_DID, 305 + iss: c.env.DID ?? "", 304 306 aud, 305 307 lxm, 306 308 keypair,
+6 -6
src/xrpc/sync.ts
··· 32 32 } 33 33 34 34 // Local DID: serve from RepoManager 35 - if (did === (c.env.DID ?? c.env.NODE_DID)) { 35 + if (did === c.env.DID) { 36 36 const carBytes = await repoManager.getRepoCar(); 37 37 return new Response(carBytes, { 38 38 status: 200, ··· 94 94 ); 95 95 } 96 96 97 - if (did === (c.env.DID ?? c.env.NODE_DID)) { 97 + if (did === c.env.DID) { 98 98 const data = await repoManager.getRepoStatus(); 99 99 return c.json({ 100 100 did: data.did, ··· 179 179 ); 180 180 } 181 181 182 - if (did !== (c.env.DID ?? c.env.NODE_DID)) { 182 + if (did !== c.env.DID) { 183 183 return c.json( 184 184 { error: "RepoNotFound", message: `Repository not found for DID: ${did}` }, 185 185 404, ··· 234 234 } 235 235 236 236 // Local DID: serve from RepoManager 237 - if (did === (c.env.DID ?? c.env.NODE_DID)) { 237 + if (did === c.env.DID) { 238 238 const carBytes = await repoManager.getBlocks(cidsParam); 239 239 return new Response(carBytes, { 240 240 status: 200, ··· 302 302 } 303 303 304 304 // Local DID: serve from filesystem BlobStore 305 - if (did === (c.env.DID ?? c.env.NODE_DID)) { 305 + if (did === c.env.DID) { 306 306 if (!repoManager.blobStore) { 307 307 return c.json( 308 308 { error: "ServiceUnavailable", message: "Blob storage is not configured" }, ··· 406 406 ); 407 407 } 408 408 409 - if (did !== (c.env.DID ?? c.env.NODE_DID)) { 409 + if (did !== c.env.DID) { 410 410 return c.json( 411 411 { error: "RepoNotFound", message: `Repository not found for DID: ${did}` }, 412 412 404,
+1 -1
tsconfig.json
··· 16 16 "sourceMap": true 17 17 }, 18 18 "include": ["src"], 19 - "exclude": ["node_modules", "dist"] 19 + "exclude": ["node_modules", "dist", "apps"] 20 20 }