A container registry that uses the AT Protocol for manifest storage and S3 for blob storage.
0
fork

Configure Feed

Select the types of activity you want to include in your feed.

Post to Bluesky when manifests are uploaded; linting fixes

+2918 -707
+9
.env.hold.example
··· 84 84 # HOLD_KEY_PATH=/var/lib/atcr-hold/signing.key 85 85 86 86 # ============================================================================== 87 + # Bluesky Integration 88 + # ============================================================================== 89 + 90 + # Enable Bluesky posts when users push container images (default: false) 91 + # When enabled, the hold's embedded PDS will create posts announcing image pushes 92 + # Can be overridden per-hold via the captain record's enableManifestPosts field 93 + # HOLD_BLUESKY_POSTS_ENABLED=false 94 + 95 + # ============================================================================== 87 96 # Registration (REQUIRED) 88 97 # ============================================================================== 89 98
+3 -1
cmd/credential-helper/main.go
··· 127 127 fmt.Fprintf(os.Stderr, "Stored credentials for %s are invalid or expired\n", appViewURL) 128 128 // Delete the invalid credentials 129 129 delete(allCreds.Credentials, appViewURL) 130 - saveDeviceCredentials(configPath, allCreds) 130 + if err := saveDeviceCredentials(configPath, allCreds); err != nil { 131 + fmt.Fprintf(os.Stderr, "Warning: failed to save updated credentials: %v\n", err) 132 + } 131 133 // Mark as not found so we re-authorize below 132 134 found = false 133 135 }
-8
cmd/oauth-helper/main.go
··· 2 2 3 3 import ( 4 4 "context" 5 - "crypto/sha256" 6 - "encoding/base64" 7 5 "flag" 8 6 "fmt" 9 7 "log" ··· 135 133 // Use the session's NewHostDPoP method to generate the proof 136 134 return session.NewHostDPoP(method, reqURL) 137 135 } 138 - 139 - // sha256Hash computes SHA-256 hash and returns base64url-encoded string 140 - func sha256Hash(data []byte) string { 141 - hash := sha256.Sum256(data) 142 - return base64.RawURLEncoding.EncodeToString(hash[:]) 143 - }
+13
deploy/.env.prod.template
··· 87 87 # Default: false 88 88 HOLD_ALLOW_ALL_CREW=false 89 89 90 + # Enable Bluesky posts when manifests are pushed 91 + # When enabled, the hold service creates Bluesky posts announcing new container 92 + # image pushes. Posts include image name, tag, size, and layer count. 93 + # 94 + # - true: Create Bluesky posts for manifest uploads 95 + # - false: Silent operation (no Bluesky posts) 96 + # 97 + # Note: This requires the hold owner to have OAuth credentials for posting. 98 + # See docs/BLUESKY_MANIFEST_POSTS.md for setup instructions. 99 + # 100 + # Default: false 101 + HOLD_BLUESKY_POSTS_ENABLED=false 102 + 90 103 # ============================================================================== 91 104 # S3/UpCloud Object Storage Configuration 92 105 # ==============================================================================
+1
deploy/docker-compose.prod.yml
··· 97 97 HOLD_ALLOW_ALL_CREW: ${HOLD_ALLOW_ALL_CREW:-false} 98 98 HOLD_PUBLIC: ${HOLD_PUBLIC:-false} 99 99 HOLD_OWNER: ${HOLD_OWNER:-} 100 + HOLD_BLUESKY_POSTS_ENABLED: ${HOLD_BLUESKY_POSTS_ENABLED:-false} 100 101 101 102 # Embedded PDS configuration 102 103 HOLD_DATABASE_DIR: ${HOLD_DATABASE_DIR:-/var/lib/atcr-hold}
+182 -60
docs/BLUESKY_MANIFEST_POSTS.md
··· 271 271 } 272 272 ``` 273 273 274 - ### 4. Bluesky Post Creation 274 + ### 4. Bluesky Post Creation with Facets 275 275 276 276 **File**: `pkg/hold/pds/manifest_post.go` (new file) 277 277 278 - **Pattern**: Reuse existing `status.go` pattern 278 + **Pattern**: Extends `status.go` pattern with rich text facets 279 279 280 280 ```go 281 281 // CreateManifestPost creates a Bluesky post announcing a manifest upload 282 - func (p *HoldPDS) CreateManifestPost(ctx context.Context, repository, tag, userHandle string) (string, error) { 282 + // Includes facets for clickable mentions and links 283 + func (p *HoldPDS) CreateManifestPost( 284 + ctx context.Context, 285 + repository, tag, userHandle, digest string, 286 + totalSize int64, 287 + ) (string, error) { 283 288 now := time.Now() 284 289 285 - // Format post text (similar to "what's new" feed) 286 - text := formatManifestPostText(repository, tag, userHandle) 290 + // Build AppView repository URL 291 + appViewURL := fmt.Sprintf("https://atcr.io/r/%s/%s", userHandle, repository) 292 + 293 + // Format post text components 294 + digestShort := formatDigest(digest) 295 + sizeStr := formatSize(totalSize) 296 + repoWithTag := fmt.Sprintf("%s:%s", repository, tag) 297 + 298 + // Build text: "@alice.bsky.social just pushed hsm-secrets-operator:latest\nDigest: sha256:abc...def Size: 12.2 MB" 299 + text := fmt.Sprintf("@%s just pushed %s\nDigest: %s Size: %s", userHandle, repoWithTag, digestShort, sizeStr) 300 + 301 + // Create facets for mentions and links 302 + facets := buildFacets(text, userHandle, repoWithTag, appViewURL) 287 303 288 - // Create post struct 304 + // Create post struct with facets 289 305 post := &bsky.FeedPost{ 290 306 LexiconTypeID: "app.bsky.feed.post", 291 307 Text: text, 308 + Facets: facets, 292 309 CreatedAt: now.Format(time.RFC3339), 293 - // Optional: Add embed with link to AppView 294 - // Embed: &bsky.FeedPost_Embed{...} 295 310 } 296 311 297 312 // Create record with auto-generated TID ··· 
314 329 return postURI, nil 315 330 } 316 331 317 - // formatManifestPostText generates the post text 318 - func formatManifestPostText(repository, tag, userHandle string) string { 319 - // Example formats: 320 - // "@alice.bsky.social pushed alice/myapp:latest to ATCR" 321 - // "New image pushed: alice/myapp:v1.0.0 by @alice.bsky.social" 322 - // "📦 alice/myapp:latest pushed by @alice.bsky.social" 332 + // formatDigest truncates digest to first 7 and last 7 chars 333 + // Example: sha256:abc1234567890...fedcba9876543210 -> sha256:abc1234...9876543 334 + func formatDigest(digest string) string { 335 + if !strings.HasPrefix(digest, "sha256:") { 336 + return digest // Return as-is if not sha256 337 + } 323 338 324 - return fmt.Sprintf("📦 %s:%s pushed by @%s", repository, tag, userHandle) 339 + hash := strings.TrimPrefix(digest, "sha256:") 340 + if len(hash) <= 14 { 341 + return digest // Too short to truncate 342 + } 343 + 344 + return fmt.Sprintf("sha256:%s...%s", hash[:7], hash[len(hash)-7:]) 345 + } 346 + 347 + // formatSize converts bytes to human-readable format 348 + // Examples: 1024 -> "1.0 KB", 1048576 -> "1.0 MB", 1073741824 -> "1.0 GB" 349 + func formatSize(bytes int64) string { 350 + const ( 351 + KB = 1024 352 + MB = 1024 * KB 353 + GB = 1024 * MB 354 + ) 355 + 356 + switch { 357 + case bytes >= GB: 358 + return fmt.Sprintf("%.1f GB", float64(bytes)/float64(GB)) 359 + case bytes >= MB: 360 + return fmt.Sprintf("%.1f MB", float64(bytes)/float64(MB)) 361 + case bytes >= KB: 362 + return fmt.Sprintf("%.1f KB", float64(bytes)/float64(KB)) 363 + default: 364 + return fmt.Sprintf("%d B", bytes) 365 + } 325 366 } 326 - ``` 327 367 328 - **Advanced Post Options**: 368 + // buildFacets creates mention and link facets for rich text 369 + // IMPORTANT: Byte offsets must be calculated for UTF-8 encoded text 370 + func buildFacets(text, userHandle, repoWithTag, appViewURL string) []*bsky.RichtextFacet { 371 + facets := []*bsky.RichtextFacet{} 329 372 330 - ```go 331 
- // Example with embedded link to AppView 332 - post := &bsky.FeedPost{ 333 - LexiconTypeID: "app.bsky.feed.post", 334 - Text: text, 335 - CreatedAt: now.Format(time.RFC3339), 336 - Embed: &bsky.FeedPost_Embed{ 337 - FeedPost_External: &bsky.EmbedExternal{ 338 - External: &bsky.EmbedExternal_External{ 339 - Uri: fmt.Sprintf("https://atcr.io/%s", repository), 340 - Title: fmt.Sprintf("%s:%s", repository, tag), 341 - Description: "View on ATCR", 373 + // Find mention: "@alice.bsky.social" 374 + mentionText := "@" + userHandle 375 + mentionStart := strings.Index(text, mentionText) 376 + if mentionStart >= 0 { 377 + // Calculate byte offsets (not character offsets!) 378 + byteStart := int64(len(text[:mentionStart])) 379 + byteEnd := int64(len(text[:mentionStart+len(mentionText)])) 380 + 381 + facets = append(facets, &bsky.RichtextFacet{ 382 + Index: &bsky.RichtextFacet_ByteSlice{ 383 + ByteStart: byteStart, 384 + ByteEnd: byteEnd, 342 385 }, 343 - }, 344 - }, 345 - } 386 + Features: []*bsky.RichtextFacet_Features_Elem{ 387 + { 388 + RichtextFacet_Mention: &bsky.RichtextFacet_Mention{ 389 + Did: "", // Will be resolved by Bluesky from handle 390 + }, 391 + }, 392 + }, 393 + }) 394 + } 346 395 347 - // Example with facets (mentions) 348 - // This would require parsing the text and creating facet structs 349 - // for @mentions to be clickable in Bluesky 396 + // Find repository link: "hsm-secrets-operator:latest" 397 + linkStart := strings.Index(text, repoWithTag) 398 + if linkStart >= 0 { 399 + // Calculate byte offsets 400 + byteStart := int64(len(text[:linkStart])) 401 + byteEnd := int64(len(text[:linkStart+len(repoWithTag)])) 402 + 403 + facets = append(facets, &bsky.RichtextFacet{ 404 + Index: &bsky.RichtextFacet_ByteSlice{ 405 + ByteStart: byteStart, 406 + ByteEnd: byteEnd, 407 + }, 408 + Features: []*bsky.RichtextFacet_Features_Elem{ 409 + { 410 + RichtextFacet_Link: &bsky.RichtextFacet_Link{ 411 + Uri: appViewURL, 412 + }, 413 + }, 414 + }, 415 + }) 416 + } 417 + 
418 + return facets 419 + } 350 420 ``` 351 421 422 + **Facet Implementation Notes:** 423 + 424 + 1. **Byte Offsets**: ATProto uses byte offsets (UTF-8 encoded), not character offsets 425 + - For ASCII text: `len(text[:index])` gives correct byte offset 426 + - For Unicode: Must use `len()` on substring to get byte count 427 + - Never use `rune` indexes directly 428 + 429 + 2. **Mention Facets**: 430 + - Include `@` symbol in the facet range 431 + - DID field can be empty; Bluesky resolves from handle 432 + - Type: `app.bsky.richtext.facet#mention` 433 + 434 + 3. **Link Facets**: 435 + - Text can be anything (doesn't have to be URL) 436 + - URI field contains actual target URL 437 + - Type: `app.bsky.richtext.facet#link` 438 + 439 + 4. **Ordering**: Facets should not overlap; order doesn't matter 440 + 352 441 ### 5. AppView Integration 353 442 354 443 **File**: `pkg/appview/storage/manifest_store.go` ··· 411 500 } 412 501 413 502 // 5. Build notification request 414 - notifyReq := map[string]interface{}{ 503 + notifyReq := map[string]any{ 415 504 "repository": ms.repository, 416 505 "tag": tag, 417 506 "userDid": regCtx.DID, 418 507 "userHandle": regCtx.Handle, // Need to add this to RegistryContext 419 - "manifest": map[string]interface{}{ 508 + "manifest": map[string]any{ 420 509 "mediaType": parsedManifest.MediaType, 421 - "config": map[string]interface{}{ 510 + "config": map[string]any{ 422 511 "digest": parsedManifest.Config.Digest.String(), 423 512 "size": parsedManifest.Config.Size, 424 513 }, 425 - "layers": func() []map[string]interface{} { 426 - layers := make([]map[string]interface{}, len(parsedManifest.Layers)) 514 + "layers": func() []map[string]any { 515 + layers := make([]map[string]any, len(parsedManifest.Layers)) 427 516 for i, layer := range parsedManifest.Layers { 428 - layers[i] = map[string]interface{}{ 517 + layers[i] = map[string]any{ 429 518 "digest": layer.Digest.String(), 430 519 "size": layer.Size, 431 520 "mediaType": layer.MediaType, 
··· 463 552 } 464 553 465 554 // 7. Parse response (optional logging) 466 - var notifyResp map[string]interface{} 555 + var notifyResp map[string]any 467 556 if err := json.NewDecoder(resp.Body).Decode(&notifyResp); err == nil { 468 557 log.Printf("Hold notification successful: %+v", notifyResp) 469 558 } ··· 603 692 604 693 **Hold Service** (`.env.hold.example`): 605 694 ```bash 606 - # Enable/disable Bluesky posting 607 - HOLD_BLUESKY_POSTS_ENABLED=true 695 + # Enable/disable Bluesky manifest posting (default: false) 696 + # When enabled, hold will create Bluesky posts when users push images 697 + # Can be overridden per-hold via captain record's enableManifestPosts field 698 + HOLD_BLUESKY_POSTS_ENABLED=false 699 + ``` 700 + 701 + **AppView** - No configuration needed. AppView always attempts to notify holds after manifest uploads, but handles failures gracefully. 702 + 703 + ### Feature Flags 608 704 609 - # Enable/disable layer record creation 610 - HOLD_LAYER_RECORDS_ENABLED=true 611 - ``` 705 + **Captain Record Override:** 706 + The hold's captain record includes an `enableManifestPosts` field that overrides the environment variable: 612 707 613 - **AppView** (`.env.appview.example`): 614 - ```bash 615 - # Enable/disable manifest notifications to holds 616 - ATCR_NOTIFY_HOLDS_ENABLED=true 708 + ```go 709 + type CaptainRecord struct { 710 + // ... other fields ... 711 + EnableManifestPosts bool `json:"enableManifestPosts" cborgen:"enableManifestPosts"` 712 + } 617 713 ``` 618 714 619 - ### Feature Flags 715 + **Precedence (highest to lowest):** 716 + 1. Captain record `enableManifestPosts` field (if set) 717 + 2. `HOLD_BLUESKY_POSTS_ENABLED` environment variable 718 + 3. 
Default: `false` (opt-in feature) 620 719 621 - Consider making this feature opt-in initially: 622 - - Add flag to captain record: `enableSocialPosts bool` 623 - - Check flag before creating posts 624 - - Allow hold owners to disable social features 720 + **Rationale:** 721 + - Default off for backward compatibility and privacy 722 + - Hold owners can enable via env var at deployment 723 + - Per-hold override via captain record for multi-tenant scenarios 724 + - Follows same pattern as existing status post feature 625 725 626 726 ## Performance Considerations 627 727 ··· 816 916 817 917 ## Example Post Formats 818 918 819 - ### Simple Format 919 + ### Preferred Format (Facet-Based) 920 + 921 + **Text representation:** 922 + ``` 923 + @alice.bsky.social just pushed hsm-secrets-operator:latest 924 + Digest: sha256:abc1234...def5678 Size: 12.2 MB 925 + ``` 926 + 927 + **Actual implementation:** 928 + - `@alice.bsky.social` - Clickable mention (facet type: `app.bsky.richtext.facet#mention`) 929 + - `hsm-secrets-operator:latest` - Clickable link to `https://atcr.io/r/alice.bsky.social/hsm-secrets-operator` (facet type: `app.bsky.richtext.facet#link`) 930 + - `sha256:abc1234...def5678` - Truncated digest (first 7 + last 7 chars) 931 + - `12.2 MB` - Human-readable size (auto-formatted from bytes) 932 + 933 + **Why facets?** 934 + - Mentions are clickable and link to user profiles in Bluesky 935 + - Repository names link directly to AppView repository pages 936 + - Better user experience than plain text URLs 937 + - Standard ATProto rich text format 938 + 939 + ### Alternative Formats 940 + 941 + #### Simple Format 820 942 ``` 821 943 📦 alice/myapp:latest pushed by @alice.bsky.social 822 944 ``` 823 945 824 - ### Detailed Format 946 + #### Detailed Format 825 947 ``` 826 948 📦 New container image pushed! 
827 949 ··· 832 954 View: https://atcr.io/alice/myapp 833 955 ``` 834 956 835 - ### With Emoji/Styling 957 + #### With Emoji/Styling 836 958 ``` 837 959 🚀 alice/myapp:latest 838 960 ··· 842 964 🔗 atcr.io/alice/myapp 843 965 ``` 844 966 845 - ### With Tags 967 + #### With Tags 846 968 ``` 847 969 📦 alice/myapp:latest pushed by @alice.bsky.social 848 970
+691
docs/RELAY.md
··· 1 + # Running an ATProto Relay for ATCR Hold Discovery 2 + 3 + This document explains what it takes to run an ATProto relay for indexing ATCR hold records, including infrastructure requirements, configuration, and trade-offs. 4 + 5 + ## Overview 6 + 7 + ### What is an ATProto Relay? 8 + 9 + An ATProto relay is a service that: 10 + - **Subscribes to multiple PDS hosts** and aggregates their data streams 11 + - **Outputs a combined "firehose"** event stream for real-time network updates 12 + - **Validates data integrity** and identity signatures 13 + - **Provides discovery endpoints** like `com.atproto.sync.listReposByCollection` 14 + 15 + The relay acts as a network-wide indexer, making it possible to discover which DIDs have records of specific types (collections). 16 + 17 + ### Why ATCR Needs a Relay 18 + 19 + ATCR uses hold captain records (`io.atcr.hold.captain`) stored in hold PDSs to enable hold discovery. The `listReposByCollection` endpoint allows AppViews to efficiently discover all holds in the network without crawling every PDS individually. 20 + 21 + **The problem**: Standard Bluesky relays appear to only index collections from `did:plc` DIDs, not `did:web` DIDs. Since ATCR holds use `did:web` (e.g., `did:web:hold01.atcr.io`), they aren't discoverable via Bluesky's public relays. 22 + 23 + ## Recommended Approach: Phased Implementation 24 + 25 + ATCR's discovery needs evolve as the network grows. Start simple, scale as needed. 26 + 27 + ## MVP: Minimal Discovery Service 28 + 29 + For initial deployment with a small number of holds (dozens, not thousands), build a **lightweight custom discovery service** focused solely on `io.atcr.*` collections. 30 + 31 + ### Why Minimal Service for MVP? 
32 + 33 + - **Scope**: Only index `io.atcr.*` collections (manifests, tags, captain/crew, sailor profiles) 34 + - **Opt-in**: Only crawls PDSs that explicitly call `requestCrawl` 35 + - **Small scale**: Dozens of holds, not millions of users 36 + - **Simple storage**: SQLite sufficient for current scale 37 + - **Cost-effective**: $5-10/month VPS 38 + 39 + ### Architecture 40 + 41 + **Inbound endpoints:** 42 + ``` 43 + POST /xrpc/com.atproto.sync.requestCrawl 44 + → Hold registers itself for crawling 45 + 46 + GET /xrpc/com.atproto.sync.listReposByCollection?collection=io.atcr.hold.captain 47 + → AppView discovers holds 48 + ``` 49 + 50 + **Outbound (client to PDS):** 51 + ``` 52 + 1. com.atproto.repo.describeRepo → verify PDS exists 53 + 2. com.atproto.sync.getRepo → fetch full CAR file (initial backfill) 54 + 3. com.atproto.sync.subscribeRepos → WebSocket for real-time updates 55 + 4. Parse events → extract io.atcr.* records → index in SQLite 56 + ``` 57 + 58 + **Data flow:** 59 + 60 + **Initial crawl (on requestCrawl):** 61 + ``` 62 + 1. Hold POSTs requestCrawl → service queues crawl job 63 + 2. Service fetches getRepo (CAR file) from hold's PDS for backfill 64 + 3. Service parses CAR using indigo libraries 65 + 4. Service extracts io.atcr.* records (captain, crew, manifests, etc.) 66 + 5. Service stores: (did, collection, rkey, record_data) in SQLite 67 + 6. Service opens WebSocket to subscribeRepos for this DID 68 + 7. Service stores cursor for reconnection handling 69 + ``` 70 + 71 + **Ongoing updates (WebSocket):** 72 + ``` 73 + 1. Receive commit events via subscribeRepos WebSocket 74 + 2. Parse event, filter to io.atcr.* collections only 75 + 3. Update indexed_records incrementally (insert/update/delete) 76 + 4. Update cursor after processing each event 77 + 5. On disconnect: reconnect with stored cursor to resume 78 + ``` 79 + 80 + **Discovery (AppView query):** 81 + ``` 82 + 1. AppView GETs listReposByCollection?collection=io.atcr.hold.captain 83 + 2. 
Service queries SQLite WHERE collection='io.atcr.hold.captain' 84 + 3. Service returns list of DIDs with that collection 85 + ``` 86 + 87 + ### Implementation Requirements 88 + 89 + **Technologies:** 90 + - Go (reuse indigo libraries for CAR parsing and WebSocket) 91 + - SQLite (sufficient for dozens/hundreds of holds) 92 + - Standard HTTP server + WebSocket client 93 + 94 + **Core components:** 95 + 96 + 1. **HTTP handlers** (`cmd/atcr-discovery/handlers/`): 97 + - `requestCrawl` - queue crawl jobs 98 + - `listReposByCollection` - query indexed collections 99 + 100 + 2. **Crawler** (`pkg/discovery/crawler.go`): 101 + - Fetch CAR files from PDSs for initial backfill 102 + - Parse with `github.com/bluesky-social/indigo/repo` 103 + - Extract records, filter to `io.atcr.*` only 104 + 105 + 3. **WebSocket subscriber** (`pkg/discovery/subscriber.go`): 106 + - WebSocket client for `com.atproto.sync.subscribeRepos` 107 + - Event parsing and filtering 108 + - Cursor management and persistence 109 + - Automatic reconnection with resume 110 + 111 + 4. **Storage** (`pkg/discovery/storage.go`): 112 + - SQLite schema for indexed records 113 + - Indexes on (collection, did) for fast queries 114 + - Cursor storage for reconnection 115 + 116 + 5. 
**Worker** (`pkg/discovery/worker.go`): 117 + - Background crawl job processor 118 + - WebSocket connection manager 119 + - Health monitoring for subscriptions 120 + 121 + **Database schema:** 122 + ```sql 123 + CREATE TABLE indexed_records ( 124 + did TEXT NOT NULL, 125 + collection TEXT NOT NULL, 126 + rkey TEXT NOT NULL, 127 + record_data TEXT NOT NULL, -- JSON 128 + indexed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, 129 + PRIMARY KEY (did, collection, rkey) 130 + ); 131 + 132 + CREATE INDEX idx_collection ON indexed_records(collection); 133 + CREATE INDEX idx_did ON indexed_records(did); 134 + 135 + CREATE TABLE crawl_queue ( 136 + id INTEGER PRIMARY KEY AUTOINCREMENT, 137 + hostname TEXT NOT NULL UNIQUE, 138 + did TEXT, 139 + status TEXT DEFAULT 'pending', -- pending, in_progress, subscribed, failed 140 + last_crawled_at TIMESTAMP, 141 + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP 142 + ); 143 + 144 + CREATE TABLE subscriptions ( 145 + did TEXT PRIMARY KEY, 146 + hostname TEXT NOT NULL, 147 + cursor INTEGER, -- Last processed sequence number 148 + status TEXT DEFAULT 'active', -- active, disconnected, failed 149 + last_event_at TIMESTAMP, 150 + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, 151 + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP 152 + ); 153 + ``` 154 + 155 + **Leveraging indigo libraries:** 156 + 157 + ```go 158 + import ( 159 + "github.com/bluesky-social/indigo/repo" 160 + "github.com/bluesky-social/indigo/atproto/syntax" 161 + "github.com/bluesky-social/indigo/events" 162 + "github.com/gorilla/websocket" 163 + "github.com/ipfs/go-cid" 164 + ) 165 + 166 + // Initial backfill: Parse CAR file 167 + r, err := repo.ReadRepoFromCar(ctx, bytes.NewReader(carData)) 168 + if err != nil { 169 + return err 170 + } 171 + 172 + // Iterate records 173 + err = r.ForEach(ctx, "", func(path string, nodeCid cid.Cid) error { 174 + // Parse collection from path (e.g., "io.atcr.hold.captain/self") 175 + parts := strings.Split(path, "/") 176 + if len(parts) != 2 { 
177 + return nil // skip invalid paths 178 + } 179 + 180 + collection := parts[0] 181 + rkey := parts[1] 182 + 183 + // Filter to io.atcr.* only 184 + if !strings.HasPrefix(collection, "io.atcr.") { 185 + return nil 186 + } 187 + 188 + // Get record data 189 + recordBytes, err := r.GetRecord(ctx, path) 190 + if err != nil { 191 + return err 192 + } 193 + 194 + // Store in database 195 + return store.IndexRecord(did, collection, rkey, recordBytes) 196 + }) 197 + 198 + // WebSocket subscription: Listen for updates 199 + wsURL := fmt.Sprintf("wss://%s/xrpc/com.atproto.sync.subscribeRepos", hostname) 200 + conn, _, err := websocket.DefaultDialer.Dial(wsURL, nil) 201 + if err != nil { 202 + return err 203 + } 204 + 205 + // Read events 206 + rsc := &events.RepoStreamCallbacks{ 207 + RepoCommit: func(evt *events.RepoCommit) error { 208 + // Filter to io.atcr.* collections only 209 + for _, op := range evt.Ops { 210 + if !strings.HasPrefix(op.Collection, "io.atcr.") { 211 + continue 212 + } 213 + 214 + // Process create/update/delete operations 215 + switch op.Action { 216 + case "create", "update": 217 + store.IndexRecord(evt.Repo, op.Collection, op.Rkey, op.Record) 218 + case "delete": 219 + store.DeleteRecord(evt.Repo, op.Collection, op.Rkey) 220 + } 221 + } 222 + 223 + // Update cursor 224 + return store.UpdateCursor(evt.Repo, evt.Seq) 225 + }, 226 + } 227 + 228 + // Process stream 229 + scheduler := events.NewScheduler("discovery-worker", conn.RemoteAddr().String(), rsc) 230 + return events.HandleRepoStream(ctx, conn, scheduler) 231 + ``` 232 + 233 + ### Infrastructure Requirements 234 + 235 + **Minimum specs:** 236 + - 1 vCPU 237 + - 1-2GB RAM 238 + - 20GB SSD 239 + - Minimal bandwidth (<1GB/day for dozens of holds) 240 + 241 + **Estimated cost:** 242 + - Hetzner CX11: €4.15/month (~$5/month) 243 + - DigitalOcean Basic: $6/month 244 + - Fly.io: ~$5-10/month 245 + 246 + **Deployment:** 247 + ```bash 248 + # Build 249 + go build -o atcr-discovery ./cmd/atcr-discovery 
250 + 251 + # Run 252 + export DATABASE_PATH="/var/lib/atcr-discovery/discovery.db" 253 + export HTTP_ADDR=":8080" 254 + ./atcr-discovery 255 + ``` 256 + 257 + ### Limitations 258 + 259 + **What it does NOT do:** 260 + - ❌ Serve outbound `subscribeRepos` firehose (AppViews query via listReposByCollection) 261 + - ❌ Full MST validation (trust PDS validation) 262 + - ❌ Scale to millions of accounts (SQLite limits) 263 + - ❌ Multi-instance deployment (single process with SQLite) 264 + 265 + **When to migrate to full relay:** When you have 1000+ holds, need PostgreSQL, or multi-instance deployment. 266 + 267 + ## Future Scale: Full Relay (Sync v1.1) 268 + 269 + When ATCR grows beyond dozens of holds and needs real-time indexing, migrate to Bluesky's relay v1.1 implementation. 270 + 271 + ### When to Upgrade 272 + 273 + **Indicators:** 274 + - 100+ holds requesting frequent crawls 275 + - Need real-time updates (re-crawl latency too high) 276 + - Multiple AppView instances need coordinated discovery 277 + - SQLite performance becomes bottleneck 278 + 279 + ### Relay v1.1 Characteristics 280 + 281 + Released May 2025, this is Bluesky's current reference implementation. 
282 + 283 + **Key features:** 284 + - **Non-archival**: Doesn't mirror full repository data, only processes firehose 285 + - **WebSocket subscriptions**: Real-time updates from PDSs 286 + - **Scalable**: 2 vCPU, 12GB RAM handles ~100M accounts 287 + - **PostgreSQL**: Required for production scale 288 + - **Admin UI**: Web dashboard for management 289 + 290 + **Source**: `github.com/bluesky-social/indigo/cmd/relay` 291 + 292 + ### Migration Path 293 + 294 + **Step 1: Deploy relay v1.1** 295 + ```bash 296 + git clone https://github.com/bluesky-social/indigo.git 297 + cd indigo 298 + go build -o relay ./cmd/relay 299 + 300 + export DATABASE_URL="postgres://relay:password@localhost:5432/atcr_relay" 301 + ./relay --admin-password="secure-password" 302 + ``` 303 + 304 + **Step 2: Migrate data** 305 + - Export indexed records from SQLite 306 + - Trigger crawls in relay for all known holds 307 + - Verify relay indexes correctly 308 + 309 + **Step 3: Update AppView configuration** 310 + ```bash 311 + # Point to new relay 312 + export ATCR_RELAY_ENDPOINT="https://relay.atcr.io" 313 + ``` 314 + 315 + **Step 4: Decommission minimal service** 316 + - Monitor relay for stability 317 + - Shut down old discovery service 318 + 319 + ### Infrastructure Requirements (Full Relay) 320 + 321 + **Minimum specs:** 322 + - 2 vCPU cores 323 + - 12GB RAM 324 + - 100GB SSD 325 + - 30 Mbps bandwidth 326 + 327 + **Estimated cost:** 328 + - Hetzner: ~$30-40/month 329 + - DigitalOcean: ~$50/month (with managed PostgreSQL) 330 + - Fly.io: ~$35-50/month 331 + 332 + ## Collection Indexing: The `collectiondir` Microservice 333 + 334 + The `com.atproto.sync.listReposByCollection` endpoint is **not part of the relay core**. It's provided by a separate microservice called **`collectiondir`**. 335 + 336 + ### What is collectiondir? 
337 + 338 + - **Separate service** that indexes collections for efficient discovery 339 + - **Optional**: Not required by the ATProto spec, but very useful for AppViews 340 + - **Deployed alongside relay** by Bluesky's public instances 341 + 342 + ### Current Limitation: did:plc Only? 343 + 344 + Based on testing, Bluesky's public relays (with collectiondir) appear to: 345 + - ✅ Index `io.atcr.*` collections from `did:plc` DIDs 346 + - ❌ NOT index `io.atcr.*` collections from `did:web` DIDs 347 + 348 + This means: 349 + - ATCR manifests from users (did:plc) are discoverable 350 + - ATCR hold captain records (did:web) are NOT discoverable 351 + - The relay still **stores** all data (CAR file includes did:web records) 352 + - The issue is specifically with **indexing** for `listReposByCollection` 353 + 354 + ### Configuring collectiondir 355 + 356 + Documentation on configuring collectiondir is sparse. Possible approaches: 357 + 358 + 1. **Fork and modify**: Clone indigo repo, modify collectiondir to index all DIDs 359 + 2. **Configuration file**: Check if collectiondir accepts whitelist/configuration for indexed collections 360 + 3. **No filtering**: Default behavior might be to index everything, but Bluesky's deployment filters 361 + 362 + **Action item**: Review `indigo/cmd/collectiondir` source code to understand configuration options. 363 + 364 + ## Multi-Relay Strategy 365 + 366 + Holds can request crawls from **multiple relays** simultaneously. This enables: 367 + 368 + ### Scenario: Bluesky + ATCR Relays 369 + 370 + **Setup:** 371 + 1. Hold deploys with embedded PDS at `did:web:hold01.atcr.io` 372 + 2. Hold creates captain record (`io.atcr.hold.captain/self`) 373 + 3. 
Hold requests crawl from **both**: 374 + - Bluesky relay: `https://bsky.network/xrpc/com.atproto.sync.requestCrawl` 375 + - ATCR relay: `https://relay.atcr.io/xrpc/com.atproto.sync.requestCrawl` 376 + 377 + **Result:** 378 + - ✅ Bluesky relay indexes social posts (if hold owner posts) 379 + - ✅ ATCR relay indexes hold captain records 380 + - ✅ AppViews query ATCR relay for hold discovery 381 + - ✅ Independent networks - Bluesky posts work regardless of ATCR relay 382 + 383 + ### Request Crawl Script 384 + 385 + The existing script can be modified to support multiple relays: 386 + 387 + ```bash 388 + #!/bin/bash 389 + # deploy/request-crawl.sh 390 + 391 + HOSTNAME=$1 392 + BLUESKY_RELAY=${2:-"https://bsky.network"} 393 + ATCR_RELAY=${3:-"https://relay.atcr.io"} 394 + 395 + echo "Requesting crawl for $HOSTNAME from Bluesky relay..." 396 + curl -X POST "$BLUESKY_RELAY/xrpc/com.atproto.sync.requestCrawl" \ 397 + -H "Content-Type: application/json" \ 398 + -d "{\"hostname\": \"$HOSTNAME\"}" 399 + 400 + echo "Requesting crawl for $HOSTNAME from ATCR relay..." 401 + curl -X POST "$ATCR_RELAY/xrpc/com.atproto.sync.requestCrawl" \ 402 + -H "Content-Type: application/json" \ 403 + -d "{\"hostname\": \"$HOSTNAME\"}" 404 + ``` 405 + 406 + Usage: 407 + ```bash 408 + ./deploy/request-crawl.sh hold01.atcr.io 409 + ``` 410 + 411 + ## Deployment: Minimal Discovery Service 412 + 413 + ### 1. 
Infrastructure Setup 414 + 415 + **Provision VPS:** 416 + - Hetzner CX11, DigitalOcean Basic, or Fly.io 417 + - Public domain (e.g., `discovery.atcr.io`) 418 + - TLS certificate (Let's Encrypt) 419 + 420 + **Configure reverse proxy (optional - nginx):** 421 + ```nginx 422 + upstream discovery { 423 + server 127.0.0.1:8080; 424 + } 425 + 426 + server { 427 + listen 443 ssl http2; 428 + server_name discovery.atcr.io; 429 + 430 + ssl_certificate /etc/letsencrypt/live/discovery.atcr.io/fullchain.pem; 431 + ssl_certificate_key /etc/letsencrypt/live/discovery.atcr.io/privkey.pem; 432 + 433 + location / { 434 + proxy_pass http://discovery; 435 + proxy_set_header Host $host; 436 + proxy_set_header X-Real-IP $remote_addr; 437 + } 438 + } 439 + ``` 440 + 441 + ### 2. Build and Deploy 442 + 443 + ```bash 444 + # Clone ATCR repo 445 + git clone https://github.com/atcr-io/atcr.git 446 + cd atcr 447 + 448 + # Build discovery service 449 + go build -o atcr-discovery ./cmd/atcr-discovery 450 + 451 + # Run 452 + export DATABASE_PATH="/var/lib/atcr-discovery/discovery.db" 453 + export HTTP_ADDR=":8080" 454 + export CRAWL_INTERVAL="12h" 455 + ./atcr-discovery 456 + ``` 457 + 458 + ### 3. Update Hold Startup 459 + 460 + Each hold should request crawl on startup: 461 + 462 + ```bash 463 + # In hold startup script or environment 464 + export ATCR_DISCOVERY_URL="https://discovery.atcr.io" 465 + 466 + # Request crawl from both Bluesky and ATCR 467 + curl -X POST "https://bsky.network/xrpc/com.atproto.sync.requestCrawl" \ 468 + -H "Content-Type: application/json" \ 469 + -d "{\"hostname\": \"$HOLD_PUBLIC_URL\"}" 470 + 471 + curl -X POST "$ATCR_DISCOVERY_URL/xrpc/com.atproto.sync.requestCrawl" \ 472 + -H "Content-Type: application/json" \ 473 + -d "{\"hostname\": \"$HOLD_PUBLIC_URL\"}" 474 + ``` 475 + 476 + ### 4. 
Update AppView Configuration 477 + 478 + Point AppView discovery worker to the discovery service: 479 + 480 + ```bash 481 + # In .env.appview or environment 482 + export ATCR_RELAY_ENDPOINT="https://discovery.atcr.io" 483 + export ATCR_HOLD_DISCOVERY_ENABLED="true" 484 + export ATCR_HOLD_DISCOVERY_INTERVAL="6h" 485 + ``` 486 + 487 + ### 5. Monitor and Maintain 488 + 489 + **Monitoring:** 490 + - Check crawl queue status 491 + - Monitor SQLite database size 492 + - Track failed crawls 493 + 494 + **Maintenance:** 495 + - Re-crawl on schedule (every 6-24 hours) 496 + - Prune stale records (>7 days old) 497 + - Backup SQLite database regularly 498 + 499 + ## Trade-Offs and Considerations 500 + 501 + ### Running Your Own Relay 502 + 503 + **Pros:** 504 + - ✅ Full control over indexing (can index `did:web` holds) 505 + - ✅ No dependency on third-party relay policies 506 + - ✅ Can customize collection filters for ATCR-specific needs 507 + - ✅ Relatively lightweight with modern relay implementation 508 + 509 + **Cons:** 510 + - ❌ Infrastructure cost (~$30-50/month minimum) 511 + - ❌ Operational overhead (monitoring, updates, backups) 512 + - ❌ Need to maintain as network grows 513 + - ❌ Single point of failure for discovery (unless multi-relay) 514 + 515 + ### Alternatives to Running a Relay 516 + 517 + #### 1. Direct Registration API 518 + 519 + Holds POST to AppView on startup to register themselves: 520 + 521 + **Pros:** 522 + - ✅ Simplest implementation 523 + - ✅ No relay infrastructure needed 524 + - ✅ Immediate registration (no crawl delay) 525 + 526 + **Cons:** 527 + - ❌ Ties holds to specific AppView instances 528 + - ❌ Breaks decentralized discovery model 529 + - ❌ Each AppView has different hold registry 530 + 531 + #### 2. 
Static Discovery File 532 + 533 + Maintain `https://atcr.io/.well-known/holds.json`: 534 + 535 + **Pros:** 536 + - ✅ No infrastructure beyond static hosting 537 + - ✅ All AppViews share same registry 538 + - ✅ Simple to implement 539 + 540 + **Cons:** 541 + - ❌ Manual process (PRs/issues to add holds) 542 + - ❌ Not real-time discovery 543 + - ❌ Centralized control point 544 + 545 + #### 3. Hybrid Approach 546 + 547 + Combine multiple discovery mechanisms: 548 + 549 + ```go 550 + func (w *HoldDiscoveryWorker) DiscoverHolds(ctx context.Context) error { 551 + // 1. Fetch static registry 552 + staticHolds := w.fetchStaticRegistry() 553 + 554 + // 2. Query relay (if available) 555 + relayHolds := w.queryRelay(ctx) 556 + 557 + // 3. Accept direct registrations 558 + registeredHolds := w.getDirectRegistrations() 559 + 560 + // Merge and deduplicate 561 + allHolds := mergeHolds(staticHolds, relayHolds, registeredHolds) 562 + 563 + // Cache in database 564 + for _, hold := range allHolds { 565 + w.cacheHold(hold) 566 + } 567 + } 568 + ``` 569 + 570 + **Pros:** 571 + - ✅ Multiple discovery paths (resilient) 572 + - ✅ Gradual migration to relay-based discovery 573 + - ✅ Supports both centralized bootstrap and decentralized growth 574 + 575 + **Cons:** 576 + - ❌ More complex implementation 577 + - ❌ Potential for stale data if sources conflict 578 + 579 + ## Recommendations for ATCR 580 + 581 + ### Phase 1: MVP (Now - 1000 holds) 582 + 583 + **Build minimal discovery service with WebSocket** (~$5-10/month): 584 + 1. Implement `requestCrawl` + `listReposByCollection` endpoints 585 + 2. Initial backfill via `getRepo` (CAR file parsing) 586 + 3. Real-time updates via WebSocket `subscribeRepos` 587 + 4. SQLite storage with cursor management 588 + 5. 
Filter to `io.atcr.*` collections only 589 + 590 + **Deliverables:** 591 + - `cmd/atcr-discovery` service 592 + - SQLite schema with cursor storage 593 + - CAR file parser (indigo libraries) 594 + - WebSocket subscriber with reconnection 595 + - Deployment scripts 596 + 597 + **Cost**: ~$5-10/month VPS 598 + 599 + **Why**: Minimal infrastructure, real-time updates, full control over indexing, sufficient for hundreds of holds. 600 + 601 + ### Phase 2: Migrate to Full Relay (1000+ holds) 602 + 603 + **Deploy Bluesky relay v1.1** when scaling needed (~$30-50/month): 604 + 1. Set up PostgreSQL database 605 + 2. Deploy indigo relay with admin UI 606 + 3. Migrate indexed data from SQLite 607 + 4. Configure for `io.atcr.*` collection filtering (if possible) 608 + 5. Handle thousands of concurrent WebSocket connections 609 + 610 + **Cost**: ~$30-50/month 611 + 612 + **Why**: Proven scalability to 100M+ accounts, standardized protocol, community support, production-ready infrastructure. 613 + 614 + ### Phase 3: Multi-Relay Federation (Future) 615 + 616 + **Decentralized relay network:** 617 + 1. Multiple ATCR relays operated independently 618 + 2. AppViews query multiple relays (fallback/redundancy) 619 + 3. Holds request crawls from all known ATCR relays 620 + 4. Cross-relay synchronization (optional) 621 + 622 + **Why**: No single point of failure, fully decentralized discovery, geographic distribution. 623 + 624 + ## Next Steps 625 + 626 + ### For MVP Implementation 627 + 628 + 1. **Create `cmd/atcr-discovery` package structure** 629 + - HTTP handlers for XRPC endpoints (`requestCrawl`, `listReposByCollection`) 630 + - Crawler with indigo CAR parsing for initial backfill 631 + - WebSocket subscriber for real-time updates 632 + - SQLite storage layer with cursor management 633 + - Background worker for managing subscriptions 634 + 635 + 2. 
**Database schema** 636 + - `indexed_records` table for collection data 637 + - `crawl_queue` table for crawl job management 638 + - `subscriptions` table for WebSocket cursor tracking 639 + - Indexes for efficient queries 640 + 641 + 3. **WebSocket implementation** 642 + - Use `github.com/bluesky-social/indigo/events` for event handling 643 + - Implement reconnection logic with cursor resume 644 + - Filter events to `io.atcr.*` collections only 645 + - Health monitoring for active subscriptions 646 + 647 + 4. **Testing strategy** 648 + - Unit tests for CAR parsing 649 + - Unit tests for event filtering 650 + - Integration tests with mock PDSs and WebSocket 651 + - Connection failure and reconnection testing 652 + - Load testing with SQLite 653 + 654 + 5. **Deployment** 655 + - Dockerfile for discovery service 656 + - Deployment scripts (systemd, docker-compose) 657 + - Monitoring setup (logs, metrics, WebSocket health) 658 + - Alert on subscription failures 659 + 660 + 6. **Documentation** 661 + - API documentation for XRPC endpoints 662 + - Deployment guide 663 + - Troubleshooting guide (WebSocket connection issues) 664 + 665 + ### Open Questions 666 + 667 + 1. **CAR parsing edge cases**: How to handle malformed CAR files or invalid records? 668 + 2. **WebSocket reconnection**: What's the optimal backoff strategy for reconnection attempts? 669 + 3. **Subscription management**: How many concurrent WebSocket connections can SQLite handle? 670 + 4. **Rate limiting**: Should discovery service rate-limit requestCrawl to prevent abuse? 671 + 5. **Authentication**: Should requestCrawl require authentication, or remain open? 672 + 6. **Cursor storage**: Should cursors be persisted immediately or batched for performance? 673 + 7. **Monitoring**: What metrics are most important for operational visibility (active subs, event rate, lag)? 674 + 8. **Error handling**: When a WebSocket dies, should we re-backfill via getRepo or trust cursor resume? 
675 + 676 + ## References 677 + 678 + ### ATProto Specifications 679 + - [ATProto Sync Specification](https://atproto.com/specs/sync) 680 + - [Repository Specification](https://atproto.com/specs/repository) 681 + - [CAR File Format](https://ipld.io/specs/transport/car/) 682 + 683 + ### Indigo Libraries 684 + - [Indigo Repository](https://github.com/bluesky-social/indigo) 685 + - [Indigo Repo Package](https://pkg.go.dev/github.com/bluesky-social/indigo/repo) 686 + - [Indigo ATProto Package](https://pkg.go.dev/github.com/bluesky-social/indigo/atproto) 687 + 688 + ### Relay Reference (Future) 689 + - [Relay v1.1 Updates](https://docs.bsky.app/blog/relay-sync-updates) 690 + - [Indigo Relay Implementation](https://github.com/bluesky-social/indigo/tree/main/cmd/relay) 691 + - [Running a Full-Network Relay](https://whtwnd.com/bnewbold.net/3kwzl7tye6u2y)
+7 -1
pkg/appview/db/device_store.go
··· 417 417 func generateUserCode() string { 418 418 chars := "ABCDEFGHJKLMNPQRSTUVWXYZ23456789" 419 419 code := make([]byte, 8) 420 - rand.Read(code) 420 + if _, err := rand.Read(code); err != nil { 421 + // Fallback to timestamp-based generation if crypto rand fails 422 + now := time.Now().UnixNano() 423 + for i := range code { 424 + code[i] = byte(now >> (i * 8)) 425 + } 426 + } 421 427 for i := range code { 422 428 code[i] = chars[int(code[i])%len(chars)] 423 429 }
+3 -3
pkg/appview/db/oauth_store_test.go
··· 85 85 } 86 86 87 87 // Verify mismatched session was deleted 88 - retrieved, err = store.GetSession(ctx, mismatchedSession.AccountDID, mismatchedSession.SessionID) 88 + _, err = store.GetSession(ctx, mismatchedSession.AccountDID, mismatchedSession.SessionID) 89 89 if err == nil { 90 90 t.Error("Expected session to be deleted (should error), but got no error") 91 91 } ··· 154 154 } 155 155 156 156 // Verify malformed session was deleted 157 - retrieved, err = store.GetSession(ctx, parsedDID, "malformed") 157 + _, err = store.GetSession(ctx, parsedDID, "malformed") 158 158 if err == nil { 159 159 t.Error("Expected malformed session to be deleted, but got no error") 160 160 } ··· 284 284 } 285 285 286 286 // Verify deletion 287 - retrieved, err = store.GetSession(ctx, did, "test_session_id") 287 + _, err = store.GetSession(ctx, did, "test_session_id") 288 288 if err == nil { 289 289 t.Error("Expected error after deletion, got nil") 290 290 }
+3 -1
pkg/appview/db/readonly_test.go
··· 13 13 dbPath := filepath.Join(tmpDir, "test.db") 14 14 15 15 // Set environment for database path 16 - os.Setenv("ATCR_UI_DATABASE_PATH", dbPath) 16 + if err := os.Setenv("ATCR_UI_DATABASE_PATH", dbPath); err != nil { 17 + t.Fatalf("Failed to set environment variable: %v", err) 18 + } 17 19 defer os.Unsetenv("ATCR_UI_DATABASE_PATH") 18 20 19 21 // Initialize database (creates schema)
+1 -1
pkg/appview/jetstream/backfill.go
··· 402 402 } 403 403 404 404 // Update annotations from newest manifest only 405 - if manifestRecord.Annotations != nil && len(manifestRecord.Annotations) > 0 { 405 + if len(manifestRecord.Annotations) > 0 { 406 406 // Filter out empty annotations 407 407 hasData := false 408 408 for _, value := range manifestRecord.Annotations {
+7 -2
pkg/appview/middleware/registry.go
··· 26 26 "atcr.io/pkg/auth/token" 27 27 ) 28 28 29 + // holdDIDKey is the context key for storing hold DID 30 + const holdDIDKey contextKey = "hold.did" 31 + 29 32 // Global variables for initialization only 30 33 // These are set by main.go during startup and copied into NamespaceResolver instances. 31 34 // After initialization, request handling uses the NamespaceResolver's instance fields. ··· 131 134 } 132 135 133 136 did := ident.DID.String() 137 + handle := ident.Handle.String() 134 138 pdsEndpoint := ident.PDSEndpoint() 135 139 if pdsEndpoint == "" { 136 140 return nil, fmt.Errorf("no PDS endpoint found for %s", identityStr) 137 141 } 138 142 139 - fmt.Printf("DEBUG [registry/middleware]: Resolved identity: did=%s, pds=%s, handle=%s\n", did, pdsEndpoint, ident.Handle.String()) 143 + fmt.Printf("DEBUG [registry/middleware]: Resolved identity: did=%s, pds=%s, handle=%s\n", did, pdsEndpoint, handle) 140 144 141 145 // Query for hold DID - either user's hold or default hold service 142 146 holdDID := nr.findHoldDID(ctx, did, pdsEndpoint) ··· 144 148 // This is a fatal configuration error - registry cannot function without a hold service 145 149 return nil, fmt.Errorf("no hold DID configured: ensure default_hold_did is set in middleware config") 146 150 } 147 - ctx = context.WithValue(ctx, "hold.did", holdDID) 151 + ctx = context.WithValue(ctx, holdDIDKey, holdDID) 148 152 149 153 // Get service token for hold authentication 150 154 // Check cache first to avoid unnecessary PDS calls on every request ··· 308 312 // Bundle all context into a single RegistryContext struct 309 313 registryCtx := &storage.RegistryContext{ 310 314 DID: did, 315 + Handle: handle, 311 316 HoldDID: holdDID, 312 317 PDSEndpoint: pdsEndpoint, 313 318 Repository: repositoryName,
+1
pkg/appview/storage/context.go
··· 17 17 type RegistryContext struct { 18 18 // Per-request identity and routing information 19 19 DID string // User's DID (e.g., "did:plc:abc123") 20 + Handle string // User's handle (e.g., "alice.bsky.social") 20 21 HoldDID string // Hold service DID (e.g., "did:web:hold01.atcr.io") 21 22 PDSEndpoint string // User's PDS endpoint URL 22 23 Repository string // Image repository name (e.g., "debian")
+372
pkg/appview/storage/manifest_store.go
··· 1 + package storage 2 + 3 + import ( 4 + "bytes" 5 + "context" 6 + "encoding/json" 7 + "errors" 8 + "fmt" 9 + "io" 10 + "maps" 11 + "net/http" 12 + "strings" 13 + 14 + "atcr.io/pkg/atproto" 15 + "github.com/distribution/distribution/v3" 16 + "github.com/opencontainers/go-digest" 17 + ) 18 + 19 + // HoldNotifier interface for notifying holds about manifest uploads 20 + type HoldNotifier interface { 21 + GetServiceToken(ctx context.Context, userDID, audienceDID string) (string, error) 22 + } 23 + 24 + // ManifestStore implements distribution.ManifestService 25 + // It stores manifests in ATProto as records 26 + type ManifestStore struct { 27 + ctx *RegistryContext // Context with user/hold info 28 + notifier HoldNotifier // OAuth refresher for getting service tokens 29 + lastFetchedHoldDID string // Hold DID from most recently fetched manifest (for pull) 30 + blobStore distribution.BlobStore // Blob store for fetching config during push 31 + } 32 + 33 + // NewManifestStore creates a new ATProto-backed manifest store 34 + func NewManifestStore(ctx *RegistryContext, notifier HoldNotifier, blobStore distribution.BlobStore) *ManifestStore { 35 + return &ManifestStore{ 36 + ctx: ctx, 37 + notifier: notifier, 38 + blobStore: blobStore, 39 + } 40 + } 41 + 42 + // Exists checks if a manifest exists by digest 43 + func (s *ManifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { 44 + rkey := digestToRKey(dgst) 45 + _, err := s.ctx.ATProtoClient.GetRecord(ctx, atproto.ManifestCollection, rkey) 46 + if err != nil { 47 + // If not found, return false without error 48 + if errors.Is(err, atproto.ErrRecordNotFound) { 49 + return false, nil 50 + } 51 + return false, err 52 + } 53 + return true, nil 54 + } 55 + 56 + // Get retrieves a manifest by digest 57 + func (s *ManifestStore) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { 58 + rkey := digestToRKey(dgst) 59 + record, err := 
s.ctx.ATProtoClient.GetRecord(ctx, atproto.ManifestCollection, rkey) 60 + if err != nil { 61 + return nil, distribution.ErrManifestUnknownRevision{ 62 + Name: s.ctx.Repository, 63 + Revision: dgst, 64 + } 65 + } 66 + 67 + var manifestRecord atproto.ManifestRecord 68 + if err := json.Unmarshal(record.Value, &manifestRecord); err != nil { 69 + return nil, fmt.Errorf("failed to unmarshal manifest record: %w", err) 70 + } 71 + 72 + // Store the hold DID for subsequent blob requests during pull 73 + // Prefer HoldDID (new format) with fallback to HoldEndpoint (legacy URL format) 74 + // The routing repository will cache this for concurrent blob fetches 75 + if manifestRecord.HoldDID != "" { 76 + // New format: DID reference (preferred) 77 + s.lastFetchedHoldDID = manifestRecord.HoldDID 78 + } else if manifestRecord.HoldEndpoint != "" { 79 + // Legacy format: URL reference - convert to DID 80 + s.lastFetchedHoldDID = atproto.ResolveHoldDIDFromURL(manifestRecord.HoldEndpoint) 81 + } 82 + 83 + var ociManifest []byte 84 + 85 + // New records: Download blob from ATProto blob storage 86 + if manifestRecord.ManifestBlob != nil && manifestRecord.ManifestBlob.Ref.Link != "" { 87 + ociManifest, err = s.ctx.ATProtoClient.GetBlob(ctx, manifestRecord.ManifestBlob.Ref.Link) 88 + if err != nil { 89 + return nil, fmt.Errorf("failed to download manifest blob: %w", err) 90 + } 91 + } 92 + 93 + // Track pull count (increment asynchronously to avoid blocking the response) 94 + if s.ctx.Database != nil { 95 + go func() { 96 + if err := s.ctx.Database.IncrementPullCount(s.ctx.DID, s.ctx.Repository); err != nil { 97 + fmt.Printf("WARNING: Failed to increment pull count for %s/%s: %v\n", s.ctx.DID, s.ctx.Repository, err) 98 + } 99 + }() 100 + } 101 + 102 + // Parse the manifest based on media type 103 + // For now, we'll return the raw bytes wrapped in a manifest object 104 + // In a full implementation, you'd use distribution's manifest parsing 105 + return &rawManifest{ 106 + mediaType: 
manifestRecord.MediaType, 107 + payload: ociManifest, 108 + }, nil 109 + } 110 + 111 + // Put stores a manifest 112 + func (s *ManifestStore) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { 113 + // Get the manifest payload (raw bytes) 114 + mediaType, payload, err := manifest.Payload() 115 + if err != nil { 116 + return "", err 117 + } 118 + 119 + // Calculate digest 120 + dgst := digest.FromBytes(payload) 121 + 122 + // Upload manifest as blob to PDS 123 + blobRef, err := s.ctx.ATProtoClient.UploadBlob(ctx, payload, mediaType) 124 + if err != nil { 125 + return "", fmt.Errorf("failed to upload manifest blob: %w", err) 126 + } 127 + 128 + // Create manifest record with structured metadata 129 + manifestRecord, err := atproto.NewManifestRecord(s.ctx.Repository, dgst.String(), payload) 130 + if err != nil { 131 + return "", fmt.Errorf("failed to create manifest record: %w", err) 132 + } 133 + 134 + // Set the blob reference, hold DID, and hold endpoint 135 + manifestRecord.ManifestBlob = blobRef 136 + manifestRecord.HoldDID = s.ctx.HoldDID // Primary reference (DID) 137 + 138 + // Resolve hold endpoint from DID for backward compatibility 139 + if holdEndpoint, err := resolveDIDToHTTPSEndpoint(s.ctx.HoldDID); err == nil { 140 + manifestRecord.HoldEndpoint = holdEndpoint // Legacy reference (URL) for backward compat 141 + } 142 + 143 + // Extract Dockerfile labels from config blob and add to annotations 144 + // Only for image manifests (not manifest lists which don't have config blobs) 145 + isManifestList := strings.Contains(manifestRecord.MediaType, "manifest.list") || 146 + strings.Contains(manifestRecord.MediaType, "image.index") 147 + 148 + if !isManifestList && s.blobStore != nil && manifestRecord.Config != nil && manifestRecord.Config.Digest != "" { 149 + labels, err := s.extractConfigLabels(ctx, manifestRecord.Config.Digest) 150 + if err != nil { 151 + // Log error but don't fail 
the push - labels are optional 152 + fmt.Printf("WARNING: Failed to extract config labels: %v\n", err) 153 + } else { 154 + // Initialize annotations map if needed 155 + if manifestRecord.Annotations == nil { 156 + manifestRecord.Annotations = make(map[string]string) 157 + } 158 + 159 + // Copy labels to annotations (Dockerfile LABELs → manifest annotations) 160 + maps.Copy(manifestRecord.Annotations, labels) 161 + 162 + fmt.Printf("DEBUG: Extracted %d labels from config blob\n", len(labels)) 163 + } 164 + } 165 + 166 + // Store manifest record in ATProto 167 + rkey := digestToRKey(dgst) 168 + _, err = s.ctx.ATProtoClient.PutRecord(ctx, atproto.ManifestCollection, rkey, manifestRecord) 169 + if err != nil { 170 + return "", fmt.Errorf("failed to store manifest record in ATProto: %w", err) 171 + } 172 + 173 + // Track push count (increment asynchronously to avoid blocking the response) 174 + if s.ctx.Database != nil { 175 + go func() { 176 + if err := s.ctx.Database.IncrementPushCount(s.ctx.DID, s.ctx.Repository); err != nil { 177 + fmt.Printf("WARNING: Failed to increment push count for %s/%s: %v\n", s.ctx.DID, s.ctx.Repository, err) 178 + } 179 + }() 180 + } 181 + 182 + // Also handle tag if specified 183 + var tag string 184 + for _, option := range options { 185 + if tagOpt, ok := option.(distribution.WithTagOption); ok { 186 + tag = tagOpt.Tag 187 + tagRecord := atproto.NewTagRecord(s.ctx.ATProtoClient.DID(), s.ctx.Repository, tag, dgst.String()) 188 + tagRKey := atproto.RepositoryTagToRKey(s.ctx.Repository, tag) 189 + _, err = s.ctx.ATProtoClient.PutRecord(ctx, atproto.TagCollection, tagRKey, tagRecord) 190 + if err != nil { 191 + return "", fmt.Errorf("failed to store tag in ATProto: %w", err) 192 + } 193 + } 194 + } 195 + 196 + // Notify hold about manifest upload (for layer tracking and Bluesky posts) 197 + // Do this asynchronously to avoid blocking the push 198 + if tag != "" && s.notifier != nil && s.ctx.Handle != "" { 199 + go func() { 200 + if err := 
s.notifyHoldAboutManifest(context.Background(), manifestRecord, tag, dgst.String()); err != nil { 201 + fmt.Printf("WARNING: Failed to notify hold about manifest: %v\n", err) 202 + } 203 + }() 204 + } 205 + 206 + return dgst, nil 207 + } 208 + 209 + // Delete removes a manifest 210 + func (s *ManifestStore) Delete(ctx context.Context, dgst digest.Digest) error { 211 + rkey := digestToRKey(dgst) 212 + return s.ctx.ATProtoClient.DeleteRecord(ctx, atproto.ManifestCollection, rkey) 213 + } 214 + 215 + // digestToRKey converts a digest to an ATProto record key 216 + // ATProto rkeys must be valid strings, so we use the digest string without the algorithm prefix 217 + func digestToRKey(dgst digest.Digest) string { 218 + // Remove the algorithm prefix (e.g., "sha256:") 219 + return dgst.Encoded() 220 + } 221 + 222 + // GetLastFetchedHoldDID returns the hold DID from the most recently fetched manifest 223 + // This is used by the routing repository to cache the hold for blob requests 224 + func (s *ManifestStore) GetLastFetchedHoldDID() string { 225 + return s.lastFetchedHoldDID 226 + } 227 + 228 + // rawManifest is a simple implementation of distribution.Manifest 229 + type rawManifest struct { 230 + mediaType string 231 + payload []byte 232 + } 233 + 234 + func (m *rawManifest) References() []distribution.Descriptor { 235 + // TODO: Parse the manifest and return actual references 236 + return nil 237 + } 238 + 239 + func (m *rawManifest) Payload() (string, []byte, error) { 240 + return m.mediaType, m.payload, nil 241 + } 242 + 243 + // extractConfigLabels fetches the image config blob and extracts Dockerfile LABELs 244 + func (s *ManifestStore) extractConfigLabels(ctx context.Context, configDigestStr string) (map[string]string, error) { 245 + // Parse digest string 246 + configDigest, err := digest.Parse(configDigestStr) 247 + if err != nil { 248 + return nil, fmt.Errorf("invalid config digest: %w", err) 249 + } 250 + 251 + // Fetch config blob from storage 252 + 
configData, err := s.blobStore.Get(ctx, configDigest) 253 + if err != nil { 254 + return nil, fmt.Errorf("failed to fetch config blob: %w", err) 255 + } 256 + 257 + // Parse config JSON 258 + var configJSON struct { 259 + Config struct { 260 + Labels map[string]string `json:"Labels"` 261 + } `json:"config"` 262 + } 263 + 264 + if err := json.Unmarshal(configData, &configJSON); err != nil { 265 + return nil, fmt.Errorf("failed to parse config JSON: %w", err) 266 + } 267 + 268 + return configJSON.Config.Labels, nil 269 + } 270 + 271 + // resolveDIDToHTTPSEndpoint resolves a DID to an HTTPS endpoint 272 + // Currently supports did:web only (e.g., did:web:hold01.atcr.io → https://hold01.atcr.io) 273 + func resolveDIDToHTTPSEndpoint(did string) (string, error) { 274 + if !strings.HasPrefix(did, "did:web:") { 275 + return "", fmt.Errorf("only did:web is supported, got: %s", did) 276 + } 277 + 278 + // Extract hostname from did:web 279 + hostname := strings.TrimPrefix(did, "did:web:") 280 + 281 + // Handle port notation (did:web:example.com:8080 → https://example.com:8080) 282 + hostname = strings.ReplaceAll(hostname, ":", ":") 283 + 284 + return "https://" + hostname, nil 285 + } 286 + 287 + // notifyHoldAboutManifest notifies the hold service about a manifest upload 288 + // This enables the hold to create layer records and Bluesky posts 289 + func (s *ManifestStore) notifyHoldAboutManifest(ctx context.Context, manifestRecord *atproto.ManifestRecord, tag, manifestDigest string) error { 290 + // Skip if no notifier configured 291 + if s.notifier == nil { 292 + return nil 293 + } 294 + 295 + // Resolve hold DID to HTTP endpoint 296 + // For did:web, this is straightforward (e.g., did:web:hold01.atcr.io → https://hold01.atcr.io) 297 + holdEndpoint, err := resolveDIDToHTTPSEndpoint(s.ctx.HoldDID) 298 + if err != nil { 299 + return fmt.Errorf("failed to resolve hold DID %s: %w", s.ctx.HoldDID, err) 300 + } 301 + 302 + // Get service token from user's PDS for hold 
authentication 303 + serviceToken, err := s.notifier.GetServiceToken(ctx, s.ctx.DID, s.ctx.HoldDID) 304 + if err != nil { 305 + return fmt.Errorf("failed to get service token: %w", err) 306 + } 307 + 308 + // Build notification request 309 + notifyReq := map[string]any{ 310 + "repository": s.ctx.Repository, 311 + "tag": tag, 312 + "userDid": s.ctx.DID, 313 + "userHandle": s.ctx.Handle, 314 + "manifest": map[string]any{ 315 + "mediaType": manifestRecord.MediaType, 316 + "config": map[string]any{ 317 + "digest": manifestRecord.Config.Digest, 318 + "size": manifestRecord.Config.Size, 319 + }, 320 + "layers": func() []map[string]any { 321 + layers := make([]map[string]any, len(manifestRecord.Layers)) 322 + for i, layer := range manifestRecord.Layers { 323 + layers[i] = map[string]any{ 324 + "digest": layer.Digest, 325 + "size": layer.Size, 326 + "mediaType": layer.MediaType, 327 + } 328 + } 329 + return layers 330 + }(), 331 + }, 332 + } 333 + 334 + // Marshal request 335 + reqBody, err := json.Marshal(notifyReq) 336 + if err != nil { 337 + return fmt.Errorf("failed to marshal notification request: %w", err) 338 + } 339 + 340 + // Send notification to hold 341 + req, err := http.NewRequestWithContext( 342 + ctx, 343 + "POST", 344 + holdEndpoint+atproto.HoldNotifyManifest, 345 + bytes.NewReader(reqBody), 346 + ) 347 + if err != nil { 348 + return fmt.Errorf("failed to create HTTP request: %w", err) 349 + } 350 + 351 + req.Header.Set("Content-Type", "application/json") 352 + req.Header.Set("Authorization", "Bearer "+serviceToken) 353 + 354 + resp, err := http.DefaultClient.Do(req) 355 + if err != nil { 356 + return fmt.Errorf("failed to send notification: %w", err) 357 + } 358 + defer resp.Body.Close() 359 + 360 + if resp.StatusCode != http.StatusOK { 361 + body, _ := io.ReadAll(resp.Body) 362 + return fmt.Errorf("hold notification failed: status %d, body: %s", resp.StatusCode, body) 363 + } 364 + 365 + // Parse response (optional logging) 366 + var notifyResp 
map[string]any 367 + if err := json.NewDecoder(resp.Body).Decode(&notifyResp); err == nil { 368 + fmt.Printf("INFO: Hold notification successful for %s:%s - %+v\n", s.ctx.Repository, tag, notifyResp) 369 + } 370 + 371 + return nil 372 + }
+10 -11
pkg/appview/storage/proxy_blob_store.go
··· 564 564 565 565 // ProxyBlobWriter implements distribution.BlobWriter for proxy uploads using multipart upload 566 566 type ProxyBlobWriter struct { 567 - store *ProxyBlobStore 568 - options distribution.CreateOptions 569 - uploadID string // S3 multipart upload ID 570 - parts []CompletedPart // Track uploaded parts with ETags 571 - partNumber int // Current part number (starts at 1) 572 - buffer *bytes.Buffer // Buffer for current part 573 - size int64 // Total bytes written 574 - closed bool 575 - id string // Distribution's upload ID (for state) 576 - startedAt time.Time 577 - finalDigest string // Set on Commit 567 + store *ProxyBlobStore 568 + options distribution.CreateOptions 569 + uploadID string // S3 multipart upload ID 570 + parts []CompletedPart // Track uploaded parts with ETags 571 + partNumber int // Current part number (starts at 1) 572 + buffer *bytes.Buffer // Buffer for current part 573 + size int64 // Total bytes written 574 + closed bool 575 + id string // Distribution's upload ID (for state) 576 + startedAt time.Time 578 577 } 579 578 580 579 // ID returns the upload ID
+1 -2
pkg/appview/storage/proxy_blob_store_test.go
··· 313 313 testTokenStr := "eyJhbGciOiJIUzI1NiJ9." + base64URLEncode(testPayload) + ".signature" 314 314 token.SetServiceToken(userDID, holdDID, testTokenStr) 315 315 316 - b.ResetTimer() 317 - for i := 0; i < b.N; i++ { 316 + for b.Loop() { 318 317 cachedToken, expiresAt := token.GetServiceToken(userDID, holdDID) 319 318 320 319 if cachedToken == "" || time.Now().After(expiresAt) {
+62 -16
pkg/appview/storage/routing_repository.go
··· 2 2 3 3 import ( 4 4 "context" 5 + "encoding/json" 5 6 "fmt" 7 + "io" 8 + "net/http" 6 9 "time" 7 10 8 - "atcr.io/pkg/atproto" 11 + "atcr.io/pkg/auth/oauth" 9 12 "github.com/distribution/distribution/v3" 10 13 ) 11 14 ··· 13 16 // The registry (AppView) is stateless and NEVER stores blobs locally 14 17 type RoutingRepository struct { 15 18 distribution.Repository 16 - Ctx *RegistryContext // All context and services (exported for token updates) 17 - manifestStore *atproto.ManifestStore // Cached manifest store instance 18 - blobStore *ProxyBlobStore // Cached blob store instance 19 + Ctx *RegistryContext // All context and services (exported for token updates) 20 + manifestStore *ManifestStore // Cached manifest store instance 21 + blobStore *ProxyBlobStore // Cached blob store instance 22 + } 23 + 24 + // refresherAdapter adapts the oauth.Refresher to implement atproto.HoldNotifier 25 + type refresherAdapter struct { 26 + refresher *oauth.Refresher 27 + pdsEndpoint string 28 + } 29 + 30 + // GetServiceToken implements atproto.HoldNotifier 31 + func (r *refresherAdapter) GetServiceToken(ctx context.Context, userDID, audienceDID string) (string, error) { 32 + // Get OAuth session for the user 33 + session, err := r.refresher.GetSession(ctx, userDID) 34 + if err != nil { 35 + return "", fmt.Errorf("failed to get OAuth session: %w", err) 36 + } 37 + 38 + // Build service auth URL 39 + serviceAuthURL := fmt.Sprintf("%s/xrpc/com.atproto.server.getServiceAuth?aud=%s", r.pdsEndpoint, audienceDID) 40 + 41 + req, err := http.NewRequestWithContext(ctx, "GET", serviceAuthURL, nil) 42 + if err != nil { 43 + return "", fmt.Errorf("failed to create request: %w", err) 44 + } 45 + 46 + // Use session's DoWithAuth to handle OAuth authentication automatically 47 + resp, err := session.DoWithAuth(session.Client, req, "com.atproto.server.getServiceAuth") 48 + if err != nil { 49 + return "", fmt.Errorf("failed to request service token: %w", err) 50 + } 51 + defer resp.Body.Close() 
52 + 53 + if resp.StatusCode != http.StatusOK { 54 + body, _ := io.ReadAll(resp.Body) 55 + return "", fmt.Errorf("PDS returned status %d: %s", resp.StatusCode, body) 56 + } 57 + 58 + var result struct { 59 + Token string `json:"token"` 60 + } 61 + if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { 62 + return "", fmt.Errorf("failed to decode response: %w", err) 63 + } 64 + 65 + return result.Token, nil 19 66 } 20 67 21 68 // NewRoutingRepository creates a new routing repository ··· 33 80 // Ensure blob store is created first (needed for label extraction during push) 34 81 blobStore := r.Blobs(ctx) 35 82 36 - // ManifestStore needs both DID and URL for backward compat (legacy holdEndpoint field) 37 - // For now, pass holdDID twice (will be cleaned up in manifest_store.go later) 38 - r.manifestStore = atproto.NewManifestStore( 39 - r.Ctx.ATProtoClient, 40 - r.Ctx.Repository, 41 - r.Ctx.HoldDID, 42 - r.Ctx.HoldDID, 43 - r.Ctx.DID, 44 - blobStore, 45 - r.Ctx.Database, 46 - ) 83 + // Wrap the Refresher in an adapter to implement HoldNotifier 84 + var notifier HoldNotifier 85 + if r.Ctx.Refresher != nil { 86 + notifier = &refresherAdapter{ 87 + refresher: r.Ctx.Refresher, 88 + pdsEndpoint: r.Ctx.PDSEndpoint, 89 + } 90 + } 91 + 92 + r.manifestStore = NewManifestStore(r.Ctx, notifier, blobStore) 47 93 } 48 94 49 95 // After any manifest operation, cache the hold DID for blob fetches ··· 102 148 // Tags returns the tag service 103 149 // Tags are stored in ATProto as io.atcr.tag records 104 150 func (r *RoutingRepository) Tags(ctx context.Context) distribution.TagService { 105 - return atproto.NewTagStore(r.Ctx.ATProtoClient, r.Ctx.Repository) 151 + return NewTagStore(r.Ctx.ATProtoClient, r.Ctx.Repository) 106 152 }
+1 -1
pkg/appview/ui_test.go
··· 607 607 tests := []struct { 608 608 name string 609 609 templateStr string 610 - data interface{} 610 + data any 611 611 expectInOutput string 612 612 }{ 613 613 {
+388 -2
pkg/atproto/cbor_gen.go
··· 300 300 } 301 301 302 302 cw := cbg.NewCborWriter(w) 303 - fieldCount := 7 303 + fieldCount := 8 304 304 305 305 if t.Region == "" { 306 306 fieldCount-- ··· 466 466 if err := cbg.WriteBool(w, t.AllowAllCrew); err != nil { 467 467 return err 468 468 } 469 + 470 + // t.EnableManifestPosts (bool) (bool) 471 + if len("enableManifestPosts") > 8192 { 472 + return xerrors.Errorf("Value in field \"enableManifestPosts\" was too long") 473 + } 474 + 475 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("enableManifestPosts"))); err != nil { 476 + return err 477 + } 478 + if _, err := cw.WriteString(string("enableManifestPosts")); err != nil { 479 + return err 480 + } 481 + 482 + if err := cbg.WriteBool(w, t.EnableManifestPosts); err != nil { 483 + return err 484 + } 469 485 return nil 470 486 } 471 487 ··· 494 510 495 511 n := extra 496 512 497 - nameBuf := make([]byte, 12) 513 + nameBuf := make([]byte, 19) 498 514 for i := uint64(0); i < n; i++ { 499 515 nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 8192) 500 516 if err != nil { ··· 600 616 t.AllowAllCrew = true 601 617 default: 602 618 return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) 619 + } 620 + // t.EnableManifestPosts (bool) (bool) 621 + case "enableManifestPosts": 622 + 623 + maj, extra, err = cr.ReadHeader() 624 + if err != nil { 625 + return err 626 + } 627 + if maj != cbg.MajOther { 628 + return fmt.Errorf("booleans must be major type 7") 629 + } 630 + switch extra { 631 + case 20: 632 + t.EnableManifestPosts = false 633 + case 21: 634 + t.EnableManifestPosts = true 635 + default: 636 + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) 637 + } 638 + 639 + default: 640 + // Field doesn't exist on this type, so ignore it 641 + if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil { 642 + return err 643 + } 644 + } 645 + } 646 + 647 + return nil 648 + } 649 + func (t *LayerRecord) MarshalCBOR(w io.Writer) 
error { 650 + if t == nil { 651 + _, err := w.Write(cbg.CborNull) 652 + return err 653 + } 654 + 655 + cw := cbg.NewCborWriter(w) 656 + 657 + if _, err := cw.Write([]byte{168}); err != nil { 658 + return err 659 + } 660 + 661 + // t.Size (int64) (int64) 662 + if len("size") > 8192 { 663 + return xerrors.Errorf("Value in field \"size\" was too long") 664 + } 665 + 666 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("size"))); err != nil { 667 + return err 668 + } 669 + if _, err := cw.WriteString(string("size")); err != nil { 670 + return err 671 + } 672 + 673 + if t.Size >= 0 { 674 + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Size)); err != nil { 675 + return err 676 + } 677 + } else { 678 + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Size-1)); err != nil { 679 + return err 680 + } 681 + } 682 + 683 + // t.Type (string) (string) 684 + if len("$type") > 8192 { 685 + return xerrors.Errorf("Value in field \"$type\" was too long") 686 + } 687 + 688 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("$type"))); err != nil { 689 + return err 690 + } 691 + if _, err := cw.WriteString(string("$type")); err != nil { 692 + return err 693 + } 694 + 695 + if len(t.Type) > 8192 { 696 + return xerrors.Errorf("Value in field t.Type was too long") 697 + } 698 + 699 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Type))); err != nil { 700 + return err 701 + } 702 + if _, err := cw.WriteString(string(t.Type)); err != nil { 703 + return err 704 + } 705 + 706 + // t.Digest (string) (string) 707 + if len("digest") > 8192 { 708 + return xerrors.Errorf("Value in field \"digest\" was too long") 709 + } 710 + 711 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("digest"))); err != nil { 712 + return err 713 + } 714 + if _, err := cw.WriteString(string("digest")); err != nil { 715 + return err 716 + } 717 + 718 + if len(t.Digest) > 8192 { 719 + return xerrors.Errorf("Value in field 
t.Digest was too long") 720 + } 721 + 722 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Digest))); err != nil { 723 + return err 724 + } 725 + if _, err := cw.WriteString(string(t.Digest)); err != nil { 726 + return err 727 + } 728 + 729 + // t.UserDID (string) (string) 730 + if len("userDid") > 8192 { 731 + return xerrors.Errorf("Value in field \"userDid\" was too long") 732 + } 733 + 734 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("userDid"))); err != nil { 735 + return err 736 + } 737 + if _, err := cw.WriteString(string("userDid")); err != nil { 738 + return err 739 + } 740 + 741 + if len(t.UserDID) > 8192 { 742 + return xerrors.Errorf("Value in field t.UserDID was too long") 743 + } 744 + 745 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.UserDID))); err != nil { 746 + return err 747 + } 748 + if _, err := cw.WriteString(string(t.UserDID)); err != nil { 749 + return err 750 + } 751 + 752 + // t.CreatedAt (string) (string) 753 + if len("createdAt") > 8192 { 754 + return xerrors.Errorf("Value in field \"createdAt\" was too long") 755 + } 756 + 757 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("createdAt"))); err != nil { 758 + return err 759 + } 760 + if _, err := cw.WriteString(string("createdAt")); err != nil { 761 + return err 762 + } 763 + 764 + if len(t.CreatedAt) > 8192 { 765 + return xerrors.Errorf("Value in field t.CreatedAt was too long") 766 + } 767 + 768 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.CreatedAt))); err != nil { 769 + return err 770 + } 771 + if _, err := cw.WriteString(string(t.CreatedAt)); err != nil { 772 + return err 773 + } 774 + 775 + // t.MediaType (string) (string) 776 + if len("mediaType") > 8192 { 777 + return xerrors.Errorf("Value in field \"mediaType\" was too long") 778 + } 779 + 780 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("mediaType"))); err != nil { 781 + return err 782 + } 783 + if _, err := 
cw.WriteString(string("mediaType")); err != nil { 784 + return err 785 + } 786 + 787 + if len(t.MediaType) > 8192 { 788 + return xerrors.Errorf("Value in field t.MediaType was too long") 789 + } 790 + 791 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.MediaType))); err != nil { 792 + return err 793 + } 794 + if _, err := cw.WriteString(string(t.MediaType)); err != nil { 795 + return err 796 + } 797 + 798 + // t.Repository (string) (string) 799 + if len("repository") > 8192 { 800 + return xerrors.Errorf("Value in field \"repository\" was too long") 801 + } 802 + 803 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("repository"))); err != nil { 804 + return err 805 + } 806 + if _, err := cw.WriteString(string("repository")); err != nil { 807 + return err 808 + } 809 + 810 + if len(t.Repository) > 8192 { 811 + return xerrors.Errorf("Value in field t.Repository was too long") 812 + } 813 + 814 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Repository))); err != nil { 815 + return err 816 + } 817 + if _, err := cw.WriteString(string(t.Repository)); err != nil { 818 + return err 819 + } 820 + 821 + // t.UserHandle (string) (string) 822 + if len("userHandle") > 8192 { 823 + return xerrors.Errorf("Value in field \"userHandle\" was too long") 824 + } 825 + 826 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("userHandle"))); err != nil { 827 + return err 828 + } 829 + if _, err := cw.WriteString(string("userHandle")); err != nil { 830 + return err 831 + } 832 + 833 + if len(t.UserHandle) > 8192 { 834 + return xerrors.Errorf("Value in field t.UserHandle was too long") 835 + } 836 + 837 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.UserHandle))); err != nil { 838 + return err 839 + } 840 + if _, err := cw.WriteString(string(t.UserHandle)); err != nil { 841 + return err 842 + } 843 + return nil 844 + } 845 + 846 + func (t *LayerRecord) UnmarshalCBOR(r io.Reader) (err error) { 847 
+ *t = LayerRecord{} 848 + 849 + cr := cbg.NewCborReader(r) 850 + 851 + maj, extra, err := cr.ReadHeader() 852 + if err != nil { 853 + return err 854 + } 855 + defer func() { 856 + if err == io.EOF { 857 + err = io.ErrUnexpectedEOF 858 + } 859 + }() 860 + 861 + if maj != cbg.MajMap { 862 + return fmt.Errorf("cbor input should be of type map") 863 + } 864 + 865 + if extra > cbg.MaxLength { 866 + return fmt.Errorf("LayerRecord: map struct too large (%d)", extra) 867 + } 868 + 869 + n := extra 870 + 871 + nameBuf := make([]byte, 10) 872 + for i := uint64(0); i < n; i++ { 873 + nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 8192) 874 + if err != nil { 875 + return err 876 + } 877 + 878 + if !ok { 879 + // Field doesn't exist on this type, so ignore it 880 + if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil { 881 + return err 882 + } 883 + continue 884 + } 885 + 886 + switch string(nameBuf[:nameLen]) { 887 + // t.Size (int64) (int64) 888 + case "size": 889 + { 890 + maj, extra, err := cr.ReadHeader() 891 + if err != nil { 892 + return err 893 + } 894 + var extraI int64 895 + switch maj { 896 + case cbg.MajUnsignedInt: 897 + extraI = int64(extra) 898 + if extraI < 0 { 899 + return fmt.Errorf("int64 positive overflow") 900 + } 901 + case cbg.MajNegativeInt: 902 + extraI = int64(extra) 903 + if extraI < 0 { 904 + return fmt.Errorf("int64 negative overflow") 905 + } 906 + extraI = -1 - extraI 907 + default: 908 + return fmt.Errorf("wrong type for int64 field: %d", maj) 909 + } 910 + 911 + t.Size = int64(extraI) 912 + } 913 + // t.Type (string) (string) 914 + case "$type": 915 + 916 + { 917 + sval, err := cbg.ReadStringWithMax(cr, 8192) 918 + if err != nil { 919 + return err 920 + } 921 + 922 + t.Type = string(sval) 923 + } 924 + // t.Digest (string) (string) 925 + case "digest": 926 + 927 + { 928 + sval, err := cbg.ReadStringWithMax(cr, 8192) 929 + if err != nil { 930 + return err 931 + } 932 + 933 + t.Digest = string(sval) 934 + } 935 + // t.UserDID 
(string) (string) 936 + case "userDid": 937 + 938 + { 939 + sval, err := cbg.ReadStringWithMax(cr, 8192) 940 + if err != nil { 941 + return err 942 + } 943 + 944 + t.UserDID = string(sval) 945 + } 946 + // t.CreatedAt (string) (string) 947 + case "createdAt": 948 + 949 + { 950 + sval, err := cbg.ReadStringWithMax(cr, 8192) 951 + if err != nil { 952 + return err 953 + } 954 + 955 + t.CreatedAt = string(sval) 956 + } 957 + // t.MediaType (string) (string) 958 + case "mediaType": 959 + 960 + { 961 + sval, err := cbg.ReadStringWithMax(cr, 8192) 962 + if err != nil { 963 + return err 964 + } 965 + 966 + t.MediaType = string(sval) 967 + } 968 + // t.Repository (string) (string) 969 + case "repository": 970 + 971 + { 972 + sval, err := cbg.ReadStringWithMax(cr, 8192) 973 + if err != nil { 974 + return err 975 + } 976 + 977 + t.Repository = string(sval) 978 + } 979 + // t.UserHandle (string) (string) 980 + case "userHandle": 981 + 982 + { 983 + sval, err := cbg.ReadStringWithMax(cr, 8192) 984 + if err != nil { 985 + return err 986 + } 987 + 988 + t.UserHandle = string(sval) 603 989 } 604 990 605 991 default:
+4 -4
pkg/atproto/client_test.go
··· 34 34 name string 35 35 collection string 36 36 rkey string 37 - record interface{} 37 + record any 38 38 serverResponse string 39 39 serverStatus int 40 40 wantErr bool ··· 93 93 } 94 94 95 95 // Verify request body 96 - var body map[string]interface{} 96 + var body map[string]any 97 97 if err := json.NewDecoder(r.Body).Decode(&body); err != nil { 98 98 t.Errorf("Failed to decode request body: %v", err) 99 99 } ··· 158 158 t.Errorf("URI = %v, want at://did:plc:test123/io.atcr.manifest/abc123", r.URI) 159 159 } 160 160 161 - var value map[string]interface{} 161 + var value map[string]any 162 162 if err := json.Unmarshal(r.Value, &value); err != nil { 163 163 t.Errorf("Failed to unmarshal value: %v", err) 164 164 } ··· 290 290 } 291 291 292 292 // Verify request body 293 - var body map[string]interface{} 293 + var body map[string]any 294 294 if err := json.NewDecoder(r.Body).Decode(&body); err != nil { 295 295 t.Errorf("Failed to decode request body: %v", err) 296 296 }
+6
pkg/atproto/endpoints.go
··· 39 39 // Request: {"uploadId": "..."} 40 40 // Response: {"status": "aborted"} 41 41 HoldAbortUpload = "/xrpc/io.atcr.hold.abortUpload" 42 + 43 + // HoldNotifyManifest notifies hold about a manifest upload for layer tracking and Bluesky posting. 44 + // Method: POST 45 + // Request: {"repository": "...", "tag": "...", "userDid": "...", "userHandle": "...", "manifest": {...}} 46 + // Response: {"success": true, "layersCreated": 5, "postCreated": true, "postUri": "at://..."} 47 + HoldNotifyManifest = "/xrpc/io.atcr.hold.notifyManifest" 42 48 ) 43 49 44 50 // Hold service crew management endpoints (io.atcr.hold.*)
+2 -1
pkg/atproto/generate.go
··· 25 25 ) 26 26 27 27 func main() { 28 - // Generate map-style encoders for CrewRecord, CaptainRecord, and TangledProfileRecord 28 + // Generate map-style encoders for CrewRecord, CaptainRecord, LayerRecord, and TangledProfileRecord 29 29 if err := cbg.WriteMapEncodersToFile("cbor_gen.go", "atproto", 30 30 atproto.CrewRecord{}, 31 31 atproto.CaptainRecord{}, 32 + atproto.LayerRecord{}, 32 33 atproto.TangledProfileRecord{}, 33 34 ); err != nil { 34 35 fmt.Printf("Failed to generate CBOR encoders: %v\n", err)
+75 -7
pkg/atproto/lexicon.go
// RepositoryTagToRKey derives the ATProto record key for a (repository, tag)
// pair. Record keys must match ^[a-zA-Z0-9._~-]{1,512}$, so the slash that
// separates repository path segments is encoded as a tilde (~) — rkey-legal
// and, unlike a dash, never part of a repository name. Repository and tag are
// joined with an underscore.
func RepositoryTagToRKey(repository, tag string) string {
	return strings.ReplaceAll(repository+"_"+tag, "/", "~")
}

// RKeyToRepositoryTag is the inverse of RepositoryTagToRKey: it splits on the
// LAST underscore and decodes tildes back into slashes in the repository part.
//
// NOTE(review): because repository names and tags may both contain
// underscores, the split is ambiguous — e.g. "app_v1_beta" always decodes as
// ("app_v1", "beta"). Callers must tolerate that.
func RKeyToRepositoryTag(rkey string) (repository, tag string) {
	sep := strings.LastIndex(rkey, "_")
	if sep < 0 {
		// No separator: treat the whole key as a tag with an empty repository.
		return "", rkey
	}
	repository = strings.ReplaceAll(rkey[:sep], "~", "/")
	tag = rkey[sep+1:]
	return repository, tag
}

// CaptainRecord identifies the hold owner and hold-wide settings.
// Stored in the hold's embedded PDS (CBOR-encoded for the carstore).
type CaptainRecord struct {
	Type                string `json:"$type" cborgen:"$type"`
	Owner               string `json:"owner" cborgen:"owner"`                             // DID of hold owner
	Public              bool   `json:"public" cborgen:"public"`                           // Public read access
	AllowAllCrew        bool   `json:"allowAllCrew" cborgen:"allowAllCrew"`               // Any authenticated user may register as crew
	EnableManifestPosts bool   `json:"enableManifestPosts" cborgen:"enableManifestPosts"` // Post to Bluesky when manifests are pushed (overrides env var)
	DeployedAt          string `json:"deployedAt" cborgen:"deployedAt"`                   // RFC3339 timestamp
	Region              string `json:"region,omitempty" cborgen:"region,omitempty"`       // S3 region (optional)
	Provider            string `json:"provider,omitempty" cborgen:"provider,omitempty"`   // Deployment provider (optional)
}
crew member in the hold ··· 518 557 Role string `json:"role" cborgen:"role"` 519 558 Permissions []string `json:"permissions" cborgen:"permissions"` 520 559 AddedAt string `json:"addedAt" cborgen:"addedAt"` // RFC3339 timestamp 560 + } 561 + 562 + // LayerRecord represents metadata about a container layer stored in the hold 563 + // Collection: io.atcr.hold.layer 564 + // Stored in the hold's embedded PDS for tracking and analytics 565 + // Uses CBOR encoding for efficient storage in hold's carstore 566 + type LayerRecord struct { 567 + Type string `json:"$type" cborgen:"$type"` 568 + Digest string `json:"digest" cborgen:"digest"` // Layer digest (e.g., "sha256:abc123...") 569 + Size int64 `json:"size" cborgen:"size"` // Size in bytes 570 + MediaType string `json:"mediaType" cborgen:"mediaType"` // Media type (e.g., "application/vnd.oci.image.layer.v1.tar+gzip") 571 + Repository string `json:"repository" cborgen:"repository"` // Repository this layer belongs to 572 + UserDID string `json:"userDid" cborgen:"userDid"` // DID of user who uploaded this layer 573 + UserHandle string `json:"userHandle" cborgen:"userHandle"` // Handle of user (for display purposes) 574 + CreatedAt string `json:"createdAt" cborgen:"createdAt"` // RFC3339 timestamp 575 + } 576 + 577 + // NewLayerRecord creates a new layer record 578 + func NewLayerRecord(digest string, size int64, mediaType, repository, userDID, userHandle string) *LayerRecord { 579 + return &LayerRecord{ 580 + Type: LayerCollection, 581 + Digest: digest, 582 + Size: size, 583 + MediaType: mediaType, 584 + Repository: repository, 585 + UserDID: userDID, 586 + UserHandle: userHandle, 587 + CreatedAt: time.Now().Format(time.RFC3339), 588 + } 521 589 } 522 590 523 591 // TangledProfileRecord represents a Tangled profile for the hold
-293
pkg/atproto/manifest_store.go
··· 1 - package atproto 2 - 3 - import ( 4 - "context" 5 - "encoding/json" 6 - "errors" 7 - "fmt" 8 - "maps" 9 - "strings" 10 - 11 - "github.com/distribution/distribution/v3" 12 - "github.com/opencontainers/go-digest" 13 - ) 14 - 15 - // DatabaseMetrics interface for tracking push and pull counts 16 - type DatabaseMetrics interface { 17 - IncrementPushCount(did, repository string) error 18 - IncrementPullCount(did, repository string) error 19 - } 20 - 21 - // ManifestStore implements distribution.ManifestService 22 - // It stores manifests in ATProto as records 23 - type ManifestStore struct { 24 - client *Client 25 - repository string 26 - holdEndpoint string // Hold service endpoint URL (for legacy, to be deprecated) 27 - holdDID string // Hold service DID (primary reference) 28 - did string // User's DID for cache key 29 - lastFetchedHoldDID string // Hold DID from most recently fetched manifest (for pull) 30 - blobStore distribution.BlobStore // Blob store for fetching config during push 31 - database DatabaseMetrics // Database for metrics tracking 32 - } 33 - 34 - // NewManifestStore creates a new ATProto-backed manifest store 35 - func NewManifestStore(client *Client, repository string, holdEndpoint string, holdDID string, did string, blobStore distribution.BlobStore, database DatabaseMetrics) *ManifestStore { 36 - return &ManifestStore{ 37 - client: client, 38 - repository: repository, 39 - holdEndpoint: holdEndpoint, 40 - holdDID: holdDID, 41 - did: did, 42 - blobStore: blobStore, 43 - database: database, 44 - } 45 - } 46 - 47 - // Exists checks if a manifest exists by digest 48 - func (s *ManifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { 49 - rkey := digestToRKey(dgst) 50 - _, err := s.client.GetRecord(ctx, ManifestCollection, rkey) 51 - if err != nil { 52 - // If not found, return false without error 53 - if errors.Is(err, ErrRecordNotFound) { 54 - return false, nil 55 - } 56 - return false, err 57 - } 58 - return true, nil 
59 - } 60 - 61 - // Get retrieves a manifest by digest 62 - func (s *ManifestStore) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { 63 - rkey := digestToRKey(dgst) 64 - record, err := s.client.GetRecord(ctx, ManifestCollection, rkey) 65 - if err != nil { 66 - return nil, distribution.ErrManifestUnknownRevision{ 67 - Name: s.repository, 68 - Revision: dgst, 69 - } 70 - } 71 - 72 - var manifestRecord ManifestRecord 73 - if err := json.Unmarshal(record.Value, &manifestRecord); err != nil { 74 - return nil, fmt.Errorf("failed to unmarshal manifest record: %w", err) 75 - } 76 - 77 - // Store the hold DID for subsequent blob requests during pull 78 - // Prefer HoldDID (new format) with fallback to HoldEndpoint (legacy URL format) 79 - // The routing repository will cache this for concurrent blob fetches 80 - if manifestRecord.HoldDID != "" { 81 - // New format: DID reference (preferred) 82 - s.lastFetchedHoldDID = manifestRecord.HoldDID 83 - } else if manifestRecord.HoldEndpoint != "" { 84 - // Legacy format: URL reference - convert to DID 85 - s.lastFetchedHoldDID = ResolveHoldDIDFromURL(manifestRecord.HoldEndpoint) 86 - } 87 - 88 - var ociManifest []byte 89 - 90 - // New records: Download blob from ATProto blob storage 91 - if manifestRecord.ManifestBlob != nil && manifestRecord.ManifestBlob.Ref.Link != "" { 92 - ociManifest, err = s.client.GetBlob(ctx, manifestRecord.ManifestBlob.Ref.Link) 93 - if err != nil { 94 - return nil, fmt.Errorf("failed to download manifest blob: %w", err) 95 - } 96 - } 97 - 98 - // Track pull count (increment asynchronously to avoid blocking the response) 99 - if s.database != nil { 100 - go func() { 101 - if err := s.database.IncrementPullCount(s.did, s.repository); err != nil { 102 - fmt.Printf("WARNING: Failed to increment pull count for %s/%s: %v\n", s.did, s.repository, err) 103 - } 104 - }() 105 - } 106 - 107 - // Parse the manifest based on media type 108 - 
// For now, we'll return the raw bytes wrapped in a manifest object 109 - // In a full implementation, you'd use distribution's manifest parsing 110 - return &rawManifest{ 111 - mediaType: manifestRecord.MediaType, 112 - payload: ociManifest, 113 - }, nil 114 - } 115 - 116 - // Put stores a manifest 117 - func (s *ManifestStore) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { 118 - // Get the manifest payload (raw bytes) 119 - mediaType, payload, err := manifest.Payload() 120 - if err != nil { 121 - return "", err 122 - } 123 - 124 - // Calculate digest 125 - dgst := digest.FromBytes(payload) 126 - 127 - // Upload manifest as blob to PDS 128 - blobRef, err := s.client.UploadBlob(ctx, payload, mediaType) 129 - if err != nil { 130 - return "", fmt.Errorf("failed to upload manifest blob: %w", err) 131 - } 132 - 133 - // Create manifest record with structured metadata 134 - manifestRecord, err := NewManifestRecord(s.repository, dgst.String(), payload) 135 - if err != nil { 136 - return "", fmt.Errorf("failed to create manifest record: %w", err) 137 - } 138 - 139 - // Set the blob reference, hold DID, and hold endpoint 140 - manifestRecord.ManifestBlob = blobRef 141 - manifestRecord.HoldDID = s.holdDID // Primary reference (DID) 142 - manifestRecord.HoldEndpoint = s.holdEndpoint // Legacy reference (URL) for backward compat 143 - 144 - // Extract Dockerfile labels from config blob and add to annotations 145 - // Only for image manifests (not manifest lists which don't have config blobs) 146 - isManifestList := strings.Contains(manifestRecord.MediaType, "manifest.list") || 147 - strings.Contains(manifestRecord.MediaType, "image.index") 148 - 149 - if !isManifestList && s.blobStore != nil && manifestRecord.Config != nil && manifestRecord.Config.Digest != "" { 150 - labels, err := s.extractConfigLabels(ctx, manifestRecord.Config.Digest) 151 - if err != nil { 152 - // Log error but don't fail 
the push - labels are optional 153 - fmt.Printf("WARNING: Failed to extract config labels: %v\n", err) 154 - } else { 155 - // Initialize annotations map if needed 156 - if manifestRecord.Annotations == nil { 157 - manifestRecord.Annotations = make(map[string]string) 158 - } 159 - 160 - // Copy labels to annotations (Dockerfile LABELs → manifest annotations) 161 - maps.Copy(manifestRecord.Annotations, labels) 162 - 163 - fmt.Printf("DEBUG: Extracted %d labels from config blob\n", len(labels)) 164 - } 165 - } 166 - 167 - // Store manifest record in ATProto 168 - rkey := digestToRKey(dgst) 169 - _, err = s.client.PutRecord(ctx, ManifestCollection, rkey, manifestRecord) 170 - if err != nil { 171 - return "", fmt.Errorf("failed to store manifest record in ATProto: %w", err) 172 - } 173 - 174 - // Track push count (increment asynchronously to avoid blocking the response) 175 - if s.database != nil { 176 - go func() { 177 - if err := s.database.IncrementPushCount(s.did, s.repository); err != nil { 178 - fmt.Printf("WARNING: Failed to increment push count for %s/%s: %v\n", s.did, s.repository, err) 179 - } 180 - }() 181 - } 182 - 183 - // Also handle tag if specified 184 - for _, option := range options { 185 - if tagOpt, ok := option.(distribution.WithTagOption); ok { 186 - tag := tagOpt.Tag 187 - tagRecord := NewTagRecord(s.client.DID(), s.repository, tag, dgst.String()) 188 - tagRKey := RepositoryTagToRKey(s.repository, tag) 189 - _, err = s.client.PutRecord(ctx, TagCollection, tagRKey, tagRecord) 190 - if err != nil { 191 - return "", fmt.Errorf("failed to store tag in ATProto: %w", err) 192 - } 193 - } 194 - } 195 - 196 - return dgst, nil 197 - } 198 - 199 - // Delete removes a manifest 200 - func (s *ManifestStore) Delete(ctx context.Context, dgst digest.Digest) error { 201 - rkey := digestToRKey(dgst) 202 - return s.client.DeleteRecord(ctx, ManifestCollection, rkey) 203 - } 204 - 205 - // digestToRKey converts a digest to an ATProto record key 206 - // ATProto 
rkeys must be valid strings, so we use the digest string without the algorithm prefix 207 - func digestToRKey(dgst digest.Digest) string { 208 - // Remove the algorithm prefix (e.g., "sha256:") 209 - return dgst.Encoded() 210 - } 211 - 212 - // RepositoryTagToRKey converts a repository and tag to an ATProto record key 213 - // ATProto record keys must match: ^[a-zA-Z0-9._~-]{1,512}$ 214 - func RepositoryTagToRKey(repository, tag string) string { 215 - // Combine repository and tag to create a unique key 216 - // Replace invalid characters: slashes become tildes (~) 217 - // We use tilde instead of dash to avoid ambiguity with repository names that contain hyphens 218 - key := fmt.Sprintf("%s_%s", repository, tag) 219 - 220 - // Replace / with ~ (slash not allowed in rkeys, tilde is allowed and unlikely in repo names) 221 - key = strings.ReplaceAll(key, "/", "~") 222 - 223 - return key 224 - } 225 - 226 - // RKeyToRepositoryTag converts an ATProto record key back to repository and tag 227 - // This is the inverse of RepositoryTagToRKey 228 - // Note: If the tag contains underscores, this will split on the LAST underscore 229 - func RKeyToRepositoryTag(rkey string) (repository, tag string) { 230 - // Find the last underscore to split repository and tag 231 - lastUnderscore := strings.LastIndex(rkey, "_") 232 - if lastUnderscore == -1 { 233 - // No underscore found - treat entire string as tag with empty repository 234 - return "", rkey 235 - } 236 - 237 - repository = rkey[:lastUnderscore] 238 - tag = rkey[lastUnderscore+1:] 239 - 240 - // Convert tildes back to slashes in repository (tilde was used to encode slashes) 241 - repository = strings.ReplaceAll(repository, "~", "/") 242 - 243 - return repository, tag 244 - } 245 - 246 - // GetLastFetchedHoldDID returns the hold DID from the most recently fetched manifest 247 - // This is used by the routing repository to cache the hold for blob requests 248 - func (s *ManifestStore) GetLastFetchedHoldDID() string { 249 - 
return s.lastFetchedHoldDID 250 - } 251 - 252 - // rawManifest is a simple implementation of distribution.Manifest 253 - type rawManifest struct { 254 - mediaType string 255 - payload []byte 256 - } 257 - 258 - func (m *rawManifest) References() []distribution.Descriptor { 259 - // TODO: Parse the manifest and return actual references 260 - return nil 261 - } 262 - 263 - func (m *rawManifest) Payload() (string, []byte, error) { 264 - return m.mediaType, m.payload, nil 265 - } 266 - 267 - // extractConfigLabels fetches the image config blob and extracts Dockerfile LABELs 268 - func (s *ManifestStore) extractConfigLabels(ctx context.Context, configDigestStr string) (map[string]string, error) { 269 - // Parse digest string 270 - configDigest, err := digest.Parse(configDigestStr) 271 - if err != nil { 272 - return nil, fmt.Errorf("invalid config digest: %w", err) 273 - } 274 - 275 - // Fetch config blob from storage 276 - configData, err := s.blobStore.Get(ctx, configDigest) 277 - if err != nil { 278 - return nil, fmt.Errorf("failed to fetch config blob: %w", err) 279 - } 280 - 281 - // Parse config JSON 282 - var configJSON struct { 283 - Config struct { 284 - Labels map[string]string `json:"Labels"` 285 - } `json:"config"` 286 - } 287 - 288 - if err := json.Unmarshal(configData, &configJSON); err != nil { 289 - return nil, fmt.Errorf("failed to parse config JSON: %w", err) 290 - } 291 - 292 - return configJSON.Config.Labels, nil 293 - }
+51 -193
pkg/atproto/manifest_store_test.go pkg/appview/storage/manifest_store_test.go
··· 1 - package atproto 1 + package storage 2 2 3 3 import ( 4 4 "context" ··· 7 7 "net/http" 8 8 "testing" 9 9 10 + "atcr.io/pkg/atproto" 10 11 "github.com/distribution/distribution/v3" 11 12 "github.com/opencontainers/go-digest" 12 13 ) ··· 92 93 return nil, nil // Not needed for current tests 93 94 } 94 95 95 - // mockATProtoClient mocks the ATProto client for testing 96 - type mockATProtoClient struct { 97 - records map[string]map[string]interface{} // collection -> rkey -> record 98 - blobs map[string][]byte // cid -> blob data 99 - } 100 - 101 - func newMockATProtoClient() *mockATProtoClient { 102 - return &mockATProtoClient{ 103 - records: make(map[string]map[string]interface{}), 104 - blobs: make(map[string][]byte), 96 + // mockRegistryContext creates a mock RegistryContext for testing 97 + func mockRegistryContext(client *atproto.Client, repository, holdDID, did, handle string, database DatabaseMetrics) *RegistryContext { 98 + return &RegistryContext{ 99 + ATProtoClient: client, 100 + Repository: repository, 101 + HoldDID: holdDID, 102 + DID: did, 103 + Handle: handle, 104 + Database: database, 105 105 } 106 106 } 107 107 ··· 134 134 } 135 135 } 136 136 137 - // TestRepositoryTagToRKey tests repository+tag to record key conversion 138 - func TestRepositoryTagToRKey(t *testing.T) { 139 - tests := []struct { 140 - name string 141 - repository string 142 - tag string 143 - want string 144 - }{ 145 - { 146 - name: "simple repo and tag", 147 - repository: "myapp", 148 - tag: "latest", 149 - want: "myapp_latest", 150 - }, 151 - { 152 - name: "repo with namespace", 153 - repository: "org/myapp", 154 - tag: "v1.0.0", 155 - want: "org~myapp_v1.0.0", 156 - }, 157 - { 158 - name: "tag with underscore", 159 - repository: "myapp", 160 - tag: "test_tag", 161 - want: "myapp_test_tag", 162 - }, 163 - { 164 - name: "deep namespace", 165 - repository: "a/b/c/myapp", 166 - tag: "prod", 167 - want: "a~b~c~myapp_prod", 168 - }, 169 - } 170 - 171 - for _, tt := range tests { 
172 - t.Run(tt.name, func(t *testing.T) { 173 - got := RepositoryTagToRKey(tt.repository, tt.tag) 174 - if got != tt.want { 175 - t.Errorf("RepositoryTagToRKey() = %v, want %v", got, tt.want) 176 - } 177 - }) 178 - } 179 - } 180 - 181 - // TestRKeyToRepositoryTag tests converting record key back to repository and tag 182 - func TestRKeyToRepositoryTag(t *testing.T) { 183 - tests := []struct { 184 - name string 185 - rkey string 186 - wantRepository string 187 - wantTag string 188 - }{ 189 - { 190 - name: "simple key", 191 - rkey: "myapp_latest", 192 - wantRepository: "myapp", 193 - wantTag: "latest", 194 - }, 195 - { 196 - name: "namespaced repo", 197 - rkey: "org~myapp_v1.0.0", 198 - wantRepository: "org/myapp", 199 - wantTag: "v1.0.0", 200 - }, 201 - { 202 - name: "tag with underscore (splits on last underscore)", 203 - rkey: "myapp_test_tag", 204 - wantRepository: "myapp_test", 205 - wantTag: "tag", 206 - }, 207 - { 208 - name: "deep namespace", 209 - rkey: "a~b~c~myapp_prod", 210 - wantRepository: "a/b/c/myapp", 211 - wantTag: "prod", 212 - }, 213 - { 214 - name: "no underscore - all tag", 215 - rkey: "latest", 216 - wantRepository: "", 217 - wantTag: "latest", 218 - }, 219 - } 220 - 221 - for _, tt := range tests { 222 - t.Run(tt.name, func(t *testing.T) { 223 - gotRepo, gotTag := RKeyToRepositoryTag(tt.rkey) 224 - if gotRepo != tt.wantRepository { 225 - t.Errorf("RKeyToRepositoryTag() repository = %v, want %v", gotRepo, tt.wantRepository) 226 - } 227 - if gotTag != tt.wantTag { 228 - t.Errorf("RKeyToRepositoryTag() tag = %v, want %v", gotTag, tt.wantTag) 229 - } 230 - }) 231 - } 232 - } 233 - 234 - // TestRepositoryTagRoundTrip tests that converting to rkey and back preserves values 235 - // Note: Tags with underscores cannot be perfectly round-tripped since we use underscore as separator 236 - func TestRepositoryTagRoundTrip(t *testing.T) { 237 - tests := []struct { 238 - repository string 239 - tag string 240 - }{ 241 - {"myapp", "latest"}, 242 - 
{"org/myapp", "v1.0.0"}, 243 - {"a/b/c/myapp", "prod"}, 244 - // Note: Tags with underscores are excluded - they cannot round-trip correctly 245 - // because underscore is used as the separator between repository and tag 246 - } 247 - 248 - for _, tt := range tests { 249 - t.Run(tt.repository+":"+tt.tag, func(t *testing.T) { 250 - rkey := RepositoryTagToRKey(tt.repository, tt.tag) 251 - gotRepo, gotTag := RKeyToRepositoryTag(rkey) 252 - 253 - if gotRepo != tt.repository { 254 - t.Errorf("Round trip failed: repository = %v, want %v", gotRepo, tt.repository) 255 - } 256 - if gotTag != tt.tag { 257 - t.Errorf("Round trip failed: tag = %v, want %v", gotTag, tt.tag) 258 - } 259 - }) 260 - } 261 - } 262 - 263 137 // TestNewManifestStore tests creating a new manifest store 264 138 func TestNewManifestStore(t *testing.T) { 265 - client := NewClient("https://pds.example.com", "did:plc:test123", "token") 139 + client := atproto.NewClient("https://pds.example.com", "did:plc:test123", "token") 266 140 blobStore := newMockBlobStore() 267 141 db := &mockDatabaseMetrics{} 268 142 269 - store := NewManifestStore( 270 - client, 271 - "myapp", 272 - "https://hold.example.com", 273 - "did:web:hold.example.com", 274 - "did:plc:alice123", 275 - blobStore, 276 - db, 277 - ) 143 + ctx := mockRegistryContext(client, "myapp", "did:web:hold.example.com", "did:plc:alice123", "alice.test", db) 144 + store := NewManifestStore(ctx, nil, blobStore) 278 145 279 - if store.repository != "myapp" { 280 - t.Errorf("repository = %v, want myapp", store.repository) 146 + if store.ctx.Repository != "myapp" { 147 + t.Errorf("repository = %v, want myapp", store.ctx.Repository) 281 148 } 282 - if store.holdEndpoint != "https://hold.example.com" { 283 - t.Errorf("holdEndpoint = %v, want https://hold.example.com", store.holdEndpoint) 149 + if store.ctx.HoldDID != "did:web:hold.example.com" { 150 + t.Errorf("holdDID = %v, want did:web:hold.example.com", store.ctx.HoldDID) 284 151 } 285 - if store.holdDID != 
"did:web:hold.example.com" { 286 - t.Errorf("holdDID = %v, want did:web:hold.example.com", store.holdDID) 152 + if store.ctx.DID != "did:plc:alice123" { 153 + t.Errorf("did = %v, want did:plc:alice123", store.ctx.DID) 287 154 } 288 - if store.did != "did:plc:alice123" { 289 - t.Errorf("did = %v, want did:plc:alice123", store.did) 155 + if store.ctx.Handle != "alice.test" { 156 + t.Errorf("handle = %v, want alice.test", store.ctx.Handle) 290 157 } 291 158 } 292 159 ··· 320 187 321 188 for _, tt := range tests { 322 189 t.Run(tt.name, func(t *testing.T) { 323 - client := NewClient("https://pds.example.com", "did:plc:test123", "token") 324 - store := NewManifestStore(client, "myapp", "", "", "did:plc:test123", nil, nil) 190 + client := atproto.NewClient("https://pds.example.com", "did:plc:test123", "token") 191 + ctx := mockRegistryContext(client, "myapp", "", "did:plc:test123", "test.handle", nil) 192 + store := NewManifestStore(ctx, nil, nil) 325 193 326 194 // Simulate what happens in Get() when parsing a manifest record 327 - var manifestRecord ManifestRecord 195 + var manifestRecord atproto.ManifestRecord 328 196 manifestRecord.HoldDID = tt.manifestHoldDID 329 197 manifestRecord.HoldEndpoint = tt.manifestHoldURL 330 198 ··· 332 200 if manifestRecord.HoldDID != "" { 333 201 store.lastFetchedHoldDID = manifestRecord.HoldDID 334 202 } else if manifestRecord.HoldEndpoint != "" { 335 - store.lastFetchedHoldDID = ResolveHoldDIDFromURL(manifestRecord.HoldEndpoint) 203 + store.lastFetchedHoldDID = atproto.ResolveHoldDIDFromURL(manifestRecord.HoldEndpoint) 336 204 } 337 205 338 206 got := store.GetLastFetchedHoldDID() ··· 377 245 // TestExtractConfigLabels tests extracting labels from image config 378 246 func TestExtractConfigLabels(t *testing.T) { 379 247 // Create a mock config blob 380 - configJSON := map[string]interface{}{ 381 - "config": map[string]interface{}{ 248 + configJSON := map[string]any{ 249 + "config": map[string]any{ 382 250 "Labels": map[string]string{ 
383 251 "org.opencontainers.image.version": "1.0.0", 384 252 "org.opencontainers.image.authors": "test@example.com", ··· 394 262 blobStore.blobs[configDigest] = configData 395 263 396 264 // Create manifest store 397 - client := NewClient("https://pds.example.com", "did:plc:test123", "token") 398 - store := NewManifestStore(client, "myapp", "", "", "did:plc:test123", blobStore, nil) 265 + client := atproto.NewClient("https://pds.example.com", "did:plc:test123", "token") 266 + ctx := mockRegistryContext(client, "myapp", "", "did:plc:test123", "test.handle", nil) 267 + store := NewManifestStore(ctx, nil, blobStore) 399 268 400 269 // Extract labels 401 270 labels, err := store.extractConfigLabels(context.Background(), configDigest.String()) ··· 424 293 // TestExtractConfigLabels_NoLabels tests handling config without labels 425 294 func TestExtractConfigLabels_NoLabels(t *testing.T) { 426 295 // Config without Labels field 427 - configJSON := map[string]interface{}{ 428 - "config": map[string]interface{}{}, 296 + configJSON := map[string]any{ 297 + "config": map[string]any{}, 429 298 } 430 299 configData, _ := json.Marshal(configJSON) 431 300 ··· 433 302 configDigest := digest.FromBytes(configData) 434 303 blobStore.blobs[configDigest] = configData 435 304 436 - client := NewClient("https://pds.example.com", "did:plc:test123", "token") 437 - store := NewManifestStore(client, "myapp", "", "", "did:plc:test123", blobStore, nil) 305 + client := atproto.NewClient("https://pds.example.com", "did:plc:test123", "token") 306 + ctx := mockRegistryContext(client, "myapp", "", "did:plc:test123", "test.handle", nil) 307 + store := NewManifestStore(ctx, nil, blobStore) 438 308 439 309 labels, err := store.extractConfigLabels(context.Background(), configDigest.String()) 440 310 if err != nil { ··· 450 320 // TestExtractConfigLabels_InvalidDigest tests error handling for invalid digest 451 321 func TestExtractConfigLabels_InvalidDigest(t *testing.T) { 452 322 blobStore := 
newMockBlobStore() 453 - client := NewClient("https://pds.example.com", "did:plc:test123", "token") 454 - store := NewManifestStore(client, "myapp", "", "", "did:plc:test123", blobStore, nil) 323 + client := atproto.NewClient("https://pds.example.com", "did:plc:test123", "token") 324 + ctx := mockRegistryContext(client, "myapp", "", "did:plc:test123", "test.handle", nil) 325 + store := NewManifestStore(ctx, nil, blobStore) 455 326 456 327 _, err := store.extractConfigLabels(context.Background(), "invalid-digest") 457 328 if err == nil { ··· 468 339 configDigest := digest.FromBytes(configData) 469 340 blobStore.blobs[configDigest] = configData 470 341 471 - client := NewClient("https://pds.example.com", "did:plc:test123", "token") 472 - store := NewManifestStore(client, "myapp", "", "", "did:plc:test123", blobStore, nil) 342 + client := atproto.NewClient("https://pds.example.com", "did:plc:test123", "token") 343 + ctx := mockRegistryContext(client, "myapp", "", "did:plc:test123", "test.handle", nil) 344 + store := NewManifestStore(ctx, nil, blobStore) 473 345 474 346 _, err := store.extractConfigLabels(context.Background(), configDigest.String()) 475 347 if err == nil { ··· 480 352 // TestManifestStore_WithMetrics tests that metrics are tracked 481 353 func TestManifestStore_WithMetrics(t *testing.T) { 482 354 db := &mockDatabaseMetrics{} 483 - client := NewClient("https://pds.example.com", "did:plc:test123", "token") 484 - store := NewManifestStore( 485 - client, 486 - "myapp", 487 - "https://hold.example.com", 488 - "did:web:hold.example.com", 489 - "did:plc:alice123", 490 - nil, 491 - db, 492 - ) 355 + client := atproto.NewClient("https://pds.example.com", "did:plc:test123", "token") 356 + ctx := mockRegistryContext(client, "myapp", "did:web:hold.example.com", "did:plc:alice123", "alice.test", db) 357 + store := NewManifestStore(ctx, nil, nil) 493 358 494 - if store.database != db { 359 + if store.ctx.Database != db { 495 360 t.Error("ManifestStore should store 
database reference") 496 361 } 497 362 ··· 501 366 502 367 // TestManifestStore_WithoutMetrics tests that nil database is acceptable 503 368 func TestManifestStore_WithoutMetrics(t *testing.T) { 504 - client := NewClient("https://pds.example.com", "did:plc:test123", "token") 505 - store := NewManifestStore( 506 - client, 507 - "myapp", 508 - "https://hold.example.com", 509 - "did:web:hold.example.com", 510 - "did:plc:alice123", 511 - nil, 512 - nil, // nil database 513 - ) 369 + client := atproto.NewClient("https://pds.example.com", "did:plc:test123", "token") 370 + ctx := mockRegistryContext(client, "myapp", "did:web:hold.example.com", "did:plc:alice123", "alice.test", nil) 371 + store := NewManifestStore(ctx, nil, nil) 514 372 515 - if store.database != nil { 373 + if store.ctx.Database != nil { 516 374 t.Error("ManifestStore should accept nil database") 517 375 } 518 376 }
+7 -7
pkg/atproto/profile_test.go
··· 48 48 49 49 // Second request: PutRecord (create profile) 50 50 if r.Method == "POST" && strings.Contains(r.URL.Path, "putRecord") { 51 - var body map[string]interface{} 51 + var body map[string]any 52 52 json.NewDecoder(r.Body).Decode(&body) 53 53 54 54 // Verify profile data 55 - recordData := body["record"].(map[string]interface{}) 55 + recordData := body["record"].(map[string]any) 56 56 if recordData["$type"] != SailorProfileCollection { 57 57 t.Errorf("$type = %v, want %v", recordData["$type"], SailorProfileCollection) 58 58 } ··· 218 218 migrationLocks = sync.Map{} 219 219 220 220 putRecordCalled := false 221 - var migrationRequest map[string]interface{} 221 + var migrationRequest map[string]any 222 222 223 223 server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 224 224 // GetRecord ··· 273 273 } 274 274 275 275 if migrationRequest != nil { 276 - recordData := migrationRequest["record"].(map[string]interface{}) 276 + recordData := migrationRequest["record"].(map[string]any) 277 277 migratedHold := recordData["defaultHold"] 278 278 if migratedHold != tt.expectedHoldDID { 279 279 t.Errorf("Migrated defaultHold = %v, want %v", migratedHold, tt.expectedHoldDID) ··· 401 401 402 402 for _, tt := range tests { 403 403 t.Run(tt.name, func(t *testing.T) { 404 - var sentProfile map[string]interface{} 404 + var sentProfile map[string]any 405 405 406 406 server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 407 407 if r.Method == "POST" && strings.Contains(r.URL.Path, "putRecord") { 408 - var body map[string]interface{} 408 + var body map[string]any 409 409 json.NewDecoder(r.Body).Decode(&body) 410 410 sentProfile = body 411 411 ··· 432 432 433 433 if !tt.wantErr { 434 434 // Verify normalization happened 435 - recordData := sentProfile["record"].(map[string]interface{}) 435 + recordData := sentProfile["record"].(map[string]any) 436 436 defaultHold := recordData["defaultHold"] 437 437 // 
Handle empty string (may be nil in JSON) 438 438 defaultHoldStr := ""
+16 -15
pkg/atproto/tag_store.go pkg/appview/storage/tag_store.go
··· 1 - package atproto 1 + package storage 2 2 3 3 import ( 4 4 "context" 5 5 "encoding/json" 6 6 "fmt" 7 7 8 + "atcr.io/pkg/atproto" 8 9 "github.com/distribution/distribution/v3" 9 10 "github.com/opencontainers/go-digest" 10 11 ) ··· 12 13 // TagStore implements distribution.TagService 13 14 // It stores tags in ATProto as records 14 15 type TagStore struct { 15 - client *Client 16 + client *atproto.Client 16 17 repository string 17 18 } 18 19 19 20 // NewTagStore creates a new ATProto-backed tag store 20 - func NewTagStore(client *Client, repository string) *TagStore { 21 + func NewTagStore(client *atproto.Client, repository string) *TagStore { 21 22 return &TagStore{ 22 23 client: client, 23 24 repository: repository, ··· 27 28 // Get retrieves the descriptor for a tag 28 29 func (s *TagStore) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { 29 30 // Build record key 30 - rkey := RepositoryTagToRKey(s.repository, tag) 31 + rkey := atproto.RepositoryTagToRKey(s.repository, tag) 31 32 32 33 // Fetch tag record from ATProto 33 - record, err := s.client.GetRecord(ctx, TagCollection, rkey) 34 + record, err := s.client.GetRecord(ctx, atproto.TagCollection, rkey) 34 35 if err != nil { 35 36 return distribution.Descriptor{}, distribution.ErrTagUnknown{Tag: tag} 36 37 } 37 38 38 - var tagRecord TagRecord 39 + var tagRecord atproto.TagRecord 39 40 if err := json.Unmarshal(record.Value, &tagRecord); err != nil { 40 41 return distribution.Descriptor{}, fmt.Errorf("failed to unmarshal tag record: %w", err) 41 42 } ··· 62 63 // Tag associates a tag with a descriptor (manifest digest) 63 64 func (s *TagStore) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { 64 65 // Create tag record with manifest AT-URI 65 - tagRecord := NewTagRecord(s.client.DID(), s.repository, tag, desc.Digest.String()) 66 + tagRecord := atproto.NewTagRecord(s.client.DID(), s.repository, tag, desc.Digest.String()) 66 67 67 68 // Store in ATProto 68 - rkey 
:= RepositoryTagToRKey(s.repository, tag) 69 - _, err := s.client.PutRecord(ctx, TagCollection, rkey, tagRecord) 69 + rkey := atproto.RepositoryTagToRKey(s.repository, tag) 70 + _, err := s.client.PutRecord(ctx, atproto.TagCollection, rkey, tagRecord) 70 71 if err != nil { 71 72 return fmt.Errorf("failed to store tag in ATProto: %w", err) 72 73 } ··· 76 77 77 78 // Untag removes a tag 78 79 func (s *TagStore) Untag(ctx context.Context, tag string) error { 79 - rkey := RepositoryTagToRKey(s.repository, tag) 80 - return s.client.DeleteRecord(ctx, TagCollection, rkey) 80 + rkey := atproto.RepositoryTagToRKey(s.repository, tag) 81 + return s.client.DeleteRecord(ctx, atproto.TagCollection, rkey) 81 82 } 82 83 83 84 // All returns all tags for this repository 84 85 func (s *TagStore) All(ctx context.Context) ([]string, error) { 85 86 // List all records in the tag collection 86 - records, err := s.client.ListRecords(ctx, TagCollection, 100) 87 + records, err := s.client.ListRecords(ctx, atproto.TagCollection, 100) 87 88 if err != nil { 88 89 return nil, fmt.Errorf("failed to list tags: %w", err) 89 90 } 90 91 91 92 var tags []string 92 93 for _, record := range records { 93 - var tagRecord TagRecord 94 + var tagRecord atproto.TagRecord 94 95 if err := json.Unmarshal(record.Value, &tagRecord); err != nil { 95 96 // Skip invalid records 96 97 continue ··· 108 109 // Lookup returns the set of tags for a given digest 109 110 func (s *TagStore) Lookup(ctx context.Context, desc distribution.Descriptor) ([]string, error) { 110 111 // List all records in the tag collection 111 - records, err := s.client.ListRecords(ctx, TagCollection, 100) 112 + records, err := s.client.ListRecords(ctx, atproto.TagCollection, 100) 112 113 if err != nil { 113 114 return nil, fmt.Errorf("failed to list tags: %w", err) 114 115 } 115 116 116 117 var tags []string 117 118 for _, record := range records { 118 - var tagRecord TagRecord 119 + var tagRecord atproto.TagRecord 119 120 if err := 
json.Unmarshal(record.Value, &tagRecord); err != nil { 120 121 // Skip invalid records 121 122 continue
+32 -31
pkg/atproto/tag_store_test.go pkg/appview/storage/tag_store_test.go
··· 1 - package atproto 1 + package storage 2 2 3 3 import ( 4 4 "context" ··· 8 8 "strings" 9 9 "testing" 10 10 11 + "atcr.io/pkg/atproto" 11 12 "github.com/distribution/distribution/v3" 12 13 "github.com/opencontainers/go-digest" 13 14 ) 14 15 15 16 // TestNewTagStore tests creating a new tag store 16 17 func TestNewTagStore(t *testing.T) { 17 - client := NewClient("https://pds.example.com", "did:plc:test123", "token") 18 + client := atproto.NewClient("https://pds.example.com", "did:plc:test123", "token") 18 19 store := NewTagStore(client, "myapp") 19 20 20 21 if store.repository != "myapp" { ··· 67 68 server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 68 69 // Verify query parameters 69 70 query := r.URL.Query() 70 - rkey := RepositoryTagToRKey("myapp", tt.tag) 71 + rkey := atproto.RepositoryTagToRKey("myapp", tt.tag) 71 72 if query.Get("rkey") != rkey { 72 73 t.Errorf("rkey = %v, want %v", query.Get("rkey"), rkey) 73 74 } 74 - if query.Get("collection") != TagCollection { 75 - t.Errorf("collection = %v, want %v", query.Get("collection"), TagCollection) 75 + if query.Get("collection") != atproto.TagCollection { 76 + t.Errorf("collection = %v, want %v", query.Get("collection"), atproto.TagCollection) 76 77 } 77 78 78 79 w.WriteHeader(tt.serverStatus) ··· 80 81 })) 81 82 defer server.Close() 82 83 83 - client := NewClient(server.URL, "did:plc:test123", "test-token") 84 + client := atproto.NewClient(server.URL, "did:plc:test123", "test-token") 84 85 store := NewTagStore(client, "myapp") 85 86 86 87 desc, err := store.Get(context.Background(), tt.tag) ··· 119 120 })) 120 121 defer server.Close() 121 122 122 - client := NewClient(server.URL, "did:plc:test123", "test-token") 123 + client := atproto.NewClient(server.URL, "did:plc:test123", "test-token") 123 124 store := NewTagStore(client, "myapp") 124 125 125 126 _, err := store.Get(context.Background(), "latest") ··· 148 149 })) 149 150 defer server.Close() 150 151 151 - 
client := NewClient(server.URL, "did:plc:test123", "test-token") 152 + client := atproto.NewClient(server.URL, "did:plc:test123", "test-token") 152 153 store := NewTagStore(client, "myapp") 153 154 154 155 desc, err := store.Get(context.Background(), "latest") ··· 181 182 })) 182 183 defer server.Close() 183 184 184 - client := NewClient(server.URL, "did:plc:test123", "test-token") 185 + client := atproto.NewClient(server.URL, "did:plc:test123", "test-token") 185 186 store := NewTagStore(client, "myapp") 186 187 187 188 desc, err := store.Get(context.Background(), "latest") ··· 228 229 229 230 for _, tt := range tests { 230 231 t.Run(tt.name, func(t *testing.T) { 231 - var sentTagRecord *TagRecord 232 + var sentTagRecord *atproto.TagRecord 232 233 233 234 server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 234 235 if r.Method != "POST" { ··· 236 237 } 237 238 238 239 // Parse request body 239 - var body map[string]interface{} 240 + var body map[string]any 240 241 json.NewDecoder(r.Body).Decode(&body) 241 242 242 243 // Verify rkey 243 - expectedRKey := RepositoryTagToRKey("myapp", tt.tag) 244 + expectedRKey := atproto.RepositoryTagToRKey("myapp", tt.tag) 244 245 if body["rkey"] != expectedRKey { 245 246 t.Errorf("rkey = %v, want %v", body["rkey"], expectedRKey) 246 247 } 247 248 248 249 // Verify collection 249 - if body["collection"] != TagCollection { 250 - t.Errorf("collection = %v, want %v", body["collection"], TagCollection) 250 + if body["collection"] != atproto.TagCollection { 251 + t.Errorf("collection = %v, want %v", body["collection"], atproto.TagCollection) 251 252 } 252 253 253 254 // Parse and verify tag record 254 - recordData := body["record"].(map[string]interface{}) 255 + recordData := body["record"].(map[string]any) 255 256 recordBytes, _ := json.Marshal(recordData) 256 - var tagRecord TagRecord 257 + var tagRecord atproto.TagRecord 257 258 json.Unmarshal(recordBytes, &tagRecord) 258 259 sentTagRecord = 
&tagRecord 259 260 ··· 266 267 })) 267 268 defer server.Close() 268 269 269 - client := NewClient(server.URL, "did:plc:test123", "test-token") 270 + client := atproto.NewClient(server.URL, "did:plc:test123", "test-token") 270 271 store := NewTagStore(client, "myapp") 271 272 272 273 desc := distribution.Descriptor{ ··· 283 284 284 285 if !tt.wantErr && sentTagRecord != nil { 285 286 // Verify the tag record 286 - if sentTagRecord.Type != TagCollection { 287 - t.Errorf("Type = %v, want %v", sentTagRecord.Type, TagCollection) 287 + if sentTagRecord.Type != atproto.TagCollection { 288 + t.Errorf("Type = %v, want %v", sentTagRecord.Type, atproto.TagCollection) 288 289 } 289 290 if sentTagRecord.Repository != "myapp" { 290 291 t.Errorf("Repository = %v, want myapp", sentTagRecord.Repository) ··· 293 294 t.Errorf("Tag = %v, want %v", sentTagRecord.Tag, tt.tag) 294 295 } 295 296 // New records should have manifest field 296 - expectedURI := BuildManifestURI("did:plc:test123", tt.digest.String()) 297 + expectedURI := atproto.BuildManifestURI("did:plc:test123", tt.digest.String()) 297 298 if sentTagRecord.Manifest != expectedURI { 298 299 t.Errorf("Manifest = %v, want %v", sentTagRecord.Manifest, expectedURI) 299 300 } ··· 337 338 } 338 339 339 340 // Parse body to verify delete parameters 340 - var body map[string]interface{} 341 + var body map[string]any 341 342 json.NewDecoder(r.Body).Decode(&body) 342 343 343 - expectedRKey := RepositoryTagToRKey("myapp", tt.tag) 344 + expectedRKey := atproto.RepositoryTagToRKey("myapp", tt.tag) 344 345 if body["rkey"] != expectedRKey { 345 346 t.Errorf("rkey = %v, want %v", body["rkey"], expectedRKey) 346 347 } ··· 354 355 })) 355 356 defer server.Close() 356 357 357 - client := NewClient(server.URL, "did:plc:test123", "test-token") 358 + client := atproto.NewClient(server.URL, "did:plc:test123", "test-token") 358 359 store := NewTagStore(client, "myapp") 359 360 360 361 err := store.Untag(context.Background(), tt.tag) ··· 422 423 
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 423 424 // Verify query parameters 424 425 query := r.URL.Query() 425 - if query.Get("collection") != TagCollection { 426 - t.Errorf("collection = %v, want %v", query.Get("collection"), TagCollection) 426 + if query.Get("collection") != atproto.TagCollection { 427 + t.Errorf("collection = %v, want %v", query.Get("collection"), atproto.TagCollection) 427 428 } 428 429 if query.Get("limit") != "100" { 429 430 t.Errorf("limit = %v, want 100", query.Get("limit")) ··· 434 435 })) 435 436 defer server.Close() 436 437 437 - client := NewClient(server.URL, "did:plc:test123", "test-token") 438 + client := atproto.NewClient(server.URL, "did:plc:test123", "test-token") 438 439 store := NewTagStore(client, "myapp") 439 440 440 441 tags, err := store.All(context.Background()) ··· 496 497 })) 497 498 defer server.Close() 498 499 499 - client := NewClient(server.URL, "did:plc:test123", "test-token") 500 + client := atproto.NewClient(server.URL, "did:plc:test123", "test-token") 500 501 store := NewTagStore(client, "myapp") 501 502 502 503 tags, err := store.All(context.Background()) ··· 584 585 })) 585 586 defer server.Close() 586 587 587 - client := NewClient(server.URL, "did:plc:test123", "test-token") 588 + client := atproto.NewClient(server.URL, "did:plc:test123", "test-token") 588 589 store := NewTagStore(client, "myapp") 589 590 590 591 desc := distribution.Descriptor{ ··· 646 647 })) 647 648 defer server.Close() 648 649 649 - client := NewClient(server.URL, "did:plc:test123", "test-token") 650 + client := atproto.NewClient(server.URL, "did:plc:test123", "test-token") 650 651 store := NewTagStore(client, "myapp") // Looking for "myapp" tags only 651 652 652 653 desc := distribution.Descriptor{ ··· 676 677 })) 677 678 defer server.Close() 678 679 679 - client := NewClient(server.URL, "did:plc:test123", "test-token") 680 + client := atproto.NewClient(server.URL, "did:plc:test123", 
"test-token") 680 681 store := NewTagStore(client, "myapp") 681 682 682 683 // Test All() ··· 700 701 })) 701 702 defer server.Close() 702 703 703 - client := NewClient(server.URL, "did:plc:test123", "test-token") 704 + client := atproto.NewClient(server.URL, "did:plc:test123", "test-token") 704 705 store := NewTagStore(client, "myapp") 705 706 706 707 _, err := store.Get(context.Background(), "notfound")
+2 -3
pkg/auth/oauth/store.go
··· 4 4 "context" 5 5 "encoding/json" 6 6 "fmt" 7 + "maps" 7 8 "os" 8 9 "path/filepath" 9 10 "sync" ··· 178 179 179 180 // Return a copy to prevent external modification 180 181 result := make(map[string]*oauth.ClientSessionData) 181 - for k, v := range s.sessions { 182 - result[k] = v 183 - } 182 + maps.Copy(result, s.sessions) 184 183 return result 185 184 } 186 185
-1
pkg/auth/session.go
··· 107 107 return "", "", "", fmt.Errorf("failed to resolve identity %q: %w", identifier, err) 108 108 } 109 109 110 - did = ident.DID.String() 111 110 pds := ident.PDSEndpoint() 112 111 if pds == "" { 113 112 return "", "", "", fmt.Errorf("no PDS endpoint found for %q", identifier)
+2 -2
pkg/auth/token/cache.go
··· 123 123 } 124 124 125 125 // GetCacheStats returns statistics about the service token cache for debugging 126 - func GetCacheStats() map[string]interface{} { 126 + func GetCacheStats() map[string]any { 127 127 globalServiceTokensMu.RLock() 128 128 defer globalServiceTokensMu.RUnlock() 129 129 ··· 139 139 } 140 140 } 141 141 142 - return map[string]interface{}{ 142 + return map[string]any{ 143 143 "total_entries": len(globalServiceTokens), 144 144 "valid_tokens": validCount, 145 145 "expired_tokens": expiredCount,
+118
pkg/hold/oci/xrpc.go
··· 46 46 r.Put(atproto.HoldUploadPart, h.HandleUploadPart) 47 47 r.Post(atproto.HoldCompleteUpload, h.HandleCompleteUpload) 48 48 r.Post(atproto.HoldAbortUpload, h.HandleAbortUpload) 49 + r.Post(atproto.HoldNotifyManifest, h.HandleNotifyManifest) 49 50 }) 50 51 } 51 52 ··· 195 196 RespondJSON(w, http.StatusOK, map[string]any{ 196 197 "status": "aborted", 197 198 }) 199 + } 200 + 201 + // HandleNotifyManifest handles manifest upload notifications from AppView 202 + // Creates layer records and optionally posts to Bluesky 203 + func (h *XRPCHandler) HandleNotifyManifest(w http.ResponseWriter, r *http.Request) { 204 + ctx := r.Context() 205 + 206 + // Validate service token (same auth as blob:write endpoints) 207 + validatedUser, err := pds.ValidateBlobWriteAccess(r, h.pds, h.httpClient) 208 + if err != nil { 209 + RespondError(w, http.StatusForbidden, fmt.Sprintf("authorization failed: %v", err)) 210 + return 211 + } 212 + 213 + // Parse request 214 + var req struct { 215 + Repository string `json:"repository"` 216 + Tag string `json:"tag"` 217 + UserDID string `json:"userDid"` 218 + UserHandle string `json:"userHandle"` 219 + Manifest struct { 220 + MediaType string `json:"mediaType"` 221 + Config struct { 222 + Digest string `json:"digest"` 223 + Size int64 `json:"size"` 224 + } `json:"config"` 225 + Layers []struct { 226 + Digest string `json:"digest"` 227 + Size int64 `json:"size"` 228 + MediaType string `json:"mediaType"` 229 + } `json:"layers"` 230 + } `json:"manifest"` 231 + } 232 + 233 + if err := DecodeJSON(r, &req); err != nil { 234 + RespondError(w, http.StatusBadRequest, err.Error()) 235 + return 236 + } 237 + 238 + // Verify user DID matches token 239 + if req.UserDID != validatedUser.DID { 240 + RespondError(w, http.StatusForbidden, "user DID mismatch") 241 + return 242 + } 243 + 244 + // Check if manifest posts are enabled 245 + // TODO: Check captain record enableManifestPosts field 246 + // For now, posts are always created 247 + postsEnabled := 
true 248 + 249 + // Create layer records for each blob 250 + layersCreated := 0 251 + for _, layer := range req.Manifest.Layers { 252 + record := atproto.NewLayerRecord( 253 + layer.Digest, 254 + layer.Size, 255 + layer.MediaType, 256 + req.Repository, 257 + req.UserDID, 258 + req.UserHandle, 259 + ) 260 + 261 + _, _, err := h.pds.CreateLayerRecord(ctx, record) 262 + if err != nil { 263 + fmt.Printf("Failed to create layer record: %v\n", err) 264 + // Continue creating other records 265 + } else { 266 + layersCreated++ 267 + } 268 + } 269 + 270 + // Calculate total size from all layers 271 + var totalSize int64 272 + for _, layer := range req.Manifest.Layers { 273 + totalSize += layer.Size 274 + } 275 + totalSize += req.Manifest.Config.Size // Add config blob size 276 + 277 + // Create Bluesky post if enabled 278 + var postURI string 279 + postCreated := false 280 + if postsEnabled { 281 + // Extract manifest digest from first layer (or use config digest as fallback) 282 + manifestDigest := req.Manifest.Config.Digest 283 + if len(req.Manifest.Layers) > 0 { 284 + manifestDigest = req.Manifest.Layers[0].Digest 285 + } 286 + 287 + postURI, err = h.pds.CreateManifestPost( 288 + ctx, 289 + req.Repository, 290 + req.Tag, 291 + req.UserHandle, 292 + manifestDigest, 293 + totalSize, 294 + ) 295 + if err != nil { 296 + fmt.Printf("Failed to create manifest post: %v\n", err) 297 + } else { 298 + postCreated = true 299 + } 300 + } 301 + 302 + // Return response 303 + resp := map[string]any{ 304 + "success": layersCreated > 0 || postCreated, 305 + "layersCreated": layersCreated, 306 + "postCreated": postCreated, 307 + } 308 + if postURI != "" { 309 + resp["postUri"] = postURI 310 + } 311 + if err != nil && layersCreated == 0 && !postCreated { 312 + resp["error"] = err.Error() 313 + } 314 + 315 + RespondJSON(w, http.StatusOK, resp) 198 316 } 199 317 200 318 // requireBlobWriteAccess middleware - validates DPoP + OAuth and checks for blob:write permission
+2 -6
pkg/hold/pds/auth.go
··· 480 480 } 481 481 482 482 // Fetch public key from issuer's DID document 483 - publicKey, err := fetchPublicKeyFromDID(r.Context(), issuerDID, httpClient) 483 + publicKey, err := fetchPublicKeyFromDID(r.Context(), issuerDID) 484 484 if err != nil { 485 485 return nil, fmt.Errorf("failed to fetch public key for issuer %s: %w", issuerDID, err) 486 486 } ··· 502 502 // fetchPublicKeyFromDID fetches the public key from a DID document 503 503 // Supports did:plc and did:web 504 504 // Returns the atcrypto.PublicKey for signature verification 505 - func fetchPublicKeyFromDID(ctx context.Context, did string, httpClient HTTPClient) (atcrypto.PublicKey, error) { 506 - if httpClient == nil { 507 - httpClient = http.DefaultClient 508 - } 509 - 505 + func fetchPublicKeyFromDID(ctx context.Context, did string) (atcrypto.PublicKey, error) { 510 506 // Use indigo's identity resolution 511 507 directory := identity.DefaultDirectory() 512 508 atID, err := syntax.ParseAtIdentifier(did)
-28
pkg/hold/pds/auth_test.go
··· 239 239 return nil 240 240 } 241 241 242 - // mockDIDResolver is a simple mock for DID resolution that returns a fixed public key 243 - type mockDIDResolver struct { 244 - publicKeys map[string]atcrypto.PublicKey 245 - } 246 - 247 - // newMockDIDResolver creates a new mock DID resolver 248 - func newMockDIDResolver() *mockDIDResolver { 249 - return &mockDIDResolver{ 250 - publicKeys: make(map[string]atcrypto.PublicKey), 251 - } 252 - } 253 - 254 - // RegisterDID registers a DID with its public key 255 - func (m *mockDIDResolver) RegisterDID(did string, publicKey atcrypto.PublicKey) { 256 - m.publicKeys[did] = publicKey 257 - } 258 - 259 - // Do implements the HTTPClient interface for mocking DID resolution 260 - // This intercepts fetchPublicKeyFromDID's indigo directory calls 261 - func (m *mockDIDResolver) Do(req *http.Request) (*http.Response, error) { 262 - // This mock is not used directly - we'll need to inject the public key differently 263 - // For now, return a 404 to indicate DID resolution should use our registered keys 264 - return &http.Response{ 265 - StatusCode: http.StatusNotFound, 266 - Body: http.NoBody, 267 - }, nil 268 - } 269 - 270 242 // TestValidateServiceToken_ValidToken tests validation of a properly formed service token 271 243 func TestValidateServiceToken_ValidToken(t *testing.T) { 272 244 // This test validates token structure, audience, and expiration
+3 -3
pkg/hold/pds/events.go
··· 29 29 eventSeq int64 30 30 eventHistory []HistoricalEvent // Ring buffer for cursor backfill (deprecated, kept for compatibility) 31 31 maxHistory int 32 - holdDID string // DID of the hold for setting repo field 33 - db *sql.DB // Database for persistent event storage 34 - dbPath string // Path to database file 32 + holdDID string // DID of the hold for setting repo field 33 + db *sql.DB // Database for persistent event storage 34 + dbPath string // Path to database file 35 35 } 36 36 37 37 // Subscriber represents a WebSocket client subscribed to the firehose
+59
pkg/hold/pds/layer.go
··· 1 + package pds 2 + 3 + import ( 4 + "context" 5 + "fmt" 6 + 7 + "atcr.io/pkg/atproto" 8 + ) 9 + 10 + // CreateLayerRecord creates a new layer record in the hold's PDS 11 + // Returns the rkey and CID of the created record 12 + func (p *HoldPDS) CreateLayerRecord(ctx context.Context, record *atproto.LayerRecord) (string, string, error) { 13 + // Validate record 14 + if record.Type != atproto.LayerCollection { 15 + return "", "", fmt.Errorf("invalid record type: %s", record.Type) 16 + } 17 + 18 + if record.Digest == "" { 19 + return "", "", fmt.Errorf("digest is required") 20 + } 21 + 22 + if record.Size <= 0 { 23 + return "", "", fmt.Errorf("size must be positive") 24 + } 25 + 26 + // Create record with auto-generated TID rkey 27 + rkey, recordCID, err := p.repomgr.CreateRecord( 28 + ctx, 29 + p.uid, 30 + atproto.LayerCollection, 31 + record, 32 + ) 33 + 34 + if err != nil { 35 + return "", "", fmt.Errorf("failed to create layer record: %w", err) 36 + } 37 + 38 + return rkey, recordCID.String(), nil 39 + } 40 + 41 + // GetLayerRecord retrieves a specific layer record by rkey 42 + // Note: This is a simplified implementation. For production, you may need to pass the CID 43 + func (p *HoldPDS) GetLayerRecord(ctx context.Context, rkey string) (*atproto.LayerRecord, error) { 44 + // For now, we don't implement this as it's not needed for the manifest post feature 45 + // Full implementation would require querying the carstore with a specific CID 46 + return nil, fmt.Errorf("GetLayerRecord not yet implemented - use via XRPC listRecords instead") 47 + } 48 + 49 + // ListLayerRecords lists layer records with pagination 50 + // Returns records, next cursor (empty if no more), and error 51 + // Note: This is a simplified implementation. For production, consider adding filters 52 + // (by repository, user, digest, etc.) 
and proper pagination 53 + func (p *HoldPDS) ListLayerRecords(ctx context.Context, limit int, cursor string) ([]*atproto.LayerRecord, string, error) { 54 + // For now, return empty list - full implementation would query the carstore 55 + // This would require iterating over records in the collection and filtering 56 + // In practice, layer records are mainly for analytics and Bluesky posts, 57 + // not for runtime queries 58 + return nil, "", fmt.Errorf("ListLayerRecords not yet implemented") 59 + }
+294
pkg/hold/pds/layer_test.go
··· 1 + package pds 2 + 3 + import ( 4 + "testing" 5 + 6 + "atcr.io/pkg/atproto" 7 + ) 8 + 9 + func TestCreateLayerRecord(t *testing.T) { 10 + // Setup test PDS 11 + pds, ctx := setupTestPDS(t) 12 + 13 + tests := []struct { 14 + name string 15 + record *atproto.LayerRecord 16 + wantErr bool 17 + errSubstr string 18 + }{ 19 + { 20 + name: "valid layer record", 21 + record: atproto.NewLayerRecord( 22 + "sha256:abc123def456", 23 + 1048576, // 1 MB 24 + "application/vnd.oci.image.layer.v1.tar+gzip", 25 + "myapp", 26 + "did:plc:alice123", 27 + "alice.bsky.social", 28 + ), 29 + wantErr: false, 30 + }, 31 + { 32 + name: "valid layer record with large size", 33 + record: atproto.NewLayerRecord( 34 + "sha256:fedcba987654", 35 + 1073741824, // 1 GB 36 + "application/vnd.docker.image.rootfs.diff.tar.gzip", 37 + "debian", 38 + "did:plc:bob456", 39 + "bob.example.com", 40 + ), 41 + wantErr: false, 42 + }, 43 + { 44 + name: "invalid record type", 45 + record: &atproto.LayerRecord{ 46 + Type: "wrong.type", 47 + Digest: "sha256:abc123", 48 + Size: 1024, 49 + MediaType: "application/vnd.oci.image.layer.v1.tar", 50 + Repository: "test", 51 + UserDID: "did:plc:test", 52 + UserHandle: "test.example.com", 53 + }, 54 + wantErr: true, 55 + errSubstr: "invalid record type", 56 + }, 57 + { 58 + name: "missing digest", 59 + record: &atproto.LayerRecord{ 60 + Type: atproto.LayerCollection, 61 + Digest: "", 62 + Size: 1024, 63 + MediaType: "application/vnd.oci.image.layer.v1.tar", 64 + Repository: "test", 65 + UserDID: "did:plc:test", 66 + UserHandle: "test.example.com", 67 + }, 68 + wantErr: true, 69 + errSubstr: "digest is required", 70 + }, 71 + { 72 + name: "zero size", 73 + record: &atproto.LayerRecord{ 74 + Type: atproto.LayerCollection, 75 + Digest: "sha256:abc123", 76 + Size: 0, 77 + MediaType: "application/vnd.oci.image.layer.v1.tar", 78 + Repository: "test", 79 + UserDID: "did:plc:test", 80 + UserHandle: "test.example.com", 81 + }, 82 + wantErr: true, 83 + errSubstr: "size must be 
positive", 84 + }, 85 + { 86 + name: "negative size", 87 + record: &atproto.LayerRecord{ 88 + Type: atproto.LayerCollection, 89 + Digest: "sha256:abc123", 90 + Size: -1, 91 + MediaType: "application/vnd.oci.image.layer.v1.tar", 92 + Repository: "test", 93 + UserDID: "did:plc:test", 94 + UserHandle: "test.example.com", 95 + }, 96 + wantErr: true, 97 + errSubstr: "size must be positive", 98 + }, 99 + } 100 + 101 + for _, tt := range tests { 102 + t.Run(tt.name, func(t *testing.T) { 103 + rkey, cid, err := pds.CreateLayerRecord(ctx, tt.record) 104 + 105 + if tt.wantErr { 106 + if err == nil { 107 + t.Errorf("CreateLayerRecord() expected error containing %q, got nil", tt.errSubstr) 108 + return 109 + } 110 + if tt.errSubstr != "" && !contains(err.Error(), tt.errSubstr) { 111 + t.Errorf("CreateLayerRecord() error = %v, want error containing %q", err, tt.errSubstr) 112 + } 113 + return 114 + } 115 + 116 + if err != nil { 117 + t.Errorf("CreateLayerRecord() unexpected error: %v", err) 118 + return 119 + } 120 + 121 + if rkey == "" { 122 + t.Error("CreateLayerRecord() returned empty rkey") 123 + } 124 + 125 + if cid == "" { 126 + t.Error("CreateLayerRecord() returned empty CID") 127 + } 128 + 129 + t.Logf("Created layer record: rkey=%s, cid=%s", rkey, cid) 130 + }) 131 + } 132 + } 133 + 134 + func TestCreateLayerRecord_MultipleRecords(t *testing.T) { 135 + // Test creating multiple layer records for the same manifest 136 + pds, ctx := setupTestPDS(t) 137 + 138 + layers := []struct { 139 + digest string 140 + size int64 141 + }{ 142 + {"sha256:layer1abc123", 1024}, 143 + {"sha256:layer2def456", 2048}, 144 + {"sha256:layer3ghi789", 4096}, 145 + } 146 + 147 + createdRKeys := make(map[string]bool) 148 + 149 + for i, layer := range layers { 150 + record := atproto.NewLayerRecord( 151 + layer.digest, 152 + layer.size, 153 + "application/vnd.oci.image.layer.v1.tar+gzip", 154 + "multi-layer-app", 155 + "did:plc:test123", 156 + "test.example.com", 157 + ) 158 + 159 + rkey, cid, err 
:= pds.CreateLayerRecord(ctx, record) 160 + if err != nil { 161 + t.Fatalf("CreateLayerRecord() for layer %d failed: %v", i, err) 162 + } 163 + 164 + // Ensure unique rkeys 165 + if createdRKeys[rkey] { 166 + t.Errorf("CreateLayerRecord() returned duplicate rkey: %s", rkey) 167 + } 168 + createdRKeys[rkey] = true 169 + 170 + t.Logf("Layer %d: rkey=%s, cid=%s", i, rkey, cid) 171 + } 172 + 173 + if len(createdRKeys) != len(layers) { 174 + t.Errorf("Created %d unique rkeys, want %d", len(createdRKeys), len(layers)) 175 + } 176 + } 177 + 178 + func TestNewLayerRecord(t *testing.T) { 179 + // Test the layer record constructor 180 + digest := "sha256:abc123def456" 181 + size := int64(1048576) 182 + mediaType := "application/vnd.oci.image.layer.v1.tar+gzip" 183 + repository := "myapp" 184 + userDID := "did:plc:alice123" 185 + userHandle := "alice.bsky.social" 186 + 187 + record := atproto.NewLayerRecord(digest, size, mediaType, repository, userDID, userHandle) 188 + 189 + if record == nil { 190 + t.Fatal("NewLayerRecord() returned nil") 191 + } 192 + 193 + // Verify all fields are set correctly 194 + if record.Type != atproto.LayerCollection { 195 + t.Errorf("Type = %q, want %q", record.Type, atproto.LayerCollection) 196 + } 197 + 198 + if record.Digest != digest { 199 + t.Errorf("Digest = %q, want %q", record.Digest, digest) 200 + } 201 + 202 + if record.Size != size { 203 + t.Errorf("Size = %d, want %d", record.Size, size) 204 + } 205 + 206 + if record.MediaType != mediaType { 207 + t.Errorf("MediaType = %q, want %q", record.MediaType, mediaType) 208 + } 209 + 210 + if record.Repository != repository { 211 + t.Errorf("Repository = %q, want %q", record.Repository, repository) 212 + } 213 + 214 + if record.UserDID != userDID { 215 + t.Errorf("UserDID = %q, want %q", record.UserDID, userDID) 216 + } 217 + 218 + if record.UserHandle != userHandle { 219 + t.Errorf("UserHandle = %q, want %q", record.UserHandle, userHandle) 220 + } 221 + 222 + if record.CreatedAt == "" { 223 + 
t.Error("CreatedAt is empty") 224 + } 225 + 226 + t.Logf("Created layer record: %+v", record) 227 + } 228 + 229 + func TestLayerRecord_FieldValidation(t *testing.T) { 230 + // Test various field values 231 + tests := []struct { 232 + name string 233 + digest string 234 + size int64 235 + mediaType string 236 + repository string 237 + userDID string 238 + userHandle string 239 + }{ 240 + { 241 + name: "typical OCI layer", 242 + digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", 243 + size: 12582912, // 12 MB 244 + mediaType: "application/vnd.oci.image.layer.v1.tar+gzip", 245 + repository: "hsm-secrets-operator", 246 + userDID: "did:plc:evan123", 247 + userHandle: "evan.jarrett.net", 248 + }, 249 + { 250 + name: "Docker layer format", 251 + digest: "sha256:abc123", 252 + size: 1024, 253 + mediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", 254 + repository: "nginx", 255 + userDID: "did:plc:user456", 256 + userHandle: "user.example.com", 257 + }, 258 + { 259 + name: "uncompressed layer", 260 + digest: "sha256:def456", 261 + size: 2048, 262 + mediaType: "application/vnd.oci.image.layer.v1.tar", 263 + repository: "alpine", 264 + userDID: "did:plc:user789", 265 + userHandle: "user.bsky.social", 266 + }, 267 + } 268 + 269 + for _, tt := range tests { 270 + t.Run(tt.name, func(t *testing.T) { 271 + record := atproto.NewLayerRecord( 272 + tt.digest, 273 + tt.size, 274 + tt.mediaType, 275 + tt.repository, 276 + tt.userDID, 277 + tt.userHandle, 278 + ) 279 + 280 + if record == nil { 281 + t.Fatal("NewLayerRecord() returned nil") 282 + } 283 + 284 + // Verify the record can be created 285 + if record.Type != atproto.LayerCollection { 286 + t.Errorf("Type = %q, want %q", record.Type, atproto.LayerCollection) 287 + } 288 + 289 + if record.Digest != tt.digest { 290 + t.Errorf("Digest = %q, want %q", record.Digest, tt.digest) 291 + } 292 + }) 293 + } 294 + }
+150
pkg/hold/pds/manifest_post.go
··· 1 + package pds 2 + 3 + import ( 4 + "context" 5 + "fmt" 6 + "strings" 7 + "time" 8 + 9 + bsky "github.com/bluesky-social/indigo/api/bsky" 10 + ) 11 + 12 + // CreateManifestPost creates a Bluesky post announcing a manifest upload 13 + // Includes facets for clickable mentions and links 14 + func (p *HoldPDS) CreateManifestPost( 15 + ctx context.Context, 16 + repository, tag, userHandle, digest string, 17 + totalSize int64, 18 + ) (string, error) { 19 + now := time.Now() 20 + 21 + // Build AppView repository URL 22 + appViewURL := fmt.Sprintf("https://atcr.io/r/%s/%s", userHandle, repository) 23 + 24 + // Format post text components 25 + digestShort := formatDigest(digest) 26 + sizeStr := formatSize(totalSize) 27 + repoWithTag := fmt.Sprintf("%s:%s", repository, tag) 28 + 29 + // Build text: "@alice.bsky.social just pushed hsm-secrets-operator:latest\nDigest: sha256:abc...def Size: 12.2 MB" 30 + text := fmt.Sprintf("@%s just pushed %s\nDigest: %s Size: %s", userHandle, repoWithTag, digestShort, sizeStr) 31 + 32 + // Create facets for mentions and links 33 + facets := buildFacets(text, userHandle, repoWithTag, appViewURL) 34 + 35 + // Create post struct with facets 36 + post := &bsky.FeedPost{ 37 + LexiconTypeID: "app.bsky.feed.post", 38 + Text: text, 39 + Facets: facets, 40 + CreatedAt: now.Format(time.RFC3339), 41 + } 42 + 43 + // Create record with auto-generated TID 44 + rkey, recordCID, err := p.repomgr.CreateRecord( 45 + ctx, 46 + p.uid, 47 + "app.bsky.feed.post", 48 + post, 49 + ) 50 + 51 + if err != nil { 52 + return "", fmt.Errorf("failed to create manifest post: %w", err) 53 + } 54 + 55 + // Build ATProto URI for the post 56 + postURI := fmt.Sprintf("at://%s/app.bsky.feed.post/%s", p.did, rkey) 57 + 58 + fmt.Printf("Created manifest post: %s (cid: %s)\n", postURI, recordCID) 59 + 60 + return postURI, nil 61 + } 62 + 63 + // formatDigest truncates digest to first 7 and last 7 chars 64 + // Example: sha256:abc1234567890...fedcba9876543210 -> 
sha256:abc1234...6543210
109 + byteStart := int64(len(text[:mentionStart])) 110 + byteEnd := int64(len(text[:mentionStart+len(mentionText)])) 111 + 112 + facets = append(facets, &bsky.RichtextFacet{ 113 + Index: &bsky.RichtextFacet_ByteSlice{ 114 + ByteStart: byteStart, 115 + ByteEnd: byteEnd, 116 + }, 117 + Features: []*bsky.RichtextFacet_Features_Elem{ 118 + { 119 + RichtextFacet_Mention: &bsky.RichtextFacet_Mention{ 120 + Did: "", // Will be resolved by Bluesky from handle 121 + }, 122 + }, 123 + }, 124 + }) 125 + } 126 + 127 + // Find repository link: "hsm-secrets-operator:latest" 128 + linkStart := strings.Index(text, repoWithTag) 129 + if linkStart >= 0 { 130 + // Calculate byte offsets 131 + byteStart := int64(len(text[:linkStart])) 132 + byteEnd := int64(len(text[:linkStart+len(repoWithTag)])) 133 + 134 + facets = append(facets, &bsky.RichtextFacet{ 135 + Index: &bsky.RichtextFacet_ByteSlice{ 136 + ByteStart: byteStart, 137 + ByteEnd: byteEnd, 138 + }, 139 + Features: []*bsky.RichtextFacet_Features_Elem{ 140 + { 141 + RichtextFacet_Link: &bsky.RichtextFacet_Link{ 142 + Uri: appViewURL, 143 + }, 144 + }, 145 + }, 146 + }) 147 + } 148 + 149 + return facets 150 + }
+335
pkg/hold/pds/manifest_post_test.go
··· 1 + package pds 2 + 3 + import ( 4 + "strings" 5 + "testing" 6 + 7 + bsky "github.com/bluesky-social/indigo/api/bsky" 8 + ) 9 + 10 + func TestFormatDigest(t *testing.T) { 11 + tests := []struct { 12 + name string 13 + digest string 14 + expected string 15 + }{ 16 + { 17 + name: "standard sha256 digest", 18 + digest: "sha256:abc1234567890fedcba9876543210", 19 + expected: "sha256:abc1234...6543210", // Last 7 chars of hash 20 + }, 21 + { 22 + name: "short digest (no truncation)", 23 + digest: "sha256:abc123", 24 + expected: "sha256:abc123", 25 + }, 26 + { 27 + name: "non-sha256 digest", 28 + digest: "sha512:abc123", 29 + expected: "sha512:abc123", 30 + }, 31 + { 32 + name: "real sha256 digest", 33 + digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", 34 + expected: "sha256:e692418...7fc331f", // Last 7 chars are "7fc331f" 35 + }, 36 + } 37 + 38 + for _, tt := range tests { 39 + t.Run(tt.name, func(t *testing.T) { 40 + result := formatDigest(tt.digest) 41 + if result != tt.expected { 42 + t.Errorf("formatDigest(%q) = %q, want %q", tt.digest, result, tt.expected) 43 + } 44 + }) 45 + } 46 + } 47 + 48 + func TestFormatSize(t *testing.T) { 49 + tests := []struct { 50 + name string 51 + bytes int64 52 + expected string 53 + }{ 54 + { 55 + name: "bytes", 56 + bytes: 512, 57 + expected: "512 B", 58 + }, 59 + { 60 + name: "kilobytes", 61 + bytes: 1024, 62 + expected: "1.0 KB", 63 + }, 64 + { 65 + name: "kilobytes with decimal", 66 + bytes: 1536, // 1.5 KB 67 + expected: "1.5 KB", 68 + }, 69 + { 70 + name: "megabytes", 71 + bytes: 1048576, // 1 MB 72 + expected: "1.0 MB", 73 + }, 74 + { 75 + name: "megabytes with decimal", 76 + bytes: 12582912, // 12 MB 77 + expected: "12.0 MB", 78 + }, 79 + { 80 + name: "gigabytes", 81 + bytes: 1073741824, // 1 GB 82 + expected: "1.0 GB", 83 + }, 84 + { 85 + name: "gigabytes with decimal", 86 + bytes: 2147483648, // 2 GB 87 + expected: "2.0 GB", 88 + }, 89 + { 90 + name: "zero bytes", 91 + bytes: 0, 92 + 
expected: "0 B", 93 + }, 94 + } 95 + 96 + for _, tt := range tests { 97 + t.Run(tt.name, func(t *testing.T) { 98 + result := formatSize(tt.bytes) 99 + if result != tt.expected { 100 + t.Errorf("formatSize(%d) = %q, want %q", tt.bytes, result, tt.expected) 101 + } 102 + }) 103 + } 104 + } 105 + 106 + func TestBuildFacets(t *testing.T) { 107 + tests := []struct { 108 + name string 109 + text string 110 + userHandle string 111 + repoWithTag string 112 + appViewURL string 113 + wantFacets int // number of facets expected 114 + }{ 115 + { 116 + name: "standard post with mention and link", 117 + text: "@alice.bsky.social just pushed myapp:latest\nDigest: sha256:abc...def Size: 12.2 MB", 118 + userHandle: "alice.bsky.social", 119 + repoWithTag: "myapp:latest", 120 + appViewURL: "https://atcr.io/r/alice.bsky.social/myapp", 121 + wantFacets: 2, 122 + }, 123 + { 124 + name: "no matches found", 125 + text: "random text", 126 + userHandle: "alice.bsky.social", 127 + repoWithTag: "myapp:latest", 128 + appViewURL: "https://atcr.io/r/alice.bsky.social/myapp", 129 + wantFacets: 0, 130 + }, 131 + { 132 + name: "only mention found", 133 + text: "@alice.bsky.social did something", 134 + userHandle: "alice.bsky.social", 135 + repoWithTag: "myapp:latest", 136 + appViewURL: "https://atcr.io/r/alice.bsky.social/myapp", 137 + wantFacets: 1, 138 + }, 139 + } 140 + 141 + for _, tt := range tests { 142 + t.Run(tt.name, func(t *testing.T) { 143 + facets := buildFacets(tt.text, tt.userHandle, tt.repoWithTag, tt.appViewURL) 144 + 145 + if len(facets) != tt.wantFacets { 146 + t.Errorf("buildFacets() returned %d facets, want %d", len(facets), tt.wantFacets) 147 + } 148 + 149 + // Verify facet structure for standard case 150 + if tt.name == "standard post with mention and link" && len(facets) == 2 { 151 + // Check mention facet 152 + mentionFacet := facets[0] 153 + if mentionFacet.Index == nil { 154 + t.Error("mention facet has nil Index") 155 + } 156 + if len(mentionFacet.Features) != 1 { 157 + 
t.Errorf("mention facet has %d features, want 1", len(mentionFacet.Features)) 158 + } 159 + if mentionFacet.Features[0].RichtextFacet_Mention == nil { 160 + t.Error("mention facet feature is not a mention") 161 + } 162 + 163 + // Check link facet 164 + linkFacet := facets[1] 165 + if linkFacet.Index == nil { 166 + t.Error("link facet has nil Index") 167 + } 168 + if len(linkFacet.Features) != 1 { 169 + t.Errorf("link facet has %d features, want 1", len(linkFacet.Features)) 170 + } 171 + if linkFacet.Features[0].RichtextFacet_Link == nil { 172 + t.Error("link facet feature is not a link") 173 + } 174 + if linkFacet.Features[0].RichtextFacet_Link.Uri != tt.appViewURL { 175 + t.Errorf("link facet URI = %q, want %q", linkFacet.Features[0].RichtextFacet_Link.Uri, tt.appViewURL) 176 + } 177 + } 178 + }) 179 + } 180 + } 181 + 182 + func TestBuildFacets_ByteOffsets(t *testing.T) { 183 + // Test that byte offsets are correctly calculated 184 + text := "@alice.bsky.social just pushed myapp:latest" 185 + userHandle := "alice.bsky.social" 186 + repoWithTag := "myapp:latest" 187 + appViewURL := "https://atcr.io/r/alice.bsky.social/myapp" 188 + 189 + facets := buildFacets(text, userHandle, repoWithTag, appViewURL) 190 + 191 + if len(facets) != 2 { 192 + t.Fatalf("expected 2 facets, got %d", len(facets)) 193 + } 194 + 195 + // Check mention facet byte offsets 196 + mentionFacet := facets[0] 197 + mentionText := "@alice.bsky.social" 198 + expectedStart := int64(0) // mention is at the start 199 + expectedEnd := int64(len(mentionText)) 200 + 201 + if mentionFacet.Index.ByteStart != expectedStart { 202 + t.Errorf("mention ByteStart = %d, want %d", mentionFacet.Index.ByteStart, expectedStart) 203 + } 204 + if mentionFacet.Index.ByteEnd != expectedEnd { 205 + t.Errorf("mention ByteEnd = %d, want %d", mentionFacet.Index.ByteEnd, expectedEnd) 206 + } 207 + 208 + // Verify the mention text extraction 209 + extractedMention := text[mentionFacet.Index.ByteStart:mentionFacet.Index.ByteEnd] 
210 + if extractedMention != mentionText { 211 + t.Errorf("extracted mention = %q, want %q", extractedMention, mentionText) 212 + } 213 + 214 + // Check link facet byte offsets 215 + linkFacet := facets[1] 216 + linkStart := len("@alice.bsky.social just pushed ") 217 + expectedLinkStart := int64(linkStart) 218 + expectedLinkEnd := int64(linkStart + len(repoWithTag)) 219 + 220 + if linkFacet.Index.ByteStart != expectedLinkStart { 221 + t.Errorf("link ByteStart = %d, want %d", linkFacet.Index.ByteStart, expectedLinkStart) 222 + } 223 + if linkFacet.Index.ByteEnd != expectedLinkEnd { 224 + t.Errorf("link ByteEnd = %d, want %d", linkFacet.Index.ByteEnd, expectedLinkEnd) 225 + } 226 + 227 + // Verify the link text extraction 228 + extractedLink := text[linkFacet.Index.ByteStart:linkFacet.Index.ByteEnd] 229 + if extractedLink != repoWithTag { 230 + t.Errorf("extracted link = %q, want %q", extractedLink, repoWithTag) 231 + } 232 + } 233 + 234 + func TestBuildFacets_UTF8Handling(t *testing.T) { 235 + // Test with Unicode characters to ensure byte offsets work correctly 236 + text := "@alice.bsky.social just pushed 🚀myapp:latest" 237 + userHandle := "alice.bsky.social" 238 + repoWithTag := "🚀myapp:latest" // Note: emoji is multi-byte 239 + appViewURL := "https://atcr.io/r/alice.bsky.social/myapp" 240 + 241 + facets := buildFacets(text, userHandle, repoWithTag, appViewURL) 242 + 243 + if len(facets) != 2 { 244 + t.Fatalf("expected 2 facets, got %d", len(facets)) 245 + } 246 + 247 + // Verify that byte extraction works with UTF-8 248 + mentionFacet := facets[0] 249 + extractedMention := text[mentionFacet.Index.ByteStart:mentionFacet.Index.ByteEnd] 250 + expectedMention := "@alice.bsky.social" 251 + if extractedMention != expectedMention { 252 + t.Errorf("extracted mention = %q, want %q", extractedMention, expectedMention) 253 + } 254 + 255 + linkFacet := facets[1] 256 + extractedLink := text[linkFacet.Index.ByteStart:linkFacet.Index.ByteEnd] 257 + if extractedLink != 
repoWithTag { 258 + t.Errorf("extracted link = %q, want %q", extractedLink, repoWithTag) 259 + } 260 + } 261 + 262 + func TestBuildFacets_NoOverlap(t *testing.T) { 263 + // Ensure facets don't overlap 264 + text := "@alice.bsky.social just pushed myapp:latest" 265 + userHandle := "alice.bsky.social" 266 + repoWithTag := "myapp:latest" 267 + appViewURL := "https://atcr.io/r/alice.bsky.social/myapp" 268 + 269 + facets := buildFacets(text, userHandle, repoWithTag, appViewURL) 270 + 271 + if len(facets) != 2 { 272 + t.Fatalf("expected 2 facets, got %d", len(facets)) 273 + } 274 + 275 + // Facets should not overlap 276 + facet1 := facets[0] 277 + facet2 := facets[1] 278 + 279 + if facet1.Index.ByteEnd > facet2.Index.ByteStart { 280 + t.Errorf("facets overlap: facet1 ends at %d, facet2 starts at %d", 281 + facet1.Index.ByteEnd, facet2.Index.ByteStart) 282 + } 283 + } 284 + 285 + func TestBuildFacets_RealWorldExample(t *testing.T) { 286 + // Test with the actual example from the requirements 287 + repository := "hsm-secrets-operator" 288 + tag := "latest" 289 + userHandle := "evan.jarrett.net" 290 + digest := "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f" 291 + totalSize := int64(12800000) // ~12.2 MB 292 + 293 + repoWithTag := repository + ":" + tag 294 + digestShort := formatDigest(digest) 295 + sizeStr := formatSize(totalSize) 296 + 297 + text := "@" + userHandle + " just pushed " + repoWithTag + "\nDigest: " + digestShort + " Size: " + sizeStr 298 + appViewURL := "https://atcr.io/r/" + userHandle + "/" + repository 299 + 300 + facets := buildFacets(text, userHandle, repoWithTag, appViewURL) 301 + 302 + // Should have 2 facets: mention and link 303 + if len(facets) != 2 { 304 + t.Fatalf("expected 2 facets, got %d", len(facets)) 305 + } 306 + 307 + // Verify the complete post structure 308 + post := &bsky.FeedPost{ 309 + LexiconTypeID: "app.bsky.feed.post", 310 + Text: text, 311 + Facets: facets, 312 + } 313 + 314 + if post.Text == "" { 315 + 
t.Error("post text is empty") 316 + } 317 + 318 + if len(post.Facets) != 2 { 319 + t.Errorf("post has %d facets, want 2", len(post.Facets)) 320 + } 321 + 322 + // Verify text contains expected components 323 + expectedTexts := []string{ 324 + "@" + userHandle, 325 + repoWithTag, 326 + digestShort, 327 + sizeStr, 328 + } 329 + 330 + for _, expected := range expectedTexts { 331 + if !strings.Contains(text, expected) { 332 + t.Errorf("post text missing expected component: %q", expected) 333 + } 334 + } 335 + }
+2 -2
pkg/hold/pds/repomgr.go
··· 139 139 } 140 140 141 141 func (rm *RepoManager) lockUser(ctx context.Context, user models.Uid) func() { 142 - ctx, span := otel.Tracer("repoman").Start(ctx, "userLock") 142 + _, span := otel.Tracer("repoman").Start(ctx, "userLock") 143 143 defer span.End() 144 144 145 145 rm.lklk.Lock() ··· 1062 1062 return nil 1063 1063 }) 1064 1064 if err != nil { 1065 - return fmt.Errorf("process new repo (current rev: %s): %w:", currev, err) 1065 + return fmt.Errorf("process new repo (current rev: %s): %w", currev, err) 1066 1066 } 1067 1067 1068 1068 return nil
+2 -1
pkg/hold/pds/server.go
··· 20 20 // init registers our custom ATProto types with indigo's lexutil type registry 21 21 // This allows repomgr.GetRecord to automatically unmarshal our types 22 22 func init() { 23 - // Register captain, crew, and tangled profile record types 23 + // Register captain, crew, tangled profile, and layer record types 24 24 // These must match the $type field in the records 25 25 lexutil.RegisterType(atproto.CaptainCollection, &atproto.CaptainRecord{}) 26 26 lexutil.RegisterType(atproto.CrewCollection, &atproto.CrewRecord{}) 27 + lexutil.RegisterType(atproto.LayerCollection, &atproto.LayerRecord{}) 27 28 lexutil.RegisterType(atproto.TangledProfileCollection, &atproto.TangledProfileRecord{}) 28 29 } 29 30
+1 -1
pkg/hold/pds/xrpc.go
··· 339 339 // buildProfileResponse builds a profile response map (shared by GetProfile and GetProfiles) 340 340 func (h *XRPCHandler) buildProfileResponse(ctx context.Context) map[string]any { 341 341 // Get profile record from repo 342 - _, profileVal, err := h.pds.repomgr.GetRecord( 342 + _, profileVal, _ := h.pds.repomgr.GetRecord( 343 343 ctx, 344 344 h.pds.uid, 345 345 "app.bsky.actor.profile",