···8080| `/xrpc/io.atcr.hold.requestCrew` | POST | auth | Request crew membership |
8181| `/xrpc/io.atcr.hold.exportUserData` | GET | auth | GDPR data export |
8282| `/xrpc/io.atcr.hold.getQuota` | GET | none | Get user quota info |
8383+| `/xrpc/io.atcr.hold.getLayersForManifest` | GET | none | Get layer records for a manifest AT-URI |
8484+| `/xrpc/io.atcr.hold.image.getConfig` | GET | none | Get OCI image config record for a manifest digest |
8385| `/xrpc/io.atcr.hold.listTiers` | GET | none | List hold's available tiers with quotas and features (scanOnPush) |
8486| `/xrpc/io.atcr.hold.updateCrewTier` | POST | appview token | Update crew member's tier |
8587
+1-1
pkg/appview/db/delete.go
···3636 }
37373838 // 3. Delete user (cascades to manifests, tags, stars, annotations, etc.)
3939- if err := DeleteUserData(db, did); err != nil {
3939+ if _, err := DeleteUserData(db, did); err != nil {
4040 slog.Error("Failed to delete user data", "did", did, "error", err)
4141 return fmt.Errorf("failed to delete user data: %w", err)
4242 }
+23-6
pkg/appview/db/models.go
···124124125125// PlatformInfo represents platform information (OS/Architecture)
type PlatformInfo struct {
	OS             string // platform OS (from manifest_references.platform_os)
	Architecture   string // platform CPU architecture
	Variant        string // architecture variant, empty if none
	OSVersion      string // platform OS version, empty if none
	Digest         string // child platform manifest digest (for manifest lists)
	HoldEndpoint   string // hold endpoint for this platform manifest
	CompressedSize int64  // sum of layer sizes (compressed)
}
134135135136// TagWithPlatforms extends Tag with platform information
···140141 IsMultiArch bool
141142 HasAttestations bool // true if manifest list contains attestation references
142143 ArtifactType string // container-image, helm-chart, unknown
144144+ CompressedSize int64 // sum of layer sizes for single-arch tags
143145}
144146145147// ManifestWithMetadata extends Manifest with tags and platform information
···153155 Reachable bool // Whether the hold endpoint is reachable
154156 Pending bool // Whether health check is still in progress
155157 // Note: ArtifactType is available via embedded Manifest struct
158158+}
// ManifestEntry is a unified view model for the tags tab.
// Every entry is a manifest — labeled by tag name or digest.
type ManifestEntry struct {
	Label           string         // tag name, or digest if untagged
	Digest          string         // manifest digest
	IsTagged        bool           // true when Label is a tag name rather than a digest
	CreatedAt       time.Time      // tag or manifest creation time
	HoldEndpoint    string         // hold endpoint serving this manifest
	Platforms       []PlatformInfo // per-platform details; single-arch entries carry one element
	IsMultiArch     bool           // true for manifest lists / image indexes
	HasAttestations bool           // true if the manifest list contains attestation references
	ArtifactType    string         // container-image, helm-chart, unknown
	CompressedSize  int64          // for single-arch
}
157174158175// AttestationDetail represents an attestation manifest and its layers
+95-71
pkg/appview/db/queries.go
···692692 return err
693693}
694694695695-// GetTagsWithPlatforms returns all tags for a repository with platform information
// LatestTagInfo holds the most recent tag name and its artifact type.
type LatestTagInfo struct {
	Tag          string // most recently created tag name
	ArtifactType string // manifest artifact type; defaults to "container-image" when unset
}
700700+701701+// RepositoryExists checks if any manifests exist for a given repository.
702702+func RepositoryExists(db DBTX, did, repository string) (bool, error) {
703703+ var count int
704704+ err := db.QueryRow(`SELECT COUNT(*) FROM manifests WHERE did = ? AND repository = ? LIMIT 1`, did, repository).Scan(&count)
705705+ if err != nil {
706706+ return false, err
707707+ }
708708+ return count > 0, nil
709709+}
710710+711711+// GetLatestTag returns the most recently created tag and its artifact type for a repository.
712712+// Returns nil if no tags exist.
713713+func GetLatestTag(db DBTX, did, repository string) (*LatestTagInfo, error) {
714714+ var info LatestTagInfo
715715+ err := db.QueryRow(`
716716+ SELECT t.tag, COALESCE(m.artifact_type, 'container-image')
717717+ FROM tags t
718718+ JOIN manifests m ON t.digest = m.digest AND t.did = m.did AND t.repository = m.repository
719719+ WHERE t.did = ? AND t.repository = ?
720720+ ORDER BY t.created_at DESC LIMIT 1
721721+ `, did, repository).Scan(&info.Tag, &info.ArtifactType)
722722+ if err != nil {
723723+ return nil, nil // no tags is not an error
724724+ }
725725+ return &info, nil
726726+}
727727+728728+// CountTags returns the total number of tags for a repository.
729729+func CountTags(db DBTX, did, repository string) (int, error) {
730730+ var count int
731731+ err := db.QueryRow(`SELECT COUNT(*) FROM tags WHERE did = ? AND repository = ?`, did, repository).Scan(&count)
732732+ return count, err
733733+}
734734+735735+// GetTagsWithPlatforms returns tags for a repository with platform information
696736// Only multi-arch tags (manifest lists) have platform info in manifest_references
697737// Single-arch tags will have empty Platforms slice (platform is obvious for single-arch)
698738// Attestation references (unknown/unknown platforms) are filtered out but tracked via HasAttestations
699699-func GetTagsWithPlatforms(db DBTX, did, repository string) ([]TagWithPlatforms, error) {
739739+func GetTagsWithPlatforms(db DBTX, did, repository string, limit, offset int) ([]TagWithPlatforms, error) {
700740 rows, err := db.Query(`
741741+ WITH paged_tags AS (
742742+ SELECT id, did, repository, tag, digest, created_at
743743+ FROM tags
744744+ WHERE did = ? AND repository = ?
745745+ ORDER BY created_at DESC
746746+ LIMIT ? OFFSET ?
747747+ )
701748 SELECT
702749 t.id,
703750 t.did,
···714761 COALESCE(mr.platform_os_version, '') as platform_os_version,
715762 COALESCE(mr.is_attestation, 0) as is_attestation,
716763 COALESCE(mr.digest, '') as child_digest,
717717- COALESCE(child_m.hold_endpoint, m.hold_endpoint, '') as child_hold_endpoint
718718- FROM tags t
764764+ COALESCE(child_m.hold_endpoint, m.hold_endpoint, '') as child_hold_endpoint,
765765+ COALESCE((SELECT SUM(l.size) FROM layers l WHERE l.manifest_id = COALESCE(child_m.id, m.id)), 0) as compressed_size
766766+ FROM paged_tags t
719767 JOIN manifests m ON t.digest = m.digest AND t.did = m.did AND t.repository = m.repository
720768 LEFT JOIN manifest_references mr ON m.id = mr.manifest_id
721769 LEFT JOIN manifests child_m ON mr.digest = child_m.digest AND child_m.did = t.did AND child_m.repository = t.repository
722722- WHERE t.did = ? AND t.repository = ?
723770 ORDER BY t.created_at DESC, mr.reference_index
724724- `, did, repository)
771771+ `, did, repository, limit, offset)
725772726773 if err != nil {
727774 return nil, err
···738785 var platformOS, platformArch, platformVariant, platformOSVersion string
739786 var isAttestation bool
740787 var childDigest, childHoldEndpoint string
788788+ var compressedSize int64
741789742790 if err := rows.Scan(&t.ID, &t.DID, &t.Repository, &t.Tag, &t.Digest, &t.CreatedAt,
743791 &mediaType, &artifactType, &holdEndpoint,
744792 &platformOS, &platformArch, &platformVariant, &platformOSVersion,
745745- &isAttestation, &childDigest, &childHoldEndpoint); err != nil {
793793+ &isAttestation, &childDigest, &childHoldEndpoint, &compressedSize); err != nil {
746794 return nil, err
747795 }
748796···750798 tagKey := t.Tag
751799 if _, exists := tagMap[tagKey]; !exists {
752800 tagMap[tagKey] = &TagWithPlatforms{
753753- Tag: t,
754754- HoldEndpoint: holdEndpoint,
755755- Platforms: []PlatformInfo{},
756756- ArtifactType: artifactType,
801801+ Tag: t,
802802+ HoldEndpoint: holdEndpoint,
803803+ Platforms: []PlatformInfo{},
804804+ ArtifactType: artifactType,
805805+ CompressedSize: compressedSize, // for single-arch (no manifest_references row)
757806 }
758807 tagOrder = append(tagOrder, tagKey)
759808 }
···768817 // Add platform info if present (only for multi-arch manifest lists)
769818 if platformOS != "" || platformArch != "" {
770819 tagMap[tagKey].Platforms = append(tagMap[tagKey].Platforms, PlatformInfo{
771771- OS: platformOS,
772772- Architecture: platformArch,
773773- Variant: platformVariant,
774774- OSVersion: platformOSVersion,
775775- Digest: childDigest,
776776- HoldEndpoint: childHoldEndpoint,
820820+ OS: platformOS,
821821+ Architecture: platformArch,
822822+ Variant: platformVariant,
823823+ OSVersion: platformOSVersion,
824824+ Digest: childDigest,
825825+ HoldEndpoint: childHoldEndpoint,
826826+ CompressedSize: compressedSize,
777827 })
778828 }
779829 }
···809859//
810860// Due to ON DELETE CASCADE in the schema, deleting from users will automatically
811861// cascade to: manifests, tags, layers, references, annotations, stars, repo_pages, etc.
812812-func DeleteUserData(db DBTX, did string) error {
862862+func DeleteUserData(db DBTX, did string) (bool, error) {
813863 result, err := db.Exec(`DELETE FROM users WHERE did = ?`, did)
814864 if err != nil {
815815- return fmt.Errorf("failed to delete user: %w", err)
865865+ return false, fmt.Errorf("failed to delete user: %w", err)
816866 }
817867818868 rowsAffected, _ := result.RowsAffected()
819819- if rowsAffected == 0 {
820820- // User didn't exist, nothing to delete
821821- return nil
822822- }
823823-824824- return nil
869869+ return rowsAffected > 0, nil
825870}
826871827872// GetManifest fetches a single manifest by digest
···10861131 if manifests[i].IsManifestList {
10871132 platformRows, err := db.Query(`
10881133 SELECT
10891089- mr.platform_os,
10901090- mr.platform_architecture,
10911091- mr.platform_variant,
10921092- mr.platform_os_version,
10931093- COALESCE(mr.is_attestation, 0) as is_attestation
11341134+ COALESCE(mr.platform_os, '') as platform_os,
11351135+ COALESCE(mr.platform_architecture, '') as platform_architecture,
11361136+ COALESCE(mr.platform_variant, '') as platform_variant,
11371137+ COALESCE(mr.platform_os_version, '') as platform_os_version,
11381138+ COALESCE(mr.is_attestation, 0) as is_attestation,
11391139+ COALESCE(mr.digest, '') as child_digest,
11401140+ COALESCE(child_m.hold_endpoint, '') as child_hold_endpoint,
11411141+ COALESCE((SELECT SUM(l.size) FROM layers l WHERE l.manifest_id = child_m.id), 0) as compressed_size
10941142 FROM manifest_references mr
11431143+ LEFT JOIN manifests child_m ON mr.digest = child_m.digest AND child_m.did = ? AND child_m.repository = ?
10951144 WHERE mr.manifest_id = ?
10961145 ORDER BY mr.reference_index
10971097- `, manifests[i].ID)
11461146+ `, manifests[i].DID, manifests[i].Repository, manifests[i].ID)
1098114710991148 if err != nil {
11001149 return nil, err
···11031152 manifests[i].Platforms = []PlatformInfo{}
11041153 for platformRows.Next() {
11051154 var p PlatformInfo
11061106- var os, arch, variant, osVersion sql.NullString
11071155 var isAttestation bool
1108115611091109- if err := platformRows.Scan(&os, &arch, &variant, &osVersion, &isAttestation); err != nil {
11571157+ if err := platformRows.Scan(&p.OS, &p.Architecture, &p.Variant, &p.OSVersion,
11581158+ &isAttestation, &p.Digest, &p.HoldEndpoint, &p.CompressedSize); err != nil {
11101159 platformRows.Close()
11111160 return nil, err
11121161 }
···11141163 // Track if manifest list has attestations
11151164 if isAttestation {
11161165 manifests[i].HasAttestations = true
11171117- // Skip attestation references in platform display
11181166 continue
11191119- }
11201120-11211121- if os.Valid {
11221122- p.OS = os.String
11231123- }
11241124- if arch.Valid {
11251125- p.Architecture = arch.String
11261126- }
11271127- if variant.Valid {
11281128- p.Variant = variant.String
11291129- }
11301130- if osVersion.Valid {
11311131- p.OSVersion = osVersion.String
11321167 }
1133116811341169 manifests[i].Platforms = append(manifests[i].Platforms, p)
···11901225 // Determine if manifest list
11911226 m.IsManifestList = strings.Contains(m.MediaType, "index") || strings.Contains(m.MediaType, "manifest.list")
1192122711931193- // If this is a manifest list, get platform details
12281228+ // If this is a manifest list, get platform details with child digests and sizes
11941229 if m.IsManifestList {
11951230 platforms, err := db.Query(`
11961231 SELECT
11971197- mr.platform_os,
11981198- mr.platform_architecture,
11991199- mr.platform_variant,
12001200- mr.platform_os_version,
12011201- COALESCE(mr.is_attestation, 0) as is_attestation
12321232+ COALESCE(mr.platform_os, '') as platform_os,
12331233+ COALESCE(mr.platform_architecture, '') as platform_architecture,
12341234+ COALESCE(mr.platform_variant, '') as platform_variant,
12351235+ COALESCE(mr.platform_os_version, '') as platform_os_version,
12361236+ COALESCE(mr.is_attestation, 0) as is_attestation,
12371237+ COALESCE(mr.digest, '') as child_digest,
12381238+ COALESCE(child_m.hold_endpoint, '') as child_hold_endpoint,
12391239+ COALESCE((SELECT SUM(l.size) FROM layers l WHERE l.manifest_id = child_m.id), 0) as compressed_size
12021240 FROM manifest_references mr
12411241+ LEFT JOIN manifests child_m ON mr.digest = child_m.digest AND child_m.did = ? AND child_m.repository = ?
12031242 WHERE mr.manifest_id = ?
12041243 ORDER BY mr.reference_index
12051205- `, m.ID)
12441244+ `, m.DID, m.Repository, m.ID)
1206124512071246 if err != nil {
12081247 return nil, err
···12121251 m.Platforms = []PlatformInfo{}
12131252 for platforms.Next() {
12141253 var p PlatformInfo
12151215- var os, arch, variant, osVersion sql.NullString
12161254 var isAttestation bool
1217125512181218- if err := platforms.Scan(&os, &arch, &variant, &osVersion, &isAttestation); err != nil {
12561256+ if err := platforms.Scan(&p.OS, &p.Architecture, &p.Variant, &p.OSVersion,
12571257+ &isAttestation, &p.Digest, &p.HoldEndpoint, &p.CompressedSize); err != nil {
12191258 return nil, err
12201259 }
1221126012221222- // Track if manifest list has attestations
12231261 if isAttestation {
12241262 m.HasAttestations = true
12251225- // Skip attestation references in platform display
12261263 continue
12271227- }
12281228-12291229- if os.Valid {
12301230- p.OS = os.String
12311231- }
12321232- if arch.Valid {
12331233- p.Architecture = arch.String
12341234- }
12351235- if variant.Valid {
12361236- p.Variant = variant.String
12371237- }
12381238- if osVersion.Valid {
12391239- p.OSVersion = osVersion.String
12401264 }
1241126512421266 m.Platforms = append(m.Platforms, p)
+6-4
pkg/appview/db/queries_test.go
···882882 t.Fatalf("Failed to insert single-arch tag: %v", err)
883883 }
884884885885- tagsWithPlatforms, err := GetTagsWithPlatforms(db, testUser.DID, "myapp")
885885+ tagsWithPlatforms, err := GetTagsWithPlatforms(db, testUser.DID, "myapp", 100, 0)
886886 if err != nil {
887887 t.Fatalf("Failed to get tags with platforms: %v", err)
888888 }
···951951 t.Fatalf("Failed to insert multi-arch tag: %v", err)
952952 }
953953954954- multiTagsWithPlatforms, err := GetTagsWithPlatforms(db, testUser.DID, "multiapp")
954954+ multiTagsWithPlatforms, err := GetTagsWithPlatforms(db, testUser.DID, "multiapp", 100, 0)
955955 if err != nil {
956956 t.Fatalf("Failed to get multi-arch tags with platforms: %v", err)
957957 }
···12801280 }
1281128112821282 // Delete user data
12831283- if err := DeleteUserData(db, testUser.DID); err != nil {
12831283+ if _, err := DeleteUserData(db, testUser.DID); err != nil {
12841284 t.Fatalf("Failed to delete user data: %v", err)
12851285 }
12861286···13031303 }
1304130413051305 // Test idempotency - deleting non-existent user should not error
13061306- if err := DeleteUserData(db, testUser.DID); err != nil {
13061306+ if deleted, err := DeleteUserData(db, testUser.DID); err != nil {
13071307 t.Errorf("Deleting non-existent user should not error, got: %v", err)
13081308+ } else if deleted {
13091309+ t.Errorf("Deleting non-existent user should return false, got true")
13081310 }
13091311}
13101312
+265-142
pkg/appview/handlers/repository.go
···66 "log/slog"
77 "net/http"
88 "net/url"
99+ "strconv"
910 "strings"
1011 "sync"
1112 "time"
···5051 owner.Handle = resolvedHandle
5152 }
52535353- // Fetch tags with platform information
5454- tagsWithPlatforms, err := db.GetTagsWithPlatforms(h.ReadOnlyDB, owner.DID, repository)
5454+ // Check if repository exists
5555+ exists, err := db.RepositoryExists(h.ReadOnlyDB, owner.DID, repository)
5556 if err != nil {
5657 http.Error(w, err.Error(), http.StatusInternalServerError)
5758 return
5859 }
6060+ if !exists {
6161+ RenderNotFound(w, r, &h.BaseUIHandler)
6262+ return
6363+ }
59646060- // Fetch top-level manifests (filters out platform-specific manifests)
6161- manifests, err := db.GetTopLevelManifests(h.ReadOnlyDB, owner.DID, repository, 50, 0)
6565+ // Fetch latest tag for pull command
6666+ latestTag, err := db.GetLatestTag(h.ReadOnlyDB, owner.DID, repository)
6267 if err != nil {
6368 http.Error(w, err.Error(), http.StatusInternalServerError)
6469 return
6570 }
66716767- // Check health status for each manifest's hold endpoint (concurrent with 1s timeout)
6868- if h.HealthChecker != nil {
6969- // Create context with 1 second deadline for fast-fail
7070- ctx, cancel := context.WithTimeout(r.Context(), 1*time.Second)
7171- defer cancel()
7272-7373- var wg sync.WaitGroup
7474- var mu sync.Mutex
7575-7676- for i := range manifests {
7777- if manifests[i].HoldEndpoint == "" {
7878- // No hold endpoint, mark as unreachable
7979- manifests[i].Reachable = false
8080- manifests[i].Pending = false
8181- continue
8282- }
8383-8484- wg.Go(func() {
8585- endpoint := manifests[i].HoldEndpoint
8686-8787- // Try to get cached status first (instant)
8888- if cached := h.HealthChecker.GetCachedStatus(endpoint); cached != nil {
8989- mu.Lock()
9090- manifests[i].Reachable = cached.Reachable
9191- manifests[i].Pending = false
9292- mu.Unlock()
9393- return
9494- }
9595-9696- // Perform health check with timeout context
9797- reachable, err := h.HealthChecker.CheckHealth(ctx, endpoint)
9898-9999- mu.Lock()
100100- if ctx.Err() == context.DeadlineExceeded {
101101- // Timeout - mark as pending for HTMX polling
102102- manifests[i].Reachable = false
103103- manifests[i].Pending = true
104104- } else if err != nil {
105105- // Error - mark as unreachable
106106- manifests[i].Reachable = false
107107- manifests[i].Pending = false
108108- } else {
109109- // Success
110110- manifests[i].Reachable = reachable
111111- manifests[i].Pending = false
112112- }
113113- mu.Unlock()
114114- })
115115- }
116116-117117- // Wait for all checks to complete or timeout
118118- wg.Wait()
119119- } else {
120120- // If no health checker, assume all are reachable (backward compatibility)
121121- for i := range manifests {
122122- manifests[i].Reachable = true
123123- manifests[i].Pending = false
124124- }
125125- }
126126-127127- if len(tagsWithPlatforms) == 0 && len(manifests) == 0 {
128128- RenderNotFound(w, r, &h.BaseUIHandler)
129129- return
7272+ // Determine artifact type from latest tag
7373+ artifactType := "container-image"
7474+ latestTagName := ""
7575+ if latestTag != nil {
7676+ latestTagName = latestTag.Tag
7777+ artifactType = latestTag.ArtifactType
13078 }
1317913280 // Create repository summary
13381 repo := &db.Repository{
134134- Name: repository,
135135- TagCount: len(tagsWithPlatforms),
136136- ManifestCount: len(manifests),
8282+ Name: repository,
13783 }
1388413985 // Fetch repository metadata from annotations table
14086 metadata, err := db.GetRepositoryMetadata(h.ReadOnlyDB, owner.DID, repository)
14187 if err != nil {
14288 slog.Warn("Failed to fetch repository metadata", "error", err)
143143- // Continue without metadata on error
14489 } else {
14590 repo.Title = metadata["org.opencontainers.image.title"]
14691 repo.Description = metadata["org.opencontainers.image.description"]
···156101 stats, err := db.GetRepositoryStats(h.ReadOnlyDB, owner.DID, repository)
157102 if err != nil {
158103 slog.Warn("Failed to fetch repository stats", "error", err)
159159- // Continue with zero stats on error
160104 stats = &db.RepositoryStats{StarCount: 0}
161105 }
162106···164108 isStarred := false
165109 user := middleware.GetUser(r)
166110 if user != nil && h.Refresher != nil && h.Directory != nil {
167167- // Create ATProto client with session provider (uses DoWithSession for DPoP nonce safety)
168111 pdsClient := atproto.NewClientWithSessionProvider(user.PDSEndpoint, user.DID, h.Refresher)
169169-170170- // Check if star record exists
171112 rkey := atproto.StarRecordKey(owner.DID, repository)
172113 _, err := pdsClient.GetRecord(r.Context(), atproto.StarCollection, rkey)
173114 isStarred = (err == nil)
···182123 // Fetch README content from repo page record or annotations
183124 var readmeHTML template.HTML
184125185185- // Try repo page record from database (synced from PDS via Jetstream)
186126 repoPage, err := db.GetRepoPage(h.ReadOnlyDB, owner.DID, repository)
187127 if err == nil && repoPage != nil {
188188- // Use repo page avatar if present
189128 if repoPage.AvatarCID != "" {
190129 repo.IconURL = atproto.BlobCDNURL(owner.DID, repoPage.AvatarCID)
191130 }
192192- // Render description as markdown if present
193131 if repoPage.Description != "" && h.ReadmeFetcher != nil {
194132 html, err := h.ReadmeFetcher.RenderMarkdown([]byte(repoPage.Description))
195133 if err != nil {
···199137 }
200138 }
201139 }
202202- // Fall back to fetching README from URL annotations if no description in repo page
203140 if readmeHTML == "" && h.ReadmeFetcher != nil {
204204- // Fall back to fetching from URL annotations
205141 readmeURL := repo.ReadmeURL
206142 if readmeURL == "" && repo.SourceURL != "" {
207207- // Try to derive README URL from source URL
208143 readmeURL = readme.DeriveReadmeURL(repo.SourceURL, "main")
209144 if readmeURL == "" {
210145 readmeURL = readme.DeriveReadmeURL(repo.SourceURL, "master")
···220155 }
221156 }
222157223223- // Determine artifact type for header section from first tag
224224- // This is used for the "Pull this image/chart" header command
225225- artifactType := "container-image"
226226- if len(tagsWithPlatforms) > 0 {
227227- artifactType = tagsWithPlatforms[0].ArtifactType
228228- } else if len(manifests) > 0 {
229229- // Fallback to manifests if no tags
230230- artifactType = manifests[0].ArtifactType
231231- }
232232-233233- // Collect digests for batch scan-result requests, grouped by hold endpoint
234234- holdDigests := make(map[string][]string) // holdEndpoint → []hexDigest
235235- seen := make(map[string]bool) // dedup digests
236236- for _, t := range tagsWithPlatforms {
237237- if len(t.Platforms) > 0 {
238238- // Multi-arch: collect each platform's child digest
239239- for _, p := range t.Platforms {
240240- if p.Digest != "" && p.HoldEndpoint != "" && !seen[p.Digest] {
241241- seen[p.Digest] = true
242242- hex := strings.TrimPrefix(p.Digest, "sha256:")
243243- holdDigests[p.HoldEndpoint] = append(holdDigests[p.HoldEndpoint], hex)
244244- }
245245- }
246246- } else if t.HoldEndpoint != "" {
247247- // Single-arch: use tag's own digest
248248- if !seen[t.Digest] {
249249- seen[t.Digest] = true
250250- hex := strings.TrimPrefix(t.Digest, "sha256:")
251251- holdDigests[t.HoldEndpoint] = append(holdDigests[t.HoldEndpoint], hex)
252252- }
253253- }
254254- }
255255- var scanBatchParams []template.HTML
256256- for hold, digests := range holdDigests {
257257- scanBatchParams = append(scanBatchParams, template.HTML(
258258- "holdEndpoint="+url.QueryEscape(hold)+"&digests="+strings.Join(digests, ",")))
259259- }
260260-261158 // Build page meta
262159 title := owner.Handle + "/" + repository + " - " + h.ClientShortName
263160 if repo.Title != "" {
···284181285182 data := struct {
286183 PageData
287287- Meta *PageMeta
288288- Owner *db.User // Repository owner
289289- Repository *db.Repository // Repository summary
290290- Tags []db.TagWithPlatforms // Tags with platform info
291291- Manifests []db.ManifestWithMetadata // Top-level manifests only
292292- StarCount int
293293- PullCount int
294294- IsStarred bool
295295- IsOwner bool // Whether current user owns this repository
296296- ReadmeHTML template.HTML
297297- ArtifactType string // Dominant artifact type: container-image, helm-chart, unknown
298298- ScanBatchParams []template.HTML // Pre-encoded query strings for batch scan-result endpoint (one per hold)
184184+ Meta *PageMeta
185185+ Owner *db.User
186186+ Repository *db.Repository
187187+ LatestTag string
188188+ StarCount int
189189+ PullCount int
190190+ IsStarred bool
191191+ IsOwner bool
192192+ ReadmeHTML template.HTML
193193+ ArtifactType string
194194+ }{
195195+ PageData: NewPageData(r, &h.BaseUIHandler),
196196+ Meta: meta,
197197+ Owner: owner,
198198+ Repository: repo,
199199+ LatestTag: latestTagName,
200200+ StarCount: stats.StarCount,
201201+ PullCount: stats.PullCount,
202202+ IsStarred: isStarred,
203203+ IsOwner: isOwner,
204204+ ReadmeHTML: readmeHTML,
205205+ ArtifactType: artifactType,
206206+ }
207207+208208+ if err := h.Templates.ExecuteTemplate(w, "repository", data); err != nil {
209209+ http.Error(w, err.Error(), http.StatusInternalServerError)
210210+ return
211211+ }
212212+}
// RepositoryTagsHandler returns the tags+manifests HTMX partial for a repository.
// It embeds BaseUIHandler for shared access to templates, the read-only DB,
// the health checker, and registry configuration.
type RepositoryTagsHandler struct {
	BaseUIHandler
}
218218+219219+func (h *RepositoryTagsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
220220+ identifier := chi.URLParam(r, "handle")
221221+ repository := strings.TrimPrefix(chi.URLParam(r, "*"), "/")
222222+223223+ did, _, _, err := atproto.ResolveIdentity(r.Context(), identifier)
224224+ if err != nil {
225225+ http.Error(w, "Not found", http.StatusNotFound)
226226+ return
227227+ }
228228+229229+ owner, err := db.GetUserByDID(h.ReadOnlyDB, did)
230230+ if err != nil || owner == nil {
231231+ http.Error(w, "Not found", http.StatusNotFound)
232232+ return
233233+ }
234234+235235+ // Parse pagination
236236+ const pageSize = 50
237237+ offset := 0
238238+ if offsetStr := r.URL.Query().Get("offset"); offsetStr != "" {
239239+ if parsed, err := strconv.Atoi(offsetStr); err == nil && parsed > 0 {
240240+ offset = parsed
241241+ }
242242+ }
243243+244244+ // Count total tags for pagination
245245+ totalTags, err := db.CountTags(h.ReadOnlyDB, owner.DID, repository)
246246+ if err != nil {
247247+ http.Error(w, err.Error(), http.StatusInternalServerError)
248248+ return
249249+ }
250250+251251+ // Fetch tags with platform information and compressed sizes
252252+ tagsWithPlatforms, err := db.GetTagsWithPlatforms(h.ReadOnlyDB, owner.DID, repository, pageSize, offset)
253253+ if err != nil {
254254+ http.Error(w, err.Error(), http.StatusInternalServerError)
255255+ return
256256+ }
257257+258258+ // Fetch untagged manifests only on first page
259259+ var manifests []db.ManifestWithMetadata
260260+ if offset == 0 {
261261+ manifests, err = db.GetTopLevelManifests(h.ReadOnlyDB, owner.DID, repository, 50, 0)
262262+ if err != nil {
263263+ http.Error(w, err.Error(), http.StatusInternalServerError)
264264+ return
265265+ }
266266+ }
267267+268268+ // Check health status for each manifest's hold endpoint
269269+ if h.HealthChecker != nil {
270270+ ctx, cancel := context.WithTimeout(r.Context(), 1*time.Second)
271271+ defer cancel()
272272+273273+ var wg sync.WaitGroup
274274+ var mu sync.Mutex
275275+276276+ for i := range manifests {
277277+ if manifests[i].HoldEndpoint == "" {
278278+ manifests[i].Reachable = false
279279+ manifests[i].Pending = false
280280+ continue
281281+ }
282282+283283+ wg.Go(func() {
284284+ endpoint := manifests[i].HoldEndpoint
285285+286286+ if cached := h.HealthChecker.GetCachedStatus(endpoint); cached != nil {
287287+ mu.Lock()
288288+ manifests[i].Reachable = cached.Reachable
289289+ manifests[i].Pending = false
290290+ mu.Unlock()
291291+ return
292292+ }
293293+294294+ reachable, err := h.HealthChecker.CheckHealth(ctx, endpoint)
295295+296296+ mu.Lock()
297297+ if ctx.Err() == context.DeadlineExceeded {
298298+ manifests[i].Reachable = false
299299+ manifests[i].Pending = true
300300+ } else if err != nil {
301301+ manifests[i].Reachable = false
302302+ manifests[i].Pending = false
303303+ } else {
304304+ manifests[i].Reachable = reachable
305305+ manifests[i].Pending = false
306306+ }
307307+ mu.Unlock()
308308+ })
309309+ }
310310+311311+ wg.Wait()
312312+ } else {
313313+ for i := range manifests {
314314+ manifests[i].Reachable = true
315315+ manifests[i].Pending = false
316316+ }
317317+ }
318318+319319+ // Check if current user is the repository owner
320320+ isOwner := false
321321+ user := middleware.GetUser(r)
322322+ if user != nil {
323323+ isOwner = (user.DID == owner.DID)
324324+ }
325325+326326+ // Build unified entries list: tagged first, then untagged.
327327+ // Single-arch entries get a one-element Platforms slice so the template
328328+ // can always just range over .Platforms without branching.
329329+ var entries []db.ManifestEntry
330330+ for _, t := range tagsWithPlatforms {
331331+ platforms := t.Platforms
332332+ if len(platforms) == 0 {
333333+ platforms = []db.PlatformInfo{{
334334+ Digest: t.Digest,
335335+ HoldEndpoint: t.HoldEndpoint,
336336+ CompressedSize: t.CompressedSize,
337337+ }}
338338+ }
339339+ entries = append(entries, db.ManifestEntry{
340340+ Label: t.Tag.Tag,
341341+ Digest: t.Digest,
342342+ IsTagged: true,
343343+ CreatedAt: t.CreatedAt,
344344+ HoldEndpoint: t.HoldEndpoint,
345345+ Platforms: platforms,
346346+ IsMultiArch: t.IsMultiArch,
347347+ HasAttestations: t.HasAttestations,
348348+ ArtifactType: t.ArtifactType,
349349+ })
350350+ }
351351+ for _, m := range manifests {
352352+ if len(m.Tags) > 0 {
353353+ continue
354354+ }
355355+ platforms := m.Platforms
356356+ if len(platforms) == 0 {
357357+ platforms = []db.PlatformInfo{{
358358+ Digest: m.Digest,
359359+ HoldEndpoint: m.HoldEndpoint,
360360+ }}
361361+ }
362362+ entries = append(entries, db.ManifestEntry{
363363+ Label: m.Digest,
364364+ Digest: m.Digest,
365365+ IsTagged: false,
366366+ CreatedAt: m.CreatedAt,
367367+ HoldEndpoint: m.HoldEndpoint,
368368+ Platforms: platforms,
369369+ IsMultiArch: m.IsManifestList,
370370+ HasAttestations: m.HasAttestations,
371371+ ArtifactType: m.ArtifactType,
372372+ })
373373+ }
374374+375375+ // Collect digests for batch scan-result requests
376376+ holdDigests := make(map[string][]string)
377377+ seen := make(map[string]bool)
378378+ for _, e := range entries {
379379+ if len(e.Platforms) > 0 {
380380+ for _, p := range e.Platforms {
381381+ if p.Digest != "" && p.HoldEndpoint != "" && !seen[p.Digest] {
382382+ seen[p.Digest] = true
383383+ hex := strings.TrimPrefix(p.Digest, "sha256:")
384384+ holdDigests[p.HoldEndpoint] = append(holdDigests[p.HoldEndpoint], hex)
385385+ }
386386+ }
387387+ } else if e.HoldEndpoint != "" {
388388+ if !seen[e.Digest] {
389389+ seen[e.Digest] = true
390390+ hex := strings.TrimPrefix(e.Digest, "sha256:")
391391+ holdDigests[e.HoldEndpoint] = append(holdDigests[e.HoldEndpoint], hex)
392392+ }
393393+ }
394394+ }
395395+ var scanBatchParams []template.HTML
396396+ for hold, digests := range holdDigests {
397397+ // Chunk into batches of 50 to match the batch handler's limit
398398+ for i := 0; i < len(digests); i += 50 {
399399+ end := i + 50
400400+ if end > len(digests) {
401401+ end = len(digests)
402402+ }
403403+ scanBatchParams = append(scanBatchParams, template.HTML(
404404+ "holdEndpoint="+url.QueryEscape(hold)+"&digests="+strings.Join(digests[i:end], ",")))
405405+ }
406406+ }
407407+408408+ hasMore := offset+pageSize < totalTags
409409+ isFirstPage := offset == 0
410410+411411+ data := struct {
412412+ Owner *db.User
413413+ Repository *db.Repository
414414+ Entries []db.ManifestEntry
415415+ IsOwner bool
416416+ ScanBatchParams []template.HTML
417417+ RegistryURL string
418418+ HasMore bool
419419+ NextOffset int
420420+ IsFirstPage bool
299421 }{
300300- PageData: NewPageData(r, &h.BaseUIHandler),
301301- Meta: meta,
302422 Owner: owner,
303303- Repository: repo,
304304- Tags: tagsWithPlatforms,
305305- Manifests: manifests,
306306- StarCount: stats.StarCount,
307307- PullCount: stats.PullCount,
308308- IsStarred: isStarred,
423423+ Repository: &db.Repository{Name: repository},
424424+ Entries: entries,
309425 IsOwner: isOwner,
310310- ReadmeHTML: readmeHTML,
311311- ArtifactType: artifactType,
312426 ScanBatchParams: scanBatchParams,
427427+ RegistryURL: h.RegistryURL,
428428+ HasMore: hasMore,
429429+ NextOffset: offset + pageSize,
430430+ IsFirstPage: isFirstPage,
313431 }
314432315315- if err := h.Templates.ExecuteTemplate(w, "repository", data); err != nil {
433433+ w.Header().Set("Content-Type", "text/html; charset=utf-8")
434434+ templateName := "repo-tags"
435435+ if !isFirstPage {
436436+ templateName = "repo-tags-page"
437437+ }
438438+ if err := h.Templates.ExecuteTemplate(w, templateName, data); err != nil {
316439 http.Error(w, err.Error(), http.StatusInternalServerError)
317440 return
318441 }
+37-57
pkg/appview/handlers/scan_result.go
···4949 return
5050 }
51515252- // Resolve hold identity: holdEndpoint may be a DID or URL
5353- holdDID, err := atproto.ResolveHoldDID(r.Context(), holdEndpoint)
5454- if err != nil {
5555- slog.Debug("Failed to resolve hold DID", "holdEndpoint", holdEndpoint, "error", err)
5656- h.renderBadge(w, vulnBadgeData{Error: true})
5757- return
5858- }
5959-6060- // Check if this hold has a successor — scan records may live there instead
6161- resolvedHoldDID := resolveHoldSuccessor(h.ReadOnlyDB, holdDID)
6262-6363- // Resolve to HTTP endpoint URL. If successor redirected, resolve the new DID;
6464- // otherwise use the original holdEndpoint (which may already be a URL).
6565- holdURLTarget := holdEndpoint
6666- if resolvedHoldDID != holdDID {
6767- holdDID = resolvedHoldDID
6868- holdURLTarget = resolvedHoldDID
6969- }
7070- holdURL, err := atproto.ResolveHoldURL(r.Context(), holdURLTarget)
5252+ hold, err := ResolveHold(r.Context(), h.ReadOnlyDB, holdEndpoint)
7153 if err != nil {
7272- slog.Debug("Failed to resolve hold URL", "holdEndpoint", holdEndpoint, "error", err)
5454+ slog.Debug("Failed to resolve hold", "holdEndpoint", holdEndpoint, "error", err)
7355 h.renderBadge(w, vulnBadgeData{Error: true})
7456 return
7557 }
5858+ holdDID := hold.DID
5959+ holdURL := hold.URL
76607761 // Compute rkey from digest (strip sha256: prefix)
7862 rkey := strings.TrimPrefix(digest, "sha256:")
···226210 digests = digests[:50]
227211 }
228212229229- holdDID, err := atproto.ResolveHoldDID(r.Context(), holdEndpoint)
213213+ hold, err := ResolveHold(r.Context(), h.ReadOnlyDB, holdEndpoint)
230214 if err != nil {
231231- // Can't resolve hold — render empty OOB spans
232232- slog.Debug("Failed to resolve hold DID for batch scan", "holdEndpoint", holdEndpoint, "error", err)
215215+ slog.Debug("Failed to resolve hold for batch scan", "holdEndpoint", holdEndpoint, "error", err)
233216 w.Header().Set("Content-Type", "text/html")
234217 for _, d := range digests {
235218 fmt.Fprintf(w, `<span id="scan-badge-%s" hx-swap-oob="outerHTML"></span>`, template.HTMLEscapeString(d))
236219 }
237220 return
238221 }
239239-240240- // Check if this hold has a successor — scan records may live there instead
241241- resolvedHoldDID := resolveHoldSuccessor(h.ReadOnlyDB, holdDID)
242242-243243- // Resolve to HTTP endpoint URL. If successor redirected, resolve the new DID;
244244- // otherwise use the original holdEndpoint (which may already be a URL).
245245- holdURLTarget := holdEndpoint
246246- if resolvedHoldDID != holdDID {
247247- holdDID = resolvedHoldDID
248248- holdURLTarget = resolvedHoldDID
249249- }
250250- holdURL, err := atproto.ResolveHoldURL(r.Context(), holdURLTarget)
251251- if err != nil {
252252- slog.Debug("Failed to resolve hold URL for batch scan", "holdEndpoint", holdEndpoint, "error", err)
253253- w.Header().Set("Content-Type", "text/html")
254254- for _, d := range digests {
255255- fmt.Fprintf(w, `<span id="scan-badge-%s" hx-swap-oob="outerHTML"></span>`, template.HTMLEscapeString(d))
256256- }
257257- return
258258- }
222222+ holdDID := hold.DID
223223+ holdURL := hold.URL
259224260225 // Fetch scan records concurrently with a semaphore to limit parallelism
261226 type result struct {
···294259 }
295260}
296261297297-// resolveHoldSuccessor checks if a hold has a successor in the cached captain records.
298298-// Returns the successor DID if set, otherwise returns the original holdDID.
299299-// Single-hop only — does not follow chains.
300300-func resolveHoldSuccessor(database *sql.DB, holdDID string) string {
301301- if database == nil {
302302- return holdDID
262262+// ResolvedHold contains the resolved DID and URL for a hold endpoint,
263263+// after following any successor chain.
264264+type ResolvedHold struct {
265265+ DID string
266266+ URL string
267267+}
268268+269269+// ResolveHold resolves a hold endpoint (DID, URL, or hostname) to its final
270270+// DID and URL, following a single successor hop if one exists in the captain records.
271271+func ResolveHold(ctx context.Context, database *sql.DB, holdEndpoint string) (*ResolvedHold, error) {
272272+ holdDID, err := atproto.ResolveHoldDID(ctx, holdEndpoint)
273273+ if err != nil {
274274+ return nil, fmt.Errorf("resolve hold DID: %w", err)
303275 }
304304- captain, err := db.GetCaptainRecord(database, holdDID)
305305- if err != nil || captain == nil {
306306- return holdDID
276276+277277+ // Check for successor
278278+ resolveTarget := holdEndpoint
279279+ if database != nil {
280280+ captain, err := db.GetCaptainRecord(database, holdDID)
281281+ if err == nil && captain != nil && captain.Successor != "" {
282282+ slog.Debug("Following hold successor", "from", holdDID, "to", captain.Successor)
283283+ holdDID = captain.Successor
284284+ resolveTarget = captain.Successor
285285+ }
307286 }
308308- if captain.Successor != "" {
309309- slog.Debug("Scan result: following hold successor",
310310- "from", holdDID, "to", captain.Successor)
311311- return captain.Successor
287287+288288+ holdURL, err := atproto.ResolveHoldURL(ctx, resolveTarget)
289289+ if err != nil {
290290+ return nil, fmt.Errorf("resolve hold URL: %w", err)
312291 }
313313- return holdDID
292292+293293+ return &ResolvedHold{DID: holdDID, URL: holdURL}, nil
314294}
+65-6
pkg/appview/handlers/scan_result_test.go
···110110 if !strings.Contains(body, `data-tip="Low">3<`) {
111111 t.Error("Expected low count of 3")
112112 }
113113- // Should be clickable (has openVulnDetails)
114114- if !strings.Contains(body, "openVulnDetails") {
115115- t.Error("Expected body to contain openVulnDetails click handler")
113113+ // Should show vulnerability strip with tooltip
114114+ if !strings.Contains(body, "vuln-strip") {
115115+ t.Error("Expected body to contain vuln-strip class")
116116 }
117117}
118118···141141 if !strings.Contains(body, "badge-success") {
142142 t.Error("Expected body to contain badge-success for clean scan")
143143 }
144144- // Should NOT be clickable
145145- if strings.Contains(body, "openVulnDetails") {
146146- t.Error("Clean badge should not have openVulnDetails click handler")
144144+ // Clean badge should not have vuln-strip
145145+ if strings.Contains(body, "vuln-strip") {
146146+ t.Error("Clean badge should not have vuln-strip")
147147 }
148148}
149149···412412 // Should NOT contain vulnerability badges
413413 if strings.Contains(body, "badge-error") || strings.Contains(body, "Clean") {
414414 t.Error("Unreachable hold should not render badge content")
415415+ }
416416+}
417417+418418+// --- ResolveHold tests ---
419419+420420+func TestResolveHold_DirectURL(t *testing.T) {
421421+ // Mock hold that serves DID resolution
422422+ hold := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
423423+ if r.URL.Path == "/.well-known/atproto-did" {
424424+ w.Write([]byte("did:web:hold.example.com"))
425425+ return
426426+ }
427427+ http.Error(w, "not found", http.StatusNotFound)
428428+ }))
429429+ defer hold.Close()
430430+431431+ resolved, err := handlers.ResolveHold(t.Context(), nil, hold.URL)
432432+ if err != nil {
433433+ t.Fatalf("ResolveHold failed: %v", err)
434434+ }
435435+ if resolved.DID != "did:web:hold.example.com" {
436436+ t.Errorf("DID = %q, want %q", resolved.DID, "did:web:hold.example.com")
437437+ }
438438+ if resolved.URL != hold.URL {
439439+ t.Errorf("URL = %q, want %q", resolved.URL, hold.URL)
440440+ }
441441+}
442442+443443+func TestResolveHold_NilDB_NoSuccessor(t *testing.T) {
444444+ // With nil DB, successor check is skipped
445445+ hold := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
446446+ if r.URL.Path == "/.well-known/atproto-did" {
447447+ w.Write([]byte("did:web:hold.example.com"))
448448+ return
449449+ }
450450+ http.Error(w, "not found", http.StatusNotFound)
451451+ }))
452452+ defer hold.Close()
453453+454454+ resolved, err := handlers.ResolveHold(t.Context(), nil, hold.URL)
455455+ if err != nil {
456456+ t.Fatalf("ResolveHold failed: %v", err)
457457+ }
458458+ // Should resolve to the original hold since no DB to check successor
459459+ if resolved.DID != "did:web:hold.example.com" {
460460+ t.Errorf("DID = %q, want %q", resolved.DID, "did:web:hold.example.com")
461461+ }
462462+ if resolved.URL != hold.URL {
463463+ t.Errorf("URL = %q, want %q", resolved.URL, hold.URL)
464464+ }
465465+}
466466+467467+func TestResolveHold_Unreachable(t *testing.T) {
468468+ hold := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}))
469469+ hold.Close()
470470+471471+ _, err := handlers.ResolveHold(t.Context(), nil, hold.URL)
472472+ if err == nil {
473473+ t.Error("Expected error for unreachable hold")
415474 }
416475}
417476
+143-9
pkg/appview/handlers/vuln_details.go
···9797 return
9898 }
9999100100- holdDID, err := atproto.ResolveHoldDID(r.Context(), holdEndpoint)
100100+ hold, err := ResolveHold(r.Context(), h.ReadOnlyDB, holdEndpoint)
101101 if err != nil {
102102- slog.Debug("Failed to resolve hold DID", "holdEndpoint", holdEndpoint, "error", err)
103103- h.renderDetails(w, vulnDetailsData{Error: "Could not resolve hold identity"})
104104- return
105105- }
106106-107107- // Resolve to HTTP endpoint URL (handles DID, URL, or hostname)
108108- holdURL, err := atproto.ResolveHoldURL(r.Context(), holdEndpoint)
109109- if err != nil {
102102+ slog.Debug("Failed to resolve hold", "holdEndpoint", holdEndpoint, "error", err)
110103 h.renderDetails(w, vulnDetailsData{Error: "Could not resolve hold endpoint"})
111104 return
112105 }
106106+ holdDID := hold.DID
107107+ holdURL := hold.URL
113108114109 rkey := strings.TrimPrefix(digest, "sha256:")
115110···255250 slog.Warn("Failed to render vuln details", "error", err)
256251 }
257252}
// FetchVulnDetails fetches vulnerability scan details for a digest from a hold.
// This is the shared logic used by both VulnDetailsHandler and DigestDetailHandler.
// holdEndpoint should already be resolved (successor-aware) before calling this.
//
// The function never returns an error value; failures are reported through
// vulnDetailsData.Error. Note the staged fallbacks: failures before the scan
// record is decoded return an Error-only struct, while failures after it
// still carry Summary and ScannedAt so callers can render partial results.
func FetchVulnDetails(ctx context.Context, holdEndpoint, digest string) vulnDetailsData {
	holdDID, err := atproto.ResolveHoldDID(ctx, holdEndpoint)
	if err != nil {
		return vulnDetailsData{Error: "Could not resolve hold identity"}
	}

	holdURL, err := atproto.ResolveHoldURL(ctx, holdEndpoint)
	if err != nil {
		return vulnDetailsData{Error: "Could not resolve hold endpoint"}
	}

	// Scan records are keyed by the bare hex digest.
	rkey := strings.TrimPrefix(digest, "sha256:")

	// One 10s budget covers both the record fetch and the blob fetch below.
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()

	// Fetch the scan record
	scanURL := fmt.Sprintf("%s/xrpc/com.atproto.repo.getRecord?repo=%s&collection=%s&rkey=%s",
		holdURL,
		url.QueryEscape(holdDID),
		url.QueryEscape(atproto.ScanCollection),
		url.QueryEscape(rkey),
	)

	req, err := http.NewRequestWithContext(ctx, "GET", scanURL, nil)
	if err != nil {
		return vulnDetailsData{Error: "Failed to build request"}
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return vulnDetailsData{Error: "Hold service unreachable"}
	}
	defer resp.Body.Close()

	// Any non-200 (including server errors) is presented as "not found".
	if resp.StatusCode != http.StatusOK {
		return vulnDetailsData{Error: "No scan record found"}
	}

	// getRecord wraps the record in an envelope; only "value" is needed.
	var envelope struct {
		Value json.RawMessage `json:"value"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&envelope); err != nil {
		return vulnDetailsData{Error: "Failed to parse scan record"}
	}

	var scanRecord atproto.ScanRecord
	if err := json.Unmarshal(envelope.Value, &scanRecord); err != nil {
		return vulnDetailsData{Error: "Failed to parse scan record"}
	}

	summary := vulnSummary{
		Critical: scanRecord.Critical,
		High:     scanRecord.High,
		Medium:   scanRecord.Medium,
		Low:      scanRecord.Low,
		Total:    scanRecord.Total,
	}

	// Fetch the vulnerability report blob
	if scanRecord.VulnReportBlob == nil || scanRecord.VulnReportBlob.Ref.String() == "" {
		// Older/minimal scans recorded only counts — still useful to render.
		return vulnDetailsData{
			Summary:   summary,
			ScannedAt: scanRecord.ScannedAt,
			Error:     "No detailed vulnerability report available. Only summary counts were recorded.",
		}
	}

	blobCID := scanRecord.VulnReportBlob.Ref.String()
	blobURL := fmt.Sprintf("%s/xrpc/com.atproto.sync.getBlob?did=%s&cid=%s",
		holdURL,
		url.QueryEscape(holdDID),
		url.QueryEscape(blobCID),
	)

	blobReq, err := http.NewRequestWithContext(ctx, "GET", blobURL, nil)
	if err != nil {
		return vulnDetailsData{Summary: summary, ScannedAt: scanRecord.ScannedAt, Error: "Failed to build blob request"}
	}

	blobResp, err := http.DefaultClient.Do(blobReq)
	if err != nil {
		return vulnDetailsData{Summary: summary, ScannedAt: scanRecord.ScannedAt, Error: "Failed to fetch vulnerability report"}
	}
	defer blobResp.Body.Close()

	if blobResp.StatusCode != http.StatusOK {
		return vulnDetailsData{Summary: summary, ScannedAt: scanRecord.ScannedAt, Error: "Vulnerability report not accessible"}
	}

	// The blob is assumed to be a grype-format JSON report — TODO confirm
	// against the scanner that produced VulnReportBlob.
	var report grypeReport
	if err := json.NewDecoder(blobResp.Body).Decode(&report); err != nil {
		return vulnDetailsData{Summary: summary, ScannedAt: scanRecord.ScannedAt, Error: "Failed to parse vulnerability report"}
	}

	// Flatten matches into the view model, attaching advisory links where
	// the ID scheme is recognized (NVD for CVEs, GitHub for GHSAs).
	matches := make([]vulnMatch, 0, len(report.Matches))
	for _, m := range report.Matches {
		fixedIn := ""
		if len(m.Vulnerability.Fix.Versions) > 0 {
			fixedIn = strings.Join(m.Vulnerability.Fix.Versions, ", ")
		}

		cveURL := ""
		if strings.HasPrefix(m.Vulnerability.ID, "CVE-") {
			cveURL = "https://nvd.nist.gov/vuln/detail/" + m.Vulnerability.ID
		} else if strings.HasPrefix(m.Vulnerability.ID, "GHSA-") {
			cveURL = "https://github.com/advisories/" + m.Vulnerability.ID
		}

		matches = append(matches, vulnMatch{
			CVEID:    m.Vulnerability.ID,
			CVEURL:   cveURL,
			Severity: m.Vulnerability.Metadata.Severity,
			Package:  m.Package.Name,
			Version:  m.Package.Version,
			FixedIn:  fixedIn,
			Type:     m.Package.Type,
		})
	}

	// Order by severity rank first, then CVE ID for a stable listing.
	sort.Slice(matches, func(i, j int) bool {
		oi := severityOrder[matches[i].Severity]
		oj := severityOrder[matches[j].Severity]
		if oi != oj {
			return oi < oj
		}
		return matches[i].CVEID < matches[j].CVEID
	})

	return vulnDetailsData{
		Matches:   matches,
		Summary:   summary,
		ScannedAt: scanRecord.ScannedAt,
	}
}
+3-3
pkg/appview/handlers/vuln_details_test.go
···222222 t.Error("Expected body to contain fix version '1.2.4'")
223223 }
224224225225- // Should contain "No fix" for unfixed vuln
226226- if !strings.Contains(body, "No fix") {
227227- t.Error("Expected body to contain 'No fix' for unfixed vulnerability")
225225+ // Should contain "-" placeholder for unfixed vuln
226226+ if !strings.Contains(body, `opacity-40`) {
227227+ t.Error("Expected body to contain opacity-40 placeholder for unfixed vulnerability")
228228 }
229229230230 // Should contain a table
+107-30
pkg/appview/jetstream/backfill.go
···151151 strings.Contains(errStr, "Could not find repo") ||
152152 strings.Contains(errStr, "status 400") ||
153153 strings.Contains(errStr, "status 404") {
154154- if delErr := db.DeleteUserData(b.db, repo.DID); delErr != nil {
154154+ deleted, delErr := db.DeleteUserData(b.db, repo.DID)
155155+ if delErr != nil {
155156 slog.Warn("Backfill failed to delete data for removed repo", "did", repo.DID, "error", delErr)
156156- } else {
157157+ } else if deleted {
157158 slog.Info("Backfill cleaned up data for deleted/deactivated repo", "did", repo.DID)
158159 }
159160 } else {
···183184}
184185185186// backfillRepo backfills all records for a single repo/DID.
186186-// Per-record processing is wrapped in a single SQL transaction to batch writes
187187-// (one commit per repo instead of per-statement).
187187+// Records are fetched from PDS first, then network-dependent caches are warmed,
188188+// and finally DB writes happen in chunked transactions to batch writes while
189189+// staying under the remote SQLite transaction timeout (~5s on Bunny Database).
188190func (b *BackfillWorker) backfillRepo(ctx context.Context, did, collection string) (int, error) {
189191 // Resolve DID to get user's PDS endpoint
190192 pdsEndpoint, err := atproto.ResolveDIDToPDS(ctx, did)
···193195 }
194196195197 // Create a client for this user's PDS with the user's DID
196196- // This allows GetRecord to work properly with the repo parameter
197198 pdsClient := atproto.NewClient(pdsEndpoint, did, "")
198199199199- // Begin transaction for per-record processing (batches all writes into one commit)
200200- tx, err := b.db.Begin()
201201- if err != nil {
202202- return 0, fmt.Errorf("failed to begin transaction: %w", err)
203203- }
204204- defer tx.Rollback()
205205-206206- // Create a transactional processor — all DB writes go through this tx
207207- txProcessor := NewProcessor(tx, false, b.processor.statsCache)
208208-209200 var recordCursor string
210210- recordCount := 0
211201212202 // Track which records exist on the PDS for reconciliation
213203 var foundManifestDigests []string
214204 var foundTags []struct{ Repository, Tag string }
215205 foundStars := make(map[string]time.Time) // key: "ownerDID/repository", value: createdAt
216206217217- // Paginate through all records for this repo
207207+ // Phase 1: Collect all records from PDS (network I/O, no transaction)
208208+ var allRecords []atproto.Record
218209 for {
219210 records, cursor, err := pdsClient.ListRecordsForRepo(ctx, did, collection, 100, recordCursor)
220211 if err != nil {
221221- return recordCount, fmt.Errorf("failed to list records: %w", err)
212212+ return 0, fmt.Errorf("failed to list records: %w", err)
222213 }
223214224224- // Process each record
225215 for _, record := range records {
226226- // Track what we found for deletion reconciliation
227216 switch collection {
228217 case atproto.ManifestCollection:
229218 var manifestRecord atproto.ManifestRecord
···248237 }
249238 }
250239251251- if err := b.processRecordWith(ctx, txProcessor, did, collection, &record); err != nil {
252252- slog.Warn("Backfill failed to process record", "uri", record.URI, "error", err)
253253- continue
254254- }
255255- recordCount++
240240+ allRecords = append(allRecords, record)
256241 }
257242258258- // Check if there are more pages
259243 if cursor == "" {
260244 break
261245 }
246246+ recordCursor = cursor
247247+ }
262248263263- recordCursor = cursor
249249+ // Phase 2: Pre-warm caches outside any transaction so that ProcessRecord
250250+ // inside transactions hits only DB (no network I/O that could cause timeouts).
251251+252252+ // Ensure user exists in DB (resolves DID → handle/PDS, fetches profile)
253253+ switch collection {
254254+ case atproto.SailorProfileCollection:
255255+ if err := b.processor.EnsureUser(ctx, did); err != nil {
256256+ slog.Warn("Backfill failed to pre-ensure user", "did", did, "error", err)
257257+ }
258258+ case atproto.ManifestCollection, atproto.TagCollection, atproto.StarCollection, atproto.RepoPageCollection:
259259+ if err := b.processor.EnsureUserExists(ctx, did); err != nil {
260260+ slog.Warn("Backfill failed to pre-ensure user", "did", did, "error", err)
261261+ }
264262 }
265263266266- // Commit all per-record writes in one batch
267267- if err := tx.Commit(); err != nil {
268268- return 0, fmt.Errorf("failed to commit transaction: %w", err)
264264+ // Pre-cache hold DIDs and captain records referenced in records.
265265+ // ProcessSailorProfile calls ResolveHoldDID + queryCaptainFn,
266266+ // ProcessManifest calls ResolveHoldDID for legacy manifests.
267267+ b.prewarmHoldCaches(ctx, collection, allRecords)
268268+269269+ // Phase 3: Process records in chunked transactions.
270270+ // All network I/O should be cached by now, so transactions stay fast.
271271+ const chunkSize = 20
272272+ recordCount := 0
273273+274274+ for i := 0; i < len(allRecords); i += chunkSize {
275275+ end := i + chunkSize
276276+ if end > len(allRecords) {
277277+ end = len(allRecords)
278278+ }
279279+280280+ tx, err := b.db.Begin()
281281+ if err != nil {
282282+ return recordCount, fmt.Errorf("failed to begin transaction: %w", err)
283283+ }
284284+285285+ txProcessor := NewProcessor(tx, false, b.processor.statsCache)
286286+287287+ for j := i; j < end; j++ {
288288+ if err := b.processRecordWith(ctx, txProcessor, did, collection, &allRecords[j]); err != nil {
289289+ slog.Warn("Backfill failed to process record", "uri", allRecords[j].URI, "error", err)
290290+ continue
291291+ }
292292+ recordCount++
293293+ }
294294+295295+ if err := tx.Commit(); err != nil {
296296+ tx.Rollback()
297297+ return recordCount, fmt.Errorf("failed to commit transaction: %w", err)
298298+ }
269299 }
270300271301 // Reconciliation runs outside the transaction (involves network I/O and fewer writes)
···352382 }
353383354384 return nil
385385+}
386386+387387+// prewarmHoldCaches resolves hold DIDs and caches captain records before
388388+// records are processed inside transactions. This ensures ProcessRecord's
389389+// network-dependent code paths (ResolveHoldDID, queryCaptainRecord) hit
390390+// cached data so transactions stay fast and don't timeout.
391391+func (b *BackfillWorker) prewarmHoldCaches(ctx context.Context, collection string, records []atproto.Record) {
392392+ seen := make(map[string]bool)
393393+394394+ for _, record := range records {
395395+ var holdRef string
396396+397397+ switch collection {
398398+ case atproto.SailorProfileCollection:
399399+ var profileRecord atproto.SailorProfileRecord
400400+ if err := json.Unmarshal(record.Value, &profileRecord); err == nil {
401401+ holdRef = profileRecord.DefaultHold
402402+ }
403403+ case atproto.ManifestCollection:
404404+ var manifestRecord atproto.ManifestRecord
405405+ if err := json.Unmarshal(record.Value, &manifestRecord); err == nil {
406406+ // Only legacy manifests need network resolution (URL → DID)
407407+ if manifestRecord.HoldDID == "" && manifestRecord.HoldEndpoint != "" {
408408+ holdRef = manifestRecord.HoldEndpoint
409409+ }
410410+ }
411411+ default:
412412+ return // No hold references in other collections
413413+ }
414414+415415+ if holdRef == "" || seen[holdRef] {
416416+ continue
417417+ }
418418+ seen[holdRef] = true
419419+420420+ // Resolve hold identifier to DID (caches in resolver)
421421+ holdDID, err := atproto.ResolveHoldDID(ctx, holdRef)
422422+ if err != nil {
423423+ slog.Warn("Backfill failed to pre-resolve hold DID", "hold_ref", holdRef, "error", err)
424424+ continue
425425+ }
426426+427427+ // Pre-cache captain record (skips if cached within last hour)
428428+ if err := b.queryCaptainRecord(ctx, holdDID); err != nil {
429429+ slog.Warn("Backfill failed to pre-cache captain record", "hold_did", holdDID, "error", err)
430430+ }
431431+ }
355432}
356433357434// processRecordWith processes a single record using the given processor.
+1-1
pkg/appview/jetstream/processor.go
···925925 switch status {
926926 case "deleted":
927927 // Account permanently deleted - remove all cached data
928928- if err := db.DeleteUserData(p.db, did); err != nil {
928928+ if _, err := db.DeleteUserData(p.db, did); err != nil {
929929 slog.Error("Failed to delete user data for deleted account",
930930 "component", "processor",
931931 "did", did,
···4848 // Stored in hold's embedded PDS to track scan results per manifest
4949 ScanCollection = "io.atcr.hold.scan"
50505151+ // ImageConfigCollection is the collection name for OCI image configs
5252+ // Stored in hold's embedded PDS, one per manifest (keyed by manifest digest hex)
5353+ ImageConfigCollection = "io.atcr.hold.image.config"
5454+5155 // TangledProfileCollection is the collection name for tangled profiles
5256 // Stored in hold's embedded PDS (singleton record at rkey "self")
5357 TangledProfileCollection = "sh.tangled.actor.profile"
···818822func ScanRecordKey(manifestDigest string) string {
819823 // Remove the "sha256:" prefix - the hex digest is already a valid rkey
820824 return strings.TrimPrefix(manifestDigest, "sha256:")
825825+}
826826+827827+// ImageConfigRecord represents an OCI image config stored in the hold's embedded PDS
828828+// Collection: io.atcr.hold.image.config
829829+// One record per manifest, keyed by manifest digest hex (same pattern as ScanRecord)
830830+// Stores the full OCI config JSON so the appview can display layer history including empty layers
831831+type ImageConfigRecord struct {
832832+ Type string `json:"$type" cborgen:"$type"`
833833+ Manifest string `json:"manifest" cborgen:"manifest"` // AT-URI of the manifest
834834+ ConfigJSON string `json:"configJson" cborgen:"configJson"` // Raw OCI image config JSON
835835+ CreatedAt string `json:"createdAt" cborgen:"createdAt"` // RFC3339 timestamp
836836+}
837837+838838+// NewImageConfigRecord creates a new image config record
839839+func NewImageConfigRecord(manifestURI, configJSON string) *ImageConfigRecord {
840840+ return &ImageConfigRecord{
841841+ Type: ImageConfigCollection,
842842+ Manifest: manifestURI,
843843+ ConfigJSON: configJSON,
844844+ CreatedAt: time.Now().Format(time.RFC3339),
845845+ }
821846}
822847823848// TangledProfileRecord represents a Tangled profile for the hold
···815815 }
816816}
817817818818+// StartBackfillConfigs launches image config backfill in the background.
819819+// Creates io.atcr.hold.image.config records for manifests that don't have one yet
820820+// by fetching OCI config blobs from S3.
821821+func (gc *GarbageCollector) StartBackfillConfigs() bool {
822822+ return gc.startBackground("backfill-configs", "records", "Scanning for manifests missing image config records...", func(ctx context.Context) error {
823823+ _, err := gc.doBackfillConfigs(ctx)
824824+ return err
825825+ })
826826+}
827827+828828+// doBackfillConfigs creates image config records for manifests that are missing them.
829829+func (gc *GarbageCollector) doBackfillConfigs(ctx context.Context) (*GCResult, error) {
830830+ recordsIndex := gc.pds.RecordsIndex()
831831+ if recordsIndex == nil {
832832+ return nil, fmt.Errorf("records index not available")
833833+ }
834834+835835+ // Step 1: Collect unique manifest URIs from layer records
836836+ manifestURIs := make(map[string]bool)
837837+ cursor := ""
838838+ totalScanned := 0
839839+840840+ for {
841841+ records, nextCursor, err := recordsIndex.ListRecords(atproto.LayerCollection, 1000, cursor, true)
842842+ if err != nil {
843843+ return nil, fmt.Errorf("list layer records: %w", err)
844844+ }
845845+846846+ for _, rec := range records {
847847+ totalScanned++
848848+ layer, err := gc.decodeLayerRecord(ctx, rec)
849849+ if err != nil {
850850+ continue
851851+ }
852852+ manifestURIs[layer.Manifest] = true
853853+ }
854854+855855+ if nextCursor == "" {
856856+ break
857857+ }
858858+ cursor = nextCursor
859859+ }
860860+861861+ gc.logger.Info("Found unique manifests from layer records",
862862+ "manifests", len(manifestURIs),
863863+ "layersScanned", totalScanned)
864864+865865+ // Step 2: For each manifest, check if config record exists, create if not
866866+ start := time.Now()
867867+ result := &GCResult{}
868868+ created := int64(0)
869869+ skipped := int64(0)
870870+ processed := 0
871871+ httpClient := &http.Client{Timeout: 30 * time.Second}
872872+873873+ for manifestURI := range manifestURIs {
874874+ processed++
875875+ gc.setProgress("records",
876876+ fmt.Sprintf("Backfilling configs (%d/%d manifests)...", processed, len(manifestURIs)),
877877+ "backfill-configs")
878878+879879+ aturi, err := syntax.ParseATURI(manifestURI)
880880+ if err != nil {
881881+ gc.logger.Warn("Invalid manifest URI", "uri", manifestURI, "error", err)
882882+ continue
883883+ }
884884+885885+ manifestDigest := "sha256:" + aturi.RecordKey().String()
886886+887887+ // Check if config record already exists
888888+ if _, _, err := gc.pds.GetImageConfigRecord(ctx, manifestDigest); err == nil {
889889+ skipped++
890890+ continue
891891+ }
892892+893893+ userDID := aturi.Authority().String()
894894+ manifestRkey := aturi.RecordKey().String()
895895+896896+ pdsEndpoint, err := atproto.ResolveDIDToPDS(ctx, userDID)
897897+ if err != nil {
898898+ gc.logger.Warn("Failed to resolve PDS for backfill", "did", userDID, "error", err)
899899+ continue
900900+ }
901901+902902+ // Fetch manifest via getRecord to get config digest
903903+ reqURL := fmt.Sprintf("%s/xrpc/com.atproto.repo.getRecord?repo=%s&collection=%s&rkey=%s",
904904+ pdsEndpoint,
905905+ url.QueryEscape(userDID),
906906+ url.QueryEscape(atproto.ManifestCollection),
907907+ url.QueryEscape(manifestRkey))
908908+909909+ req, err := http.NewRequestWithContext(ctx, "GET", reqURL, nil)
910910+ if err != nil {
911911+ continue
912912+ }
913913+ resp, err := httpClient.Do(req)
914914+ if err != nil {
915915+ gc.logger.Warn("Failed to fetch manifest for backfill", "uri", manifestURI, "error", err)
916916+ continue
917917+ }
918918+ if resp.StatusCode != http.StatusOK {
919919+ resp.Body.Close()
920920+ continue
921921+ }
922922+923923+ var envelope struct {
924924+ Value json.RawMessage `json:"value"`
925925+ }
926926+ if err := json.NewDecoder(resp.Body).Decode(&envelope); err != nil {
927927+ resp.Body.Close()
928928+ continue
929929+ }
930930+ resp.Body.Close()
931931+932932+ var manifest atproto.ManifestRecord
933933+ if err := json.Unmarshal(envelope.Value, &manifest); err != nil {
934934+ continue
935935+ }
936936+937937+ if manifest.Config == nil || manifest.Config.Digest == "" {
938938+ continue
939939+ }
940940+941941+ // Fetch config blob from S3
942942+ configBytes, err := gc.s3.GetBytes(ctx, s3.BlobPath(manifest.Config.Digest))
943943+ if err != nil {
944944+ gc.logger.Warn("Failed to fetch config blob", "digest", manifest.Config.Digest, "error", err)
945945+ continue
946946+ }
947947+948948+ // Create image config record
949949+ configRecord := atproto.NewImageConfigRecord(manifestURI, string(configBytes))
950950+ if _, _, err := gc.pds.CreateImageConfigRecord(ctx, configRecord, manifestDigest); err != nil {
951951+ gc.logger.Warn("Failed to create image config record", "manifest", manifestURI, "error", err)
952952+ continue
953953+ }
954954+ created++
955955+ time.Sleep(200 * time.Millisecond) // throttle firehose events
956956+ }
957957+958958+ result.RecordsReconciled = created
959959+ result.Duration = time.Since(start)
960960+961961+ gc.mu.Lock()
962962+ gc.lastResult = result
963963+ gc.lastResultAt = time.Now()
964964+ gc.mu.Unlock()
965965+966966+ gc.logger.Info("Image config backfill complete", "created", created, "skipped", skipped)
967967+ return result, nil
968968+}
969969+818970// discoverUserDIDs returns all DIDs that may have manifests referencing this hold.
819971// Union of: captain owner + crew members + distinct DIDs from layer records.
820972func (gc *GarbageCollector) discoverUserDIDs(ctx context.Context) ([]string, error) {
+34-13
pkg/hold/oci/xrpc.go
···297297 // Build manifest AT-URI for layer records
298298 manifestURI := atproto.BuildManifestURI(req.UserDID, req.ManifestDigest)
299299300300- // Create layer records for each blob
301301- for _, layer := range req.Manifest.Layers {
302302- record := atproto.NewLayerRecord(
303303- layer.Digest,
304304- layer.Size,
305305- layer.MediaType,
306306- req.UserDID,
307307- manifestURI,
308308- )
300300+ // Skip layer record creation if records already exist for this manifest
301301+ existingLayers, _ := h.pds.ListLayerRecordsForManifest(ctx, manifestURI)
302302+ if len(existingLayers) > 0 {
303303+ layersCreated = len(existingLayers)
304304+ slog.Debug("Layer records already exist for manifest, skipping creation",
305305+ "manifestURI", manifestURI, "existing", len(existingLayers))
306306+ } else {
307307+ // Create layer records for each blob
308308+ for _, layer := range req.Manifest.Layers {
309309+ record := atproto.NewLayerRecord(
310310+ layer.Digest,
311311+ layer.Size,
312312+ layer.MediaType,
313313+ req.UserDID,
314314+ manifestURI,
315315+ )
316316+317317+ _, _, err := h.pds.CreateLayerRecord(ctx, record)
318318+ if err != nil {
319319+ slog.Error("Failed to create layer record", "error", err)
320320+ // Continue creating other records
321321+ } else {
322322+ layersCreated++
323323+ }
324324+ }
325325+ }
309326310310- _, _, err := h.pds.CreateLayerRecord(ctx, record)
327327+ // Store OCI image config as a separate record (best-effort)
328328+ if req.Manifest.Config.Digest != "" {
329329+ configBytes, err := h.s3Service.GetBytes(ctx, s3.BlobPath(req.Manifest.Config.Digest))
311330 if err != nil {
312312- slog.Error("Failed to create layer record", "error", err)
313313- // Continue creating other records
331331+ slog.Warn("Failed to fetch config blob for image config record", "error", err, "configDigest", req.Manifest.Config.Digest)
314332 } else {
315315- layersCreated++
333333+ configRecord := atproto.NewImageConfigRecord(manifestURI, string(configBytes))
334334+ if _, _, err := h.pds.CreateImageConfigRecord(ctx, configRecord, req.ManifestDigest); err != nil {
335335+ slog.Warn("Failed to create image config record", "error", err)
336336+ }
316337 }
317338 }
318339
+1-1
pkg/hold/oci/xrpc_test.go
···678678679679 // 6. Verify blob was moved to final location in mock S3
680680 finalS3Key := "test-prefix/docker/registry/v2/blobs/sha256/ab/abc123def456/data"
681681- if mockS3Client.GetObject(finalS3Key) == nil {
681681+ if mockS3Client.GetObjectBytes(finalS3Key) == nil {
682682 t.Errorf("Expected blob at final S3 key %s", finalS3Key)
683683 }
684684}
+84
pkg/hold/pds/layer.go
···9090 return nil, fmt.Errorf("GetLayerRecord not yet implemented - use via XRPC listRecords instead")
9191}
92929393+// UpdateLayerRecord updates an existing layer record by rkey.
9494+func (p *HoldPDS) UpdateLayerRecord(ctx context.Context, rkey string, record *atproto.LayerRecord) error {
9595+ _, err := p.repomgr.UpdateRecord(ctx, p.uid, atproto.LayerCollection, rkey, record)
9696+ if err != nil {
9797+ return fmt.Errorf("failed to update layer record: %w", err)
9898+ }
9999+ return nil
100100+}
101101+93102// DeleteLayerRecord deletes a layer record by rkey
94103// This deletes from both the repo (MST) and the records index
95104func (p *HoldPDS) DeleteLayerRecord(ctx context.Context, rkey string) error {
···294303295304 return records, nil
296305}
306306+307307+// ListLayerRecordsForManifest returns all layer records for a specific manifest AT-URI.
308308+func (p *HoldPDS) ListLayerRecordsForManifest(ctx context.Context, manifestURI string) ([]*atproto.LayerRecord, error) {
309309+ if p.recordsIndex == nil {
310310+ return nil, fmt.Errorf("records index not available")
311311+ }
312312+313313+ session, err := p.carstore.ReadOnlySession(p.uid)
314314+ if err != nil {
315315+ return nil, fmt.Errorf("failed to create session: %w", err)
316316+ }
317317+318318+ head, err := p.carstore.GetUserRepoHead(ctx, p.uid)
319319+ if err != nil {
320320+ return nil, fmt.Errorf("failed to get repo head: %w", err)
321321+ }
322322+323323+ if !head.Defined() {
324324+ return []*atproto.LayerRecord{}, nil
325325+ }
326326+327327+ repoHandle, err := repo.OpenRepo(ctx, session, head)
328328+ if err != nil {
329329+ return nil, fmt.Errorf("failed to open repo: %w", err)
330330+ }
331331+332332+ var records []*atproto.LayerRecord
333333+ seen := make(map[string]int) // digest → index in records slice
334334+ cursor := ""
335335+ batchSize := 1000
336336+337337+ for {
338338+ indexRecords, nextCursor, err := p.recordsIndex.ListRecords(atproto.LayerCollection, batchSize, cursor, false)
339339+ if err != nil {
340340+ return nil, fmt.Errorf("failed to list layer records: %w", err)
341341+ }
342342+343343+ for _, rec := range indexRecords {
344344+ recordPath := rec.Collection + "/" + rec.Rkey
345345+346346+ _, recBytes, err := repoHandle.GetRecordBytes(ctx, recordPath)
347347+ if err != nil {
348348+ continue
349349+ }
350350+351351+ recordValue, err := lexutil.CborDecodeValue(*recBytes)
352352+ if err != nil {
353353+ continue
354354+ }
355355+356356+ layerRecord, ok := recordValue.(*atproto.LayerRecord)
357357+ if !ok {
358358+ continue
359359+ }
360360+361361+ if layerRecord.Manifest == manifestURI {
362362+ if _, exists := seen[layerRecord.Digest]; !exists {
363363+ seen[layerRecord.Digest] = len(records)
364364+ records = append(records, layerRecord)
365365+ }
366366+ }
367367+ }
368368+369369+ if nextCursor == "" {
370370+ break
371371+ }
372372+ cursor = nextCursor
373373+ }
374374+375375+ if records == nil {
376376+ records = []*atproto.LayerRecord{}
377377+ }
378378+379379+ return records, nil
380380+}
+1
pkg/hold/pds/server.go
···3232 lexutil.RegisterType(atproto.TangledProfileCollection, &atproto.TangledProfileRecord{})
3333 lexutil.RegisterType(atproto.StatsCollection, &atproto.StatsRecord{})
3434 lexutil.RegisterType(atproto.ScanCollection, &atproto.ScanRecord{})
3535+ lexutil.RegisterType(atproto.ImageConfigCollection, &atproto.ImageConfigRecord{})
3536}
36373738// HoldPDS is a minimal ATProto PDS implementation for a hold service
+45
pkg/hold/pds/xrpc.go
···168168 r.Get(atproto.RepoDescribeRepo, h.HandleDescribeRepo)
169169 r.Get(atproto.RepoGetRecord, h.HandleGetRecord)
170170 r.Get(atproto.RepoListRecords, h.HandleListRecords)
171171+ r.Get(atproto.HoldGetLayersForManifest, h.HandleGetLayersForManifest)
172172+ r.Get(atproto.HoldGetImageConfig, h.HandleGetImageConfig)
171173172174 // Sync endpoints
173175 r.Get(atproto.SyncListBlobs, h.HandleListBlobs)
···485487 "cid": recordCID.String(),
486488 "value": recordValue,
487489 })
490490+}
491491+492492+// HandleGetLayersForManifest returns layer records for a specific manifest AT-URI.
493493+func (h *XRPCHandler) HandleGetLayersForManifest(w http.ResponseWriter, r *http.Request) {
494494+ manifestURI := r.URL.Query().Get("manifest")
495495+ if manifestURI == "" {
496496+ http.Error(w, `{"error":"InvalidRequest","message":"manifest parameter is required"}`, http.StatusBadRequest)
497497+ return
498498+ }
499499+500500+ records, err := h.pds.ListLayerRecordsForManifest(r.Context(), manifestURI)
501501+ if err != nil {
502502+ slog.Error("Failed to list layer records for manifest", "error", err, "manifest", manifestURI)
503503+ http.Error(w, `{"error":"InternalServerError","message":"failed to list layer records"}`, http.StatusInternalServerError)
504504+ return
505505+ }
506506+507507+ w.Header().Set("Content-Type", "application/json")
508508+ if err := json.NewEncoder(w).Encode(map[string]any{
509509+ "layers": records,
510510+ }); err != nil {
511511+ slog.Error("Failed to encode layer records response", "error", err)
512512+ }
513513+}
514514+515515+// HandleGetImageConfig returns the OCI image config record for a manifest digest.
516516+func (h *XRPCHandler) HandleGetImageConfig(w http.ResponseWriter, r *http.Request) {
517517+ digest := r.URL.Query().Get("digest")
518518+ if digest == "" {
519519+ http.Error(w, `{"error":"InvalidRequest","message":"digest parameter is required"}`, http.StatusBadRequest)
520520+ return
521521+ }
522522+523523+ _, record, err := h.pds.GetImageConfigRecord(r.Context(), digest)
524524+ if err != nil {
525525+ http.Error(w, `{"error":"NotFound","message":"image config not found"}`, http.StatusNotFound)
526526+ return
527527+ }
528528+529529+ w.Header().Set("Content-Type", "application/json")
530530+ if err := json.NewEncoder(w).Encode(record); err != nil {
531531+ slog.Error("Failed to encode image config response", "error", err)
532532+ }
488533}
489534490535// HandleListRecords lists records in a collection
+105
pkg/hold/pds/xrpc_test.go
···10621062 }
10631063}
1064106410651065+// Tests for HandleGetLayersForManifest
10661066+10671067+func TestHandleGetLayersForManifest(t *testing.T) {
10681068+ handler, ctx := setupTestXRPCHandlerWithIndex(t)
10691069+10701070+ manifestURI := "at://did:plc:testuser/io.atcr.manifest/abc123"
10711071+ otherManifestURI := "at://did:plc:testuser/io.atcr.manifest/def456"
10721072+10731073+ // Create layer records for the target manifest
10741074+ for i := range 3 {
10751075+ record := atproto.NewLayerRecord(
10761076+ fmt.Sprintf("sha256:layer%d", i),
10771077+ int64(1024*(i+1)),
10781078+ "application/vnd.oci.image.layer.v1.tar+gzip",
10791079+ "did:plc:testuser",
10801080+ manifestURI,
10811081+ )
10821082+ if _, _, err := handler.pds.CreateLayerRecord(ctx, record); err != nil {
10831083+ t.Fatalf("Failed to create layer record %d: %v", i, err)
10841084+ }
10851085+ }
10861086+10871087+ // Create a layer record for a different manifest (should not be returned)
10881088+ otherRecord := atproto.NewLayerRecord(
10891089+ "sha256:otherlayer",
10901090+ 2048,
10911091+ "application/vnd.oci.image.layer.v1.tar+gzip",
10921092+ "did:plc:testuser",
10931093+ otherManifestURI,
10941094+ )
10951095+ if _, _, err := handler.pds.CreateLayerRecord(ctx, otherRecord); err != nil {
10961096+ t.Fatalf("Failed to create other layer record: %v", err)
10971097+ }
10981098+10991099+ // Query layers for the target manifest
11001100+ req := makeXRPCGetRequest(atproto.HoldGetLayersForManifest, map[string]string{
11011101+ "manifest": manifestURI,
11021102+ })
11031103+ w := httptest.NewRecorder()
11041104+ handler.HandleGetLayersForManifest(w, req)
11051105+11061106+ result := assertJSONResponse(t, w, http.StatusOK)
11071107+11081108+ layers, ok := result["layers"].([]any)
11091109+ if !ok {
11101110+ t.Fatal("Expected layers array in response")
11111111+ }
11121112+11131113+ if len(layers) != 3 {
11141114+ t.Fatalf("Expected 3 layers, got %d", len(layers))
11151115+ }
11161116+11171117+ // Verify all layers have digests and belong to the target manifest
11181118+ digests := make(map[string]bool)
11191119+ for i, l := range layers {
11201120+ layer, ok := l.(map[string]any)
11211121+ if !ok {
11221122+ t.Fatalf("Layer %d: expected map, got %T", i, l)
11231123+ }
11241124+ digest, ok := layer["digest"].(string)
11251125+ if !ok || digest == "" {
11261126+ t.Errorf("Layer %d: expected non-empty digest", i)
11271127+ }
11281128+ digests[digest] = true
11291129+ }
11301130+ for i := range 3 {
11311131+ expected := fmt.Sprintf("sha256:layer%d", i)
11321132+ if !digests[expected] {
11331133+ t.Errorf("Expected digest %q in results", expected)
11341134+ }
11351135+ }
11361136+}
11371137+11381138+func TestHandleGetLayersForManifest_MissingParam(t *testing.T) {
11391139+ handler, _ := setupTestXRPCHandlerWithIndex(t)
11401140+11411141+ req := makeXRPCGetRequest(atproto.HoldGetLayersForManifest, map[string]string{})
11421142+ w := httptest.NewRecorder()
11431143+ handler.HandleGetLayersForManifest(w, req)
11441144+11451145+ if w.Code != http.StatusBadRequest {
11461146+ t.Errorf("Expected status 400, got %d", w.Code)
11471147+ }
11481148+}
11491149+11501150+func TestHandleGetLayersForManifest_NoMatchingLayers(t *testing.T) {
11511151+ handler, _ := setupTestXRPCHandlerWithIndex(t)
11521152+11531153+ req := makeXRPCGetRequest(atproto.HoldGetLayersForManifest, map[string]string{
11541154+ "manifest": "at://did:plc:nobody/io.atcr.manifest/nonexistent",
11551155+ })
11561156+ w := httptest.NewRecorder()
11571157+ handler.HandleGetLayersForManifest(w, req)
11581158+11591159+ result := assertJSONResponse(t, w, http.StatusOK)
11601160+11611161+ layers, ok := result["layers"].([]any)
11621162+ if !ok {
11631163+ t.Fatal("Expected layers array in response")
11641164+ }
11651165+ if len(layers) != 0 {
11661166+ t.Errorf("Expected 0 layers, got %d", len(layers))
11671167+ }
11681168+}
11691169+10651170// Tests for HandleDeleteRecord
1066117110671172// TestHandleDeleteRecord tests com.atproto.repo.deleteRecord
+19-2
pkg/s3/mock.go
···400400 m.Objects[key] = append([]byte{}, data...)
401401}
402402403403-// GetObject is a test helper to read an object from the mock store (nil if not found).
404404-func (m *MockS3Client) GetObject(key string) []byte {
403403+// GetObject implements S3Client
404404+func (m *MockS3Client) GetObject(ctx context.Context, input *awss3.GetObjectInput, opts ...func(*awss3.Options)) (*awss3.GetObjectOutput, error) {
405405+ m.mu.Lock()
406406+ defer m.mu.Unlock()
407407+408408+ key := aws.ToString(input.Key)
409409+ data, ok := m.Objects[key]
410410+ if !ok {
411411+ return nil, fmt.Errorf("NoSuchKey: %s", key)
412412+ }
413413+414414+ return &awss3.GetObjectOutput{
415415+ Body: io.NopCloser(bytes.NewReader(bytes.Clone(data))),
416416+ ContentLength: aws.Int64(int64(len(data))),
417417+ }, nil
418418+}
419419+420420+// GetObjectBytes is a test helper to read an object from the mock store (nil if not found).
421421+func (m *MockS3Client) GetObjectBytes(key string) []byte {
405422 m.mu.Lock()
406423 defer m.mu.Unlock()
407424 data, ok := m.Objects[key]