···
 package handlers

 import (
-	"fmt"
+	"log/slog"
 	"net/http"

 	"atcr.io/pkg/appview/db"
···
 	// Parse DID for OAuth logout
 	did, err := syntax.ParseDID(uiSession.DID)
 	if err != nil {
-		fmt.Printf("WARNING [logout]: Failed to parse DID %s: %v\n", uiSession.DID, err)
+		slog.Warn("Failed to parse DID for logout", "component", "logout", "did", uiSession.DID, "error", err)
 	} else {
 		// Attempt to revoke OAuth tokens on PDS side
 		if uiSession.OAuthSessionID != "" {
 			// Call indigo's Logout to revoke tokens on PDS
 			if err := h.OAuthApp.GetClientApp().Logout(r.Context(), did, uiSession.OAuthSessionID); err != nil {
 				// Log error but don't block logout - best effort revocation
-				fmt.Printf("WARNING [logout]: Failed to revoke OAuth tokens for %s on PDS: %v\n", uiSession.DID, err)
+				slog.Warn("Failed to revoke OAuth tokens on PDS", "component", "logout", "did", uiSession.DID, "error", err)
 			} else {
-				fmt.Printf("INFO [logout]: Successfully revoked OAuth tokens for %s on PDS\n", uiSession.DID)
+				slog.Info("Successfully revoked OAuth tokens on PDS", "component", "logout", "did", uiSession.DID)
 			}

 			// Invalidate refresher cache to clear local access tokens
 			h.Refresher.InvalidateSession(uiSession.DID)
-			fmt.Printf("INFO [logout]: Invalidated local OAuth cache for %s\n", uiSession.DID)
+			slog.Info("Invalidated local OAuth cache", "component", "logout", "did", uiSession.DID)

 			// Delete OAuth session from database (cleanup, might already be done by Logout)
 			if err := h.OAuthStore.DeleteSession(r.Context(), did, uiSession.OAuthSessionID); err != nil {
-				fmt.Printf("WARNING [logout]: Failed to delete OAuth session from database: %v\n", err)
+				slog.Warn("Failed to delete OAuth session from database", "component", "logout", "error", err)
 			}
 		} else {
-			fmt.Printf("WARNING [logout]: No OAuth session ID found for user %s\n", uiSession.DID)
+			slog.Warn("No OAuth session ID found for user", "component", "logout", "did", uiSession.DID)
 		}
 	}
 }
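Every call in this hunk repeats the same `"component", "logout"` pair. `slog.With` can bake that attribute into a scoped logger once; a minimal sketch under that assumption (the `logger` variable and helper are illustrative, not part of this diff):

```go
package handlers

import "log/slog"

// slog.With returns a Logger that attaches these attrs to every record it
// emits, so call sites only pass the per-event fields.
var logger = slog.With("component", "logout")

func logParseFailure(did string, err error) {
	// Equivalent to: slog.Warn("Failed to parse DID for logout",
	//     "component", "logout", "did", did, "error", err)
	logger.Warn("Failed to parse DID for logout", "did", did, "error", err)
}
```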
pkg/appview/handlers/repository.go
···44 "context"
55 "database/sql"
66 "html/template"
77- "log"
77+ "log/slog"
88 "net/http"
99 "sync"
1010 "time"
···139139 // Fetch repository metadata from annotations table
140140 metadata, err := db.GetRepositoryMetadata(h.DB, owner.DID, repository)
141141 if err != nil {
142142- log.Printf("Failed to fetch repository metadata: %v", err)
142142+ slog.Warn("Failed to fetch repository metadata", "error", err)
143143 // Continue without metadata on error
144144 } else {
145145 repo.Title = metadata["org.opencontainers.image.title"]
···155155 // Fetch star count
156156 stats, err := db.GetRepositoryStats(h.DB, owner.DID, repository)
157157 if err != nil {
158158- log.Printf("Failed to fetch repository stats: %v", err)
158158+ slog.Warn("Failed to fetch repository stats", "error", err)
159159 // Continue with zero stats on error
160160 stats = &db.RepositoryStats{StarCount: 0}
161161 }
···193193194194 html, err := h.ReadmeCache.Get(ctx, repo.ReadmeURL)
195195 if err != nil {
196196- log.Printf("Failed to fetch README from %s: %v", repo.ReadmeURL, err)
196196+ slog.Warn("Failed to fetch README", "url", repo.ReadmeURL, "error", err)
197197 // Continue without README on error
198198 } else {
199199 readmeHTML = template.HTML(html)
pkg/appview/handlers/settings.go
···
 package handlers

 import (
-	"fmt"
 	"html/template"
+	"log/slog"
 	"net/http"
 	"time"

···
 	session, err := h.Refresher.GetSession(r.Context(), user.DID)
 	if err != nil {
 		// OAuth session not found or expired - redirect to re-authenticate
-		fmt.Printf("WARNING [settings]: OAuth session not found for %s: %v - redirecting to login\n", user.DID, err)
+		slog.Warn("OAuth session not found, redirecting to login", "component", "settings", "did", user.DID, "error", err)
 		http.Redirect(w, r, "/auth/oauth/login?return_to=/settings", http.StatusFound)
 		return
 	}
···
 	profile, err := storage.GetProfile(r.Context(), client)
 	if err != nil {
 		// Error fetching profile - log out user
-		fmt.Printf("WARNING [settings]: Failed to fetch profile for %s: %v - logging out\n", user.DID, err)
+		slog.Warn("Failed to fetch profile, logging out", "component", "settings", "did", user.DID, "error", err)
 		http.Redirect(w, r, "/auth/logout", http.StatusFound)
 		return
 	}

 	if profile == nil {
 		// Profile doesn't exist yet (404) - user needs to log out and back in to create it
-		fmt.Printf("WARNING [settings]: Profile doesn't exist for %s - logging out\n", user.DID)
+		slog.Warn("Profile doesn't exist, logging out", "component", "settings", "did", user.DID)
 		http.Redirect(w, r, "/auth/logout", http.StatusFound)
 		return
 	}

-	fmt.Printf("DEBUG [settings]: Fetched profile for %s: defaultHold=%s\n", user.DID, profile.DefaultHold)
+	slog.Debug("Fetched profile", "component", "settings", "did", user.DID, "default_hold", profile.DefaultHold)

 	data := struct {
 		PageData
···
 	session, err := h.Refresher.GetSession(r.Context(), user.DID)
 	if err != nil {
 		// OAuth session not found or expired - redirect to re-authenticate
-		fmt.Printf("WARNING [settings]: OAuth session not found for %s: %v - redirecting to login\n", user.DID, err)
+		slog.Warn("OAuth session not found, redirecting to login", "component", "settings", "did", user.DID, "error", err)
 		http.Redirect(w, r, "/auth/oauth/login?return_to=/settings", http.StatusFound)
 		return
 	}
pkg/appview/holdhealth/worker.go
···44 "context"
55 "database/sql"
66 "fmt"
77- "log"
77+ "log/slog"
88 "strings"
99 "sync"
1010 "time"
···5656 go func() {
5757 defer w.wg.Done()
58585959- log.Println("Hold health worker: Starting background health checks")
5959+ slog.Info("Hold health worker starting background health checks")
60606161 // Wait for services to be ready (Docker startup race condition)
6262 if w.startupDelay > 0 {
6363- log.Printf("Hold health worker: Waiting %s for services to be ready...", w.startupDelay)
6363+ slog.Info("Hold health worker waiting for services to be ready", "delay", w.startupDelay)
6464 select {
6565 case <-time.After(w.startupDelay):
6666 // Continue with initial check
6767 case <-ctx.Done():
6868- log.Println("Hold health worker: Context cancelled during startup delay")
6868+ slog.Info("Hold health worker context cancelled during startup delay")
6969 return
7070 }
7171 }
···7676 for {
7777 select {
7878 case <-ctx.Done():
7979- log.Println("Hold health worker: Context cancelled, stopping")
7979+ slog.Info("Hold health worker context cancelled, stopping")
8080 return
8181 case <-w.stopChan:
8282- log.Println("Hold health worker: Stop signal received")
8282+ slog.Info("Hold health worker stop signal received")
8383 return
8484 case <-w.refreshTicker.C:
8585 w.refreshAllHolds(ctx)
8686 case <-w.cleanupTicker.C:
8787- log.Println("Hold health worker: Running cache cleanup")
8787+ slog.Info("Hold health worker running cache cleanup")
8888 w.checker.Cleanup()
8989 }
9090 }
···9797 w.refreshTicker.Stop()
9898 w.cleanupTicker.Stop()
9999 w.wg.Wait()
100100- log.Println("Hold health worker: Stopped")
100100+ slog.Info("Hold health worker stopped")
101101}
102102103103// refreshAllHolds queries the database for unique hold endpoints and refreshes their health status
104104func (w *Worker) refreshAllHolds(ctx context.Context) {
105105- log.Println("Hold health worker: Starting refresh cycle")
105105+ slog.Info("Hold health worker starting refresh cycle")
106106107107 // Get unique hold endpoints from database
108108 endpoints, err := w.db.GetUniqueHoldEndpoints()
109109 if err != nil {
110110- log.Printf("Hold health worker: Failed to fetch hold endpoints: %v", err)
110110+ slog.Error("Hold health worker failed to fetch hold endpoints", "error", err)
111111 return
112112 }

 	if len(endpoints) == 0 {
-		log.Println("Hold health worker: No hold endpoints to check")
+		slog.Info("Hold health worker found no hold endpoints to check")
 		return
 	}

-	log.Printf("Hold health worker: Fetched %d hold endpoint entries from database", len(endpoints))
+	slog.Info("Hold health worker fetched hold endpoint entries from database", "count", len(endpoints))

 	// Deduplicate endpoints by normalizing to canonical DID format
 	// This handles cases where the same hold is stored with different representations:
···
 		uniqueEndpoints = append(uniqueEndpoints, normalizedDID)
 	}

-	log.Printf("Hold health worker: Checking %d unique hold endpoints (deduplicated from %d)", len(uniqueEndpoints), len(endpoints))
+	slog.Info("Hold health worker checking unique hold endpoints", "unique_count", len(uniqueEndpoints), "total_count", len(endpoints))

 	// Check health concurrently with rate limiting
 	// Use a semaphore to limit concurrent requests (max 10 at a time)
···
 			reachable++
 		} else {
 			unreachable++
-			log.Printf("Hold health worker: Hold unreachable: %s (error: %v)", ep, err)
+			slog.Warn("Hold health worker: hold unreachable", "endpoint", ep, "error", err)
 		}
 		statsMu.Unlock()
 	}(endpoint)
···
 	// Wait for all checks to complete
 	wg.Wait()

-	log.Printf("Hold health worker: Refresh complete - %d reachable, %d unreachable", reachable, unreachable)
+	slog.Info("Hold health worker refresh complete", "reachable", reachable, "unreachable", unreachable)
 }

 // DBAdapter wraps sql.DB to implement DBQuerier interface
pkg/appview/jetstream/backfill.go
···55 "database/sql"
66 "encoding/json"
77 "fmt"
88+ "log/slog"
89 "strings"
910 "time"
1011···52535354// Start runs the backfill for all ATCR collections
 func (b *BackfillWorker) Start(ctx context.Context) error {
-	fmt.Println("Backfill: Starting sync-based backfill...")
+	slog.Info("Backfill: Starting sync-based backfill...")

 	// First, query and cache the default hold's captain record
 	if b.defaultHoldDID != "" {
-		fmt.Printf("Backfill: Querying default hold captain record: %s\n", b.defaultHoldDID)
+		slog.Info("Backfill: Querying default hold captain record", "did", b.defaultHoldDID)
 		if err := b.queryCaptainRecord(ctx, b.defaultHoldDID); err != nil {
-			fmt.Printf("WARNING: Failed to query default hold captain record: %v\n", err)
+			slog.Warn("Backfill: Failed to query default hold captain record", "error", err)
 			// Don't fail the whole backfill - just warn
 		}
 	}
···
 	}

 	for _, collection := range collections {
-		fmt.Printf("Backfill: Processing collection: %s\n", collection)
+		slog.Info("Backfill: Processing collection", "collection", collection)

 		if err := b.backfillCollection(ctx, collection); err != nil {
 			return fmt.Errorf("failed to backfill collection %s: %w", collection, err)
 		}

-		fmt.Printf("Backfill: Completed collection: %s\n", collection)
+		slog.Info("Backfill: Completed collection", "collection", collection)
 	}

-	fmt.Println("Backfill: All collections completed!")
+	slog.Info("Backfill: All collections completed!")
 	return nil
 }

···
 		return fmt.Errorf("failed to list repos: %w", err)
 	}

-	fmt.Printf("Backfill: Found %d repos with %s (cursor: %s)\n", len(result.Repos), collection, repoCursor)
+	slog.Info("Backfill: Found repos", "count", len(result.Repos), "collection", collection, "cursor", repoCursor)

 	// Process each repo (DID)
 	for _, repo := range result.Repos {
 		recordCount, err := b.backfillRepo(ctx, repo.DID, collection)
 		if err != nil {
-			fmt.Printf("WARNING: Failed to backfill repo %s: %v\n", repo.DID, err)
+			slog.Warn("Backfill: Failed to backfill repo", "did", repo.DID, "error", err)
 			continue
 		}
···
 		processedRecords += recordCount

 		if processedRepos%10 == 0 {
-			fmt.Printf("Backfill: Progress - %d repos, %d records\n", processedRepos, processedRecords)
+			slog.Info("Backfill: Progress", "repos", processedRepos, "records", processedRecords)
 		}
 	}
···
 		repoCursor = result.Cursor
 	}

-	fmt.Printf("Backfill: Collection %s complete - %d repos, %d records\n", collection, processedRepos, processedRecords)
+	slog.Info("Backfill: Collection complete", "collection", collection, "repos", processedRepos, "records", processedRecords)
 	return nil
 }

···
 		}

 		if err := b.processRecord(ctx, did, collection, &record); err != nil {
-			fmt.Printf("WARNING: Failed to process record %s: %v\n", record.URI, err)
+			slog.Warn("Backfill: Failed to process record", "uri", record.URI, "error", err)
 			continue
 		}
 		recordCount++
···

 	// Reconcile deletions - remove records from DB that no longer exist on PDS
 	if err := b.reconcileDeletions(did, collection, foundManifestDigests, foundTags, foundStars); err != nil {
-		fmt.Printf("WARNING: Failed to reconcile deletions for %s: %v\n", did, err)
+		slog.Warn("Backfill: Failed to reconcile deletions", "did", did, "error", err)
 	}

 	// After processing manifests, clean up orphaned tags (tags pointing to non-existent manifests)
 	if collection == atproto.ManifestCollection {
 		if err := db.CleanupOrphanedTags(b.db, did); err != nil {
-			fmt.Printf("WARNING: Failed to cleanup orphaned tags for %s: %v\n", did, err)
+			slog.Warn("Backfill: Failed to clean up orphaned tags", "did", did, "error", err)
 		}

 		// Reconcile annotations - ensure they come from newest manifest per repository
 		// This fixes out-of-order backfill where older manifests can overwrite newer annotations
 		if err := b.reconcileAnnotations(ctx, did, pdsClient); err != nil {
-			fmt.Printf("WARNING: Failed to reconcile annotations for %s: %v\n", did, err)
+			slog.Warn("Backfill: Failed to reconcile annotations", "did", did, "error", err)
 		}
 	}
···

 		// Log deletions
 		deleted := len(dbDigests) - len(foundManifestDigests)
 		if deleted > 0 {
-			fmt.Printf("Backfill: Deleted %d orphaned manifests for %s\n", deleted, did)
+			slog.Info("Backfill: Deleted orphaned manifests", "count", deleted, "did", did)
 		}

 	case atproto.TagCollection:
···
 		// Log deletions
 		deleted := len(dbTags) - len(foundTags)
 		if deleted > 0 {
-			fmt.Printf("Backfill: Deleted %d orphaned tags for %s\n", deleted, did)
+			slog.Info("Backfill: Deleted orphaned tags", "count", deleted, "did", did)
 		}

 	case atproto.StarCollection:
···

 		// Retry on connection errors (hold service might still be starting)
 		if attempt < maxRetries && strings.Contains(err.Error(), "connection refused") {
-			fmt.Printf("Backfill: Hold not ready (attempt %d/%d), retrying in 2s...\n", attempt, maxRetries)
+			slog.Info("Backfill: Hold not ready, retrying in 2s", "attempt", attempt, "max_retries", maxRetries)
 			time.Sleep(2 * time.Second)
 			continue
 		}
···
 		return fmt.Errorf("failed to cache captain record: %w", err)
 	}

-	fmt.Printf("Backfill: Cached captain record for hold %s (owner: %s)\n", holdDID, captainRecord.OwnerDID)
+	slog.Info("Backfill: Cached captain record", "hold_did", holdDID, "owner_did", captainRecord.OwnerDID)
 	return nil
 }

···
 			if err != nil {
-				fmt.Printf("WARNING [backfill]: Failed to reconcile annotations for %s/%s: %v\n", did, repo, err)
+				slog.Warn("Backfill: Failed to reconcile annotations", "did", did, "repo", repo, "error", err)
 			} else {
-				fmt.Printf("Backfill: Reconciled annotations for %s/%s from newest manifest %s\n", did, repo, newestManifest.Digest)
+				slog.Info("Backfill: Reconciled annotations from newest manifest", "did", did, "repo", repo, "digest", newestManifest.Digest)
 			}
 		}
 	}
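A note on the hunks above: `log/slog` does not interpret Printf verbs, so `%s`/`%v`/`\n` carried over from `fmt.Printf` would be printed verbatim and the trailing arguments would surface as `!BADKEY` attributes. The message must be a constant string, with values passed as key-value pairs. A minimal sketch contrasting the two call shapes (values are illustrative):

```go
package main

import (
	"errors"
	"fmt"
	"log/slog"
)

func main() {
	did := "did:plc:example"
	err := errors.New("connection refused")

	// Printf interpolates everything into one opaque string.
	fmt.Printf("WARNING: Failed to backfill repo %s: %v\n", did, err)

	// slog keeps the message constant and the data structured. With the
	// default text handler this emits roughly:
	//   level=WARN msg="Backfill: Failed to backfill repo" did=did:plc:example error="connection refused"
	slog.Warn("Backfill: Failed to backfill repo", "did", did, "error", err)
}
```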
pkg/appview/jetstream/processor.go
···55 "database/sql"
66 "encoding/json"
77 "fmt"
88+ "log/slog"
89 "strings"
910 "time"
1011···111112 publicClient := atproto.NewClient("https://public.api.bsky.app", "", "")
112113 profile, err := publicClient.GetActorProfile(ctx, resolvedDID)
113114 if err != nil {
114114- fmt.Printf("WARNING [processor]: Failed to fetch profile for DID %s: %v\n", resolvedDID, err)
115115+ slog.Warn("Failed to fetch profile", "component", "processor", "did", resolvedDID, "error", err)
115116 // Continue without avatar
116117 } else {
117118 avatar = profile.Avatar
···307308 // Convert hold URL/DID to canonical DID
308309 holdDID := atproto.ResolveHoldDIDFromURL(profileRecord.DefaultHold)
309310 if holdDID == "" {
310310- fmt.Printf("WARNING [processor]: Invalid hold reference in profile for %s: %s\n", did, profileRecord.DefaultHold)
311311+ slog.Warn("Invalid hold reference in profile", "component", "processor", "did", did, "default_hold", profileRecord.DefaultHold)
311312 return nil
312313 }
313314
pkg/appview/jetstream/worker.go
···88 "database/sql"
99 "encoding/json"
1010 "fmt"
1111+ "log/slog"
1112 "net/url"
1213 "sync"
1314 "time"
···
 		// Calculate lag (cursor is in microseconds)
 		now := time.Now().UnixMicro()
 		lagSeconds := float64(now-w.startCursor) / 1_000_000.0
-		fmt.Printf("Jetstream: Starting from cursor %d (%.1f seconds behind live)\n", w.startCursor, lagSeconds)
+		slog.Info("Jetstream: Starting from cursor", "cursor", w.startCursor, "lag_seconds", lagSeconds)
 	}

 	// Disable compression for now to debug
···
 	}
 	defer decoder.Close()

-	fmt.Println("Connected to Jetstream, listening for events...")
+	slog.Info("Connected to Jetstream, listening for events...")

 	// Start heartbeat ticker to show Jetstream is alive
 	heartbeatTicker := time.NewTicker(30 * time.Second)
···

 			// If no pong for 60 seconds, connection is likely dead
 			if timeSinceLastPong > 60*time.Second {
-				fmt.Printf("Jetstream: No pong received for %s (sent %d pings, got %d pongs), closing connection\n",
-					timeSinceLastPong, pingsTotal, pongsTotal)
+				slog.Warn("Jetstream: No pong received, closing connection",
+					"since_last_pong", timeSinceLastPong, "pings_sent", pingsTotal, "pongs_received", pongsTotal)
 				conn.Close()
 				return
···
 			// Send ping with write deadline
 			conn.SetWriteDeadline(time.Now().Add(10 * time.Second))
 			if err := conn.WriteMessage(websocket.PingMessage, nil); err != nil {
-				fmt.Printf("Jetstream: Failed to send ping: %v\n", err)
+				slog.Warn("Jetstream: Failed to send ping", "error", err)
 				conn.Close()
 				return
 			}
···
 			return ctx.Err()
 		case <-heartbeatTicker.C:
 			elapsed := time.Since(lastHeartbeat)
-			fmt.Printf("Jetstream: Alive (processed %d events in last %.0fs)\n", eventCount, elapsed.Seconds())
+			slog.Info("Jetstream: Alive", "events_processed", eventCount, "elapsed_seconds", elapsed.Seconds())
 			eventCount = 0
 			lastHeartbeat = time.Now()
 		default:
···
 	}

 	// Log detailed context about the failure
-	fmt.Printf("Jetstream: Connection closed after %s\n", connDuration)
-	fmt.Printf("  - Events in last 30s: %d\n", eventCount)
-	fmt.Printf("  - Time since last event: %s\n", timeSinceLastEvent)
-	fmt.Printf("  - Ping/Pong: %d/%d (%.1f%% success)\n", pongsTotal, pingsTotal, pongRate)
+	slog.Warn("Jetstream: Connection closed",
+		"duration", connDuration, "events_last_30s", eventCount,
+		"time_since_last_event", timeSinceLastEvent,
+		"pongs", pongsTotal, "pings", pingsTotal, "pong_success_pct", pongRate)
···
 	// Process based on collection
 	switch commit.Collection {
 	case atproto.ManifestCollection:
-		fmt.Printf("Jetstream: Processing manifest event: did=%s, operation=%s, rkey=%s\n",
-			commit.DID, commit.Operation, commit.RKey)
+		slog.Info("Jetstream: Processing manifest event",
+			"did", commit.DID, "operation", commit.Operation, "rkey", commit.RKey)
 		return w.processManifest(commit)
 	case atproto.TagCollection:
-		fmt.Printf("Jetstream: Processing tag event: did=%s, operation=%s, rkey=%s\n",
-			commit.DID, commit.Operation, commit.RKey)
+		slog.Info("Jetstream: Processing tag event",
+			"did", commit.DID, "operation", commit.Operation, "rkey", commit.RKey)
 		return w.processTag(commit)
 	case atproto.StarCollection:
-		fmt.Printf("Jetstream: Processing star event: did=%s, operation=%s, rkey=%s\n",
-			commit.DID, commit.Operation, commit.RKey)
+		slog.Info("Jetstream: Processing star event",
+			"did", commit.DID, "operation", commit.Operation, "rkey", commit.RKey)
 		return w.processStar(commit)
 	default:
···
 	if commit.Operation == "delete" {
 		// Delete tag - decode rkey back to repository and tag
 		repo, tag := atproto.RKeyToRepositoryTag(commit.RKey)
-		fmt.Printf("Jetstream: Deleting tag: did=%s, repository=%s, tag=%s (from rkey=%s)\n",
-			commit.DID, repo, tag, commit.RKey)
+		slog.Info("Jetstream: Deleting tag",
+			"did", commit.DID, "repository", repo, "tag", tag, "rkey", commit.RKey)
 		if err := db.DeleteTag(w.db, commit.DID, repo, tag); err != nil {
-			fmt.Printf("Jetstream: ERROR deleting tag: %v\n", err)
+			slog.Error("Jetstream: Failed to delete tag", "error", err)
 			return err
 		}
-		fmt.Printf("Jetstream: Successfully deleted tag: did=%s, repository=%s, tag=%s\n",
-			commit.DID, repo, tag)
+		slog.Info("Jetstream: Successfully deleted tag",
+			"did", commit.DID, "repository", repo, "tag", tag)
 		return nil
 	}
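For hot paths like the heartbeat and per-event logging above, slog's typed attribute constructors (`slog.Int`, `slog.Duration`, and friends) are a drop-in alternative to alternating key-value arguments, checked at compile time. A sketch using names from the hunk above (the helper function is illustrative, not from the diff):

```go
package jetstream

import (
	"log/slog"
	"time"
)

// logAlive emits the heartbeat with typed attrs; an slog.Attr may appear
// anywhere a "key", value pair could, and the two forms are equivalent.
func logAlive(eventCount int, elapsed time.Duration) {
	slog.Info("Jetstream: Alive",
		slog.Int("events_processed", eventCount),
		slog.Duration("elapsed", elapsed))
}
```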
pkg/appview/middleware/registry.go
···44 "context"
55 "encoding/json"
66 "fmt"
77+ "log/slog"
78 "strings"
89 "sync"
910···160161 return nil, fmt.Errorf("no PDS endpoint found for %s", identityStr)
161162 }
162163163163- fmt.Printf("DEBUG [registry/middleware]: Resolved identity: did=%s, pds=%s, handle=%s\n", did, pdsEndpoint, handle)
164164+ slog.Debug("Resolved identity", "component", "registry/middleware", "did", did, "pds", pdsEndpoint, "handle", handle)
164165165166 // Query for hold DID - either user's hold or default hold service
166167 holdDID := nr.findHoldDID(ctx, did, pdsEndpoint)
···174175 // This ensures users can push immediately after docker login without web sign-in
175176 // EnsureCrewMembership is best-effort and logs errors without failing the request
176177 if holdDID != "" && nr.refresher != nil {
177177- fmt.Printf("DEBUG [registry/middleware]: Auto-reconciling crew membership for DID=%s at hold=%s\n", did, holdDID)
178178+ slog.Debug("Auto-reconciling crew membership", "component", "registry/middleware", "did", did, "hold_did", holdDID)
178179 client := atproto.NewClient(pdsEndpoint, did, "")
179180 storage.EnsureCrewMembership(ctx, client, nr.refresher, holdDID)
180181 }
···185186 var err error
186187 serviceToken, err = token.GetOrFetchServiceToken(ctx, nr.refresher, did, holdDID, pdsEndpoint)
187188 if err != nil {
188188- fmt.Printf("ERROR [registry/middleware]: Failed to get service token for DID=%s: %v\n", did, err)
189189- fmt.Printf("ERROR [registry/middleware]: User needs to re-authenticate via credential helper\n")
189189+ slog.Error("Failed to get service token", "component", "registry/middleware", "did", did, "error", err)
190190+ slog.Error("User needs to re-authenticate via credential helper", "component", "registry/middleware")
190191 return nil, nr.authErrorMessage("OAuth session expired")
191192 }
192193 }
···219220 apiClient := session.APIClient()
220221 atprotoClient = atproto.NewClientWithIndigoClient(pdsEndpoint, did, apiClient)
221222 } else {
222222- fmt.Printf("DEBUG [registry/middleware]: OAuth refresh failed for DID=%s: %v, falling back to Basic Auth\n", did, err)
223223+ slog.Debug("OAuth refresh failed, falling back to Basic Auth", "component", "registry/middleware", "did", did, "error", err)
223224 }
224225 }
225226···227228 if atprotoClient == nil {
228229 accessToken, ok := auth.GetGlobalTokenCache().Get(did)
229230 if !ok {
230230- fmt.Printf("DEBUG [registry/middleware]: No cached access token found for DID=%s (neither OAuth nor Basic Auth)\n", did)
231231+ slog.Debug("No cached access token found (neither OAuth nor Basic Auth)", "component", "registry/middleware", "did", did)
231232 accessToken = "" // Will fail on manifest push, but let it try
232233 } else {
233233- fmt.Printf("DEBUG [registry/middleware]: Using Basic Auth access token for DID=%s (length=%d)\n", did, len(accessToken))
234234+ slog.Debug("Using Basic Auth access token", "component", "registry/middleware", "did", did, "token_length", len(accessToken))
234235 }
235236 atprotoClient = atproto.NewClient(pdsEndpoint, did, accessToken)
236237 }
···304305 profile, err := storage.GetProfile(ctx, client)
305306 if err != nil {
306307 // Error reading profile (not a 404) - log and continue
307307- fmt.Printf("WARNING: failed to read profile for %s: %v\n", did, err)
308308+ slog.Warn("Failed to read profile", "did", did, "error", err)
308309 }
309310310311 if profile != nil && profile.DefaultHold != "" {
···314315 if nr.isHoldReachable(ctx, profile.DefaultHold) {
315316 return profile.DefaultHold
316317 }
317317- fmt.Printf("DEBUG [registry/middleware/testmode]: User's defaultHold %s unreachable, falling back to default\n", profile.DefaultHold)
318318+ slog.Debug("User's defaultHold unreachable, falling back to default", "component", "registry/middleware/testmode", "default_hold", profile.DefaultHold)
318319 return nr.defaultHoldDID
319320 }
320321 return profile.DefaultHold
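The token-failure path above emits two `slog.Error` records for a single event. Since structured records can carry arbitrary fields, one alternative worth weighing (a sketch, not part of this diff) is a single record with the remediation as an attribute, so the two lines cannot be separated by interleaved output:

```go
package middleware

import "log/slog"

// logServiceTokenFailure keeps the error and its remediation hint in one
// record. (Illustrative helper; the name is not from the diff.)
func logServiceTokenFailure(did string, err error) {
	slog.Error("Failed to get service token",
		"component", "registry/middleware",
		"did", did,
		"error", err,
		"hint", "user needs to re-authenticate via credential helper")
}
```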
pkg/appview/readme/cache.go
···
 import (
 	"context"
 	"database/sql"
-	"fmt"
+	"log/slog"
 	"time"
 )

···
 	// Store in cache
 	if err := c.storeInDB(readmeURL, html); err != nil {
 		// Log error but don't fail - we have the content
-		// In production, you'd use proper logging here
-		fmt.Printf("Failed to cache README: %v\n", err)
+		slog.Warn("Failed to cache README", "error", err)
 	}

 	return html, nil
pkg/appview/storage/manifest_store.go
···77 "errors"
88 "fmt"
99 "io"
1010+ "log/slog"
1011 "maps"
1112 "net/http"
1213 "strings"
···8889 if s.ctx.Database != nil {
8990 go func() {
9091 if err := s.ctx.Database.IncrementPullCount(s.ctx.DID, s.ctx.Repository); err != nil {
9191- fmt.Printf("WARNING: Failed to increment pull count for %s/%s: %v\n", s.ctx.DID, s.ctx.Repository, err)
9292+ slog.Warn("Failed to increment pull count", "did", s.ctx.DID, "repository", s.ctx.Repository, "error", err)
9293 }
9394 }()
9495 }
···143144 labels, err := s.extractConfigLabels(ctx, manifestRecord.Config.Digest)
144145 if err != nil {
145146 // Log error but don't fail the push - labels are optional
146146- fmt.Printf("WARNING: Failed to extract config labels: %v\n", err)
147147+ slog.Warn("Failed to extract config labels", "error", err)
147148 } else {
148149 // Initialize annotations map if needed
149150 if manifestRecord.Annotations == nil {
···153154 // Copy labels to annotations (Dockerfile LABELs → manifest annotations)
154155 maps.Copy(manifestRecord.Annotations, labels)
155156156156- fmt.Printf("DEBUG: Extracted %d labels from config blob\n", len(labels))
157157+ slog.Debug("Extracted labels from config blob", "count", len(labels))
157158 }
158159 }
159160···168169 if s.ctx.Database != nil {
169170 go func() {
170171 if err := s.ctx.Database.IncrementPushCount(s.ctx.DID, s.ctx.Repository); err != nil {
171171- fmt.Printf("WARNING: Failed to increment push count for %s/%s: %v\n", s.ctx.DID, s.ctx.Repository, err)
172172+ slog.Warn("Failed to increment push count", "did", s.ctx.DID, "repository", s.ctx.Repository, "error", err)
172173 }
173174 }()
174175 }
···192193 if tag != "" && s.ctx.ServiceToken != "" && s.ctx.Handle != "" {
193194 go func() {
194195 if err := s.notifyHoldAboutManifest(context.Background(), manifestRecord, tag, dgst.String()); err != nil {
195195- fmt.Printf("WARNING: Failed to notify hold about manifest: %v\n", err)
196196+ slog.Warn("Failed to notify hold about manifest", "error", err)
196197 }
197198 }()
198199 }
···362363 // Parse response (optional logging)
363364 var notifyResp map[string]any
364365 if err := json.NewDecoder(resp.Body).Decode(¬ifyResp); err == nil {
365365- fmt.Printf("INFO: Hold notification successful for %s:%s - %+v\n", s.ctx.Repository, tag, notifyResp)
366366+ slog.Info("Hold notification successful", "repository", s.ctx.Repository, "tag", tag, "response", notifyResp)
366367 }
367368368369 return nil
···386387 return
387388 }
388389389389- fmt.Printf("INFO: Refreshing README cache for %s/%s from %s\n", s.ctx.DID, s.ctx.Repository, readmeURL)
390390+ slog.Info("Refreshing README cache", "did", s.ctx.DID, "repository", s.ctx.Repository, "url", readmeURL)
390391391392 // Invalidate the cached entry first
392393 if err := s.ctx.ReadmeCache.Invalidate(readmeURL); err != nil {
393393- fmt.Printf("WARNING: Failed to invalidate README cache for %s: %v\n", readmeURL, err)
394394+ slog.Warn("Failed to invalidate README cache", "url", readmeURL, "error", err)
394395 // Continue anyway - Get() will still fetch fresh content
395396 }
396397···401402402403 _, err := s.ctx.ReadmeCache.Get(ctxWithTimeout, readmeURL)
403404 if err != nil {
404404- fmt.Printf("WARNING: Failed to refresh README cache for %s: %v\n", readmeURL, err)
405405+ slog.Warn("Failed to refresh README cache", "url", readmeURL, "error", err)
405406 // Not a critical error - cache will be refreshed on next page view
406407 return
407408 }
408409409409- fmt.Printf("INFO: README cache refreshed successfully for %s\n", readmeURL)
410410+ slog.Info("README cache refreshed successfully", "url", readmeURL)
410411}
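Several of these calls run inside request-scoped code paths, where the `...Context` variants let a custom handler pull trace or request IDs out of the context. A minimal sketch, assuming such a handler is installed (the wiring is not shown in this diff):

```go
package storage

import (
	"context"
	"log/slog"
)

// logRefresh uses the context-aware variant: output is identical with the
// default handler, but a custom slog.Handler can read request-scoped
// values (e.g. a request ID) from ctx and attach them to the record.
func logRefresh(ctx context.Context, readmeURL string) {
	slog.InfoContext(ctx, "Refreshing README cache", "url", readmeURL)
}
```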
pkg/appview/storage/profile.go
···55 "encoding/json"
66 "errors"
77 "fmt"
88+ "log/slog"
89 "sync"
910 "time"
1011···4647 return fmt.Errorf("failed to create sailor profile: %w", err)
4748 }
48494949- fmt.Printf("DEBUG [profile]: Created sailor profile with defaultHold=%s\n", normalizedDID)
5050+ slog.Debug("Created sailor profile", "component", "profile", "default_hold", normalizedDID)
5051 return nil
5152}
5253···9596 // Update the profile on the PDS
9697 profile.UpdatedAt = time.Now()
9798 if err := UpdateProfile(ctx, client, &profile); err != nil {
9898- fmt.Printf("WARNING [profile]: Failed to persist URL-to-DID migration for %s: %v\n", did, err)
9999+ slog.Warn("Failed to persist URL-to-DID migration", "component", "profile", "did", did, "error", err)
99100 } else {
100100- fmt.Printf("DEBUG [profile]: Persisted defaultHold migration to DID: %s (for DID: %s)\n", migratedDID, did)
101101+ slog.Debug("Persisted defaultHold migration to DID", "component", "profile", "migrated_did", migratedDID, "did", did)
101102 }
102103 }()
103104 }
···113114 // This ensures we always store DIDs, even if user provides a URL
114115 if profile.DefaultHold != "" && !atproto.IsDID(profile.DefaultHold) {
115116 profile.DefaultHold = atproto.ResolveHoldDIDFromURL(profile.DefaultHold)
116116- fmt.Printf("DEBUG [profile]: Normalized defaultHold to DID: %s\n", profile.DefaultHold)
117117+ slog.Debug("Normalized defaultHold to DID", "component", "profile", "default_hold", profile.DefaultHold)
117118 }
118119119120 _, err := client.PutRecord(ctx, atproto.SailorProfileCollection, ProfileRKey, profile)
pkg/appview/storage/proxy_blob_store.go
···66 "encoding/json"
77 "fmt"
88 "io"
99+ "log/slog"
910 "net/http"
1011 "sync"
1112 "time"
···4142 // Resolve DID to URL once at construction time
4243 holdURL := atproto.ResolveHoldURL(ctx.HoldDID)
43444444- fmt.Printf("DEBUG [proxy_blob_store]: NewProxyBlobStore created with holdDID=%s, holdURL=%s, userDID=%s, repo=%s\n",
4545- ctx.HoldDID, holdURL, ctx.DID, ctx.Repository)
4545+ slog.Debug("NewProxyBlobStore created", "component", "proxy_blob_store", "hold_did", ctx.HoldDID, "hold_url", holdURL, "user_did", ctx.DID, "repo", ctx.Repository)
46464747 return &ProxyBlobStore{
4848 ctx: ctx,
···6767 // Middleware fails fast with HTTP 401 if OAuth session is invalid
6868 if p.ctx.ServiceToken == "" {
6969 // Should never happen - middleware validates OAuth before handlers run
7070- fmt.Printf("ERROR [proxy_blob_store]: No service token in context for DID=%s\n", p.ctx.DID)
7070+ slog.Error("No service token in context", "component", "proxy_blob_store", "did", p.ctx.DID)
7171 return nil, fmt.Errorf("no service token available (middleware should have validated)")
7272 }
7373···9999 return nil // No authorization check if authorizer not configured
100100 }
101101102102- fmt.Printf("[checkWriteAccess] Checking write access for userDID=%s to holdDID=%s\n", p.ctx.DID, p.ctx.HoldDID)
102102+ slog.Debug("Checking write access", "component", "proxy_blob_store", "user_did", p.ctx.DID, "hold_did", p.ctx.HoldDID)
103103 allowed, err := p.ctx.Authorizer.CheckWriteAccess(ctx, p.ctx.HoldDID, p.ctx.DID)
104104 if err != nil {
105105- fmt.Printf("[checkWriteAccess] Authorization check error: %v\n", err)
105105+ slog.Error("Authorization check error", "component", "proxy_blob_store", "error", err)
106106 return fmt.Errorf("authorization check failed: %w", err)
107107 }
108108 if !allowed {
109109- fmt.Printf("[checkWriteAccess] Write access DENIED for userDID=%s to holdDID=%s\n", p.ctx.DID, p.ctx.HoldDID)
109109+ slog.Warn("Write access denied", "component", "proxy_blob_store", "user_did", p.ctx.DID, "hold_did", p.ctx.HoldDID)
110110 return errcode.ErrorCodeDenied.WithMessage(fmt.Sprintf("write access denied to hold %s", p.ctx.HoldDID))
111111 }
112112- fmt.Printf("[checkWriteAccess] Write access ALLOWED for userDID=%s to holdDID=%s\n", p.ctx.DID, p.ctx.HoldDID)
112112+ slog.Debug("Write access allowed", "component", "proxy_blob_store", "user_did", p.ctx.DID, "hold_did", p.ctx.HoldDID)
113113 return nil
114114}
115115···243243 // Use Create() flow for all uploads (goes through multipart XRPC endpoints)
244244 writer, err := p.Create(ctx)
245245 if err != nil {
246246- fmt.Printf("[proxy_blob_store/Put] Failed to create writer: %v\n", err)
246246+ slog.Error("Failed to create writer", "component", "proxy_blob_store/Put", "error", err)
247247 return distribution.Descriptor{}, err
248248 }
249249250250 // Write the content
251251 if _, err := writer.Write(content); err != nil {
252252 writer.Cancel(ctx)
253253- fmt.Printf("[proxy_blob_store/Put] Failed to write content: %v\n", err)
253253+ slog.Error("Failed to write content", "component", "proxy_blob_store/Put", "error", err)
254254 return distribution.Descriptor{}, err
255255 }
256256···261261 MediaType: mediaType,
262262 })
263263 if err != nil {
264264- fmt.Printf("[proxy_blob_store/Put] Failed to commit: %v\n", err)
264264+ slog.Error("Failed to commit", "component", "proxy_blob_store/Put", "error", err)
265265 return distribution.Descriptor{}, err
266266 }
267267268268- fmt.Printf("[proxy_blob_store/Put] Upload successful: digest=%s, size=%d\n", dgst, len(content))
268268+ slog.Debug("Upload successful", "component", "proxy_blob_store/Put", "digest", dgst, "size", len(content))
269269 return desc, nil
270270}
271271···393393 return "", fmt.Errorf("hold service returned empty URL")
394394 }
395395396396- fmt.Printf("DEBUG [proxy_blob_store]: Got presigned HEAD URL from hold service: %s\n", result.URL)
396396+ slog.Debug("Got presigned HEAD URL from hold service", "component", "proxy_blob_store", "url", result.URL)
397397 return result.URL, nil
398398}
399399···676676 ETag: etag,
677677 })
678678679679- fmt.Printf("[flushPart] Part %d uploaded successfully: ETag=%s\n", w.partNumber, etag)
679679+ slog.Debug("Part uploaded successfully", "component", "proxy_blob_store/flushPart", "part_number", w.partNumber, "etag", etag)
680680681681 // Reset buffer and increment part number
682682 w.buffer.Reset()
···734734735735 // Flush any remaining buffered data
736736 if w.buffer.Len() > 0 {
737737- fmt.Printf("[Commit] Flushing final buffer: %d bytes\n", w.buffer.Len())
737737+ slog.Debug("Flushing final buffer", "component", "proxy_blob_store/Commit", "bytes", w.buffer.Len())
738738 if err := w.flushPart(); err != nil {
739739 // Try to abort multipart on error
740740 tempDigest := fmt.Sprintf("uploads/temp-%s", w.id)
···745745746746 // Complete multipart upload - XRPC complete action handles move internally
747747 // Send the real digest (not tempDigest) so hold can move temp → final location
748748- fmt.Printf("🔒 [Commit] Completing multipart upload: uploadID=%s, parts=%d, digest=%s\n", w.uploadID, len(w.parts), desc.Digest)
748748+ slog.Info("Completing multipart upload", "component", "proxy_blob_store/Commit", "upload_id", w.uploadID, "parts", len(w.parts), "digest", desc.Digest)
749749 if err := w.store.completeMultipartUpload(ctx, desc.Digest.String(), w.uploadID, w.parts); err != nil {
750750 return distribution.Descriptor{}, fmt.Errorf("failed to complete multipart upload: %w", err)
751751 }
752752753753- fmt.Printf("[Commit] Upload completed successfully: digest=%s, size=%d, parts=%d\n", desc.Digest, w.size, len(w.parts))
753753+ slog.Info("Upload completed successfully", "component", "proxy_blob_store/Commit", "digest", desc.Digest, "size", w.size, "parts", len(w.parts))
754754755755 return distribution.Descriptor{
756756 Digest: desc.Digest,
···763763func (w *ProxyBlobWriter) Cancel(ctx context.Context) error {
764764 w.closed = true
765765766766- fmt.Printf("[Cancel] Cancelling upload: id=%s\n", w.id)
766766+ slog.Debug("Cancelling upload", "component", "proxy_blob_store/Cancel", "id", w.id)
767767768768 // Remove from global uploads map
769769 globalUploadsMu.Lock()
···773773 // Abort multipart upload
774774 tempDigest := fmt.Sprintf("uploads/temp-%s", w.id)
775775 if err := w.store.abortMultipartUpload(ctx, tempDigest, w.uploadID); err != nil {
776776- fmt.Printf("⚠️ [Cancel] Failed to abort multipart upload: %v\n", err)
776776+ slog.Warn("Failed to abort multipart upload", "component", "proxy_blob_store/Cancel", "error", err)
777777 // Continue anyway - we want to mark upload as cancelled
778778 }
779779780780- fmt.Printf("[Cancel] Upload cancelled: id=%s\n", w.id)
780780+ slog.Debug("Upload cancelled", "component", "proxy_blob_store/Cancel", "id", w.id)
781781 return nil
782782}
783783
pkg/appview/storage/routing_repository.go
···

 import (
 	"context"
-	"fmt"
+	"log/slog"
 	"time"

 	"github.com/distribution/distribution/v3"
···
 		if holdDID := r.manifestStore.GetLastFetchedHoldDID(); holdDID != "" {
 			// Cache for 10 minutes - should cover typical pull operations
 			GetGlobalHoldCache().Set(r.Ctx.DID, r.Ctx.Repository, holdDID, 10*time.Minute)
-			fmt.Printf("DEBUG [storage/routing]: Cached hold DID: did=%s, repo=%s, hold=%s\n",
-				r.Ctx.DID, r.Ctx.Repository, holdDID)
+			slog.Debug("Cached hold DID", "component", "storage/routing", "did", r.Ctx.DID, "repo", r.Ctx.Repository, "hold", holdDID)
 		}
 	}()

···
 func (r *RoutingRepository) Blobs(ctx context.Context) distribution.BlobStore {
 	// Return cached blob store if available
 	if r.blobStore != nil {
-		fmt.Printf("DEBUG [storage/blobs]: Returning cached blob store for did=%s, repo=%s\n",
-			r.Ctx.DID, r.Ctx.Repository)
+		slog.Debug("Returning cached blob store", "component", "storage/blobs", "did", r.Ctx.DID, "repo", r.Ctx.Repository)
 		return r.blobStore
 	}

···
 	if cachedHoldDID, ok := GetGlobalHoldCache().Get(r.Ctx.DID, r.Ctx.Repository); ok {
 		// Use cached hold DID from manifest
 		holdDID = cachedHoldDID
-		fmt.Printf("DEBUG [storage/blobs]: Using cached hold from manifest: did=%s, repo=%s, hold=%s\n",
-			r.Ctx.DID, r.Ctx.Repository, cachedHoldDID)
+		slog.Debug("Using cached hold from manifest", "component", "storage/blobs", "did", r.Ctx.DID, "repo", r.Ctx.Repository, "hold", cachedHoldDID)
 	} else {
 		// No cached hold, use discovery-based DID (for push or first pull)
-		fmt.Printf("DEBUG [storage/blobs]: Using discovery-based hold: did=%s, repo=%s, hold=%s\n",
-			r.Ctx.DID, r.Ctx.Repository, holdDID)
+		slog.Debug("Using discovery-based hold", "component", "storage/blobs", "did", r.Ctx.DID, "repo", r.Ctx.Repository, "hold", holdDID)
 	}

 	if holdDID == "" {
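One note on the migration as a whole: the default slog logger drops records below Info, so the many `slog.Debug` calls introduced in this diff stay silent until a handler with a lower level is installed at startup. A minimal sketch (the `DEBUG` environment variable and the JSON handler choice are assumptions, not part of this diff):

```go
package main

import (
	"log/slog"
	"os"
)

func main() {
	level := slog.LevelInfo
	if os.Getenv("DEBUG") != "" { // hypothetical flag; reuse whatever config the app already has
		level = slog.LevelDebug
	}
	// slog.SetDefault routes the package-level slog.Debug/Info/Warn/Error
	// calls used throughout this diff to this JSON handler on stderr.
	slog.SetDefault(slog.New(slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: level})))

	slog.Debug("visible only when DEBUG is set")
}
```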