A container registry that uses the AT Protocol for manifest storage and S3 for blob storage.
0
fork

Configure Feed

Select the types of activity you want to include in your feed.

slog slog slog slog slog

+155 -151
+2 -1
pkg/appview/config.go
··· 9 9 "crypto/rand" 10 10 "encoding/hex" 11 11 "fmt" 12 + "log/slog" 12 13 "net/url" 13 14 "os" 14 15 "strconv" ··· 339 340 340 341 parsed, err := time.ParseDuration(envVal) 341 342 if err != nil { 342 - fmt.Printf("Warning: Invalid %s '%s', using default %s\n", envKey, envVal, defaultValue) 343 + slog.Warn("Invalid duration, using default", "env_key", envKey, "env_value", envVal, "default", defaultValue) 343 344 return defaultValue 344 345 } 345 346
+6 -6
pkg/appview/db/readonly.go
··· 3 3 import ( 4 4 "context" 5 5 "database/sql" 6 - "fmt" 6 + "log/slog" 7 7 "os" 8 8 "path/filepath" 9 9 "time" ··· 35 35 action == sqlite3.SQLITE_INSERT || action == sqlite3.SQLITE_DELETE || 36 36 action == sqlite3.SQLITE_SELECT { 37 37 if sensitiveTables[tableName] { 38 - fmt.Printf("SECURITY: Blocked access to sensitive table '%s' (action=%d)\n", tableName, action) 38 + slog.Warn("Blocked access to sensitive table", "component", "SECURITY", "table", tableName, "action", action) 39 39 return sqlite3.SQLITE_DENY 40 40 } 41 41 } ··· 65 65 // Ensure directory exists 66 66 dbDir := filepath.Dir(dbPath) 67 67 if err := os.MkdirAll(dbDir, 0700); err != nil { 68 - fmt.Printf("Warning: Failed to create UI database directory: %v\n", err) 68 + slog.Warn("Failed to create UI database directory", "error", err) 69 69 return nil, nil, nil 70 70 } 71 71 72 72 // Initialize read-write database (for writes and auth operations) 73 73 database, err := InitDB(dbPath) 74 74 if err != nil { 75 - fmt.Printf("Warning: Failed to initialize UI database: %v\n", err) 75 + slog.Warn("Failed to initialize UI database", "error", err) 76 76 return nil, nil, nil 77 77 } 78 78 ··· 81 81 // This prevents accidental writes and blocks access to sensitive tables even if SQL injection occurs 82 82 readOnlyDB, err := sql.Open(ReadOnlyDriverName, "file:"+dbPath+"?mode=ro") 83 83 if err != nil { 84 - fmt.Printf("Warning: Failed to open read-only database connection: %v\n", err) 84 + slog.Warn("Failed to open read-only database connection", "error", err) 85 85 return nil, nil, nil 86 86 } 87 87 88 - fmt.Printf("UI database (readonly) initialized at %s\n", dbPath) 88 + slog.Info("UI database initialized", "mode", "readonly", "path", dbPath) 89 89 90 90 // Create SQLite-backed session store 91 91 sessionStore := NewSessionStore(database)
+3 -2
pkg/appview/db/schema.go
··· 9 9 "embed" 10 10 "fmt" 11 11 "io/fs" 12 + "log/slog" 12 13 "path/filepath" 13 14 "sort" 14 15 "strconv" ··· 84 85 } 85 86 86 87 // Apply migration 87 - fmt.Printf("Applying migration %d: %s\n%s\n", m.Version, m.Name, m.Description) 88 + slog.Info("Applying migration", "version", m.Version, "name", m.Name, "description", m.Description) 88 89 if _, err := db.Exec(m.Query); err != nil { 89 90 return fmt.Errorf("failed to apply migration %d (%s): %w", m.Version, m.Name, err) 90 91 } ··· 94 95 return fmt.Errorf("failed to record migration %d: %w", m.Version, err) 95 96 } 96 97 97 - fmt.Printf("Migration %d applied successfully\n", m.Version) 98 + slog.Info("Migration applied successfully", "version", m.Version) 98 99 } 99 100 100 101 return nil
+8 -7
pkg/appview/db/session_store.go
··· 6 6 "database/sql" 7 7 "encoding/base64" 8 8 "fmt" 9 + "log/slog" 9 10 "net/http" 10 11 "time" 11 12 ) ··· 83 84 return nil, false 84 85 } 85 86 if err != nil { 86 - fmt.Printf("Warning: Failed to query session: %v\n", err) 87 + slog.Warn("Failed to query session", "error", err) 87 88 return nil, false 88 89 } 89 90 ··· 124 125 `, id) 125 126 126 127 if err != nil { 127 - fmt.Printf("Warning: Failed to delete session: %v\n", err) 128 + slog.Warn("Failed to delete session", "error", err) 128 129 } 129 130 } 130 131 ··· 136 137 `, did) 137 138 138 139 if err != nil { 139 - fmt.Printf("Warning: Failed to delete sessions for DID %s: %v\n", did, err) 140 + slog.Warn("Failed to delete sessions for DID", "did", did, "error", err) 140 141 return 141 142 } 142 143 143 144 deleted, _ := result.RowsAffected() 144 145 if deleted > 0 { 145 - fmt.Printf("Deleted %d UI session(s) for DID %s due to OAuth failure\n", deleted, did) 146 + slog.Info("Deleted UI sessions for DID due to OAuth failure", "count", deleted, "did", did) 146 147 } 147 148 } 148 149 ··· 154 155 `) 155 156 156 157 if err != nil { 157 - fmt.Printf("Warning: Failed to cleanup sessions: %v\n", err) 158 + slog.Warn("Failed to cleanup sessions", "error", err) 158 159 return 159 160 } 160 161 161 162 deleted, _ := result.RowsAffected() 162 163 if deleted > 0 { 163 - fmt.Printf("Cleaned up %d expired UI sessions\n", deleted) 164 + slog.Info("Cleaned up expired UI sessions", "count", deleted) 164 165 } 165 166 } 166 167 ··· 177 178 178 179 deleted, _ := result.RowsAffected() 179 180 if deleted > 0 { 180 - fmt.Printf("Cleaned up %d expired UI sessions\n", deleted) 181 + slog.Info("Cleaned up expired UI sessions", "count", deleted) 181 182 } 182 183 183 184 return nil
+14 -14
pkg/appview/handlers/api.go
··· 6 6 "encoding/json" 7 7 "errors" 8 8 "fmt" 9 - "log" 9 + "log/slog" 10 10 "net/http" 11 11 12 12 "atcr.io/pkg/appview/db" ··· 41 41 // Resolve owner's handle to DID 42 42 ownerDID, err := resolveIdentityToDID(r.Context(), h.Directory, handle) 43 43 if err != nil { 44 - log.Printf("StarRepository: Failed to resolve handle %s: %v", handle, err) 44 + slog.Warn("Failed to resolve handle for star", "handle", handle, "error", err) 45 45 http.Error(w, fmt.Sprintf("Failed to resolve handle: %v", err), http.StatusBadRequest) 46 46 return 47 47 } 48 48 49 49 // Get OAuth session for the authenticated user 50 - log.Printf("StarRepository: Getting OAuth session for user DID %s", user.DID) 50 + slog.Debug("Getting OAuth session for star", "user_did", user.DID) 51 51 session, err := h.Refresher.GetSession(r.Context(), user.DID) 52 52 if err != nil { 53 - log.Printf("StarRepository: Failed to get OAuth session for %s: %v", user.DID, err) 53 + slog.Warn("Failed to get OAuth session for star", "user_did", user.DID, "error", err) 54 54 http.Error(w, fmt.Sprintf("Failed to get OAuth session: %v", err), http.StatusUnauthorized) 55 55 return 56 56 } ··· 66 66 // Write star record to user's PDS 67 67 _, err = pdsClient.PutRecord(r.Context(), atproto.StarCollection, rkey, starRecord) 68 68 if err != nil { 69 - log.Printf("StarRepository: Failed to create star record: %v", err) 69 + slog.Error("Failed to create star record", "error", err) 70 70 http.Error(w, fmt.Sprintf("Failed to create star: %v", err), http.StatusInternalServerError) 71 71 return 72 72 } ··· 100 100 // Resolve owner's handle to DID 101 101 ownerDID, err := resolveIdentityToDID(r.Context(), h.Directory, handle) 102 102 if err != nil { 103 - log.Printf("UnstarRepository: Failed to resolve handle %s: %v", handle, err) 103 + slog.Warn("Failed to resolve handle for unstar", "handle", handle, "error", err) 104 104 http.Error(w, fmt.Sprintf("Failed to resolve handle: %v", err), http.StatusBadRequest) 105 105 return 106 106 
} 107 107 108 108 // Get OAuth session for the authenticated user 109 - log.Printf("UnstarRepository: Getting OAuth session for user DID %s", user.DID) 109 + slog.Debug("Getting OAuth session for unstar", "user_did", user.DID) 110 110 session, err := h.Refresher.GetSession(r.Context(), user.DID) 111 111 if err != nil { 112 - log.Printf("UnstarRepository: Failed to get OAuth session for %s: %v", user.DID, err) 112 + slog.Warn("Failed to get OAuth session for unstar", "user_did", user.DID, "error", err) 113 113 http.Error(w, fmt.Sprintf("Failed to get OAuth session: %v", err), http.StatusUnauthorized) 114 114 return 115 115 } ··· 120 120 121 121 // Delete star record from user's PDS 122 122 rkey := atproto.StarRecordKey(ownerDID, repository) 123 - log.Printf("UnstarRepository: Deleting star record for %s/%s (rkey: %s)", handle, repository, rkey) 123 + slog.Debug("Deleting star record", "handle", handle, "repository", repository, "rkey", rkey) 124 124 err = pdsClient.DeleteRecord(r.Context(), atproto.StarCollection, rkey) 125 125 if err != nil { 126 126 // If record doesn't exist, still return success (idempotent) 127 127 if !errors.Is(err, atproto.ErrRecordNotFound) { 128 - log.Printf("UnstarRepository: Failed to delete star record: %v", err) 128 + slog.Error("Failed to delete star record", "error", err) 129 129 http.Error(w, fmt.Sprintf("Failed to delete star: %v", err), http.StatusInternalServerError) 130 130 return 131 131 } 132 - log.Printf("UnstarRepository: Star record not found (already unstarred)") 132 + slog.Debug("Star record not found, already unstarred") 133 133 } 134 134 135 135 // Return success ··· 162 162 // Resolve owner's handle to DID 163 163 ownerDID, err := resolveIdentityToDID(r.Context(), h.Directory, handle) 164 164 if err != nil { 165 - log.Printf("CheckStar: Failed to resolve handle %s: %v", handle, err) 165 + slog.Warn("Failed to resolve handle for check star", "handle", handle, "error", err) 166 166 http.Error(w, fmt.Sprintf("Failed to 
resolve handle: %v", err), http.StatusBadRequest) 167 167 return 168 168 } ··· 170 170 // Get OAuth session for the authenticated user 171 171 session, err := h.Refresher.GetSession(r.Context(), user.DID) 172 172 if err != nil { 173 - log.Printf("CheckStar: Failed to get OAuth session for %s: %v", user.DID, err) 173 + slog.Debug("Failed to get OAuth session for check star", "user_did", user.DID, "error", err) 174 174 // No OAuth session - return not starred 175 175 w.Header().Set("Content-Type", "application/json") 176 176 json.NewEncoder(w).Encode(map[string]bool{"starred": false}) ··· 250 250 http.Error(w, "Manifest not found", http.StatusNotFound) 251 251 return 252 252 } 253 - log.Printf("GetManifestDetail error: %v", err) 253 + slog.Error("Failed to get manifest detail", "error", err) 254 254 http.Error(w, "Failed to fetch manifest", http.StatusInternalServerError) 255 255 return 256 256 }
+2 -2
pkg/appview/handlers/auth.go
··· 1 1 package handlers 2 2 3 3 import ( 4 - "fmt" 5 4 "html/template" 5 + "log/slog" 6 6 "net/http" 7 7 ) 8 8 ··· 13 13 14 14 func (h *LoginHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { 15 15 returnTo := r.URL.Query().Get("return_to") 16 - fmt.Printf("DEBUG [login]: GET request. return_to param=%s, full query=%s\n", returnTo, r.URL.RawQuery) 16 + slog.Debug("Login GET request", "return_to", returnTo, "query", r.URL.RawQuery) 17 17 if returnTo == "" { 18 18 returnTo = "/" 19 19 }
+7 -7
pkg/appview/handlers/logout.go
··· 1 1 package handlers 2 2 3 3 import ( 4 - "fmt" 4 + "log/slog" 5 5 "net/http" 6 6 7 7 "atcr.io/pkg/appview/db" ··· 32 32 // Parse DID for OAuth logout 33 33 did, err := syntax.ParseDID(uiSession.DID) 34 34 if err != nil { 35 - fmt.Printf("WARNING [logout]: Failed to parse DID %s: %v\n", uiSession.DID, err) 35 + slog.Warn("Failed to parse DID for logout", "component", "logout", "did", uiSession.DID, "error", err) 36 36 } else { 37 37 // Attempt to revoke OAuth tokens on PDS side 38 38 if uiSession.OAuthSessionID != "" { 39 39 // Call indigo's Logout to revoke tokens on PDS 40 40 if err := h.OAuthApp.GetClientApp().Logout(r.Context(), did, uiSession.OAuthSessionID); err != nil { 41 41 // Log error but don't block logout - best effort revocation 42 - fmt.Printf("WARNING [logout]: Failed to revoke OAuth tokens for %s on PDS: %v\n", uiSession.DID, err) 42 + slog.Warn("Failed to revoke OAuth tokens on PDS", "component", "logout", "did", uiSession.DID, "error", err) 43 43 } else { 44 - fmt.Printf("INFO [logout]: Successfully revoked OAuth tokens for %s on PDS\n", uiSession.DID) 44 + slog.Info("Successfully revoked OAuth tokens on PDS", "component", "logout", "did", uiSession.DID) 45 45 } 46 46 47 47 // Invalidate refresher cache to clear local access tokens 48 48 h.Refresher.InvalidateSession(uiSession.DID) 49 - fmt.Printf("INFO [logout]: Invalidated local OAuth cache for %s\n", uiSession.DID) 49 + slog.Info("Invalidated local OAuth cache", "component", "logout", "did", uiSession.DID) 50 50 51 51 // Delete OAuth session from database (cleanup, might already be done by Logout) 52 52 if err := h.OAuthStore.DeleteSession(r.Context(), did, uiSession.OAuthSessionID); err != nil { 53 - fmt.Printf("WARNING [logout]: Failed to delete OAuth session from database: %v\n", err) 53 + slog.Warn("Failed to delete OAuth session from database", "component", "logout", "error", err) 54 54 } 55 55 } else { 56 - fmt.Printf("WARNING [logout]: No OAuth session ID found for user %s\n", 
uiSession.DID) 56 + slog.Warn("No OAuth session ID found for user", "component", "logout", "did", uiSession.DID) 57 57 } 58 58 } 59 59 }
+4 -4
pkg/appview/handlers/repository.go
··· 4 4 "context" 5 5 "database/sql" 6 6 "html/template" 7 - "log" 7 + "log/slog" 8 8 "net/http" 9 9 "sync" 10 10 "time" ··· 139 139 // Fetch repository metadata from annotations table 140 140 metadata, err := db.GetRepositoryMetadata(h.DB, owner.DID, repository) 141 141 if err != nil { 142 - log.Printf("Failed to fetch repository metadata: %v", err) 142 + slog.Warn("Failed to fetch repository metadata", "error", err) 143 143 // Continue without metadata on error 144 144 } else { 145 145 repo.Title = metadata["org.opencontainers.image.title"] ··· 155 155 // Fetch star count 156 156 stats, err := db.GetRepositoryStats(h.DB, owner.DID, repository) 157 157 if err != nil { 158 - log.Printf("Failed to fetch repository stats: %v", err) 158 + slog.Warn("Failed to fetch repository stats", "error", err) 159 159 // Continue with zero stats on error 160 160 stats = &db.RepositoryStats{StarCount: 0} 161 161 } ··· 193 193 194 194 html, err := h.ReadmeCache.Get(ctx, repo.ReadmeURL) 195 195 if err != nil { 196 - log.Printf("Failed to fetch README from %s: %v", repo.ReadmeURL, err) 196 + slog.Warn("Failed to fetch README", "url", repo.ReadmeURL, "error", err) 197 197 // Continue without README on error 198 198 } else { 199 199 readmeHTML = template.HTML(html)
+6 -6
pkg/appview/handlers/settings.go
··· 1 1 package handlers 2 2 3 3 import ( 4 - "fmt" 5 4 "html/template" 5 + "log/slog" 6 6 "net/http" 7 7 "time" 8 8 ··· 30 30 session, err := h.Refresher.GetSession(r.Context(), user.DID) 31 31 if err != nil { 32 32 // OAuth session not found or expired - redirect to re-authenticate 33 - fmt.Printf("WARNING [settings]: OAuth session not found for %s: %v - redirecting to login\n", user.DID, err) 33 + slog.Warn("OAuth session not found, redirecting to login", "component", "settings", "did", user.DID, "error", err) 34 34 http.Redirect(w, r, "/auth/oauth/login?return_to=/settings", http.StatusFound) 35 35 return 36 36 } ··· 45 45 profile, err := storage.GetProfile(r.Context(), client) 46 46 if err != nil { 47 47 // Error fetching profile - log out user 48 - fmt.Printf("WARNING [settings]: Failed to fetch profile for %s: %v - logging out\n", user.DID, err) 48 + slog.Warn("Failed to fetch profile, logging out", "component", "settings", "did", user.DID, "error", err) 49 49 http.Redirect(w, r, "/auth/logout", http.StatusFound) 50 50 return 51 51 } 52 52 53 53 if profile == nil { 54 54 // Profile doesn't exist yet (404) - user needs to log out and back in to create it 55 - fmt.Printf("WARNING [settings]: Profile doesn't exist for %s - logging out\n", user.DID) 55 + slog.Warn("Profile doesn't exist, logging out", "component", "settings", "did", user.DID) 56 56 http.Redirect(w, r, "/auth/logout", http.StatusFound) 57 57 return 58 58 } 59 59 60 - fmt.Printf("DEBUG [settings]: Fetched profile for %s: defaultHold=%s\n", user.DID, profile.DefaultHold) 60 + slog.Debug("Fetched profile", "component", "settings", "did", user.DID, "default_hold", profile.DefaultHold) 61 61 62 62 data := struct { 63 63 PageData ··· 100 100 session, err := h.Refresher.GetSession(r.Context(), user.DID) 101 101 if err != nil { 102 102 // OAuth session not found or expired - redirect to re-authenticate 103 - fmt.Printf("WARNING [settings]: OAuth session not found for %s: %v - redirecting to login\n", 
user.DID, err) 103 + slog.Warn("OAuth session not found, redirecting to login", "component", "settings", "did", user.DID, "error", err) 104 104 http.Redirect(w, r, "/auth/oauth/login?return_to=/settings", http.StatusFound) 105 105 return 106 106 }
+15 -15
pkg/appview/holdhealth/worker.go
··· 4 4 "context" 5 5 "database/sql" 6 6 "fmt" 7 - "log" 7 + "log/slog" 8 8 "strings" 9 9 "sync" 10 10 "time" ··· 56 56 go func() { 57 57 defer w.wg.Done() 58 58 59 - log.Println("Hold health worker: Starting background health checks") 59 + slog.Info("Hold health worker starting background health checks") 60 60 61 61 // Wait for services to be ready (Docker startup race condition) 62 62 if w.startupDelay > 0 { 63 - log.Printf("Hold health worker: Waiting %s for services to be ready...", w.startupDelay) 63 + slog.Info("Hold health worker waiting for services to be ready", "delay", w.startupDelay) 64 64 select { 65 65 case <-time.After(w.startupDelay): 66 66 // Continue with initial check 67 67 case <-ctx.Done(): 68 - log.Println("Hold health worker: Context cancelled during startup delay") 68 + slog.Info("Hold health worker context cancelled during startup delay") 69 69 return 70 70 } 71 71 } ··· 76 76 for { 77 77 select { 78 78 case <-ctx.Done(): 79 - log.Println("Hold health worker: Context cancelled, stopping") 79 + slog.Info("Hold health worker context cancelled, stopping") 80 80 return 81 81 case <-w.stopChan: 82 - log.Println("Hold health worker: Stop signal received") 82 + slog.Info("Hold health worker stop signal received") 83 83 return 84 84 case <-w.refreshTicker.C: 85 85 w.refreshAllHolds(ctx) 86 86 case <-w.cleanupTicker.C: 87 - log.Println("Hold health worker: Running cache cleanup") 87 + slog.Info("Hold health worker running cache cleanup") 88 88 w.checker.Cleanup() 89 89 } 90 90 } ··· 97 97 w.refreshTicker.Stop() 98 98 w.cleanupTicker.Stop() 99 99 w.wg.Wait() 100 - log.Println("Hold health worker: Stopped") 100 + slog.Info("Hold health worker stopped") 101 101 } 102 102 103 103 // refreshAllHolds queries the database for unique hold endpoints and refreshes their health status 104 104 func (w *Worker) refreshAllHolds(ctx context.Context) { 105 - log.Println("Hold health worker: Starting refresh cycle") 105 + slog.Info("Hold health worker starting 
refresh cycle") 106 106 107 107 // Get unique hold endpoints from database 108 108 endpoints, err := w.db.GetUniqueHoldEndpoints() 109 109 if err != nil { 110 - log.Printf("Hold health worker: Failed to fetch hold endpoints: %v", err) 110 + slog.Error("Hold health worker failed to fetch hold endpoints", "error", err) 111 111 return 112 112 } 113 113 114 114 if len(endpoints) == 0 { 115 - log.Println("Hold health worker: No hold endpoints to check") 115 + slog.Info("Hold health worker no hold endpoints to check") 116 116 return 117 117 } 118 118 119 - log.Printf("Hold health worker: Fetched %d hold endpoint entries from database", len(endpoints)) 119 + slog.Info("Hold health worker fetched hold endpoint entries from database", "count", len(endpoints)) 120 120 121 121 // Deduplicate endpoints by normalizing to canonical DID format 122 122 // This handles cases where the same hold is stored with different representations: ··· 141 141 uniqueEndpoints = append(uniqueEndpoints, normalizedDID) 142 142 } 143 143 144 - log.Printf("Hold health worker: Checking %d unique hold endpoints (deduplicated from %d)", len(uniqueEndpoints), len(endpoints)) 144 + slog.Info("Hold health worker checking unique hold endpoints", "unique_count", len(uniqueEndpoints), "total_count", len(endpoints)) 145 145 146 146 // Check health concurrently with rate limiting 147 147 // Use a semaphore to limit concurrent requests (max 10 at a time) ··· 174 174 reachable++ 175 175 } else { 176 176 unreachable++ 177 - log.Printf("Hold health worker: Hold unreachable: %s (error: %v)", ep, err) 177 + slog.Warn("Hold health worker hold unreachable", "endpoint", ep, "error", err) 178 178 } 179 179 statsMu.Unlock() 180 180 }(endpoint) ··· 183 183 // Wait for all checks to complete 184 184 wg.Wait() 185 185 186 - log.Printf("Hold health worker: Refresh complete - %d reachable, %d unreachable", reachable, unreachable) 186 + slog.Info("Hold health worker refresh complete", "reachable", reachable, "unreachable", 
unreachable) 187 187 } 188 188 189 189 // DBAdapter wraps sql.DB to implement DBQuerier interface
+20 -19
pkg/appview/jetstream/backfill.go
··· 5 5 "database/sql" 6 6 "encoding/json" 7 7 "fmt" 8 + "log/slog" 8 9 "strings" 9 10 "time" 10 11 ··· 52 53 53 54 // Start runs the backfill for all ATCR collections 54 55 func (b *BackfillWorker) Start(ctx context.Context) error { 55 - fmt.Println("Backfill: Starting sync-based backfill...") 56 + slog.Info("Backfill: Starting sync-based backfill...") 56 57 57 58 // First, query and cache the default hold's captain record 58 59 if b.defaultHoldDID != "" { 59 - fmt.Printf("Backfill: Querying default hold captain record: %s\n", b.defaultHoldDID) 60 + slog.Info("Backfill: Querying default hold captain record", "hold_did", b.defaultHoldDID) 60 61 if err := b.queryCaptainRecord(ctx, b.defaultHoldDID); err != nil { 61 - fmt.Printf("WARNING: Failed to query default hold captain record: %v\n", err) 62 + slog.Warn("Backfill: Failed to query default hold captain record", "error", err) 62 63 // Don't fail the whole backfill - just warn 63 64 } 64 65 } ··· 71 72 } 72 73 73 74 for _, collection := range collections { 74 - fmt.Printf("Backfill: Processing collection: %s\n", collection) 75 + slog.Info("Backfill: Processing collection", "collection", collection) 75 76 76 77 if err := b.backfillCollection(ctx, collection); err != nil { 77 78 return fmt.Errorf("failed to backfill collection %s: %w", collection, err) 78 79 } 79 80 80 - fmt.Printf("Backfill: Completed collection: %s\n", collection) 81 + slog.Info("Backfill: Completed collection", "collection", collection) 81 82 } 82 83 83 - fmt.Println("Backfill: All collections completed!") 84 + slog.Info("Backfill: All collections completed!") 84 85 return nil 85 86 } 86 87 ··· 98 99 return fmt.Errorf("failed to list repos: %w", err) 99 100 } 100 101 101 - fmt.Printf("Backfill: Found %d repos with %s (cursor: %s)\n", len(result.Repos), collection, repoCursor) 102 + slog.Info("Backfill: Found repos", "count", len(result.Repos), "collection", collection, "cursor", repoCursor) 102 103 103 104 // Process each repo (DID) 104 105 for _, repo := range result.Repos {
 105 106 recordCount, err := b.backfillRepo(ctx, repo.DID, collection) 106 107 if err != nil { 107 - fmt.Printf("WARNING: Failed to backfill repo %s: %v\n", repo.DID, err) 108 + slog.Warn("Backfill: Failed to backfill repo", "did", repo.DID, "error", err) 108 109 continue 109 110 } 110 111 ··· 112 113 processedRecords += recordCount 113 114 114 115 if processedRepos%10 == 0 { 115 - fmt.Printf("Backfill: Progress - %d repos, %d records\n", processedRepos, processedRecords) 116 + slog.Info("Backfill: Progress", "repos", processedRepos, "records", processedRecords) 116 117 } 117 118 } 118 119 ··· 124 125 repoCursor = result.Cursor 125 126 } 126 127 127 - fmt.Printf("Backfill: Collection %s complete - %d repos, %d records\n", collection, processedRepos, processedRecords) 128 + slog.Info("Backfill: Collection complete", "collection", collection, "repos", processedRepos, "records", processedRecords) 128 129 return nil 129 130 } 130 131 ··· 196 197 } 197 198 198 199 if err := b.processRecord(ctx, did, collection, &record); err != nil { 199 - fmt.Printf("WARNING: Failed to process record %s: %v\n", record.URI, err) 200 + slog.Warn("Backfill: Failed to process record", "uri", record.URI, "error", err) 200 201 continue 201 202 } 202 203 recordCount++ ··· 212 213 213 214 // Reconcile deletions - remove records from DB that no longer exist on PDS 214 215 if err := b.reconcileDeletions(did, collection, foundManifestDigests, foundTags, foundStars); err != nil { 215 - fmt.Printf("WARNING: Failed to reconcile deletions for %s: %v\n", did, err) 216 + slog.Warn("Backfill: Failed to reconcile deletions", "did", did, "error", err) 216 217 } 217 218 218 219 // After processing manifests, clean up orphaned tags (tags pointing to non-existent manifests) 219 220 if collection == atproto.ManifestCollection { 220 221 if err := db.CleanupOrphanedTags(b.db, did); err != nil { 221 - fmt.Printf("WARNING: Failed to cleanup orphaned tags for %s: %v\n", did, err) 222 + slog.Warn("Backfill: Failed to cleanup orphaned
tags", "did", did, "error", err) 222 223 } 223 224 224 225 // Reconcile annotations - ensure they come from newest manifest per repository 225 226 // This fixes out-of-order backfill where older manifests can overwrite newer annotations 226 227 if err := b.reconcileAnnotations(ctx, did, pdsClient); err != nil { 227 - fmt.Printf("WARNING: Failed to reconcile annotations for %s: %v\n", did, err) 228 + slog.Warn("Backfill: Failed to reconcile annotations", "did", did, "error", err) 228 229 } 229 230 } 230 231 ··· 249 250 // Log deletions 250 251 deleted := len(dbDigests) - len(foundManifestDigests) 251 252 if deleted > 0 { 252 - fmt.Printf("Backfill: Deleted %d orphaned manifests for %s\n", deleted, did) 253 + slog.Info("Backfill: Deleted orphaned manifests", "count", deleted, "did", did) 253 254 } 254 255 255 256 case atproto.TagCollection: ··· 267 268 // Log deletions 268 269 deleted := len(dbTags) - len(foundTags) 269 270 if deleted > 0 { 270 - fmt.Printf("Backfill: Deleted %d orphaned tags for %s\n", deleted, did) 271 + slog.Info("Backfill: Deleted orphaned tags", "count", deleted, "did", did) 271 272 } 272 273 273 274 case atproto.StarCollection: ··· 342 343 343 344 // Retry on connection errors (hold service might still be starting) 344 345 if attempt < maxRetries && strings.Contains(err.Error(), "connection refused") { 345 - fmt.Printf("Backfill: Hold not ready (attempt %d/%d), retrying in 2s...\n", attempt, maxRetries) 346 + slog.Info("Backfill: Hold not ready, retrying in 2s", "attempt", attempt, "max_retries", maxRetries) 346 347 time.Sleep(2 * time.Second) 347 348 continue 348 349 } ··· 364 365 return fmt.Errorf("failed to cache captain record: %w", err) 365 366 } 366 367 367 - fmt.Printf("Backfill: Cached captain record for hold %s (owner: %s)\n", holdDID, captainRecord.OwnerDID) 368 + slog.Info("Backfill: Cached captain record", "hold_did", holdDID, "owner_did", captainRecord.OwnerDID) 368 369 return nil 369 370 } 370 371 ··· 416 417 if err != nil { 417 418 
fmt.Printf("WARNING [backfill]: Failed to reconcile annotations for %s/%s: %v\n", did, repo, err) 418 419 } else { 419 - fmt.Printf("Backfill: Reconciled annotations for %s/%s from newest manifest %s\n", did, repo, newestManifest.Digest) 420 + slog.Info("Backfill: Reconciled annotations from newest manifest", "did", did, "repo", repo, "digest", newestManifest.Digest) 420 421 } 421 422 } 422 423
+3 -2
pkg/appview/jetstream/processor.go
··· 5 5 "database/sql" 6 6 "encoding/json" 7 7 "fmt" 8 + "log/slog" 8 9 "strings" 9 10 "time" 10 11 ··· 111 112 publicClient := atproto.NewClient("https://public.api.bsky.app", "", "") 112 113 profile, err := publicClient.GetActorProfile(ctx, resolvedDID) 113 114 if err != nil { 114 - fmt.Printf("WARNING [processor]: Failed to fetch profile for DID %s: %v\n", resolvedDID, err) 115 + slog.Warn("Failed to fetch profile", "component", "processor", "did", resolvedDID, "error", err) 115 116 // Continue without avatar 116 117 } else { 117 118 avatar = profile.Avatar ··· 307 308 // Convert hold URL/DID to canonical DID 308 309 holdDID := atproto.ResolveHoldDIDFromURL(profileRecord.DefaultHold) 309 310 if holdDID == "" { 310 - fmt.Printf("WARNING [processor]: Invalid hold reference in profile for %s: %s\n", did, profileRecord.DefaultHold) 311 + slog.Warn("Invalid hold reference in profile", "component", "processor", "did", did, "default_hold", profileRecord.DefaultHold) 311 312 return nil 312 313 } 313 314
+13 -12
pkg/appview/jetstream/worker.go
··· 8 8 "database/sql" 9 9 "encoding/json" 10 10 "fmt" 11 + "log/slog" 11 12 "net/url" 12 13 "sync" 13 14 "time" ··· 89 90 // Calculate lag (cursor is in microseconds) 90 91 now := time.Now().UnixMicro() 91 92 lagSeconds := float64(now-w.startCursor) / 1_000_000.0 92 - fmt.Printf("Jetstream: Starting from cursor %d (%.1f seconds behind live)\n", w.startCursor, lagSeconds) 93 + slog.Info("Jetstream: Starting from cursor", "cursor", w.startCursor, "lag_seconds", lagSeconds) 93 94 } 94 95 95 96 // Disable compression for now to debug ··· 138 139 } 139 140 defer decoder.Close() 140 141 141 - fmt.Println("Connected to Jetstream, listening for events...") 142 + slog.Info("Connected to Jetstream, listening for events...") 142 143 143 144 // Start heartbeat ticker to show Jetstream is alive 144 145 heartbeatTicker := time.NewTicker(30 * time.Second) ··· 169 170 170 171 // If no pong for 60 seconds, connection is likely dead 171 172 if timeSinceLastPong > 60*time.Second { 172 - fmt.Printf("Jetstream: No pong received for %s (sent %d pings, got %d pongs), closing connection\n", 173 + slog.Warn("Jetstream: No pong received, closing connection", 173 174 "since_last_pong", timeSinceLastPong, "pings", pingsTotal, "pongs", pongsTotal) 174 175 conn.Close() 175 176 return ··· 178 179 // Send ping with write deadline 179 180 conn.SetWriteDeadline(time.Now().Add(10 * time.Second)) 180 181 if err := conn.WriteMessage(websocket.PingMessage, nil); err != nil { 181 - fmt.Printf("Jetstream: Failed to send ping: %v\n", err) 182 + slog.Warn("Jetstream: Failed to send ping", "error", err) 182 183 conn.Close() 183 184 return 184 185 } ··· 200 201 return ctx.Err() 201 202 case <-heartbeatTicker.C: 202 203 elapsed := time.Since(lastHeartbeat) 203 - fmt.Printf("Jetstream: Alive (processed %d events in last %.0fs)\n", eventCount, elapsed.Seconds()) 204 + slog.Info("Jetstream: Alive", "events", eventCount, "elapsed_seconds", elapsed.Seconds()) 204 205 eventCount = 0 205 206
lastHeartbeat = time.Now() 206 207 default: ··· 236 237 } 237 238 238 239 // Log detailed context about the failure 239 - fmt.Printf("Jetstream: Connection closed after %s\n", connDuration) 240 + slog.Info("Jetstream: Connection closed", "duration", connDuration) 240 241 fmt.Printf(" - Events in last 30s: %d\n", eventCount) 241 242 fmt.Printf(" - Time since last event: %s\n", timeSinceLastEvent) 242 243 fmt.Printf(" - Ping/Pong: %d/%d (%.1f%% success)\n", pongsTotal, pingsTotal, pongRate) ··· 312 313 // Process based on collection 313 314 switch commit.Collection { 314 315 case atproto.ManifestCollection: 315 - fmt.Printf("Jetstream: Processing manifest event: did=%s, operation=%s, rkey=%s\n", 316 + slog.Info("Jetstream: Processing manifest event", 316 317 "did", commit.DID, "operation", commit.Operation, "rkey", commit.RKey) 317 318 return w.processManifest(commit) 318 319 case atproto.TagCollection: 319 - fmt.Printf("Jetstream: Processing tag event: did=%s, operation=%s, rkey=%s\n", 320 + slog.Info("Jetstream: Processing tag event", 320 321 "did", commit.DID, "operation", commit.Operation, "rkey", commit.RKey) 321 322 return w.processTag(commit) 322 323 case atproto.StarCollection: 323 - fmt.Printf("Jetstream: Processing star event: did=%s, operation=%s, rkey=%s\n", 324 + slog.Info("Jetstream: Processing star event", 324 325 "did", commit.DID, "operation", commit.Operation, "rkey", commit.RKey) 325 326 return w.processStar(commit) 326 327 default: ··· 372 373 if commit.Operation == "delete" { 373 374 // Delete tag - decode rkey back to repository and tag 374 375 repo, tag := atproto.RKeyToRepositoryTag(commit.RKey) 375 - fmt.Printf("Jetstream: Deleting tag: did=%s, repository=%s, tag=%s (from rkey=%s)\n", 376 + slog.Info("Jetstream: Deleting tag", 376 377 "did", commit.DID, "repository", repo, "tag", tag, "rkey", commit.RKey) 377 378 if err := db.DeleteTag(w.db, commit.DID, repo, tag); err != nil { 378 - fmt.Printf("Jetstream: ERROR 
deleting tag: %v\n", err) 379 + slog.Error("Jetstream: Failed to delete tag", "error", err) 379 380 return err 380 381 } 381 - fmt.Printf("Jetstream: Successfully deleted tag: did=%s, repository=%s, tag=%s\n", 382 + slog.Info("Jetstream: Successfully deleted tag", 382 383 "did", commit.DID, "repository", repo, "tag", tag) 383 384 } 384 385 }
+10 -9
pkg/appview/middleware/registry.go
··· 4 4 "context" 5 5 "encoding/json" 6 6 "fmt" 7 + "log/slog" 7 8 "strings" 8 9 "sync" 9 10 ··· 160 161 return nil, fmt.Errorf("no PDS endpoint found for %s", identityStr) 161 162 } 162 163 163 - fmt.Printf("DEBUG [registry/middleware]: Resolved identity: did=%s, pds=%s, handle=%s\n", did, pdsEndpoint, handle) 164 + slog.Debug("Resolved identity", "component", "registry/middleware", "did", did, "pds", pdsEndpoint, "handle", handle) 164 165 165 166 // Query for hold DID - either user's hold or default hold service 166 167 holdDID := nr.findHoldDID(ctx, did, pdsEndpoint) ··· 174 175 // This ensures users can push immediately after docker login without web sign-in 175 176 // EnsureCrewMembership is best-effort and logs errors without failing the request 176 177 if holdDID != "" && nr.refresher != nil { 177 - fmt.Printf("DEBUG [registry/middleware]: Auto-reconciling crew membership for DID=%s at hold=%s\n", did, holdDID) 178 + slog.Debug("Auto-reconciling crew membership", "component", "registry/middleware", "did", did, "hold_did", holdDID) 178 179 client := atproto.NewClient(pdsEndpoint, did, "") 179 180 storage.EnsureCrewMembership(ctx, client, nr.refresher, holdDID) 180 181 } ··· 185 186 var err error 186 187 serviceToken, err = token.GetOrFetchServiceToken(ctx, nr.refresher, did, holdDID, pdsEndpoint) 187 188 if err != nil { 188 - fmt.Printf("ERROR [registry/middleware]: Failed to get service token for DID=%s: %v\n", did, err) 189 - fmt.Printf("ERROR [registry/middleware]: User needs to re-authenticate via credential helper\n") 189 + slog.Error("Failed to get service token", "component", "registry/middleware", "did", did, "error", err) 190 + slog.Error("User needs to re-authenticate via credential helper", "component", "registry/middleware") 190 191 return nil, nr.authErrorMessage("OAuth session expired") 191 192 } 192 193 } ··· 219 220 apiClient := session.APIClient() 220 221 atprotoClient = atproto.NewClientWithIndigoClient(pdsEndpoint, did, apiClient) 221 222 } 
else { 222 - fmt.Printf("DEBUG [registry/middleware]: OAuth refresh failed for DID=%s: %v, falling back to Basic Auth\n", did, err) 223 + slog.Debug("OAuth refresh failed, falling back to Basic Auth", "component", "registry/middleware", "did", did, "error", err) 223 224 } 224 225 } 225 226 ··· 227 228 if atprotoClient == nil { 228 229 accessToken, ok := auth.GetGlobalTokenCache().Get(did) 229 230 if !ok { 230 - fmt.Printf("DEBUG [registry/middleware]: No cached access token found for DID=%s (neither OAuth nor Basic Auth)\n", did) 231 + slog.Debug("No cached access token found (neither OAuth nor Basic Auth)", "component", "registry/middleware", "did", did) 231 232 accessToken = "" // Will fail on manifest push, but let it try 232 233 } else { 233 - fmt.Printf("DEBUG [registry/middleware]: Using Basic Auth access token for DID=%s (length=%d)\n", did, len(accessToken)) 234 + slog.Debug("Using Basic Auth access token", "component", "registry/middleware", "did", did, "token_length", len(accessToken)) 234 235 } 235 236 atprotoClient = atproto.NewClient(pdsEndpoint, did, accessToken) 236 237 } ··· 304 305 profile, err := storage.GetProfile(ctx, client) 305 306 if err != nil { 306 307 // Error reading profile (not a 404) - log and continue 307 - fmt.Printf("WARNING: failed to read profile for %s: %v\n", did, err) 308 + slog.Warn("Failed to read profile", "did", did, "error", err) 308 309 } 309 310 310 311 if profile != nil && profile.DefaultHold != "" { ··· 314 315 if nr.isHoldReachable(ctx, profile.DefaultHold) { 315 316 return profile.DefaultHold 316 317 } 317 - fmt.Printf("DEBUG [registry/middleware/testmode]: User's defaultHold %s unreachable, falling back to default\n", profile.DefaultHold) 318 + slog.Debug("User's defaultHold unreachable, falling back to default", "component", "registry/middleware/testmode", "default_hold", profile.DefaultHold) 318 319 return nr.defaultHoldDID 319 320 } 320 321 return profile.DefaultHold
+2 -3
pkg/appview/readme/cache.go
··· 7 7 import ( 8 8 "context" 9 9 "database/sql" 10 - "fmt" 10 + "log/slog" 11 11 "time" 12 12 ) 13 13 ··· 54 54 // Store in cache 55 55 if err := c.storeInDB(readmeURL, html); err != nil { 56 56 // Log error but don't fail - we have the content 57 - // In production, you'd use proper logging here 58 - fmt.Printf("Failed to cache README: %v\n", err) 57 + slog.Warn("Failed to cache README", "error", err) 59 58 } 60 59 61 60 return html, nil
+11 -10
pkg/appview/storage/manifest_store.go
··· 7 7 "errors" 8 8 "fmt" 9 9 "io" 10 + "log/slog" 10 11 "maps" 11 12 "net/http" 12 13 "strings" ··· 88 89 if s.ctx.Database != nil { 89 90 go func() { 90 91 if err := s.ctx.Database.IncrementPullCount(s.ctx.DID, s.ctx.Repository); err != nil { 91 - fmt.Printf("WARNING: Failed to increment pull count for %s/%s: %v\n", s.ctx.DID, s.ctx.Repository, err) 92 + slog.Warn("Failed to increment pull count", "did", s.ctx.DID, "repository", s.ctx.Repository, "error", err) 92 93 } 93 94 }() 94 95 } ··· 143 144 labels, err := s.extractConfigLabels(ctx, manifestRecord.Config.Digest) 144 145 if err != nil { 145 146 // Log error but don't fail the push - labels are optional 146 - fmt.Printf("WARNING: Failed to extract config labels: %v\n", err) 147 + slog.Warn("Failed to extract config labels", "error", err) 147 148 } else { 148 149 // Initialize annotations map if needed 149 150 if manifestRecord.Annotations == nil { ··· 153 154 // Copy labels to annotations (Dockerfile LABELs → manifest annotations) 154 155 maps.Copy(manifestRecord.Annotations, labels) 155 156 156 - fmt.Printf("DEBUG: Extracted %d labels from config blob\n", len(labels)) 157 + slog.Debug("Extracted labels from config blob", "count", len(labels)) 157 158 } 158 159 } 159 160 ··· 168 169 if s.ctx.Database != nil { 169 170 go func() { 170 171 if err := s.ctx.Database.IncrementPushCount(s.ctx.DID, s.ctx.Repository); err != nil { 171 - fmt.Printf("WARNING: Failed to increment push count for %s/%s: %v\n", s.ctx.DID, s.ctx.Repository, err) 172 + slog.Warn("Failed to increment push count", "did", s.ctx.DID, "repository", s.ctx.Repository, "error", err) 172 173 } 173 174 }() 174 175 } ··· 192 193 if tag != "" && s.ctx.ServiceToken != "" && s.ctx.Handle != "" { 193 194 go func() { 194 195 if err := s.notifyHoldAboutManifest(context.Background(), manifestRecord, tag, dgst.String()); err != nil { 195 - fmt.Printf("WARNING: Failed to notify hold about manifest: %v\n", err) 196 + slog.Warn("Failed to notify hold about 
manifest", "error", err) 196 197 } 197 198 }() 198 199 } ··· 362 363 // Parse response (optional logging) 363 364 var notifyResp map[string]any 364 365 if err := json.NewDecoder(resp.Body).Decode(&notifyResp); err == nil { 365 - fmt.Printf("INFO: Hold notification successful for %s:%s - %+v\n", s.ctx.Repository, tag, notifyResp) 366 + slog.Info("Hold notification successful", "repository", s.ctx.Repository, "tag", tag, "response", notifyResp) 366 367 } 367 368 368 369 return nil ··· 386 387 return 387 388 } 388 389 389 - fmt.Printf("INFO: Refreshing README cache for %s/%s from %s\n", s.ctx.DID, s.ctx.Repository, readmeURL) 390 + slog.Info("Refreshing README cache", "did", s.ctx.DID, "repository", s.ctx.Repository, "url", readmeURL) 390 391 391 392 // Invalidate the cached entry first 392 393 if err := s.ctx.ReadmeCache.Invalidate(readmeURL); err != nil { 393 - fmt.Printf("WARNING: Failed to invalidate README cache for %s: %v\n", readmeURL, err) 394 + slog.Warn("Failed to invalidate README cache", "url", readmeURL, "error", err) 394 395 // Continue anyway - Get() will still fetch fresh content 395 396 } 396 397 ··· 401 402 402 403 _, err := s.ctx.ReadmeCache.Get(ctxWithTimeout, readmeURL) 403 404 if err != nil { 404 - fmt.Printf("WARNING: Failed to refresh README cache for %s: %v\n", readmeURL, err) 405 + slog.Warn("Failed to refresh README cache", "url", readmeURL, "error", err) 405 406 // Not a critical error - cache will be refreshed on next page view 406 407 return 407 408 } 408 409 409 - fmt.Printf("INFO: README cache refreshed successfully for %s\n", readmeURL) 410 + slog.Info("README cache refreshed successfully", "url", readmeURL) 410 411 }
+5 -4
pkg/appview/storage/profile.go
··· 5 5 "encoding/json" 6 6 "errors" 7 7 "fmt" 8 + "log/slog" 8 9 "sync" 9 10 "time" 10 11 ··· 46 47 return fmt.Errorf("failed to create sailor profile: %w", err) 47 48 } 48 49 49 - fmt.Printf("DEBUG [profile]: Created sailor profile with defaultHold=%s\n", normalizedDID) 50 + slog.Debug("Created sailor profile", "component", "profile", "default_hold", normalizedDID) 50 51 return nil 51 52 } 52 53 ··· 95 96 // Update the profile on the PDS 96 97 profile.UpdatedAt = time.Now() 97 98 if err := UpdateProfile(ctx, client, &profile); err != nil { 98 - fmt.Printf("WARNING [profile]: Failed to persist URL-to-DID migration for %s: %v\n", did, err) 99 + slog.Warn("Failed to persist URL-to-DID migration", "component", "profile", "did", did, "error", err) 99 100 } else { 100 - fmt.Printf("DEBUG [profile]: Persisted defaultHold migration to DID: %s (for DID: %s)\n", migratedDID, did) 101 + slog.Debug("Persisted defaultHold migration to DID", "component", "profile", "migrated_did", migratedDID, "did", did) 101 102 } 102 103 }() 103 104 } ··· 113 114 // This ensures we always store DIDs, even if user provides a URL 114 115 if profile.DefaultHold != "" && !atproto.IsDID(profile.DefaultHold) { 115 116 profile.DefaultHold = atproto.ResolveHoldDIDFromURL(profile.DefaultHold) 116 - fmt.Printf("DEBUG [profile]: Normalized defaultHold to DID: %s\n", profile.DefaultHold) 117 + slog.Debug("Normalized defaultHold to DID", "component", "profile", "default_hold", profile.DefaultHold) 117 118 } 118 119 119 120 _, err := client.PutRecord(ctx, atproto.SailorProfileCollection, ProfileRKey, profile)
+19 -19
pkg/appview/storage/proxy_blob_store.go
··· 6 6 "encoding/json" 7 7 "fmt" 8 8 "io" 9 + "log/slog" 9 10 "net/http" 10 11 "sync" 11 12 "time" ··· 41 42 // Resolve DID to URL once at construction time 42 43 holdURL := atproto.ResolveHoldURL(ctx.HoldDID) 43 44 44 - fmt.Printf("DEBUG [proxy_blob_store]: NewProxyBlobStore created with holdDID=%s, holdURL=%s, userDID=%s, repo=%s\n", 45 - ctx.HoldDID, holdURL, ctx.DID, ctx.Repository) 45 + slog.Debug("NewProxyBlobStore created", "component", "proxy_blob_store", "hold_did", ctx.HoldDID, "hold_url", holdURL, "user_did", ctx.DID, "repo", ctx.Repository) 46 46 47 47 return &ProxyBlobStore{ 48 48 ctx: ctx, ··· 67 67 // Middleware fails fast with HTTP 401 if OAuth session is invalid 68 68 if p.ctx.ServiceToken == "" { 69 69 // Should never happen - middleware validates OAuth before handlers run 70 - fmt.Printf("ERROR [proxy_blob_store]: No service token in context for DID=%s\n", p.ctx.DID) 70 + slog.Error("No service token in context", "component", "proxy_blob_store", "did", p.ctx.DID) 71 71 return nil, fmt.Errorf("no service token available (middleware should have validated)") 72 72 } 73 73 ··· 99 99 return nil // No authorization check if authorizer not configured 100 100 } 101 101 102 - fmt.Printf("[checkWriteAccess] Checking write access for userDID=%s to holdDID=%s\n", p.ctx.DID, p.ctx.HoldDID) 102 + slog.Debug("Checking write access", "component", "proxy_blob_store", "user_did", p.ctx.DID, "hold_did", p.ctx.HoldDID) 103 103 allowed, err := p.ctx.Authorizer.CheckWriteAccess(ctx, p.ctx.HoldDID, p.ctx.DID) 104 104 if err != nil { 105 - fmt.Printf("[checkWriteAccess] Authorization check error: %v\n", err) 105 + slog.Error("Authorization check error", "component", "proxy_blob_store", "error", err) 106 106 return fmt.Errorf("authorization check failed: %w", err) 107 107 } 108 108 if !allowed { 109 - fmt.Printf("[checkWriteAccess] Write access DENIED for userDID=%s to holdDID=%s\n", p.ctx.DID, p.ctx.HoldDID) 109 + slog.Warn("Write access denied", "component", 
"proxy_blob_store", "user_did", p.ctx.DID, "hold_did", p.ctx.HoldDID) 110 110 return errcode.ErrorCodeDenied.WithMessage(fmt.Sprintf("write access denied to hold %s", p.ctx.HoldDID)) 111 111 } 112 - fmt.Printf("[checkWriteAccess] Write access ALLOWED for userDID=%s to holdDID=%s\n", p.ctx.DID, p.ctx.HoldDID) 112 + slog.Debug("Write access allowed", "component", "proxy_blob_store", "user_did", p.ctx.DID, "hold_did", p.ctx.HoldDID) 113 113 return nil 114 114 } 115 115 ··· 243 243 // Use Create() flow for all uploads (goes through multipart XRPC endpoints) 244 244 writer, err := p.Create(ctx) 245 245 if err != nil { 246 - fmt.Printf("[proxy_blob_store/Put] Failed to create writer: %v\n", err) 246 + slog.Error("Failed to create writer", "component", "proxy_blob_store/Put", "error", err) 247 247 return distribution.Descriptor{}, err 248 248 } 249 249 250 250 // Write the content 251 251 if _, err := writer.Write(content); err != nil { 252 252 writer.Cancel(ctx) 253 - fmt.Printf("[proxy_blob_store/Put] Failed to write content: %v\n", err) 253 + slog.Error("Failed to write content", "component", "proxy_blob_store/Put", "error", err) 254 254 return distribution.Descriptor{}, err 255 255 } 256 256 ··· 261 261 MediaType: mediaType, 262 262 }) 263 263 if err != nil { 264 - fmt.Printf("[proxy_blob_store/Put] Failed to commit: %v\n", err) 264 + slog.Error("Failed to commit", "component", "proxy_blob_store/Put", "error", err) 265 265 return distribution.Descriptor{}, err 266 266 } 267 267 268 - fmt.Printf("[proxy_blob_store/Put] Upload successful: digest=%s, size=%d\n", dgst, len(content)) 268 + slog.Debug("Upload successful", "component", "proxy_blob_store/Put", "digest", dgst, "size", len(content)) 269 269 return desc, nil 270 270 } 271 271 ··· 393 393 return "", fmt.Errorf("hold service returned empty URL") 394 394 } 395 395 396 - fmt.Printf("DEBUG [proxy_blob_store]: Got presigned HEAD URL from hold service: %s\n", result.URL) 396 + slog.Debug("Got presigned HEAD URL from 
hold service", "component", "proxy_blob_store", "url", result.URL) 397 397 return result.URL, nil 398 398 } 399 399 ··· 676 676 ETag: etag, 677 677 }) 678 678 679 - fmt.Printf("[flushPart] Part %d uploaded successfully: ETag=%s\n", w.partNumber, etag) 679 + slog.Debug("Part uploaded successfully", "component", "proxy_blob_store/flushPart", "part_number", w.partNumber, "etag", etag) 680 680 681 681 // Reset buffer and increment part number 682 682 w.buffer.Reset() ··· 734 734 735 735 // Flush any remaining buffered data 736 736 if w.buffer.Len() > 0 { 737 - fmt.Printf("[Commit] Flushing final buffer: %d bytes\n", w.buffer.Len()) 737 + slog.Debug("Flushing final buffer", "component", "proxy_blob_store/Commit", "bytes", w.buffer.Len()) 738 738 if err := w.flushPart(); err != nil { 739 739 // Try to abort multipart on error 740 740 tempDigest := fmt.Sprintf("uploads/temp-%s", w.id) ··· 745 745 746 746 // Complete multipart upload - XRPC complete action handles move internally 747 747 // Send the real digest (not tempDigest) so hold can move temp → final location 748 - fmt.Printf("🔒 [Commit] Completing multipart upload: uploadID=%s, parts=%d, digest=%s\n", w.uploadID, len(w.parts), desc.Digest) 748 + slog.Info("Completing multipart upload", "component", "proxy_blob_store/Commit", "upload_id", w.uploadID, "parts", len(w.parts), "digest", desc.Digest) 749 749 if err := w.store.completeMultipartUpload(ctx, desc.Digest.String(), w.uploadID, w.parts); err != nil { 750 750 return distribution.Descriptor{}, fmt.Errorf("failed to complete multipart upload: %w", err) 751 751 } 752 752 753 - fmt.Printf("[Commit] Upload completed successfully: digest=%s, size=%d, parts=%d\n", desc.Digest, w.size, len(w.parts)) 753 + slog.Info("Upload completed successfully", "component", "proxy_blob_store/Commit", "digest", desc.Digest, "size", w.size, "parts", len(w.parts)) 754 754 755 755 return distribution.Descriptor{ 756 756 Digest: desc.Digest, ··· 763 763 func (w *ProxyBlobWriter) 
Cancel(ctx context.Context) error { 764 764 w.closed = true 765 765 766 - fmt.Printf("[Cancel] Cancelling upload: id=%s\n", w.id) 766 + slog.Debug("Cancelling upload", "component", "proxy_blob_store/Cancel", "id", w.id) 767 767 768 768 // Remove from global uploads map 769 769 globalUploadsMu.Lock() ··· 773 773 // Abort multipart upload 774 774 tempDigest := fmt.Sprintf("uploads/temp-%s", w.id) 775 775 if err := w.store.abortMultipartUpload(ctx, tempDigest, w.uploadID); err != nil { 776 - fmt.Printf("⚠️ [Cancel] Failed to abort multipart upload: %v\n", err) 776 + slog.Warn("Failed to abort multipart upload", "component", "proxy_blob_store/Cancel", "error", err) 777 777 // Continue anyway - we want to mark upload as cancelled 778 778 } 779 779 780 - fmt.Printf("[Cancel] Upload cancelled: id=%s\n", w.id) 780 + slog.Debug("Upload cancelled", "component", "proxy_blob_store/Cancel", "id", w.id) 781 781 return nil 782 782 } 783 783
+5 -9
pkg/appview/storage/routing_repository.go
··· 6 6 7 7 import ( 8 8 "context" 9 - "fmt" 9 + "log/slog" 10 10 "time" 11 11 12 12 "github.com/distribution/distribution/v3" ··· 46 46 if holdDID := r.manifestStore.GetLastFetchedHoldDID(); holdDID != "" { 47 47 // Cache for 10 minutes - should cover typical pull operations 48 48 GetGlobalHoldCache().Set(r.Ctx.DID, r.Ctx.Repository, holdDID, 10*time.Minute) 49 - fmt.Printf("DEBUG [storage/routing]: Cached hold DID: did=%s, repo=%s, hold=%s\n", 50 - r.Ctx.DID, r.Ctx.Repository, holdDID) 49 + slog.Debug("Cached hold DID", "component", "storage/routing", "did", r.Ctx.DID, "repo", r.Ctx.Repository, "hold", holdDID) 51 50 } 52 51 }() 53 52 ··· 59 58 func (r *RoutingRepository) Blobs(ctx context.Context) distribution.BlobStore { 60 59 // Return cached blob store if available 61 60 if r.blobStore != nil { 62 - fmt.Printf("DEBUG [storage/blobs]: Returning cached blob store for did=%s, repo=%s\n", 63 - r.Ctx.DID, r.Ctx.Repository) 61 + slog.Debug("Returning cached blob store", "component", "storage/blobs", "did", r.Ctx.DID, "repo", r.Ctx.Repository) 64 62 return r.blobStore 65 63 } 66 64 ··· 71 69 if cachedHoldDID, ok := GetGlobalHoldCache().Get(r.Ctx.DID, r.Ctx.Repository); ok { 72 70 // Use cached hold DID from manifest 73 71 holdDID = cachedHoldDID 74 - fmt.Printf("DEBUG [storage/blobs]: Using cached hold from manifest: did=%s, repo=%s, hold=%s\n", 75 - r.Ctx.DID, r.Ctx.Repository, cachedHoldDID) 72 + slog.Debug("Using cached hold from manifest", "component", "storage/blobs", "did", r.Ctx.DID, "repo", r.Ctx.Repository, "hold", cachedHoldDID) 76 73 } else { 77 74 // No cached hold, use discovery-based DID (for push or first pull) 78 - fmt.Printf("DEBUG [storage/blobs]: Using discovery-based hold: did=%s, repo=%s, hold=%s\n", 79 - r.Ctx.DID, r.Ctx.Repository, holdDID) 75 + slog.Debug("Using discovery-based hold", "component", "storage/blobs", "did", r.Ctx.DID, "repo", r.Ctx.Repository, "hold", holdDID) 80 76 } 81 77 82 78 if holdDID == "" {