A container registry that uses the AT Protocol for manifest storage and S3 for blob storage. atcr.io
docker container atproto go
72
fork

Configure Feed

Select the types of activity you want to include in your feed.

add identity invalidation to logstream. refresh avatar urls on login

+821 -43
+19 -7
cmd/appview/serve.go
··· 265 265 } 266 266 267 267 // Store user in database (with or without avatar) 268 - err = db.UpsertUserIgnoreAvatar(uiDatabase, &db.User{ 269 - DID: did, 270 - Handle: handle, 271 - PDSEndpoint: pdsEndpoint, 272 - Avatar: avatarURL, 273 - LastSeen: time.Now(), 274 - }) 268 + // Use UpsertUser if we successfully fetched an avatar (to update existing users) 269 + // Use UpsertUserIgnoreAvatar if fetch failed (to preserve existing avatars) 270 + if avatarURL != "" { 271 + err = db.UpsertUser(uiDatabase, &db.User{ 272 + DID: did, 273 + Handle: handle, 274 + PDSEndpoint: pdsEndpoint, 275 + Avatar: avatarURL, 276 + LastSeen: time.Now(), 277 + }) 278 + } else { 279 + err = db.UpsertUserIgnoreAvatar(uiDatabase, &db.User{ 280 + DID: did, 281 + Handle: handle, 282 + PDSEndpoint: pdsEndpoint, 283 + Avatar: avatarURL, 284 + LastSeen: time.Now(), 285 + }) 286 + } 275 287 if err != nil { 276 288 slog.Warn("Failed to store user in database", "component", "appview/callback", "error", err) 277 289 return nil // Non-fatal
+9
pkg/appview/db/queries.go
··· 374 374 return err 375 375 } 376 376 377 + // UpdateUserHandle updates a user's handle when an identity change event is received 378 + // This is called when Jetstream receives an identity event indicating a handle change 379 + func UpdateUserHandle(db *sql.DB, did string, newHandle string) error { 380 + _, err := db.Exec(` 381 + UPDATE users SET handle = ?, last_seen = ? WHERE did = ? 382 + `, newHandle, time.Now(), did) 383 + return err 384 + } 385 + 377 386 // GetManifestDigestsForDID returns all manifest digests for a DID 378 387 func GetManifestDigestsForDID(db *sql.DB, did string) ([]string, error) { 379 388 rows, err := db.Query(`
+75
pkg/appview/db/queries_test.go
··· 977 977 // Don't use manifestID1 since it's not accessed after assignment 978 978 _ = manifestID1 979 979 } 980 + 981 + func TestUpdateUserHandle(t *testing.T) { 982 + // Create in-memory test database 983 + db, err := InitDB(":memory:") 984 + if err != nil { 985 + t.Fatalf("Failed to init database: %v", err) 986 + } 987 + defer db.Close() 988 + 989 + // Setup: Create test user 990 + testUser := &User{ 991 + DID: "did:plc:alice123", 992 + Handle: "alice.bsky.social", 993 + PDSEndpoint: "https://bsky.social", 994 + Avatar: "https://example.com/avatar.jpg", 995 + LastSeen: time.Now(), 996 + } 997 + err = UpsertUser(db, testUser) 998 + if err != nil { 999 + t.Fatalf("Failed to create test user: %v", err) 1000 + } 1001 + 1002 + // Test 1: Update handle for existing user 1003 + newHandle := "alice-new.bsky.social" 1004 + err = UpdateUserHandle(db, testUser.DID, newHandle) 1005 + if err != nil { 1006 + t.Fatalf("Failed to update user handle: %v", err) 1007 + } 1008 + 1009 + // Verify handle was updated 1010 + retrieved, err := GetUserByDID(db, testUser.DID) 1011 + if err != nil { 1012 + t.Fatalf("Failed to get user after handle update: %v", err) 1013 + } 1014 + if retrieved == nil { 1015 + t.Fatal("Expected user to be found, got nil") 1016 + } 1017 + if retrieved.Handle != newHandle { 1018 + t.Errorf("Expected handle '%s', got '%s'", newHandle, retrieved.Handle) 1019 + } 1020 + 1021 + // Verify other fields unchanged 1022 + if retrieved.DID != testUser.DID { 1023 + t.Errorf("DID changed unexpectedly: %s -> %s", testUser.DID, retrieved.DID) 1024 + } 1025 + if retrieved.PDSEndpoint != testUser.PDSEndpoint { 1026 + t.Errorf("PDS endpoint changed unexpectedly") 1027 + } 1028 + if retrieved.Avatar != testUser.Avatar { 1029 + t.Errorf("Avatar changed unexpectedly") 1030 + } 1031 + 1032 + // Test 2: Update handle for non-existent user (should not error, but no rows affected) 1033 + err = UpdateUserHandle(db, "did:plc:nonexistent", "new.handle.social") 1034 + if err != nil { 1035 + t.Errorf("Expected no error for non-existent user, got: %v", err) 1036 + } 1037 + 1038 + // Test 3: Update handle multiple times 1039 + handles := []string{"alice1.bsky.social", "alice2.bsky.social", "alice3.bsky.social"} 1040 + for _, handle := range handles { 1041 + err = UpdateUserHandle(db, testUser.DID, handle) 1042 + if err != nil { 1043 + t.Fatalf("Failed to update handle to '%s': %v", handle, err) 1044 + } 1045 + 1046 + retrieved, err = GetUserByDID(db, testUser.DID) 1047 + if err != nil { 1048 + t.Fatalf("Failed to retrieve user: %v", err) 1049 + } 1050 + if retrieved.Handle != handle { 1051 + t.Errorf("Expected handle '%s', got '%s'", handle, retrieved.Handle) 1052 + } 1053 + } 1054 + }
+75 -1
pkg/appview/jetstream/processor.go
··· 87 87 p.userCache.cache[did] = user 88 88 } 89 89 90 - // Upsert to database - preserve existing avatar if fetch failed 90 + // Upsert to database 91 + // Use UpsertUser if we successfully fetched an avatar (to update existing users) 92 + // Use UpsertUserIgnoreAvatar if fetch failed (to preserve existing avatars) 93 + if avatarURL != "" { 94 + return db.UpsertUser(p.db, user) 95 + } 91 96 return db.UpsertUserIgnoreAvatar(p.db, user) 92 97 } 93 98 ··· 275 280 276 281 return nil 277 282 } 283 + 284 + // ProcessIdentity handles identity change events (handle updates) 285 + // This is called when Jetstream receives an identity event indicating a handle change. 286 + // The identity cache is invalidated to ensure the next lookup uses the new handle, 287 + // and the database is updated to reflect the change in the UI. 288 + func (p *Processor) ProcessIdentity(ctx context.Context, did string, newHandle string) error { 289 + // Update handle in database 290 + if err := db.UpdateUserHandle(p.db, did, newHandle); err != nil { 291 + slog.Warn("Failed to update user handle in database", 292 + "component", "processor", 293 + "did", did, 294 + "handle", newHandle, 295 + "error", err) 296 + // Continue to invalidate cache even if DB update fails 297 + } 298 + 299 + // Invalidate cached identity data to force re-resolution on next lookup 300 + if err := atproto.InvalidateIdentity(ctx, did); err != nil { 301 + slog.Warn("Failed to invalidate identity cache", 302 + "component", "processor", 303 + "did", did, 304 + "error", err) 305 + return err 306 + } 307 + 308 + slog.Info("Processed identity change event", 309 + "component", "processor", 310 + "did", did, 311 + "new_handle", newHandle) 312 + 313 + return nil 314 + } 315 + 316 + // ProcessAccount handles account status events (deactivation/reactivation) 317 + // This is called when Jetstream receives an account event indicating status changes. 318 + // 319 + // IMPORTANT: Deactivation events are ambiguous - they could indicate: 320 + // 1. Permanent account deactivation (user deleted account) 321 + // 2. PDS migration (account deactivated at old PDS, reactivated at new PDS) 322 + // 323 + // We DO NOT delete user data on deactivation events. Instead, we invalidate the 324 + // identity cache. On the next resolution attempt: 325 + // - If migrated: Resolution finds the new PDS and updates the database automatically 326 + // - If truly deactivated: Resolution fails and user won't appear in new queries 327 + // 328 + // This approach prevents data loss from PDS migrations while still handling deactivations. 329 + func (p *Processor) ProcessAccount(ctx context.Context, did string, active bool, status string) error { 330 + // Only process deactivation events 331 + if active || status != "deactivated" { 332 + return nil 333 + } 334 + 335 + // Invalidate cached identity data to force re-resolution on next lookup 336 + // This will discover if the account was migrated (new PDS) or truly deactivated (resolution fails) 337 + if err := atproto.InvalidateIdentity(ctx, did); err != nil { 338 + slog.Warn("Failed to invalidate identity cache for deactivated account", 339 + "component", "processor", 340 + "did", did, 341 + "error", err) 342 + return err 343 + } 344 + 345 + slog.Info("Processed account deactivation event - cache invalidated", 346 + "component", "processor", 347 + "did", did, 348 + "status", status) 349 + 350 + return nil 351 + }
+142
pkg/appview/jetstream/processor_test.go
··· 549 549 t.Errorf("Expected 0 annotations for nil annotations, got %d", annotationCount) 550 550 } 551 551 } 552 + 553 + func TestProcessIdentity(t *testing.T) { 554 + db := setupTestDB(t) 555 + defer db.Close() 556 + 557 + processor := NewProcessor(db, false) 558 + 559 + // Setup: Create test user 560 + testDID := "did:plc:alice123" 561 + testHandle := "alice.bsky.social" 562 + testPDS := "https://bsky.social" 563 + _, err := db.Exec(` 564 + INSERT INTO users (did, handle, pds_endpoint, last_seen) 565 + VALUES (?, ?, ?, ?) 566 + `, testDID, testHandle, testPDS, time.Now()) 567 + if err != nil { 568 + t.Fatalf("Failed to insert test user: %v", err) 569 + } 570 + 571 + // Test 1: Process identity change event 572 + newHandle := "alice-new.bsky.social" 573 + err = processor.ProcessIdentity(context.Background(), testDID, newHandle) 574 + // Note: This will fail to invalidate cache since we don't have a real identity directory, 575 + // but we can still verify the database update happened 576 + if err != nil { 577 + t.Logf("Expected cache invalidation error (no real directory): %v", err) 578 + } 579 + 580 + // Verify handle was updated in database 581 + var retrievedHandle string 582 + err = db.QueryRow(` 583 + SELECT handle FROM users WHERE did = ? 584 + `, testDID).Scan(&retrievedHandle) 585 + if err != nil { 586 + t.Fatalf("Failed to query updated user: %v", err) 587 + } 588 + if retrievedHandle != newHandle { 589 + t.Errorf("Expected handle '%s', got '%s'", newHandle, retrievedHandle) 590 + } 591 + 592 + // Test 2: Process identity change for non-existent user 593 + // Should not error (UPDATE just affects 0 rows) 594 + err = processor.ProcessIdentity(context.Background(), "did:plc:nonexistent", "new.handle") 595 + if err != nil { 596 + t.Logf("Expected cache invalidation error: %v", err) 597 + } 598 + 599 + // Test 3: Process multiple identity changes 600 + handles := []string{"alice1.bsky.social", "alice2.bsky.social", "alice3.bsky.social"} 601 + for _, handle := range handles { 602 + err = processor.ProcessIdentity(context.Background(), testDID, handle) 603 + if err != nil { 604 + t.Logf("Expected cache invalidation error: %v", err) 605 + } 606 + 607 + err = db.QueryRow(` 608 + SELECT handle FROM users WHERE did = ? 609 + `, testDID).Scan(&retrievedHandle) 610 + if err != nil { 611 + t.Fatalf("Failed to query user after handle update: %v", err) 612 + } 613 + if retrievedHandle != handle { 614 + t.Errorf("Expected handle '%s', got '%s'", handle, retrievedHandle) 615 + } 616 + } 617 + } 618 + 619 + func TestProcessAccount(t *testing.T) { 620 + db := setupTestDB(t) 621 + defer db.Close() 622 + 623 + processor := NewProcessor(db, false) 624 + 625 + // Setup: Create test user 626 + testDID := "did:plc:bob456" 627 + testHandle := "bob.bsky.social" 628 + testPDS := "https://bsky.social" 629 + _, err := db.Exec(` 630 + INSERT INTO users (did, handle, pds_endpoint, last_seen) 631 + VALUES (?, ?, ?, ?) 632 + `, testDID, testHandle, testPDS, time.Now()) 633 + if err != nil { 634 + t.Fatalf("Failed to insert test user: %v", err) 635 + } 636 + 637 + // Test 1: Process account deactivation event 638 + err = processor.ProcessAccount(context.Background(), testDID, false, "deactivated") 639 + // Note: Cache invalidation will fail without real directory, but that's expected 640 + if err != nil { 641 + t.Logf("Expected cache invalidation error (no real directory): %v", err) 642 + } 643 + 644 + // Verify user still exists in database (we don't delete on deactivation) 645 + var exists bool 646 + err = db.QueryRow(` 647 + SELECT EXISTS(SELECT 1 FROM users WHERE did = ?) 648 + `, testDID).Scan(&exists) 649 + if err != nil { 650 + t.Fatalf("Failed to check if user exists: %v", err) 651 + } 652 + if !exists { 653 + t.Error("User should still exist after deactivation event (no deletion)") 654 + } 655 + 656 + // Test 2: Process account with active=true (should be ignored) 657 + err = processor.ProcessAccount(context.Background(), testDID, true, "active") 658 + if err != nil { 659 + t.Errorf("Expected no error for active account, got: %v", err) 660 + } 661 + 662 + // Test 3: Process account with status != "deactivated" (should be ignored) 663 + err = processor.ProcessAccount(context.Background(), testDID, false, "suspended") 664 + if err != nil { 665 + t.Errorf("Expected no error for non-deactivated status, got: %v", err) 666 + } 667 + 668 + // Test 4: Process account deactivation for non-existent user 669 + err = processor.ProcessAccount(context.Background(), "did:plc:nonexistent", false, "deactivated") 670 + // Cache invalidation will fail, but that's expected 671 + if err != nil { 672 + t.Logf("Expected cache invalidation error: %v", err) 673 + } 674 + 675 + // Test 5: Process multiple deactivation events (idempotent) 676 + for i := 0; i < 3; i++ { 677 + err = processor.ProcessAccount(context.Background(), testDID, false, "deactivated") 678 + if err != nil { 679 + t.Logf("Expected cache invalidation error on iteration %d: %v", i, err) 680 + } 681 + } 682 + 683 + // User should still exist after multiple deactivations 684 + err = db.QueryRow(` 685 + SELECT EXISTS(SELECT 1 FROM users WHERE did = ?) 686 + `, testDID).Scan(&exists) 687 + if err != nil { 688 + t.Fatalf("Failed to check if user exists after multiple deactivations: %v", err) 689 + } 690 + if !exists { 691 + t.Error("User should still exist after multiple deactivation events") 692 + } 693 + }
+75 -27
pkg/appview/jetstream/worker.go
··· 284 284 w.eventCallback(event.TimeUS) 285 285 } 286 286 287 - // Only process commit events 288 - if event.Kind != "commit" { 289 - return nil 290 - } 287 + // Process based on event kind 288 + switch event.Kind { 289 + case "commit": 290 + commit := event.Commit 291 + if commit == nil { 292 + return nil 293 + } 294 + 295 + // Set DID on commit from parent event 296 + commit.DID = event.DID 297 + 298 + // Debug: log first few collections we see to understand what's coming through 299 + if w.debugCollectionCount < 5 { 300 + slog.Debug("Jetstream received collection", "collection", commit.Collection, "did", commit.DID) 301 + w.debugCollectionCount++ 302 + } 291 303 292 - commit := event.Commit 293 - if commit == nil { 294 - return nil 295 - } 304 + // Process based on collection 305 + switch commit.Collection { 306 + case atproto.ManifestCollection: 307 + slog.Info("Jetstream processing manifest event", "did", commit.DID, "operation", commit.Operation, "rkey", commit.RKey) 308 + return w.processManifest(commit) 309 + case atproto.TagCollection: 310 + slog.Info("Jetstream processing tag event", "did", commit.DID, "operation", commit.Operation, "rkey", commit.RKey) 311 + return w.processTag(commit) 312 + case atproto.StarCollection: 313 + slog.Info("Jetstream processing star event", "did", commit.DID, "operation", commit.Operation, "rkey", commit.RKey) 314 + return w.processStar(commit) 315 + default: 316 + // Ignore other collections 317 + return nil 318 + } 296 319 297 - // Set DID on commit from parent event 298 - commit.DID = event.DID 320 + case "identity": 321 + if event.Identity == nil { 322 + return nil 323 + } 324 + return w.processIdentity(&event) 299 325 300 - // Debug: log first few collections we see to understand what's coming through 301 - if w.debugCollectionCount < 5 { 302 - slog.Debug("Jetstream received collection", "collection", commit.Collection, "did", commit.DID) 303 - w.debugCollectionCount++ 304 - } 326 + case "account": 327 + if event.Account == nil { 328 + return nil 329 + } 330 + return w.processAccount(&event) 305 331 306 - // Process based on collection 307 - switch commit.Collection { 308 - case atproto.ManifestCollection: 309 - slog.Info("Jetstream processing manifest event", "did", commit.DID, "operation", commit.Operation, "rkey", commit.RKey) 310 - return w.processManifest(commit) 311 - case atproto.TagCollection: 312 - slog.Info("Jetstream processing tag event", "did", commit.DID, "operation", commit.Operation, "rkey", commit.RKey) 313 - return w.processTag(commit) 314 - case atproto.StarCollection: 315 - slog.Info("Jetstream processing star event", "did", commit.DID, "operation", commit.Operation, "rkey", commit.RKey) 316 - return w.processStar(commit) 317 332 default: 318 - // Ignore other collections 333 + // Ignore unknown event kinds 319 334 return nil 320 335 } 321 336 } ··· 419 434 420 435 // Use shared processor for DB operations 421 436 return w.processor.ProcessStar(context.Background(), commit.DID, recordBytes) 437 + } 438 + 439 + // processIdentity processes an identity event (handle change) 440 + func (w *Worker) processIdentity(event *JetstreamEvent) error { 441 + if event.Identity == nil { 442 + return nil 443 + } 444 + 445 + identity := event.Identity 446 + slog.Info("Jetstream processing identity event", 447 + "did", identity.DID, 448 + "handle", identity.Handle, 449 + "seq", identity.Seq) 450 + 451 + // Process via shared processor 452 + return w.processor.ProcessIdentity(context.Background(), identity.DID, identity.Handle) 453 + } 454 + 455 + // processAccount processes an account event (status change) 456 + func (w *Worker) processAccount(event *JetstreamEvent) error { 457 + if event.Account == nil { 458 + return nil 459 + } 460 + 461 + account := event.Account 462 + slog.Info("Jetstream processing account event", 463 + "did", account.DID, 464 + "active", account.Active, 465 + "status", account.Status, 466 + "seq", account.Seq) 467 + 468 + // Process via shared processor 469 + return w.processor.ProcessAccount(context.Background(), account.DID, account.Active, account.Status) 422 470 } 423 471 424 472 // JetstreamEvent represents a Jetstream event
+8 -5
pkg/atproto/directory.go
··· 17 17 directoryOnce sync.Once 18 18 ) 19 19 20 - // GetDirectory returns a shared identity.Directory instance with an 8-hour cache TTL. 21 - // This is based on indigo's DefaultDirectory() but with a reduced cache TTL 22 - // to allow faster recovery from PDS migrations (8h instead of 24h). 20 + // GetDirectory returns a shared identity.Directory instance with a 24-hour cache TTL. 21 + // This is based on indigo's DefaultDirectory() with event-driven cache invalidation. 22 + // 23 + // Cache entries are invalidated via Jetstream events (identity changes, account status) 24 + // which allows for a longer TTL while maintaining freshness. The Purge() method is called 25 + // when identity or account events are received, ensuring the cache reflects real-time changes. 23 26 // 24 27 // Using a shared instance ensures all identity lookups across the application 25 28 // use the same cache, which is more memory-efficient and provides better cache hit rates. ··· 48 51 } 49 52 // Cache configuration: 50 53 // - capacity: 250,000 entries 51 - // - hitTTL: 8 hours (reduced from indigo's default 24h for faster PDS migration recovery) 54 + // - hitTTL: 24 hours (event-driven invalidation via Jetstream provides freshness) 52 55 // - errTTL: 2 minutes 53 56 // - invalidHandleTTL: 5 minutes 54 - cached := identity.NewCacheDirectory(&base, 250_000, time.Hour*8, time.Minute*2, time.Minute*5) 57 + cached := identity.NewCacheDirectory(&base, 250_000, time.Hour*24, time.Minute*2, time.Minute*5) 55 58 sharedDirectory = &cached 56 59 }) 57 60 return sharedDirectory
+21 -3
pkg/atproto/resolver.go
··· 8 8 ) 9 9 10 10 // ResolveDIDToPDS resolves a DID to its PDS endpoint. 11 - // Uses the shared identity directory with 8h cache TTL. 11 + // Uses the shared identity directory with cache TTL and event-driven invalidation. 12 12 func ResolveDIDToPDS(ctx context.Context, did string) (string, error) { 13 13 directory := GetDirectory() 14 14 didParsed, err := syntax.ParseDID(did) ··· 30 30 } 31 31 32 32 // ResolveIdentity resolves an ATProto identifier (handle or DID) to DID, handle, and PDS endpoint. 33 - // Uses the shared identity directory with 8h cache TTL. 33 + // Uses the shared identity directory with cache TTL and event-driven invalidation. 34 34 // 35 35 // If the handle is invalid (handle.invalid), it returns the DID as the handle for display purposes. 36 36 // Returns: did, handle, pdsEndpoint, error ··· 64 64 } 65 65 66 66 // ResolveHandleToDID resolves a handle or DID to just the DID. 67 - // Uses the shared identity directory with 8h cache TTL. 67 + // Uses the shared identity directory with cache TTL and event-driven invalidation. 68 68 // This is useful when you only need the DID and don't care about handle/PDS. 69 69 func ResolveHandleToDID(ctx context.Context, identifier string) (string, error) { 70 70 directory := GetDirectory() ··· 80 80 81 81 return ident.DID.String(), nil 82 82 } 83 + 84 + // InvalidateIdentity purges cached identity data for a DID or handle. 85 + // This should be called when identity changes are detected (e.g., via Jetstream events) 86 + // to ensure the cache is refreshed on the next lookup. 87 + // 88 + // Use cases: 89 + // - Handle changes (identity events from Jetstream) 90 + // - Account deactivation/migration (account events from Jetstream) 91 + // - PDS migrations (deactivation followed by reactivation at new PDS) 92 + func InvalidateIdentity(ctx context.Context, identifier string) error { 93 + directory := GetDirectory() 94 + atID, err := syntax.ParseAtIdentifier(identifier) 95 + if err != nil { 96 + return fmt.Errorf("invalid identifier for cache invalidation: %w", err) 97 + } 98 + 99 + return directory.Purge(ctx, *atID) 100 + }
+397
pkg/logging/logger_test.go
··· 1 + package logging 2 + 3 + import ( 4 + "bytes" 5 + "log/slog" 6 + "strings" 7 + "testing" 8 + ) 9 + 10 + // captureLogOutput runs a function and captures slog output 11 + func captureLogOutput(level string, logFunc func()) string { 12 + var buf bytes.Buffer 13 + 14 + // Save original logger 15 + originalLogger := slog.Default() 16 + defer slog.SetDefault(originalLogger) 17 + 18 + // Parse level 19 + var logLevel slog.Level 20 + switch strings.ToLower(strings.TrimSpace(level)) { 21 + case "debug": 22 + logLevel = slog.LevelDebug 23 + case "info", "": 24 + logLevel = slog.LevelInfo 25 + case "warn", "warning": 26 + logLevel = slog.LevelWarn 27 + case "error": 28 + logLevel = slog.LevelError 29 + default: 30 + logLevel = slog.LevelInfo 31 + } 32 + 33 + // Create logger that writes to buffer 34 + opts := &slog.HandlerOptions{ 35 + Level: logLevel, 36 + } 37 + handler := slog.NewTextHandler(&buf, opts) 38 + slog.SetDefault(slog.New(handler)) 39 + 40 + // Run the function that generates logs 41 + logFunc() 42 + 43 + return buf.String() 44 + } 45 + 46 + func TestInitLogger(t *testing.T) { 47 + // Save original logger to restore after all tests 48 + originalLogger := slog.Default() 49 + defer slog.SetDefault(originalLogger) 50 + 51 + tests := []struct { 52 + name string 53 + level string 54 + shouldLogDebug bool 55 + shouldLogInfo bool 56 + shouldLogWarn bool 57 + shouldLogError bool 58 + }{ 59 + { 60 + name: "debug level logs all", 61 + level: "debug", 62 + shouldLogDebug: true, 63 + shouldLogInfo: true, 64 + shouldLogWarn: true, 65 + shouldLogError: true, 66 + }, 67 + { 68 + name: "info level logs info and above", 69 + level: "info", 70 + shouldLogDebug: false, 71 + shouldLogInfo: true, 72 + shouldLogWarn: true, 73 + shouldLogError: true, 74 + }, 75 + { 76 + name: "warn level logs warn and above", 77 + level: "warn", 78 + shouldLogDebug: false, 79 + shouldLogInfo: false, 80 + shouldLogWarn: true, 81 + shouldLogError: true, 82 + }, 83 + { 84 + name: "error level logs only errors", 85 + level: "error", 86 + shouldLogDebug: false, 87 + shouldLogInfo: false, 88 + shouldLogWarn: false, 89 + shouldLogError: true, 90 + }, 91 + { 92 + name: "empty level defaults to info", 93 + level: "", 94 + shouldLogDebug: false, 95 + shouldLogInfo: true, 96 + shouldLogWarn: true, 97 + shouldLogError: true, 98 + }, 99 + { 100 + name: "invalid level defaults to info", 101 + level: "invalid", 102 + shouldLogDebug: false, 103 + shouldLogInfo: true, 104 + shouldLogWarn: true, 105 + shouldLogError: true, 106 + }, 107 + { 108 + name: "case insensitive - DEBUG", 109 + level: "DEBUG", 110 + shouldLogDebug: true, 111 + shouldLogInfo: true, 112 + shouldLogWarn: true, 113 + shouldLogError: true, 114 + }, 115 + { 116 + name: "case insensitive - WaRn", 117 + level: "WaRn", 118 + shouldLogDebug: false, 119 + shouldLogInfo: false, 120 + shouldLogWarn: true, 121 + shouldLogError: true, 122 + }, 123 + { 124 + name: "whitespace handling - ' info '", 125 + level: " info ", 126 + shouldLogDebug: false, 127 + shouldLogInfo: true, 128 + shouldLogWarn: true, 129 + shouldLogError: true, 130 + }, 131 + { 132 + name: "warning alias for warn", 133 + level: "warning", 134 + shouldLogDebug: false, 135 + shouldLogInfo: false, 136 + shouldLogWarn: true, 137 + shouldLogError: true, 138 + }, 139 + } 140 + 141 + for _, tt := range tests { 142 + t.Run(tt.name, func(t *testing.T) { 143 + output := captureLogOutput(tt.level, func() { 144 + slog.Debug("debug message") 145 + slog.Info("info message") 146 + slog.Warn("warn message") 147 + slog.Error("error message") 148 + }) 149 + 150 + // Check debug 151 + if tt.shouldLogDebug { 152 + if !strings.Contains(output, "debug message") { 153 + t.Errorf("Expected debug message to be logged") 154 + } 155 + } else { 156 + if strings.Contains(output, "debug message") { 157 + t.Errorf("Did not expect debug message to be logged") 158 + } 159 + } 160 + 161 + // Check info 162 + if tt.shouldLogInfo { 163 + if !strings.Contains(output, "info message") { 164 + t.Errorf("Expected info message to be logged") 165 + } 166 + } else { 167 + if strings.Contains(output, "info message") { 168 + t.Errorf("Did not expect info message to be logged") 169 + } 170 + } 171 + 172 + // Check warn 173 + if tt.shouldLogWarn { 174 + if !strings.Contains(output, "warn message") { 175 + t.Errorf("Expected warn message to be logged") 176 + } 177 + } else { 178 + if strings.Contains(output, "warn message") { 179 + t.Errorf("Did not expect warn message to be logged") 180 + } 181 + } 182 + 183 + // Check error 184 + if tt.shouldLogError { 185 + if !strings.Contains(output, "error message") { 186 + t.Errorf("Expected error message to be logged") 187 + } 188 + } else { 189 + if strings.Contains(output, "error message") { 190 + t.Errorf("Did not expect error message to be logged") 191 + } 192 + } 193 + }) 194 + } 195 + } 196 + 197 + func TestInitLogger_LogLevels(t *testing.T) { 198 + // Save original logger 199 + originalLogger := slog.Default() 200 + defer slog.SetDefault(originalLogger) 201 + 202 + // Test that InitLogger actually calls slog.SetDefault 203 + InitLogger("debug") 204 + 205 + // Create a buffer to capture output 206 + var buf bytes.Buffer 207 + handler := slog.NewTextHandler(&buf, &slog.HandlerOptions{ 208 + Level: slog.LevelDebug, 209 + }) 210 + slog.SetDefault(slog.New(handler)) 211 + 212 + // Log at debug level 213 + slog.Debug("test debug message") 214 + 215 + // Verify output contains the message 216 + if !strings.Contains(buf.String(), "test debug message") { 217 + t.Error("Debug message not logged after InitLogger") 218 + } 219 + } 220 + 221 + func TestSetupTestLogger(t *testing.T) { 222 + // Save original logger 223 + originalLogger := slog.Default() 224 + defer slog.SetDefault(originalLogger) 225 + 226 + // Test 1: SetupTestLogger suppresses INFO and DEBUG 227 + cleanup := SetupTestLogger() 228 + 229 + // Create a buffer to capture what SHOULD be discarded 230 + // (but we can't really test io.Discard directly, so we'll test behavior) 231 + 232 + // Log at different levels - since it's set to WARN, debug/info should be suppressed 233 + // We can't capture io.Discard output, but we can verify the logger is configured correctly 234 + logger := slog.Default() 235 + 236 + // Verify handler is configured to discard 237 + if logger == nil { 238 + t.Error("Expected logger to be set") 239 + } 240 + 241 + // Test 2: Cleanup restores original logger 242 + cleanup() 243 + 244 + if slog.Default() != originalLogger { 245 + t.Error("Expected cleanup to restore original logger") 246 + } 247 + } 248 + 249 + func TestSetupTestLogger_LevelFiltering(t *testing.T) { 250 + // Save original logger 251 + originalLogger := slog.Default() 252 + defer slog.SetDefault(originalLogger) 253 + 254 + // Setup test logger (WARN level, io.Discard) 255 + cleanup := SetupTestLogger() 256 + defer cleanup() 257 + 258 + // Replace the handler output with a buffer so we can test 259 + // (This is a bit of a workaround since the real SetupTestLogger uses io.Discard) 260 + var buf bytes.Buffer 261 + handler := slog.NewTextHandler(&buf, &slog.HandlerOptions{ 262 + Level: slog.LevelWarn, 263 + }) 264 + slog.SetDefault(slog.New(handler)) 265 + 266 + // Log at different levels 267 + slog.Debug("debug message") 268 + slog.Info("info message") 269 + slog.Warn("warn message") 270 + slog.Error("error message") 271 + 272 + output := buf.String() 273 + 274 + // Debug and Info should NOT be in output (filtered by WARN level) 275 + if strings.Contains(output, "debug message") { 276 + t.Error("Debug message should be filtered out at WARN level") 277 + } 278 + if strings.Contains(output, "info message") { 279 + t.Error("Info message should be filtered out at WARN level") 280 + } 281 + 282 + // Warn and Error SHOULD be in output 283 + if !strings.Contains(output, "warn message") { 284 + t.Error("Warn message should be logged at WARN level") 285 + } 286 + if !strings.Contains(output, "error message") { 287 + t.Error("Error message should be logged at WARN level") 288 + } 289 + } 290 + 291 + func TestSetupTestLogger_UsageWithTCleanup(t *testing.T) { 292 + // This test demonstrates the intended usage pattern 293 + originalLogger := slog.Default() 294 + 295 + // Simulate using SetupTestLogger in a test 296 + cleanup := SetupTestLogger() 297 + t.Cleanup(cleanup) 298 + 299 + // Logger should be different now 300 + if slog.Default() == originalLogger { 301 + t.Error("Expected logger to be changed after SetupTestLogger") 302 + } 303 + 304 + // When test ends, t.Cleanup will run and restore the logger 305 + // We can't directly test this since it happens after the test function returns, 306 + // but we're verifying the pattern works 307 + } 308 + 309 + func TestSetupTestLogger_MultipleCallsIndependent(t *testing.T) { 310 + // Save original logger 311 + originalLogger := slog.Default() 312 + defer slog.SetDefault(originalLogger) 313 + 314 + // First call 315 + cleanup1 := SetupTestLogger() 316 + logger1 := slog.Default() 317 + 318 + // Second call 319 + cleanup2 := SetupTestLogger() 320 + logger2 := slog.Default() 321 + 322 + // Loggers might be different instances 323 + if logger1 == nil || logger2 == nil { 324 + t.Error("Expected loggers to be set") 325 + } 326 + 327 + // Cleanup in reverse order (like defer) 328 + cleanup2() 329 + cleanup1() 330 + } 331 + 332 + func TestInitLogger_OutputFormat(t *testing.T) { 333 + // Save original logger 334 + originalLogger := slog.Default() 335 + defer slog.SetDefault(originalLogger) 336 + 337 + var buf bytes.Buffer 338 + 339 + // Configure logger with buffer 340 + opts := &slog.HandlerOptions{ 341 + Level: slog.LevelInfo, 342 + } 343 + handler := slog.NewTextHandler(&buf, opts) 344 + slog.SetDefault(slog.New(handler)) 345 + 346 + // Log a message 347 + slog.Info("test message", "key", "value") 348 + 349 + output := buf.String() 350 + 351 + // Verify text format (not JSON) 352 + if !strings.Contains(output, "test message") { 353 + t.Error("Expected message in output") 354 + } 355 + if !strings.Contains(output, "key=value") { 356 + t.Error("Expected key=value in text format") 357 + } 358 + // Should NOT be JSON 359 + if strings.HasPrefix(output, "{") { 360 + t.Error("Expected text format, not JSON") 361 + } 362 + } 363 + 364 + func BenchmarkInitLogger(b *testing.B) { 365 + originalLogger := slog.Default() 366 + defer slog.SetDefault(originalLogger) 367 + 368 + b.ResetTimer() 369 + for i := 0; i < b.N; i++ { 370 + InitLogger("info") 371 + } 372 + } 373 + 374 + func BenchmarkSetupTestLogger(b *testing.B) { 375 + originalLogger := slog.Default() 376 + defer slog.SetDefault(originalLogger) 377 + 378 + b.ResetTimer() 379 + for i := 0; i < b.N; i++ { 380 + cleanup := SetupTestLogger() 381 + cleanup() 382 + } 383 + } 384 + 385 + // Example test showing how to use SetupTestLogger 386 + func ExampleSetupTestLogger() { 387 + // In a test function: 388 + cleanup := SetupTestLogger() 389 + defer cleanup() 390 + 391 + // Now logs at DEBUG and INFO are suppressed 392 + slog.Debug("This won't show") 393 + slog.Info("This won't show either") 394 + slog.Warn("This WILL show") 395 + 396 + // cleanup() will restore the original logger when defer runs 397 + }