A container registry that uses the AT Protocol for manifest storage and S3 for blob storage. atcr.io

use for range and wg.Go

+105 -257
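The changes below apply two newer standard-library idioms throughout the codebase: ranging directly over the iterator returned by strings.SplitSeq (added in Go 1.24) instead of materializing a slice with strings.Split, and launching goroutines with sync.WaitGroup.Go (added in Go 1.25) instead of the manual wg.Add(1) / go func() / defer wg.Done() pattern. A minimal sketch of both idioms, with a hypothetical process function standing in for the real loop bodies:

package main

import (
    "fmt"
    "strings"
    "sync"
)

// process is a hypothetical stand-in for the per-item work done in the real handlers.
func process(s string) { fmt.Println(strings.TrimSpace(s)) }

func main() {
    // Old: parts := strings.Split("a, b, c", ","); for _, p := range parts { ... }
    // New: range directly over the iterator returned by SplitSeq (Go 1.24+),
    // which avoids allocating the intermediate slice.
    for p := range strings.SplitSeq("a, b, c", ",") {
        process(p)
    }

    // Old: wg.Add(1); go func() { defer wg.Done(); ... }()
    // New: wg.Go wraps the Add/Done bookkeeping (Go 1.25+).
    var wg sync.WaitGroup
    for i := range 3 {
        wg.Go(func() {
            // Capturing i is safe: loop variables are per-iteration since Go 1.22.
            process(fmt.Sprintf("task %d", i))
        })
    }
    wg.Wait()
}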
+1 -2
pkg/appview/config.go
···
          return checksums
      }

-     pairs := strings.Split(checksumsStr, ",")
-     for _, pair := range pairs {
+     for pair := range strings.SplitSeq(checksumsStr, ",") {
          parts := strings.SplitN(strings.TrimSpace(pair), ":", 2)
          if len(parts) == 2 {
              platform := strings.TrimSpace(parts[0])
+2 -5
pkg/appview/db/schema.go
···
      var statements []string

      // Split on semicolons
-     parts := strings.Split(query, ";")
-
-     for _, part := range parts {
+     for part := range strings.SplitSeq(query, ";") {
          // Trim whitespace
          stmt := strings.TrimSpace(part)
···
          }

          // Skip comment-only statements
-         lines := strings.Split(stmt, "\n")
          hasCode := false
-         for _, line := range lines {
+         for line := range strings.SplitSeq(stmt, "\n") {
              trimmed := strings.TrimSpace(line)
              if trimmed != "" && !strings.HasPrefix(trimmed, "--") {
                  hasCode = true
+1 -1
pkg/appview/handlers/opengraph.go
···

      if licenses != "" {
          // Show first license if multiple
-         license := strings.Split(licenses, ",")[0]
+         license, _, _ := strings.Cut(licenses, ",")
          license = strings.TrimSpace(license)
          card.DrawBadge(license, badgeX, badgeY, ogcard.FontBadge, ogcard.ColorBadgeBg, ogcard.ColorText)
      }
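strings.Cut is the same idea applied to a single split: it returns the text before and after the first separator plus a found flag, so there is no throwaway slice just to take element zero. A small illustrative sketch (the license string is made up):

package main

import (
    "fmt"
    "strings"
)

func main() {
    licenses := "MIT, Apache-2.0" // hypothetical input

    // before, _, found := strings.Cut(s, sep); when sep is absent, before is the
    // whole string and found is false, matching the old strings.Split(s, ",")[0].
    license, _, _ := strings.Cut(licenses, ",")
    fmt.Println(strings.TrimSpace(license)) // "MIT"
}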
+11 -14
pkg/appview/handlers/repository.go
···
              continue
          }

-         wg.Add(1)
-         go func(idx int) {
-             defer wg.Done()
-
-             endpoint := manifests[idx].HoldEndpoint
+         wg.Go(func() {
+             endpoint := manifests[i].HoldEndpoint

              // Try to get cached status first (instant)
              if cached := h.HealthChecker.GetCachedStatus(endpoint); cached != nil {
                  mu.Lock()
-                 manifests[idx].Reachable = cached.Reachable
-                 manifests[idx].Pending = false
+                 manifests[i].Reachable = cached.Reachable
+                 manifests[i].Pending = false
                  mu.Unlock()
                  return
              }
···
              mu.Lock()
              if ctx.Err() == context.DeadlineExceeded {
                  // Timeout - mark as pending for HTMX polling
-                 manifests[idx].Reachable = false
-                 manifests[idx].Pending = true
+                 manifests[i].Reachable = false
+                 manifests[i].Pending = true
              } else if err != nil {
                  // Error - mark as unreachable
-                 manifests[idx].Reachable = false
-                 manifests[idx].Pending = false
+                 manifests[i].Reachable = false
+                 manifests[i].Pending = false
              } else {
                  // Success
-                 manifests[idx].Reachable = reachable
-                 manifests[idx].Pending = false
+                 manifests[i].Reachable = reachable
+                 manifests[i].Pending = false
              }
              mu.Unlock()
-         }(i)
+         })
      }

      // Wait for all checks to complete or timeout
+7 -14
pkg/appview/holdhealth/worker.go
···

  // Start begins the background worker
  func (w *Worker) Start(ctx context.Context) {
-     w.wg.Add(1)
-     go func() {
-         defer w.wg.Done()
-
+     w.wg.Go(func() {
          slog.Info("Hold health worker starting background health checks")

          // Wait for services to be ready (Docker startup race condition)
···
                  w.checker.Cleanup()
              }
          }
-     }()
+     })
  }

  // Stop gracefully stops the worker
···
      var statsMu sync.Mutex

      for _, endpoint := range uniqueEndpoints {
-         wg.Add(1)
-
-         go func(ep string) {
-             defer wg.Done()
-
+         wg.Go(func() {
              // Acquire semaphore
              sem <- struct{}{}
              defer func() { <-sem }()

              // Check health
-             isReachable, err := w.checker.CheckHealth(ctx, ep)
+             isReachable, err := w.checker.CheckHealth(ctx, endpoint)

              // Update cache
-             w.checker.SetStatus(ep, isReachable, err)
+             w.checker.SetStatus(endpoint, isReachable, err)

              // Update stats
              statsMu.Lock()
···
                  reachable++
              } else {
                  unreachable++
-                 slog.Warn("Hold health worker hold unreachable", "endpoint", ep, "error", err)
+                 slog.Warn("Hold health worker hold unreachable", "endpoint", endpoint, "error", err)
              }
              statsMu.Unlock()
-         }(endpoint)
+         })
      }

      // Wait for all checks to complete
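Note how the worker pairs wg.Go with a buffered channel used as a semaphore: every check is tracked by the WaitGroup, but at most cap(sem) probes run at once. A stripped-down sketch of that shape, where checkHealth and the endpoint list are placeholders rather than the project's real API:

package main

import (
    "fmt"
    "sync"
    "time"
)

// checkHealth is a placeholder for the real reachability probe.
func checkHealth(endpoint string) bool {
    time.Sleep(10 * time.Millisecond)
    return true
}

func main() {
    endpoints := []string{"hold-a.example", "hold-b.example", "hold-c.example"} // hypothetical

    sem := make(chan struct{}, 2) // at most 2 checks in flight
    var wg sync.WaitGroup
    var mu sync.Mutex
    reachable := 0

    for _, endpoint := range endpoints {
        wg.Go(func() {
            sem <- struct{}{}        // acquire a slot
            defer func() { <-sem }() // release it when done

            if checkHealth(endpoint) {
                mu.Lock()
                reachable++
                mu.Unlock()
            }
        })
    }

    wg.Wait()
    fmt.Println("reachable:", reachable)
}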
+1 -3
pkg/appview/licenses/licenses.go
···
      licensesStr = strings.ReplaceAll(licensesStr, " OR ", ",")
      licensesStr = strings.ReplaceAll(licensesStr, ";", ",")

-     parts := strings.Split(licensesStr, ",")
-
      var result []LicenseInfo
      seen := make(map[string]bool) // Deduplicate

-     for _, part := range parts {
+     for part := range strings.SplitSeq(licensesStr, ",") {
          part = strings.TrimSpace(part)
          if part == "" {
              continue
+5 -8
pkg/appview/middleware/auth_test.go
···
      var wg sync.WaitGroup
      var mu sync.Mutex // Protect results map

-     for i := range 10 {
-         wg.Add(1)
-         go func(index int, sessionID string) {
-             defer wg.Done()
-
+     for i := range results {
+         wg.Go(func() {
              req := httptest.NewRequest("GET", "/test", nil)
              req.AddCookie(&http.Cookie{
                  Name:  "atcr_session",
-                 Value: sessionID,
+                 Value: sessionIDs[i],
              })
              w := httptest.NewRecorder()

              wrappedHandler.ServeHTTP(w, req)

              mu.Lock()
-             results[index] = w.Code
+             results[i] = w.Code
              mu.Unlock()
-         }(i, sessionIDs[i])
+         })
      }

      wg.Wait()
+2 -4
pkg/appview/storage/profile_test.go
···
      // Make 5 concurrent GetProfile calls
      var wg sync.WaitGroup
      for range 5 {
-         wg.Add(1)
-         go func() {
-             defer wg.Done()
+         wg.Go(func() {
              _, err := GetProfile(context.Background(), client)
              if err != nil {
                  t.Errorf("GetProfile() error = %v", err)
              }
-         }()
+         })
      }

      wg.Wait()
+42 -45
pkg/appview/storage/routing_repository.go
···
  import (
      "context"
      "log/slog"
+     "sync"

      "github.com/distribution/distribution/v3"
  )
···
  // RoutingRepository routes manifests to ATProto and blobs to external hold service
  // The registry (AppView) is stateless and NEVER stores blobs locally
  // NOTE: A fresh instance is created per-request (see middleware/registry.go)
- // so no mutex is needed - each request has its own instance
  type RoutingRepository struct {
      distribution.Repository
-     Ctx           *RegistryContext // All context and services (exported for token updates)
-     manifestStore *ManifestStore   // Manifest store instance (lazy-initialized)
-     blobStore     *ProxyBlobStore  // Blob store instance (lazy-initialized)
+     Ctx               *RegistryContext // All context and services (exported for token updates)
+     manifestStore     *ManifestStore   // Manifest store instance (lazy-initialized)
+     manifestStoreOnce sync.Once        // Ensures thread-safe lazy initialization
+     blobStore         *ProxyBlobStore  // Blob store instance (lazy-initialized)
+     blobStoreOnce     sync.Once        // Ensures thread-safe lazy initialization
  }

  // NewRoutingRepository creates a new routing repository
···
  // Manifests returns the ATProto-backed manifest service
  func (r *RoutingRepository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
-     // Lazy-initialize manifest store (no mutex needed - one instance per request)
-     if r.manifestStore == nil {
+     r.manifestStoreOnce.Do(func() {
          // Ensure blob store is created first (needed for label extraction during push)
          blobStore := r.Blobs(ctx)
          r.manifestStore = NewManifestStore(r.Ctx, blobStore)
-     }
+     })
      return r.manifestStore, nil
  }

  // Blobs returns a proxy blob store that routes to external hold service
  // The registry (AppView) NEVER stores blobs locally - all blobs go through hold service
  func (r *RoutingRepository) Blobs(ctx context.Context) distribution.BlobStore {
-     // Return cached blob store if available (no mutex needed - one instance per request)
-     if r.blobStore != nil {
-         slog.Debug("Returning cached blob store", "component", "storage/blobs", "did", r.Ctx.DID, "repo", r.Ctx.Repository)
-         return r.blobStore
-     }
-
-     // Determine if this is a pull (GET/HEAD) or push (PUT/POST/etc) operation
-     // Pull operations use the historical hold DID from the database (blobs are where they were pushed)
-     // Push operations use the discovery-based hold DID from user's profile/default
-     // This allows users to change their default hold and have new pushes go there
-     isPull := false
-     if method, ok := ctx.Value(HTTPRequestMethod).(string); ok {
-         isPull = method == "GET" || method == "HEAD"
-     }
-
-     holdDID := r.Ctx.HoldDID // Default to discovery-based DID
-     holdSource := "discovery"
-
-     // Only query database for pull operations
-     if isPull && r.Ctx.Database != nil {
-         // Query database for the latest manifest's hold DID
-         if dbHoldDID, err := r.Ctx.Database.GetLatestHoldDIDForRepo(r.Ctx.DID, r.Ctx.Repository); err == nil && dbHoldDID != "" {
-             // Use hold DID from database (pull case - use historical reference)
-             holdDID = dbHoldDID
-             holdSource = "database"
-             slog.Debug("Using hold from database manifest (pull)", "component", "storage/blobs", "did", r.Ctx.DID, "repo", r.Ctx.Repository, "hold", dbHoldDID)
-         } else if err != nil {
-             // Log error but don't fail - fall back to discovery-based DID
-             slog.Warn("Failed to query database for hold DID", "component", "storage/blobs", "error", err)
-         }
-         // If dbHoldDID is empty (no manifests yet), fall through to use discovery-based DID
-     }
-
-     if holdDID == "" {
-         // This should never happen if middleware is configured correctly
-         panic("hold DID not set in RegistryContext - ensure default_hold_did is configured in middleware")
-     }
-
-     slog.Debug("Using hold DID for blobs", "component", "storage/blobs", "did", r.Ctx.DID, "repo", r.Ctx.Repository, "hold", holdDID, "source", holdSource)
-
-     // Update context with the correct hold DID (may be from database or discovered)
-     r.Ctx.HoldDID = holdDID
-
-     // Create and cache proxy blob store
-     r.blobStore = NewProxyBlobStore(r.Ctx)
+     r.blobStoreOnce.Do(func() {
+         // Determine if this is a pull (GET/HEAD) or push (PUT/POST/etc) operation
+         // Pull operations use the historical hold DID from the database (blobs are where they were pushed)
+         // Push operations use the discovery-based hold DID from user's profile/default
+         // This allows users to change their default hold and have new pushes go there
+         isPull := false
+         if method, ok := ctx.Value(HTTPRequestMethod).(string); ok {
+             isPull = method == "GET" || method == "HEAD"
+         }
+
+         holdDID := r.Ctx.HoldDID // Default to discovery-based DID
+         holdSource := "discovery"
+
+         // Only query database for pull operations
+         if isPull && r.Ctx.Database != nil {
+             // Query database for the latest manifest's hold DID
+             if dbHoldDID, err := r.Ctx.Database.GetLatestHoldDIDForRepo(r.Ctx.DID, r.Ctx.Repository); err == nil && dbHoldDID != "" {
+                 // Use hold DID from database (pull case - use historical reference)
+                 holdDID = dbHoldDID
+                 holdSource = "database"
+                 slog.Debug("Using hold from database manifest (pull)", "component", "storage/blobs", "did", r.Ctx.DID, "repo", r.Ctx.Repository, "hold", dbHoldDID)
+             } else if err != nil {
+                 // Log error but don't fail - fall back to discovery-based DID
+                 slog.Warn("Failed to query database for hold DID", "component", "storage/blobs", "error", err)
+             }
+             // If dbHoldDID is empty (no manifests yet), fall through to use discovery-based DID
+         }
+
+         if holdDID == "" {
+             // This should never happen if middleware is configured correctly
+             panic("hold DID not set in RegistryContext - ensure default_hold_did is configured in middleware")
+         }
+
+         slog.Debug("Using hold DID for blobs", "component", "storage/blobs", "did", r.Ctx.DID, "repo", r.Ctx.Repository, "hold", holdDID, "source", holdSource)
+
+         // Update context with the correct hold DID (may be from database or discovered)
+         r.Ctx.HoldDID = holdDID
+
+         // Create and cache proxy blob store
+         r.blobStore = NewProxyBlobStore(r.Ctx)
+     })
      return r.blobStore
  }
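The dropped struct comment had argued that a per-request instance needs no synchronization; the replacement gives each lazily built store its own sync.Once, so the initializer runs exactly once even if Manifests() or Blobs() is called concurrently. A minimal sketch of the pattern, with a hypothetical expensiveStore standing in for ManifestStore/ProxyBlobStore:

package main

import (
    "fmt"
    "sync"
)

// expensiveStore is a hypothetical stand-in for ManifestStore or ProxyBlobStore.
type expensiveStore struct{ name string }

type repo struct {
    store     *expensiveStore
    storeOnce sync.Once // guards the lazy initialization below
}

// Store builds the store on first use; concurrent callers block until the first
// initialization finishes and then all observe the same instance.
func (r *repo) Store() *expensiveStore {
    r.storeOnce.Do(func() {
        r.store = &expensiveStore{name: "built exactly once"}
    })
    return r.store
}

func main() {
    r := &repo{}
    var wg sync.WaitGroup
    for range 4 {
        wg.Go(func() {
            fmt.Println(r.Store().name)
        })
    }
    wg.Wait()
}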
+6 -10
pkg/appview/storage/routing_repository_test.go
···

      // Concurrent access to Manifests()
      for i := 0; i < numGoroutines; i++ {
-         wg.Add(1)
-         go func(index int) {
-             defer wg.Done()
+         wg.Go(func() {
              store, err := repo.Manifests(context.Background())
              require.NoError(t, err)
-             manifestStores[index] = store
-         }(i)
+             manifestStores[i] = store
+         })
      }

      wg.Wait()
···

      // Concurrent access to Blobs()
      for i := 0; i < numGoroutines; i++ {
-         wg.Add(1)
-         go func(index int) {
-             defer wg.Done()
-             blobStores[index] = repo.Blobs(context.Background())
-         }(i)
+         wg.Go(func() {
+             blobStores[i] = repo.Blobs(context.Background())
+         })
      }

      wg.Wait()
+5 -9
pkg/atproto/directory_test.go
···
  t.Run("concurrent access is thread-safe", func(t *testing.T) {
      const numGoroutines = 100
      var wg sync.WaitGroup
-     wg.Add(numGoroutines)

      // Channel to collect all directory instances
      instances := make(chan any, numGoroutines)

      // Launch many goroutines concurrently accessing GetDirectory
      for range numGoroutines {
-         go func() {
-             defer wg.Done()
+         wg.Go(func() {
              dir := GetDirectory()
              instances <- dir
-         }()
+         })
      }

      // Wait for all goroutines to complete
···

      const numGoroutines = 50
      var wg sync.WaitGroup
-     wg.Add(numGoroutines)

      instances := make([]any, numGoroutines)
      var mu sync.Mutex

      // Simulate many goroutines trying to get the directory simultaneously
      for i := 0; i < numGoroutines; i++ {
-         go func(idx int) {
-             defer wg.Done()
+         wg.Go(func() {
              dir := GetDirectory()
              mu.Lock()
-             instances[idx] = dir
+             instances[i] = dir
              mu.Unlock()
-         }(i)
+         })
      }

      wg.Wait()
+5 -7
pkg/auth/token/issuer_test.go
···
      // Issue tokens concurrently
      const numGoroutines = 10
      var wg sync.WaitGroup
-     wg.Add(numGoroutines)

      tokens := make([]string, numGoroutines)
      errors := make([]error, numGoroutines)

      for i := 0; i < numGoroutines; i++ {
-         go func(idx int) {
-             defer wg.Done()
-             subject := "did:plc:user" + string(rune('0'+idx))
+         wg.Go(func() {
+             subject := "did:plc:user" + string(rune('0'+i))
              token, err := issuer.Issue(subject, nil, AuthMethodOAuth)
-             tokens[idx] = token
-             errors[idx] = err
-         }(i)
+             tokens[i] = token
+             errors[i] = err
+         })
      }

      wg.Wait()
-90
pkg/hold/oci/helpers_test.go
···
  )

  // Tests for helper functions
-
- func TestBlobPath_SHA256(t *testing.T) {
-     tests := []struct {
-         name     string
-         digest   string
-         expected string
-     }{
-         {
-             name:     "standard sha256 digest",
-             digest:   "sha256:abc123def456",
-             expected: "/docker/registry/v2/blobs/sha256/ab/abc123def456/data",
-         },
-         {
-             name:     "short hash (less than 2 chars)",
-             digest:   "sha256:a",
-             expected: "/docker/registry/v2/blobs/sha256/a/data",
-         },
-         {
-             name:     "exactly 2 char hash",
-             digest:   "sha256:ab",
-             expected: "/docker/registry/v2/blobs/sha256/ab/ab/data",
-         },
-     }
-
-     for _, tt := range tests {
-         t.Run(tt.name, func(t *testing.T) {
-             result := blobPath(tt.digest)
-             if result != tt.expected {
-                 t.Errorf("Expected %s, got %s", tt.expected, result)
-             }
-         })
-     }
- }
-
- func TestBlobPath_TempUpload(t *testing.T) {
-     tests := []struct {
-         name     string
-         digest   string
-         expected string
-     }{
-         {
-             name:     "temp upload path",
-             digest:   "uploads/temp-uuid-123",
-             expected: "/docker/registry/v2/uploads/temp-uuid-123/data",
-         },
-         {
-             name:     "temp upload with different uuid",
-             digest:   "uploads/temp-abc-def-456",
-             expected: "/docker/registry/v2/uploads/temp-abc-def-456/data",
-         },
-     }
-
-     for _, tt := range tests {
-         t.Run(tt.name, func(t *testing.T) {
-             result := blobPath(tt.digest)
-             if result != tt.expected {
-                 t.Errorf("Expected %s, got %s", tt.expected, result)
-             }
-         })
-     }
- }
-
- func TestBlobPath_MalformedDigest(t *testing.T) {
-     tests := []struct {
-         name     string
-         digest   string
-         expected string
-     }{
-         {
-             name:     "no colon in digest",
-             digest:   "malformed-digest",
-             expected: "/docker/registry/v2/blobs/malformed-digest/data",
-         },
-         {
-             name:     "empty digest",
-             digest:   "",
-             expected: "/docker/registry/v2/blobs//data",
-         },
-     }
-
-     for _, tt := range tests {
-         t.Run(tt.name, func(t *testing.T) {
-             result := blobPath(tt.digest)
-             if result != tt.expected {
-                 t.Errorf("Expected %s, got %s", tt.expected, result)
-             }
-         })
-     }
- }
-
  func TestNormalizeETag(t *testing.T) {
      tests := []struct {
          name     string
+15 -43
pkg/hold/oci/multipart.go
···
      "time"

      "atcr.io/pkg/atproto"
-     "github.com/aws/aws-sdk-go/service/s3"
+     "atcr.io/pkg/s3"
+     awss3 "github.com/aws/aws-sdk-go/service/s3"
      "github.com/google/uuid"
  )
···
      if h.s3Service.Client == nil {
          return "", S3Native, fmt.Errorf("S3 not configured")
      }
-     path := blobPath(digest)
+     path := s3.BlobPath(digest)
      s3Key := strings.TrimPrefix(path, "/")
      if h.s3Service.PathPrefix != "" {
          s3Key = h.s3Service.PathPrefix + "/" + s3Key
      }

-     result, err := h.s3Service.Client.CreateMultipartUploadWithContext(ctx, &s3.CreateMultipartUploadInput{
+     result, err := h.s3Service.Client.CreateMultipartUploadWithContext(ctx, &awss3.CreateMultipartUploadInput{
          Bucket: &h.s3Service.Bucket,
          Key:    &s3Key,
      })
···
          return nil, fmt.Errorf("S3 not configured")
      }

-     path := blobPath(session.Digest)
+     path := s3.BlobPath(session.Digest)
      s3Key := strings.TrimPrefix(path, "/")
      if h.s3Service.PathPrefix != "" {
          s3Key = h.s3Service.PathPrefix + "/" + s3Key
      }
      pnum := int64(partNumber)
-     req, _ := h.s3Service.Client.UploadPartRequest(&s3.UploadPartInput{
+     req, _ := h.s3Service.Client.UploadPartRequest(&awss3.UploadPartInput{
          Bucket:   &h.s3Service.Bucket,
          Key:      &s3Key,
          UploadId: &session.S3UploadID,
···

      // Convert to S3 CompletedPart format
      // IMPORTANT: S3 requires ETags to be quoted in the CompleteMultipartUpload XML
-     s3Parts := make([]*s3.CompletedPart, len(parts))
+     s3Parts := make([]*awss3.CompletedPart, len(parts))
      for i, p := range parts {
          etag := normalizeETag(p.ETag)
          pnum := int64(p.PartNumber)
-         s3Parts[i] = &s3.CompletedPart{
+         s3Parts[i] = &awss3.CompletedPart{
              PartNumber: &pnum,
              ETag:       &etag,
          }
      }
-     sourcePath := blobPath(session.Digest)
+     sourcePath := s3.BlobPath(session.Digest)
      s3Key := strings.TrimPrefix(sourcePath, "/")
      if h.s3Service.PathPrefix != "" {
          s3Key = h.s3Service.PathPrefix + "/" + s3Key
      }

-     _, err = h.s3Service.Client.CompleteMultipartUploadWithContext(ctx, &s3.CompleteMultipartUploadInput{
+     _, err = h.s3Service.Client.CompleteMultipartUploadWithContext(ctx, &awss3.CompleteMultipartUploadInput{
          Bucket:   &h.s3Service.Bucket,
          Key:      &s3Key,
          UploadId: &session.S3UploadID,
-         MultipartUpload: &s3.CompletedMultipartUpload{
+         MultipartUpload: &awss3.CompletedMultipartUpload{
              Parts: s3Parts,
          },
      })
···
          "parts", len(s3Parts))

      // Verify the blob exists at temp location before moving
-     destPath := blobPath(finalDigest)
+     destPath := s3.BlobPath(finalDigest)
      slog.Debug("About to move blob",
          "source", sourcePath,
          "dest", destPath)
···
      }

      // Write assembled blob to final digest location (not temp)
-     path := blobPath(finalDigest)
+     path := s3.BlobPath(finalDigest)
      writer, err := h.driver.Writer(ctx, path, false)
      if err != nil {
          return fmt.Errorf("failed to create writer: %w", err)
···
      if h.s3Service.Client == nil {
          return fmt.Errorf("S3 not configured")
      }
-     path := blobPath(session.Digest)
+     path := s3.BlobPath(session.Digest)
      s3Key := strings.TrimPrefix(path, "/")
      if h.s3Service.PathPrefix != "" {
          s3Key = h.s3Service.PathPrefix + "/" + s3Key
      }

-     _, err := h.s3Service.Client.AbortMultipartUploadWithContext(ctx, &s3.AbortMultipartUploadInput{
+     _, err := h.s3Service.Client.AbortMultipartUploadWithContext(ctx, &awss3.AbortMultipartUploadInput{
          Bucket:   &h.s3Service.Bucket,
          Key:      &s3Key,
          UploadId: &session.S3UploadID,
···
      // Add quotes
      return fmt.Sprintf("\"%s\"", etag)
  }
-
- // blobPath converts a digest (e.g., "sha256:abc123...") or temp path to a storage path
- // Distribution stores blobs as: /docker/registry/v2/blobs/{algorithm}/{xx}/{hash}/data
- // where xx is the first 2 characters of the hash for directory sharding
- // NOTE: Path must start with / for filesystem driver
- // This is used for OCI container layers (content-addressed, globally deduplicated)
- func blobPath(digest string) string {
-     // Handle temp paths (start with uploads/temp-)
-     if strings.HasPrefix(digest, "uploads/temp-") {
-         return fmt.Sprintf("/docker/registry/v2/%s/data", digest)
-     }
-
-     // Split digest into algorithm and hash
-     parts := strings.SplitN(digest, ":", 2)
-     if len(parts) != 2 {
-         // Fallback for malformed digest
-         return fmt.Sprintf("/docker/registry/v2/blobs/%s/data", digest)
-     }
-
-     algorithm := parts[0]
-     hash := parts[1]
-
-     // Use first 2 characters for sharding
-     if len(hash) < 2 {
-         return fmt.Sprintf("/docker/registry/v2/blobs/%s/%s/data", algorithm, hash)
-     }
-
-     return fmt.Sprintf("/docker/registry/v2/blobs/%s/%s/%s/data", algorithm, hash[:2], hash)
- }
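blobPath and its tests disappear from pkg/hold/oci because the call sites now use s3.BlobPath from atcr.io/pkg/s3. That package is not part of this diff, so the following is only a sketch of what the relocated helper presumably looks like, reconstructed from the removed function above:

// Package s3 path helper, sketched from the blobPath function removed above;
// the actual atcr.io/pkg/s3 implementation is not shown in this commit.
package s3

import (
    "fmt"
    "strings"
)

// BlobPath converts a digest ("sha256:abc...") or a temp upload path into the
// distribution-style storage path /docker/registry/v2/blobs/{alg}/{xx}/{hash}/data,
// where xx is the first two characters of the hash used for directory sharding.
func BlobPath(digest string) string {
    // Temp uploads keep their own prefix.
    if strings.HasPrefix(digest, "uploads/temp-") {
        return fmt.Sprintf("/docker/registry/v2/%s/data", digest)
    }

    algorithm, hash, found := strings.Cut(digest, ":")
    if !found {
        // Fallback for malformed digests.
        return fmt.Sprintf("/docker/registry/v2/blobs/%s/data", digest)
    }
    if len(hash) < 2 {
        return fmt.Sprintf("/docker/registry/v2/blobs/%s/%s/data", algorithm, hash)
    }
    return fmt.Sprintf("/docker/registry/v2/blobs/%s/%s/%s/data", algorithm, hash[:2], hash)
}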
+2 -2
pkg/hold/pds/xrpc.go
···
      hostname := h.pds.PublicURL
      hostname = strings.TrimPrefix(hostname, "http://")
      hostname = strings.TrimPrefix(hostname, "https://")
-     hostname = strings.Split(hostname, "/")[0] // Remove path
-     hostname = strings.Split(hostname, ":")[0] // Remove port
+     hostname, _, _ = strings.Cut(hostname, "/") // Remove path
+     hostname, _, _ = strings.Cut(hostname, ":") // Remove port

      response := map[string]any{
          "did": h.pds.DID(),