//! MIRROR: JavaScript for 🐳's, a tiny runtime with big ambitions
1const std = @import("std");
2
3const c = @cImport({
4 @cInclude("lmdb.h");
5});
6
// FFI into a companion native library that rewrites npm registry JSON to drop
// unneeded metadata. Returns a freshly allocated buffer (its length written to
// `out_len`) or null on failure; a non-null buffer must be released with
// `strip_metadata_free`. NOTE(review): exact stripping behavior is defined in
// the native library — confirm against its source.
extern fn strip_npm_metadata(json_data: [*]const u8, json_len: usize, out_len: *usize) ?[*]u8;
extern fn strip_metadata_free(ptr: [*]u8) void;
9
/// One cached package record as seen by callers.
/// `path` is heap-owned when `allocator` is set (entries produced by the
/// database); entries built by hand may leave `allocator` null, making
/// `deinit` a no-op.
pub const CacheEntry = struct {
    /// 64-byte integrity hash identifying the package contents.
    integrity: [64]u8,
    /// Filesystem location of the unpacked package.
    path: []const u8,
    unpacked_size: u64,
    file_count: u32,
    /// Unix timestamp of when the entry was stored.
    cached_at: i64,
    /// Owner of `path`, or null when the path is not heap-allocated.
    allocator: ?std.mem.Allocator = null,

    /// Release `path` if this entry owns it; safe to call on borrowed entries.
    pub fn deinit(self: *CacheEntry) void {
        const owner = self.allocator orelse return;
        owner.free(self.path);
    }
};
22
/// On-disk header of a primary-index value: this struct's raw bytes are stored
/// in LMDB, immediately followed by `path_len` bytes of path. `extern` pins
/// the field layout so persisted records remain readable across builds.
/// Do NOT reorder fields — that would silently corrupt existing databases.
/// NOTE(review): the layout is still architecture-dependent (endianness,
/// padding), so the index file is not portable between machines — confirm
/// that is acceptable.
const SerializedEntry = extern struct {
    unpacked_size: u64,
    file_count: u32,
    cached_at: i64,
    path_len: u32,
};
29
/// Translate an LMDB status code into a Zig error.
/// LMDB reports success as 0; every other value becomes `error.MdbError`.
fn check(rc: c_int) !void {
    return switch (rc) {
        0 => {},
        else => error.MdbError,
    };
}
33
/// Content-addressed package cache backed by LMDB.
///
/// One LMDB environment holds three named sub-databases:
///   * `primary`   — key "i:" ++ 64-byte integrity -> SerializedEntry header + path bytes
///   * `secondary` — key "n:<name>@<version>"      -> 64-byte integrity (points at primary)
///   * `metadata`  — key "m:<name>"                -> i64 unix timestamp + registry JSON
///
/// The environment is opened with MDB_NOSYNC, so commits are not fsync'd;
/// call `sync()` when durability matters. Per LMDB, a crash may lose recent
/// writes but does not corrupt the database.
pub const CacheDB = struct {
    env: *c.MDB_env,
    dbi_primary: c.MDB_dbi,
    dbi_secondary: c.MDB_dbi,
    dbi_metadata: c.MDB_dbi,
    /// Root directory of the cache; owned by `allocator`.
    cache_dir: []const u8,
    allocator: std.mem.Allocator,

    /// Upper bound for the LMDB memory map (8 GiB); the file grows lazily.
    const MAP_SIZE: usize = 8 * 1024 * 1024 * 1024;
    /// Registry metadata entries expire after 24 hours.
    const METADATA_TTL_SECS: i64 = 24 * 60 * 60;

    /// Open (creating if needed) the cache rooted at `cache_dir`.
    /// Returns a heap-allocated handle; release it with `close()`.
    pub fn open(cache_dir: []const u8) !*CacheDB {
        const allocator = std.heap.c_allocator;

        std.fs.cwd().makePath(cache_dir) catch |err| switch (err) {
            error.PathAlreadyExists => {},
            else => return error.CacheError,
        };

        // Directory that holds the unpacked package payloads.
        const packages_path = try std.fmt.allocPrintSentinel(allocator, "{s}/cache", .{cache_dir}, 0);
        defer allocator.free(packages_path);
        std.fs.cwd().makePath(packages_path) catch |err| switch (err) {
            error.PathAlreadyExists => {},
            else => return error.CacheError,
        };

        var env: ?*c.MDB_env = null;
        if (c.mdb_env_create(&env) != 0) {
            return error.DatabaseOpen;
        }
        errdefer c.mdb_env_close(env);

        try check(c.mdb_env_set_mapsize(env, MAP_SIZE));
        try check(c.mdb_env_set_maxdbs(env, 3));

        const db_path = try std.fmt.allocPrintSentinel(allocator, "{s}/index.lmdb", .{cache_dir}, 0);
        defer allocator.free(db_path);

        // MDB_NOSUBDIR: keep the index in a single file rather than a
        // directory. MDB_NOSYNC: skip fsync on commit (see `sync`).
        const flags: c_uint = c.MDB_NOSUBDIR | c.MDB_NOSYNC;
        if (c.mdb_env_open(env, db_path.ptr, flags, 0o644) != 0) {
            return error.DatabaseOpen;
        }

        const self = try allocator.create(CacheDB);
        errdefer allocator.destroy(self);

        self.* = .{
            .env = env.?,
            .dbi_primary = 0,
            .dbi_secondary = 0,
            .dbi_metadata = 0,
            .cache_dir = try allocator.dupe(u8, cache_dir),
            .allocator = allocator,
        };
        // Fix: the duplicated cache_dir used to leak when openDatabases() failed.
        errdefer allocator.free(self.cache_dir);

        try self.openDatabases();
        self.autoPruneMetadata();

        return self;
    }

    /// Best-effort removal of expired metadata at startup; errors are ignored.
    fn autoPruneMetadata(self: *CacheDB) void {
        var txn: ?*c.MDB_txn = null;
        if (c.mdb_txn_begin(self.env, null, 0, &txn) != 0) return;

        self.pruneExpiredMetadata(txn.?);
        // mdb_txn_commit frees the txn whether or not it succeeds.
        _ = c.mdb_txn_commit(txn);
    }

    /// Open the three named sub-databases, creating them on first use.
    fn openDatabases(self: *CacheDB) !void {
        var txn: ?*c.MDB_txn = null;

        if (c.mdb_txn_begin(self.env, null, 0, &txn) != 0) {
            return error.DatabaseOpen;
        }
        errdefer c.mdb_txn_abort(txn); // mdb_txn_abort(null) is a no-op

        if (c.mdb_dbi_open(txn, "primary", c.MDB_CREATE, &self.dbi_primary) != 0) return error.DatabaseOpen;
        if (c.mdb_dbi_open(txn, "secondary", c.MDB_CREATE, &self.dbi_secondary) != 0) return error.DatabaseOpen;
        if (c.mdb_dbi_open(txn, "metadata", c.MDB_CREATE, &self.dbi_metadata) != 0) return error.DatabaseOpen;

        // Fix: mdb_txn_commit frees the transaction even when it fails, so the
        // handle must be cleared before the error return — otherwise the
        // errdefer above would abort an already-freed txn (use-after-free).
        const rc = c.mdb_txn_commit(txn);
        txn = null;
        if (rc != 0) return error.DatabaseOpen;
    }

    /// Close the environment and free the handle. `self` is invalid afterwards.
    pub fn close(self: *CacheDB) void {
        c.mdb_dbi_close(self.env, self.dbi_primary);
        c.mdb_dbi_close(self.env, self.dbi_secondary);
        c.mdb_dbi_close(self.env, self.dbi_metadata);
        c.mdb_env_close(self.env);

        self.allocator.free(self.cache_dir);
        self.allocator.destroy(self);
    }

    /// Primary-index key: "i:" followed by the 64-byte integrity hash.
    fn makeIntegrityKey(integrity: *const [64]u8) [66]u8 {
        var key: [66]u8 = undefined;
        key[0] = 'i';
        key[1] = ':';
        @memcpy(key[2..66], integrity);
        return key;
    }

    /// Secondary-index key: "n:<name>@<version>". Caller frees the result.
    fn makeNameKey(allocator: std.mem.Allocator, name: []const u8, version: []const u8) ![]u8 {
        return std.fmt.allocPrint(allocator, "n:{s}@{s}", .{ name, version });
    }

    /// Look up a cache entry by integrity hash.
    /// Returns null on miss or on any database error. On success the caller
    /// owns the returned entry and must call `CacheEntry.deinit`.
    pub fn lookup(self: *CacheDB, integrity: *const [64]u8) ?CacheEntry {
        var txn: ?*c.MDB_txn = null;
        if (c.mdb_txn_begin(self.env, null, c.MDB_RDONLY, &txn) != 0) {
            return null;
        }
        defer c.mdb_txn_abort(txn);

        const key_bytes = makeIntegrityKey(integrity);
        var key = c.MDB_val{
            .mv_size = key_bytes.len,
            .mv_data = @constCast(&key_bytes),
        };
        var value: c.MDB_val = undefined;

        if (c.mdb_get(txn, self.dbi_primary, &key, &value) != 0) return null;
        // `value` points into the LMDB map and is only valid while the txn is
        // open; deserializeEntry copies the path out before the abort runs.
        return deserializeEntry(integrity, value, self.allocator);
    }

    /// Cheap existence probe for an integrity hash (no allocation).
    pub fn hasIntegrity(self: *CacheDB, integrity: *const [64]u8) bool {
        var txn: ?*c.MDB_txn = null;
        if (c.mdb_txn_begin(self.env, null, c.MDB_RDONLY, &txn) != 0) {
            return false;
        }
        defer c.mdb_txn_abort(txn);

        const key_bytes = makeIntegrityKey(integrity);
        var key = c.MDB_val{
            .mv_size = key_bytes.len,
            .mv_data = @constCast(&key_bytes),
        };
        var value: c.MDB_val = undefined;

        return c.mdb_get(txn, self.dbi_primary, &key, &value) == 0;
    }

    /// Decode a primary-index value (header + path bytes) into a CacheEntry.
    /// The path is duplicated with `allocator`; returns null on malformed data
    /// or allocation failure.
    fn deserializeEntry(integrity: *const [64]u8, value: c.MDB_val, allocator: std.mem.Allocator) ?CacheEntry {
        if (value.mv_size < @sizeOf(SerializedEntry)) return null;

        const data: [*]const u8 = @ptrCast(value.mv_data);

        // Byte-wise copy: the memory-mapped data carries no alignment guarantee.
        var header: SerializedEntry = undefined;
        @memcpy(std.mem.asBytes(&header), data[0..@sizeOf(SerializedEntry)]);

        const path_start = @sizeOf(SerializedEntry);
        if (value.mv_size < path_start + header.path_len) return null;

        const path = allocator.dupe(u8, data[path_start..][0..header.path_len]) catch return null;

        return CacheEntry{
            .integrity = integrity.*,
            .path = path,
            .unpacked_size = header.unpacked_size,
            .file_count = header.file_count,
            .cached_at = header.cached_at,
            .allocator = allocator,
        };
    }

    /// Look up an entry via the secondary name@version index.
    /// Caller owns the returned entry (see `lookup`).
    pub fn lookupByName(self: *CacheDB, name: []const u8, version: []const u8) ?CacheEntry {
        const name_key = makeNameKey(self.allocator, name, version) catch return null;
        defer self.allocator.free(name_key);

        // Fix: copy the integrity hash out and END this read txn before
        // calling lookup(). The previous code kept its read txn open while
        // lookup() began a second one on the same thread, which LMDB rejects
        // (MDB_BAD_RSLOT) unless MDB_NOTLS is set — and the integrity pointer
        // referenced memory owned by the outer (about-to-be-aborted) txn.
        var integrity: [64]u8 = undefined;
        {
            var txn: ?*c.MDB_txn = null;
            if (c.mdb_txn_begin(self.env, null, c.MDB_RDONLY, &txn) != 0) {
                return null;
            }
            defer c.mdb_txn_abort(txn);

            var key = c.MDB_val{
                .mv_size = name_key.len,
                .mv_data = @constCast(name_key.ptr),
            };
            var value: c.MDB_val = undefined;

            if (c.mdb_get(txn, self.dbi_secondary, &key, &value) != 0) return null;
            if (value.mv_size != 64) return null;

            const src: [*]const u8 = @ptrCast(value.mv_data);
            @memcpy(&integrity, src[0..64]);
        }

        return self.lookup(&integrity);
    }

    pub const BatchHit = struct {
        /// Index into the `integrities` argument passed to `batchLookup`.
        index: u32,
        file_count: u32,
    };

    /// Probe many integrity hashes in a single read transaction.
    /// Misses are simply absent from `items`; call `deinit` on the result.
    pub fn batchLookup(
        self: *CacheDB,
        integrities: []const [64]u8,
        allocator: std.mem.Allocator,
    ) !struct {
        items: []BatchHit,
        allocator: std.mem.Allocator,
        pub fn deinit(s: *@This()) void {
            s.allocator.free(s.items);
        }
    } {
        var hits = std.ArrayListUnmanaged(BatchHit){};
        errdefer hits.deinit(allocator);

        var txn: ?*c.MDB_txn = null;
        if (c.mdb_txn_begin(self.env, null, c.MDB_RDONLY, &txn) != 0) {
            // A failed txn begin is treated as "no hits" rather than an error.
            return .{ .items = &.{}, .allocator = allocator };
        }
        defer c.mdb_txn_abort(txn);

        for (integrities, 0..) |integrity, i| {
            const key_bytes = makeIntegrityKey(&integrity);
            var key = c.MDB_val{
                .mv_size = key_bytes.len,
                .mv_data = @constCast(&key_bytes),
            };
            var value: c.MDB_val = undefined;
            if (c.mdb_get(txn, self.dbi_primary, &key, &value) == 0) {
                var file_count: u32 = 0;
                if (value.mv_size >= @sizeOf(SerializedEntry)) {
                    const data: [*]const u8 = @ptrCast(value.mv_data);
                    var header: SerializedEntry = undefined;
                    @memcpy(std.mem.asBytes(&header), data[0..@sizeOf(SerializedEntry)]);
                    file_count = header.file_count;
                }
                try hits.append(allocator, .{ .index = @intCast(i), .file_count = file_count });
            }
        }

        // Fix: `toOwnedSlice(...) catch &.{}` leaked the accumulated list on
        // OOM; propagate the error instead (the errdefer above frees the list).
        return .{ .items = try hits.toOwnedSlice(allocator), .allocator = allocator };
    }

    /// Insert one entry, optionally registering a name@version alias.
    pub fn insert(self: *CacheDB, entry: *const CacheEntry, name: ?[]const u8, version: ?[]const u8) !void {
        var txn: ?*c.MDB_txn = null;
        if (c.mdb_txn_begin(self.env, null, 0, &txn) != 0) {
            return error.InsertError;
        }
        errdefer c.mdb_txn_abort(txn); // mdb_txn_abort(null) is a no-op

        try self.insertInTxn(txn.?, entry, name, version);

        // Fix: clear the handle before checking — mdb_txn_commit frees the
        // txn even on failure, and the errdefer must not abort it again.
        const rc = c.mdb_txn_commit(txn);
        txn = null;
        if (rc != 0) return error.InsertError;
    }

    /// Write one entry (and its optional secondary key) inside an open write txn.
    fn insertInTxn(self: *CacheDB, txn: *c.MDB_txn, entry: *const CacheEntry, name: ?[]const u8, version: ?[]const u8) !void {
        // Value layout: SerializedEntry header immediately followed by path bytes.
        const value_size = @sizeOf(SerializedEntry) + entry.path.len;
        const value_buf = try self.allocator.alloc(u8, value_size);
        defer self.allocator.free(value_buf);

        const header = SerializedEntry{
            .unpacked_size = entry.unpacked_size,
            .file_count = entry.file_count,
            .cached_at = entry.cached_at,
            .path_len = @intCast(entry.path.len),
        };
        // Fix: write the header with a byte copy instead of the previous
        // @ptrCast(@alignCast(value_buf.ptr)) — a []u8 allocation only
        // guarantees 1-byte alignment, so that cast could trip a safety panic.
        @memcpy(value_buf[0..@sizeOf(SerializedEntry)], std.mem.asBytes(&header));
        @memcpy(value_buf[@sizeOf(SerializedEntry)..], entry.path);

        const key_bytes = makeIntegrityKey(&entry.integrity);

        var key = c.MDB_val{
            .mv_size = key_bytes.len,
            .mv_data = @constCast(&key_bytes),
        };

        var value = c.MDB_val{
            .mv_size = value_size,
            .mv_data = value_buf.ptr,
        };

        if (c.mdb_put(txn, self.dbi_primary, &key, &value, 0) != 0) {
            return error.InsertError;
        }

        if (name != null and version != null) {
            const name_key = try makeNameKey(self.allocator, name.?, version.?);
            defer self.allocator.free(name_key);

            var sec_key = c.MDB_val{
                .mv_size = name_key.len,
                .mv_data = @constCast(name_key.ptr),
            };
            var sec_value = c.MDB_val{
                .mv_size = 64,
                .mv_data = @constCast(&entry.integrity),
            };

            if (c.mdb_put(txn, self.dbi_secondary, &sec_key, &sec_value, 0) != 0) {
                return error.InsertError;
            }
        }
    }

    /// Insert many entries in a single write transaction (all-or-nothing).
    pub fn batchInsert(self: *CacheDB, entries: []const CacheEntry) !void {
        var txn: ?*c.MDB_txn = null;
        if (c.mdb_txn_begin(self.env, null, 0, &txn) != 0) {
            return error.InsertError;
        }
        errdefer c.mdb_txn_abort(txn); // mdb_txn_abort(null) is a no-op

        for (entries) |*entry| {
            try self.insertInTxn(txn.?, entry, null, null);
        }

        // Fix: mdb_txn_commit frees the txn even on failure (see insert()).
        const rc = c.mdb_txn_commit(txn);
        txn = null;
        if (rc != 0) return error.InsertError;
    }

    pub const NamedCacheEntry = struct {
        entry: CacheEntry,
        name: []const u8,
        version: []const u8,
    };

    /// Insert many named entries in one transaction. Individual insert
    /// failures are skipped (best effort); only txn-level failures error.
    pub fn batchInsertNamed(self: *CacheDB, entries: []const NamedCacheEntry) !void {
        if (entries.len == 0) return;

        var txn: ?*c.MDB_txn = null;
        if (c.mdb_txn_begin(self.env, null, 0, &txn) != 0) {
            return error.InsertError;
        }
        errdefer c.mdb_txn_abort(txn); // mdb_txn_abort(null) is a no-op

        for (entries) |item| {
            self.insertInTxn(txn.?, &item.entry, item.name, item.version) catch continue;
        }

        // Fix: mdb_txn_commit frees the txn even on failure (see insert()).
        const rc = c.mdb_txn_commit(txn);
        txn = null;
        if (rc != 0) return error.InsertError;
    }

    /// Remove an entry from the primary index. A missing key is not an error;
    /// stale secondary-index aliases are cleaned up lazily by `prune`.
    pub fn delete(self: *CacheDB, integrity: *const [64]u8) !void {
        var txn: ?*c.MDB_txn = null;
        if (c.mdb_txn_begin(self.env, null, 0, &txn) != 0) {
            return error.DeleteError;
        }
        errdefer c.mdb_txn_abort(txn); // mdb_txn_abort(null) is a no-op

        const key_bytes = makeIntegrityKey(integrity);
        var key = c.MDB_val{
            .mv_size = key_bytes.len,
            .mv_data = @constCast(&key_bytes),
        };

        _ = c.mdb_del(txn, self.dbi_primary, &key, null);

        // Fix: mdb_txn_commit frees the txn even on failure (see insert()).
        const rc = c.mdb_txn_commit(txn);
        txn = null;
        if (rc != 0) return error.DeleteError;
    }

    /// Build "<cache_dir>/cache/<hex integrity>". Caller frees the result.
    pub fn getPackagePath(self: *CacheDB, integrity: *const [64]u8, allocator: std.mem.Allocator) ![]u8 {
        const hex = std.fmt.bytesToHex(integrity.*, .lower);
        return std.fmt.allocPrint(allocator, "{s}/cache/{s}", .{ self.cache_dir, hex });
    }

    /// Force an fsync of the environment (needed because of MDB_NOSYNC).
    pub fn sync(self: *CacheDB) void {
        _ = c.mdb_env_sync(self.env, 1);
    }

    /// Report entry count plus on-disk sizes of the index and package store.
    pub fn stats(self: *CacheDB) !struct { entries: usize, db_size: usize, cache_size: usize } {
        var txn: ?*c.MDB_txn = null;
        if (c.mdb_txn_begin(self.env, null, c.MDB_RDONLY, &txn) != 0) {
            return error.DatabaseError;
        }
        defer c.mdb_txn_abort(txn);

        var db_stat: c.MDB_stat = undefined;
        _ = c.mdb_stat(txn, self.dbi_primary, &db_stat);

        var env_info: c.MDB_envinfo = undefined;
        _ = c.mdb_env_info(self.env, &env_info);

        const db_size = self.getDbFileSize();
        const cache_size = self.calculateCacheSize();

        return .{
            .entries = db_stat.ms_entries,
            .db_size = db_size,
            .cache_size = cache_size,
        };
    }

    /// Assumed filesystem block size used to approximate real disk usage.
    const BLOCK_SIZE: usize = 4096;

    /// Round `size` up to a whole number of filesystem blocks.
    inline fn alignToBlock(size: u64) usize {
        const s: usize = @intCast(size);
        return ((s + BLOCK_SIZE - 1) / BLOCK_SIZE) * BLOCK_SIZE;
    }

    /// Block-aligned size of the LMDB index file (0 on any error).
    fn getDbFileSize(self: *CacheDB) usize {
        const db_path = std.fmt.allocPrint(self.allocator, "{s}/index.lmdb", .{self.cache_dir}) catch return 0;
        defer self.allocator.free(db_path);

        const stat = std.fs.cwd().statFile(db_path) catch return 0;
        return alignToBlock(stat.size);
    }

    /// Total block-aligned size of everything under "<cache_dir>/cache".
    /// Unreadable entries contribute 0 (best effort).
    fn calculateCacheSize(self: *CacheDB) usize {
        const cache_path = std.fmt.allocPrint(self.allocator, "{s}/cache", .{self.cache_dir}) catch return 0;
        defer self.allocator.free(cache_path);

        var total: usize = 0;
        var dir = std.fs.cwd().openDir(cache_path, .{ .iterate = true }) catch return 0;
        defer dir.close();

        var iter = dir.iterate();
        while (iter.next() catch null) |entry| {
            if (entry.kind == .directory) {
                total += self.getDirSize(dir, entry.name);
            } else if (entry.kind == .file) {
                const stat = dir.statFile(entry.name) catch continue;
                total += alignToBlock(stat.size);
            }
        }
        return total;
    }

    /// Recursive helper for `calculateCacheSize`; unreadable subtrees count 0.
    fn getDirSize(self: *CacheDB, parent: std.fs.Dir, name: []const u8) usize {
        var subdir = parent.openDir(name, .{ .iterate = true }) catch return 0;
        defer subdir.close();

        var total: usize = 0;
        var iter = subdir.iterate();
        while (iter.next() catch null) |entry| {
            if (entry.kind == .directory) {
                total += self.getDirSize(subdir, entry.name);
            } else if (entry.kind == .file) {
                const stat = subdir.statFile(entry.name) catch continue;
                total += alignToBlock(stat.size);
            }
        }
        return total;
    }

    /// Metadata key: "m:<package name>". Caller frees the result.
    fn makeMetadataKey(allocator: std.mem.Allocator, name: []const u8) ![]u8 {
        return std.fmt.allocPrint(allocator, "m:{s}", .{name});
    }

    /// Return cached registry JSON for `name`, or null when absent, expired
    /// (older than METADATA_TTL_SECS), malformed, or on any database error.
    /// The returned slice is duplicated with `allocator`; caller frees it.
    pub fn lookupMetadata(self: *CacheDB, name: []const u8, allocator: std.mem.Allocator) ?[]u8 {
        var txn: ?*c.MDB_txn = null;
        if (c.mdb_txn_begin(self.env, null, c.MDB_RDONLY, &txn) != 0) {
            return null;
        }
        defer c.mdb_txn_abort(txn);

        const meta_key = makeMetadataKey(self.allocator, name) catch return null;
        defer self.allocator.free(meta_key);

        var key = c.MDB_val{
            .mv_size = meta_key.len,
            .mv_data = @constCast(meta_key.ptr),
        };
        var value: c.MDB_val = undefined;

        if (c.mdb_get(txn, self.dbi_metadata, &key, &value) != 0) return null;
        if (value.mv_size < @sizeOf(i64)) return null;

        // Value layout: i64 store-timestamp followed by the JSON payload.
        const data: [*]const u8 = @ptrCast(value.mv_data);
        var cached_at: i64 = undefined;
        @memcpy(std.mem.asBytes(&cached_at), data[0..@sizeOf(i64)]);

        const now = std.time.timestamp();
        if (now - cached_at > METADATA_TTL_SECS) return null;

        const json_data = data[@sizeOf(i64)..value.mv_size];
        return allocator.dupe(u8, json_data) catch null;
    }

    /// Store registry JSON for `name`, timestamped now. The JSON is first run
    /// through the native strip_npm_metadata helper; if the helper fails the
    /// original JSON is stored unmodified.
    pub fn insertMetadata(self: *CacheDB, name: []const u8, json_data: []const u8) !void {
        var stripped_len: usize = 0;
        const stripped_ptr = strip_npm_metadata(json_data.ptr, json_data.len, &stripped_len);
        defer if (stripped_ptr) |p| strip_metadata_free(p);

        const data_to_store = if (stripped_ptr) |p| p[0..stripped_len] else json_data;

        var txn: ?*c.MDB_txn = null;
        if (c.mdb_txn_begin(self.env, null, 0, &txn) != 0) {
            return error.InsertError;
        }
        errdefer c.mdb_txn_abort(txn); // mdb_txn_abort(null) is a no-op

        const meta_key = try makeMetadataKey(self.allocator, name);
        defer self.allocator.free(meta_key);

        const value_size = @sizeOf(i64) + data_to_store.len;
        const value_buf = try self.allocator.alloc(u8, value_size);
        defer self.allocator.free(value_buf);

        const now: i64 = std.time.timestamp();
        @memcpy(value_buf[0..@sizeOf(i64)], std.mem.asBytes(&now));
        @memcpy(value_buf[@sizeOf(i64)..], data_to_store);

        var key = c.MDB_val{
            .mv_size = meta_key.len,
            .mv_data = @constCast(meta_key.ptr),
        };

        var value = c.MDB_val{
            .mv_size = value_size,
            .mv_data = value_buf.ptr,
        };

        if (c.mdb_put(txn, self.dbi_metadata, &key, &value, 0) != 0) return error.InsertError;

        // Fix: mdb_txn_commit frees the txn even on failure (see insert()).
        const rc = c.mdb_txn_commit(txn);
        txn = null;
        if (rc != 0) return error.InsertError;
    }

    /// Scratch space shared by the prune passes.
    const PruneCollections = struct {
        /// Primary keys of expired entries (fixed 66-byte "i:" keys).
        keys: std.ArrayListUnmanaged([66]u8) = .{},
        /// Duplicated on-disk paths to delete after the txn commits.
        paths: std.ArrayListUnmanaged([]const u8) = .{},

        fn deinit(self: *PruneCollections, allocator: std.mem.Allocator) void {
            for (self.paths.items) |p| allocator.free(p);
            self.paths.deinit(allocator);
            self.keys.deinit(allocator);
        }
    };

    /// Walk the primary index and collect keys/paths of entries older than
    /// `cutoff`. Allocation failures skip individual entries (best effort),
    /// so a key may be pruned while its file deletion is skipped.
    inline fn collectExpiredEntries(
        self: *CacheDB,
        txn: *c.MDB_txn,
        cutoff: i64,
        collections: *PruneCollections,
    ) !void {
        var cursor: ?*c.MDB_cursor = null;
        if (c.mdb_cursor_open(txn, self.dbi_primary, &cursor) != 0) return error.DatabaseError;
        defer c.mdb_cursor_close(cursor);

        var key: c.MDB_val = undefined;
        var value: c.MDB_val = undefined;
        var rc = c.mdb_cursor_get(cursor, &key, &value, c.MDB_FIRST);

        while (rc == 0) : (rc = c.mdb_cursor_get(cursor, &key, &value, c.MDB_NEXT)) {
            if (value.mv_size < @sizeOf(SerializedEntry)) continue;

            const data: [*]const u8 = @ptrCast(value.mv_data);
            var header: SerializedEntry = undefined;
            @memcpy(std.mem.asBytes(&header), data[0..@sizeOf(SerializedEntry)]);

            if (header.cached_at >= cutoff) continue;
            if (key.mv_size != 66) continue; // only well-formed integrity keys

            // Keys are copied out because cursor data is invalidated as the
            // cursor moves; actual deletion happens after the scan.
            const key_data: [*]const u8 = @ptrCast(key.mv_data);
            var key_copy: [66]u8 = undefined;
            @memcpy(&key_copy, key_data[0..66]);
            collections.keys.append(self.allocator, key_copy) catch continue;

            const path_start = @sizeOf(SerializedEntry);
            if (value.mv_size >= path_start + header.path_len) {
                const path = self.allocator.dupe(u8, data[path_start..][0..header.path_len]) catch continue;
                collections.paths.append(self.allocator, path) catch self.allocator.free(path);
            }
        }
    }

    /// Delete the given primary keys; returns how many were actually removed.
    inline fn deletePrimaryEntries(self: *CacheDB, txn: *c.MDB_txn, keys: []const [66]u8) u32 {
        var pruned: u32 = 0;
        for (keys) |*key_bytes| {
            var del_key = c.MDB_val{ .mv_size = 66, .mv_data = @constCast(key_bytes) };
            if (c.mdb_del(txn, self.dbi_primary, &del_key, null) == 0) pruned += 1;
        }
        return pruned;
    }

    /// Remove secondary (name@version) aliases whose primary entry is gone.
    /// Keys are copied first: deleting while a cursor iterates is unsafe.
    inline fn pruneStaleSecondaryEntries(self: *CacheDB, txn: *c.MDB_txn) void {
        var cursor: ?*c.MDB_cursor = null;
        if (c.mdb_cursor_open(txn, self.dbi_secondary, &cursor) != 0) return;
        defer c.mdb_cursor_close(cursor);

        var to_delete = std.ArrayListUnmanaged([]u8){};
        defer {
            for (to_delete.items) |k| self.allocator.free(k);
            to_delete.deinit(self.allocator);
        }

        var key: c.MDB_val = undefined;
        var value: c.MDB_val = undefined;
        var rc = c.mdb_cursor_get(cursor, &key, &value, c.MDB_FIRST);

        while (rc == 0) : (rc = c.mdb_cursor_get(cursor, &key, &value, c.MDB_NEXT)) {
            if (value.mv_size != 64) continue;

            const integrity: *const [64]u8 = @ptrCast(value.mv_data);
            const int_key = makeIntegrityKey(integrity);
            var check_key = c.MDB_val{ .mv_size = int_key.len, .mv_data = @constCast(&int_key) };
            var check_val: c.MDB_val = undefined;

            // Primary entry still exists — alias is live, keep it.
            if (c.mdb_get(txn, self.dbi_primary, &check_key, &check_val) == 0) continue;

            const key_data: [*]const u8 = @ptrCast(key.mv_data);
            const key_copy = self.allocator.dupe(u8, key_data[0..key.mv_size]) catch continue;
            to_delete.append(self.allocator, key_copy) catch self.allocator.free(key_copy);
        }

        for (to_delete.items) |sec_key| {
            var del_key = c.MDB_val{ .mv_size = sec_key.len, .mv_data = @ptrCast(sec_key.ptr) };
            _ = c.mdb_del(txn, self.dbi_secondary, &del_key, null);
        }
    }

    /// Recursively delete the unpacked package trees for pruned entries.
    inline fn deletePackageFiles(paths: []const []const u8) void {
        for (paths) |path| std.fs.cwd().deleteTree(path) catch {};
    }

    /// Remove metadata records older than METADATA_TTL_SECS.
    /// Keys are copied first: deleting while a cursor iterates is unsafe.
    inline fn pruneExpiredMetadata(self: *CacheDB, txn: *c.MDB_txn) void {
        const now = std.time.timestamp();

        var cursor: ?*c.MDB_cursor = null;
        if (c.mdb_cursor_open(txn, self.dbi_metadata, &cursor) != 0) return;
        defer c.mdb_cursor_close(cursor);

        var to_delete = std.ArrayListUnmanaged([]u8){};
        defer {
            for (to_delete.items) |k| self.allocator.free(k);
            to_delete.deinit(self.allocator);
        }

        var key: c.MDB_val = undefined;
        var value: c.MDB_val = undefined;
        var rc = c.mdb_cursor_get(cursor, &key, &value, c.MDB_FIRST);

        while (rc == 0) : (rc = c.mdb_cursor_get(cursor, &key, &value, c.MDB_NEXT)) {
            if (value.mv_size < @sizeOf(i64)) continue;

            const data: [*]const u8 = @ptrCast(value.mv_data);
            var cached_at: i64 = undefined;
            @memcpy(std.mem.asBytes(&cached_at), data[0..@sizeOf(i64)]);

            if (now - cached_at <= METADATA_TTL_SECS) continue;

            const key_data: [*]const u8 = @ptrCast(key.mv_data);
            const key_copy = self.allocator.dupe(u8, key_data[0..key.mv_size]) catch continue;
            to_delete.append(self.allocator, key_copy) catch self.allocator.free(key_copy);
        }

        for (to_delete.items) |meta_key| {
            var del_key = c.MDB_val{ .mv_size = meta_key.len, .mv_data = @ptrCast(meta_key.ptr) };
            _ = c.mdb_del(txn, self.dbi_metadata, &del_key, null);
        }
    }

    /// Remove entries older than `max_age_days` (plus stale secondary keys and
    /// expired metadata) and delete their package files. Returns the number of
    /// primary entries pruned.
    pub fn prune(self: *CacheDB, max_age_days: u32) !u32 {
        const now = std.time.timestamp();
        const max_age_secs: i64 = @as(i64, max_age_days) * 24 * 60 * 60;
        const cutoff = now - max_age_secs;

        var txn: ?*c.MDB_txn = null;
        if (c.mdb_txn_begin(self.env, null, 0, &txn) != 0) return error.DatabaseError;
        errdefer c.mdb_txn_abort(txn); // mdb_txn_abort(null) is a no-op

        var collections = PruneCollections{};
        defer collections.deinit(self.allocator);

        try self.collectExpiredEntries(txn.?, cutoff, &collections);
        const pruned = self.deletePrimaryEntries(txn.?, collections.keys.items);

        self.pruneStaleSecondaryEntries(txn.?);
        self.pruneExpiredMetadata(txn.?);

        // Fix: mdb_txn_commit frees the txn even on failure (see insert()).
        const rc = c.mdb_txn_commit(txn);
        txn = null;
        if (rc != 0) return error.DatabaseError;

        // Files are removed only after the index deletions are committed.
        deletePackageFiles(collections.paths.items);

        return pruned;
    }
};