MIRROR: javascript for 🐳's, a tiny runtime with big ambitions
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at master 402 lines 14 kB view raw
//! Binary lockfile format ("PKGL"): a flat, mmap-friendly snapshot of a
//! package dependency graph.
//!
//! On-disk layout (all offsets are absolute byte offsets from file start):
//!   Header (64 B) | string table | Package array | Dependency array | hash table
//!
//! Multi-byte fields are stored in native endianness: the reader casts the
//! mapped bytes in place rather than parsing, so a lockfile is only portable
//! between machines of the same endianness and struct ABI.

const std = @import("std");
const builtin = @import("builtin");

/// File magic; the four bytes spell "PKGL" when the u32 is read back on the
/// machine that wrote it.
pub const MAGIC: u32 = 0x504B474C;
pub const VERSION: u32 = 1;

/// Offset/length pair referencing a span inside the file's string table.
pub const StringRef = extern struct {
    offset: u32,
    len: u32,

    /// Resolve the reference against `string_table`.
    /// Out-of-range references (possible with a corrupt file) degrade to ""
    /// or a clamped slice instead of tripping a bounds panic.
    pub fn slice(self: StringRef, string_table: []const u8) []const u8 {
        const offset: usize = self.offset;
        const len: usize = self.len;
        if (offset >= string_table.len) return "";
        const end = @min(offset + len, string_table.len);
        return string_table[offset..end];
    }

    pub const empty: StringRef = .{ .offset = 0, .len = 0 };
};

/// Fixed 64-byte file header. `_reserved` pads the struct so the string
/// table always starts at byte 64 and leaves room for future fields.
pub const Header = extern struct {
    magic: u32 = MAGIC,
    version: u32 = VERSION,
    package_count: u32 = 0,
    dependency_count: u32 = 0,
    string_table_offset: u32 = 0,
    string_table_size: u32 = 0, // bytes
    package_array_offset: u32 = 0,
    dependency_array_offset: u32 = 0,
    hash_table_offset: u32 = 0,
    hash_table_size: u32 = 0, // bucket count, not bytes
    _reserved: [24]u8 = [_]u8{0} ** 24,

    /// Cheap sanity check; full bounds validation happens in `Lockfile`.
    pub fn validate(self: *const Header) bool {
        return self.magic == MAGIC and self.version == VERSION;
    }
};

/// Per-package boolean attributes, packed into one byte on disk.
pub const PackageFlags = packed struct(u8) {
    dev: bool = false,
    optional: bool = false,
    peer: bool = false,
    bundled: bool = false,
    has_bin: bool = false,
    has_scripts: bool = false,
    direct: bool = false,
    _reserved: u1 = 0,
};

/// One resolved package. Fixed-size so the on-disk array can be cast in
/// place; all strings are indirected through the string table.
pub const Package = extern struct {
    name: StringRef,
    version_major: u64,
    version_minor: u64,
    version_patch: u64,
    prerelease: StringRef,
    /// Raw integrity digest bytes (e.g. SHA-512); see `integrityHex`.
    integrity: [64]u8,
    tarball_url: StringRef,
    /// Install path of the parent that hoisted this package, or empty for a
    /// top-level (root node_modules) install.
    parent_path: StringRef,
    /// Index of this package's first entry in the Dependency array.
    deps_start: u32,
    deps_count: u32,
    flags: PackageFlags,
    _padding: [3]u8 = .{ 0, 0, 0 },

    comptime {
        // The on-disk format depends on this exact size; fail the build,
        // not the user, if the layout drifts.
        std.debug.assert(@sizeOf(Package) == 136);
    }

    /// Format "major.minor.patch[-prerelease]". Caller owns the returned slice.
    pub fn versionString(self: *const Package, allocator: std.mem.Allocator, string_table: []const u8) ![]u8 {
        const prerelease_str = self.prerelease.slice(string_table);
        if (prerelease_str.len > 0) {
            return std.fmt.allocPrint(allocator, "{d}.{d}.{d}-{s}", .{
                self.version_major,
                self.version_minor,
                self.version_patch,
                prerelease_str,
            });
        }
        return std.fmt.allocPrint(allocator, "{d}.{d}.{d}", .{
            self.version_major,
            self.version_minor,
            self.version_patch,
        });
    }

    /// Lowercase hex rendering of the integrity digest (2 chars per byte).
    pub fn integrityHex(self: *const Package) [128]u8 {
        return std.fmt.bytesToHex(self.integrity, .lower);
    }

    /// "<parent>/node_modules/<name>" for nested installs, or just "<name>"
    /// at the top level. Caller owns the returned slice.
    pub fn installPath(self: *const Package, allocator: std.mem.Allocator, string_table: []const u8) ![]u8 {
        const parent = self.parent_path.slice(string_table);
        const name = self.name.slice(string_table);
        if (parent.len > 0) return std.fmt.allocPrint(allocator, "{s}/node_modules/{s}", .{ parent, name });
        return allocator.dupe(u8, name);
    }

    pub fn isNested(self: *const Package) bool {
        return self.parent_path.len > 0;
    }
};

/// Per-edge attributes for a dependency relationship.
pub const DependencyFlags = packed struct(u8) {
    peer: bool = false,
    dev: bool = false,
    optional: bool = false,
    _reserved: u5 = 0,
};

/// One dependency edge: which package satisfies it and the version
/// constraint string that requested it.
pub const Dependency = extern struct {
    package_index: u32,
    constraint: StringRef,
    flags: DependencyFlags = .{},
    _padding: [3]u8 = .{ 0, 0, 0 },
};

/// Open-addressed (linear probe) hash bucket mapping a name hash to a
/// package index. `package_index == maxInt(u32)` marks an empty slot.
pub const HashBucket = extern struct {
    name_hash: u32,
    package_index: u32,

    pub const empty: HashBucket = .{
        .name_hash = 0,
        .package_index = std.math.maxInt(u32),
    };
};

/// Classic djb2 string hash with wrapping arithmetic (hash * 33 + c).
pub fn djb2Hash(str: []const u8) u32 {
    var hash: u32 = 5381;
    for (str) |c| hash = ((hash << 5) +% hash) +% c;
    return hash;
}

/// True if the byte range [offset, offset + count * elem_size) fits inside a
/// buffer of `data_len` bytes. Arithmetic is widened to u64 so a hostile
/// header cannot overflow u32 and wrap past the check (the previous u32 math
/// could panic in safe builds or wrap in ReleaseFast).
fn rangeFits(offset: u32, count: u32, elem_size: u32, data_len: usize) bool {
    const end = @as(u64, offset) + @as(u64, count) * @as(u64, elem_size);
    return end <= data_len;
}

/// Read-only view over a mapped (POSIX) or heap-loaded (Windows) lockfile.
/// All slices borrow from `data`; they are invalidated by `close`.
pub const Lockfile = struct {
    data: if (builtin.os.tag == .windows)
        []align(@alignOf(Header)) u8
    else
        []align(std.heap.page_size_min) const u8,
    header: *const Header,
    string_table: []const u8,
    packages: []const Package,
    dependencies: []const Dependency,
    hash_table: []const HashBucket,

    /// Validate header-declared section bounds and alignment against the
    /// actual file size. A bad offset/size returns error.InvalidLockfile
    /// instead of a later bounds or @alignCast panic.
    fn checkSections(header: *const Header, data_len: usize) !void {
        if (!rangeFits(header.string_table_offset, header.string_table_size, 1, data_len) or
            !rangeFits(header.package_array_offset, header.package_count, @sizeOf(Package), data_len) or
            !rangeFits(header.dependency_array_offset, header.dependency_count, @sizeOf(Dependency), data_len) or
            !rangeFits(header.hash_table_offset, header.hash_table_size, @sizeOf(HashBucket), data_len))
        {
            return error.InvalidLockfile;
        }
        // The arrays are accessed via @alignCast; misaligned offsets in a
        // corrupt file must be rejected here, not trapped there.
        if (header.package_array_offset % @alignOf(Package) != 0 or
            header.dependency_array_offset % @alignOf(Dependency) != 0 or
            header.hash_table_offset % @alignOf(HashBucket) != 0)
        {
            return error.InvalidLockfile;
        }
    }

    /// Open and validate a lockfile at `path`. POSIX targets mmap the file
    /// (private, read-only); Windows reads it into a heap buffer.
    pub fn open(path: []const u8) !Lockfile {
        const file = try std.fs.cwd().openFile(path, .{});
        defer file.close();

        const stat = try file.stat();
        if (stat.size < @sizeOf(Header)) {
            return error.InvalidLockfile;
        }
        // stat.size is u64; reject files that cannot fit in usize (32-bit).
        const size = std.math.cast(usize, stat.size) orelse return error.InvalidLockfile;

        if (comptime builtin.os.tag == .windows) {
            const data = try std.heap.c_allocator.alignedAlloc(u8, std.mem.Alignment.fromByteUnits(@alignOf(Header)), size);
            errdefer std.heap.c_allocator.free(data);

            const bytes_read = try file.readAll(data);
            // NOTE: no explicit free here — the errdefer above frees `data`
            // on this error return. (An explicit free alongside the errdefer
            // was a double free.)
            if (bytes_read != size) return error.InvalidLockfile;

            return initFromDataWindows(data);
        } else {
            // The mapping stays valid after the fd is closed by the defer.
            const data = try std.posix.mmap(
                null,
                size,
                std.posix.PROT.READ,
                .{ .TYPE = .PRIVATE },
                file.handle,
                0,
            );

            return initFromData(data);
        }
    }

    fn initFromDataWindows(data: []align(@alignOf(Header)) u8) !Lockfile {
        if (data.len < @sizeOf(Header)) return error.InvalidLockfile;

        const header: *const Header = @ptrCast(@alignCast(data.ptr));
        if (!header.validate()) return error.InvalidLockfile;
        try checkSections(header, data.len);

        return .{
            .data = data,
            .header = header,
            .string_table = data[header.string_table_offset..][0..header.string_table_size],
            .packages = @as([*]const Package, @ptrCast(@alignCast(data.ptr + header.package_array_offset)))[0..header.package_count],
            .dependencies = @as([*]const Dependency, @ptrCast(@alignCast(data.ptr + header.dependency_array_offset)))[0..header.dependency_count],
            .hash_table = @as([*]const HashBucket, @ptrCast(@alignCast(data.ptr + header.hash_table_offset)))[0..header.hash_table_size],
        };
    }

    /// Build a view over an already-loaded lockfile image. `data` must stay
    /// alive (and page-aligned) for the lifetime of the returned Lockfile.
    pub fn initFromData(data: []align(std.heap.page_size_min) const u8) !Lockfile {
        if (data.len < @sizeOf(Header)) {
            return error.InvalidLockfile;
        }

        const header: *const Header = @ptrCast(@alignCast(data.ptr));
        if (!header.validate()) {
            return error.InvalidLockfile;
        }
        try checkSections(header, data.len);

        const string_table = data[header.string_table_offset..][0..header.string_table_size];

        const packages_ptr: [*]const Package = @ptrCast(@alignCast(data.ptr + header.package_array_offset));
        const packages = packages_ptr[0..header.package_count];

        const deps_ptr: [*]const Dependency = @ptrCast(@alignCast(data.ptr + header.dependency_array_offset));
        const dependencies = deps_ptr[0..header.dependency_count];

        const hash_ptr: [*]const HashBucket = @ptrCast(@alignCast(data.ptr + header.hash_table_offset));
        const hash_table = hash_ptr[0..header.hash_table_size];

        return .{
            .data = data,
            .header = header,
            .string_table = string_table,
            .packages = packages,
            .dependencies = dependencies,
            .hash_table = hash_table,
        };
    }

    /// Release the backing memory. All slices handed out become invalid.
    pub fn close(self: *Lockfile) void {
        if (comptime builtin.os.tag == .windows) {
            std.heap.c_allocator.free(self.data);
        } else std.posix.munmap(self.data);
        self.* = undefined;
    }

    /// Find a package by exact name via the on-disk hash table.
    /// Returns null if absent (or if the table is corrupt).
    pub fn lookupPackage(self: *const Lockfile, name: []const u8) ?*const Package {
        if (self.hash_table.len == 0) return null;

        const hash = djb2Hash(name);
        var index = hash % @as(u32, @intCast(self.hash_table.len));
        var probes: u32 = 0;

        while (probes < self.hash_table.len) : (probes += 1) {
            const bucket = &self.hash_table[index];
            // Empty slot terminates the probe chain: the name is absent.
            if (bucket.package_index == std.math.maxInt(u32)) return null;
            if (bucket.name_hash == hash) {
                // A corrupt file can hold an out-of-range index; treat it as
                // "not found" rather than panicking on the slice access.
                if (bucket.package_index >= self.packages.len) return null;
                const pkg = &self.packages[bucket.package_index];
                const pkg_name = pkg.name.slice(self.string_table);
                if (std.mem.eql(u8, pkg_name, name)) return pkg;
            }
            index = (index + 1) % @as(u32, @intCast(self.hash_table.len));
        }
        return null;
    }

    /// Slice of `pkg`'s dependency edges; empty on corrupt ranges.
    pub fn getPackageDeps(self: *const Lockfile, pkg: *const Package) []const Dependency {
        if (pkg.deps_count == 0) return &[_]Dependency{};
        if (pkg.deps_start >= self.dependencies.len or
            pkg.deps_start + pkg.deps_count > self.dependencies.len) return &[_]Dependency{};
        return self.dependencies[pkg.deps_start..][0..pkg.deps_count];
    }
};

/// Builds a lockfile in memory and serializes it with `write`.
/// Strings are deduplicated via an interning table.
pub const LockfileWriter = struct {
    allocator: std.mem.Allocator,
    packages: std.ArrayListUnmanaged(Package),
    dependencies: std.ArrayListUnmanaged(Dependency),
    string_builder: std.ArrayListUnmanaged(u8),
    // Maps an owned copy of each interned string to its StringRef.
    string_offsets: std.StringHashMap(StringRef),

    pub fn init(allocator: std.mem.Allocator) LockfileWriter {
        return .{
            .allocator = allocator,
            .packages = .{},
            .dependencies = .{},
            .string_builder = .{},
            .string_offsets = std.StringHashMap(StringRef).init(allocator),
        };
    }

    pub fn deinit(self: *LockfileWriter) void {
        self.packages.deinit(self.allocator);
        self.dependencies.deinit(self.allocator);
        self.string_builder.deinit(self.allocator);
        // Keys are dupes owned by this writer (see internString); free them.
        var key_iter = self.string_offsets.keyIterator();
        while (key_iter.next()) |key| {
            self.allocator.free(key.*);
        }
        self.string_offsets.deinit();
    }

    /// Intern `str` into the string table, returning its StringRef.
    /// Repeated strings share one table entry; "" maps to StringRef.empty.
    pub fn internString(self: *LockfileWriter, str: []const u8) !StringRef {
        if (str.len == 0) return StringRef.empty;
        if (self.string_offsets.get(str)) |ref| return ref;

        // StringRef fields are u32; refuse anything that would not round-trip.
        if (self.string_builder.items.len > std.math.maxInt(u32)) return error.StringTableTooLarge;
        if (str.len > std.math.maxInt(u32)) return error.StringTooLarge;

        const offset: u32 = @intCast(self.string_builder.items.len);
        try self.string_builder.appendSlice(self.allocator, str);

        const ref = StringRef{ .offset = offset, .len = @intCast(str.len) };
        // The map must own its key: `str` may not outlive this call.
        const stored_str = try self.allocator.dupe(u8, str);
        errdefer self.allocator.free(stored_str);
        try self.string_offsets.put(stored_str, ref);

        return ref;
    }

    /// Append a package and return its index (used by Dependency.package_index).
    pub fn addPackage(self: *LockfileWriter, pkg: Package) !u32 {
        const index: u32 = @intCast(self.packages.items.len);
        try self.packages.append(self.allocator, pkg);
        return index;
    }

    pub fn addDependency(self: *LockfileWriter, dep: Dependency) !void {
        try self.dependencies.append(self.allocator, dep);
    }

    /// Round `offset` up to the next multiple of `alignment`.
    fn alignOffset(offset: u32, alignment: u32) u32 {
        const rem = offset % alignment;
        return if (rem == 0) offset else offset + (alignment - rem);
    }

    /// Serialize the accumulated packages/dependencies/strings to `path`,
    /// building the lookup hash table (~70% load factor) along the way.
    pub fn write(self: *LockfileWriter, path: []const u8) !void {
        const file = try std.fs.cwd().createFile(path, .{});
        defer file.close();

        const header_size: u32 = @sizeOf(Header);
        const string_table_offset = header_size;
        const string_table_size: u32 = @intCast(self.string_builder.items.len);

        // Each section is padded so its array elements land on their natural
        // alignment; the reader @alignCasts into the mapped bytes.
        const package_array_offset = alignOffset(string_table_offset + string_table_size, @alignOf(Package));
        const package_pad_size = package_array_offset - (string_table_offset + string_table_size);
        const package_array_size: u32 = @intCast(self.packages.items.len * @sizeOf(Package));

        const dependency_array_offset = alignOffset(package_array_offset + package_array_size, @alignOf(Dependency));
        const dep_pad_size = dependency_array_offset - (package_array_offset + package_array_size);
        const dependency_array_size: u32 = @intCast(self.dependencies.items.len * @sizeOf(Dependency));

        // 10/7 ≈ 1.43x buckets per package keeps probe chains short.
        const hash_table_size: u32 = @intCast(@max(1, self.packages.items.len * 10 / 7));
        var hash_table = try self.allocator.alloc(HashBucket, hash_table_size);
        defer self.allocator.free(hash_table);

        for (hash_table) |*bucket| {
            bucket.* = HashBucket.empty;
        }

        // Insert every package with linear probing — must mirror the probe
        // order used by Lockfile.lookupPackage.
        for (self.packages.items, 0..) |pkg, i| {
            const name = pkg.name.slice(self.string_builder.items);
            const hash = djb2Hash(name);
            var index = hash % hash_table_size;
            var probes: u32 = 0;

            while (hash_table[index].package_index != std.math.maxInt(u32) and probes < hash_table_size) : (probes += 1) {
                index = (index + 1) % hash_table_size;
            }
            if (probes >= hash_table_size) return error.HashTableFull;

            hash_table[index] = .{
                .name_hash = hash,
                .package_index = @intCast(i),
            };
        }

        const hash_table_offset = alignOffset(dependency_array_offset + dependency_array_size, @alignOf(HashBucket));
        const hash_pad_size = hash_table_offset - (dependency_array_offset + dependency_array_size);

        const header = Header{
            .package_count = @intCast(self.packages.items.len),
            .dependency_count = @intCast(self.dependencies.items.len),
            .string_table_offset = string_table_offset,
            .string_table_size = string_table_size,
            .package_array_offset = package_array_offset,
            .dependency_array_offset = dependency_array_offset,
            .hash_table_offset = hash_table_offset,
            .hash_table_size = hash_table_size,
        };

        try file.writeAll(std.mem.asBytes(&header));
        try file.writeAll(self.string_builder.items);

        // Pad buffers: every section alignment is <= 8, so pads are < 8 bytes.
        if (package_pad_size > 0) {
            const padding = [_]u8{0} ** 8;
            try file.writeAll(padding[0..package_pad_size]);
        }
        try file.writeAll(std.mem.sliceAsBytes(self.packages.items));

        if (dep_pad_size > 0) {
            const padding = [_]u8{0} ** 8;
            try file.writeAll(padding[0..dep_pad_size]);
        }
        try file.writeAll(std.mem.sliceAsBytes(self.dependencies.items));

        if (hash_pad_size > 0) {
            const padding = [_]u8{0} ** 8;
            try file.writeAll(padding[0..hash_pad_size]);
        }
        try file.writeAll(std.mem.sliceAsBytes(hash_table));
    }
};