MIRROR: javascript for ๐Ÿœ's, a tiny runtime with big ambitions
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

feat: add package manager (#9)

+12519 -911
+156
.github/actions/build-native-deps/action.yml
··· 1 + name: Build Native Dependencies 2 + description: Build llhttp, libuv, libsodium, mbedtls, zlib as needed 3 + 4 + inputs: 5 + deps: 6 + description: Space-separated list of deps to build (llhttp libuv libsodium mbedtls zlib) 7 + required: true 8 + prefix: 9 + description: Install prefix 10 + required: true 11 + cc: 12 + description: C compiler 13 + required: true 14 + cxx: 15 + description: C++ compiler 16 + required: false 17 + default: '' 18 + ar: 19 + description: AR tool 20 + required: false 21 + default: ar 22 + ranlib: 23 + description: RANLIB tool 24 + required: false 25 + default: ranlib 26 + cmake_generator: 27 + description: CMake generator (Ninja, MinGW Makefiles, etc) 28 + required: false 29 + default: '' 30 + llhttp_version: 31 + required: true 32 + libuv_version: 33 + required: true 34 + libsodium_version: 35 + required: true 36 + mbedtls_version: 37 + required: true 38 + zlib_version: 39 + required: true 40 + 41 + runs: 42 + using: composite 43 + steps: 44 + - name: Compute deps hash 45 + id: deps-hash 46 + shell: bash 47 + run: echo "hash=$(echo '${{ inputs.deps }}' | tr ' ' '-')" >> $GITHUB_OUTPUT 48 + 49 + - name: Cache native deps 50 + id: cache 51 + uses: actions/cache@v4 52 + with: 53 + path: ${{ inputs.prefix }} 54 + key: deps-${{ runner.os }}-${{ runner.arch }}-${{ steps.deps-hash.outputs.hash }}-${{ hashFiles('.github/versions.json') }}-v1 55 + 56 + - name: Prepare build environment 57 + if: steps.cache.outputs.cache-hit != 'true' 58 + shell: bash 59 + run: | 60 + mkdir -p ${{ inputs.prefix }} 61 + 62 + - name: Build llhttp 63 + if: steps.cache.outputs.cache-hit != 'true' && contains(inputs.deps, 'llhttp') 64 + shell: bash 65 + run: | 66 + if [[ "$RUNNER_OS" == "Windows" ]]; then 67 + export PATH="$(cygpath "$RUNNER_TEMP/msys64/mingw64/bin"):$PATH" 68 + PREFIX=$(cygpath -m "${{ inputs.prefix }}") 69 + else 70 + PREFIX="${{ inputs.prefix }}" 71 + fi 72 + 73 + git clone --depth 1 --branch release/v${{ inputs.llhttp_version }} 
https://github.com/nodejs/llhttp.git /tmp/llhttp 74 + cd /tmp/llhttp 75 + CMAKE_GENERATOR="${{ inputs.cmake_generator }}" 76 + if [[ "$RUNNER_OS" != "macOS" ]]; then 77 + export AR="${{ inputs.ar }}" 78 + export RANLIB="${{ inputs.ranlib }}" 79 + fi 80 + CC=${{ inputs.cc }} CXX=${{ inputs.cxx || inputs.cc }} cmake -B build \ 81 + ${CMAKE_GENERATOR:+-G "$CMAKE_GENERATOR"} \ 82 + -DCMAKE_INSTALL_PREFIX="$PREFIX" \ 83 + -DBUILD_SHARED_LIBS=OFF -DBUILD_STATIC_LIBS=ON \ 84 + -DCMAKE_BUILD_TYPE=MinSizeRel 85 + cmake --build build 86 + cmake --install build 87 + echo "=== PKG_CONFIG files in $PREFIX/lib/pkgconfig ===" 88 + ls -la "$PREFIX/lib/pkgconfig/" || echo "pkgconfig dir not found" 89 + cp "$PREFIX/lib/pkgconfig/libllhttp.pc" "$PREFIX/lib/pkgconfig/llhttp.pc" 90 + echo "=== llhttp.pc contents ===" 91 + cat "$PREFIX/lib/pkgconfig/llhttp.pc" 92 + echo "=== After copy ===" 93 + ls -la "$PREFIX/lib/pkgconfig/" 94 + 95 + - name: Build libuv 96 + if: steps.cache.outputs.cache-hit != 'true' && contains(inputs.deps, 'libuv') 97 + shell: bash 98 + run: | 99 + git clone --depth 1 --branch v${{ inputs.libuv_version }} https://github.com/libuv/libuv.git /tmp/libuv 100 + cd /tmp/libuv 101 + CMAKE_GENERATOR="${{ inputs.cmake_generator }}" 102 + if [[ "$RUNNER_OS" != "macOS" ]]; then 103 + export AR="${{ inputs.ar }}" 104 + export RANLIB="${{ inputs.ranlib }}" 105 + fi 106 + CC=${{ inputs.cc }} cmake -B build \ 107 + ${CMAKE_GENERATOR:+-G "$CMAKE_GENERATOR"} \ 108 + -DCMAKE_INSTALL_PREFIX=${{ inputs.prefix }} \ 109 + -DBUILD_TESTING=OFF -DLIBUV_BUILD_SHARED=OFF \ 110 + -DCMAKE_BUILD_TYPE=MinSizeRel 111 + cmake --build build 112 + cmake --install build 113 + 114 + - name: Build libsodium 115 + if: steps.cache.outputs.cache-hit != 'true' && contains(inputs.deps, 'libsodium') 116 + shell: bash 117 + run: | 118 + git clone --depth 1 --branch ${{ inputs.libsodium_version }}-RELEASE https://github.com/jedisct1/libsodium.git /tmp/libsodium 119 + cd /tmp/libsodium 120 + ./configure CC=${{ 
inputs.cc }} --prefix=${{ inputs.prefix }} --disable-shared --enable-static 121 + make -j$(nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null || echo 4) 122 + make install 123 + 124 + - name: Build mbedtls 125 + if: steps.cache.outputs.cache-hit != 'true' && contains(inputs.deps, 'mbedtls') 126 + shell: bash 127 + run: | 128 + git clone --depth 1 --branch mbedtls-${{ inputs.mbedtls_version }} --recurse-submodules https://github.com/Mbed-TLS/mbedtls.git /tmp/mbedtls 129 + cd /tmp/mbedtls 130 + CC=${{ inputs.cc }} cmake -B build \ 131 + -DCMAKE_INSTALL_PREFIX=${{ inputs.prefix }} \ 132 + -DENABLE_PROGRAMS=OFF -DENABLE_TESTING=OFF \ 133 + -DCMAKE_BUILD_TYPE=Release \ 134 + -DUSE_STATIC_MBEDTLS_LIBRARY=ON -DUSE_SHARED_MBEDTLS_LIBRARY=OFF 135 + cmake --build build -j$(nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null || echo 4) 136 + cmake --install build 137 + 138 + - name: Build zlib 139 + if: steps.cache.outputs.cache-hit != 'true' && contains(inputs.deps, 'zlib') 140 + shell: bash 141 + run: | 142 + git clone --depth 1 --branch v${{ inputs.zlib_version }} https://github.com/madler/zlib.git /tmp/zlib 143 + cd /tmp/zlib 144 + CMAKE_GENERATOR="${{ inputs.cmake_generator }}" 145 + if [[ "$RUNNER_OS" != "macOS" ]]; then 146 + export AR="${{ inputs.ar }}" 147 + export RANLIB="${{ inputs.ranlib }}" 148 + fi 149 + CC=${{ inputs.cc }} cmake -B build \ 150 + ${CMAKE_GENERATOR:+-G "$CMAKE_GENERATOR"} \ 151 + -DCMAKE_INSTALL_PREFIX=${{ inputs.prefix }} \ 152 + -DBUILD_SHARED_LIBS=OFF \ 153 + -DCMAKE_BUILD_TYPE=MinSizeRel 154 + cmake --build build 155 + cmake --install build 156 + rm -f ${{ inputs.prefix }}/lib/libz.so* 2>/dev/null || true
+161
.github/actions/build-project/action.yml
··· 1 + name: Build Project 2 + description: Run meson build with appropriate settings 3 + 4 + outputs: 5 + version: 6 + description: Build version string 7 + value: ${{ steps.version.outputs.version }} 8 + 9 + inputs: 10 + cc: 11 + description: C compiler 12 + required: true 13 + cxx: 14 + description: C++ compiler 15 + required: false 16 + default: '' 17 + ar: 18 + description: AR tool 19 + required: false 20 + default: '' 21 + ranlib: 22 + description: RANLIB tool 23 + required: false 24 + default: '' 25 + ld: 26 + description: Linker 27 + required: false 28 + default: '' 29 + strip: 30 + description: Strip tool 31 + required: false 32 + default: strip 33 + deps_prefix: 34 + description: Prefix where deps are installed 35 + required: true 36 + build_timestamp: 37 + description: Build timestamp 38 + required: true 39 + tls_library: 40 + description: TLS library (openssl or mbedtls) 41 + required: false 42 + default: 'openssl' 43 + static_link: 44 + description: Enable static linking 45 + required: false 46 + default: 'false' 47 + extra_meson_args: 48 + description: Additional meson arguments 49 + required: false 50 + default: '' 51 + binary_name: 52 + description: Output binary name (ant or ant.exe) 53 + required: false 54 + default: 'ant' 55 + 56 + runs: 57 + using: composite 58 + steps: 59 + - name: npm install 60 + shell: bash 61 + run: npm ci 62 + working-directory: src/tools 63 + 64 + - name: Cache vendor deps 65 + uses: actions/cache@v4 66 + with: 67 + path: | 68 + vendor/*/ 69 + build/vendor 70 + key: vendor-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('vendor/*.wrap') }} 71 + 72 + - name: Configure and build 73 + shell: bash 74 + run: | 75 + if [[ "$RUNNER_OS" == "Windows" ]]; then 76 + MSYS2_PATH=$(cygpath "$RUNNER_TEMP/msys64/mingw64/bin") 77 + export PATH="$MSYS2_PATH:$PATH" 78 + DEPS_PREFIX=$(cygpath -m "${{ inputs.deps_prefix }}") 79 + export PKG_CONFIG_PATH="$DEPS_PREFIX/lib/pkgconfig;$PKG_CONFIG_PATH" 80 + export 
CMAKE_PREFIX_PATH="$DEPS_PREFIX;$CMAKE_PREFIX_PATH" 81 + else 82 + DEPS_PREFIX="${{ inputs.deps_prefix }}" 83 + export PKG_CONFIG_PATH="$DEPS_PREFIX/lib/pkgconfig:$PKG_CONFIG_PATH" 84 + export CMAKE_PREFIX_PATH="$DEPS_PREFIX:$CMAKE_PREFIX_PATH" 85 + fi 86 + export CC="${{ inputs.cc }}" 87 + 88 + echo "=== Debug: PKG_CONFIG_PATH ===" 89 + echo "PKG_CONFIG_PATH=$PKG_CONFIG_PATH" 90 + echo "=== Debug: Contents of $DEPS_PREFIX/lib/pkgconfig ===" 91 + ls -la "$DEPS_PREFIX/lib/pkgconfig/" 2>/dev/null || echo "Directory not found" 92 + echo "=== Debug: pkg-config --variable pc_path pkg-config ===" 93 + pkg-config --variable pc_path pkg-config || true 94 + echo "=== Debug: pkg-config --modversion llhttp ===" 95 + pkg-config --modversion llhttp || echo "llhttp not found by pkg-config" 96 + 97 + if [[ -n "${{ inputs.cxx }}" ]]; then 98 + export CXX="${{ inputs.cxx }}" 99 + fi 100 + if [[ -n "${{ inputs.ar }}" ]]; then 101 + export AR="${{ inputs.ar }}" 102 + export CMAKE_AR="${{ inputs.ar }}" 103 + fi 104 + if [[ -n "${{ inputs.ranlib }}" ]]; then 105 + export RANLIB="${{ inputs.ranlib }}" 106 + export CMAKE_RANLIB="${{ inputs.ranlib }}" 107 + fi 108 + if [[ -n "${{ inputs.ld }}" ]]; then 109 + export CC_LD="${{ inputs.ld }}" 110 + fi 111 + 112 + MESON_ARGS="-Db_lto=true --buildtype=release" 113 + MESON_ARGS="$MESON_ARGS -Dbuild_timestamp=${{ inputs.build_timestamp }}" 114 + 115 + if [[ "${{ inputs.tls_library }}" == "mbedtls" ]]; then 116 + MESON_ARGS="$MESON_ARGS -Dtls_library=mbedtls" 117 + fi 118 + 119 + if [[ "${{ inputs.static_link }}" == "true" ]]; then 120 + MESON_ARGS="$MESON_ARGS --prefer-static -Dstatic_link=true" 121 + fi 122 + 123 + if [[ -n "${{ inputs.extra_meson_args }}" ]]; then 124 + MESON_ARGS="$MESON_ARGS ${{ inputs.extra_meson_args }}" 125 + fi 126 + 127 + echo "Running: meson setup build $MESON_ARGS" 128 + meson setup build $MESON_ARGS 129 + meson compile -C build 130 + 131 + - name: Verify build 132 + id: version 133 + shell: bash 134 + run: | 135 + if 
[[ "$RUNNER_OS" == "Windows" ]]; then 136 + MSYS2_PATH=$(cygpath "$RUNNER_TEMP/msys64/mingw64/bin") 137 + export PATH="$MSYS2_PATH:$PATH" 138 + fi 139 + VERSION=$(./build/${{ inputs.binary_name }} --version-raw) 140 + echo "Version: $VERSION" 141 + echo "version=$VERSION" >> $GITHUB_OUTPUT 142 + 143 + - name: Strip binary 144 + shell: bash 145 + run: | 146 + if [[ "$RUNNER_OS" == "Windows" ]]; then 147 + MSYS2_PATH=$(cygpath "$RUNNER_TEMP/msys64/mingw64/bin") 148 + export PATH="$MSYS2_PATH:$PATH" 149 + fi 150 + ${{ inputs.strip }} build/${{ inputs.binary_name }} 151 + 152 + - name: Bundle Windows DLLs 153 + if: runner.os == 'Windows' 154 + shell: bash 155 + run: | 156 + MINGW_BIN=$(cygpath "$RUNNER_TEMP/msys64/mingw64/bin") 157 + cp "$MINGW_BIN/libssl-3-x64.dll" build/ 158 + cp "$MINGW_BIN/libcrypto-3-x64.dll" build/ 159 + cp "$MINGW_BIN/libsodium-26.dll" build/ 160 + echo "Bundled DLLs:" 161 + ls -la build/*.dll
+88
.github/actions/setup-llvm/action.yml
··· 1 + name: Setup LLVM 2 + description: Install LLVM/Clang toolchain 3 + 4 + inputs: 5 + version: 6 + description: LLVM major version 7 + required: true 8 + os_type: 9 + description: Target OS type (linux-glibc, linux-musl, macos, windows) 10 + required: true 11 + 12 + outputs: 13 + cc: 14 + description: C compiler path 15 + value: ${{ steps.paths.outputs.cc }} 16 + cxx: 17 + description: C++ compiler path 18 + value: ${{ steps.paths.outputs.cxx }} 19 + ar: 20 + description: AR path 21 + value: ${{ steps.paths.outputs.ar }} 22 + ranlib: 23 + description: RANLIB path 24 + value: ${{ steps.paths.outputs.ranlib }} 25 + strip: 26 + description: Strip path 27 + value: ${{ steps.paths.outputs.strip }} 28 + ld: 29 + description: Linker path 30 + value: ${{ steps.paths.outputs.ld }} 31 + 32 + runs: 33 + using: composite 34 + steps: 35 + - name: Install LLVM (Linux glibc) 36 + if: inputs.os_type == 'linux-glibc' 37 + shell: bash 38 + env: 39 + DEBIAN_FRONTEND: noninteractive 40 + run: | 41 + wget -qO- https://apt.llvm.org/llvm-snapshot.gpg.key | tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc 42 + echo "deb https://apt.llvm.org/jammy/ llvm-toolchain-jammy-${{ inputs.version }} main" > /etc/apt/sources.list.d/llvm.list 43 + apt-get update 44 + apt-get install -y clang-${{ inputs.version }} lld-${{ inputs.version }} llvm-${{ inputs.version }} 45 + 46 + # linux-musl: LLVM installed via apk in workflow setup step 47 + # macos: LLVM installed via brew in workflow setup step 48 + # windows: Using MinGW toolchain 49 + 50 + - name: Set output paths 51 + id: paths 52 + shell: bash 53 + run: | 54 + case "${{ inputs.os_type }}" in 55 + linux-glibc) 56 + echo "cc=clang-${{ inputs.version }}" >> $GITHUB_OUTPUT 57 + echo "cxx=clang++-${{ inputs.version }}" >> $GITHUB_OUTPUT 58 + echo "ar=llvm-ar-${{ inputs.version }}" >> $GITHUB_OUTPUT 59 + echo "ranlib=llvm-ranlib-${{ inputs.version }}" >> $GITHUB_OUTPUT 60 + echo "strip=llvm-strip-${{ inputs.version }}" >> $GITHUB_OUTPUT 61 + echo 
"ld=lld-${{ inputs.version }}" >> $GITHUB_OUTPUT 62 + ;; 63 + linux-musl) 64 + echo "cc=clang" >> $GITHUB_OUTPUT 65 + echo "cxx=clang++" >> $GITHUB_OUTPUT 66 + echo "ar=llvm-ar" >> $GITHUB_OUTPUT 67 + echo "ranlib=llvm-ranlib" >> $GITHUB_OUTPUT 68 + echo "strip=llvm-strip" >> $GITHUB_OUTPUT 69 + echo "ld=lld" >> $GITHUB_OUTPUT 70 + ;; 71 + macos) 72 + LLVM_PREFIX=$(brew --prefix llvm) 73 + echo "cc=$LLVM_PREFIX/bin/clang" >> $GITHUB_OUTPUT 74 + echo "cxx=$LLVM_PREFIX/bin/clang++" >> $GITHUB_OUTPUT 75 + echo "ar=$LLVM_PREFIX/bin/llvm-ar" >> $GITHUB_OUTPUT 76 + echo "ranlib=$LLVM_PREFIX/bin/llvm-ranlib" >> $GITHUB_OUTPUT 77 + echo "strip=strip" >> $GITHUB_OUTPUT 78 + echo "ld=" >> $GITHUB_OUTPUT 79 + ;; 80 + windows) 81 + echo "cc=gcc" >> $GITHUB_OUTPUT 82 + echo "cxx=g++" >> $GITHUB_OUTPUT 83 + echo "ar=ar" >> $GITHUB_OUTPUT 84 + echo "ranlib=ranlib" >> $GITHUB_OUTPUT 85 + echo "strip=strip" >> $GITHUB_OUTPUT 86 + echo "ld=" >> $GITHUB_OUTPUT 87 + ;; 88 + esac
+54
.github/actions/setup-zig/action.yml
··· 1 + name: Setup Zig 2 + description: Install Zig compiler 3 + 4 + inputs: 5 + version: 6 + description: Zig version 7 + required: true 8 + 9 + runs: 10 + using: composite 11 + steps: 12 + - name: Determine platform 13 + id: platform 14 + shell: bash 15 + run: | 16 + case "${{ runner.os }}-${{ runner.arch }}" in 17 + Linux-X64) echo "slug=x86_64-linux" >> $GITHUB_OUTPUT; echo "ext=tar.xz" >> $GITHUB_OUTPUT ;; 18 + Linux-ARM64) echo "slug=aarch64-linux" >> $GITHUB_OUTPUT; echo "ext=tar.xz" >> $GITHUB_OUTPUT ;; 19 + macOS-X64) echo "slug=x86_64-macos" >> $GITHUB_OUTPUT; echo "ext=tar.xz" >> $GITHUB_OUTPUT ;; 20 + macOS-ARM64) echo "slug=aarch64-macos" >> $GITHUB_OUTPUT; echo "ext=tar.xz" >> $GITHUB_OUTPUT ;; 21 + Windows-X64) echo "slug=x86_64-windows" >> $GITHUB_OUTPUT; echo "ext=zip" >> $GITHUB_OUTPUT ;; 22 + *) echo "Unsupported platform: ${{ runner.os }}-${{ runner.arch }}" >&2; exit 1 ;; 23 + esac 24 + echo "install_path=$RUNNER_TEMP/zig" >> $GITHUB_OUTPUT 25 + 26 + - name: Cache Zig 27 + id: cache 28 + uses: actions/cache@v4 29 + with: 30 + path: ${{ steps.platform.outputs.install_path }} 31 + key: zig-${{ steps.platform.outputs.slug }}-${{ inputs.version }} 32 + 33 + - name: Download and install Zig 34 + if: steps.cache.outputs.cache-hit != 'true' 35 + shell: bash 36 + run: | 37 + cd "$RUNNER_TEMP" 38 + URL="https://ziglang.org/download/${{ inputs.version }}/zig-${{ steps.platform.outputs.slug }}-${{ inputs.version }}.${{ steps.platform.outputs.ext }}" 39 + echo "Downloading: $URL" 40 + curl -fSL -o zig.${{ steps.platform.outputs.ext }} "$URL" 41 + ls -la zig.${{ steps.platform.outputs.ext }} 42 + if [[ "${{ steps.platform.outputs.ext }}" == "zip" ]]; then 43 + unzip -q zig.${{ steps.platform.outputs.ext }} 44 + else 45 + tar -xJf zig.${{ steps.platform.outputs.ext }} 46 + fi 47 + 48 + INSTALL_PATH="${{ steps.platform.outputs.install_path }}" 49 + mkdir -p "$(dirname "$INSTALL_PATH")" 50 + mv zig-${{ steps.platform.outputs.slug }}-${{ inputs.version }} 
"$INSTALL_PATH" 51 + 52 + - name: Add to PATH 53 + shell: bash 54 + run: echo "${{ steps.platform.outputs.install_path }}" >> $GITHUB_PATH
+133
.github/versions.json
··· 1 + { 2 + "tools": { 3 + "zig": "0.15.2", 4 + "llvm": "21", 5 + "node": "22" 6 + }, 7 + "dependencies": { 8 + "llhttp": "9.2.1", 9 + "libuv": "1.51.0", 10 + "libsodium": "1.0.20", 11 + "mbedtls": "3.6.5", 12 + "zlib": "1.3.1" 13 + }, 14 + "targets": { 15 + "linux-glibc-x64": { 16 + "os_type": "linux-glibc", 17 + "arch": "x86_64", 18 + "zig_target": "x86_64-linux-gnu", 19 + "rust_target": "x86_64-unknown-linux-gnu", 20 + "artifact_name": "ant-linux-x64", 21 + "binary_name": "ant", 22 + "deps": ["llhttp", "libuv"], 23 + "tls_library": "openssl", 24 + "static_link": false, 25 + "cmake_generator": null, 26 + "extra_meson_args": null 27 + }, 28 + "linux-glibc-aarch64": { 29 + "os_type": "linux-glibc", 30 + "arch": "aarch64", 31 + "zig_target": "aarch64-linux-gnu", 32 + "rust_target": "aarch64-unknown-linux-gnu", 33 + "artifact_name": "ant-linux-aarch64", 34 + "binary_name": "ant", 35 + "deps": ["llhttp", "libuv"], 36 + "tls_library": "openssl", 37 + "static_link": false, 38 + "cmake_generator": null, 39 + "extra_meson_args": null 40 + }, 41 + "linux-musl-x64": { 42 + "os_type": "linux-musl", 43 + "arch": "x86_64", 44 + "zig_target": "x86_64-linux-musl", 45 + "rust_target": "x86_64-unknown-linux-musl", 46 + "artifact_name": "ant-linux-x64-musl", 47 + "binary_name": "ant", 48 + "deps": ["llhttp", "libuv", "zlib"], 49 + "tls_library": "openssl", 50 + "static_link": true, 51 + "cmake_generator": "Ninja", 52 + "extra_meson_args": null 53 + }, 54 + "linux-musl-aarch64": { 55 + "os_type": "linux-musl", 56 + "arch": "aarch64", 57 + "zig_target": "aarch64-linux-musl", 58 + "rust_target": "aarch64-unknown-linux-musl", 59 + "artifact_name": "ant-linux-aarch64-musl", 60 + "binary_name": "ant", 61 + "deps": ["llhttp", "libuv", "zlib"], 62 + "tls_library": "openssl", 63 + "static_link": true, 64 + "cmake_generator": "Ninja", 65 + "extra_meson_args": null 66 + }, 67 + "macos-x64": { 68 + "os_type": "macos", 69 + "arch": "x86_64", 70 + "zig_target": "x86_64-macos", 71 + 
"rust_target": "x86_64-apple-darwin", 72 + "artifact_name": "ant-darwin-x64", 73 + "binary_name": "ant", 74 + "deps": ["llhttp", "libsodium"], 75 + "tls_library": "openssl", 76 + "static_link": false, 77 + "cmake_generator": null, 78 + "extra_meson_args": null 79 + }, 80 + "macos-aarch64": { 81 + "os_type": "macos", 82 + "arch": "aarch64", 83 + "zig_target": "aarch64-macos", 84 + "rust_target": "aarch64-apple-darwin", 85 + "artifact_name": "ant-darwin-aarch64", 86 + "binary_name": "ant", 87 + "deps": ["llhttp", "libsodium"], 88 + "tls_library": "openssl", 89 + "static_link": false, 90 + "cmake_generator": null, 91 + "extra_meson_args": null 92 + }, 93 + "macos-x64-mbedtls": { 94 + "os_type": "macos", 95 + "arch": "x86_64", 96 + "zig_target": "x86_64-macos", 97 + "rust_target": "x86_64-apple-darwin", 98 + "artifact_name": "ant-darwin-x64-mbedtls", 99 + "binary_name": "ant", 100 + "deps": ["llhttp", "libsodium", "mbedtls"], 101 + "tls_library": "mbedtls", 102 + "static_link": false, 103 + "cmake_generator": null, 104 + "extra_meson_args": null 105 + }, 106 + "macos-aarch64-mbedtls": { 107 + "os_type": "macos", 108 + "arch": "aarch64", 109 + "zig_target": "aarch64-macos", 110 + "rust_target": "aarch64-apple-darwin", 111 + "artifact_name": "ant-darwin-aarch64-mbedtls", 112 + "binary_name": "ant", 113 + "deps": ["llhttp", "libsodium", "mbedtls"], 114 + "tls_library": "mbedtls", 115 + "static_link": false, 116 + "cmake_generator": null, 117 + "extra_meson_args": null 118 + }, 119 + "windows-x64": { 120 + "os_type": "windows", 121 + "arch": "x86_64", 122 + "zig_target": "x86_64-windows-gnu", 123 + "rust_target": "x86_64-pc-windows-gnu", 124 + "artifact_name": "ant-windows-x64", 125 + "binary_name": "ant.exe", 126 + "deps": ["llhttp"], 127 + "tls_library": "openssl", 128 + "static_link": false, 129 + "cmake_generator": "MinGW Makefiles", 130 + "extra_meson_args": "-Dc_std=gnu2x" 131 + } 132 + } 133 + }
-95
.github/workflows/build-linux-glibc-aarch64.yml
··· 1 - name: Build Linux aarch64 [glibc] 2 - 3 - on: 4 - workflow_dispatch: 5 - workflow_call: 6 - inputs: 7 - build_timestamp: 8 - required: true 9 - type: string 10 - 11 - jobs: 12 - linux-aarch64: 13 - runs-on: ubuntu-24.04-arm 14 - container: ubuntu:22.04 15 - steps: 16 - - name: Install git and clang-21 17 - env: 18 - DEBIAN_FRONTEND: noninteractive 19 - run: | 20 - apt-get update 21 - apt-get install -y git ca-certificates gnupg wget software-properties-common curl 22 - wget -qO- https://apt.llvm.org/llvm-snapshot.gpg.key | tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc 23 - echo "deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-21 main" > /etc/apt/sources.list.d/llvm.list 24 - curl -fsSL https://deb.nodesource.com/setup_22.x | bash - 25 - apt-get update 26 - apt-get install -y clang-21 lld-21 llvm-21 nodejs 27 - git config --global --add safe.directory "$GITHUB_WORKSPACE" 28 - - uses: actions/checkout@v4 29 - with: 30 - submodules: recursive 31 - - env: 32 - DEBIAN_FRONTEND: noninteractive 33 - run: | 34 - apt-get install -y python3-pip ninja-build cmake pkg-config \ 35 - uuid-dev libssl-dev libsodium-dev 36 - pip3 install meson 37 - - name: Cache native dependencies 38 - id: cache-deps 39 - uses: actions/cache@v4 40 - with: 41 - path: ~/deps-cache 42 - key: deps-linux-glibc-aarch64-llhttp-9.2.1-libuv-1.51.0-v1 43 - - name: Build llhttp 44 - if: steps.cache-deps.outputs.cache-hit != 'true' 45 - run: | 46 - export CFLAGS="-Os -flto" 47 - export AR=llvm-ar-21 48 - export RANLIB=llvm-ranlib-21 49 - git clone --depth 1 --branch release/v9.2.1 https://github.com/nodejs/llhttp.git /tmp/llhttp 50 - cd /tmp/llhttp 51 - CC=clang-21 CXX=clang++-21 cmake -B build -DCMAKE_INSTALL_PREFIX=$HOME/deps-cache -DBUILD_SHARED_LIBS=OFF -DBUILD_STATIC_LIBS=ON -DCMAKE_BUILD_TYPE=MinSizeRel 52 - cmake --build build 53 - cmake --install build 54 - - name: Build libuv 55 - if: steps.cache-deps.outputs.cache-hit != 'true' 56 - run: | 57 - export CFLAGS="-Os -flto" 58 - export 
AR=llvm-ar-21 59 - export RANLIB=llvm-ranlib-21 60 - git clone --depth 1 --branch v1.51.0 https://github.com/libuv/libuv.git /tmp/libuv 61 - cd /tmp/libuv 62 - CC=clang-21 cmake -B build -DCMAKE_INSTALL_PREFIX=$HOME/deps-cache -DBUILD_TESTING=OFF -DLIBUV_BUILD_SHARED=OFF -DCMAKE_BUILD_TYPE=MinSizeRel 63 - cmake --build build 64 - cmake --install build 65 - - run: npm ci 66 - working-directory: src/tools 67 - - name: Install Rust 68 - run: | 69 - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y 70 - echo "$HOME/.cargo/bin" >> $GITHUB_PATH 71 - - uses: actions/cache@v4 72 - with: 73 - path: | 74 - ~/.cargo/registry 75 - ~/.cargo/git 76 - build/oxc-target 77 - key: cargo-linux-arm64-${{ hashFiles('src/strip/Cargo.lock') }} 78 - restore-keys: cargo-linux-arm64- 79 - - name: Cache vendor dependencies 80 - uses: actions/cache@v4 81 - with: 82 - path: | 83 - vendor/*/ 84 - build/vendor 85 - key: vendor-linux-glibc-aarch64-${{ hashFiles('vendor/*.wrap') }} 86 - - run: | 87 - export PKG_CONFIG_PATH="$HOME/deps-cache/lib/pkgconfig:$PKG_CONFIG_PATH" 88 - export CMAKE_PREFIX_PATH="$HOME/deps-cache:$CMAKE_PREFIX_PATH" 89 - CC=clang-21 CC_LD=lld-21 meson setup build -Db_lto=true --buildtype=release -Dbuild_timestamp=${{ inputs.build_timestamp }} && meson compile -C build 90 - - run: ./build/ant --version 91 - - run: llvm-strip-21 build/ant 92 - - uses: actions/upload-artifact@v4 93 - with: 94 - name: ant-linux-aarch64 95 - path: build/ant
-95
.github/workflows/build-linux-glibc-x64.yml
··· 1 - name: Build Linux x64 [glibc] 2 - 3 - on: 4 - workflow_dispatch: 5 - workflow_call: 6 - inputs: 7 - build_timestamp: 8 - required: true 9 - type: string 10 - 11 - jobs: 12 - linux-x64: 13 - runs-on: ubuntu-24.04 14 - container: ubuntu:22.04 15 - steps: 16 - - name: Install git and clang-21 17 - env: 18 - DEBIAN_FRONTEND: noninteractive 19 - run: | 20 - apt-get update 21 - apt-get install -y git ca-certificates gnupg wget software-properties-common curl 22 - wget -qO- https://apt.llvm.org/llvm-snapshot.gpg.key | tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc 23 - echo "deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-21 main" > /etc/apt/sources.list.d/llvm.list 24 - curl -fsSL https://deb.nodesource.com/setup_22.x | bash - 25 - apt-get update 26 - apt-get install -y clang-21 lld-21 llvm-21 nodejs 27 - git config --global --add safe.directory "$GITHUB_WORKSPACE" 28 - - uses: actions/checkout@v4 29 - with: 30 - submodules: recursive 31 - - env: 32 - DEBIAN_FRONTEND: noninteractive 33 - run: | 34 - apt-get install -y python3-pip ninja-build cmake pkg-config \ 35 - uuid-dev libssl-dev libsodium-dev 36 - pip3 install meson 37 - - name: Cache native dependencies 38 - id: cache-deps 39 - uses: actions/cache@v4 40 - with: 41 - path: ~/deps-cache 42 - key: deps-linux-glibc-x64-llhttp-9.2.1-libuv-1.51.0-v1 43 - - name: Build llhttp 44 - if: steps.cache-deps.outputs.cache-hit != 'true' 45 - run: | 46 - export CFLAGS="-Os -flto" 47 - export AR=llvm-ar-21 48 - export RANLIB=llvm-ranlib-21 49 - git clone --depth 1 --branch release/v9.2.1 https://github.com/nodejs/llhttp.git /tmp/llhttp 50 - cd /tmp/llhttp 51 - CC=clang-21 CXX=clang++-21 cmake -B build -DCMAKE_INSTALL_PREFIX=$HOME/deps-cache -DBUILD_SHARED_LIBS=OFF -DBUILD_STATIC_LIBS=ON -DCMAKE_BUILD_TYPE=MinSizeRel 52 - cmake --build build 53 - cmake --install build 54 - - name: Build libuv 55 - if: steps.cache-deps.outputs.cache-hit != 'true' 56 - run: | 57 - export CFLAGS="-Os -flto" 58 - export AR=llvm-ar-21 59 - 
export RANLIB=llvm-ranlib-21 60 - git clone --depth 1 --branch v1.51.0 https://github.com/libuv/libuv.git /tmp/libuv 61 - cd /tmp/libuv 62 - CC=clang-21 cmake -B build -DCMAKE_INSTALL_PREFIX=$HOME/deps-cache -DBUILD_TESTING=OFF -DLIBUV_BUILD_SHARED=OFF -DCMAKE_BUILD_TYPE=MinSizeRel 63 - cmake --build build 64 - cmake --install build 65 - - run: npm ci 66 - working-directory: src/tools 67 - - name: Install Rust 68 - run: | 69 - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y 70 - echo "$HOME/.cargo/bin" >> $GITHUB_PATH 71 - - uses: actions/cache@v4 72 - with: 73 - path: | 74 - ~/.cargo/registry 75 - ~/.cargo/git 76 - build/oxc-target 77 - key: cargo-linux-amd64-${{ hashFiles('src/strip/Cargo.lock') }} 78 - restore-keys: cargo-linux-amd64- 79 - - name: Cache vendor dependencies 80 - uses: actions/cache@v4 81 - with: 82 - path: | 83 - vendor/*/ 84 - build/vendor 85 - key: vendor-linux-glibc-x64-${{ hashFiles('vendor/*.wrap') }} 86 - - run: | 87 - export PKG_CONFIG_PATH="$HOME/deps-cache/lib/pkgconfig:$PKG_CONFIG_PATH" 88 - export CMAKE_PREFIX_PATH="$HOME/deps-cache:$CMAKE_PREFIX_PATH" 89 - CC=clang-21 CC_LD=lld-21 meson setup build -Db_lto=true --buildtype=release -Dbuild_timestamp=${{ inputs.build_timestamp }} && meson compile -C build 90 - - run: ./build/ant --version 91 - - run: llvm-strip-21 build/ant 92 - - uses: actions/upload-artifact@v4 93 - with: 94 - name: ant-linux-x64 95 - path: build/ant
-90
.github/workflows/build-linux-musl-aarch64.yml
··· 1 - name: Build Linux aarch64 [musl] 2 - 3 - on: 4 - workflow_dispatch: 5 - workflow_call: 6 - inputs: 7 - build_timestamp: 8 - required: true 9 - type: string 10 - 11 - jobs: 12 - linux-musl-aarch64: 13 - runs-on: ubuntu-24.04-arm 14 - steps: 15 - - uses: actions/checkout@v4 16 - with: 17 - submodules: recursive 18 - - name: Cache native dependencies 19 - id: cache-deps 20 - uses: actions/cache@v4 21 - with: 22 - path: ~/deps-cache 23 - key: deps-linux-musl-aarch64-llhttp-9.2.1-libuv-1.51.0-zlib-1.3.1-v1 24 - - name: Cache vendor dependencies 25 - uses: actions/cache@v4 26 - with: 27 - path: | 28 - vendor/*/ 29 - build/vendor 30 - key: vendor-linux-musl-aarch64-${{ hashFiles('vendor/*.wrap') }} 31 - - name: Cache cargo 32 - uses: actions/cache@v4 33 - with: 34 - path: | 35 - ~/.cargo-cache/registry 36 - ~/.cargo-cache/git 37 - build/oxc-target 38 - key: cargo-linux-musl-aarch64-${{ hashFiles('src/strip/Cargo.lock') }} 39 - restore-keys: cargo-linux-musl-aarch64- 40 - - name: Build in Alpine container 41 - run: | 42 - mkdir -p ~/.cargo-cache 43 - docker run --rm -v "$PWD":/work -v "$HOME/deps-cache":/deps-cache -v "$HOME/.cargo-cache":/root/.cargo -w /work alpine:edge sh -c ' 44 - apk add --no-cache git clang lld llvm meson ninja cmake pkgconf curl npm nodejs \ 45 - musl-dev openssl-dev openssl-libs-static libsodium-dev libsodium-static \ 46 - util-linux-dev util-linux-static linux-headers libunwind-dev libunwind-static rust cargo 47 - 48 - git config --global --add safe.directory /work 49 - 50 - export CFLAGS="-Os -flto" 51 - export AR=llvm-ar 52 - export RANLIB=llvm-ranlib 53 - 54 - if [ ! 
-f /deps-cache/lib/libllhttp.a ]; then 55 - git clone --depth 1 --branch release/v9.2.1 https://github.com/nodejs/llhttp.git /tmp/llhttp 56 - cd /tmp/llhttp 57 - CC=clang cmake -G Ninja -B build -DCMAKE_INSTALL_PREFIX=/deps-cache -DBUILD_SHARED_LIBS=OFF -DBUILD_STATIC_LIBS=ON -DCMAKE_BUILD_TYPE=MinSizeRel 58 - cmake --build build 59 - cmake --install build 60 - fi 61 - 62 - if [ ! -f /deps-cache/lib/libuv.a ]; then 63 - git clone --depth 1 --branch v1.51.0 https://github.com/libuv/libuv.git /tmp/libuv 64 - cd /tmp/libuv 65 - CC=clang cmake -G Ninja -B build -DCMAKE_INSTALL_PREFIX=/deps-cache -DBUILD_TESTING=OFF -DLIBUV_BUILD_SHARED=OFF -DCMAKE_BUILD_TYPE=MinSizeRel 66 - cmake --build build 67 - cmake --install build 68 - fi 69 - 70 - if [ ! -f /deps-cache/lib/libz.a ]; then 71 - git clone --depth 1 --branch v1.3.1 https://github.com/madler/zlib.git /tmp/zlib 72 - cd /tmp/zlib 73 - CC=clang cmake -G Ninja -B build -DCMAKE_INSTALL_PREFIX=/deps-cache -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=MinSizeRel 74 - cmake --build build 75 - cmake --install build 76 - rm -f /deps-cache/lib/libz.so* 77 - fi 78 - 79 - cd /work 80 - npm ci --prefix src/tools 81 - export PKG_CONFIG_PATH="/deps-cache/lib/pkgconfig:$PKG_CONFIG_PATH" 82 - export CMAKE_PREFIX_PATH="/deps-cache:$CMAKE_PREFIX_PATH" 83 - CC=clang CC_LD=lld meson setup build --prefer-static -Db_lto=true --buildtype=release -Dstatic_link=true -Dbuild_timestamp=${{ inputs.build_timestamp }} && meson compile -C build 84 - llvm-strip build/ant 85 - ' 86 - - run: ./build/ant --version 87 - - uses: actions/upload-artifact@v4 88 - with: 89 - name: ant-linux-aarch64-musl 90 - path: build/ant
-96
.github/workflows/build-linux-musl-x64.yml
··· 1 - name: Build Linux x64 [musl] 2 - 3 - on: 4 - workflow_dispatch: 5 - workflow_call: 6 - inputs: 7 - build_timestamp: 8 - required: true 9 - type: string 10 - 11 - jobs: 12 - linux-musl-x64: 13 - runs-on: ubuntu-24.04 14 - container: alpine:edge 15 - steps: 16 - - name: Install dependencies 17 - run: | 18 - apk add --no-cache git clang lld llvm meson ninja cmake pkgconf curl npm nodejs \ 19 - musl-dev openssl-dev openssl-libs-static libsodium-dev libsodium-static \ 20 - util-linux-dev util-linux-static linux-headers libunwind-dev libunwind-static \ 21 - tar zstd 22 - git config --global --add safe.directory "$GITHUB_WORKSPACE" 23 - - uses: actions/checkout@v4 24 - with: 25 - submodules: recursive 26 - - name: Cache native dependencies 27 - id: cache-deps 28 - uses: actions/cache@v4 29 - with: 30 - path: ~/deps-cache 31 - key: deps-linux-musl-x64-llhttp-9.2.1-libuv-1.51.0-zlib-1.3.1-v1 32 - - name: Build llhttp 33 - if: steps.cache-deps.outputs.cache-hit != 'true' 34 - run: | 35 - export CFLAGS="-Os -flto" 36 - export AR=llvm-ar 37 - export RANLIB=llvm-ranlib 38 - git clone --depth 1 --branch release/v9.2.1 https://github.com/nodejs/llhttp.git /tmp/llhttp 39 - cd /tmp/llhttp 40 - CC=clang cmake -G Ninja -B build -DCMAKE_INSTALL_PREFIX=$HOME/deps-cache -DBUILD_SHARED_LIBS=OFF -DBUILD_STATIC_LIBS=ON -DCMAKE_BUILD_TYPE=MinSizeRel 41 - cmake --build build 42 - cmake --install build 43 - - name: Build libuv 44 - if: steps.cache-deps.outputs.cache-hit != 'true' 45 - run: | 46 - export CFLAGS="-Os -flto" 47 - export AR=llvm-ar 48 - export RANLIB=llvm-ranlib 49 - git clone --depth 1 --branch v1.51.0 https://github.com/libuv/libuv.git /tmp/libuv 50 - cd /tmp/libuv 51 - CC=clang cmake -G Ninja -B build -DCMAKE_INSTALL_PREFIX=$HOME/deps-cache -DBUILD_TESTING=OFF -DLIBUV_BUILD_SHARED=OFF -DCMAKE_BUILD_TYPE=MinSizeRel 52 - cmake --build build 53 - cmake --install build 54 - - name: Build zlib 55 - if: steps.cache-deps.outputs.cache-hit != 'true' 56 - run: | 57 - export 
CFLAGS="-Os -flto" 58 - export AR=llvm-ar 59 - export RANLIB=llvm-ranlib 60 - git clone --depth 1 --branch v1.3.1 https://github.com/madler/zlib.git /tmp/zlib 61 - cd /tmp/zlib 62 - CC=clang cmake -G Ninja -B build -DCMAKE_INSTALL_PREFIX=$HOME/deps-cache -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=MinSizeRel 63 - cmake --build build 64 - cmake --install build 65 - rm -f $HOME/deps-cache/lib/libz.so* 66 - - run: npm ci 67 - working-directory: src/tools 68 - - name: Install Rust 69 - run: | 70 - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-host x86_64-unknown-linux-musl 71 - echo "$HOME/.cargo/bin" >> $GITHUB_PATH 72 - - uses: actions/cache@v4 73 - with: 74 - path: | 75 - ~/.cargo/registry 76 - ~/.cargo/git 77 - build/oxc-target 78 - key: cargo-linux-musl-amd64-${{ hashFiles('src/strip/Cargo.lock') }} 79 - restore-keys: cargo-linux-musl-amd64- 80 - - name: Cache vendor dependencies 81 - uses: actions/cache@v4 82 - with: 83 - path: | 84 - vendor/*/ 85 - build/vendor 86 - key: vendor-linux-musl-x64-${{ hashFiles('vendor/*.wrap') }} 87 - - run: | 88 - export PKG_CONFIG_PATH="$HOME/deps-cache/lib/pkgconfig:$PKG_CONFIG_PATH" 89 - export CMAKE_PREFIX_PATH="$HOME/deps-cache:$CMAKE_PREFIX_PATH" 90 - CC=clang CC_LD=lld meson setup build --prefer-static -Db_lto=true --buildtype=release -Dstatic_link=true -Dbuild_timestamp=${{ inputs.build_timestamp }} && meson compile -C build 91 - - run: ./build/ant --version 92 - - run: llvm-strip build/ant 93 - - uses: actions/upload-artifact@v4 94 - with: 95 - name: ant-linux-x64-musl 96 - path: build/ant
-96
.github/workflows/build-macos-aarch64-mbedtls.yml
··· 1 - name: Build macOS aarch64 [mbedtls] 2 - 3 - on: 4 - workflow_dispatch: 5 - workflow_call: 6 - inputs: 7 - build_timestamp: 8 - required: true 9 - type: string 10 - 11 - jobs: 12 - macos-aarch64-mbedtls: 13 - runs-on: macos-15 14 - steps: 15 - - uses: actions/checkout@v4 16 - with: 17 - submodules: recursive 18 - - uses: actions/setup-node@v4 19 - with: 20 - node-version: '22' 21 - - run: brew install meson ninja llvm 22 - - name: Cache native dependencies 23 - id: cache-deps 24 - uses: actions/cache@v4 25 - with: 26 - path: ~/deps-cache 27 - key: deps-macos-aarch64-mbedtls-libsodium-1.0.20-mbedtls-3.6.5-llhttp-9.2.1-v1 28 - - name: Build libsodium 29 - if: steps.cache-deps.outputs.cache-hit != 'true' 30 - run: | 31 - export LLVM_PREFIX=$(brew --prefix llvm) 32 - git clone --depth 1 --branch 1.0.20-RELEASE https://github.com/jedisct1/libsodium.git /tmp/libsodium 33 - cd /tmp/libsodium 34 - ./configure CC=$LLVM_PREFIX/bin/clang --prefix=$HOME/deps-cache --disable-shared --enable-static 35 - make -j$(sysctl -n hw.ncpu) 36 - make install 37 - - name: Build mbedTLS 38 - if: steps.cache-deps.outputs.cache-hit != 'true' 39 - run: | 40 - export LLVM_PREFIX=$(brew --prefix llvm) 41 - git clone --depth 1 --branch mbedtls-3.6.5 --recurse-submodules https://github.com/Mbed-TLS/mbedtls.git /tmp/mbedtls 42 - cd /tmp/mbedtls 43 - CC=$LLVM_PREFIX/bin/clang cmake -B build \ 44 - -DCMAKE_INSTALL_PREFIX=$HOME/deps-cache \ 45 - -DENABLE_PROGRAMS=OFF \ 46 - -DENABLE_TESTING=OFF \ 47 - -DCMAKE_BUILD_TYPE=Release \ 48 - -DUSE_STATIC_MBEDTLS_LIBRARY=ON \ 49 - -DUSE_SHARED_MBEDTLS_LIBRARY=OFF 50 - cmake --build build -j$(sysctl -n hw.ncpu) 51 - cmake --install build 52 - - name: Build llhttp 53 - if: steps.cache-deps.outputs.cache-hit != 'true' 54 - run: | 55 - export LLVM_PREFIX=$(brew --prefix llvm) 56 - git clone --depth 1 --branch release/v9.2.1 https://github.com/nodejs/llhttp.git /tmp/llhttp 57 - cd /tmp/llhttp 58 - CC=$LLVM_PREFIX/bin/clang cmake -B build 
-DCMAKE_INSTALL_PREFIX=$HOME/deps-cache -DBUILD_SHARED_LIBS=OFF -DBUILD_STATIC_LIBS=ON -DCMAKE_BUILD_TYPE=Release 59 - cmake --build build 60 - cmake --install build 61 - - run: npm ci 62 - working-directory: src/tools 63 - - uses: dtolnay/rust-toolchain@stable 64 - - uses: Swatinem/rust-cache@v2 65 - with: 66 - workspaces: src/strip 67 - - name: Cache oxc build 68 - uses: actions/cache@v4 69 - with: 70 - path: build/oxc-target 71 - key: oxc-macos-aarch64-mbedtls-${{ hashFiles('src/strip/Cargo.lock') }} 72 - restore-keys: oxc-macos-aarch64-mbedtls- 73 - - name: Cache vendor dependencies 74 - uses: actions/cache@v4 75 - with: 76 - path: | 77 - vendor/*/ 78 - build/vendor 79 - key: vendor-macos-aarch64-${{ hashFiles('vendor/*.wrap') }} 80 - - run: | 81 - export PKG_CONFIG_PATH="$HOME/deps-cache/lib/pkgconfig:$PKG_CONFIG_PATH" 82 - export CMAKE_PREFIX_PATH="$HOME/deps-cache:$CMAKE_PREFIX_PATH" 83 - export LLVM_PREFIX=$(brew --prefix llvm) 84 - export CC=$LLVM_PREFIX/bin/clang 85 - export AR=$LLVM_PREFIX/bin/llvm-ar 86 - export RANLIB=$LLVM_PREFIX/bin/llvm-ranlib 87 - export CMAKE_C_COMPILER=$CC 88 - export CMAKE_AR=$AR 89 - export CMAKE_RANLIB=$RANLIB 90 - meson setup build -Db_lto=true --buildtype=release -Dbuild_timestamp=${{ inputs.build_timestamp }} -Dtls_library=mbedtls && meson compile -C build 91 - - run: ./build/ant --version 92 - - run: strip build/ant 93 - - uses: actions/upload-artifact@v4 94 - with: 95 - name: ant-darwin-aarch64-mbedtls 96 - path: build/ant
-81
.github/workflows/build-macos-aarch64.yml
··· 1 - name: Build macOS aarch64 2 - 3 - on: 4 - workflow_dispatch: 5 - workflow_call: 6 - inputs: 7 - build_timestamp: 8 - required: true 9 - type: string 10 - 11 - jobs: 12 - macos-aarch64: 13 - runs-on: macos-15 14 - steps: 15 - - uses: actions/checkout@v4 16 - with: 17 - submodules: recursive 18 - - uses: actions/setup-node@v4 19 - with: 20 - node-version: '22' 21 - - run: brew install meson ninja openssl@3 llvm 22 - - name: Cache native dependencies 23 - id: cache-deps 24 - uses: actions/cache@v4 25 - with: 26 - path: ~/deps-cache 27 - key: deps-macos-aarch64-libsodium-1.0.20-llhttp-9.2.1-v1 28 - - name: Build libsodium 29 - if: steps.cache-deps.outputs.cache-hit != 'true' 30 - run: | 31 - export LLVM_PREFIX=$(brew --prefix llvm) 32 - git clone --depth 1 --branch 1.0.20-RELEASE https://github.com/jedisct1/libsodium.git /tmp/libsodium 33 - cd /tmp/libsodium 34 - ./configure CC=$LLVM_PREFIX/bin/clang --prefix=$HOME/deps-cache --disable-shared --enable-static 35 - make -j$(sysctl -n hw.ncpu) 36 - make install 37 - - name: Build llhttp 38 - if: steps.cache-deps.outputs.cache-hit != 'true' 39 - run: | 40 - export LLVM_PREFIX=$(brew --prefix llvm) 41 - git clone --depth 1 --branch release/v9.2.1 https://github.com/nodejs/llhttp.git /tmp/llhttp 42 - cd /tmp/llhttp 43 - CC=$LLVM_PREFIX/bin/clang cmake -B build -DCMAKE_INSTALL_PREFIX=$HOME/deps-cache -DBUILD_SHARED_LIBS=OFF -DBUILD_STATIC_LIBS=ON -DCMAKE_BUILD_TYPE=Release 44 - cmake --build build 45 - cmake --install build 46 - - run: npm ci 47 - working-directory: src/tools 48 - - uses: dtolnay/rust-toolchain@stable 49 - - uses: Swatinem/rust-cache@v2 50 - with: 51 - workspaces: src/strip 52 - - name: Cache oxc build 53 - uses: actions/cache@v4 54 - with: 55 - path: build/oxc-target 56 - key: oxc-macos-aarch64-${{ hashFiles('src/strip/Cargo.lock') }} 57 - restore-keys: oxc-macos-aarch64- 58 - - name: Cache vendor dependencies 59 - uses: actions/cache@v4 60 - with: 61 - path: | 62 - vendor/*/ 63 - build/vendor 64 - 
key: vendor-macos-aarch64-${{ hashFiles('vendor/*.wrap') }} 65 - - run: | 66 - export PKG_CONFIG_PATH="$HOME/deps-cache/lib/pkgconfig:$PKG_CONFIG_PATH" 67 - export CMAKE_PREFIX_PATH="$HOME/deps-cache:$CMAKE_PREFIX_PATH" 68 - export LLVM_PREFIX=$(brew --prefix llvm) 69 - export CC=$LLVM_PREFIX/bin/clang 70 - export AR=$LLVM_PREFIX/bin/llvm-ar 71 - export RANLIB=$LLVM_PREFIX/bin/llvm-ranlib 72 - export CMAKE_C_COMPILER=$CC 73 - export CMAKE_AR=$AR 74 - export CMAKE_RANLIB=$RANLIB 75 - meson setup build -Db_lto=true --buildtype=release -Dbuild_timestamp=${{ inputs.build_timestamp }} && meson compile -C build 76 - - run: ./build/ant --version 77 - - run: strip build/ant 78 - - uses: actions/upload-artifact@v4 79 - with: 80 - name: ant-darwin-aarch64 81 - path: build/ant
-96
.github/workflows/build-macos-x64-mbedtls.yml
··· 1 - name: Build macOS x64 [mbedtls] 2 - 3 - on: 4 - workflow_dispatch: 5 - workflow_call: 6 - inputs: 7 - build_timestamp: 8 - required: true 9 - type: string 10 - 11 - jobs: 12 - macos-x64-mbedtls: 13 - runs-on: macos-15-intel 14 - steps: 15 - - uses: actions/checkout@v4 16 - with: 17 - submodules: recursive 18 - - uses: actions/setup-node@v4 19 - with: 20 - node-version: '22' 21 - - run: brew install meson ninja llvm 22 - - name: Cache native dependencies 23 - id: cache-deps 24 - uses: actions/cache@v4 25 - with: 26 - path: ~/deps-cache 27 - key: deps-macos-x64-mbedtls-libsodium-1.0.20-mbedtls-3.6.5-llhttp-9.2.1-v1 28 - - name: Build libsodium 29 - if: steps.cache-deps.outputs.cache-hit != 'true' 30 - run: | 31 - export LLVM_PREFIX=$(brew --prefix llvm) 32 - git clone --depth 1 --branch 1.0.20-RELEASE https://github.com/jedisct1/libsodium.git /tmp/libsodium 33 - cd /tmp/libsodium 34 - ./configure CC=$LLVM_PREFIX/bin/clang --prefix=$HOME/deps-cache --disable-shared --enable-static 35 - make -j$(sysctl -n hw.ncpu) 36 - make install 37 - - name: Build mbedTLS 38 - if: steps.cache-deps.outputs.cache-hit != 'true' 39 - run: | 40 - export LLVM_PREFIX=$(brew --prefix llvm) 41 - git clone --depth 1 --branch mbedtls-3.6.5 --recurse-submodules https://github.com/Mbed-TLS/mbedtls.git /tmp/mbedtls 42 - cd /tmp/mbedtls 43 - CC=$LLVM_PREFIX/bin/clang cmake -B build \ 44 - -DCMAKE_INSTALL_PREFIX=$HOME/deps-cache \ 45 - -DENABLE_PROGRAMS=OFF \ 46 - -DENABLE_TESTING=OFF \ 47 - -DCMAKE_BUILD_TYPE=Release \ 48 - -DUSE_STATIC_MBEDTLS_LIBRARY=ON \ 49 - -DUSE_SHARED_MBEDTLS_LIBRARY=OFF 50 - cmake --build build -j$(sysctl -n hw.ncpu) 51 - cmake --install build 52 - - name: Build llhttp 53 - if: steps.cache-deps.outputs.cache-hit != 'true' 54 - run: | 55 - export LLVM_PREFIX=$(brew --prefix llvm) 56 - git clone --depth 1 --branch release/v9.2.1 https://github.com/nodejs/llhttp.git /tmp/llhttp 57 - cd /tmp/llhttp 58 - CC=$LLVM_PREFIX/bin/clang cmake -B build 
-DCMAKE_INSTALL_PREFIX=$HOME/deps-cache -DBUILD_SHARED_LIBS=OFF -DBUILD_STATIC_LIBS=ON -DCMAKE_BUILD_TYPE=Release 59 - cmake --build build 60 - cmake --install build 61 - - run: npm ci 62 - working-directory: src/tools 63 - - uses: dtolnay/rust-toolchain@stable 64 - - uses: Swatinem/rust-cache@v2 65 - with: 66 - workspaces: src/strip 67 - - name: Cache oxc build 68 - uses: actions/cache@v4 69 - with: 70 - path: build/oxc-target 71 - key: oxc-macos-x64-mbedtls-${{ hashFiles('src/strip/Cargo.lock') }} 72 - restore-keys: oxc-macos-x64-mbedtls- 73 - - name: Cache vendor dependencies 74 - uses: actions/cache@v4 75 - with: 76 - path: | 77 - vendor/*/ 78 - build/vendor 79 - key: vendor-macos-x64-${{ hashFiles('vendor/*.wrap') }} 80 - - run: | 81 - export PKG_CONFIG_PATH="$HOME/deps-cache/lib/pkgconfig:$PKG_CONFIG_PATH" 82 - export CMAKE_PREFIX_PATH="$HOME/deps-cache:$CMAKE_PREFIX_PATH" 83 - export LLVM_PREFIX=$(brew --prefix llvm) 84 - export CC=$LLVM_PREFIX/bin/clang 85 - export AR=$LLVM_PREFIX/bin/llvm-ar 86 - export RANLIB=$LLVM_PREFIX/bin/llvm-ranlib 87 - export CMAKE_C_COMPILER=$CC 88 - export CMAKE_AR=$AR 89 - export CMAKE_RANLIB=$RANLIB 90 - meson setup build -Db_lto=true --buildtype=release -Dbuild_timestamp=${{ inputs.build_timestamp }} -Dtls_library=mbedtls && meson compile -C build 91 - - run: ./build/ant --version 92 - - run: strip build/ant 93 - - uses: actions/upload-artifact@v4 94 - with: 95 - name: ant-darwin-x64-mbedtls 96 - path: build/ant
-81
.github/workflows/build-macos-x64.yml
··· 1 - name: Build macOS x64 2 - 3 - on: 4 - workflow_dispatch: 5 - workflow_call: 6 - inputs: 7 - build_timestamp: 8 - required: true 9 - type: string 10 - 11 - jobs: 12 - macos-x64: 13 - runs-on: macos-15-intel 14 - steps: 15 - - uses: actions/checkout@v4 16 - with: 17 - submodules: recursive 18 - - uses: actions/setup-node@v4 19 - with: 20 - node-version: '22' 21 - - run: brew install meson ninja openssl@3 llvm 22 - - name: Cache native dependencies 23 - id: cache-deps 24 - uses: actions/cache@v4 25 - with: 26 - path: ~/deps-cache 27 - key: deps-macos-x64-libsodium-1.0.20-llhttp-9.2.1-v1 28 - - name: Build libsodium 29 - if: steps.cache-deps.outputs.cache-hit != 'true' 30 - run: | 31 - export LLVM_PREFIX=$(brew --prefix llvm) 32 - git clone --depth 1 --branch 1.0.20-RELEASE https://github.com/jedisct1/libsodium.git /tmp/libsodium 33 - cd /tmp/libsodium 34 - ./configure CC=$LLVM_PREFIX/bin/clang --prefix=$HOME/deps-cache --disable-shared --enable-static 35 - make -j$(sysctl -n hw.ncpu) 36 - make install 37 - - name: Build llhttp 38 - if: steps.cache-deps.outputs.cache-hit != 'true' 39 - run: | 40 - export LLVM_PREFIX=$(brew --prefix llvm) 41 - git clone --depth 1 --branch release/v9.2.1 https://github.com/nodejs/llhttp.git /tmp/llhttp 42 - cd /tmp/llhttp 43 - CC=$LLVM_PREFIX/bin/clang cmake -B build -DCMAKE_INSTALL_PREFIX=$HOME/deps-cache -DBUILD_SHARED_LIBS=OFF -DBUILD_STATIC_LIBS=ON -DCMAKE_BUILD_TYPE=Release 44 - cmake --build build 45 - cmake --install build 46 - - run: npm ci 47 - working-directory: src/tools 48 - - uses: dtolnay/rust-toolchain@stable 49 - - uses: Swatinem/rust-cache@v2 50 - with: 51 - workspaces: src/strip 52 - - name: Cache oxc build 53 - uses: actions/cache@v4 54 - with: 55 - path: build/oxc-target 56 - key: oxc-macos-x64-${{ hashFiles('src/strip/Cargo.lock') }} 57 - restore-keys: oxc-macos-x64- 58 - - name: Cache vendor dependencies 59 - uses: actions/cache@v4 60 - with: 61 - path: | 62 - vendor/*/ 63 - build/vendor 64 - key: 
vendor-macos-x64-${{ hashFiles('vendor/*.wrap') }} 65 - - run: | 66 - export PKG_CONFIG_PATH="$HOME/deps-cache/lib/pkgconfig:$PKG_CONFIG_PATH" 67 - export CMAKE_PREFIX_PATH="$HOME/deps-cache:$CMAKE_PREFIX_PATH" 68 - export LLVM_PREFIX=$(brew --prefix llvm) 69 - export CC=$LLVM_PREFIX/bin/clang 70 - export AR=$LLVM_PREFIX/bin/llvm-ar 71 - export RANLIB=$LLVM_PREFIX/bin/llvm-ranlib 72 - export CMAKE_C_COMPILER=$CC 73 - export CMAKE_AR=$AR 74 - export CMAKE_RANLIB=$RANLIB 75 - meson setup build -Db_lto=true --buildtype=release -Dbuild_timestamp=${{ inputs.build_timestamp }} && meson compile -C build 76 - - run: ./build/ant --version 77 - - run: strip build/ant 78 - - uses: actions/upload-artifact@v4 79 - with: 80 - name: ant-darwin-x64 81 - path: build/ant
+232
.github/workflows/build-platform.yml
name: Build Platform

on:
  workflow_call:
    inputs:
      platform:
        description: 'Platform key from versions.json targets'
        required: true
        type: string
      build_timestamp:
        required: true
        type: string
    outputs:
      version:
        description: Build version
        value: ${{ jobs.build.outputs.version }}

jobs:
  build:
    outputs:
      version: ${{ steps.build.outputs.version }}
    # NOTE(review): GitHub Actions expressions have no case() function — the
    # original `${{ case(...) }}` form fails at workflow parse time. The
    # per-platform values are selected with a fromJSON() map lookup (runs-on)
    # and &&/|| chains (container, shell), both of which are valid expression
    # syntax and safe here because every selected value is a non-empty string.
    runs-on: ${{ fromJSON('{"linux-glibc-x64":"ubuntu-24.04","linux-glibc-aarch64":"ubuntu-24.04-arm","linux-musl-x64":"ubuntu-24.04","linux-musl-aarch64":"ubuntu-24.04-arm","macos-x64":"macos-15-intel","macos-x64-mbedtls":"macos-15-intel","macos-aarch64":"macos-15","macos-aarch64-mbedtls":"macos-15","windows-x64":"windows-latest"}')[inputs.platform] || 'ubuntu-24.04' }}

    # An empty string means "no container" (macOS / Windows runners).
    container: ${{ (startsWith(inputs.platform, 'linux-glibc') && 'ubuntu:22.04') || (startsWith(inputs.platform, 'linux-musl') && 'alpine:edge') || '' }}

    defaults:
      run:
        shell: ${{ (inputs.platform == 'windows-x64' && 'msys2 {0}') || (startsWith(inputs.platform, 'linux-musl') && 'sh') || 'bash' }}

    steps:
      # === ALPINE ARM64 WORKAROUND (must be first) ===
      - name: Setup Alpine ARM64 support
        uses: laverdet/alpine-arm64@v1
        if: startsWith(inputs.platform, 'linux-musl')

      # === LINUX GLIBC SETUP ===
      - name: Setup Linux glibc environment
        if: startsWith(inputs.platform, 'linux-glibc')
        env:
          DEBIAN_FRONTEND: noninteractive
        run: |
          apt-get update
          apt-get install -y git ca-certificates gnupg wget software-properties-common curl \
            python3-pip ninja-build cmake pkg-config uuid-dev libssl-dev libsodium-dev jq
          curl -fsSL https://deb.nodesource.com/setup_22.x | bash -
          apt-get install -y nodejs
          pip3 install meson
          git config --global --add safe.directory "$GITHUB_WORKSPACE"

      # === LINUX MUSL SETUP ===
      - name: Setup Linux musl environment
        if: startsWith(inputs.platform, 'linux-musl')
        run: |
          apk add --no-cache git clang lld llvm meson ninja cmake pkgconf curl npm nodejs \
            musl-dev openssl-dev openssl-libs-static libsodium-dev libsodium-static \
            util-linux-dev util-linux-static linux-headers libunwind-dev libunwind-static \
            tar xz zstd jq bash
          git config --global --add safe.directory "$GITHUB_WORKSPACE"

      # === MACOS SETUP ===
      - name: Setup macOS environment
        if: startsWith(inputs.platform, 'macos')
        run: |
          # brew list || brew install avoids reinstalling pre-provisioned packages
          for pkg in meson ninja llvm jq; do
            brew list "$pkg" &>/dev/null || brew install "$pkg"
          done
          if [[ "${{ inputs.platform }}" != *"mbedtls"* ]]; then
            brew list openssl@3 &>/dev/null || brew install openssl@3
          fi

      # === WINDOWS SETUP ===
      - name: Disable Windows Defender real-time monitoring
        if: inputs.platform == 'windows-x64'
        shell: pwsh
        run: Set-MpPreference -DisableRealtimeMonitoring $true

      - name: Setup Windows MSYS2
        if: inputs.platform == 'windows-x64'
        uses: msys2/setup-msys2@v2
        with:
          msystem: MINGW64
          cache: true
          install: >-
            mingw-w64-x86_64-toolchain mingw-w64-x86_64-meson mingw-w64-x86_64-ninja
            mingw-w64-x86_64-cmake mingw-w64-x86_64-openssl mingw-w64-x86_64-libsodium
            mingw-w64-x86_64-lld mingw-w64-x86_64-nodejs git jq

      # Checkout must precede any `uses: ./.github/actions/...` step below.
      - name: Checkout
        uses: actions/checkout@v4
        with:
          submodules: recursive

      # === LOAD TARGET CONFIG ===
      # Reads tool versions, per-target settings and dependency versions from
      # .github/versions.json and exposes them as step outputs.
      - name: Load target configuration
        id: config
        shell: bash
        run: |
          CONFIG=$(jq -r '.targets["${{ inputs.platform }}"]' .github/versions.json)
          TOOLS=$(jq -r '.tools' .github/versions.json)
          DEPS=$(jq -r '.dependencies' .github/versions.json)

          # Tools
          echo "zig_version=$(echo $TOOLS | jq -r '.zig')" >> $GITHUB_OUTPUT
          echo "llvm_version=$(echo $TOOLS | jq -r '.llvm')" >> $GITHUB_OUTPUT
          echo "node_version=$(echo $TOOLS | jq -r '.node')" >> $GITHUB_OUTPUT

          # Target config
          echo "os_type=$(echo $CONFIG | jq -r '.os_type')" >> $GITHUB_OUTPUT
          echo "arch=$(echo $CONFIG | jq -r '.arch')" >> $GITHUB_OUTPUT
          echo "zig_target=$(echo $CONFIG | jq -r '.zig_target')" >> $GITHUB_OUTPUT
          echo "rust_target=$(echo $CONFIG | jq -r '.rust_target')" >> $GITHUB_OUTPUT
          echo "artifact_name=$(echo $CONFIG | jq -r '.artifact_name')" >> $GITHUB_OUTPUT
          echo "binary_name=$(echo $CONFIG | jq -r '.binary_name')" >> $GITHUB_OUTPUT
          echo "tls_library=$(echo $CONFIG | jq -r '.tls_library')" >> $GITHUB_OUTPUT
          echo "static_link=$(echo $CONFIG | jq -r '.static_link')" >> $GITHUB_OUTPUT
          echo "cmake_generator=$(echo $CONFIG | jq -r '.cmake_generator // empty')" >> $GITHUB_OUTPUT
          echo "extra_meson_args=$(echo $CONFIG | jq -r '.extra_meson_args // empty')" >> $GITHUB_OUTPUT

          # Deps as space-separated string
          echo "deps=$(echo $CONFIG | jq -r '.deps | join(" ")')" >> $GITHUB_OUTPUT

          # Dep versions
          echo "llhttp_version=$(echo $DEPS | jq -r '.llhttp')" >> $GITHUB_OUTPUT
          echo "libuv_version=$(echo $DEPS | jq -r '.libuv')" >> $GITHUB_OUTPUT
          echo "libsodium_version=$(echo $DEPS | jq -r '.libsodium')" >> $GITHUB_OUTPUT
          echo "mbedtls_version=$(echo $DEPS | jq -r '.mbedtls')" >> $GITHUB_OUTPUT
          echo "zlib_version=$(echo $DEPS | jq -r '.zlib')" >> $GITHUB_OUTPUT

      # === SETUP ZIG ===
      - name: Setup Zig
        uses: ./.github/actions/setup-zig
        with:
          version: ${{ steps.config.outputs.zig_version }}

      # === SETUP LLVM ===
      - name: Setup LLVM
        id: llvm
        uses: ./.github/actions/setup-llvm
        with:
          version: ${{ steps.config.outputs.llvm_version }}
          os_type: ${{ steps.config.outputs.os_type }}

      # === SETUP RUST ===
      - name: Setup Rust
        uses: dtolnay/rust-toolchain@stable
        with:
          targets: ${{ steps.config.outputs.rust_target }}

      - name: Cache Rust
        uses: Swatinem/rust-cache@v2
        with:
          workspaces: src/strip -> ../../build/oxc-target

      # === BUILD NATIVE DEPS ===
      - name: Build native dependencies
        uses: ./.github/actions/build-native-deps
        with:
          deps: ${{ steps.config.outputs.deps }}
          prefix: ${{ runner.temp }}/deps-cache
          cc: ${{ steps.llvm.outputs.cc }}
          cxx: ${{ steps.llvm.outputs.cxx }}
          ar: ${{ steps.llvm.outputs.ar }}
          ranlib: ${{ steps.llvm.outputs.ranlib }}
          cmake_generator: ${{ steps.config.outputs.cmake_generator }}
          llhttp_version: ${{ steps.config.outputs.llhttp_version }}
          libuv_version: ${{ steps.config.outputs.libuv_version }}
          libsodium_version: ${{ steps.config.outputs.libsodium_version }}
          mbedtls_version: ${{ steps.config.outputs.mbedtls_version }}
          zlib_version: ${{ steps.config.outputs.zlib_version }}

      # === BUILD PROJECT ===
      - name: Build project
        id: build
        uses: ./.github/actions/build-project
        with:
          cc: ${{ steps.llvm.outputs.cc }}
          cxx: ${{ steps.llvm.outputs.cxx }}
          ar: ${{ steps.llvm.outputs.ar }}
          ranlib: ${{ steps.llvm.outputs.ranlib }}
          ld: ${{ steps.llvm.outputs.ld }}
          strip: ${{ steps.llvm.outputs.strip }}
          deps_prefix: ${{ runner.temp }}/deps-cache
          build_timestamp: ${{ inputs.build_timestamp }}
          tls_library: ${{ steps.config.outputs.tls_library }}
          static_link: ${{ steps.config.outputs.static_link }}
          extra_meson_args: ${{ steps.config.outputs.extra_meson_args }}
          binary_name: ${{ steps.config.outputs.binary_name }}

      # === UPLOAD ARTIFACTS ===
      - name: Upload binary
        uses: actions/upload-artifact@v4
        with:
          name: ${{ steps.config.outputs.artifact_name }}
          path: |
            build/${{ steps.config.outputs.binary_name }}
            build/*.dll

      - name: Save version info
        shell: bash
        run: echo "${{ steps.build.outputs.version }}" > version.txt

      - name: Upload version info
        uses: actions/upload-artifact@v4
        with:
          name: version-${{ inputs.platform }}
          path: version.txt
          retention-days: 1
+35
.github/workflows/build-single.yml
··· 1 + name: Build Single Platform 2 + 3 + on: 4 + workflow_dispatch: 5 + inputs: 6 + platform: 7 + description: 'Target platform' 8 + required: true 9 + type: choice 10 + options: 11 + - linux-glibc-x64 12 + - linux-glibc-aarch64 13 + - linux-musl-x64 14 + - linux-musl-aarch64 15 + - macos-x64 16 + - macos-aarch64 17 + - macos-x64-mbedtls 18 + - macos-aarch64-mbedtls 19 + - windows-x64 20 + 21 + jobs: 22 + prepare: 23 + runs-on: ubuntu-latest 24 + outputs: 25 + timestamp: ${{ steps.timestamp.outputs.value }} 26 + steps: 27 + - id: timestamp 28 + run: echo "value=$(date +%s)" >> $GITHUB_OUTPUT 29 + 30 + build: 31 + needs: prepare 32 + uses: ./.github/workflows/build-platform.yml 33 + with: 34 + platform: ${{ inputs.platform }} 35 + build_timestamp: ${{ needs.prepare.outputs.timestamp }}
-80
.github/workflows/build-windows-x64.yml
··· 1 - name: Build Windows x64 2 - 3 - on: 4 - workflow_dispatch: 5 - workflow_call: 6 - inputs: 7 - build_timestamp: 8 - required: true 9 - type: string 10 - 11 - jobs: 12 - windows-x64: 13 - runs-on: windows-latest 14 - defaults: 15 - run: 16 - shell: msys2 {0} 17 - steps: 18 - - uses: actions/checkout@v4 19 - with: 20 - submodules: recursive 21 - - uses: msys2/setup-msys2@v2 22 - with: 23 - msystem: MINGW64 24 - install: >- 25 - mingw-w64-x86_64-toolchain 26 - mingw-w64-x86_64-meson 27 - mingw-w64-x86_64-ninja 28 - mingw-w64-x86_64-cmake 29 - mingw-w64-x86_64-openssl 30 - mingw-w64-x86_64-libsodium 31 - mingw-w64-x86_64-nodejs 32 - git 33 - - name: Cache native dependencies 34 - id: cache-deps 35 - uses: actions/cache@v4 36 - with: 37 - path: ${{ runner.temp }}/deps-cache 38 - key: deps-windows-x64-llhttp-9.2.1-v1 39 - - name: Build llhttp 40 - if: steps.cache-deps.outputs.cache-hit != 'true' 41 - run: | 42 - DEPS_CACHE=$(cygpath "${{ runner.temp }}/deps-cache") 43 - git clone --depth 1 --branch release/v9.2.1 https://github.com/nodejs/llhttp.git /tmp/llhttp 44 - cd /tmp/llhttp 45 - cmake -G "MinGW Makefiles" -B build -DCMAKE_INSTALL_PREFIX=$DEPS_CACHE -DBUILD_SHARED_LIBS=OFF -DBUILD_STATIC_LIBS=ON 46 - cmake --build build 47 - cmake --install build 48 - ln -sf $DEPS_CACHE/lib/pkgconfig/libllhttp.pc $DEPS_CACHE/lib/pkgconfig/llhttp.pc 49 - - run: npm ci 50 - working-directory: src/tools 51 - - uses: dtolnay/rust-toolchain@stable 52 - with: 53 - targets: x86_64-pc-windows-gnu 54 - - uses: Swatinem/rust-cache@v2 55 - with: 56 - workspaces: src/strip 57 - - name: Cache oxc build 58 - uses: actions/cache@v4 59 - with: 60 - path: build/oxc-target 61 - key: oxc-windows-x64-${{ hashFiles('src/strip/Cargo.lock') }} 62 - restore-keys: oxc-windows-x64- 63 - - name: Cache vendor dependencies 64 - uses: actions/cache@v4 65 - with: 66 - path: | 67 - vendor/*/ 68 - build/vendor 69 - key: vendor-windows-x64-${{ hashFiles('vendor/*.wrap') }} 70 - - run: | 71 - 
DEPS_CACHE=$(cygpath "${{ runner.temp }}/deps-cache") 72 - DEPS_CACHE_WIN=$(cygpath -m "${{ runner.temp }}/deps-cache") 73 - export PATH="/c/Users/$USER/.cargo/bin:$PATH" 74 - export PKG_CONFIG_PATH="$DEPS_CACHE/lib/pkgconfig:$PKG_CONFIG_PATH" 75 - meson setup build -Dc_std=gnu2x -Dbuild_timestamp=${{ inputs.build_timestamp }} -Ddeps_prefix_cmake="$DEPS_CACHE_WIN" && meson compile -C build 76 - - run: ./build/ant --version 77 - - uses: actions/upload-artifact@v4 78 - with: 79 - name: ant-windows-x64 80 - path: build/ant.exe
+44 -51
.github/workflows/build.yml
··· 13 13 prepare: 14 14 runs-on: ubuntu-latest 15 15 if: | 16 - github.event_name == 'workflow_dispatch' || 17 - github.event_name == 'pull_request' || 18 - (github.event_name == 'push' && contains(github.event.head_commit.message, '[ci] trigger build')) 16 + github.ref_type != 'tag' && 17 + !contains(github.event.head_commit.message || '', '(ci-ignore)') && 18 + !contains(github.event.pull_request.title || '', '(ci-ignore)') && ( 19 + github.event_name == 'workflow_dispatch' || 20 + github.event_name == 'pull_request' || 21 + (github.event_name == 'push' && contains(github.event.head_commit.message, '[ci] trigger build')) 22 + ) 19 23 outputs: 20 24 timestamp: ${{ steps.timestamp.outputs.value }} 21 25 steps: 22 26 - id: timestamp 23 27 run: echo "value=$(date +%s)" >> $GITHUB_OUTPUT 24 28 25 - macos-aarch64: 29 + build: 26 30 needs: prepare 27 - uses: ./.github/workflows/build-macos-aarch64.yml 31 + strategy: 32 + fail-fast: false 33 + matrix: 34 + platform: 35 + - linux-glibc-x64 36 + - linux-glibc-aarch64 37 + - linux-musl-x64 38 + - linux-musl-aarch64 39 + - macos-x64 40 + - macos-aarch64 41 + - macos-x64-mbedtls 42 + - macos-aarch64-mbedtls 43 + - windows-x64 44 + uses: ./.github/workflows/build-platform.yml 28 45 with: 46 + platform: ${{ matrix.platform }} 29 47 build_timestamp: ${{ needs.prepare.outputs.timestamp }} 30 48 31 - macos-x64: 32 - needs: prepare 33 - uses: ./.github/workflows/build-macos-x64.yml 34 - with: 35 - build_timestamp: ${{ needs.prepare.outputs.timestamp }} 49 + summary: 50 + needs: build 51 + runs-on: ubuntu-latest 52 + if: ${{ !cancelled() && needs.build.result != 'skipped' }} 53 + steps: 54 + - name: Download version artifacts 55 + uses: actions/download-artifact@v4 56 + with: 57 + pattern: version-* 58 + path: versions 36 59 37 - macos-aarch64-mbedtls: 38 - needs: prepare 39 - uses: ./.github/workflows/build-macos-aarch64-mbedtls.yml 40 - with: 41 - build_timestamp: ${{ needs.prepare.outputs.timestamp }} 42 - 43 - 
macos-x64-mbedtls: 44 - needs: prepare 45 - uses: ./.github/workflows/build-macos-x64-mbedtls.yml 46 - with: 47 - build_timestamp: ${{ needs.prepare.outputs.timestamp }} 48 - 49 - linux-glibc-x64: 50 - needs: prepare 51 - uses: ./.github/workflows/build-linux-glibc-x64.yml 52 - with: 53 - build_timestamp: ${{ needs.prepare.outputs.timestamp }} 54 - 55 - linux-glibc-aarch64: 56 - needs: prepare 57 - uses: ./.github/workflows/build-linux-glibc-aarch64.yml 58 - with: 59 - build_timestamp: ${{ needs.prepare.outputs.timestamp }} 60 - 61 - linux-musl-x64: 62 - needs: prepare 63 - uses: ./.github/workflows/build-linux-musl-x64.yml 64 - with: 65 - build_timestamp: ${{ needs.prepare.outputs.timestamp }} 66 - 67 - linux-musl-aarch64: 68 - needs: prepare 69 - uses: ./.github/workflows/build-linux-musl-aarch64.yml 70 - with: 71 - build_timestamp: ${{ needs.prepare.outputs.timestamp }} 72 - 73 - windows-x64: 74 - needs: prepare 75 - uses: ./.github/workflows/build-windows-x64.yml 76 - with: 77 - build_timestamp: ${{ needs.prepare.outputs.timestamp }} 60 + - name: Generate summary 61 + run: | 62 + echo "## Build Versions" >> $GITHUB_STEP_SUMMARY 63 + echo "" >> $GITHUB_STEP_SUMMARY 64 + echo "| Platform | Version |" >> $GITHUB_STEP_SUMMARY 65 + echo "|----------|---------|" >> $GITHUB_STEP_SUMMARY 66 + for dir in versions/version-*; do 67 + platform=$(basename "$dir" | sed 's/version-//') 68 + version=$(cat "$dir/version.txt" 2>/dev/null || echo "N/A") 69 + echo "| $platform | \`$version\` |" >> $GITHUB_STEP_SUMMARY 70 + done
+2
.gitignore
··· 9 9 10 10 /build 11 11 /traces 12 + 13 + /src/pkg/.zig-cache 12 14 /src/strip/target 13 15 14 16 /test262
+22
include/cli/pkg.h
#ifndef PKG_CMDS_H
#define PKG_CMDS_H

#include <stdbool.h>

/* Global verbosity flag shared by the pkg CLI commands (defined elsewhere). */
extern bool pkg_verbose;

/*
 * CLI subcommand entry points for the package manager.
 * Each takes the subcommand's (argc, argv) and returns an int — presumably a
 * process exit code, but the implementations are not visible here; verify.
 */
int pkg_cmd_init(int argc, char **argv);
int pkg_cmd_install(int argc, char **argv);
int pkg_cmd_add(int argc, char **argv);
int pkg_cmd_remove(int argc, char **argv);
int pkg_cmd_trust(int argc, char **argv);
int pkg_cmd_run(int argc, char **argv);
int pkg_cmd_exec(int argc, char **argv);
int pkg_cmd_why(int argc, char **argv);
int pkg_cmd_info(int argc, char **argv);
int pkg_cmd_ls(int argc, char **argv);
int pkg_cmd_cache(int argc, char **argv);

/*
 * Returns true if a script named script_name exists in the package.json at
 * package_json_path (implementation not visible in this header).
 */
bool pkg_script_exists(const char *package_json_path, const char *script_name);

#endif
+324
include/pkg.h
/*
 * pkg.h — public C API for ant's package manager.
 *
 * Usage pattern (as exercised by src/cli/pkg.c):
 *   1. pkg_init() with pkg_options_t -> pkg_context_t* (NULL on failure).
 *   2. Call install/add/remove/info operations; on a non-PKG_OK result,
 *      pkg_error_string(ctx) returns a human-readable description.
 *   3. Query result accessors (pkg_get_install_result, pkg_get_added_*, ...).
 *   4. pkg_free(ctx).
 *
 * Strings returned through const char* out-parameters are owned by the
 * context; callers must not free them. Lifetime beyond the next API call
 * is not specified here — NOTE(review): confirm against the implementation.
 */
#ifndef PKG_H
#define PKG_H

#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>

/* Error codes returned by most pkg_* entry points. PKG_OK is zero so the
 * common `if (err != PKG_OK)` check works; all failures are negative. */
typedef enum {
    PKG_OK = 0,
    PKG_OUT_OF_MEMORY = -1,
    PKG_INVALID_LOCKFILE = -2,
    PKG_IO_ERROR = -3,
    PKG_NETWORK_ERROR = -4,
    PKG_CACHE_ERROR = -5,
    PKG_EXTRACT_ERROR = -6,
    PKG_RESOLVE_ERROR = -7,
    PKG_INVALID_ARGUMENT = -8,
    PKG_NOT_FOUND = -9,           /* e.g. pkg_remove_global() on an absent package */
    PKG_INTEGRITY_MISMATCH = -10, /* downloaded tarball failed its checksum */
} pkg_error_t;

/* Install pipeline phases reported to the progress callback, in the order
 * a typical install traverses them. */
typedef enum {
    PKG_PHASE_RESOLVING = 0,
    PKG_PHASE_FETCHING = 1,
    PKG_PHASE_EXTRACTING = 2,
    PKG_PHASE_LINKING = 3,
    PKG_PHASE_CACHING = 4,
    PKG_PHASE_POSTINSTALL = 5,
} pkg_phase_t;

/* Progress notification callback.
 * user_data:  opaque pointer from pkg_options_t.user_data.
 * phase:      which pipeline stage is reporting.
 * current/total: progress counters; total may be 0 when unknown.
 * message:    short human-readable status text (may be empty). */
typedef void (*pkg_progress_cb)(
    void *user_data,
    pkg_phase_t phase,
    uint32_t current,
    uint32_t total,
    const char *message
);

/* Configuration for pkg_init(). Zero-initialized fields select defaults:
 * the CLI only sets progress_callback, user_data and verbose, so NULL
 * cache_dir/registry_url and 0 max_connections must be valid. */
typedef struct {
    const char *cache_dir;          /* override cache location; NULL = default */
    const char *registry_url;       /* override registry; NULL = default */
    uint32_t max_connections;       /* parallel download limit; 0 = default */
    pkg_progress_cb progress_callback; /* optional; NULL disables reporting */
    void *user_data;                /* passed through to progress_callback */
    bool verbose;                   /* verbose logging instead of progress UI */
} pkg_options_t;

/* Opaque package-manager context; one per operation batch. */
typedef struct pkg_context pkg_context_t;

/* Human-readable description of the last error recorded on ctx. */
const char *pkg_error_string(const pkg_context_t *ctx);

/* Create a context. Returns NULL on failure (e.g. allocation). */
pkg_context_t *pkg_init(const pkg_options_t *options);

/* Install from an existing lockfile into node_modules_path. */
pkg_error_t pkg_install(
    pkg_context_t *ctx,
    const char *package_json_path,
    const char *lockfile_path,
    const char *node_modules_path
);

/* Resolve package.json from the registry, write/update the lockfile, then
 * install. Used by the CLI when no lockfile exists yet. */
pkg_error_t pkg_resolve_and_install(
    pkg_context_t *ctx,
    const char *package_json_path,
    const char *lockfile_path,
    const char *node_modules_path
);

/* Add package_spec (name or name@range) to package.json; dev selects
 * devDependencies instead of dependencies. */
pkg_error_t pkg_add(
    pkg_context_t *ctx,
    const char *package_json_path,
    const char *package_spec,
    bool dev
);

/* Remove package_name from package.json. */
pkg_error_t pkg_remove(
    pkg_context_t *ctx,
    const char *package_json_path,
    const char *package_name
);

/* Destroy a context. Safe to call after a failed operation. */
void pkg_free(pkg_context_t *ctx);
/* Flush pending cache writes to disk. */
void pkg_cache_sync(pkg_context_t *ctx);

/* Aggregate statistics about the on-disk package cache. */
typedef struct {
    uint64_t total_size;    /* bytes used by cached package data */
    uint64_t db_size;       /* bytes used by the cache database */
    uint32_t package_count; /* number of cached packages */
} pkg_cache_stats_t;

pkg_error_t pkg_cache_stats(pkg_context_t *ctx, pkg_cache_stats_t *out);

/* Evict cache entries older than max_age_days.
 * NOTE(review): presumably returns the number pruned, negative on error —
 * confirm against the implementation. */
int32_t pkg_cache_prune(pkg_context_t *ctx, uint32_t max_age_days);

/* Outcome counters for the last install on ctx (see pkg_get_install_result). */
typedef struct {
    uint32_t package_count;      /* total packages in the resolved tree */
    uint32_t cache_hits;         /* packages served from the local cache */
    uint32_t cache_misses;       /* packages fetched from the network */
    uint32_t files_linked;       /* files hard/soft-linked from the cache */
    uint32_t files_copied;       /* files copied instead of linked */
    uint32_t packages_installed; /* packages newly materialized */
    uint32_t packages_skipped;   /* packages already up to date */
    uint64_t elapsed_ms;         /* wall-clock duration of the install */
} pkg_install_result_t;

/* One package added by the last operation; direct marks packages the user
 * requested (vs. transitive dependencies). */
typedef struct {
    const char *name;
    const char *version;
    bool direct;
} pkg_added_package_t;

/* A pending lifecycle (e.g. postinstall) script discovered in node_modules. */
typedef struct {
    const char *name;   /* package name */
    const char *script; /* script command text */
} pkg_lifecycle_script_t;

/* Number of packages added by the last operation; index into it with
 * pkg_get_added_package(). */
uint32_t pkg_get_added_count(const pkg_context_t *ctx);

/* Scan node_modules for packages whose lifecycle scripts have not run.
 * Results are read back via the count/get pair below. */
pkg_error_t pkg_discover_lifecycle_scripts(
    pkg_context_t *ctx,
    const char *node_modules_path
);

uint32_t pkg_get_lifecycle_script_count(const pkg_context_t *ctx);

/* Fetch the index-th discovered lifecycle script (0-based). */
pkg_error_t pkg_get_lifecycle_script(
    const pkg_context_t *ctx,
    uint32_t index,
    pkg_lifecycle_script_t *out
);

/* Run postinstall scripts for the named packages under node_modules_path. */
pkg_error_t pkg_run_postinstall(
    pkg_context_t *ctx,
    const char *node_modules_path,
    const char **package_names,
    uint32_t count
);

/* Record packages as trusted (allowed to run lifecycle scripts) in
 * package.json. Context-free: operates directly on the file. */
pkg_error_t pkg_add_trusted_dependencies(
    const char *package_json_path,
    const char **package_names,
    uint32_t count
);

/* Copy the last install's counters into *out. */
pkg_error_t pkg_get_install_result(
    pkg_context_t *ctx,
    pkg_install_result_t *out
);

/* Fetch the index-th added package (0-based, bounded by pkg_get_added_count). */
pkg_error_t pkg_get_added_package(
    const pkg_context_t *ctx,
    uint32_t index,
    pkg_added_package_t *out
);

/* Resolve the filesystem path of an installed binary into out_path.
 * NOTE(review): int return — presumably >= 0 on success; confirm. */
int pkg_get_bin_path(
    const char *node_modules_path,
    const char *bin_name,
    char *out_path,
    size_t out_path_len
);

/* Callback receiving one binary name per invocation. */
typedef void (*pkg_bin_callback)(
    const char *name,
    void *user_data
);

/* Enumerate all binaries under node_modules/.bin (one callback per entry). */
int pkg_list_bins(
    const char *node_modules_path,
    pkg_bin_callback callback,
    void *user_data
);

/* Enumerate binaries provided by a single package. */
int pkg_list_package_bins(
    const char *node_modules_path,
    const char *package_name,
    pkg_bin_callback callback,
    void *user_data
);

/* Copy the named "scripts" entry from package.json into out_script.
 * Returns >= 0 when the script exists (the CLI uses this as an existence
 * probe), negative otherwise. */
int pkg_get_script(
    const char *package_json_path,
    const char *script_name,
    char *out_script,
    size_t out_script_len
);

/* Exit status of a script run: exit_code, or the signal that killed it. */
typedef struct {
    int exit_code;
    int signal;
} pkg_script_result_t;

/* Run a package.json script with node_modules/.bin on PATH; extra_args is
 * appended to the command line. */
pkg_error_t pkg_run_script(
    const char *package_json_path,
    const char *script_name,
    const char *node_modules_path,
    const char *extra_args,
    pkg_script_result_t *result
);

/* Callback receiving one (script name, command) pair per invocation. */
typedef void (*pkg_script_callback)(
    const char *name,
    const char *command,
    void *user_data
);

/* Enumerate every entry of package.json "scripts". */
int pkg_list_scripts(
    const char *package_json_path,
    pkg_script_callback callback,
    void *user_data
);

/* Bit-packed dependency-edge classification used by `ant why`. */
typedef struct {
    uint8_t peer: 1;      /* peerDependencies edge */
    uint8_t dev: 1;       /* devDependencies edge */
    uint8_t optional: 1;  /* optionalDependencies edge */
    uint8_t direct: 1;    /* declared directly in the root package.json */
    uint8_t _reserved: 4;
} pkg_dep_type_t;

/* Callback for each dependent on the "why" chain: the dependent's
 * name/version, the version constraint it declares, and the edge type. */
typedef void (*pkg_why_callback)(
    const char *name,
    const char *version,
    const char *constraint,
    pkg_dep_type_t dep_type,
    void *user_data
);

/* Summary answer for `ant why <pkg>` resolved from the lockfile. */
typedef struct {
    char target_version[64]; /* installed version of the queried package */
    bool found;              /* false if the package is not in the lockfile */
    bool is_peer;
    bool is_dev;
    bool is_direct;
} pkg_why_info_t;

/* Fill *out with summary info about package_name from the lockfile. */
int pkg_why_info(
    const char *lockfile_path,
    const char *package_name,
    pkg_why_info_t *out
);

/* Invoke callback once per dependent of package_name. */
int pkg_why(
    const char *lockfile_path,
    const char *package_name,
    pkg_why_callback callback,
    void *user_data
);

/* Registry metadata for `ant info`; string fields point into ctx-owned
 * storage and stay valid until the next pkg_info() call on the context
 * (NOTE(review): lifetime assumed — confirm). */
typedef struct {
    const char *name;
    const char *version;
    const char *description;
    const char *license;
    const char *homepage;
    const char *tarball;     /* dist tarball URL */
    const char *shasum;      /* dist SHA-1 checksum */
    const char *integrity;   /* SRI integrity string */
    const char *keywords;
    const char *published;   /* publication timestamp */
    uint32_t dep_count;      /* dependencies of the selected version */
    uint32_t version_count;  /* total published versions */
    uint64_t unpacked_size;  /* unpacked size in bytes */
} pkg_info_t;

/* One dist-tag, e.g. ("latest", "1.2.3"). */
typedef struct {
    const char *tag;
    const char *version;
} pkg_dist_tag_t;

/* One package maintainer record. */
typedef struct {
    const char *name;
    const char *email;
} pkg_maintainer_t;

/* Fetch registry metadata for package_spec into *out; the dist-tag,
 * maintainer and dependency lists become readable via the accessors below. */
pkg_error_t pkg_info(
    pkg_context_t *ctx,
    const char *package_spec,
    pkg_info_t *out
);

uint32_t pkg_info_dist_tag_count(const pkg_context_t *ctx);
pkg_error_t pkg_info_get_dist_tag(const pkg_context_t *ctx, uint32_t index, pkg_dist_tag_t *out);

uint32_t pkg_info_maintainer_count(const pkg_context_t *ctx);
pkg_error_t pkg_info_get_maintainer(const pkg_context_t *ctx, uint32_t index, pkg_maintainer_t *out);

/* One (name, version-range) dependency of the inspected package. */
typedef struct {
    const char *name;
    const char *version;
} pkg_dependency_t;

uint32_t pkg_info_dependency_count(const pkg_context_t *ctx);
pkg_error_t pkg_info_get_dependency(const pkg_context_t *ctx, uint32_t index, pkg_dependency_t *out);

/* Install package_spec into a temporary location (for `antx`-style one-off
 * execution) and write its primary binary path into out_bin_path. */
pkg_error_t pkg_exec_temp(
    pkg_context_t *ctx,
    const char *package_spec,
    char *out_bin_path,
    size_t out_bin_path_len
);

/* Install package_spec globally; the CLI reports binaries as linked into
 * ~/.ant/bin. */
pkg_error_t pkg_add_global(
    pkg_context_t *ctx,
    const char *package_spec
);

/* Remove a globally installed package. Returns PKG_NOT_FOUND when the
 * package is not installed globally. */
pkg_error_t pkg_remove_global(
    pkg_context_t *ctx,
    const char *package_name
);

/* Callback receiving one globally installed (name, version) pair. */
typedef void (*pkg_global_list_callback)(
    const char *name,
    const char *version,
    void *user_data
);

/* Enumerate globally installed packages. */
pkg_error_t pkg_list_global(
    pkg_context_t *ctx,
    pkg_global_list_callback callback,
    void *user_data
);

#endif
+315
include/progress.h
/*
 * progress.h — header-only, single-line terminal progress indicator.
 *
 * Writes a one-line status message to stderr and repaints it in place,
 * using ANSI escapes where supported, the Win32 console API on legacy
 * Windows consoles, and plain newline-separated prints on dumb terminals.
 * Repaints are rate-limited (50 ms) and suppressed for the first 500 ms so
 * short operations never flicker a progress line.
 *
 * Thread-safety: updates are serialized by an internal mutex (pthreads or
 * a Win32 CRITICAL_SECTION); the hot paths use trylock so callers never
 * block on the display. progress_stop() destroys the mutex — the struct
 * must not be used again afterwards without a fresh progress_start().
 */
#ifndef PROGRESS_H
#define PROGRESS_H

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

/* Platform shims for tty detection on FILE streams. */
#ifdef _WIN32
#include <windows.h>
#include <io.h>
#define PROGRESS_ISATTY(fd) _isatty(fd)
#define PROGRESS_FILENO(f) _fileno(f)
#else
#include <unistd.h>
#include <sys/ioctl.h>
#define PROGRESS_ISATTY(fd) isatty(fd)
#define PROGRESS_FILENO(f) fileno(f)
#endif

/* Size of both the message buffer and the render buffer; longer messages
 * are truncated by strncpy/snprintf below. */
#define PROGRESS_MSG_SIZE 256

/* Prefer pthreads whenever the header is available (covers MinGW too);
 * otherwise fall back to Win32 critical sections, else no locking. */
#ifdef __has_include
#if __has_include(<pthread.h>)
#include <pthread.h>
#define PROGRESS_HAS_PTHREADS 1
#endif
#endif

/* Minimal portable mutex wrapper; a no-op on platforms with neither
 * pthreads nor Win32 (single-threaded fallback). */
typedef struct {
#if defined(PROGRESS_HAS_PTHREADS)
    pthread_mutex_t mtx;
#elif defined(_WIN32)
    CRITICAL_SECTION cs;
#else
    int dummy;
#endif
} progress_mutex_t;

/* All state for one progress line. Zeroed and re-initialized by
 * progress_start(); owns no heap memory. */
typedef struct {
    FILE *terminal;            /* output stream (always stderr here) */
    bool is_windows_terminal;  /* legacy Win32 console, no ANSI support */
    bool supports_ansi;        /* ANSI escape sequences usable */
    bool dont_print_on_dumb;   /* opt-out flag for non-ANSI terminals */
    uint64_t start_time_ns;    /* monotonic start timestamp */
    uint64_t prev_refresh_ns;  /* last repaint timestamp */
    uint64_t refresh_rate_ns;  /* min interval between repaints (50 ms) */
    uint64_t initial_delay_ns; /* quiet period before first repaint (500 ms) */
    bool done;                 /* set by progress_stop(): clear, don't redraw */
    bool timer_valid;          /* clock source produced a nonzero timestamp */
    size_t columns_written;    /* width of the currently displayed line */
    progress_mutex_t mutex;    /* serializes buffer/terminal access */
    char buffer[PROGRESS_MSG_SIZE];     /* render buffer (escapes + text) */
    char msg_buffer[PROGRESS_MSG_SIZE]; /* latest user-supplied message */
} progress_t;

/* Initialize the wrapped mutex (no-op on lock-free fallback). */
static inline void progress_mutex_init(progress_mutex_t *m) {
#if defined(PROGRESS_HAS_PTHREADS)
    pthread_mutex_init(&m->mtx, NULL);
#elif defined(_WIN32)
    InitializeCriticalSection(&m->cs);
#else
    (void)m;
#endif
}

/* Destroy the wrapped mutex. */
static inline void progress_mutex_destroy(progress_mutex_t *m) {
#if defined(PROGRESS_HAS_PTHREADS)
    pthread_mutex_destroy(&m->mtx);
#elif defined(_WIN32)
    DeleteCriticalSection(&m->cs);
#else
    (void)m;
#endif
}

/* Block until the mutex is acquired. */
static inline void progress_mutex_lock(progress_mutex_t *m) {
#if defined(PROGRESS_HAS_PTHREADS)
    pthread_mutex_lock(&m->mtx);
#elif defined(_WIN32)
    EnterCriticalSection(&m->cs);
#else
    (void)m;
#endif
}

/* Release the mutex. */
static inline void progress_mutex_unlock(progress_mutex_t *m) {
#if defined(PROGRESS_HAS_PTHREADS)
    pthread_mutex_unlock(&m->mtx);
#elif defined(_WIN32)
    LeaveCriticalSection(&m->cs);
#else
    (void)m;
#endif
}

/* Non-blocking acquire; returns true when the lock was taken (always true
 * on the lock-free fallback). */
static inline bool progress_mutex_trylock(progress_mutex_t *m) {
#if defined(PROGRESS_HAS_PTHREADS)
    return pthread_mutex_trylock(&m->mtx) == 0;
#elif defined(_WIN32)
    return TryEnterCriticalSection(&m->cs) != 0;
#else
    (void)m;
    return true;
#endif
}

/* Current time in nanoseconds from the best available clock:
 * CLOCK_MONOTONIC, else QueryPerformanceCounter on Windows, else
 * CLOCK_REALTIME; 0 signals "no usable clock" (see timer_valid).
 * NOTE(review): when CLOCK_MONOTONIC is defined on Windows (MinGW),
 * the QPC branch is compiled out entirely. Also, counter.QuadPart *
 * 1000000000ULL can overflow 64 bits for large uptimes at high QPC
 * frequencies — consider dividing before multiplying. */
static inline uint64_t progress_now_ns(void) {
    struct timespec ts;
#if defined(CLOCK_MONOTONIC)
    if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
        return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
    }
#elif defined(_WIN32)
    static LARGE_INTEGER freq = {0};
    if (freq.QuadPart == 0) {
        QueryPerformanceFrequency(&freq);
    }
    LARGE_INTEGER counter;
    QueryPerformanceCounter(&counter);
    return (uint64_t)(counter.QuadPart * 1000000000ULL / freq.QuadPart);
#endif
    if (clock_gettime(CLOCK_REALTIME, &ts) == 0) {
        return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
    }
    return 0;
}

/* Detect whether stream f is a tty that understands ANSI escapes.
 * On Windows this also *enables* VT processing as a side effect when the
 * console supports it, falling back to sniffing $TERM.
 * NOTE(review): the $TERM chain relies on && binding tighter than || —
 * correct, but worth parenthesizing to silence compiler warnings. */
static inline bool progress_detect_ansi(FILE *f) {
    if (!f) return false;

    int fd = PROGRESS_FILENO(f);
    if (!PROGRESS_ISATTY(fd)) return false;

#ifdef _WIN32
    HANDLE h = (HANDLE)_get_osfhandle(fd);
    DWORD mode;
    if (GetConsoleMode(h, &mode)) {
        if (SetConsoleMode(h, mode | ENABLE_VIRTUAL_TERMINAL_PROCESSING)) return true;
    }
    const char *term = getenv("TERM");
    return term && strstr(term, "xterm")
        || term && strstr(term, "vt100")
        || term && strstr(term, "color")
        || term && strstr(term, "ansi");
#else
    const char *term = getenv("TERM");
    if (!term) return false;
    if (strcmp(term, "dumb") == 0) return false;
    return true;
#endif
}

/* Forward declarations: defined below, used by progress_start(). */
static void progress_refresh_locked(progress_t *p);
static void progress_clear_locked(progress_t *p, size_t *end);

/* Initialize *p (zeroing all prior state), detect terminal capabilities on
 * stderr, store the initial message, and draw once immediately. Not
 * thread-safe itself — call before sharing p across threads. */
static inline void progress_start(progress_t *p, const char *message) {
    memset(p, 0, sizeof(*p));
    progress_mutex_init(&p->mutex);

    FILE *f = stderr;
    int fd = PROGRESS_FILENO(f);

    if (PROGRESS_ISATTY(fd)) {
        p->terminal = f;
        p->supports_ansi = progress_detect_ansi(f);
#ifdef _WIN32
        /* tty but no ANSI: must use the legacy console API to repaint. */
        if (!p->supports_ansi) p->is_windows_terminal = true;
#endif
    } else {
        /* Not a tty (pipe/file): still print, but as plain dumb output. */
        p->terminal = f;
        p->supports_ansi = false;
        p->is_windows_terminal = false;
    }

    if (message) {
        strncpy(p->msg_buffer, message, PROGRESS_MSG_SIZE - 1);
        p->msg_buffer[PROGRESS_MSG_SIZE - 1] = '\0';
    } else p->msg_buffer[0] = '\0';

    p->refresh_rate_ns = 50 * 1000000ULL;   /* repaint at most every 50 ms */
    p->initial_delay_ns = 500 * 1000000ULL; /* stay quiet for 500 ms */
    p->start_time_ns = progress_now_ns();
    p->prev_refresh_ns = 0;
    p->timer_valid = (p->start_time_ns != 0);

    p->columns_written = 0;
    p->done = false;

    /* Safe without the lock: no other thread can hold p yet. */
    progress_refresh_locked(p);
}

/* Repaint if (a) the lock is free, (b) the initial delay has elapsed, and
 * (c) at least refresh_rate_ns passed since the last repaint. Never blocks;
 * silently skips when contended or when no valid clock exists. */
static inline void progress_maybe_refresh(progress_t *p) {
    if (!p->timer_valid) return;
    if (!progress_mutex_trylock(&p->mutex)) return;

    uint64_t now = progress_now_ns();
    uint64_t elapsed = now - p->start_time_ns;

    if (elapsed < p->initial_delay_ns) {
        progress_mutex_unlock(&p->mutex);
        return;
    }

    /* now < prev_refresh_ns guards against a non-monotonic fallback clock. */
    if (now < p->prev_refresh_ns || (now - p->prev_refresh_ns) < p->refresh_rate_ns) {
        progress_mutex_unlock(&p->mutex);
        return;
    }

    progress_refresh_locked(p);
    progress_mutex_unlock(&p->mutex);
}

/* Replace the displayed message (NULL clears it) and attempt a repaint. */
static inline void progress_update(progress_t *p, const char *message) {
    progress_mutex_lock(&p->mutex);
    if (message) {
        strncpy(p->msg_buffer, message, PROGRESS_MSG_SIZE - 1);
        p->msg_buffer[PROGRESS_MSG_SIZE - 1] = '\0';
    } else p->msg_buffer[0] = '\0';

    progress_mutex_unlock(&p->mutex);
    progress_maybe_refresh(p);
}

/* Force a repaint now (rate limit ignored), unless the lock is contended. */
static inline void progress_refresh(progress_t *p) {
    if (!progress_mutex_trylock(&p->mutex)) return;
    progress_refresh_locked(p);
    progress_mutex_unlock(&p->mutex);
}

/* Append erase instructions for the currently displayed line to p->buffer
 * at offset *end (caller must hold the mutex).
 *   - ANSI: cursor-left by columns_written, then erase-to-end-of-line.
 *   - Legacy Win32 console: move the cursor back and blank the region via
 *     the console API (bypasses p->buffer entirely).
 *   - Dumb output: just append '\n' so the next line starts fresh.
 * Resets columns_written and writes the new buffer offset back to *end. */
static void progress_clear_locked(progress_t *p, size_t *end) {
    if (!p->terminal) return;

    size_t pos = *end;

    if (p->columns_written > 0) {
        if (p->supports_ansi) {
            int written = snprintf(p->buffer + pos, sizeof(p->buffer) - pos, "\x1b[%zuD\x1b[0K", p->columns_written);
            if (written > 0) pos += (size_t)written;
        } else if (p->is_windows_terminal) {
#ifdef _WIN32
            HANDLE h = (HANDLE)_get_osfhandle(PROGRESS_FILENO(p->terminal));
            CONSOLE_SCREEN_BUFFER_INFO info;
            if (GetConsoleScreenBufferInfo(h, &info)) {
                COORD cursor = {
                    .X = (SHORT)(info.dwCursorPosition.X - (SHORT)p->columns_written),
                    .Y = info.dwCursorPosition.Y
                };
                if (cursor.X < 0) cursor.X = 0;

                /* Blank from the line start to the screen edge, then park
                 * the cursor where the line began. */
                DWORD fill_len = (DWORD)(info.dwSize.X - cursor.X);
                DWORD written;
                FillConsoleOutputAttribute(h, info.wAttributes, fill_len, cursor, &written);
                FillConsoleOutputCharacterA(h, ' ', fill_len, cursor, &written);
                SetConsoleCursorPosition(h, cursor);
            }
#endif
        } else {
            if (pos < sizeof(p->buffer)) p->buffer[pos++] = '\n';
        }
        p->columns_written = 0;
    }

    *end = pos;
}

/* Erase the old line and render " <msg>" into p->buffer, then flush it to
 * the terminal in a single fwrite (caller must hold the mutex). When done
 * is set, only the erase happens. columns_written tracks the visible text
 * width for the next erase.
 * NOTE(review): columns_written counts bytes, not display columns — wide
 * or multi-byte characters will under-erase on the ANSI path; confirm
 * whether messages are ASCII-only. */
static void progress_refresh_locked(progress_t *p) {
    bool is_dumb = !p->supports_ansi && !p->is_windows_terminal;
    if (is_dumb && p->dont_print_on_dumb) return;
    if (!p->terminal) return;

    size_t end = 0;
    progress_clear_locked(p, &end);

    if (!p->done) {
        if (p->msg_buffer[0]) {
            int written = snprintf(p->buffer + end, sizeof(p->buffer) - end, " %s", p->msg_buffer);
            if (written > 0) {
                /* snprintf returns the would-be length; clamp to what
                 * actually fit in the buffer. */
                size_t amt = (
                    (size_t)written < sizeof(p->buffer) - end)
                    ? (size_t)written
                    : sizeof(p->buffer) - end - 1;
                end += amt;
                p->columns_written = amt;
            }
        }
    }

    if (end > 0) {
        fwrite(p->buffer, 1, end, p->terminal);
        fflush(p->terminal);
    }

    p->prev_refresh_ns = progress_now_ns();
}

/* Erase the progress line and tear down the mutex. After this call *p is
 * dead; only progress_start() may touch it again. */
static inline void progress_stop(progress_t *p) {
    progress_mutex_lock(&p->mutex);
    p->done = true;

    size_t end = 0;
    progress_clear_locked(p, &end);
    if (end > 0 && p->terminal) {
        fwrite(p->buffer, 1, end, p->terminal);
        fflush(p->terminal);
    }

    progress_mutex_unlock(&p->mutex);
    progress_mutex_destroy(&p->mutex);
}

#endif
+4
include/utils.h
··· 5 5 #include <stdlib.h> 6 6 #include <stdint.h> 7 7 8 + const char *ant_semver(void); 8 9 uint64_t hash_key(const char *key, size_t len); 10 + 9 11 int is_typescript_file(const char *filename); 10 12 int ant_version(void *argtable[]); 13 + 14 + void *try_oom(size_t size); 11 15 12 16 #endif
+13 -1
libant/scripts/bundle.sh
··· 18 18 LIBS=$(find "$BUILD_DIR" -name '*.a' \ 19 19 ! -name 'libant.a' \ 20 20 ! -name 'libant-lto.a' \ 21 + ! -name 'libpkg.a' \ 21 22 ! -path '*/oxc-target/release/deps/*' \ 22 23 ! -path '*/.external/*' \ 23 - 2>/dev/null | grep -v "$EXCLUDE" | sort -u) 24 + 2>/dev/null | grep -E -v "$EXCLUDE" | sort -u) 24 25 25 26 cd "$TMPDIR" 26 27 for lib in $LIBS; do ··· 60 61 if [ -f "$BUILD_DIR/libant.h" ]; then 61 62 cp "$BUILD_DIR/libant.h" "$DIST_DIR/ant.h" 62 63 echo "Created: $DIST_DIR/ant.h" 64 + fi 65 + 66 + pkg_lib_path=$(find "$BUILD_DIR" -name 'libpkg.a' -print | head -n 1) 67 + if [ -n "$pkg_lib_path" ] && [ -f "$pkg_lib_path" ]; then 68 + cp "$pkg_lib_path" "$DIST_DIR/libpkg.a" 69 + echo "Created: $DIST_DIR/libpkg.a ($(du -h "$pkg_lib_path" | cut -f1))" 70 + fi 71 + 72 + if [ -f "$ROOT_DIR/include/pkg.h" ]; then 73 + cp "$ROOT_DIR/include/pkg.h" "$DIST_DIR/pkg.h" 74 + echo "Created: $DIST_DIR/pkg.h" 63 75 fi 64 76 65 77 echo ""
+10 -8
maidfile.toml
··· 27 27 [tasks.setup] 28 28 script = ['meson subprojects download', ''' 29 29 bash -c 'CC="ccache $(which clang)" \ 30 + CC_LD="$(which ld64.lld)" \ 30 31 meson setup build --wipe' 31 32 '''] 32 33 ··· 38 39 39 40 [tasks.install] 40 41 script = ["maid build -q", "maid strip", ''' 41 - bash -c 'which ant && cp ./build/ant "$(which ant)" || 42 - { mkdir -p ~/.local/bin && cp ./build/ant ~/.local/bin/; }' 42 + bash -c 'if which ant >/dev/null 2>&1; then 43 + dir=$(dirname "$(which ant)") 44 + cp ./build/ant "$dir/ant" 45 + ln -sf "$dir/ant" "$dir/antx" 46 + else 47 + mkdir -p ~/.ant/bin 48 + cp ./build/ant ~/.ant/bin/ 49 + ln -sf ~/.ant/bin/ant ~/.ant/bin/antx 50 + fi' 43 51 '''] 44 52 45 53 [tasks.debug] ··· 47 55 bash -c 'CC="ccache $(which clang)" \ 48 56 meson setup build --wipe --buildtype=debug -Doptimization=0 -Db_lto=false -Dstrip=false -Db_lundef=false -Dunity=off' 49 57 '''] 50 - 51 - [tasks.embed_example] 52 - script = [''' 53 - bash -c 'CC="ccache $(which clang)" \ 54 - meson setup build -Dbuild_examples=true 2>/dev/null || true' 55 - ''', "meson compile -C build", "./build/embed_example"]
+31 -12
meson.build
··· 34 34 'src/runtime.c', 35 35 'src/snapshot.c', 36 36 'src/esm/remote.c', 37 + 'src/cli/pkg.c', 37 38 ) + files(module_files) 38 39 39 40 include = include_directories('include') ··· 51 52 install: true 52 53 ) 53 54 55 + pkg_lib = custom_target( 56 + 'pkg_zig', 57 + output: 'libpkg.a', 58 + command: [ 59 + 'sh', '-c', 60 + '"$ZIG" build --build-file "$PKG_ZIG_DIR/build.zig" --prefix "$PKG_BUILD_DIR" && cp "$PKG_BUILD_DIR/lib/$PKG_LIB_NAME" "@OUTPUT@"' 61 + ], 62 + env: { 63 + 'ZIG': zig.full_path(), 64 + 'ANT_VERSION': ant_version, 65 + 66 + 'PKG_ZIG_DIR': pkg_zig_dir, 'PKG_BUILD_DIR': pkg_build_dir, 67 + 'LMDB_INCLUDE': lmdb_include_path, 'ZLIB_INCLUDE': zlib_include_path, 68 + 'LIBUV_INCLUDE': libuv_include_path, 'YYJSON_INCLUDE': yyjson_include_path, 69 + 70 + 'PKG_TARGET': host_machine.cpu_family() + '-' + host_machine.system(), 71 + 'PKG_LIB_NAME': host_machine.system() == 'windows' ? 'pkg.lib' : 'libpkg.a', 72 + }, 73 + build_by_default: true, 74 + build_always_stale: true 75 + ) 76 + 77 + pkg_dep = declare_dependency( 78 + link_with: [pkg_lib], 79 + include_directories: include, 80 + dependencies: [lmdb_dep, tlsuv_dep, libuv_dep, nghttp2_dep] 81 + ) 82 + 54 83 libant_dep = declare_dependency( 55 84 link_with: libant, 56 85 include_directories: [ 57 86 include, version_include, build_include 58 87 ], 59 - dependencies: ant_deps + [oxc_dep] 88 + dependencies: ant_deps + [oxc_dep, pkg_dep] 60 89 ) 61 90 62 91 link_args = [] ··· 70 99 include_directories: [strip_include], 71 100 dependencies: libant_dep, 72 101 link_args: link_args 73 - ) 74 - 75 - if get_option('build_examples') 76 - executable( 77 - 'embed_example', 78 - files('examples/embed/embed.c'), 79 - include_directories: [include, build_include], 80 - link_with: libant, 81 - dependencies: ant_deps + [oxc_dep] 82 - ) 83 - endif 102 + )
+55 -8
meson/deps/meson.build
··· 80 80 tlsuv_opts.append_compile_args('c', tlsuv_compile_args) 81 81 tlsuv_dep = cmake.subproject('tlsuv', options: tlsuv_opts).dependency('tlsuv') 82 82 83 - pcre2_dep = subproject('pcre2', default_options: ['warning_level=0']).get_variable('libpcre2_8') 84 83 uthash_dep = subproject('uthash').get_variable('uthash_dep') 85 84 yyjson_dep = subproject('yyjson').get_variable('yyjson_dep') 86 85 uuidv7_dep = subproject('uuidv7').get_variable('uuidv7_dep') 87 86 argtable3_dep = subproject('argtable3').get_variable('argtable3_dep') 88 87 minicoro_dep = subproject('minicoro').get_variable('minicoro_dep') 89 88 90 - zlib_dep = subproject('zlib-ng', 91 - default_options: ['b_lto=false', 'warning_level=0'] 92 - ).get_variable('zlib_ng_dep') 89 + lmdb_dep = subproject('lmdb', default_options: [ 90 + 'warning_level=0', 91 + 'programs=disabled' 92 + ]).get_variable('lmdb_dep') 93 + 94 + zlib_dep = subproject('zlib-ng', default_options: [ 95 + 'b_lto=false', 96 + 'warning_level=0' 97 + ]).get_variable('zlib_ng_dep') 98 + 99 + nghttp2_dep = subproject('nghttp2', default_options: [ 100 + 'warning_level=0' 101 + ]).get_variable('libnghttp2_dep') 93 102 94 103 libffi_dep = subproject('libffi', default_options: [ 95 104 'warning_level=0', 96 105 'tests=false' 97 106 ]).get_variable('ffi_dep') 107 + 108 + pcre2_dep = subproject('pcre2', default_options: [ 109 + 'warning_level=0', 110 + 'grep=false', 111 + 'test=false' 112 + ]).get_variable('libpcre2_8') 98 113 99 114 cargo = find_program('cargo', required: true) 100 115 cp = find_program('cp', required: true) ··· 116 131 output: oxc_output_name, 117 132 command: [ 118 133 'sh', '-c', 119 - cargo.full_path() + ' build --release' + rust_target_arg + ' ' + 120 - '--manifest-path ' + src_root / 'src' / 'strip' / 'Cargo.toml' + ' ' + 121 - '--target-dir ' + meson.project_build_root() / 'oxc-target' + 122 - ' && ' + cp.full_path() + ' ' + oxc_release_dir / oxc_lib_name + ' @OUTPUT@' 134 + '"' + cargo.full_path() + '"' + ' build 
--release' + rust_target_arg + ' ' + 135 + '--manifest-path "' + src_root / 'src' / 'strip' / 'Cargo.toml' + '" ' + 136 + '--target-dir "' + meson.project_build_root() / 'oxc-target' + '"' + 137 + ' && "' + cp.full_path() + '" "' + oxc_release_dir / oxc_lib_name + '" @OUTPUT@' 123 138 ], 124 139 build_by_default: true 125 140 ) 126 141 127 142 oxc_dep = declare_dependency(link_with: oxc_lib) 143 + zig = find_program('zig', required: true) 144 + 145 + deps_info = { 146 + 'lmdb': { 147 + 'dir': 'openldap-LMDB_0.9.33', 148 + 'inc': 'libraries/liblmdb', 149 + }, 150 + 'yyjson': { 151 + 'dir': 'yyjson-0.12.0', 152 + 'inc': 'src', 153 + }, 154 + 'libuv': { 155 + 'dir': 'libuv-v1.51.0', 156 + 'inc': 'include', 157 + }, 158 + 'zlib-ng': { 159 + 'dir': 'zlib-ng-2.3.2', 160 + 'inc': '', 161 + 'build_dir': true, 162 + }, 163 + } 164 + 165 + subprojects_src = src_root / 'vendor' 166 + subprojects_build = meson.project_build_root() / 'vendor' 167 + 168 + lmdb_include_path = subprojects_src / deps_info['lmdb']['dir'] / deps_info['lmdb']['inc'] 169 + yyjson_include_path = subprojects_src / deps_info['yyjson']['dir'] / deps_info['yyjson']['inc'] 170 + libuv_include_path = subprojects_src / deps_info['libuv']['dir'] / deps_info['libuv']['inc'] 171 + zlib_include_path = subprojects_build / deps_info['zlib-ng']['dir'] / deps_info['zlib-ng']['inc'] 172 + 173 + pkg_zig_dir = src_root / 'src' / 'pkg' 174 + pkg_build_dir = meson.current_build_dir() 128 175 129 176 ant_deps = [ 130 177 libffi_dep, uuid_dep,
+1 -1
meson/version/meson.build
··· 4 4 timestamp_opt = get_option('build_timestamp') 5 5 timestamp = timestamp_opt != '' ? timestamp_opt : run_command('date', '+%s', check: true).stdout().strip() 6 6 7 - ant_version = '0.5.0.' + timestamp + '-g' + git_hash 7 + ant_version = '0.5.1.' + timestamp + '-g' + git_hash 8 8 cmd_cc = meson.get_compiler('c') 9 9 10 10 target_triple = run_command(cmd_cc.cmd_array(), '-dumpmachine', check: true).stdout().strip()
-1
meson_options.txt
··· 2 2 option('build_timestamp', type: 'string', value: '', description: 'build timestamp (defaults to current time if empty)') 3 3 option('tls_library', type: 'combo', choices: ['openssl', 'mbedtls'], value: 'openssl', description: 'TLS library to use') 4 4 option('deps_prefix_cmake', type: 'string', value: '', description: 'prefix path for finding dependencies in cmake subprojects') 5 - option('build_examples', type: 'boolean', value: false, description: 'build embedding examples')
+1504
src/cli/pkg.c
··· 1 + #include <compat.h> // IWYU pragma: keep 2 + 3 + #include <pkg.h> 4 + #include <cli/pkg.h> 5 + #include <stdio.h> 6 + #include <stdlib.h> 7 + #include <string.h> 8 + #include <unistd.h> 9 + #include <sys/stat.h> 10 + #include <dirent.h> 11 + #include <time.h> 12 + #include <argtable3.h> 13 + #include <yyjson.h> 14 + 15 + #include "utils.h" 16 + #include "config.h" 17 + #include "progress.h" 18 + 19 + extern bool io_no_color; 20 + bool pkg_verbose = false; 21 + 22 + #define C(color) (io_no_color ? "" : (color)) 23 + #define C_RESET C("\x1b[0m") 24 + #define C_BOLD C("\x1b[1m") 25 + #define C_DIM C("\x1b[2m") 26 + #define C_UL C("\x1b[4m") 27 + #define C_UL_OFF C("\x1b[24m") 28 + #define C_GREEN C("\x1b[32m") 29 + #define C_YELLOW C("\x1b[33m") 30 + #define C_BLUE C("\x1b[34m") 31 + #define C_MAGENTA C("\x1b[35m") 32 + #define C_CYAN C("\x1b[36m") 33 + #define C_WHITE C("\x1b[37m") 34 + #define C_RED C("\x1b[31m") 35 + 36 + static void progress_callback(void *user_data, pkg_phase_t phase, uint32_t current, uint32_t total, const char *message) { 37 + progress_t *progress = (progress_t *)user_data; 38 + if (!progress || !message || !message[0]) return; 39 + 40 + const char *icon; 41 + switch (phase) { 42 + case PKG_PHASE_RESOLVING: icon = "๐Ÿ”"; break; 43 + case PKG_PHASE_FETCHING: icon = "๐Ÿšš"; break; 44 + case PKG_PHASE_EXTRACTING: icon = "๐Ÿ“ฆ"; break; 45 + case PKG_PHASE_LINKING: icon = "๐Ÿ”—"; break; 46 + case PKG_PHASE_CACHING: icon = "๐Ÿ’พ"; break; 47 + case PKG_PHASE_POSTINSTALL: icon = "โš™๏ธ "; break; 48 + default: icon = "๐Ÿ“ฆ"; break; 49 + } 50 + 51 + char msg[PROGRESS_MSG_SIZE]; 52 + if (total > 0) snprintf(msg, sizeof(msg), "%s %s [%u/%u]", icon, message, current, total); 53 + else if (current > 0) snprintf(msg, sizeof(msg), "%s %s [%u]", icon, message, current); 54 + else snprintf(msg, sizeof(msg), "%s %s", icon, message); 55 + 56 + progress_update(progress, msg); 57 + } 58 + 59 + static void print_added_packages(pkg_context_t *ctx) { 60 + 
uint32_t count = pkg_get_added_count(ctx); 61 + uint32_t printed = 0; 62 + if (count > 0) printf("\n"); 63 + 64 + for (uint32_t i = 0; i < count; i++) { 65 + pkg_added_package_t pkg; 66 + if (pkg_get_added_package(ctx, i, &pkg) == PKG_OK && pkg.direct) { 67 + printf("%s+%s %s%s%s@%s%s%s\n", 68 + C_GREEN, C_RESET, 69 + C_BOLD, pkg.name, C_RESET, 70 + C_DIM, pkg.version, C_RESET 71 + ); printed++; 72 + } 73 + } 74 + 75 + if (printed > 0) printf("\n"); 76 + } 77 + 78 + static uint64_t timespec_diff_ms(struct timespec *start, struct timespec *end) { 79 + int64_t sec = end->tv_sec - start->tv_sec; 80 + int64_t nsec = end->tv_nsec - start->tv_nsec; 81 + if (nsec < 0) { sec--; nsec += 1000000000; } 82 + return (uint64_t)sec * 1000 + (uint64_t)nsec / 1000000; 83 + } 84 + 85 + static void print_elapsed(uint64_t elapsed_ms) { 86 + fputs(C_BOLD, stdout); 87 + if (elapsed_ms < 1000) { 88 + printf("%llums", (unsigned long long)elapsed_ms); 89 + } else printf("%.2fs", (double)elapsed_ms / 1000.0); 90 + fputs(C_RESET, stdout); 91 + } 92 + 93 + static void print_install_header(const char *cmd) { 94 + const char *version = ant_semver(); 95 + 96 + printf("%sant %s%s v%s %s(%s)%s\n", 97 + C_BOLD, cmd, C_RESET, version, 98 + C_DIM, ANT_GIT_HASH, C_RESET 99 + ); 100 + } 101 + 102 + static void print_bin_callback(const char *name, void *user_data) { 103 + (void)user_data; 104 + printf(" %s-%s %s\n", C_DIM, C_RESET, name); 105 + } 106 + 107 + static void prompt_with_default(const char *prompt, const char *def, char *buf, size_t buf_size) { 108 + if (def && def[0]) { 109 + printf("%s%s%s %s(%s)%s: ", C_CYAN, prompt, C_RESET, C_DIM, def, C_RESET); 110 + } else printf("%s%s%s: ", C_CYAN, prompt, C_RESET); 111 + fflush(stdout); 112 + 113 + if (fgets(buf, (int)buf_size, stdin)) { 114 + size_t len = strlen(buf); 115 + if (len > 0 && buf[len - 1] == '\n') buf[len - 1] = '\0'; 116 + } 117 + 118 + if (buf[0] == '\0' && def) { 119 + strncpy(buf, def, buf_size - 1); 120 + buf[buf_size - 1] = '\0'; 
121 + } 122 + } 123 + 124 + typedef struct { 125 + const char *target; 126 + int count; 127 + } why_ctx_t; 128 + 129 + static void print_why_callback(const char *name, const char *version, const char *constraint, pkg_dep_type_t dep_type, void *user_data) { 130 + why_ctx_t *ctx = (why_ctx_t *)user_data; 131 + 132 + if (strcmp(name, "package.json") == 0) { 133 + const char *type_str = dep_type.dev ? "devDependencies" : "dependencies"; 134 + printf(" %sโ””%s %s%s%s %s(%s)%s\n", 135 + C_DIM, C_RESET, 136 + C_GREEN, name, C_RESET, 137 + C_DIM, type_str, C_RESET); 138 + } else { 139 + const char *type_str = dep_type.peer ? "peer" : (dep_type.dev ? "dev" : (dep_type.optional ? "optional" : "")); 140 + if (type_str[0]) { 141 + printf(" %sโ””%s %s %s%s%s@%s%s%s %s\"%s\"%s\n", 142 + C_DIM, C_RESET, 143 + type_str, 144 + C_BOLD, name, C_RESET, 145 + C_DIM, version, C_RESET, 146 + C_CYAN, constraint, C_RESET); 147 + } else { 148 + printf(" %sโ””%s %s%s%s@%s%s%s %s\"%s\"%s\n", 149 + C_DIM, C_RESET, 150 + C_BOLD, name, C_RESET, 151 + C_DIM, version, C_RESET, 152 + C_CYAN, constraint, C_RESET); 153 + } 154 + } 155 + 156 + ctx->count++; 157 + } 158 + 159 + static void print_script(const char *name, const char *command, void *ud) { 160 + (void)ud; 161 + if (strlen(command) > 50) { 162 + printf(" %-15s %.47s...\n", name, command); 163 + } else { 164 + printf(" %-15s %s\n", name, command); 165 + } 166 + } 167 + 168 + static void print_bin_name(const char *name, void *ud) { 169 + (void)ud; 170 + printf(" %s\n", name); 171 + } 172 + 173 + bool pkg_script_exists(const char *package_json_path, const char *script_name) { 174 + char script_cmd[4096]; 175 + return pkg_get_script(package_json_path, script_name, script_cmd, sizeof(script_cmd)) >= 0; 176 + } 177 + 178 + static const char *get_global_dir(void) { 179 + static char global_dir[4096] = {0}; 180 + if (global_dir[0] == '\0') { 181 + const char *home = getenv("HOME"); 182 + if (home) snprintf(global_dir, sizeof(global_dir), 
"%s/.ant/pkg/global", home); 183 + } 184 + return global_dir; 185 + } 186 + 187 + static int cmd_add_global(const char *package_spec) { 188 + print_install_header("add -g"); 189 + 190 + progress_t progress; 191 + if (!pkg_verbose) progress_start(&progress, "๐Ÿ” Resolving [1/1]"); 192 + 193 + pkg_options_t opts = { 194 + .progress_callback = pkg_verbose ? NULL : progress_callback, 195 + .user_data = pkg_verbose ? NULL : &progress, 196 + .verbose = pkg_verbose 197 + }; 198 + pkg_context_t *ctx = pkg_init(&opts); 199 + if (!ctx) { 200 + if (!pkg_verbose) progress_stop(&progress); 201 + fprintf(stderr, "Error: Failed to initialize package manager\n"); 202 + return EXIT_FAILURE; 203 + } 204 + 205 + pkg_error_t err = pkg_add_global(ctx, package_spec); 206 + if (!pkg_verbose) progress_stop(&progress); 207 + 208 + if (err != PKG_OK) { 209 + fprintf(stderr, "Error: %s\n", pkg_error_string(ctx)); 210 + pkg_free(ctx); 211 + return EXIT_FAILURE; 212 + } 213 + 214 + pkg_install_result_t result; 215 + if (pkg_get_install_result(ctx, &result) == PKG_OK) { 216 + printf("\n%sinstalled globally%s %s%s%s\n", 217 + C_GREEN, C_RESET, C_BOLD, package_spec, C_RESET); 218 + printf(" %s(binaries linked to ~/.ant/bin)%s\n", C_DIM, C_RESET); 219 + printf("\n%s[%s", C_DIM, C_RESET); 220 + print_elapsed(result.elapsed_ms); 221 + printf("%s]%s done\n", C_DIM, C_RESET); 222 + } 223 + 224 + pkg_free(ctx); 225 + return EXIT_SUCCESS; 226 + } 227 + 228 + static int cmd_remove_global(const char *package_name) { 229 + print_install_header("remove -g"); 230 + 231 + progress_t progress; 232 + if (!pkg_verbose) progress_start(&progress, "๐Ÿ” Resolving"); 233 + 234 + pkg_options_t opts = { 235 + .progress_callback = pkg_verbose ? NULL : progress_callback, 236 + .user_data = pkg_verbose ? 
NULL : &progress, 237 + .verbose = pkg_verbose 238 + }; 239 + pkg_context_t *ctx = pkg_init(&opts); 240 + if (!ctx) { 241 + if (!pkg_verbose) progress_stop(&progress); 242 + fprintf(stderr, "Error: Failed to initialize package manager\n"); 243 + return EXIT_FAILURE; 244 + } 245 + 246 + pkg_error_t err = pkg_remove_global(ctx, package_name); 247 + if (!pkg_verbose) progress_stop(&progress); 248 + 249 + if (err == PKG_NOT_FOUND) { 250 + printf("\nPackage '%s' not found in global dependencies\n", package_name); 251 + pkg_free(ctx); 252 + return EXIT_SUCCESS; 253 + } 254 + 255 + if (err != PKG_OK) { 256 + fprintf(stderr, "Error: %s\n", pkg_error_string(ctx)); 257 + pkg_free(ctx); 258 + return EXIT_FAILURE; 259 + } 260 + 261 + printf("\n%s-%s Removed globally: %s%s%s\n", C_RED, C_RESET, C_BOLD, package_name, C_RESET); 262 + 263 + pkg_free(ctx); 264 + return EXIT_SUCCESS; 265 + } 266 + 267 + static int cmd_install(void) { 268 + print_install_header("install"); 269 + 270 + progress_t progress; 271 + 272 + if (!pkg_verbose) { 273 + progress_start(&progress, "๐Ÿ” Resolving [1/1]"); 274 + } 275 + 276 + pkg_options_t opts = { 277 + .progress_callback = pkg_verbose ? NULL : progress_callback, 278 + .user_data = pkg_verbose ? 
NULL : &progress, 279 + .verbose = pkg_verbose 280 + }; 281 + pkg_context_t *ctx = pkg_init(&opts); 282 + if (!ctx) { 283 + fprintf(stderr, "Error: Failed to initialize package manager\n"); 284 + return EXIT_FAILURE; 285 + } 286 + 287 + struct stat st; 288 + bool needs_resolve = (stat("ant.lockb", &st) != 0); 289 + 290 + if (needs_resolve) { 291 + if (stat("package.json", &st) != 0) { 292 + if (!pkg_verbose) { progress_stop(&progress); } 293 + fprintf(stderr, "Error: No package.json found\n"); 294 + pkg_free(ctx); 295 + return EXIT_FAILURE; 296 + } 297 + 298 + pkg_error_t err = pkg_resolve_and_install(ctx, "package.json", "ant.lockb", "node_modules"); 299 + if (err != PKG_OK) { 300 + if (!pkg_verbose) { progress_stop(&progress); } 301 + fprintf(stderr, "Error: %s\n", pkg_error_string(ctx)); 302 + pkg_free(ctx); 303 + return EXIT_FAILURE; 304 + } 305 + } else { 306 + pkg_error_t err = pkg_install(ctx, "package.json", "ant.lockb", "node_modules"); 307 + if (err != PKG_OK) { 308 + if (!pkg_verbose) { progress_stop(&progress); } 309 + fprintf(stderr, "Error: %s\n", pkg_error_string(ctx)); 310 + pkg_free(ctx); 311 + return EXIT_FAILURE; 312 + } 313 + } 314 + 315 + if (!pkg_verbose) { 316 + progress_stop(&progress); 317 + 318 + } 319 + 320 + pkg_install_result_t result; 321 + if (pkg_get_install_result(ctx, &result) == PKG_OK) { 322 + if (result.packages_installed > 0) { 323 + print_added_packages(ctx); 324 + printf("%s%u%s package%s installed", 325 + C_GREEN, result.packages_installed, C_RESET, 326 + result.packages_installed == 1 ? 
"" : "s"); 327 + if (result.cache_hits > 0) { 328 + printf(" %s(%u cached)%s", C_DIM, result.cache_hits, C_RESET); 329 + } 330 + printf(" %s[%s", C_DIM, C_RESET); 331 + print_elapsed(result.elapsed_ms); 332 + printf("%s]%s\n", C_DIM, C_RESET); 333 + } else { 334 + printf("\n%sChecked%s %s%u%s installs across %s%u%s packages %s(no changes)%s %s[%s", 335 + C_DIM, C_RESET, 336 + C_GREEN, result.packages_installed + result.packages_skipped, C_RESET, 337 + C_GREEN, result.package_count, C_RESET, 338 + C_DIM, C_RESET, 339 + C_DIM, C_RESET); 340 + print_elapsed(result.elapsed_ms); 341 + printf("%s]%s\n", C_DIM, C_RESET); 342 + } 343 + } 344 + 345 + if (pkg_discover_lifecycle_scripts(ctx, "node_modules") == PKG_OK) { 346 + uint32_t script_count = pkg_get_lifecycle_script_count(ctx); 347 + if (script_count > 0) { 348 + printf("\n%s%u%s package%s need%s to run lifecycle scripts:\n", 349 + C_YELLOW, script_count, C_RESET, 350 + script_count == 1 ? "" : "s", 351 + script_count == 1 ? "s" : ""); 352 + 353 + for (uint32_t i = 0; i < script_count; i++) { 354 + pkg_lifecycle_script_t script; 355 + if (pkg_get_lifecycle_script(ctx, i, &script) == PKG_OK) { 356 + printf(" %sโ€ข%s %s%s%s %s(%s)%s\n", 357 + C_DIM, C_RESET, 358 + C_CYAN, script.name, C_RESET, 359 + C_DIM, script.script, C_RESET); 360 + } 361 + } 362 + 363 + printf("\nRun: %sant trust <pkg>%s or %sant trust --all%s\n", C_DIM, C_RESET, C_DIM, C_RESET); 364 + } 365 + } 366 + 367 + pkg_free(ctx); 368 + return EXIT_SUCCESS; 369 + } 370 + 371 + static int cmd_add(const char *package_spec, bool dev) { 372 + print_install_header(dev ? 
"add -D" : "add"); 373 + 374 + char pkg_name[256]; 375 + const char *at_pos = strchr(package_spec, '@'); 376 + if (at_pos == package_spec) { 377 + at_pos = strchr(package_spec + 1, '@'); 378 + } 379 + if (at_pos) { 380 + size_t name_len = (size_t)(at_pos - package_spec); 381 + if (name_len >= sizeof(pkg_name)) name_len = sizeof(pkg_name) - 1; 382 + memcpy(pkg_name, package_spec, name_len); 383 + pkg_name[name_len] = '\0'; 384 + } else { 385 + strncpy(pkg_name, package_spec, sizeof(pkg_name) - 1); 386 + pkg_name[sizeof(pkg_name) - 1] = '\0'; 387 + } 388 + 389 + progress_t progress; 390 + 391 + if (!pkg_verbose) { 392 + progress_start(&progress, "๐Ÿ” Resolving [1/1]"); 393 + } 394 + 395 + pkg_options_t opts = { 396 + .progress_callback = pkg_verbose ? NULL : progress_callback, 397 + .user_data = pkg_verbose ? NULL : &progress, 398 + .verbose = pkg_verbose 399 + }; 400 + pkg_context_t *ctx = pkg_init(&opts); 401 + if (!ctx) { 402 + fprintf(stderr, "Error: Failed to initialize package manager\n"); 403 + return EXIT_FAILURE; 404 + } 405 + 406 + pkg_error_t err = pkg_add(ctx, "package.json", package_spec, dev); 407 + if (err != PKG_OK) { 408 + if (!pkg_verbose) { progress_stop(&progress); } 409 + fprintf(stderr, "Error: %s\n", pkg_error_string(ctx)); 410 + pkg_free(ctx); 411 + return EXIT_FAILURE; 412 + } 413 + 414 + err = pkg_resolve_and_install(ctx, "package.json", "ant.lockb", "node_modules"); 415 + if (err != PKG_OK) { 416 + if (!pkg_verbose) { progress_stop(&progress); } 417 + fprintf(stderr, "Error: %s\n", pkg_error_string(ctx)); 418 + pkg_free(ctx); 419 + return EXIT_FAILURE; 420 + } 421 + 422 + if (!pkg_verbose) progress_stop(&progress); 423 + 424 + pkg_added_package_t added_pkg = {0}; 425 + uint32_t added_count = pkg_get_added_count(ctx); 426 + for (uint32_t i = 0; i < added_count; i++) { 427 + pkg_added_package_t pkg; 428 + if (pkg_get_added_package(ctx, i, &pkg) == PKG_OK && pkg.direct) { 429 + if (strcmp(pkg.name, pkg_name) == 0) { 430 + added_pkg = pkg; 
break; 431 + } 432 + } 433 + } 434 + 435 + pkg_install_result_t result; 436 + if (pkg_get_install_result(ctx, &result) == PKG_OK) { 437 + printf("\n"); 438 + 439 + if (added_pkg.name) { 440 + int bin_count = pkg_list_package_bins("node_modules", added_pkg.name, NULL, NULL); 441 + 442 + if (bin_count > 0) { 443 + printf("%sinstalled%s %s%s@%s%s with binaries:\n", 444 + C_GREEN, C_RESET, 445 + C_BOLD, added_pkg.name, added_pkg.version, C_RESET); 446 + pkg_list_package_bins("node_modules", added_pkg.name, print_bin_callback, NULL); 447 + } else { 448 + printf("%sinstalled%s %s%s@%s%s\n", 449 + C_GREEN, C_RESET, 450 + C_BOLD, added_pkg.name, added_pkg.version, C_RESET); 451 + } 452 + } 453 + 454 + printf("\n%s[%s", C_DIM, C_RESET); 455 + print_elapsed(result.elapsed_ms); 456 + printf("%s]%s done\n", C_DIM, C_RESET); 457 + } 458 + 459 + pkg_free(ctx); 460 + return EXIT_SUCCESS; 461 + } 462 + 463 + static int cmd_remove(const char *package_name) { 464 + print_install_header("remove"); 465 + progress_t progress; 466 + 467 + if (!pkg_verbose) { 468 + progress_start(&progress, "๐Ÿ” Resolving"); 469 + } 470 + 471 + pkg_options_t opts = { 472 + .progress_callback = pkg_verbose ? NULL : progress_callback, 473 + .user_data = pkg_verbose ? 
NULL : &progress, 474 + .verbose = pkg_verbose 475 + }; 476 + pkg_context_t *ctx = pkg_init(&opts); 477 + if (!ctx) { 478 + fprintf(stderr, "Error: Failed to initialize package manager\n"); 479 + return EXIT_FAILURE; 480 + } 481 + 482 + pkg_error_t err = pkg_remove(ctx, "package.json", package_name); 483 + if (err != PKG_OK && err != PKG_NOT_FOUND) { 484 + if (!pkg_verbose) { progress_stop(&progress); } 485 + fprintf(stderr, "Error: %s\n", pkg_error_string(ctx)); 486 + pkg_free(ctx); 487 + return EXIT_FAILURE; 488 + } 489 + 490 + if (err == PKG_NOT_FOUND) { 491 + if (!pkg_verbose) { progress_stop(&progress); } 492 + printf("\n%s[%s", C_DIM, C_RESET); 493 + printf("%s0ms%s", C_BOLD, C_RESET); 494 + printf("%s]%s done\n", C_DIM, C_RESET); 495 + pkg_free(ctx); 496 + return EXIT_SUCCESS; 497 + } 498 + 499 + err = pkg_resolve_and_install(ctx, "package.json", "ant.lockb", "node_modules"); 500 + if (err != PKG_OK) { 501 + if (!pkg_verbose) { progress_stop(&progress); } 502 + fprintf(stderr, "Error: %s\n", pkg_error_string(ctx)); 503 + pkg_free(ctx); 504 + return EXIT_FAILURE; 505 + } 506 + 507 + if (!pkg_verbose) { 508 + progress_stop(&progress); 509 + 510 + } 511 + 512 + pkg_install_result_t result; 513 + if (pkg_get_install_result(ctx, &result) == PKG_OK) { 514 + printf("\n%s%u%s package%s installed %s[%s", 515 + C_GREEN, result.packages_installed, C_RESET, 516 + result.packages_installed == 1 ? 
"" : "s", 517 + C_DIM, C_RESET); 518 + print_elapsed(result.elapsed_ms); 519 + printf("%s]%s\n", C_DIM, C_RESET); 520 + } 521 + 522 + printf("%s-%s Removed: %s%s%s\n", C_RED, C_RESET, C_BOLD, package_name, C_RESET); 523 + pkg_free(ctx); 524 + 525 + return EXIT_SUCCESS; 526 + } 527 + 528 + static int cmd_trust(const char **pkgs, int count, bool all) { 529 + print_install_header("trust"); 530 + 531 + struct timespec start_time; 532 + clock_gettime(CLOCK_MONOTONIC, &start_time); 533 + 534 + pkg_options_t opts = { .verbose = pkg_verbose }; 535 + pkg_context_t *ctx = pkg_init(&opts); 536 + if (!ctx) { 537 + fprintf(stderr, "Error: Failed to initialize package manager\n"); 538 + return EXIT_FAILURE; 539 + } 540 + 541 + if (pkg_discover_lifecycle_scripts(ctx, "node_modules") != PKG_OK) { 542 + fprintf(stderr, "Error: Failed to scan node_modules\n"); 543 + pkg_free(ctx); 544 + return EXIT_FAILURE; 545 + } 546 + 547 + uint32_t script_count = pkg_get_lifecycle_script_count(ctx); 548 + if (script_count == 0) { 549 + printf("No packages need lifecycle scripts to run.\n"); 550 + pkg_free(ctx); 551 + return EXIT_SUCCESS; 552 + } 553 + 554 + const char **to_run = NULL; 555 + uint32_t to_run_count = 0; 556 + 557 + if (all) { 558 + to_run = try_oom(script_count * sizeof(char *)); 559 + if (to_run) { 560 + for (uint32_t i = 0; i < script_count; i++) { 561 + pkg_lifecycle_script_t script; 562 + if (pkg_get_lifecycle_script(ctx, i, &script) == PKG_OK) { 563 + to_run[to_run_count++] = script.name; 564 + } 565 + } 566 + } 567 + } else if (count > 0) { 568 + to_run = try_oom(count * sizeof(char *)); 569 + if (to_run) { 570 + for (int i = 0; i < count; i++) { 571 + bool found = false; 572 + for (uint32_t j = 0; j < script_count; j++) { 573 + pkg_lifecycle_script_t script; 574 + if (pkg_get_lifecycle_script(ctx, j, &script) == PKG_OK) { 575 + if (strcmp(pkgs[i], script.name) == 0) { 576 + to_run[to_run_count++] = script.name; 577 + found = true; break; 578 + } 579 + } 580 + } 581 + if 
(!found) fprintf(stderr, "Warning: %s has no pending lifecycle script\n", pkgs[i]); 582 + } 583 + } 584 + } else { 585 + printf("%s%u%s package%s with lifecycle scripts:\n", 586 + C_YELLOW, script_count, C_RESET, 587 + script_count == 1 ? "" : "s"); 588 + 589 + for (uint32_t i = 0; i < script_count; i++) { 590 + pkg_lifecycle_script_t script; 591 + if (pkg_get_lifecycle_script(ctx, i, &script) == PKG_OK) { 592 + printf(" %sโ€ข%s %s%s%s %s(%s)%s\n", 593 + C_DIM, C_RESET, 594 + C_CYAN, script.name, C_RESET, 595 + C_DIM, script.script, C_RESET); 596 + } 597 + } 598 + printf("\nRun: %sant trust <pkg>%s or %sant trust --all%s\n", C_DIM, C_RESET, C_DIM, C_RESET); 599 + pkg_free(ctx); 600 + return EXIT_SUCCESS; 601 + } 602 + 603 + if (to_run && to_run_count > 0) { 604 + if (pkg_verbose) { 605 + printf("[trust] adding %u packages to trustedDependencies\n", to_run_count); 606 + for (uint32_t i = 0; i < to_run_count; i++) { 607 + printf("[trust] %s\n", to_run[i]); 608 + } 609 + } 610 + pkg_error_t add_err = pkg_add_trusted_dependencies("package.json", to_run, to_run_count); 611 + if (add_err == PKG_OK) { 612 + printf("Added %s%u%s package%s to %strustedDependencies%s in package.json\n", 613 + C_GREEN, to_run_count, C_RESET, 614 + to_run_count == 1 ? "" : "s", 615 + C_BOLD, C_RESET); 616 + } else { 617 + if (pkg_verbose) printf("[trust] failed to add trustedDependencies: error %d\n", add_err); 618 + } 619 + 620 + printf("Running lifecycle scripts for %s%u%s package%s...\n", 621 + C_GREEN, to_run_count, C_RESET, 622 + to_run_count == 1 ? "" : "s"); 623 + pkg_run_postinstall(ctx, "node_modules", to_run, to_run_count); 624 + 625 + struct timespec end_time; 626 + clock_gettime(CLOCK_MONOTONIC, &end_time); 627 + uint64_t elapsed_ms = timespec_diff_ms(&start_time, &end_time); 628 + 629 + printf("\n%s%u%s package%s trusted %s[%s", 630 + C_GREEN, to_run_count, C_RESET, 631 + to_run_count == 1 ? 
"" : "s", 632 + C_DIM, C_RESET); 633 + print_elapsed(elapsed_ms); 634 + printf("%s]%s\n", C_DIM, C_RESET); 635 + free((void *)to_run); 636 + } 637 + 638 + pkg_free(ctx); 639 + return EXIT_SUCCESS; 640 + } 641 + 642 + static int cmd_init(void) { 643 + FILE *fp = fopen("package.json", "r"); 644 + if (fp) { 645 + fclose(fp); 646 + fprintf(stderr, "Error: package.json already exists\n"); 647 + return EXIT_FAILURE; 648 + } 649 + 650 + char cwd[PATH_MAX]; 651 + const char *default_name = "my-project"; 652 + if (getcwd(cwd, sizeof(cwd))) { 653 + char *base = strrchr(cwd, '/'); 654 + if (base && base[1]) default_name = base + 1; 655 + } 656 + 657 + bool interactive = isatty(fileno(stdin)); 658 + 659 + char name[256] = {0}; 660 + char version[64] = {0}; 661 + char entry[256] = {0}; 662 + 663 + if (interactive) { 664 + printf("%sant init%s\n\n", C_BOLD, C_RESET); 665 + 666 + prompt_with_default("package name", default_name, name, sizeof(name)); 667 + prompt_with_default("version", "1.0.0", version, sizeof(version)); 668 + prompt_with_default("entry point", "index.js", entry, sizeof(entry)); 669 + 670 + printf("\n"); 671 + } else { 672 + strncpy(name, default_name, sizeof(name) - 1); 673 + strncpy(version, "1.0.0", sizeof(version) - 1); 674 + strncpy(entry, "index.js", sizeof(entry) - 1); 675 + } 676 + 677 + fp = fopen("package.json", "w"); 678 + if (!fp) { 679 + fprintf(stderr, "Error: Could not create package.json\n"); 680 + return EXIT_FAILURE; 681 + } 682 + 683 + yyjson_mut_doc *doc = yyjson_mut_doc_new(NULL); 684 + yyjson_mut_val *root = yyjson_mut_obj(doc); 685 + yyjson_mut_doc_set_root(doc, root); 686 + 687 + yyjson_mut_obj_add_str(doc, root, "name", name); 688 + yyjson_mut_obj_add_str(doc, root, "version", version); 689 + yyjson_mut_obj_add_str(doc, root, "type", "module"); 690 + yyjson_mut_obj_add_str(doc, root, "main", entry); 691 + 692 + yyjson_mut_val *scripts = yyjson_mut_obj_add_obj(doc, root, "scripts"); 693 + char start_cmd[300]; 694 + snprintf(start_cmd, 
sizeof(start_cmd), "ant %s", entry); 695 + yyjson_mut_obj_add_str(doc, scripts, "start", start_cmd); 696 + 697 + yyjson_mut_obj_add_obj(doc, root, "dependencies"); 698 + yyjson_mut_obj_add_obj(doc, root, "devDependencies"); 699 + 700 + size_t len; char *json_str = yyjson_mut_write( 701 + doc, YYJSON_WRITE_PRETTY_TWO_SPACES 702 + | YYJSON_WRITE_ESCAPE_UNICODE, &len 703 + ); 704 + 705 + if (json_str) { 706 + fwrite(json_str, 1, len, fp); 707 + free(json_str); 708 + } 709 + 710 + yyjson_mut_doc_free(doc); 711 + fclose(fp); 712 + 713 + printf("%s+%s Created %spackage.json%s\n", C_GREEN, C_RESET, C_BOLD, C_RESET); 714 + return EXIT_SUCCESS; 715 + } 716 + 717 + static int cmd_why(const char *package_name) { 718 + struct stat st; 719 + if (stat("ant.lockb", &st) != 0) { 720 + fprintf(stderr, "Error: No lockfile found. Run 'ant install' first.\n"); 721 + return EXIT_FAILURE; 722 + } 723 + 724 + pkg_why_info_t info; 725 + if (pkg_why_info("ant.lockb", package_name, &info) < 0) { 726 + fprintf(stderr, "Error: Failed to read lockfile\n"); 727 + return EXIT_FAILURE; 728 + } 729 + 730 + if (!info.found) { 731 + printf("\n%s%s%s is not installed\n\n", C_BOLD, package_name, C_RESET); 732 + return EXIT_SUCCESS; 733 + } 734 + 735 + const char *type_label = info.is_peer ? " peer" : (info.is_dev ? 
" dev" : ""); 736 + printf("\n%s%s%s@%s%s%s%s%s%s\n", C_BOLD, package_name, C_RESET, C_DIM, info.target_version, C_RESET, C_YELLOW, type_label, C_RESET); 737 + 738 + why_ctx_t ctx = { .target = package_name, .count = 0 }; 739 + int result = pkg_why("ant.lockb", package_name, print_why_callback, &ctx); 740 + 741 + if (result < 0) { 742 + fprintf(stderr, "Error: Failed to read lockfile\n"); 743 + return EXIT_FAILURE; 744 + } 745 + 746 + if (ctx.count == 0) { 747 + printf(" %s(no dependents)%s\n", C_DIM, C_RESET); 748 + } 749 + 750 + printf("\n"); 751 + return EXIT_SUCCESS; 752 + } 753 + 754 + int pkg_cmd_init(int argc, char **argv) { 755 + struct arg_lit *help = arg_lit0("h", "help", "display help"); 756 + struct arg_end *end = arg_end(5); 757 + 758 + void *argtable[] = { help, end }; 759 + int nerrors = arg_parse(argc, argv, argtable); 760 + 761 + int exitcode = EXIT_SUCCESS; 762 + if (help->count > 0) { 763 + printf("Usage: ant init\n\n"); 764 + printf("Create a new package.json\n"); 765 + } else if (nerrors > 0) { 766 + arg_print_errors(stdout, end, "ant init"); 767 + exitcode = EXIT_FAILURE; 768 + } else { 769 + exitcode = cmd_init(); 770 + } 771 + 772 + arg_freetable(argtable, sizeof(argtable)/sizeof(argtable[0])); 773 + return exitcode; 774 + } 775 + 776 + int pkg_cmd_install(int argc, char **argv) { 777 + struct arg_str *pkgs = arg_strn(NULL, NULL, "<package[@version]>", 0, 100, NULL); 778 + struct arg_lit *global = arg_lit0("g", "global", "install globally"); 779 + struct arg_lit *dev = arg_lit0("D", "save-dev", "add as devDependency"); 780 + struct arg_lit *help = arg_lit0("h", "help", "display help"); 781 + struct arg_end *end = arg_end(5); 782 + 783 + void *argtable[] = { pkgs, global, dev, help, end }; 784 + int nerrors = arg_parse(argc, argv, argtable); 785 + 786 + int exitcode = EXIT_SUCCESS; 787 + if (help->count > 0) { 788 + printf("Usage: ant install [packages...] 
[-g] [-D] [--verbose]\n\n"); 789 + printf("Install from lockfile, or add packages if specified.\n"); 790 + printf("\nOptions:\n -g, --global Install globally to ~/.ant/pkg/global\n"); 791 + printf(" -D, --save-dev Add as devDependency\n"); 792 + } else if (nerrors > 0) { 793 + arg_print_errors(stdout, end, "ant install"); 794 + exitcode = EXIT_FAILURE; 795 + } else if (pkgs->count == 0) { 796 + exitcode = cmd_install(); 797 + } else { 798 + bool is_dev = dev->count > 0; 799 + for (int i = 0; i < pkgs->count && exitcode == EXIT_SUCCESS; i++) { 800 + exitcode = global->count > 0 ? cmd_add_global(pkgs->sval[i]) : cmd_add(pkgs->sval[i], is_dev); 801 + } 802 + } 803 + 804 + arg_freetable(argtable, sizeof(argtable)/sizeof(argtable[0])); 805 + return exitcode; 806 + } 807 + 808 + int pkg_cmd_add(int argc, char **argv) { 809 + struct arg_str *pkgs = arg_strn(NULL, NULL, "<package[@version]>", 1, 100, NULL); 810 + struct arg_lit *global = arg_lit0("g", "global", "install globally"); 811 + struct arg_lit *dev = arg_lit0("D", "save-dev", "add as devDependency"); 812 + struct arg_lit *help = arg_lit0("h", "help", "display help"); 813 + struct arg_end *end = arg_end(5); 814 + 815 + void *argtable[] = { pkgs, global, dev, help, end }; 816 + int nerrors = arg_parse(argc, argv, argtable); 817 + 818 + int exitcode = EXIT_SUCCESS; 819 + if (help->count > 0) { 820 + printf("Usage: ant add <package[@version]>... [options]\n\n"); 821 + printf("Add packages to dependencies.\n"); 822 + printf("\nOptions:\n -g, --global Install globally to ~/.ant/pkg/global\n"); 823 + printf(" -D, --save-dev Add as devDependency\n"); 824 + } else if (nerrors > 0) { 825 + arg_print_errors(stdout, end, "ant add"); 826 + exitcode = EXIT_FAILURE; 827 + } else { 828 + bool is_dev = dev->count > 0; 829 + for (int i = 0; i < pkgs->count && exitcode == EXIT_SUCCESS; i++) { 830 + exitcode = global->count > 0 ? 
cmd_add_global(pkgs->sval[i]) : cmd_add(pkgs->sval[i], is_dev); 831 + } 832 + } 833 + 834 + arg_freetable(argtable, sizeof(argtable)/sizeof(argtable[0])); 835 + return exitcode; 836 + } 837 + 838 + int pkg_cmd_remove(int argc, char **argv) { 839 + struct arg_str *pkgs = arg_strn(NULL, NULL, "<package>", 1, 100, NULL); 840 + struct arg_lit *global = arg_lit0("g", "global", "remove from global packages"); 841 + struct arg_lit *help = arg_lit0("h", "help", "display help"); 842 + struct arg_end *end = arg_end(5); 843 + 844 + void *argtable[] = { pkgs, global, help, end }; 845 + int nerrors = arg_parse(argc, argv, argtable); 846 + 847 + int exitcode = EXIT_SUCCESS; 848 + if (help->count > 0) { 849 + printf("Usage: ant remove <package>... [-g]\n\n"); 850 + printf("Remove packages from dependencies.\n"); 851 + printf("\nOptions:\n -g, --global Remove from global packages\n"); 852 + } else if (nerrors > 0) { 853 + arg_print_errors(stdout, end, "ant remove"); 854 + exitcode = EXIT_FAILURE; 855 + } else { 856 + for (int i = 0; i < pkgs->count && exitcode == EXIT_SUCCESS; i++) { 857 + exitcode = global->count > 0 ? cmd_remove_global(pkgs->sval[i]) : cmd_remove(pkgs->sval[i]); 858 + } 859 + } 860 + 861 + arg_freetable(argtable, sizeof(argtable)/sizeof(argtable[0])); 862 + return exitcode; 863 + } 864 + 865 + int pkg_cmd_trust(int argc, char **argv) { 866 + struct arg_str *pkgs = arg_strn(NULL, NULL, "<package>", 0, 100, NULL); 867 + struct arg_lit *all = arg_lit0("a", "all", "trust all packages with lifecycle scripts"); 868 + struct arg_lit *help = arg_lit0("h", "help", "display help"); 869 + struct arg_end *end = arg_end(5); 870 + 871 + void *argtable[] = { pkgs, all, help, end }; 872 + int nerrors = arg_parse(argc, argv, argtable); 873 + 874 + int exitcode = EXIT_SUCCESS; 875 + if (help->count > 0) { 876 + printf("Usage: ant trust [packages...] 
[--all]\n\n"); 877 + printf("Run lifecycle scripts for packages.\n"); 878 + printf(" --all, -a Trust and run all pending lifecycle scripts\n"); 879 + } else if (nerrors > 0) { 880 + arg_print_errors(stdout, end, "ant trust"); 881 + exitcode = EXIT_FAILURE; 882 + } else { 883 + exitcode = cmd_trust(pkgs->sval, pkgs->count, all->count > 0); 884 + } 885 + 886 + arg_freetable(argtable, sizeof(argtable)/sizeof(argtable[0])); 887 + return exitcode; 888 + } 889 + 890 + int pkg_cmd_run(int argc, char **argv) { 891 + if (argc < 2) { 892 + printf("Usage: ant run <script> [-- args...]\n\n"); 893 + printf("Run a script from package.json\n\n"); 894 + printf("Available scripts:\n"); 895 + 896 + int count = pkg_list_scripts("package.json", NULL, NULL); 897 + if (count < 0) { 898 + printf(" (no package.json found)\n"); 899 + } else if (count == 0) { 900 + printf(" (no scripts defined)\n"); 901 + } else { 902 + pkg_list_scripts("package.json", print_script, NULL); 903 + } 904 + return EXIT_SUCCESS; 905 + } 906 + 907 + const char *script_name = argv[1]; 908 + 909 + char script_cmd[4096]; 910 + int script_len = pkg_get_script("package.json", script_name, script_cmd, sizeof(script_cmd)); 911 + if (script_len < 0) { 912 + fprintf(stderr, "Error: script '%s' not found in package.json\n", script_name); 913 + fprintf(stderr, "Try 'ant run' to list available scripts.\n"); 914 + return EXIT_FAILURE; 915 + } 916 + 917 + char extra_args[4096] = {0}; 918 + int extra_args_len = 0; 919 + bool found_separator = false; 920 + 921 + for (int i = 2; i < argc; i++) { 922 + if (!found_separator && strcmp(argv[i], "--") == 0) { 923 + found_separator = true; 924 + continue; 925 + } 926 + if (found_separator) { 927 + if (extra_args_len > 0) { 928 + extra_args[extra_args_len++] = ' '; 929 + } 930 + size_t arg_len = strlen(argv[i]); 931 + if ((size_t)extra_args_len + arg_len < sizeof(extra_args) - 1) { 932 + memcpy(extra_args + extra_args_len, argv[i], arg_len); 933 + extra_args_len += (int)arg_len; 934 + } 
935 + } 936 + } 937 + extra_args[extra_args_len] = '\0'; 938 + 939 + printf("%s$%s %s%s%s", C_MAGENTA, C_RESET, C_BOLD, script_cmd, C_RESET); 940 + if (extra_args_len > 0) { 941 + printf(" %s", extra_args); 942 + } 943 + printf("\n"); 944 + 945 + pkg_script_result_t result = {0}; 946 + pkg_error_t err = pkg_run_script( 947 + "package.json", script_name, "node_modules", 948 + extra_args_len > 0 ? extra_args : NULL, 949 + &result 950 + ); 951 + 952 + if (err != PKG_OK) { 953 + if (err == PKG_NOT_FOUND) { 954 + fprintf(stderr, "Error: script '%s' not found\n", script_name); 955 + } else { 956 + fprintf(stderr, "Error: failed to run script '%s'\n", script_name); 957 + } 958 + return EXIT_FAILURE; 959 + } 960 + 961 + if (result.signal != 0) { 962 + fprintf(stderr, "Script '%s' killed by signal %d\n", script_name, result.signal); 963 + return 128 + result.signal; 964 + } 965 + 966 + return result.exit_code; 967 + } 968 + 969 + int pkg_cmd_exec(int argc, char **argv) { 970 + if (argc < 2) { 971 + printf("Usage: ant x [--ant] <command> [args...]\n\n"); 972 + printf("Run a command from node_modules/.bin or download temporarily\n\n"); 973 + printf("Options:\n"); 974 + printf(" --ant Run with ant instead of node\n\n"); 975 + printf("Available commands:\n"); 976 + 977 + int count = pkg_list_bins("node_modules", NULL, NULL); 978 + if (count < 0) { 979 + printf(" (no binaries found - run 'ant install' first)\n"); 980 + } else if (count == 0) { 981 + printf(" (no binaries installed)\n"); 982 + } else { 983 + pkg_list_bins("node_modules", print_bin_name, NULL); 984 + } 985 + return EXIT_SUCCESS; 986 + } 987 + 988 + bool use_ant = false; 989 + int cmd_idx = 1; 990 + 991 + if (strcmp(argv[1], "--ant") == 0) { 992 + use_ant = true; 993 + cmd_idx = 2; 994 + if (argc < 3) { 995 + fprintf(stderr, "Error: missing command after --ant\n"); 996 + return EXIT_FAILURE; 997 + } 998 + } 999 + 1000 + const char *cmd_name = argv[cmd_idx]; 1001 + char bin_path[4096]; 1002 + 1003 + int path_len = 
pkg_get_bin_path("node_modules", cmd_name, bin_path, sizeof(bin_path)); 1004 + 1005 + if (path_len < 0) { 1006 + const char *global_dir = get_global_dir(); 1007 + if (global_dir[0]) { 1008 + char global_nm[4096]; 1009 + snprintf(global_nm, sizeof(global_nm), "%s/node_modules", global_dir); 1010 + path_len = pkg_get_bin_path(global_nm, cmd_name, bin_path, sizeof(bin_path)); 1011 + } 1012 + } 1013 + 1014 + if (path_len < 0) { 1015 + progress_t progress; 1016 + 1017 + if (!pkg_verbose) { 1018 + char msg[256]; 1019 + snprintf(msg, sizeof(msg), "๐Ÿ” Resolving %s", cmd_name); 1020 + progress_start(&progress, msg); 1021 + } 1022 + 1023 + pkg_options_t opts = { 1024 + .progress_callback = pkg_verbose ? NULL : progress_callback, 1025 + .user_data = pkg_verbose ? NULL : &progress, 1026 + .verbose = pkg_verbose 1027 + }; 1028 + pkg_context_t *ctx = pkg_init(&opts); 1029 + if (!ctx) { 1030 + if (!pkg_verbose) progress_stop(&progress); 1031 + fprintf(stderr, "Error: Failed to initialize package manager\n"); 1032 + return EXIT_FAILURE; 1033 + } 1034 + 1035 + pkg_error_t err = pkg_exec_temp(ctx, cmd_name, bin_path, sizeof(bin_path)); 1036 + if (!pkg_verbose) progress_stop(&progress); 1037 + 1038 + if (err != PKG_OK) { 1039 + const char *err_msg = pkg_error_string(ctx); 1040 + if (err_msg && err_msg[0]) { 1041 + fprintf(stderr, "Error: %s\n", err_msg); 1042 + } else { 1043 + fprintf(stderr, "Error: '%s' not found\n", cmd_name); 1044 + } 1045 + pkg_free(ctx); 1046 + return EXIT_FAILURE; 1047 + } 1048 + pkg_free(ctx); 1049 + } 1050 + 1051 + const char *runtime = use_ant ? 
/*
 * Render a byte count as a human-readable size string into buf.
 * Below 1 KiB prints whole bytes ("123 B"); otherwise prints two decimal
 * places with binary (1024-based) units labelled KB/MB/GB.
 * Returns buf for convenient inline use in printf arguments.
 */
static const char *format_size(uint64_t bytes, char *buf, size_t buf_size) {
    const uint64_t KIB = 1024ULL;
    const uint64_t MIB = KIB * 1024ULL;
    const uint64_t GIB = MIB * 1024ULL;

    if (bytes < KIB) {
        snprintf(buf, buf_size, "%llu B", (unsigned long long)bytes);
    } else if (bytes < MIB) {
        snprintf(buf, buf_size, "%.2f KB", (double)bytes / (double)KIB);
    } else if (bytes < GIB) {
        snprintf(buf, buf_size, "%.2f MB", (double)bytes / (double)MIB);
    } else {
        snprintf(buf, buf_size, "%.2f GB", (double)bytes / (double)GIB);
    }
    return buf;
}
%s.unpackedSize:%s %s%s%s\n", C_DIM, C_RESET, C_BLUE, format_size(info.unpacked_size, size_buf, sizeof(size_buf)), C_RESET); 1148 + 1149 + uint32_t tag_count = pkg_info_dist_tag_count(ctx); 1150 + if (tag_count > 0) { 1151 + printf("\n%sdist-tags:%s\n", C_BOLD, C_RESET); 1152 + for (uint32_t i = 0; i < tag_count; i++) { 1153 + pkg_dist_tag_t tag; 1154 + if (pkg_info_get_dist_tag(ctx, i, &tag) == PKG_OK) { 1155 + const char *tag_color = C_MAGENTA; 1156 + if (strcmp(tag.tag, "beta") == 0) tag_color = C_BLUE; 1157 + else if (strcmp(tag.tag, "latest") == 0) tag_color = C_CYAN; 1158 + printf("%s%s%s: %s\n", tag_color, tag.tag, C_RESET, tag.version); 1159 + } 1160 + } 1161 + } 1162 + 1163 + uint32_t maint_count = pkg_info_maintainer_count(ctx); 1164 + if (maint_count > 0) { 1165 + printf("\n%smaintainers:%s\n", C_BOLD, C_RESET); 1166 + for (uint32_t i = 0; i < maint_count; i++) { 1167 + pkg_maintainer_t maint; 1168 + if (pkg_info_get_maintainer(ctx, i, &maint) == PKG_OK) { 1169 + printf("- %s", maint.name); 1170 + if (maint.email[0]) printf(" <%s>", maint.email); 1171 + printf("\n"); 1172 + } 1173 + } 1174 + } 1175 + 1176 + // Published date 1177 + if (info.published[0]) printf("\n%sPublished:%s %s\n", C_BOLD, C_RESET, info.published); 1178 + 1179 + pkg_free(ctx); 1180 + return EXIT_SUCCESS; 1181 + } 1182 + 1183 + int pkg_cmd_info(int argc, char **argv) { 1184 + struct arg_str *pkg = arg_str1(NULL, NULL, "<package[@version]>", "package to look up"); 1185 + struct arg_lit *help = arg_lit0("h", "help", "display help"); 1186 + struct arg_end *end = arg_end(5); 1187 + 1188 + void *argtable[] = { pkg, help, end }; 1189 + int nerrors = arg_parse(argc, argv, argtable); 1190 + 1191 + int exitcode = EXIT_SUCCESS; 1192 + if (help->count > 0) { 1193 + printf("Usage: ant info <package[@version]>\n\n"); 1194 + printf("Show package information from the npm registry.\n"); 1195 + } else if (nerrors > 0) { 1196 + arg_print_errors(stdout, end, "ant info"); 1197 + exitcode = EXIT_FAILURE; 
1198 + } else { 1199 + exitcode = cmd_info(pkg->sval[0]); 1200 + } 1201 + 1202 + arg_freetable(argtable, sizeof(argtable)/sizeof(argtable[0])); 1203 + return exitcode; 1204 + } 1205 + 1206 + typedef struct { 1207 + int count; 1208 + bool show_path; 1209 + const char *nm_path; 1210 + } ls_ctx_t; 1211 + 1212 + static void print_ls_package(const char *name, void *user_data) { 1213 + ls_ctx_t *ctx = (ls_ctx_t *)user_data; 1214 + 1215 + char pkg_json_path[4096]; 1216 + snprintf(pkg_json_path, sizeof(pkg_json_path), "%s/%s/package.json", ctx->nm_path, name); 1217 + 1218 + FILE *f = fopen(pkg_json_path, "r"); 1219 + if (!f) { 1220 + printf(" %s%s%s\n", C_BOLD, name, C_RESET); 1221 + ctx->count++; 1222 + return; 1223 + } 1224 + 1225 + char buf[8192]; 1226 + size_t len = fread(buf, 1, sizeof(buf) - 1, f); 1227 + fclose(f); 1228 + buf[len] = '\0'; 1229 + 1230 + const char *version = "?"; 1231 + char version_buf[64] = {0}; 1232 + 1233 + char *ver_key = strstr(buf, "\"version\""); 1234 + if (ver_key) { 1235 + char *colon = strchr(ver_key, ':'); 1236 + if (colon) { 1237 + char *quote1 = strchr(colon, '"'); 1238 + if (quote1) { 1239 + char *quote2 = strchr(quote1 + 1, '"'); 1240 + if (quote2) { 1241 + size_t vlen = (size_t)(quote2 - quote1 - 1); 1242 + if (vlen < sizeof(version_buf)) { 1243 + memcpy(version_buf, quote1 + 1, vlen); 1244 + version_buf[vlen] = '\0'; 1245 + version = version_buf; 1246 + } 1247 + } 1248 + } 1249 + } 1250 + } 1251 + 1252 + printf(" %s%s%s@%s%s%s\n", C_BOLD, name, C_RESET, C_DIM, version, C_RESET); 1253 + ctx->count++; 1254 + } 1255 + 1256 + typedef struct { 1257 + int count; 1258 + } global_ls_ctx_t; 1259 + 1260 + static void print_global_package(const char *name, const char *version, void *user_data) { 1261 + global_ls_ctx_t *ctx = (global_ls_ctx_t *)user_data; 1262 + printf(" %s%s%s@%s%s%s\n", C_BOLD, name, C_RESET, C_DIM, version, C_RESET); 1263 + ctx->count++; 1264 + } 1265 + 1266 + static int cmd_ls_global(void) { 1267 + pkg_options_t opts = { 
.verbose = pkg_verbose }; 1268 + pkg_context_t *ctx = pkg_init(&opts); 1269 + if (!ctx) { 1270 + fprintf(stderr, "Error: Failed to initialize package manager\n"); 1271 + return EXIT_FAILURE; 1272 + } 1273 + 1274 + printf("%sGlobal packages%s:\n", C_BOLD, C_RESET); 1275 + 1276 + global_ls_ctx_t ls_ctx = { .count = 0 }; 1277 + pkg_error_t err = pkg_list_global(ctx, print_global_package, &ls_ctx); 1278 + 1279 + if (err == PKG_NOT_FOUND || ls_ctx.count == 0) { 1280 + printf(" (none)\n"); 1281 + } else if (err != PKG_OK) { 1282 + fprintf(stderr, "Error: %s\n", pkg_error_string(ctx)); 1283 + pkg_free(ctx); 1284 + return EXIT_FAILURE; 1285 + } else { 1286 + printf("\n%s%d%s package%s\n", C_GREEN, ls_ctx.count, C_RESET, ls_ctx.count == 1 ? "" : "s"); 1287 + } 1288 + 1289 + pkg_free(ctx); 1290 + return EXIT_SUCCESS; 1291 + } 1292 + 1293 + static int cmd_ls(const char *nm_path, bool is_global) { 1294 + struct stat st; 1295 + if (stat(nm_path, &st) != 0) { 1296 + if (is_global) { 1297 + printf("No global packages installed.\n"); 1298 + } else { 1299 + printf("No packages installed. 
Run 'ant install' first.\n"); 1300 + } 1301 + return EXIT_SUCCESS; 1302 + } 1303 + 1304 + if (is_global) { 1305 + printf("%sGlobal packages%s:\n", C_BOLD, C_RESET); 1306 + } else { 1307 + printf("%sInstalled packages%s:\n", C_BOLD, C_RESET); 1308 + } 1309 + 1310 + ls_ctx_t ctx = { .count = 0, .show_path = false, .nm_path = nm_path }; 1311 + 1312 + if (is_global) { 1313 + const char *global_dir = get_global_dir(); 1314 + char pkg_json_path[4096]; 1315 + snprintf(pkg_json_path, sizeof(pkg_json_path), "%s/package.json", global_dir); 1316 + 1317 + yyjson_doc *doc = yyjson_read_file(pkg_json_path, 0, NULL, NULL); 1318 + if (!doc) { 1319 + printf(" (none)\n"); 1320 + return EXIT_SUCCESS; 1321 + } 1322 + 1323 + yyjson_val *root = yyjson_doc_get_root(doc); 1324 + yyjson_val *deps = yyjson_obj_get(root, "dependencies"); 1325 + 1326 + if (!deps || !yyjson_is_obj(deps)) { 1327 + yyjson_doc_free(doc); 1328 + printf(" (none)\n"); 1329 + return EXIT_SUCCESS; 1330 + } 1331 + 1332 + yyjson_obj_iter iter; 1333 + yyjson_obj_iter_init(deps, &iter); 1334 + yyjson_val *key; 1335 + while ((key = yyjson_obj_iter_next(&iter)) != NULL) { 1336 + const char *pkg_name = yyjson_get_str(key); 1337 + if (pkg_name) print_ls_package(pkg_name, &ctx); 1338 + } 1339 + 1340 + yyjson_doc_free(doc); 1341 + } else { 1342 + DIR *dir = opendir(nm_path); 1343 + if (!dir) { 1344 + printf(" (none)\n"); 1345 + return EXIT_SUCCESS; 1346 + } 1347 + 1348 + struct dirent *entry; 1349 + while ((entry = readdir(dir)) != NULL) { 1350 + if (entry->d_name[0] == '.') continue; 1351 + 1352 + if (entry->d_name[0] == '@') { 1353 + char scope_path[4096]; 1354 + snprintf(scope_path, sizeof(scope_path), "%s/%s", nm_path, entry->d_name); 1355 + DIR *scope_dir = opendir(scope_path); 1356 + if (scope_dir) { 1357 + struct dirent *scoped; 1358 + while ((scoped = readdir(scope_dir)) != NULL) { 1359 + if (scoped->d_name[0] == '.') continue; 1360 + char full_name[512]; 1361 + snprintf(full_name, sizeof(full_name), "%s/%s", 
entry->d_name, scoped->d_name); 1362 + print_ls_package(full_name, &ctx); 1363 + } 1364 + closedir(scope_dir); 1365 + } 1366 + } else { 1367 + print_ls_package(entry->d_name, &ctx); 1368 + } 1369 + } 1370 + closedir(dir); 1371 + } 1372 + 1373 + if (ctx.count == 0) { 1374 + printf(" (none)\n"); 1375 + } else { 1376 + printf("\n%s%d%s package%s\n", C_GREEN, ctx.count, C_RESET, ctx.count == 1 ? "" : "s"); 1377 + } 1378 + 1379 + return EXIT_SUCCESS; 1380 + } 1381 + 1382 + int pkg_cmd_ls(int argc, char **argv) { 1383 + struct arg_lit *global = arg_lit0("g", "global", "list global packages"); 1384 + struct arg_lit *help = arg_lit0("h", "help", "display help"); 1385 + struct arg_end *end = arg_end(5); 1386 + 1387 + void *argtable[] = { global, help, end }; 1388 + int nerrors = arg_parse(argc, argv, argtable); 1389 + 1390 + int exitcode = EXIT_SUCCESS; 1391 + if (help->count > 0) { 1392 + printf("Usage: ant ls [-g]\n\n"); 1393 + printf("List installed packages.\n"); 1394 + printf("\nOptions:\n -g, --global List global packages\n"); 1395 + } else if (nerrors > 0) { 1396 + arg_print_errors(stdout, end, "ant ls"); 1397 + exitcode = EXIT_FAILURE; 1398 + } else if (global->count > 0) { 1399 + exitcode = cmd_ls_global(); 1400 + } else { 1401 + exitcode = cmd_ls("node_modules", false); 1402 + } 1403 + 1404 + arg_freetable(argtable, sizeof(argtable)/sizeof(argtable[0])); 1405 + return exitcode; 1406 + } 1407 + 1408 + static int cmd_cache_info(void) { 1409 + pkg_options_t opts = { .verbose = pkg_verbose }; 1410 + pkg_context_t *ctx = pkg_init(&opts); 1411 + if (!ctx) { 1412 + fprintf(stderr, "Error: Failed to initialize package manager\n"); 1413 + return EXIT_FAILURE; 1414 + } 1415 + 1416 + pkg_cache_stats_t stats; 1417 + pkg_error_t err = pkg_cache_stats(ctx, &stats); 1418 + if (err != PKG_OK) { 1419 + fprintf(stderr, "Error: Failed to get cache stats\n"); 1420 + pkg_free(ctx); 1421 + return EXIT_FAILURE; 1422 + } 1423 + 1424 + char size_buf[64], db_buf[64]; 1425 + printf("%sCache 
location:%s ~/.ant/pkg\n", C_BOLD, C_RESET); 1426 + printf("%sPackages:%s %u\n", C_BOLD, C_RESET, stats.package_count); 1427 + printf("%sSize:%s %s\n", C_BOLD, C_RESET, format_size(stats.total_size, size_buf, sizeof(size_buf))); 1428 + printf("%sDB size:%s %s\n", C_BOLD, C_RESET, format_size(stats.db_size, db_buf, sizeof(db_buf))); 1429 + 1430 + pkg_free(ctx); 1431 + return EXIT_SUCCESS; 1432 + } 1433 + 1434 + static int cmd_cache_prune(uint32_t max_age_days) { 1435 + pkg_options_t opts = { .verbose = pkg_verbose }; 1436 + pkg_context_t *ctx = pkg_init(&opts); 1437 + if (!ctx) { 1438 + fprintf(stderr, "Error: Failed to initialize package manager\n"); 1439 + return EXIT_FAILURE; 1440 + } 1441 + 1442 + int32_t pruned = pkg_cache_prune(ctx, max_age_days); 1443 + if (pruned < 0) { 1444 + fprintf(stderr, "Error: Failed to prune cache\n"); 1445 + pkg_free(ctx); 1446 + return EXIT_FAILURE; 1447 + } 1448 + 1449 + if (pruned == 0) { 1450 + printf("No packages to prune (all packages newer than %u days)\n", max_age_days); 1451 + } else { 1452 + printf("%sPruned%s %d package%s older than %u days\n", 1453 + C_GREEN, C_RESET, pruned, pruned == 1 ? 
"" : "s", max_age_days); 1454 + } 1455 + 1456 + pkg_free(ctx); 1457 + return EXIT_SUCCESS; 1458 + } 1459 + 1460 + static int cmd_cache_sync(void) { 1461 + pkg_options_t opts = { .verbose = pkg_verbose }; 1462 + pkg_context_t *ctx = pkg_init(&opts); 1463 + if (!ctx) { 1464 + fprintf(stderr, "Error: Failed to initialize package manager\n"); 1465 + return EXIT_FAILURE; 1466 + } 1467 + 1468 + pkg_cache_sync(ctx); 1469 + printf("%sCache synced%s\n", C_GREEN, C_RESET); 1470 + 1471 + pkg_free(ctx); 1472 + return EXIT_SUCCESS; 1473 + } 1474 + 1475 + int pkg_cmd_cache(int argc, char **argv) { 1476 + if (argc < 2) { 1477 + printf("Usage: ant cache <command>\n\n"); 1478 + printf("Manage the package cache.\n\n"); 1479 + printf("Commands:\n"); 1480 + printf(" info Show cache statistics\n"); 1481 + printf(" prune [days] Remove packages older than N days (default: 30)\n"); 1482 + printf(" sync Sync cache to disk\n"); 1483 + return EXIT_SUCCESS; 1484 + } 1485 + 1486 + const char *subcmd = argv[1]; 1487 + 1488 + if (strcmp(subcmd, "info") == 0) { 1489 + return cmd_cache_info(); 1490 + } else if (strcmp(subcmd, "prune") == 0) { 1491 + uint32_t days = 30; 1492 + if (argc >= 3) { 1493 + days = (uint32_t)atoi(argv[2]); 1494 + if (days == 0) days = 30; 1495 + } 1496 + return cmd_cache_prune(days); 1497 + } else if (strcmp(subcmd, "sync") == 0) { 1498 + return cmd_cache_sync(); 1499 + } else { 1500 + fprintf(stderr, "Unknown cache command: %s\n", subcmd); 1501 + fprintf(stderr, "Run 'ant cache' for usage.\n"); 1502 + return EXIT_FAILURE; 1503 + } 1504 + }
+109 -13
src/main.c
··· 2 2 #include <arena.h> 3 3 4 4 #include <oxc.h> 5 + #include <cli/pkg.h> 5 6 #include <stdio.h> 6 7 #include <stdlib.h> 7 8 #include <string.h> 9 + #include <unistd.h> 8 10 #include <sys/stat.h> 9 11 #include <argtable3.h> 10 12 11 13 #include "ant.h" 14 + #include "config.h" 12 15 #include "repl.h" 13 16 #include "utils.h" 14 17 #include "reactor.h" ··· 48 51 #include "modules/collections.h" 49 52 50 53 int js_result = EXIT_SUCCESS; 54 + typedef int (*cmd_fn)(int argc, char **argv); 55 + 56 + typedef struct { 57 + const char *name; 58 + const char *alias; 59 + const char *desc; 60 + cmd_fn fn; 61 + } subcommand_t; 62 + 63 + static const subcommand_t subcommands[] = { 64 + {"init", NULL, "Create a new package.json", pkg_cmd_init}, 65 + {"install", "i", "Install dependencies from lockfile", pkg_cmd_install}, 66 + {"add", "a", "Add a package to dependencies", pkg_cmd_add}, 67 + {"remove", "rm", "Remove a package from dependencies", pkg_cmd_remove}, 68 + {"trust", NULL, "Run lifecycle scripts for packages", pkg_cmd_trust}, 69 + {"run", NULL, "Run a script from package.json", pkg_cmd_run}, 70 + {"exec", "x", "Run a command from node_modules/.bin", pkg_cmd_exec}, 71 + {"why", "explain", "Show why a package is installed", pkg_cmd_why}, 72 + {"info", NULL, "Show package information from registry", pkg_cmd_info}, 73 + {"ls", "list", "List installed packages", pkg_cmd_ls}, 74 + {"cache", NULL, "Manage the package cache", pkg_cmd_cache}, 75 + {NULL, NULL, NULL, NULL} 76 + }; 77 + 78 + static const subcommand_t *find_subcommand(const char *name) { 79 + for (const subcommand_t *cmd = subcommands; cmd->name; cmd++) { 80 + if (strcmp(name, cmd->name) == 0) return cmd; 81 + if (cmd->alias && strcmp(name, cmd->alias) == 0) return cmd; 82 + } 83 + return NULL; 84 + } 85 + 86 + static void print_subcommands(void) { 87 + printf("Commands:\n"); 88 + for (const subcommand_t *cmd = subcommands; cmd->name; cmd++) { 89 + printf(" %-12s %s\n", cmd->name, cmd->desc); 90 + } 91 + 
printf("\n"); 92 + } 51 93 52 94 static void eval_code(struct js *js, struct arg_str *eval, struct arg_lit *print) { 53 95 const char *script = eval->sval[0]; ··· 161 203 return EXIT_SUCCESS; 162 204 } 163 205 164 - int main(int argc, char *argv[]) { 206 + int main(int argc, char *argv[]) { 207 + int filtered_argc = 0; 208 + 209 + const char *binary_name = strrchr(argv[0], '/'); 210 + binary_name = binary_name ? binary_name + 1 : argv[0]; 211 + 212 + if (strcmp(binary_name, "antx") == 0) { 213 + char **exec_argv = try_oom(sizeof(char*) * (argc + 2)); 214 + exec_argv[0] = argv[0]; exec_argv[1] = "x"; 215 + for (int i = 1; i < argc; i++) exec_argv[i + 1] = argv[i]; 216 + exec_argv[argc + 1] = NULL; 217 + 218 + int exitcode = pkg_cmd_exec(argc, exec_argv + 1); 219 + free(exec_argv); return exitcode; 220 + } 221 + 222 + char **filtered_argv = try_oom(sizeof(char*) * argc); 223 + for (int i = 0; i < argc; i++) { 224 + if (strcmp(argv[i], "--verbose") == 0) pkg_verbose = true; 225 + else if (strcmp(argv[i], "--no-color") == 0) io_no_color = true; 226 + else filtered_argv[filtered_argc++] = argv[i]; 227 + } 228 + 229 + if (filtered_argc >= 2 && filtered_argv[1][0] != '-') { 230 + const subcommand_t *cmd = find_subcommand(filtered_argv[1]); 231 + if (cmd) { 232 + int exitcode = cmd->fn(filtered_argc - 1, filtered_argv + 1); 233 + free(filtered_argv); 234 + return exitcode; 235 + } 236 + 237 + if (pkg_script_exists("package.json", filtered_argv[1])) { 238 + int exitcode = pkg_cmd_run(filtered_argc, filtered_argv); 239 + free(filtered_argv); 240 + return exitcode; 241 + } 242 + } 243 + 244 + argc = filtered_argc; 245 + argv = filtered_argv; 246 + 165 247 struct arg_lit *help = arg_lit0("h", "help", "display this help and exit"); 166 248 struct arg_lit *version = arg_lit0("v", "version", "display version information and exit"); 167 - struct arg_lit *no_color = arg_lit0(NULL, "no-color", "disable colored output"); 249 + struct arg_lit *version_raw = arg_lit0(NULL, 
"version-raw", "raw version number for scripts"); 168 250 struct arg_str *eval = arg_str0("e", "eval", "<script>", "evaluate script"); 169 251 struct arg_lit *print = arg_lit0("p", "print", "evaluate script and print result"); 170 252 struct arg_int *initial_mem = arg_int0(NULL, "initial-mem", "<size>", "initial memory size in KB (default: 16kb)"); ··· 174 256 struct arg_end *end = arg_end(20); 175 257 176 258 void *argtable[] = { 177 - help, version, no_color, eval, 178 - print, initial_mem, max_mem, 259 + help, version, version_raw, 260 + eval, print, initial_mem, max_mem, 179 261 localstorage_file, file, end 180 262 }; 181 - 263 + 182 264 int nerrors = arg_parse(argc, argv, argtable); 183 265 184 266 if (help->count > 0) { 185 267 printf("Ant sized JavaScript\n\n"); 186 - printf("Usage: ant [options] [module.js]\n\n"); 268 + printf("Usage: ant [options] [module.js]\n"); 269 + printf(" ant <command> [args]\n\n"); 270 + print_subcommands(); 187 271 printf("If no module file is specified, ant starts in REPL mode.\n\n"); 188 272 printf("Options:\n"); 189 - arg_print_glossary(stdout, argtable, " %-25s %s\n"); 273 + printf(" %-28s %s\n", "--verbose", "enable verbose output"); 274 + printf(" %-28s %s\n", "--no-color", "disable colored output"); 275 + arg_print_glossary(stdout, argtable, " %-28s %s\n"); 190 276 arg_freetable(argtable, ARGTABLE_COUNT); 277 + free(filtered_argv); 191 278 return EXIT_SUCCESS; 192 279 } 193 280 194 - if (version->count > 0) return ant_version(argtable); 281 + if (version_raw->count > 0) { 282 + fputs(ANT_VERSION "\n", stdout); 283 + arg_freetable(argtable, ARGTABLE_COUNT); 284 + free(filtered_argv); return EXIT_SUCCESS; 285 + } 286 + 287 + if (version->count > 0) { 288 + int res = ant_version(argtable); 289 + free(filtered_argv); return res; 290 + } 195 291 196 292 if (nerrors > 0) { 197 293 arg_print_errors(stdout, end, "ant"); 198 294 printf("Try 'ant --help' for more information.\n"); 199 295 arg_freetable(argtable, ARGTABLE_COUNT); 200 
- return EXIT_FAILURE; 296 + free(filtered_argv); return EXIT_FAILURE; 201 297 } 202 298 203 299 bool repl_mode = (file->count == 0 && eval->count == 0); 204 300 const char *module_file = repl_mode ? NULL : (file->count > 0 ? file->filename[0] : NULL); 205 - 206 - if (no_color->count > 0) io_no_color = true; 207 301 208 302 struct js *js = js_create_dynamic( 209 303 initial_mem->count > 0 ? (size_t)initial_mem->ival[0] * 1024 : 0, ··· 213 307 if (js == NULL) { 214 308 fprintf(stderr, "Error: Failed to allocate JavaScript runtime\n"); 215 309 arg_freetable(argtable, ARGTABLE_COUNT); 310 + free(filtered_argv); 216 311 return EXIT_FAILURE; 217 312 } 218 313 ··· 270 365 if (stat(module_file, &path_stat) == 0 && S_ISDIR(path_stat.st_mode)) { 271 366 size_t len = strlen(module_file); 272 367 int has_slash = (len > 0 && module_file[len - 1] == '/'); 273 - resolved_file = malloc(len + 10 + (has_slash ? 0 : 1)); 368 + resolved_file = try_oom(len + 10 + (has_slash ? 0 : 1)); 274 369 sprintf(resolved_file, "%s%sindex.js", module_file, has_slash ? "" : "/"); 275 370 module_file = resolved_file; 276 371 } ··· 283 378 284 379 js_destroy(js); 285 380 arg_freetable(argtable, ARGTABLE_COUNT); 381 + free(filtered_argv); 286 382 287 383 return js_result; 288 - } 384 + }
+2 -3
src/modules/readline.c
··· 234 234 #endif 235 235 236 236 static void write_output(rl_interface_t *iface, const char *str) { 237 - (void)iface; 238 - printf("%s", str); 237 + fputs(str, stdout); 239 238 fflush(stdout); 240 239 } 241 240 ··· 1201 1200 default: seq = "\033[2K\r"; break; 1202 1201 } 1203 1202 1204 - printf("%s", seq); 1203 + fputs(seq, stdout); 1205 1204 fflush(stdout); 1206 1205 1207 1206 return js_true;
+78
src/pkg/build.zig
const std = @import("std");

/// Fetch an environment variable as an owned slice, or null when unset.
/// Allocations come from the page allocator and live for the duration of
/// the build script, so most call sites never free them.
fn getEnv(key: []const u8) ?[]const u8 {
    return std.process.getEnvVarOwned(std.heap.page_allocator, key) catch null;
}

/// Minimum deployment version when targeting macOS (15.0.0);
/// every other OS gets no explicit minimum.
fn darwinMinVersion(os_tag: ?std.Target.Os.Tag) ?std.Target.Query.OsVersion {
    const tag = os_tag orelse return null;
    if (tag != .macos) return null;
    return .{ .semver = .{ .major = 15, .minor = 0, .patch = 0 } };
}

pub fn build(b: *std.Build) void {
    // Target selection: an explicit PKG_TARGET of the form "arch-os"
    // (where "darwin" is mapped to macOS) overrides -Dtarget.
    const target = resolve: {
        const raw = getEnv("PKG_TARGET") orelse break :resolve b.standardTargetOptions(.{});
        defer std.heap.page_allocator.free(raw);
        var parts = std.mem.splitScalar(u8, raw, '-');

        const cpu_arch = if (parts.next()) |a|
            std.meta.stringToEnum(std.Target.Cpu.Arch, a)
        else
            null;

        const os_tag = if (parts.next()) |o| mapped: {
            if (std.mem.eql(u8, o, "darwin")) break :mapped std.Target.Os.Tag.macos;
            break :mapped std.meta.stringToEnum(std.Target.Os.Tag, o);
        } else null;

        std.debug.print("[zig.build] cpu_arch: {?}\n", .{cpu_arch});
        std.debug.print("[zig.build] os_tag: {?}\n", .{os_tag});

        break :resolve b.resolveTargetQuery(.{
            .cpu_arch = cpu_arch,
            .os_tag = os_tag,
            .os_version_min = darwinMinVersion(os_tag),
            .cpu_model = .baseline,
        });
    };

    // Optional include paths for vendored C dependencies, passed in from
    // the outer build via environment variables (null when unset).
    const include_dirs = [_]?[]const u8{
        getEnv("LMDB_INCLUDE"),
        getEnv("ZLIB_INCLUDE"),
        getEnv("LIBUV_INCLUDE"),
        getEnv("YYJSON_INCLUDE"),
    };

    const lib = b.addLibrary(.{
        .name = "pkg",
        .root_module = b.createModule(.{
            .root_source_file = b.path("root.zig"),
            .target = target,
            .optimize = .ReleaseFast,
            .link_libc = true,
            .link_libcpp = true,
            .omit_frame_pointer = true,
            .unwind_tables = .none,
            .strip = true,
        }),
    });

    // LLVM backend everywhere; LLD everywhere except Darwin.
    // NOTE(review): presumably macOS needs the system linker — confirm.
    lib.use_llvm = true;
    if (!target.result.os.tag.isDarwin()) lib.use_lld = true;

    lib.addCSourceFile(.{
        .file = b.path("metadata.c"),
        .flags = &.{ "-O3", "-DNDEBUG" },
    });

    // Embed the runtime version so Zig code can read it via @import("config").
    const version = getEnv("ANT_VERSION") orelse "unknown";
    const options = b.addOptions();
    options.addOption([]const u8, "version", version);

    lib.root_module.addOptions("config", options);
    lib.root_module.addCMacro("NDEBUG", "1");

    for (include_dirs) |maybe_dir| {
        if (maybe_dir) |dir| lib.root_module.addIncludePath(.{ .cwd_relative = dir });
    }

    b.installArtifact(lib);
}
+677
src/pkg/cache.zig
··· 1 + const std = @import("std"); 2 + 3 + const c = @cImport({ 4 + @cInclude("lmdb.h"); 5 + }); 6 + 7 + extern fn strip_npm_metadata(json_data: [*]const u8, json_len: usize, out_len: *usize) ?[*]u8; 8 + extern fn strip_metadata_free(ptr: [*]u8) void; 9 + 10 + pub const CacheEntry = struct { 11 + integrity: [64]u8, 12 + path: []const u8, 13 + unpacked_size: u64, 14 + file_count: u32, 15 + cached_at: i64, 16 + allocator: ?std.mem.Allocator = null, 17 + 18 + pub fn deinit(self: *CacheEntry) void { 19 + if (self.allocator) |alloc| alloc.free(self.path); 20 + } 21 + }; 22 + 23 + const SerializedEntry = extern struct { 24 + unpacked_size: u64, 25 + file_count: u32, 26 + cached_at: i64, 27 + path_len: u32, 28 + }; 29 + 30 + fn check(rc: c_int) !void { 31 + if (rc != 0) return error.MdbError; 32 + } 33 + 34 + pub const CacheDB = struct { 35 + env: *c.MDB_env, 36 + dbi_primary: c.MDB_dbi, 37 + dbi_secondary: c.MDB_dbi, 38 + dbi_metadata: c.MDB_dbi, 39 + cache_dir: []const u8, 40 + allocator: std.mem.Allocator, 41 + 42 + const MAP_SIZE: usize = 8 * 1024 * 1024 * 1024; 43 + const METADATA_TTL_SECS: i64 = 24 * 60 * 60; 44 + 45 + pub fn open(cache_dir: []const u8) !*CacheDB { 46 + const allocator = std.heap.c_allocator; 47 + 48 + std.fs.cwd().makePath(cache_dir) catch |err| switch (err) { 49 + error.PathAlreadyExists => {}, 50 + else => return error.CacheError, 51 + }; 52 + 53 + const packages_path = try std.fmt.allocPrintSentinel(allocator, "{s}/cache", .{cache_dir}, 0); 54 + defer allocator.free(packages_path); 55 + std.fs.cwd().makePath(packages_path) catch |err| switch (err) { 56 + error.PathAlreadyExists => {}, 57 + else => return error.CacheError, 58 + }; 59 + 60 + var env: ?*c.MDB_env = null; 61 + if (c.mdb_env_create(&env) != 0) { 62 + return error.DatabaseOpen; 63 + } 64 + errdefer c.mdb_env_close(env); 65 + 66 + try check(c.mdb_env_set_mapsize(env, MAP_SIZE)); 67 + try check(c.mdb_env_set_maxdbs(env, 3)); 68 + 69 + const db_path = try 
std.fmt.allocPrintSentinel(allocator, "{s}/index.lmdb", .{cache_dir}, 0); 70 + defer allocator.free(db_path); 71 + 72 + const flags: c_uint = c.MDB_NOSUBDIR | c.MDB_NOSYNC; 73 + if (c.mdb_env_open(env, db_path.ptr, flags, 0o644) != 0) { 74 + return error.DatabaseOpen; 75 + } 76 + 77 + const self = try allocator.create(CacheDB); 78 + errdefer allocator.destroy(self); 79 + 80 + self.* = .{ 81 + .env = env.?, 82 + .dbi_primary = 0, 83 + .dbi_secondary = 0, 84 + .dbi_metadata = 0, 85 + .cache_dir = try allocator.dupe(u8, cache_dir), 86 + .allocator = allocator, 87 + }; 88 + 89 + try self.openDatabases(); 90 + self.autoPruneMetadata(); 91 + 92 + return self; 93 + } 94 + 95 + fn autoPruneMetadata(self: *CacheDB) void { 96 + var txn: ?*c.MDB_txn = null; 97 + if (c.mdb_txn_begin(self.env, null, 0, &txn) != 0) return; 98 + 99 + self.pruneExpiredMetadata(txn.?); 100 + _ = c.mdb_txn_commit(txn); 101 + } 102 + 103 + fn openDatabases(self: *CacheDB) !void { 104 + var txn: ?*c.MDB_txn = null; 105 + 106 + if (c.mdb_txn_begin(self.env, null, 0, &txn) != 0) { 107 + return error.DatabaseOpen; 108 + } errdefer c.mdb_txn_abort(txn); 109 + 110 + if (c.mdb_dbi_open(txn, "primary", c.MDB_CREATE, &self.dbi_primary) != 0) return error.DatabaseOpen; 111 + if (c.mdb_dbi_open(txn, "secondary", c.MDB_CREATE, &self.dbi_secondary) != 0) return error.DatabaseOpen; 112 + if (c.mdb_dbi_open(txn, "metadata", c.MDB_CREATE, &self.dbi_metadata) != 0) return error.DatabaseOpen; 113 + if (c.mdb_txn_commit(txn) != 0) return error.DatabaseOpen; 114 + } 115 + 116 + pub fn close(self: *CacheDB) void { 117 + c.mdb_dbi_close(self.env, self.dbi_primary); 118 + c.mdb_dbi_close(self.env, self.dbi_secondary); 119 + c.mdb_dbi_close(self.env, self.dbi_metadata); 120 + c.mdb_env_close(self.env); 121 + 122 + self.allocator.free(self.cache_dir); 123 + self.allocator.destroy(self); 124 + } 125 + 126 + fn makeIntegrityKey(integrity: *const [64]u8) [66]u8 { 127 + var key: [66]u8 = undefined; 128 + key[0] = 'i'; key[1] = 
':'; 129 + @memcpy(key[2..66], integrity); 130 + return key; 131 + } 132 + 133 + fn makeNameKey(allocator: std.mem.Allocator, name: []const u8, version: []const u8) ![]u8 { 134 + return std.fmt.allocPrint(allocator, "n:{s}@{s}", .{ name, version }); 135 + } 136 + 137 + pub fn lookup(self: *CacheDB, integrity: *const [64]u8) ?CacheEntry { 138 + var txn: ?*c.MDB_txn = null; 139 + if (c.mdb_txn_begin(self.env, null, c.MDB_RDONLY, &txn) != 0) { 140 + return null; 141 + } defer c.mdb_txn_abort(txn); 142 + 143 + const key_bytes = makeIntegrityKey(integrity); 144 + var key = c.MDB_val{ 145 + .mv_size = key_bytes.len, 146 + .mv_data = @constCast(&key_bytes), 147 + }; 148 + var value: c.MDB_val = undefined; 149 + 150 + if (c.mdb_get(txn, self.dbi_primary, &key, &value) != 0) return null; 151 + return deserializeEntry(integrity, value, self.allocator); 152 + } 153 + 154 + pub fn hasIntegrity(self: *CacheDB, integrity: *const [64]u8) bool { 155 + var txn: ?*c.MDB_txn = null; 156 + if (c.mdb_txn_begin(self.env, null, c.MDB_RDONLY, &txn) != 0) { 157 + return false; 158 + } defer c.mdb_txn_abort(txn); 159 + 160 + const key_bytes = makeIntegrityKey(integrity); 161 + var key = c.MDB_val{ 162 + .mv_size = key_bytes.len, 163 + .mv_data = @constCast(&key_bytes), 164 + }; 165 + var value: c.MDB_val = undefined; 166 + 167 + return c.mdb_get(txn, self.dbi_primary, &key, &value) == 0; 168 + } 169 + 170 + fn deserializeEntry(integrity: *const [64]u8, value: c.MDB_val, allocator: std.mem.Allocator) ?CacheEntry { 171 + if (value.mv_size < @sizeOf(SerializedEntry)) return null; 172 + 173 + const data: [*]const u8 = @ptrCast(value.mv_data); 174 + 175 + var header: SerializedEntry = undefined; 176 + @memcpy(std.mem.asBytes(&header), data[0..@sizeOf(SerializedEntry)]); 177 + 178 + const path_start = @sizeOf(SerializedEntry); 179 + if (value.mv_size < path_start + header.path_len) return null; 180 + 181 + const path = allocator.dupe(u8, data[path_start..][0..header.path_len]) catch return null; 
182 + 183 + return CacheEntry{ 184 + .integrity = integrity.*, 185 + .path = path, 186 + .unpacked_size = header.unpacked_size, 187 + .file_count = header.file_count, 188 + .cached_at = header.cached_at, 189 + .allocator = allocator, 190 + }; 191 + } 192 + 193 + pub fn lookupByName(self: *CacheDB, name: []const u8, version: []const u8) ?CacheEntry { 194 + var txn: ?*c.MDB_txn = null; 195 + if (c.mdb_txn_begin(self.env, null, c.MDB_RDONLY, &txn) != 0) { 196 + return null; 197 + } defer c.mdb_txn_abort(txn); 198 + 199 + const name_key = makeNameKey(self.allocator, name, version) catch return null; 200 + defer self.allocator.free(name_key); 201 + 202 + var key = c.MDB_val{ 203 + .mv_size = name_key.len, 204 + .mv_data = @constCast(name_key.ptr), 205 + }; 206 + var value: c.MDB_val = undefined; 207 + 208 + if (c.mdb_get(txn, self.dbi_secondary, &key, &value) != 0) return null; 209 + if (value.mv_size != 64) return null; 210 + 211 + const integrity: *const [64]u8 = @ptrCast(value.mv_data); 212 + return self.lookup(integrity); 213 + } 214 + 215 + pub const BatchHit = struct { 216 + index: u32, 217 + file_count: u32, 218 + }; 219 + 220 + pub fn batchLookup( 221 + self: *CacheDB, 222 + integrities: []const [64]u8, 223 + allocator: std.mem.Allocator, 224 + ) !struct { 225 + items: []BatchHit, 226 + allocator: std.mem.Allocator, 227 + pub fn deinit(s: *@This()) void { 228 + s.allocator.free(s.items); 229 + } 230 + } { 231 + var hits = std.ArrayListUnmanaged(BatchHit){}; 232 + errdefer hits.deinit(allocator); 233 + 234 + var txn: ?*c.MDB_txn = null; 235 + if (c.mdb_txn_begin(self.env, null, c.MDB_RDONLY, &txn) != 0) { 236 + return .{ .items = &.{}, .allocator = allocator }; 237 + } defer c.mdb_txn_abort(txn); 238 + 239 + for (integrities, 0..) 
|integrity, i| { 240 + const key_bytes = makeIntegrityKey(&integrity); 241 + var key = c.MDB_val{ 242 + .mv_size = key_bytes.len, 243 + .mv_data = @constCast(&key_bytes), 244 + }; 245 + var value: c.MDB_val = undefined; 246 + if (c.mdb_get(txn, self.dbi_primary, &key, &value) == 0) { 247 + var file_count: u32 = 0; 248 + if (value.mv_size >= @sizeOf(SerializedEntry)) { 249 + const data: [*]const u8 = @ptrCast(value.mv_data); 250 + var header: SerializedEntry = undefined; 251 + @memcpy(std.mem.asBytes(&header), data[0..@sizeOf(SerializedEntry)]); 252 + file_count = header.file_count; 253 + } 254 + try hits.append(allocator, .{ .index = @intCast(i), .file_count = file_count }); 255 + } 256 + } 257 + 258 + return .{ .items = hits.toOwnedSlice(allocator) catch &.{}, .allocator = allocator }; 259 + } 260 + 261 + pub fn insert(self: *CacheDB, entry: *const CacheEntry, name: ?[]const u8, version: ?[]const u8) !void { 262 + var txn: ?*c.MDB_txn = null; 263 + if (c.mdb_txn_begin(self.env, null, 0, &txn) != 0) { 264 + return error.InsertError; 265 + } errdefer c.mdb_txn_abort(txn); 266 + 267 + try self.insertInTxn(txn.?, entry, name, version); 268 + if (c.mdb_txn_commit(txn) != 0) return error.InsertError; 269 + } 270 + 271 + fn insertInTxn(self: *CacheDB, txn: *c.MDB_txn, entry: *const CacheEntry, name: ?[]const u8, version: ?[]const u8) !void { 272 + const value_size = @sizeOf(SerializedEntry) + entry.path.len; 273 + const value_buf = try self.allocator.alloc(u8, value_size); 274 + defer self.allocator.free(value_buf); 275 + 276 + const header: *SerializedEntry = @ptrCast(@alignCast(value_buf.ptr)); 277 + header.* = .{ 278 + .unpacked_size = entry.unpacked_size, 279 + .file_count = entry.file_count, 280 + .cached_at = entry.cached_at, 281 + .path_len = @intCast(entry.path.len), 282 + }; 283 + 284 + @memcpy(value_buf[@sizeOf(SerializedEntry)..], entry.path); 285 + const key_bytes = makeIntegrityKey(&entry.integrity); 286 + 287 + var key = c.MDB_val{ 288 + .mv_size = 
key_bytes.len, 289 + .mv_data = @constCast(&key_bytes), 290 + }; 291 + 292 + var value = c.MDB_val{ 293 + .mv_size = value_size, 294 + .mv_data = value_buf.ptr, 295 + }; 296 + 297 + if (c.mdb_put(txn, self.dbi_primary, &key, &value, 0) != 0) { 298 + return error.InsertError; 299 + } 300 + 301 + if (name != null and version != null) { 302 + const name_key = try makeNameKey(self.allocator, name.?, version.?); 303 + defer self.allocator.free(name_key); 304 + 305 + var sec_key = c.MDB_val{ 306 + .mv_size = name_key.len, 307 + .mv_data = @constCast(name_key.ptr), 308 + }; 309 + var sec_value = c.MDB_val{ 310 + .mv_size = 64, 311 + .mv_data = @constCast(&entry.integrity), 312 + }; 313 + 314 + if (c.mdb_put(txn, self.dbi_secondary, &sec_key, &sec_value, 0) != 0) { 315 + return error.InsertError; 316 + } 317 + } 318 + } 319 + 320 + pub fn batchInsert(self: *CacheDB, entries: []const CacheEntry) !void { 321 + var txn: ?*c.MDB_txn = null; 322 + if (c.mdb_txn_begin(self.env, null, 0, &txn) != 0) { 323 + return error.InsertError; 324 + } errdefer c.mdb_txn_abort(txn); 325 + 326 + for (entries) |*entry| { 327 + try self.insertInTxn(txn.?, entry, null, null); 328 + } if (c.mdb_txn_commit(txn) != 0) return error.InsertError; 329 + } 330 + 331 + pub const NamedCacheEntry = struct { 332 + entry: CacheEntry, 333 + name: []const u8, 334 + version: []const u8, 335 + }; 336 + 337 + pub fn batchInsertNamed(self: *CacheDB, entries: []const NamedCacheEntry) !void { 338 + if (entries.len == 0) return; 339 + 340 + var txn: ?*c.MDB_txn = null; 341 + if (c.mdb_txn_begin(self.env, null, 0, &txn) != 0) { 342 + return error.InsertError; 343 + } errdefer c.mdb_txn_abort(txn); 344 + 345 + for (entries) |item| { 346 + self.insertInTxn(txn.?, &item.entry, item.name, item.version) catch continue; 347 + } if (c.mdb_txn_commit(txn) != 0) return error.InsertError; 348 + } 349 + 350 + pub fn delete(self: *CacheDB, integrity: *const [64]u8) !void { 351 + var txn: ?*c.MDB_txn = null; 352 + if 
(c.mdb_txn_begin(self.env, null, 0, &txn) != 0) { 353 + return error.DeleteError; 354 + } errdefer c.mdb_txn_abort(txn); 355 + 356 + const key_bytes = makeIntegrityKey(integrity); 357 + var key = c.MDB_val{ 358 + .mv_size = key_bytes.len, 359 + .mv_data = @constCast(&key_bytes), 360 + }; 361 + 362 + _ = c.mdb_del(txn, self.dbi_primary, &key, null); 363 + if (c.mdb_txn_commit(txn) != 0) return error.DeleteError; 364 + } 365 + 366 + pub fn getPackagePath(self: *CacheDB, integrity: *const [64]u8, allocator: std.mem.Allocator) ![]u8 { 367 + const hex = std.fmt.bytesToHex(integrity.*, .lower); 368 + return std.fmt.allocPrint(allocator, "{s}/cache/{s}", .{ self.cache_dir, hex }); 369 + } 370 + 371 + pub fn sync(self: *CacheDB) void { 372 + _ = c.mdb_env_sync(self.env, 1); 373 + } 374 + 375 + pub fn stats(self: *CacheDB) !struct { entries: usize, db_size: usize, cache_size: usize } { 376 + var txn: ?*c.MDB_txn = null; 377 + if (c.mdb_txn_begin(self.env, null, c.MDB_RDONLY, &txn) != 0) { 378 + return error.DatabaseError; 379 + } defer c.mdb_txn_abort(txn); 380 + 381 + var db_stat: c.MDB_stat = undefined; 382 + _ = c.mdb_stat(txn, self.dbi_primary, &db_stat); 383 + 384 + var env_info: c.MDB_envinfo = undefined; 385 + _ = c.mdb_env_info(self.env, &env_info); 386 + 387 + const db_size = self.getDbFileSize(); 388 + const cache_size = self.calculateCacheSize(); 389 + 390 + return .{ 391 + .entries = db_stat.ms_entries, 392 + .db_size = db_size, 393 + .cache_size = cache_size, 394 + }; 395 + } 396 + 397 + const BLOCK_SIZE: usize = 4096; 398 + 399 + inline fn alignToBlock(size: u64) usize { 400 + const s: usize = @intCast(size); 401 + return ((s + BLOCK_SIZE - 1) / BLOCK_SIZE) * BLOCK_SIZE; 402 + } 403 + 404 + fn getDbFileSize(self: *CacheDB) usize { 405 + const db_path = std.fmt.allocPrint(self.allocator, "{s}/index.lmdb", .{self.cache_dir}) catch return 0; 406 + defer self.allocator.free(db_path); 407 + 408 + const stat = std.fs.cwd().statFile(db_path) catch return 0; 409 + 
return alignToBlock(stat.size); 410 + } 411 + 412 + fn calculateCacheSize(self: *CacheDB) usize { 413 + const cache_path = std.fmt.allocPrint(self.allocator, "{s}/cache", .{self.cache_dir}) catch return 0; 414 + defer self.allocator.free(cache_path); 415 + 416 + var total: usize = 0; 417 + var dir = std.fs.cwd().openDir(cache_path, .{ .iterate = true }) catch return 0; 418 + defer dir.close(); 419 + 420 + var iter = dir.iterate(); 421 + while (iter.next() catch null) |entry| { 422 + if (entry.kind == .directory) { 423 + total += self.getDirSize(dir, entry.name); 424 + } else if (entry.kind == .file) { 425 + const stat = dir.statFile(entry.name) catch continue; 426 + total += alignToBlock(stat.size); 427 + } 428 + } 429 + return total; 430 + } 431 + 432 + fn getDirSize(self: *CacheDB, parent: std.fs.Dir, name: []const u8) usize { 433 + var subdir = parent.openDir(name, .{ .iterate = true }) catch return 0; 434 + defer subdir.close(); 435 + 436 + var total: usize = 0; 437 + var iter = subdir.iterate(); 438 + while (iter.next() catch null) |entry| { 439 + if (entry.kind == .directory) { 440 + total += self.getDirSize(subdir, entry.name); 441 + } else if (entry.kind == .file) { 442 + const stat = subdir.statFile(entry.name) catch continue; 443 + total += alignToBlock(stat.size); 444 + } 445 + } 446 + return total; 447 + } 448 + 449 + fn makeMetadataKey(allocator: std.mem.Allocator, name: []const u8) ![]u8 { 450 + return std.fmt.allocPrint(allocator, "m:{s}", .{name}); 451 + } 452 + 453 + pub fn lookupMetadata(self: *CacheDB, name: []const u8, allocator: std.mem.Allocator) ?[]u8 { 454 + var txn: ?*c.MDB_txn = null; 455 + if (c.mdb_txn_begin(self.env, null, c.MDB_RDONLY, &txn) != 0) { 456 + return null; 457 + } defer c.mdb_txn_abort(txn); 458 + 459 + const meta_key = makeMetadataKey(self.allocator, name) catch return null; 460 + defer self.allocator.free(meta_key); 461 + 462 + var key = c.MDB_val{ 463 + .mv_size = meta_key.len, 464 + .mv_data = @constCast(meta_key.ptr), 
465 + }; 466 + var value: c.MDB_val = undefined; 467 + 468 + if (c.mdb_get(txn, self.dbi_metadata, &key, &value) != 0) return null; 469 + if (value.mv_size < @sizeOf(i64)) return null; 470 + 471 + const data: [*]const u8 = @ptrCast(value.mv_data); 472 + var cached_at: i64 = undefined; 473 + @memcpy(std.mem.asBytes(&cached_at), data[0..@sizeOf(i64)]); 474 + 475 + const now = std.time.timestamp(); 476 + if (now - cached_at > METADATA_TTL_SECS) return null; 477 + 478 + const json_data = data[@sizeOf(i64)..value.mv_size]; 479 + return allocator.dupe(u8, json_data) catch null; 480 + } 481 + 482 + pub fn insertMetadata(self: *CacheDB, name: []const u8, json_data: []const u8) !void { 483 + var stripped_len: usize = 0; 484 + const stripped_ptr = strip_npm_metadata(json_data.ptr, json_data.len, &stripped_len); 485 + defer if (stripped_ptr) |p| strip_metadata_free(p); 486 + 487 + const data_to_store = if (stripped_ptr) |p| p[0..stripped_len] else json_data; 488 + 489 + var txn: ?*c.MDB_txn = null; 490 + if (c.mdb_txn_begin(self.env, null, 0, &txn) != 0) { 491 + return error.InsertError; 492 + } 493 + errdefer c.mdb_txn_abort(txn); 494 + 495 + const meta_key = try makeMetadataKey(self.allocator, name); 496 + defer self.allocator.free(meta_key); 497 + 498 + const value_size = @sizeOf(i64) + data_to_store.len; 499 + const value_buf = try self.allocator.alloc(u8, value_size); 500 + defer self.allocator.free(value_buf); 501 + 502 + const now: i64 = std.time.timestamp(); 503 + @memcpy(value_buf[0..@sizeOf(i64)], std.mem.asBytes(&now)); 504 + @memcpy(value_buf[@sizeOf(i64)..], data_to_store); 505 + 506 + var key = c.MDB_val{ 507 + .mv_size = meta_key.len, 508 + .mv_data = @constCast(meta_key.ptr), 509 + }; 510 + 511 + var value = c.MDB_val{ 512 + .mv_size = value_size, 513 + .mv_data = value_buf.ptr, 514 + }; 515 + 516 + if (c.mdb_put(txn, self.dbi_metadata, &key, &value, 0) != 0) return error.InsertError; 517 + if (c.mdb_txn_commit(txn) != 0) return error.InsertError; 518 + } 519 
+ 520 + const PruneCollections = struct { 521 + keys: std.ArrayListUnmanaged([66]u8) = .{}, 522 + paths: std.ArrayListUnmanaged([]const u8) = .{}, 523 + 524 + fn deinit(self: *PruneCollections, allocator: std.mem.Allocator) void { 525 + for (self.paths.items) |p| allocator.free(p); 526 + self.paths.deinit(allocator); 527 + self.keys.deinit(allocator); 528 + } 529 + }; 530 + 531 + inline fn collectExpiredEntries( 532 + self: *CacheDB, 533 + txn: *c.MDB_txn, 534 + cutoff: i64, 535 + collections: *PruneCollections, 536 + ) !void { 537 + var cursor: ?*c.MDB_cursor = null; 538 + if (c.mdb_cursor_open(txn, self.dbi_primary, &cursor) != 0) return error.DatabaseError; 539 + defer c.mdb_cursor_close(cursor); 540 + 541 + var key: c.MDB_val = undefined; 542 + var value: c.MDB_val = undefined; 543 + var rc = c.mdb_cursor_get(cursor, &key, &value, c.MDB_FIRST); 544 + 545 + while (rc == 0) : (rc = c.mdb_cursor_get(cursor, &key, &value, c.MDB_NEXT)) { 546 + if (value.mv_size < @sizeOf(SerializedEntry)) continue; 547 + 548 + const data: [*]const u8 = @ptrCast(value.mv_data); 549 + var header: SerializedEntry = undefined; 550 + @memcpy(std.mem.asBytes(&header), data[0..@sizeOf(SerializedEntry)]); 551 + 552 + if (header.cached_at >= cutoff) continue; 553 + if (key.mv_size != 66) continue; 554 + 555 + const key_data: [*]const u8 = @ptrCast(key.mv_data); 556 + var key_copy: [66]u8 = undefined; 557 + @memcpy(&key_copy, key_data[0..66]); 558 + collections.keys.append(self.allocator, key_copy) catch continue; 559 + 560 + const path_start = @sizeOf(SerializedEntry); 561 + if (value.mv_size >= path_start + header.path_len) { 562 + const path = self.allocator.dupe(u8, data[path_start..][0..header.path_len]) catch continue; 563 + collections.paths.append(self.allocator, path) catch self.allocator.free(path); 564 + } 565 + } 566 + } 567 + 568 + inline fn deletePrimaryEntries(self: *CacheDB, txn: *c.MDB_txn, keys: []const [66]u8) u32 { 569 + var pruned: u32 = 0; 570 + for (keys) |*key_bytes| { 
571 + var del_key = c.MDB_val{ .mv_size = 66, .mv_data = @constCast(key_bytes) }; 572 + if (c.mdb_del(txn, self.dbi_primary, &del_key, null) == 0) pruned += 1; 573 + } 574 + return pruned; 575 + } 576 + 577 + inline fn pruneStaleSecondaryEntries(self: *CacheDB, txn: *c.MDB_txn) void { 578 + var cursor: ?*c.MDB_cursor = null; 579 + if (c.mdb_cursor_open(txn, self.dbi_secondary, &cursor) != 0) return; 580 + defer c.mdb_cursor_close(cursor); 581 + 582 + var to_delete = std.ArrayListUnmanaged([]u8){}; 583 + defer { 584 + for (to_delete.items) |k| self.allocator.free(k); 585 + to_delete.deinit(self.allocator); 586 + } 587 + 588 + var key: c.MDB_val = undefined; 589 + var value: c.MDB_val = undefined; 590 + var rc = c.mdb_cursor_get(cursor, &key, &value, c.MDB_FIRST); 591 + 592 + while (rc == 0) : (rc = c.mdb_cursor_get(cursor, &key, &value, c.MDB_NEXT)) { 593 + if (value.mv_size != 64) continue; 594 + 595 + const integrity: *const [64]u8 = @ptrCast(value.mv_data); 596 + const int_key = makeIntegrityKey(integrity); 597 + var check_key = c.MDB_val{ .mv_size = int_key.len, .mv_data = @constCast(&int_key) }; 598 + var check_val: c.MDB_val = undefined; 599 + 600 + if (c.mdb_get(txn, self.dbi_primary, &check_key, &check_val) == 0) continue; 601 + 602 + const key_data: [*]const u8 = @ptrCast(key.mv_data); 603 + const key_copy = self.allocator.dupe(u8, key_data[0..key.mv_size]) catch continue; 604 + to_delete.append(self.allocator, key_copy) catch self.allocator.free(key_copy); 605 + } 606 + 607 + for (to_delete.items) |sec_key| { 608 + var del_key = c.MDB_val{ .mv_size = sec_key.len, .mv_data = @ptrCast(sec_key.ptr) }; 609 + _ = c.mdb_del(txn, self.dbi_secondary, &del_key, null); 610 + } 611 + } 612 + 613 + inline fn deletePackageFiles(paths: []const []const u8) void { 614 + for (paths) |path| std.fs.cwd().deleteTree(path) catch {}; 615 + } 616 + 617 + inline fn pruneExpiredMetadata(self: *CacheDB, txn: *c.MDB_txn) void { 618 + const now = std.time.timestamp(); 619 + 620 + var 
cursor: ?*c.MDB_cursor = null; 621 + if (c.mdb_cursor_open(txn, self.dbi_metadata, &cursor) != 0) return; 622 + defer c.mdb_cursor_close(cursor); 623 + 624 + var to_delete = std.ArrayListUnmanaged([]u8){}; 625 + defer { 626 + for (to_delete.items) |k| self.allocator.free(k); 627 + to_delete.deinit(self.allocator); 628 + } 629 + 630 + var key: c.MDB_val = undefined; 631 + var value: c.MDB_val = undefined; 632 + var rc = c.mdb_cursor_get(cursor, &key, &value, c.MDB_FIRST); 633 + 634 + while (rc == 0) : (rc = c.mdb_cursor_get(cursor, &key, &value, c.MDB_NEXT)) { 635 + if (value.mv_size < @sizeOf(i64)) continue; 636 + 637 + const data: [*]const u8 = @ptrCast(value.mv_data); 638 + var cached_at: i64 = undefined; 639 + @memcpy(std.mem.asBytes(&cached_at), data[0..@sizeOf(i64)]); 640 + 641 + if (now - cached_at <= METADATA_TTL_SECS) continue; 642 + 643 + const key_data: [*]const u8 = @ptrCast(key.mv_data); 644 + const key_copy = self.allocator.dupe(u8, key_data[0..key.mv_size]) catch continue; 645 + to_delete.append(self.allocator, key_copy) catch self.allocator.free(key_copy); 646 + } 647 + 648 + for (to_delete.items) |meta_key| { 649 + var del_key = c.MDB_val{ .mv_size = meta_key.len, .mv_data = @ptrCast(meta_key.ptr) }; 650 + _ = c.mdb_del(txn, self.dbi_metadata, &del_key, null); 651 + } 652 + } 653 + 654 + pub fn prune(self: *CacheDB, max_age_days: u32) !u32 { 655 + const now = std.time.timestamp(); 656 + const max_age_secs: i64 = @as(i64, max_age_days) * 24 * 60 * 60; 657 + const cutoff = now - max_age_secs; 658 + 659 + var txn: ?*c.MDB_txn = null; 660 + if (c.mdb_txn_begin(self.env, null, 0, &txn) != 0) return error.DatabaseError; 661 + errdefer c.mdb_txn_abort(txn); 662 + 663 + var collections = PruneCollections{}; 664 + defer collections.deinit(self.allocator); 665 + 666 + try self.collectExpiredEntries(txn.?, cutoff, &collections); 667 + const pruned = self.deletePrimaryEntries(txn.?, collections.keys.items); 668 + 669 + self.pruneStaleSecondaryEntries(txn.?); 
670 + self.pruneExpiredMetadata(txn.?); 671 + 672 + if (c.mdb_txn_commit(txn) != 0) return error.DatabaseError; 673 + deletePackageFiles(collections.paths.items); 674 + 675 + return pruned; 676 + } 677 + };
+28
src/pkg/debug.zig
··· 1 + const std = @import("std"); 2 + const builtin = @import("builtin"); 3 + 4 + pub var enabled: bool = false; 5 + 6 + pub fn log(comptime fmt: []const u8, args: anytype) void { 7 + if (!enabled) return; 8 + 9 + var buf: [2048]u8 = undefined; 10 + const msg = std.fmt.bufPrint(&buf, "[pkg] " ++ fmt ++ "\n", args) catch return; 11 + 12 + if (comptime builtin.os.tag == .windows) { 13 + const handle = std.os.windows.GetStdHandle(std.os.windows.STD_ERROR_HANDLE) catch return; 14 + _ = std.os.windows.WriteFile(handle, msg, null) catch {}; 15 + } else _ = std.c.write(2, msg.ptr, msg.len); 16 + } 17 + 18 + pub fn timer(comptime label: []const u8, start: u64) u64 { 19 + if (!enabled) return start; 20 + 21 + const now = std.time.nanoTimestamp(); 22 + const elapsed_ns: u64 = @intCast(now - @as(i128, start)); 23 + const elapsed_ms = elapsed_ns / 1_000_000; 24 + 25 + log("{s}: {d}ms", .{ label, elapsed_ms }); 26 + 27 + return @intCast(now); 28 + }
+507
src/pkg/extractor.zig
const std = @import("std");
const builtin = @import("builtin");
const linker = @import("linker.zig");

const c = @cImport({
    @cInclude("zlib-ng.h");
});

pub const ExtractError = error{
    DecompressionFailed,
    InvalidTarHeader,
    IoError,
    OutOfMemory,
    PathTooLong,
    UnsupportedFormat,
    InvalidPath,
};

/// Rejects empty, oversized (> 4096 bytes), or absolute paths.
inline fn validateBasic(path: []const u8) ExtractError!void {
    if (path.len == 0 or path.len > 4096) return error.InvalidPath;
    if (path[0] == '/') return error.InvalidPath;
}

/// Rejects NUL bytes, backslashes, ASCII control characters, and any `..`
/// path segment (directory traversal) — archive paths are untrusted input.
inline fn validateBadCharsAndTraversal(path: []const u8) ExtractError!void {
    const len = path.len;
    var i: usize = 0;
    var segment_start: usize = 0;

    while (i < len) : (i += 1) {
        const ch = path[i];
        if (ch == 0 or ch == '\\' or ch < 0x20) return error.InvalidPath;
        if (ch == '/') {
            const seg_len = i - segment_start;
            if (seg_len == 2) {
                const seg = path[segment_start..i];
                if (seg[0] == '.' and seg[1] == '.') return error.InvalidPath;
            }
            segment_start = i + 1;
        }
    }

    // Check the final segment (no trailing slash).
    const final_len = len - segment_start;
    if (final_len == 2) {
        const seg = path[segment_start..];
        if (seg[0] == '.' and seg[1] == '.') return error.InvalidPath;
    }
}

/// True if `name` is a reserved Windows device name (CON, NUL, COM1..9,
/// LPT1..9, ...), matched case-insensitively, with or without an extension.
inline fn isWindowsReserved(name: []const u8) bool {
    const reserved = [_][]const u8{
        "CON",  "PRN",  "AUX",  "NUL",
        "COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8", "COM9",
        "LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9",
    };

    for (reserved) |r| {
        if (name.len < r.len) continue;
        const prefix = name[0..r.len];
        if (!std.ascii.eqlIgnoreCase(prefix, r)) continue;
        // Exact match, or reserved stem followed by an extension ("NUL.txt").
        return name.len == r.len or name[r.len] == '.';
    }

    return false;
}

/// On Windows only, rejects basenames that collide with device names.
/// Compiles to a no-op on other targets.
inline fn validateWindowsReserved(path: []const u8) ExtractError!void {
    if (comptime builtin.os.tag != .windows) return;

    const slash_idx = std.mem.lastIndexOfScalar(u8, path, '/');
    const basename = if (slash_idx) |i| path[i + 1 ..] else path;
    if (basename.len == 0) return error.InvalidPath;

    // Cheap first-letter filter before the full reserved-name scan.
    const first = std.ascii.toUpper(basename[0]);
    const should_check = first == 'C' or first == 'P' or first == 'A' or first == 'N' or first == 'L';
    if (should_check and isWindowsReserved(basename)) return error.InvalidPath;
}

/// Full validation pipeline for a path taken from an archive entry.
fn validatePath(path: []const u8) ExtractError!void {
    try validateBasic(path);
    try validateBadCharsAndTraversal(path);
    try validateWindowsReserved(path);
}

/// POSIX ustar header, exactly one 512-byte tar block.
pub const TarHeader = extern struct {
    name: [100]u8,
    mode: [8]u8,
    uid: [8]u8,
    gid: [8]u8,
    size: [12]u8,
    mtime: [12]u8,
    checksum: [8]u8,
    typeflag: u8,
    linkname: [100]u8,
    magic: [6]u8,
    version: [2]u8,
    uname: [32]u8,
    gname: [32]u8,
    devmajor: [8]u8,
    devminor: [8]u8,
    prefix: [155]u8,
    _padding: [12]u8,

    comptime {
        std.debug.assert(@sizeOf(TarHeader) == 512);
    }

    /// An all-zero block marks the end of the archive.
    pub fn isZero(self: *const TarHeader) bool {
        const bytes: *const [512]u8 = @ptrCast(self);
        for (bytes) |b| if (b != 0) return false;
        return true;
    }

    /// Returns the full entry name, joining ustar `prefix` and `name` with a
    /// '/' when a prefix is present. `buf` must hold at least 256 bytes
    /// (155 prefix + 1 slash + 100 name is the ustar maximum); the returned
    /// slice may alias `buf` or the header itself.
    pub fn getName(self: *const TarHeader, buf: []u8) []const u8 {
        const prefix_len = std.mem.indexOfScalar(u8, &self.prefix, 0) orelse self.prefix.len;
        const name_len = std.mem.indexOfScalar(u8, &self.name, 0) orelse self.name.len;

        if (prefix_len > 0) {
            @memcpy(buf[0..prefix_len], self.prefix[0..prefix_len]);
            buf[prefix_len] = '/';
            @memcpy(buf[prefix_len + 1 ..][0..name_len], self.name[0..name_len]);
            return buf[0 .. prefix_len + 1 + name_len];
        }

        return self.name[0..name_len];
    }

    /// Parses the octal, NUL/space-padded size field.
    pub fn getSize(self: *const TarHeader) !u64 {
        const size_str = std.mem.trimRight(u8, &self.size, &[_]u8{ 0, ' ' });
        return std.fmt.parseInt(u64, size_str, 8) catch return error.InvalidTarHeader;
    }

    /// Parses the octal, NUL/space-padded mode field.
    pub fn getMode(self: *const TarHeader) !u32 {
        const mode_str = std.mem.trimRight(u8, &self.mode, &[_]u8{ 0, ' ' });
        return std.fmt.parseInt(u32, mode_str, 8) catch return error.InvalidTarHeader;
    }

    /// Regular file: typeflag '0' or the pre-POSIX NUL.
    pub fn isFile(self: *const TarHeader) bool {
        return self.typeflag == '0' or self.typeflag == 0;
    }

    pub fn isDirectory(self: *const TarHeader) bool {
        return self.typeflag == '5';
    }

    pub fn isSymlink(self: *const TarHeader) bool {
        return self.typeflag == '2';
    }
};

/// Streaming gzip inflater backed by zlib-ng. Feed compressed chunks in
/// order; decompressed output is delivered through a callback.
pub const GzipDecompressor = struct {
    stream: c.zng_stream,
    initialized: bool,
    allocator: std.mem.Allocator,

    pub fn init(allocator: std.mem.Allocator) !*GzipDecompressor {
        const self = try allocator.create(GzipDecompressor);
        errdefer allocator.destroy(self);

        self.allocator = allocator;
        self.stream = std.mem.zeroes(c.zng_stream);
        self.initialized = false;

        // 15 + 32: max window, auto-detect gzip or zlib wrapper.
        const ret = c.zng_inflateInit2(&self.stream, 15 + 32);
        if (ret != c.Z_OK) {
            // BUGFIX: previously also called allocator.destroy(self) here,
            // which combined with the errdefer above to double-free `self`.
            // The errdefer alone performs the cleanup.
            return error.DecompressionFailed;
        }

        self.initialized = true;
        return self;
    }

    pub fn deinit(self: *GzipDecompressor) void {
        if (self.initialized) _ = c.zng_inflateEnd(&self.stream);
        self.allocator.destroy(self);
    }

    /// Inflates `input`, invoking `output_fn` for each decompressed chunk.
    /// Returns true once the gzip stream end is reached, false if more input
    /// is required. Errors with DecompressionFailed on corrupt data.
    pub fn decompress(
        self: *GzipDecompressor,
        input: []const u8,
        output_fn: *const fn (data: []const u8, user_data: ?*anyopaque) anyerror!void,
        user_data: ?*anyopaque,
    ) !bool {
        var output_buf: [256 * 1024]u8 = undefined;

        self.stream.next_in = @constCast(input.ptr);
        self.stream.avail_in = @intCast(input.len);

        while (true) {
            self.stream.next_out = &output_buf;
            self.stream.avail_out = output_buf.len;

            const ret = c.zng_inflate(&self.stream, c.Z_NO_FLUSH);
            if (ret != c.Z_OK and ret != c.Z_STREAM_END) return error.DecompressionFailed;

            const produced = output_buf.len - self.stream.avail_out;
            if (produced > 0) try output_fn(output_buf[0..produced], user_data);

            if (ret == c.Z_STREAM_END) return true;

            // BUGFIX: keep inflating while the output buffer came back full
            // (avail_out == 0), even after avail_in reaches 0 — inflate may
            // still hold pending output. The old loop exited on avail_in == 0
            // alone and could drop the tail of the stream.
            if (self.stream.avail_in == 0 and self.stream.avail_out != 0) return false;
        }
    }
};

/// Incremental tar parser: feed it raw tar bytes and it yields entries,
/// file-data slices, and end-of-archive events without buffering whole files.
pub const TarParser = struct {
    state: State,
    header: TarHeader,
    header_bytes_read: usize,
    current_file_remaining: u64,
    skip_bytes: usize,
    // Leading directory prefix stripped from every path (e.g. "package/");
    // auto-detected from the first directory entry when present.
    strip_prefix: [128]u8,
    strip_prefix_len: usize,
    prefix_detected: bool,
    path_buf: [256]u8,

    const State = enum {
        read_header,
        read_file_data,
        skip_padding,
    };

    pub fn init(default_prefix: []const u8) TarParser {
        var prefix_buf: [128]u8 = undefined;
        const len = @min(default_prefix.len, 128);
        @memcpy(prefix_buf[0..len], default_prefix[0..len]);
        return .{
            .state = .read_header,
            .header = undefined,
            .header_bytes_read = 0,
            .current_file_remaining = 0,
            .skip_bytes = 0,
            .strip_prefix = prefix_buf,
            .strip_prefix_len = len,
            .prefix_detected = false,
            .path_buf = undefined,
        };
    }

    pub const Entry = struct {
        // NOTE: `path` aliases parser-internal buffers; valid only until the
        // next feed() call.
        path: []const u8,
        mode: u32,
        size: u64,
        entry_type: Type,

        pub const Type = enum {
            file,
            directory,
            symlink,
        };
    };

    pub const ParseResult = struct {
        kind: Kind,
        consumed: usize,

        pub const Kind = union(enum) {
            need_more_data,
            entry: Entry,
            file_data: []const u8,
            end_of_archive,
            err: ExtractError,
        };
    };

    /// Consumes up to data.len bytes and reports what was recognized.
    /// Callers loop, advancing their buffer by `consumed` each time.
    pub fn feed(self: *TarParser, data: []const u8) ParseResult {
        switch (self.state) {
            .read_header => {
                // Accumulate a full 512-byte header, possibly across calls.
                const needed = @sizeOf(TarHeader) - self.header_bytes_read;
                const to_copy = @min(needed, data.len);

                const header_bytes: *[512]u8 = @ptrCast(&self.header);
                @memcpy(header_bytes[self.header_bytes_read..][0..to_copy], data[0..to_copy]);
                self.header_bytes_read += to_copy;

                if (self.header_bytes_read < @sizeOf(TarHeader)) {
                    return .{ .kind = .need_more_data, .consumed = to_copy };
                }
                self.header_bytes_read = 0;

                if (self.header.isZero()) {
                    return .{ .kind = .end_of_archive, .consumed = to_copy };
                }

                var path = self.header.getName(&self.path_buf);

                // First directory entry defines the prefix to strip
                // (npm tarballs wrap everything in "package/").
                if (!self.prefix_detected and self.header.isDirectory()) {
                    const prefix_len = @min(path.len, 128);
                    @memcpy(self.strip_prefix[0..prefix_len], path[0..prefix_len]);
                    self.strip_prefix_len = prefix_len;
                    self.prefix_detected = true;
                }

                const prefix = self.strip_prefix[0..self.strip_prefix_len];
                if (std.mem.startsWith(u8, path, prefix)) {
                    path = path[self.strip_prefix_len..];
                }

                if (path.len > 0) validatePath(path) catch {
                    return .{ .kind = .{ .err = ExtractError.InvalidPath }, .consumed = to_copy };
                };

                const size = self.header.getSize() catch return .{ .kind = .{ .err = ExtractError.InvalidTarHeader }, .consumed = to_copy };
                const mode = self.header.getMode() catch return .{ .kind = .{ .err = ExtractError.InvalidTarHeader }, .consumed = to_copy };

                const entry_type: Entry.Type = if (self.header.isDirectory())
                    .directory
                else if (self.header.isSymlink())
                    .symlink
                else
                    .file;

                self.current_file_remaining = size;
                self.state = if (size > 0) .read_file_data else .read_header;

                const entry: Entry = .{
                    .path = path,
                    .mode = mode,
                    .size = size,
                    .entry_type = entry_type,
                };

                return .{ .consumed = to_copy, .kind = .{ .entry = entry } };
            },

            .read_file_data => {
                const to_read: usize = @min(self.current_file_remaining, data.len);
                self.current_file_remaining -= to_read;

                if (self.current_file_remaining == 0) {
                    // Tar pads file data to the next 512-byte boundary.
                    const size = self.header.getSize() catch return .{ .kind = .{ .err = ExtractError.InvalidTarHeader }, .consumed = to_read };
                    const padding = (512 - (size % 512)) % 512;
                    if (padding > 0) {
                        self.skip_bytes = @intCast(padding);
                        self.state = .skip_padding;
                    } else self.state = .read_header;
                }

                return .{ .kind = .{ .file_data = data[0..to_read] }, .consumed = to_read };
            },

            .skip_padding => {
                const to_skip = @min(self.skip_bytes, data.len);
                self.skip_bytes -= to_skip;

                if (self.skip_bytes == 0) {
                    self.state = .read_header;
                }

                // Chain into the next state if bytes remain in this chunk.
                if (data.len > to_skip) {
                    const next = self.feed(data[to_skip..]);
                    return .{ .kind = next.kind, .consumed = to_skip + next.consumed };
                }
                return .{ .kind = .need_more_data, .consumed = to_skip };
            },
        }
    }

    /// Reinitializes the parser, keeping the current strip prefix.
    pub fn reset(self: *TarParser) void {
        self.* = TarParser.init(self.strip_prefix[0..self.strip_prefix_len]);
    }
};

/// Drives GzipDecompressor + TarParser to extract a .tgz package into a
/// directory, creating files/dirs/symlinks and applying POSIX exec bits.
pub const Extractor = struct {
    allocator: std.mem.Allocator,
    output_dir: std.fs.Dir,
    parser: TarParser,
    decompressor: *GzipDecompressor,
    current_file: ?std.fs.File,
    current_file_path: [256]u8,
    current_file_path_len: usize,
    current_file_mode: u32,
    files_extracted: u32,
    bytes_extracted: u64,

    pub fn init(allocator: std.mem.Allocator, output_path: []const u8) !*Extractor {
        const extractor = try allocator.create(Extractor);
        errdefer allocator.destroy(extractor);

        std.fs.cwd().makePath(output_path) catch |err| switch (err) {
            error.PathAlreadyExists => {},
            else => return error.IoError,
        };

        const decompressor = try GzipDecompressor.init(allocator);
        errdefer decompressor.deinit();

        extractor.* = .{
            .allocator = allocator,
            .output_dir = try std.fs.cwd().openDir(output_path, .{}),
            .parser = TarParser.init("package/"),
            .decompressor = decompressor,
            .current_file = null,
            .current_file_path = undefined,
            .current_file_path_len = 0,
            .current_file_mode = 0o644,
            .files_extracted = 0,
            .bytes_extracted = 0,
        };

        return extractor;
    }

    pub fn deinit(self: *Extractor) void {
        // Flush any file still open mid-stream so its mode is applied.
        if (self.current_file) |f| {
            f.close();
            self.applyFileMode();
        }
        self.output_dir.close();
        self.decompressor.deinit();
        self.allocator.destroy(self);
    }

    /// Applies the recorded tar mode's exec bits to the just-closed file.
    /// No-op on Windows or for non-executable files; best effort elsewhere.
    fn applyFileMode(self: *Extractor) void {
        if (self.current_file_path_len == 0) return;

        if (comptime builtin.os.tag != .windows) {
            if (self.current_file_mode & 0o111 != 0) {
                const path = self.current_file_path[0..self.current_file_path_len];
                // 257 = max stored path (256) + NUL terminator.
                var path_buf: [257]u8 = undefined;
                @memcpy(path_buf[0..path.len], path);
                path_buf[path.len] = 0;
                const path_z: [*:0]const u8 = path_buf[0..path.len :0];
                _ = std.c.fchmodat(self.output_dir.fd, path_z, @intCast(self.current_file_mode & 0o777), 0);
            }
        }
        self.current_file_path_len = 0;
    }

    /// Entry point for gzip-compressed tar bytes.
    pub fn feedCompressed(self: *Extractor, data: []const u8) !void {
        _ = try self.decompressor.decompress(data, handleDecompressed, self);
    }

    fn handleDecompressed(data: []const u8, user_data: ?*anyopaque) !void {
        const self: *Extractor = @ptrCast(@alignCast(user_data));
        try self.feedTar(data);
    }

    /// Entry point for already-decompressed tar bytes.
    pub fn feedTar(self: *Extractor, data: []const u8) !void {
        var remaining = data;
        while (remaining.len > 0) {
            const result = self.parser.feed(remaining);
            remaining = remaining[result.consumed..];
            switch (result.kind) {
                .need_more_data => return,
                .entry => |entry| try self.handleEntry(entry),
                .file_data => |d| try self.writeFileData(d),
                .end_of_archive => return self.closeCurrentFile(),
                .err => |e| return e,
            }
        }
    }

    inline fn handleEntry(self: *Extractor, entry: TarParser.Entry) !void {
        if (entry.path.len == 0) return;
        switch (entry.entry_type) {
            .directory => self.output_dir.makePath(entry.path) catch {},
            .file => try self.createFile(entry),
            .symlink => try self.createSymlink(entry),
        }
    }

    inline fn createFile(self: *Extractor, entry: TarParser.Entry) !void {
        self.closeCurrentFile();
        if (std.fs.path.dirname(entry.path)) |dir| {
            try self.output_dir.makePath(dir);
        }
        self.current_file = try self.output_dir.createFile(entry.path, .{});
        // Remember (truncated) path + mode so closeCurrentFile can chmod it.
        const len = @min(entry.path.len, 256);
        @memcpy(self.current_file_path[0..len], entry.path[0..len]);
        self.current_file_path_len = len;
        self.current_file_mode = entry.mode;
        self.files_extracted += 1;
    }

    inline fn createSymlink(self: *Extractor, entry: TarParser.Entry) !void {
        const linkname_len = std.mem.indexOfScalar(u8, &self.parser.header.linkname, 0) orelse self.parser.header.linkname.len;
        const target = self.parser.header.linkname[0..linkname_len];

        if (entry.path.len == 0 or target.len == 0) return;
        // Symlink targets are untrusted too (rejects absolute and `..`).
        try validatePath(target);

        if (std.fs.path.dirname(entry.path)) |dir| {
            try self.output_dir.makePath(dir);
        }

        self.output_dir.deleteFile(entry.path) catch {};
        try linker.createSymlinkOrCopy(self.output_dir, target, entry.path);
    }

    inline fn writeFileData(self: *Extractor, data: []const u8) !void {
        if (self.current_file) |f| {
            try f.writeAll(data);
            self.bytes_extracted += data.len;
        }
    }

    inline fn closeCurrentFile(self: *Extractor) void {
        if (self.current_file) |f| {
            f.close();
            self.applyFileMode();
            self.current_file = null;
        }
    }

    pub fn stats(self: *const Extractor) struct { files: u32, bytes: u64 } {
        return .{
            .files = self.files_extracted,
            .bytes = self.bytes_extracted,
        };
    }
};
+1594
src/pkg/fetcher.zig
const std = @import("std");
const c_allocator = std.heap.c_allocator;

const debug = @import("debug.zig");
const extractor = @import("extractor.zig");
const uv = @import("uv.zig");
const tlsuv = @import("tlsuv.zig");
const nghttp2 = @import("nghttp2.zig");

const config = @import("config");
const user_agent: [:0]const u8 = "ant/" ++ config.version;

/// Errors surfaced to fetch callers and stream handlers.
pub const FetchError = error{
    ConnectionFailed,
    TlsError,
    Http2Error,
    Timeout,
    InvalidUrl,
    ResponseError,
    OutOfMemory,
};

/// Minimal URL splitter for `scheme://host[:port][/path]`.
/// All result slices borrow from the input string; no allocation.
pub const ParsedUrl = struct {
    scheme: []const u8,
    host: []const u8,
    port: u16,
    path: []const u8,

    pub fn parse(url: []const u8) !ParsedUrl {
        const scheme_end = std.mem.indexOf(u8, url, "://") orelse return error.InvalidUrl;
        const scheme = url[0..scheme_end];
        const rest = url[scheme_end + 3 ..];

        // Host (and optional port) run up to the first '/'; the path
        // defaults to "/" when absent.
        const path_start = std.mem.indexOf(u8, rest, "/") orelse rest.len;
        const host_port = rest[0..path_start];
        const path: []const u8 = if (path_start < rest.len) rest[path_start..] else "/";

        var host: []const u8 = host_port;
        // Default port follows the scheme unless one is given explicitly.
        var port: u16 = if (std.mem.eql(u8, scheme, "https")) 443 else 80;
        if (std.mem.indexOf(u8, host_port, ":")) |colon| {
            host = host_port[0..colon];
            port = std.fmt.parseInt(u16, host_port[colon + 1 ..], 10) catch return error.InvalidUrl;
        }

        return .{ .scheme = scheme, .host = host, .port = port, .path = path };
    }
};

/// Callback bundle for streaming responses (tarball downloads).
pub const StreamHandler = struct {
    on_data: *const fn ([]const u8, ?*anyopaque) void,
    on_complete: *const fn (u16, ?*anyopaque) void,
    on_error: *const fn (FetchError, ?*anyopaque) void,
    user_data: ?*anyopaque,

    pub fn init(
        on_data: *const fn ([]const u8, ?*anyopaque) void,
        on_complete: *const fn (u16, ?*anyopaque) void,
        on_error: *const fn (FetchError, ?*anyopaque) void,
        user_data: ?*anyopaque,
    ) StreamHandler {
        return .{ .on_data = on_data, .on_complete = on_complete, .on_error = on_error, .user_data = user_data };
    }
};

/// A request parked until some connection has stream capacity.
const PendingRequest = struct {
    url: []const u8,
    handler: ?StreamHandler,
};

const MAX_PENDING_REQUESTS = 20;
const NUM_CONNECTIONS = 6;
const NUM_META_CONNECTIONS = 3;
const META_SLOW_LOG_MS: u64 = 250;

/// One HTTP/2 connection (TLS via tlsuv, framing via nghttp2) multiplexing
/// up to MAX_PENDING_REQUESTS concurrent streams on the default uv loop.
const Http2Client = struct {
    allocator: std.mem.Allocator,
    loop: *uv.loop_t,
    tls: tlsuv.stream_t,
    h2_session: ?*nghttp2.session,
    host: [:0]const u8,
    use_tls: bool,
    // 0 = not connected, 1 = connected, negative = failed or closed.
    connected: i32,
    connect_pending: bool,
    // Outgoing nghttp2 frames accumulate here until flush() writes them.
    write_buf: std.ArrayListUnmanaged(u8),
    requests: [MAX_PENDING_REQUESTS]RequestState,
    request_count: usize,
    requests_done: usize,

    const RequestState = struct {
        // 0 = unused, -1 = recycled slot, >0 = live nghttp2 stream id.
        stream_id: i32,
        path: ?[:0]const u8,
        on_data: ?*const fn ([]const u8, ?*anyopaque) void,
        on_complete: ?*const fn (u16, ?*anyopaque) void,
        on_error: ?*const fn (FetchError, ?*anyopaque) void,
        userdata: ?*anyopaque,
        response_body: std.ArrayListUnmanaged(u8),
        status_code: u16,
        done: bool,
        has_error: bool,
        start_ns: u64,
        end_ns: u64,
        bytes: usize,
        content_encoding: ContentEncoding,

        // A fresh, unused slot (deduplicates the three init sites that
        // previously repeated this literal).
        const empty: RequestState = .{
            .stream_id = 0,
            .path = null,
            .on_data = null,
            .on_complete = null,
            .on_error = null,
            .userdata = null,
            .response_body = .{},
            .status_code = 0,
            .done = false,
            .has_error = false,
            .start_ns = 0,
            .end_ns = 0,
            .bytes = 0,
            .content_encoding = .identity,
        };
    };

    const ContentEncoding = enum {
        identity,
        gzip,
    };

    const alpn_protocols = [_][*:0]const u8{ "h2", "http/1.1" };

    pub fn init(allocator: std.mem.Allocator, host: []const u8, use_tls: bool) !*Http2Client {
        const client = try allocator.create(Http2Client);
        errdefer allocator.destroy(client);

        const host_z = try allocator.dupeZ(u8, host);
        errdefer allocator.free(host_z);

        client.* = .{
            .allocator = allocator,
            .loop = uv.uv_default_loop(),
            .tls = .{},
            .h2_session = null,
            .host = host_z,
            .use_tls = use_tls,
            .connected = 0,
            .connect_pending = false,
            .write_buf = .{},
            .requests = undefined,
            .request_count = 0,
            .requests_done = 0,
        };
        for (&client.requests) |*req| req.* = RequestState.empty;

        // errdefers above release host_z and client on failure.
        if (tlsuv.tlsuv_stream_init(client.loop, &client.tls, null) != 0)
            return error.ConnectionFailed;

        _ = tlsuv.tlsuv_stream_set_hostname(&client.tls, host_z.ptr);
        _ = tlsuv.tlsuv_stream_set_protocols(&client.tls, 2, &alpn_protocols);

        return client;
    }

    pub fn deinit(self: *Http2Client) void {
        // Drop user callbacks first so nothing fires into freed user state
        // while the close below pumps the loop.
        for (&self.requests) |*req| {
            req.on_data = null;
            req.on_complete = null;
            req.on_error = null;
            req.userdata = null;
        }

        if (self.connected > 0) {
            self.tls.data = self;
            _ = tlsuv.tlsuv_stream_close(&self.tls, onStreamClose);
            // onStreamClose flips `connected` to -2 once libuv releases the handle.
            while (self.connected > 0) _ = uv.uv_run(self.loop, uv.RUN_ONCE);
        }

        if (self.h2_session) |session| nghttp2.nghttp2_session_del(session);

        for (&self.requests) |*req| {
            // Recycled slots (-1) were already freed in recycleCompletedRequests.
            if (req.stream_id != -1) {
                if (req.path) |p| self.allocator.free(p);
                req.response_body.deinit(self.allocator);
            }
        }

        self.write_buf.deinit(self.allocator);
        self.allocator.free(self.host);
        self.allocator.destroy(self);
    }

    /// Free all per-request state and mark every slot unused.
    pub fn resetRequests(self: *Http2Client) void {
        for (self.requests[0..self.request_count]) |*req| {
            if (req.stream_id != -1) {
                if (req.path) |p| self.allocator.free(p);
                req.response_body.deinit(self.allocator);
            }
            req.* = RequestState.empty;
        }
        self.request_count = 0;
        self.requests_done = 0;
    }

    /// True when another stream can be submitted on this connection.
    pub fn hasCapacity(self: *const Http2Client) bool {
        for (self.requests[0..self.request_count]) |req| {
            if (req.stream_id == -1) return true; // recycled slot available
        }
        // Fixed off-by-one: the last slot is usable too — findOrAllocSlot
        // allocates while request_count < MAX_PENDING_REQUESTS, so testing
        // `< MAX_PENDING_REQUESTS - 1` needlessly wasted one stream.
        return self.request_count < MAX_PENDING_REQUESTS;
    }

    /// Release resources of finished streams and mark their slots reusable.
    pub fn recycleCompletedRequests(self: *Http2Client) void {
        if (self.requests_done == 0) return;

        for (self.requests[0..self.request_count]) |*req| {
            if (req.done and req.stream_id != -1) {
                if (req.path) |p| self.allocator.free(p);
                req.response_body.deinit(self.allocator);
                req.path = null;
                req.response_body = .{};
                req.stream_id = -1;
            }
        }
    }

    // Prefer reusing a recycled slot; otherwise grow into the array.
    fn findOrAllocSlot(self: *Http2Client) ?*RequestState {
        for (self.requests[0..self.request_count]) |*req| {
            if (req.stream_id == -1) return req;
        }
        if (self.request_count < MAX_PENDING_REQUESTS) {
            const req = &self.requests[self.request_count];
            self.request_count += 1;
            return req;
        }
        return null;
    }

    fn onStreamClose(handle: *uv.handle_t) callconv(.c) void {
        const tls: *tlsuv.stream_t = @ptrCast(@alignCast(handle));
        const client: *Http2Client = @ptrCast(@alignCast(tls.data));
        client.connected = -2;
    }

    fn findRequest(self: *Http2Client, stream_id: i32) ?*RequestState {
        for (self.requests[0..self.request_count]) |*req| if (req.stream_id == stream_id) return req;
        return null;
    }

    // nghttp2 send callback: buffer frames; flush() performs the actual write.
    fn h2Send(_: ?*nghttp2.session, data: [*c]const u8, len: usize, _: c_int, ud: ?*anyopaque) callconv(.c) isize {
        const client: *Http2Client = @ptrCast(@alignCast(ud));
        client.write_buf.appendSlice(client.allocator, data[0..len]) catch return nghttp2.ERR_NOMEM;
        return @intCast(len);
    }

    // END_STREAM on a frame finishes the request even before stream close.
    fn h2FrameRecv(_: ?*nghttp2.session, frame: *const nghttp2.frame, ud: ?*anyopaque) callconv(.c) c_int {
        const client: *Http2Client = @ptrCast(@alignCast(ud));
        if (frame.hd.flags & nghttp2.FLAG_END_STREAM != 0) {
            if (client.findRequest(frame.hd.stream_id)) |req| {
                if (!req.done) {
                    req.done = true;
                    req.end_ns = @intCast(std.time.nanoTimestamp());
                    client.requests_done += 1;
                    if (req.on_complete) |cb| cb(req.status_code, req.userdata);
                }
            }
        }
        return 0;
    }

    // Body bytes: stream to the callback when present, buffer otherwise.
    fn h2DataChunk(session: ?*nghttp2.session, _: u8, stream_id: i32, data: [*c]const u8, len: usize, ud: ?*anyopaque) callconv(.c) c_int {
        const client: *Http2Client = @ptrCast(@alignCast(ud));
        const req = client.findRequest(stream_id) orelse return 0;
        if (req.on_data) |cb| {
            cb(data[0..len], req.userdata);
        } else {
            req.response_body.appendSlice(client.allocator, data[0..len]) catch {
                req.has_error = true;
            };
        }
        req.bytes += len;
        // Manual flow control: credit the window for the consumed bytes.
        if (session) |s| _ = nghttp2.nghttp2_session_consume(s, stream_id, len);
        return 0;
    }

    fn h2Header(_: ?*nghttp2.session, frame: *const nghttp2.frame, name: [*c]const u8, namelen: usize, value: [*c]const u8, valuelen: usize, _: u8, ud: ?*anyopaque) callconv(.c) c_int {
        const client: *Http2Client = @ptrCast(@alignCast(ud));
        if (frame.hd.type != nghttp2.HEADERS) return 0;
        const req = client.findRequest(frame.hd.stream_id) orelse return 0;
        if (namelen == 7 and std.mem.eql(u8, name[0..7], ":status"))
            req.status_code = std.fmt.parseInt(u16, value[0..valuelen], 10) catch 0;
        if (std.mem.eql(u8, name[0..namelen], "content-encoding")) {
            if (std.mem.startsWith(u8, value[0..valuelen], "gzip")) {
                req.content_encoding = .gzip;
            }
        }
        return 0;
    }

    fn h2StreamClose(_: ?*nghttp2.session, stream_id: i32, error_code: u32, ud: ?*anyopaque) callconv(.c) c_int {
        const client: *Http2Client = @ptrCast(@alignCast(ud));
        const req = client.findRequest(stream_id) orelse return 0;
        if (!req.done) {
            req.done = true;
            req.end_ns = @intCast(std.time.nanoTimestamp());
            client.requests_done += 1;
            if (error_code != 0) {
                req.has_error = true;
                if (req.on_error) |cb| cb(FetchError.Http2Error, req.userdata);
            } else if (req.on_complete) |cb| cb(req.status_code, req.userdata);
        }
        return 0;
    }

    /// Create the nghttp2 client session and queue SETTINGS plus a large
    /// connection-level window update.
    fn initH2(self: *Http2Client) !void {
        var callbacks: *nghttp2.session_callbacks = undefined;
        if (nghttp2.nghttp2_session_callbacks_new(&callbacks) != 0) return error.Http2Error;
        defer nghttp2.nghttp2_session_callbacks_del(callbacks);

        nghttp2.nghttp2_session_callbacks_set_send_callback2(callbacks, h2Send);
        nghttp2.nghttp2_session_callbacks_set_on_frame_recv_callback(callbacks, h2FrameRecv);
        nghttp2.nghttp2_session_callbacks_set_on_data_chunk_recv_callback(callbacks, h2DataChunk);
        nghttp2.nghttp2_session_callbacks_set_on_header_callback(callbacks, h2Header);
        nghttp2.nghttp2_session_callbacks_set_on_stream_close_callback(callbacks, h2StreamClose);

        var session: *nghttp2.session = undefined;
        if (nghttp2.nghttp2_session_client_new(&session, callbacks, self) != 0) return error.Http2Error;
        self.h2_session = session;

        var settings = [_]nghttp2.settings_entry{
            .{ .settings_id = nghttp2.SETTINGS_MAX_CONCURRENT_STREAMS, .value = MAX_PENDING_REQUESTS },
            .{ .settings_id = nghttp2.SETTINGS_INITIAL_WINDOW_SIZE, .value = 16 * 1024 * 1024 },
        };
        if (nghttp2.nghttp2_submit_settings(session, nghttp2.FLAG_NONE, &settings, settings.len) != 0) return error.Http2Error;

        // Grow the 64 KiB default connection window to match the 16 MiB
        // per-stream window configured above.
        const conn_window_increase: i32 = (16 * 1024 * 1024) - 65535;
        _ = nghttp2.nghttp2_submit_window_update(session, nghttp2.FLAG_NONE, 0, conn_window_increase);
    }

    /// Context for one in-flight uv write; owns the duplicated frame bytes
    /// and remembers which allocator produced them.
    const WriteCtx = struct {
        req: uv.write_t,
        allocator: std.mem.Allocator,
        data: []u8,
    };

    /// Drain pending nghttp2 output into write_buf, then hand the bytes to
    /// libuv in one write. The WriteCtx travels via req.data to onWrite.
    fn flush(self: *Http2Client) !void {
        if (self.h2_session) |session| {
            while (nghttp2.nghttp2_session_want_write(session) != 0) {
                if (nghttp2.nghttp2_session_send(session) != 0) break;
            }
        }
        if (self.write_buf.items.len == 0) return;

        const data = try self.allocator.dupe(u8, self.write_buf.items);
        errdefer self.allocator.free(data);
        self.write_buf.clearRetainingCapacity();

        const ctx = try self.allocator.create(WriteCtx);
        errdefer self.allocator.destroy(ctx);
        ctx.* = .{ .req = .{}, .allocator = self.allocator, .data = data };
        ctx.req.data = ctx;

        var buf = uv.buf_t{ .base = data.ptr, .len = data.len };
        if (tlsuv.tlsuv_stream_write(&ctx.req, &self.tls, &buf, onWrite) != 0) {
            return error.ConnectionFailed; // errdefers free data and ctx
        }
    }

    fn onWrite(wr: *uv.write_t, _: c_int) callconv(.c) void {
        // Fixed: these allocations come from self.allocator, but were
        // previously released with std.c.free — an allocator mismatch that
        // corrupts the heap for any non-libc allocator.
        const ctx: *WriteCtx = @ptrCast(@alignCast(wr.data));
        ctx.allocator.free(ctx.data);
        ctx.allocator.destroy(ctx);
    }

    fn allocBuf(_: *uv.handle_t, size: usize, buf: *uv.buf_t) callconv(.c) void {
        const mem = std.c.malloc(size) orelse {
            // Fixed: report allocation failure with an empty buffer instead
            // of returning with buf.len uninitialized.
            buf.* = .{ .base = null, .len = 0 };
            return;
        };
        buf.base = @ptrCast(mem);
        buf.len = size;
    }

    fn onRead(stream: *uv.stream_t, nread: isize, buf: *const uv.buf_t) callconv(.c) void {
        const tls: *tlsuv.stream_t = @ptrCast(@alignCast(stream));
        const client: *Http2Client = @ptrCast(@alignCast(tls.data));
        // Buffer came from allocBuf's std.c.malloc; always release it.
        defer if (buf.base) |b| std.c.free(b);
        if (nread < 0) {
            // Connection-level failure: fail every stream still in flight.
            for (client.requests[0..client.request_count]) |*req| if (!req.done) {
                req.done = true;
                req.has_error = true;
                client.requests_done += 1;
                if (req.on_error) |cb| cb(FetchError.ConnectionFailed, req.userdata);
            };
            return;
        }
        if (nread > 0 and client.h2_session != null) {
            _ = nghttp2.nghttp2_session_mem_recv(client.h2_session.?, @ptrCast(buf.base), @intCast(nread));
            // Receiving frames may queue output (WINDOW_UPDATE, SETTINGS ack).
            client.flush() catch {};
        }
    }

    fn onConnect(req: *uv.connect_t, status: c_int) callconv(.c) void {
        const ctx: *ConnectCtx = @ptrCast(@alignCast(req.data));
        defer ctx.client.allocator.destroy(ctx);
        if (status < 0) {
            ctx.client.connected = -1;
            return;
        }
        ctx.client.connected = 1;
        ctx.client.tls.data = ctx.client;
        ctx.client.initH2() catch {
            ctx.client.connected = -1;
            return;
        };
        _ = tlsuv.tlsuv_stream_read_start(&ctx.client.tls, allocBuf, onRead);
        // Push the connection preface / SETTINGS immediately.
        ctx.client.flush() catch {};
    }

    const ConnectCtx = struct { client: *Http2Client, req: uv.connect_t };

    // Kick off the TCP+TLS connect if one is not already in flight
    // (shared by ensureConnected and initiateConnectAsync).
    fn startConnect(self: *Http2Client) !void {
        if (self.connect_pending) return;
        const ctx = try self.allocator.create(ConnectCtx);
        ctx.* = .{ .client = self, .req = .{} };
        ctx.req.data = ctx;
        if (tlsuv.tlsuv_stream_connect(&ctx.req, &self.tls, self.host.ptr, if (self.use_tls) 443 else 80, onConnect) != 0) {
            self.allocator.destroy(ctx);
            return error.ConnectionFailed;
        }
        self.connect_pending = true;
    }

    /// Block (spinning the uv loop) until the connection is established.
    fn ensureConnected(self: *Http2Client) !void {
        if (self.connected > 0) return;
        if (self.connected < 0) return error.ConnectionFailed;

        var conn_start: u64 = @intCast(std.time.nanoTimestamp());
        try self.startConnect();

        var loop_count: u32 = 0;
        while (self.connected == 0) {
            _ = uv.uv_run(self.loop, uv.RUN_ONCE);
            loop_count += 1;
        }
        conn_start = debug.timer(" h2: tls connect", conn_start);
        debug.log(" h2: connect loop iterations={d}", .{loop_count});
        if (self.connected < 0) return error.ConnectionFailed;
    }

    /// Begin connecting without waiting; completion happens on the uv loop.
    pub fn initiateConnectAsync(self: *Http2Client) !void {
        if (self.connected > 0) return;
        if (self.connected < 0) return error.ConnectionFailed;
        try self.startConnect();
    }

    fn makeNv(name: [:0]const u8, value: [:0]const u8) nghttp2.nv {
        return .{
            .name = @constCast(name.ptr),
            .value = @constCast(value.ptr),
            .namelen = name.len,
            .valuelen = value.len,
            .flags = nghttp2.NV_FLAG_NONE,
        };
    }

    /// Blocking GET with an `application/json` accept header.
    pub fn get(self: *Http2Client, path: []const u8, allocator: std.mem.Allocator) ![]u8 {
        return self.getWithAccept(path, "application/json", allocator);
    }

    /// Blocking GET; spins the uv loop until the stream completes.
    /// Returns the body duped with `allocator` (caller frees), or
    /// error.ResponseError for any non-200 status or stream failure.
    pub fn getWithAccept(self: *Http2Client, path: []const u8, accept: [:0]const u8, allocator: std.mem.Allocator) ![]u8 {
        try self.ensureConnected();
        if (self.request_count >= MAX_PENDING_REQUESTS) self.resetRequests();
        const req = &self.requests[self.request_count];
        self.request_count += 1;

        req.* = RequestState.empty;
        req.path = try self.allocator.dupeZ(u8, path);
        req.start_ns = @intCast(std.time.nanoTimestamp());

        const session = self.h2_session orelse return error.Http2Error;

        var hdrs = [_]nghttp2.nv{
            makeNv(":method", "GET"),
            makeNv(":path", req.path.?),
            // Fixed: advertise the scheme actually in use instead of an
            // unconditional "https".
            makeNv(":scheme", if (self.use_tls) "https" else "http"),
            makeNv(":authority", self.host),
            makeNv("accept", accept),
            makeNv("user-agent", user_agent),
        };

        const sid = nghttp2.nghttp2_submit_request(session, null, &hdrs, hdrs.len, null, req);
        if (sid < 0) {
            self.request_count -= 1;
            if (req.path) |p| self.allocator.free(p);
            req.path = null; // fixed: no dangling pointer left in the slot
            return error.Http2Error;
        }

        req.stream_id = sid;
        try self.flush();
        while (!req.done) {
            _ = uv.uv_run(self.loop, uv.RUN_ONCE);
            try self.flush();
        }

        if (req.has_error or req.status_code != 200) return error.ResponseError;
        return try allocator.dupe(u8, req.response_body.items);
    }

    /// Submit a GET whose body is delivered incrementally via callbacks.
    /// Non-blocking: the caller drives the uv loop (run()/Fetcher.tick()).
    pub fn getStream(self: *Http2Client, path: []const u8, on_data: *const fn ([]const u8, ?*anyopaque) void, on_complete: *const fn (u16, ?*anyopaque) void, on_error: *const fn (FetchError, ?*anyopaque) void, userdata: ?*anyopaque) !void {
        try self.ensureConnected();
        const req = self.findOrAllocSlot() orelse return error.OutOfMemory;

        req.* = RequestState.empty;
        req.path = try self.allocator.dupeZ(u8, path);
        req.on_data = on_data;
        req.on_complete = on_complete;
        req.on_error = on_error;
        req.userdata = userdata;
        req.start_ns = @intCast(std.time.nanoTimestamp());

        const session = self.h2_session orelse return error.Http2Error;

        var hdrs = [_]nghttp2.nv{
            makeNv(":method", "GET"),
            makeNv(":path", req.path.?),
            // Fixed: scheme follows use_tls, as in getWithAccept.
            makeNv(":scheme", if (self.use_tls) "https" else "http"),
            makeNv(":authority", self.host),
            makeNv("accept", "*/*"),
            makeNv("user-agent", user_agent),
        };

        const sid = nghttp2.nghttp2_submit_request(session, null, &hdrs, hdrs.len, null, req);
        if (sid < 0) {
            if (req.path) |p| self.allocator.free(p);
            req.path = null; // fixed: no dangling pointer left in the slot
            req.stream_id = -1; // return the slot to the recycled pool
            return error.Http2Error;
        }

        req.stream_id = sid;
        try self.flush();
    }

    /// Drive the uv loop until every submitted request has completed.
    /// Returns error.ResponseError if any request finished with an error.
    pub fn run(self: *Http2Client) !void {
        const run_start: u64 = @intCast(std.time.nanoTimestamp());
        var loop_count: u32 = 0;
        var last_done: usize = 0;
        var last_report: u64 = run_start;

        while (self.requests_done < self.request_count) {
            // uv_run returning 0 means no live handles remain; bail out.
            if (uv.uv_run(self.loop, uv.RUN_ONCE) == 0) break;
            try self.flush();
            loop_count += 1;

            // Once-per-second progress logging.
            const now: u64 = @intCast(std.time.nanoTimestamp());
            if (now - last_report > 1_000_000_000) {
                const done_delta = self.requests_done - last_done;
                debug.log(" h2: progress {d}/{d} (+{d} in last 1s) loops={d}", .{
                    self.requests_done, self.request_count,
                    done_delta,         loop_count,
                });
                last_done = self.requests_done;
                last_report = now;
            }
        }

        const elapsed_ns: u64 = @intCast(@as(i128, @intCast(std.time.nanoTimestamp())) - @as(i128, run_start));
        const elapsed_ms = elapsed_ns / 1_000_000;
        debug.log(" h2: run complete in {d}ms, {d} loops, {d}/{d} done", .{
            elapsed_ms,         loop_count,
            self.requests_done, self.request_count,
        });

        var error_count: usize = 0;
        for (self.requests[0..self.request_count]) |req| {
            if (req.has_error) error_count += 1;
        }
        if (error_count > 0) {
            debug.log(" h2: {d} requests had errors", .{error_count});
            return error.ResponseError;
        }
    }
};

/// Per-download bookkeeping wrapping the caller's StreamHandler.
pub const TarballCtx = struct {
    handler: StreamHandler,
    done: bool,
    has_error: bool,
    url: []const u8,
    start_ns: u64,
    bytes: usize,
};

const TarballStats = struct {
    url: []const u8,
    bytes: usize,
    elapsed_ms: u64,
};

/// Trampolines forwarding stream events to the wrapped handler while
/// tracking byte counts and completion state.
const TarballCallbacks = struct {
    fn onData(data: []const u8, ud: ?*anyopaque) void {
        const ctx: *TarballCtx = @ptrCast(@alignCast(ud));
        ctx.bytes += data.len;
        ctx.handler.on_data(data, ctx.handler.user_data);
    }

    fn onComplete(status: u16, ud: ?*anyopaque) void {
        const ctx: *TarballCtx = @ptrCast(@alignCast(ud));
        ctx.handler.on_complete(status, ctx.handler.user_data);
        ctx.done = true;
    }

    fn onError(err: FetchError, ud: ?*anyopaque) void {
        const ctx: *TarballCtx = @ptrCast(@alignCast(ud));
        ctx.handler.on_error(err, ctx.handler.user_data);
        ctx.done = true;
        ctx.has_error = true;
    }
};

/// Connection-pooling front end: a small pool for registry metadata and a
/// larger pool for tarball downloads against the same registry host.
pub const Fetcher = struct {
    allocator: std.mem.Allocator,
    registry_host: []const u8,
    meta_clients: [NUM_META_CONNECTIONS]?*Http2Client,
    meta_clients_initialized: bool,
    // Downloads waiting for a connection with free stream capacity.
    pending: std.ArrayListUnmanaged(PendingRequest),
    tarball_clients: [NUM_CONNECTIONS]?*Http2Client,
    tarball_clients_initialized: bool,
    // Live TarballCtx pointers; entries are removed once done.
    tarball_contexts: std.ArrayListUnmanaged(*TarballCtx),
    tarball_round_robin: usize,
    tarball_stats: std.ArrayListUnmanaged(TarballStats),

    pub fn init(allocator: std.mem.Allocator, registry_host: []const u8) !*Fetcher {
        const f = try allocator.create(Fetcher);
        errdefer allocator.destroy(f); // fixed: don't leak f if the dupe fails
        f.* = .{
            .allocator = allocator,
            .registry_host = try allocator.dupe(u8, registry_host),
            .meta_clients = [_]?*Http2Client{null} ** NUM_META_CONNECTIONS,
            .meta_clients_initialized = false,
            .pending = .{},
            .tarball_clients = [_]?*Http2Client{null} ** NUM_CONNECTIONS,
            .tarball_clients_initialized = false,
            .tarball_contexts = .{},
            .tarball_round_robin = 0,
            .tarball_stats = .{},
        };
        return f;
    }

    pub fn deinit(self: *Fetcher) void {
        for (&self.meta_clients) |*maybe_client| {
            if (maybe_client.*) |c| {
                c.deinit();
                maybe_client.* = null;
            }
        }
        for (self.pending.items) |req| self.allocator.free(req.url);
        self.pending.deinit(self.allocator);
        for (&self.tarball_clients) |*maybe_client| {
            if (maybe_client.*) |c| {
                c.deinit();
                maybe_client.* = null;
            }
        }
        for (self.tarball_contexts.items) |ctx| {
            self.allocator.free(ctx.url);
            self.allocator.destroy(ctx);
        }
        self.tarball_contexts.deinit(self.allocator);
        for (self.tarball_stats.items) |stat| self.allocator.free(stat.url);
        self.tarball_stats.deinit(self.allocator);
        self.allocator.free(self.registry_host);
        self.allocator.destroy(self);
    }

    // Bring up the metadata pool; succeeds if at least one connection works.
    fn ensureMetaClients(self: *Fetcher) !void {
        if (self.meta_clients_initialized) return;

        for (&self.meta_clients, 0..) |*slot, i| {
            const client = Http2Client.init(self.allocator, self.registry_host, true) catch |err| {
                debug.log("fetcher: failed to init meta connection {d}: {}", .{ i, err });
                continue;
            };
            client.ensureConnected() catch |err| {
                debug.log("fetcher: failed to connect meta {d}: {}", .{ i, err });
                client.deinit();
                continue;
            };
            slot.* = client;
        }

        var any_connected = false;
        for (self.meta_clients) |slot| {
            if (slot != null) {
                any_connected = true;
                break;
            }
        }

        if (!any_connected) return error.ConnectionFailed;
        self.meta_clients_initialized = true;
    }

    pub fn resetMetaClients(self: *Fetcher) void {
        for (&self.meta_clients) |*slot| {
            if (slot.*) |client| {
                client.deinit();
                slot.* = null;
            }
        }
        self.meta_clients_initialized = false;
    }

    // Bring up the tarball pool (blocking); succeeds if any connection works.
    fn ensureTarballClients(self: *Fetcher) !void {
        if (self.tarball_clients_initialized) return;

        debug.log("fetcher: initializing {d} persistent connections", .{NUM_CONNECTIONS});
        const init_start: u64 = @intCast(std.time.nanoTimestamp());

        for (&self.tarball_clients, 0..) |*slot, i| {
            const client = Http2Client.init(self.allocator, self.registry_host, true) catch |err| {
                debug.log("fetcher: failed to init connection {d}: {}", .{ i, err });
                continue;
            };
            client.ensureConnected() catch |err| {
                debug.log("fetcher: failed to connect {d}: {}", .{ i, err });
                client.deinit();
                continue;
            };
            slot.* = client;
        }

        var any_connected = false;
        for (self.tarball_clients) |slot| {
            if (slot != null) {
                any_connected = true;
                break;
            }
        }

        if (!any_connected) return error.ConnectionFailed;
        self.tarball_clients_initialized = true;

        _ = debug.timer("fetcher: connection pool init", init_start);
    }

    // Round-robin scan for a connection with free stream capacity.
    fn findAvailableClient(self: *Fetcher) ?struct { client: *Http2Client, idx: usize } {
        var attempts: usize = 0;
        while (attempts < NUM_CONNECTIONS) : (attempts += 1) {
            const idx = (self.tarball_round_robin + attempts) % NUM_CONNECTIONS;
            if (self.tarball_clients[idx]) |client| {
                if (client.hasCapacity()) return .{ .client = client, .idx = idx };
            }
        }
        return null;
    }

    /// Start connecting the tarball pool without blocking; handshakes finish
    /// later on the uv loop.
    pub fn initiateTarballConnectionsAsync(self: *Fetcher) void {
        if (self.tarball_clients_initialized) return;
        debug.log("fetcher: initiating {d} tarball connections (async)", .{NUM_CONNECTIONS});

        for (&self.tarball_clients) |*slot| {
            const client = Http2Client.init(self.allocator, self.registry_host, true) catch {
                continue;
            };
            client.initiateConnectAsync() catch {
                client.deinit();
                continue;
            };
            slot.* = client;
        }

        var any_connected = false;
        for (self.tarball_clients) |slot| {
            if (slot != null) {
                any_connected = true;
                break;
            }
        }

        if (any_connected) self.tarball_clients_initialized = true;
    }

    /// Queue a tarball download. When every connection is saturated the
    /// request parks in `pending` and is dispatched later by tick().
    pub fn queueTarballAsync(self: *Fetcher, url: []const u8, handler: StreamHandler) !void {
        try self.ensureTarballClients();
        const parsed = try ParsedUrl.parse(url);

        const available = self.findAvailableClient() orelse {
            try self.pending.append(self.allocator, .{
                .url = try self.allocator.dupe(u8, url),
                .handler = handler,
            });
            return;
        };

        const ctx = try self.allocator.create(TarballCtx);
        errdefer self.allocator.destroy(ctx);
        ctx.* = .{
            .handler = handler,
            .done = false,
            .has_error = false,
            .url = try self.allocator.dupe(u8, url),
            .start_ns = @intCast(std.time.nanoTimestamp()),
            .bytes = 0,
        };
        errdefer self.allocator.free(ctx.url);

        try self.tarball_contexts.append(self.allocator, ctx);

        available.client.getStream(
            parsed.path,
            TarballCallbacks.onData,
            TarballCallbacks.onComplete,
            TarballCallbacks.onError,
            ctx,
        ) catch |err| {
            // Fixed: on submit failure the context must leave the list,
            // otherwise finishTarballs() waits forever on a stream that was
            // never started (and the freed ctx would dangle in the list).
            _ = self.tarball_contexts.pop();
            return err; // errdefers above release ctx.url and ctx
        };

        self.tarball_round_robin = (available.idx + 1) % NUM_CONNECTIONS;
    }

    /// One non-blocking pump of the download machinery. Returns the number
    /// of downloads that completed during this tick.
    pub fn tick(self: *Fetcher) usize {
        self.ensureTarballClients() catch return 0;
        const loop = uv.uv_default_loop();

        for (&self.tarball_clients) |maybe_client| {
            if (maybe_client) |c| c.flush() catch {};
        }
        _ = uv.uv_run(loop, uv.RUN_NOWAIT);

        for (&self.tarball_clients) |maybe_client| {
            if (maybe_client) |c| c.recycleCompletedRequests();
        }

        const completed = self.cleanupCompletedContexts();
        self.dispatchPending();

        return completed;
    }

    // Free finished contexts; returns how many completed.
    fn cleanupCompletedContexts(self: *Fetcher) usize {
        var completed: usize = 0;
        var i: usize = 0;
        while (i < self.tarball_contexts.items.len) {
            const ctx = self.tarball_contexts.items[i];
            if (ctx.done) {
                completed += 1;
                self.allocator.free(ctx.url);
                self.allocator.destroy(ctx);
                _ = self.tarball_contexts.swapRemove(i);
            } else i += 1;
        }
        return completed;
    }

    // Move parked requests onto connections that now have capacity.
    fn dispatchPending(self: *Fetcher) void {
        while (self.pending.items.len > 0) {
            const available = self.findAvailableClient() orelse break;
            const req = self.pending.pop() orelse break;

            const handler = req.handler orelse {
                self.allocator.free(req.url);
                continue;
            };

            self.dispatchRequest(available.client, req.url, handler) catch |err| {
                handler.on_error(errToFetchError(err), handler.user_data);
                self.allocator.free(req.url);
                continue;
            };
        }
    }

    // Submit one parked request. On success the TarballCtx takes ownership
    // of `url`; on error ownership stays with the caller.
    fn dispatchRequest(self: *Fetcher, client: *Http2Client, url: []const u8, handler: StreamHandler) !void {
        const parsed = try ParsedUrl.parse(url);

        const ctx = try self.allocator.create(TarballCtx);
        errdefer self.allocator.destroy(ctx);
        ctx.* = .{
            .handler = handler,
            .done = false,
            .has_error = false,
            .url = url,
            .start_ns = @intCast(std.time.nanoTimestamp()),
            .bytes = 0,
        };

        try self.tarball_contexts.append(self.allocator, ctx);

        client.getStream(
            parsed.path,
            TarballCallbacks.onData,
            TarballCallbacks.onComplete,
            TarballCallbacks.onError,
            ctx,
        ) catch |err| {
            // Fixed: previously the errdefer destroyed ctx while it was still
            // referenced from tarball_contexts, leaving a dangling pointer.
            _ = self.tarball_contexts.pop();
            return err;
        };
    }

    fn errToFetchError(err: anyerror) FetchError {
        return switch (err) {
            error.InvalidUrl => FetchError.InvalidUrl,
            error.OutOfMemory => FetchError.OutOfMemory,
            else => FetchError.Http2Error,
        };
    }

/// Number of tarball downloads currently in flight (contexts created but not
/// yet reaped by `finishTarballs`).
pub fn pendingTarballCount(self: *Fetcher) usize {
    return self.tarball_contexts.items.len;
}

/// Drive the libuv loop until every queued and in-flight tarball download has
/// completed. Responsibilities per iteration:
///   1. flush each HTTP/2 tarball client so queued frames hit the socket,
///   2. run the loop once (`uv.RUN_ONCE`),
///   3. reap finished `TarballCtx`s (record stats, free url + ctx),
///   4. refill client capacity from `self.pending`.
/// Emits a progress line at most once per second and, when debug is enabled,
/// a top-5-by-time / top-5-by-size summary at the end.
pub fn finishTarballs(self: *Fetcher) void {
    const loop = uv.uv_default_loop();
    var last_report: u64 = @intCast(std.time.nanoTimestamp());
    var loops: usize = 0;
    var completed: usize = 0;
    const start = last_report;

    while (self.tarball_contexts.items.len > 0 or self.pending.items.len > 0) {
        // Push any buffered HTTP/2 frames before blocking in uv_run.
        for (&self.tarball_clients) |maybe_client| {
            if (maybe_client) |c| c.flush() catch {};
        }

        // uv_run returning 0 means no live handles/requests remain; if we also
        // have nothing queued or in flight, we are done.
        if (uv.uv_run(loop, uv.RUN_ONCE) == 0 and self.pending.items.len == 0 and self.tarball_contexts.items.len == 0) break;
        loops += 1;

        for (&self.tarball_clients) |maybe_client| {
            if (maybe_client) |c| c.recycleCompletedRequests();
        }

        // Reap completed contexts. swapRemove is safe because order of
        // in-flight contexts does not matter; `i` only advances on keep.
        var i: usize = 0;
        while (i < self.tarball_contexts.items.len) {
            const ctx = self.tarball_contexts.items[i];
            if (ctx.done) {
                if (!ctx.has_error) {
                    const elapsed_ms: u64 = @intCast((@as(u64, @intCast(std.time.nanoTimestamp())) - ctx.start_ns) / 1_000_000);
                    // Stats keep their own copy of the url; best-effort on OOM.
                    const url_copy = self.allocator.dupe(u8, ctx.url) catch null;
                    if (url_copy) |url| {
                        self.tarball_stats.append(self.allocator, .{ .url = url, .bytes = ctx.bytes, .elapsed_ms = elapsed_ms }) catch {};
                    }
                }
                // ctx owns the url (transferred from the pending request below).
                self.allocator.free(ctx.url);
                self.allocator.destroy(ctx);
                _ = self.tarball_contexts.swapRemove(i);
                completed += 1;
            } else {
                i += 1;
            }
        }

        // Dispatch pending requests onto whichever client has stream capacity.
        while (self.pending.items.len > 0) {
            var queued = false;
            for (&self.tarball_clients, 0..) |maybe_client, conn_idx| {
                if (maybe_client) |client| {
                    if (client.hasCapacity()) {
                        const maybe_req = self.pending.pop();
                        const req = maybe_req orelse break;
                        if (req.handler) |handler| {
                            const parsed = ParsedUrl.parse(req.url) catch {
                                handler.on_error(FetchError.InvalidUrl, handler.user_data);
                                self.allocator.free(req.url);
                                continue;
                            };

                            const ctx = self.allocator.create(TarballCtx) catch {
                                handler.on_error(FetchError.OutOfMemory, handler.user_data);
                                self.allocator.free(req.url);
                                continue;
                            };
                            // Ownership of req.url moves into ctx here; the reap
                            // loop above is the single point that frees it.
                            ctx.* = .{
                                .handler = handler,
                                .done = false,
                                .has_error = false,
                                .url = req.url,
                                .start_ns = @intCast(std.time.nanoTimestamp()),
                                .bytes = 0,
                            };
                            self.tarball_contexts.append(self.allocator, ctx) catch {
                                self.allocator.destroy(ctx);
                                self.allocator.free(req.url);
                                continue;
                            };

                            // Callbacks are stateless structs; ctx travels via ud.
                            client.getStream(
                                parsed.path,
                                struct {
                                    fn onData(data: []const u8, ud: ?*anyopaque) void {
                                        const c: *TarballCtx = @ptrCast(@alignCast(ud));
                                        c.bytes += data.len;
                                        c.handler.on_data(data, c.handler.user_data);
                                    }
                                }.onData,
                                struct {
                                    fn onComplete(status: u16, ud: ?*anyopaque) void {
                                        const c: *TarballCtx = @ptrCast(@alignCast(ud));
                                        c.handler.on_complete(status, c.handler.user_data);
                                        if (debug.enabled) {
                                            const elapsed_ms: u64 = @intCast((@as(u64, @intCast(std.time.nanoTimestamp())) - c.start_ns) / 1_000_000);
                                            debug.log(" tarball: done {s} {d}ms {d} bytes status={d}", .{ c.url, elapsed_ms, c.bytes, status });
                                        }
                                        // Mark done last: the reap loop frees ctx once it sees this.
                                        c.done = true;
                                    }
                                }.onComplete,
                                struct {
                                    fn onError(err: FetchError, ud: ?*anyopaque) void {
                                        const c: *TarballCtx = @ptrCast(@alignCast(ud));
                                        c.handler.on_error(err, c.handler.user_data);
                                        if (debug.enabled) {
                                            const elapsed_ms: u64 = @intCast((@as(u64, @intCast(std.time.nanoTimestamp())) - c.start_ns) / 1_000_000);
                                            debug.log(" tarball: error {s} {d}ms {d} bytes", .{ c.url, elapsed_ms, c.bytes });
                                        }
                                        c.done = true;
                                        c.has_error = true;
                                    }
                                }.onError,
                                ctx,
                            ) catch {
                                handler.on_error(FetchError.Http2Error, handler.user_data);
                                // ctx is already in tarball_contexts; flag done so
                                // the reap loop cleans it up next pass.
                                ctx.done = true;
                            };
                            queued = true;
                            _ = conn_idx;
                        } else {
                            // Request without a handler: nothing to notify, just
                            // release the url we own.
                            self.allocator.free(req.url);
                        }
                        break;
                    }
                }
            }
            if (!queued) break; // every client at capacity; let uv_run make progress
        }

        // Throttled progress report (at most once per second).
        const now: u64 = @intCast(std.time.nanoTimestamp());
        if (now - last_report > 1_000_000_000) {
            var total_bytes: usize = 0;
            for (self.tarball_contexts.items) |ctx| {
                total_bytes += ctx.bytes;
            }
            debug.log(" h2: {d} in-flight, {d} pending, {d} completed, {d} loops", .{
                self.tarball_contexts.items.len,
                self.pending.items.len,
                completed,
                loops,
            });
            debug.log(" h2: tarball progress in-flight bytes={d}", .{ total_bytes });
            last_report = now;
        }
    }

    const elapsed_ns: u64 = @intCast(@as(i128, @intCast(std.time.nanoTimestamp())) - @as(i128, start));
    debug.log("fetcher: finishTarballs completed in {d}ms, {d} loops, {d} completed", .{
        elapsed_ns / 1_000_000,
        loops,
        completed,
    });
    if (debug.enabled and self.tarball_stats.items.len > 0) {
        // Fixed-size top-5 leaderboards, maintained by insertion with shift-down
        // (no full sort of tarball_stats).
        var top_time: [5]?TarballStats = .{null} ** 5;
        var top_size: [5]?TarballStats = .{null} ** 5;

        for (self.tarball_stats.items) |stat| {
            // Find first slot this stat outranks by elapsed time.
            var idx_time: usize = top_time.len;
            for (top_time, 0..) |slot, i| {
                if (slot == null or stat.elapsed_ms > slot.?.elapsed_ms) {
                    idx_time = i;
                    break;
                }
            }
            if (idx_time < top_time.len) {
                // Insert and shift lower-ranked entries down one slot.
                var carry = stat;
                var j = idx_time;
                while (j < top_time.len) : (j += 1) {
                    const next = top_time[j];
                    top_time[j] = carry;
                    if (next) |n| {
                        carry = n;
                    } else {
                        break;
                    }
                }
            }

            // Same insertion, ranked by byte count.
            var idx_size: usize = top_size.len;
            for (top_size, 0..) |slot, i| {
                if (slot == null or stat.bytes > slot.?.bytes) {
                    idx_size = i;
                    break;
                }
            }
            if (idx_size < top_size.len) {
                var carry_size = stat;
                var k = idx_size;
                while (k < top_size.len) : (k += 1) {
                    const next_size = top_size[k];
                    top_size[k] = carry_size;
                    if (next_size) |n| {
                        carry_size = n;
                    } else {
                        break;
                    }
                }
            }
        }

        debug.log("fetcher: top tarballs by time", .{});
        for (top_time, 0..) |maybe_stat, i| {
            if (maybe_stat) |stat| {
                debug.log(" {d}. {s} {d}ms {d} bytes", .{ i + 1, stat.url, stat.elapsed_ms, stat.bytes });
            }
        }
        debug.log("fetcher: top tarballs by size", .{});
        for (top_size, 0..) |maybe_stat, i| {
            if (maybe_stat) |stat| {
                debug.log(" {d}. {s} {d} bytes {d}ms", .{ i + 1, stat.url, stat.bytes, stat.elapsed_ms });
            }
        }
    }
}

/// Fetch abbreviated registry metadata for one package (install-v1 format).
/// Returned buffer is owned by the caller (allocated with `allocator`).
pub fn fetchMetadata(self: *Fetcher, package_name: []const u8, allocator: std.mem.Allocator) ![]u8 {
    return self.fetchMetadataFull(package_name, false, allocator);
}

/// Fetch registry metadata for one package over the first available metadata
/// client. `full=true` requests full JSON instead of the abbreviated
/// install-v1 document. Errors: OutOfMemory if the path exceeds 512 bytes,
/// ConnectionFailed if no metadata client is connected.
pub fn fetchMetadataFull(self: *Fetcher, package_name: []const u8, full: bool, allocator: std.mem.Allocator) ![]u8 {
    try self.ensureMetaClients();
    for (self.meta_clients) |maybe_client| {
        if (maybe_client) |client| {
            var path_buf: [512]u8 = undefined;
            const path_slice = std.fmt.bufPrint(&path_buf, "/{s}", .{package_name}) catch return error.OutOfMemory;
            const accept: [:0]const u8 = if (full) "application/json" else "application/vnd.npm.install-v1+json";
            return client.getWithAccept(path_slice, accept, allocator);
        }
    }
    return error.ConnectionFailed;
}

/// One entry of a batched metadata fetch. `data` is null on failure;
/// `compressed` records whether the body arrived gzip-encoded (it is returned
/// decompressed either way).
pub const MetadataResult = struct {
    name: []const u8,
    data: ?[]u8,
    compressed: bool,
    has_error: bool,
};

/// Fetch metadata for many packages, multiplexed over the metadata HTTP/2
/// connections in capacity-sized batches. Returns one MetadataResult per name
/// (same order); caller owns the returned slice and each non-null `data`.
pub fn fetchMetadataBatch(self: *Fetcher, names: []const []const u8, allocator: std.mem.Allocator) ![]MetadataResult {
    if (names.len == 0) return &[_]MetadataResult{};

    var total_start: u64 = @intCast(std.time.nanoTimestamp());
    try self.ensureMetaClients();
    total_start = debug.timer(" meta: get clients", total_start);

    var active_connections: usize = 0;
    for (self.meta_clients) |maybe_client| {
        if (maybe_client != null) active_connections += 1;
    }
    if (active_connections == 0) return error.ConnectionFailed;

    debug.log(" meta: batch {d} packages across {d} connections", .{ names.len, active_connections });

    var results = try allocator.alloc(MetadataResult, names.len);
    for (results, 0..) |*r, i| {
        r.* = .{ .name = names[i], .data = null, .compressed = false, .has_error = false };
    }

    // -1 keeps one request slot per client free; presumably headroom for the
    // client's own bookkeeping — TODO confirm against Http2Client.
    const total_capacity = active_connections * (MAX_PENDING_REQUESTS - 1);
    var offset: usize = 0;
    var batch_num: usize = 0;

    // Scratch buffer reused across all gzip responses in this call.
    var decompress_buf = std.ArrayListUnmanaged(u8){};
    defer decompress_buf.deinit(c_allocator);

    while (offset < names.len) {
        const end = @min(offset + total_capacity, names.len);
        var batch_start: u64 = @intCast(std.time.nanoTimestamp());
        debug.log(" meta: batch {d} ({d}-{d})", .{ batch_num, offset, end });

        // Phase 1: submit one HTTP/2 request per name, round-robin over clients.
        var queued: usize = 0;
        var conn_idx: usize = 0;
        for (offset..end) |i| {
            const result = &results[i];
            const name = names[i];

            // Pick the next connected client with spare request slots.
            var client: ?*Http2Client = null;
            var attempts: usize = 0;
            while (attempts < NUM_META_CONNECTIONS) : (attempts += 1) {
                if (self.meta_clients[conn_idx]) |c| {
                    if (c.h2_session != null and c.connected == 1 and c.request_count < MAX_PENDING_REQUESTS - 1) {
                        client = c; break;
                    }
                }
                conn_idx = (conn_idx + 1) % NUM_META_CONNECTIONS;
            }

            if (client == null) {
                result.has_error = true; continue;
            }

            const c = client.?;
            const session = c.h2_session orelse {
                result.has_error = true; continue;
            };

            var path_buf: [512]u8 = undefined;
            const path = std.fmt.bufPrint(&path_buf, "/{s}", .{name}) catch {
                result.has_error = true;
                continue;
            };

            // hdrs[1] (:path) is heap-duped; the request record takes ownership
            // via req.path and frees it on submit failure below.
            var hdrs = [_]nghttp2.nv{
                Http2Client.makeNv(":method", "GET"),
                Http2Client.makeNv(":path", c.allocator.dupeZ(u8, path) catch {
                    result.has_error = true; continue;
                }),
                Http2Client.makeNv(":scheme", "https"),
                Http2Client.makeNv(":authority", c.host),
                Http2Client.makeNv("accept", "application/vnd.npm.install-v1+json"),
                Http2Client.makeNv("accept-encoding", "gzip"),
                Http2Client.makeNv("user-agent", user_agent),
            };

            const req = &c.requests[c.request_count];
            c.request_count += 1;
            req.* = .{
                .stream_id = 0,
                .path = hdrs[1].value[0..hdrs[1].valuelen :0],
                .on_data = null,
                .on_complete = null,
                .on_error = null,
                .userdata = result, // back-pointer into `results`
                .response_body = .{},
                .status_code = 0,
                .done = false,
                .has_error = false,
                .start_ns = @intCast(std.time.nanoTimestamp()),
                .end_ns = 0,
                .bytes = 0,
                .content_encoding = .identity,
            };

            const sid = nghttp2.nghttp2_submit_request(session, null, &hdrs, hdrs.len, null, req);
            if (sid < 0) {
                // Roll back the slot we claimed and release the duped :path.
                c.request_count -= 1;
                if (req.path) |p| c.allocator.free(p);
                result.has_error = true;
                continue;
            }
            req.stream_id = sid;
            queued += 1;
            conn_idx = (conn_idx + 1) % NUM_META_CONNECTIONS;
        }
        batch_start = debug.timer(" meta: queue requests", batch_start);

        for (self.meta_clients) |maybe_client| {
            if (maybe_client) |c| c.flush() catch {};
        }

        // Phase 2: pump the loop until every submitted request is done/errored.
        const loop = uv.uv_default_loop();
        var all_done = false;
        var loops: usize = 0;
        const run_start: u64 = @intCast(std.time.nanoTimestamp());

        while (!all_done) {
            _ = uv.uv_run(loop, uv.RUN_ONCE);
            loops += 1;

            all_done = true;
            for (self.meta_clients) |maybe_client| {
                if (maybe_client) |c| {
                    for (c.requests[0..c.request_count]) |*req| {
                        if (!req.done and !req.has_error) { all_done = false; break; }
                    }
                    if (!all_done) break;
                }
            }
        }

        const elapsed_ns: u64 = @intCast(@as(i128, @intCast(std.time.nanoTimestamp())) - @as(i128, run_start));
        debug.log(" h2: run complete in {d}ms, {d} loops", .{ elapsed_ns / 1_000_000, loops });
        batch_start = debug.timer(" meta: run h2 loop", batch_start);

        // Phase 3 (debug): per-request timing, slow-request log, totals.
        var slow_count: usize = 0;
        var max_req_ms: u64 = 0;
        var max_req_name: []const u8 = "";
        var total_bytes: usize = 0;
        for (self.meta_clients) |maybe_client| {
            if (maybe_client) |c| {
                for (c.requests[0..c.request_count]) |*req| {
                    const result: *MetadataResult = @ptrCast(@alignCast(req.userdata));
                    const end_ns = if (req.end_ns != 0) req.end_ns else @as(u64, @intCast(std.time.nanoTimestamp()));
                    const duration_ms: u64 = @intCast((end_ns - req.start_ns) / 1_000_000);
                    total_bytes += req.response_body.items.len;
                    if (duration_ms > max_req_ms) {
                        max_req_ms = duration_ms;
                        max_req_name = result.name;
                    }
                    if (duration_ms >= META_SLOW_LOG_MS) {
                        slow_count += 1;
                        debug.log(" meta: slow {s} {d}ms {d} bytes status={d}", .{
                            result.name,
                            duration_ms,
                            req.response_body.items.len,
                            req.status_code,
                        });
                    }
                }
            }
        }
        debug.log(" meta: summary slow={d} max={s} {d}ms total_bytes={d}", .{ slow_count, max_req_name, max_req_ms, total_bytes });

        // Phase 4: copy bodies out (decompressing gzip) and reset client slots.
        var success: usize = 0;
        for (self.meta_clients) |maybe_client| {
            if (maybe_client) |c| {
                for (c.requests[0..c.request_count]) |*req| {
                    const result: *MetadataResult = @ptrCast(@alignCast(req.userdata));
                    if (req.has_error or req.status_code != 200) {
                        result.has_error = true;
                    } else {
                        if (req.content_encoding == .gzip) {
                            decompress_buf.clearRetainingCapacity();
                            const decomp = extractor.GzipDecompressor.init(allocator) catch {
                                result.has_error = true;
                                continue;
                            };
                            defer decomp.deinit();
                            _ = decomp.decompress(req.response_body.items, struct {
                                fn onChunk(data: []const u8, ctx: ?*anyopaque) anyerror!void {
                                    const buf: *std.ArrayListUnmanaged(u8) = @ptrCast(@alignCast(ctx));
                                    try buf.appendSlice(c_allocator, data);
                                }
                            }.onChunk, &decompress_buf) catch {
                                result.has_error = true;
                                continue;
                            };
                            result.data = allocator.dupe(u8, decompress_buf.items) catch null;
                            result.compressed = true;
                        } else {
                            result.data = allocator.dupe(u8, req.response_body.items) catch null;
                        }
                        if (result.data == null) result.has_error = true else success += 1;
                    }
                }
                c.resetRequests();
            }
        }
        _ = debug.timer(" meta: copy results", batch_start);
        debug.log(" meta: queued={d} success={d}", .{ queued, success });

        offset = end;
        batch_num += 1;
    }

    return results;
}

/// Callback for fetchMetadataStreaming. `data` is only valid for the duration
/// of the call (it may point into a reused decompression buffer or the
/// request's response body); copy it if you need to keep it.
pub const MetadataCallback = *const fn (
    name: []const u8,
    data: ?[]const u8,
    has_error: bool,
    userdata: ?*anyopaque
) void;

/// Like fetchMetadataBatch, but invokes `callback` per package as soon as its
/// response completes, instead of materializing a results slice. Each name is
/// delivered at most once (tracked via `processed`); names that could not be
/// queued are silently skipped — NOTE(review): they never get a callback.
pub fn fetchMetadataStreaming(
    self: *Fetcher,
    names: []const []const u8,
    allocator: std.mem.Allocator,
    callback: MetadataCallback,
    userdata: ?*anyopaque,
) !void {
    if (names.len == 0) return;

    var total_start: u64 = @intCast(std.time.nanoTimestamp());
    try self.ensureMetaClients();
    total_start = debug.timer(" meta: get clients", total_start);

    var active_connections: usize = 0;
    for (self.meta_clients) |maybe_client| {
        if (maybe_client != null) active_connections += 1;
    }

    if (active_connections == 0) return error.ConnectionFailed;
    debug.log(" meta: streaming {d} packages across {d} connections", .{ names.len, active_connections });

    // Dedup guard: one callback per name, even if a request is observed done
    // across multiple loop iterations.
    var processed = try allocator.alloc(bool, names.len);
    defer allocator.free(processed);
    @memset(processed, false);

    const ResultTracker = struct {
        name: []const u8,
        index: usize,
    };

    var trackers = try allocator.alloc(ResultTracker, names.len);
    defer allocator.free(trackers);
    for (names, 0..) |name, i| {
        trackers[i] = .{ .name = name, .index = i };
    }

    const total_capacity = active_connections * (MAX_PENDING_REQUESTS - 1);
    var offset: usize = 0;
    var batch_num: usize = 0;

    var decompress_buf = std.ArrayListUnmanaged(u8){};
    defer decompress_buf.deinit(c_allocator);

    while (offset < names.len) {
        const end = @min(offset + total_capacity, names.len);
        var batch_start: u64 = @intCast(std.time.nanoTimestamp());

        debug.log(" meta: batch {d} ({d}-{d})", .{ batch_num, offset, end });

        // Submit phase — same round-robin scheme as fetchMetadataBatch, but
        // unqueueable names are skipped (no error record exists here).
        var queued: usize = 0;
        var conn_idx: usize = 0;
        for (offset..end) |i| {
            const tracker = &trackers[i];
            const name = names[i];

            var client: ?*Http2Client = null;
            var attempts: usize = 0;
            while (attempts < NUM_META_CONNECTIONS) : (attempts += 1) {
                if (self.meta_clients[conn_idx]) |c| {
                    if (c.h2_session != null and c.connected == 1 and c.request_count < MAX_PENDING_REQUESTS - 1) {
                        client = c; break;
                    }
                }
                conn_idx = (conn_idx + 1) % NUM_META_CONNECTIONS;
            }
            if (client == null) continue;

            const c = client.?;
            const session = c.h2_session orelse continue;

            var path_buf: [512]u8 = undefined;
            const path = std.fmt.bufPrint(&path_buf, "/{s}", .{name}) catch continue;

            var hdrs = [_]nghttp2.nv{
                Http2Client.makeNv(":method", "GET"),
                Http2Client.makeNv(":path", c.allocator.dupeZ(u8, path) catch continue),
                Http2Client.makeNv(":scheme", "https"),
                Http2Client.makeNv(":authority", c.host),
                Http2Client.makeNv("accept", "application/vnd.npm.install-v1+json"),
                Http2Client.makeNv("accept-encoding", "gzip"),
                Http2Client.makeNv("user-agent", user_agent),
            };

            const req = &c.requests[c.request_count];
            c.request_count += 1;
            req.* = .{
                .stream_id = 0,
                .path = hdrs[1].value[0..hdrs[1].valuelen :0],
                .on_data = null,
                .on_complete = null,
                .on_error = null,
                .userdata = tracker, // back-pointer into `trackers`
                .response_body = .{},
                .status_code = 0,
                .done = false,
                .has_error = false,
                .start_ns = 0,
                .end_ns = 0,
                .bytes = 0,
                .content_encoding = .identity,
            };
            req.start_ns = @intCast(std.time.nanoTimestamp());

            const sid = nghttp2.nghttp2_submit_request(session, null, &hdrs, hdrs.len, null, req);
            if (sid < 0) {
                c.request_count -= 1;
                if (req.path) |p| c.allocator.free(p);
                continue;
            }
            req.stream_id = sid;
            queued += 1;
            conn_idx = (conn_idx + 1) % NUM_META_CONNECTIONS;
        }
        batch_start = debug.timer(" meta: queue requests", batch_start);

        for (self.meta_clients) |maybe_client| {
            if (maybe_client) |c| c.flush() catch {};
        }

        const loop = uv.uv_default_loop();
        var all_done = false;
        var loops: usize = 0;
        const run_start: u64 = @intCast(std.time.nanoTimestamp());

        while (!all_done) {
            _ = uv.uv_run(loop, uv.RUN_ONCE);
            loops += 1;

            // Deliver callbacks for every newly finished request. This runs
            // inside the loop so callers see results as they stream in.
            for (self.meta_clients) |maybe_client| {
                if (maybe_client) |c| {
                    for (c.requests[0..c.request_count]) |*req| {
                        if (req.done or req.has_error) {
                            const tracker: *ResultTracker = @ptrCast(@alignCast(req.userdata));
                            if (!processed[tracker.index]) {
                                processed[tracker.index] = true;
                                if (req.has_error or req.status_code != 200) {
                                    callback(tracker.name, null, true, userdata);
                                } else if (req.content_encoding == .gzip) {
                                    decompress_buf.clearRetainingCapacity();
                                    const decomp = extractor.GzipDecompressor.init(allocator) catch {
                                        callback(tracker.name, null, true, userdata);
                                        continue;
                                    };
                                    defer decomp.deinit();
                                    _ = decomp.decompress(req.response_body.items, struct {
                                        fn onChunk(data: []const u8, ctx: ?*anyopaque) anyerror!void {
                                            const buf: *std.ArrayListUnmanaged(u8) = @ptrCast(@alignCast(ctx));
                                            try buf.appendSlice(c_allocator, data);
                                        }
                                    }.onChunk, &decompress_buf) catch {
                                        callback(tracker.name, null, true, userdata);
                                        continue;
                                    };
                                    // Zero-copy handoff: callback must not retain.
                                    callback(tracker.name, decompress_buf.items, false, userdata);
                                } else {
                                    callback(tracker.name, req.response_body.items, false, userdata);
                                }
                            }
                        }
                    }
                }
            }

            all_done = true;
            for (self.meta_clients) |maybe_client| {
                if (maybe_client) |c| {
                    for (c.requests[0..c.request_count]) |*req| {
                        if (!req.done and !req.has_error) {
                            all_done = false;
                            break;
                        }
                    }
                    if (!all_done) break;
                }
            }
        }

        const elapsed_ns: u64 = @intCast(@as(i128, @intCast(std.time.nanoTimestamp())) - @as(i128, run_start));
        debug.log(" h2: run complete in {d}ms, {d} loops", .{ elapsed_ns / 1_000_000, loops });

        // Debug timing summary — mirrors fetchMetadataBatch phase 3.
        var slow_count: usize = 0;
        var max_req_ms: u64 = 0;
        var max_req_name: []const u8 = "";
        var total_bytes: usize = 0;
        for (self.meta_clients) |maybe_client| {
            if (maybe_client) |c| {
                for (c.requests[0..c.request_count]) |*req| {
                    const tracker: *ResultTracker = @ptrCast(@alignCast(req.userdata));
                    const end_ns = if (req.end_ns != 0) req.end_ns else @as(u64, @intCast(std.time.nanoTimestamp()));
                    const duration_ms: u64 = @intCast((end_ns - req.start_ns) / 1_000_000);
                    total_bytes += req.response_body.items.len;
                    if (duration_ms > max_req_ms) {
                        max_req_ms = duration_ms;
                        max_req_name = tracker.name;
                    }
                    if (duration_ms >= META_SLOW_LOG_MS) {
                        slow_count += 1;
                        debug.log(" meta: slow {s} {d}ms {d} bytes status={d}", .{
                            tracker.name,
                            duration_ms,
                            req.response_body.items.len,
                            req.status_code,
                        });
                    }
                }
            }
        }
        debug.log(" meta: summary slow={d} max={s} {d}ms total_bytes={d}", .{ slow_count, max_req_name, max_req_ms, total_bytes });

        for (self.meta_clients) |maybe_client| {
            if (maybe_client) |c| c.resetRequests();
        }

        offset = end;
        batch_num += 1;
    }
}

/// Queue a tarball download. The url is duplicated; actual network work
/// happens later in `run`/`finishTarballs`.
pub fn fetchTarball(self: *Fetcher, url: []const u8, handler: StreamHandler) !void {
    try self.pending.append(self.allocator, .{ .url = try self.allocator.dupe(u8, url), .handler = handler });
}

/// Download everything queued by fetchTarball: ensure tarball clients exist,
/// then drive finishTarballs to completion. No-op when nothing is queued.
pub fn run(self: *Fetcher) !void {
    if (self.pending.items.len == 0 and self.tarball_contexts.items.len == 0) return;

    const run_start: u64 = @intCast(std.time.nanoTimestamp());
    const total_requests = self.pending.items.len + self.tarball_contexts.items.len;

    debug.log("fetcher: {d} tarballs to download (pending={d}, in-flight={d})", .{
        total_requests,
        self.pending.items.len,
        self.tarball_contexts.items.len,
    });

    try self.ensureTarballClients();
    self.finishTarballs();

    const elapsed_ns: u64 = @intCast(@as(i128, @intCast(std.time.nanoTimestamp())) - @as(i128, run_start));
    debug.log("fetcher: {d} tarballs complete in {d}ms", .{ total_requests, elapsed_ns / 1_000_000 });
}
};
+127
src/pkg/intern.zig
const std = @import("std");

/// A pointer+length handle to a string owned by a StringPool. Because the
/// pool stores each distinct string exactly once, two interned strings are
/// equal iff their pointers are equal — comparison and hashing are O(1).
pub const InternedString = struct {
    ptr: [*]const u8,
    len: u32,

    /// View the interned bytes as a slice.
    pub fn slice(self: InternedString) []const u8 {
        return self.ptr[0..self.len];
    }

    /// Identity comparison; valid only for strings interned in the same pool.
    pub fn eql(a: InternedString, b: InternedString) bool {
        return a.ptr == b.ptr and a.len == b.len;
    }

    /// Pointer identity doubles as the hash (unique per interned string).
    pub fn hash(self: InternedString) u64 {
        return @intFromPtr(self.ptr);
    }

    pub const empty: InternedString = .{ .ptr = "", .len = 0 };
};

/// Deduplicating string storage: `intern` returns the same InternedString for
/// equal inputs. All stored strings are freed by `deinit`.
pub const StringPool = struct {
    allocator: std.mem.Allocator,
    strings: std.StringHashMap(InternedString),
    storage: std.ArrayListUnmanaged([]const u8),

    pub fn init(allocator: std.mem.Allocator) StringPool {
        return .{
            .allocator = allocator,
            .strings = std.StringHashMap(InternedString).init(allocator),
            .storage = std.ArrayListUnmanaged([]const u8){},
        };
    }

    pub fn deinit(self: *StringPool) void {
        for (self.storage.items) |s| {
            self.allocator.free(s);
        }
        self.storage.deinit(self.allocator);
        self.strings.deinit();
    }

    /// Intern `str`, copying it into the pool on first sight.
    /// Fix: previously `errdefer free(owned)` stayed armed after `owned` was
    /// appended to `storage`; a failing `strings.put` would then free a string
    /// that `deinit` frees again (double free). Map and list capacity are now
    /// reserved before ownership of `owned` is transferred, so no fallible
    /// operation runs after `owned` enters `storage`.
    pub fn intern(self: *StringPool, str: []const u8) !InternedString {
        if (str.len == 0) return InternedString.empty;
        if (self.strings.get(str)) |interned| return interned;

        // Reserve space up front: after these succeed, the inserts below
        // cannot fail.
        try self.strings.ensureUnusedCapacity(1);
        try self.storage.ensureUnusedCapacity(self.allocator, 1);

        const owned = try self.allocator.dupe(u8, str);
        // Not yet referenced by storage/map, so freeing on error is safe
        // (and by now nothing after this point can fail anyway).
        self.storage.appendAssumeCapacity(owned);

        const interned = InternedString{
            .ptr = owned.ptr,
            .len = @intCast(owned.len),
        };

        self.strings.putAssumeCapacity(owned, interned);
        return interned;
    }

    /// Intern a string the caller already allocated with this pool's
    /// allocator, taking ownership. If an equal string is already interned,
    /// `owned` is freed and the existing handle returned.
    pub fn internOwned(self: *StringPool, owned: []const u8) !InternedString {
        if (owned.len == 0) return InternedString.empty;

        if (self.strings.get(owned)) |interned| {
            // Duplicate: we own `owned`, so release it.
            self.allocator.free(@constCast(owned));
            return interned;
        }

        // Same reserve-first discipline as `intern`: once `owned` is in
        // `storage`, nothing else may fail.
        try self.strings.ensureUnusedCapacity(1);
        try self.storage.ensureUnusedCapacity(self.allocator, 1);

        self.storage.appendAssumeCapacity(owned);

        const interned = InternedString{
            .ptr = owned.ptr,
            .len = @intCast(owned.len),
        };

        self.strings.putAssumeCapacity(owned, interned);
        return interned;
    }

    /// Count and total byte size of all interned strings (O(n)).
    pub fn stats(self: *const StringPool) Stats {
        var total_bytes: usize = 0;
        for (self.storage.items) |s| {
            total_bytes += s.len;
        }
        return .{
            .string_count = self.storage.items.len,
            .total_bytes = total_bytes,
        };
    }

    pub const Stats = struct {
        string_count: usize,
        total_bytes: usize,
    };
};

/// Pre-interned handles for strings that show up constantly in npm metadata,
/// so hot paths can compare by pointer without touching the hash map.
pub const CommonStrings = struct {
    pool: *StringPool,

    lodash: InternedString = InternedString.empty,
    react: InternedString = InternedString.empty,
    typescript: InternedString = InternedString.empty,
    webpack: InternedString = InternedString.empty,
    babel: InternedString = InternedString.empty,
    eslint: InternedString = InternedString.empty,
    jest: InternedString = InternedString.empty,
    express: InternedString = InternedString.empty,

    caret: InternedString = InternedString.empty, // ^
    tilde: InternedString = InternedString.empty, // ~

    pub fn init(pool: *StringPool) !CommonStrings {
        return .{
            .pool = pool,
            .lodash = try pool.intern("lodash"),
            .react = try pool.intern("react"),
            .typescript = try pool.intern("typescript"),
            .webpack = try pool.intern("webpack"),
            .babel = try pool.intern("@babel/core"),
            .eslint = try pool.intern("eslint"),
            .jest = try pool.intern("jest"),
            .express = try pool.intern("express"),
            .caret = try pool.intern("^"),
            .tilde = try pool.intern("~"),
        };
    }
};
+336
src/pkg/json.zig
··· 1 + const std = @import("std"); 2 + 3 + pub const yyjson = @cImport({ 4 + @cInclude("yyjson.h"); 5 + }); 6 + 7 + pub const JsonError = error{ 8 + ParseError, 9 + OutOfMemory, 10 + InvalidType, 11 + KeyNotFound, 12 + IoError, 13 + }; 14 + 15 + pub const JsonDoc = struct { 16 + doc: *yyjson.yyjson_doc, 17 + 18 + pub fn parse(data: []const u8) !JsonDoc { 19 + const doc = yyjson.yyjson_read(data.ptr, data.len, 0); 20 + if (doc == null) return error.ParseError; 21 + return JsonDoc{ .doc = doc.? }; 22 + } 23 + 24 + pub fn parseFile(path: [:0]const u8) !JsonDoc { 25 + const doc = yyjson.yyjson_read_file(path.ptr, 0, null, null); 26 + if (doc == null) return error.ParseError; 27 + return JsonDoc{ .doc = doc.? }; 28 + } 29 + 30 + pub fn deinit(self: *JsonDoc) void { 31 + yyjson.yyjson_doc_free(self.doc); 32 + } 33 + 34 + pub fn root(self: *JsonDoc) JsonValue { 35 + return JsonValue{ .val = yyjson.yyjson_doc_get_root(self.doc).? }; 36 + } 37 + }; 38 + 39 + pub const JsonValue = struct { 40 + val: *yyjson.yyjson_val, 41 + 42 + pub fn getString(self: JsonValue, key: [:0]const u8) ?[]const u8 { 43 + const obj = yyjson.yyjson_obj_get(self.val, key.ptr) orelse return null; 44 + if (!yyjson.yyjson_is_str(obj)) return null; 45 + const ptr = yyjson.yyjson_get_str(obj) orelse return null; 46 + const len = yyjson.yyjson_get_len(obj); 47 + return ptr[0..len]; 48 + } 49 + 50 + pub fn getInt(self: JsonValue, key: [:0]const u8) ?i64 { 51 + const obj = yyjson.yyjson_obj_get(self.val, key.ptr) orelse return null; 52 + if (!yyjson.yyjson_is_int(obj)) return null; 53 + return yyjson.yyjson_get_sint(obj); 54 + } 55 + 56 + pub fn getUint(self: JsonValue, key: [:0]const u8) ?u64 { 57 + const obj = yyjson.yyjson_obj_get(self.val, key.ptr) orelse return null; 58 + if (!yyjson.yyjson_is_uint(obj)) return null; 59 + return yyjson.yyjson_get_uint(obj); 60 + } 61 + 62 + pub fn getDouble(self: JsonValue, key: [:0]const u8) ?f64 { 63 + const obj = yyjson.yyjson_obj_get(self.val, key.ptr) orelse 
return null; 64 + if (!yyjson.yyjson_is_real(obj)) return null; 65 + return yyjson.yyjson_get_real(obj); 66 + } 67 + 68 + pub fn getBool(self: JsonValue, key: [:0]const u8) ?bool { 69 + const obj = yyjson.yyjson_obj_get(self.val, key.ptr) orelse return null; 70 + if (!yyjson.yyjson_is_bool(obj)) return null; 71 + return yyjson.yyjson_get_bool(obj); 72 + } 73 + 74 + pub fn getObject(self: JsonValue, key: [:0]const u8) ?JsonValue { 75 + const obj = yyjson.yyjson_obj_get(self.val, key.ptr) orelse return null; 76 + if (!yyjson.yyjson_is_obj(obj)) return null; 77 + return JsonValue{ .val = obj }; 78 + } 79 + 80 + pub fn getArray(self: JsonValue, key: [:0]const u8) ?JsonValue { 81 + const obj = yyjson.yyjson_obj_get(self.val, key.ptr) orelse return null; 82 + if (!yyjson.yyjson_is_arr(obj)) return null; 83 + return JsonValue{ .val = obj }; 84 + } 85 + 86 + pub fn isNull(self: JsonValue) bool { 87 + return yyjson.yyjson_is_null(self.val); 88 + } 89 + 90 + pub fn isArray(self: JsonValue) bool { 91 + return yyjson.yyjson_is_arr(self.val); 92 + } 93 + 94 + pub fn isObject(self: JsonValue) bool { 95 + return yyjson.yyjson_is_obj(self.val); 96 + } 97 + 98 + pub fn arrayLen(self: JsonValue) usize { 99 + return yyjson.yyjson_arr_size(self.val); 100 + } 101 + 102 + pub fn arrayGet(self: JsonValue, index: usize) ?JsonValue { 103 + const elem = yyjson.yyjson_arr_get(self.val, index) orelse return null; 104 + return JsonValue{ .val = elem }; 105 + } 106 + 107 + pub fn asString(self: JsonValue) ?[]const u8 { 108 + if (!yyjson.yyjson_is_str(self.val)) return null; 109 + const ptr = yyjson.yyjson_get_str(self.val) orelse return null; 110 + const len = yyjson.yyjson_get_len(self.val); 111 + return ptr[0..len]; 112 + } 113 + 114 + pub const ObjectIterator = struct { 115 + iter: yyjson.yyjson_obj_iter, 116 + 117 + pub fn next(self: *ObjectIterator) ?struct { key: []const u8, value: JsonValue } { 118 + const key_val = yyjson.yyjson_obj_iter_next(&self.iter) orelse return null; 119 + const 
val = yyjson.yyjson_obj_iter_get_val(key_val) orelse return null; 120 + 121 + const key_ptr = yyjson.yyjson_get_str(key_val) orelse return null; 122 + const key_len = yyjson.yyjson_get_len(key_val); 123 + 124 + return .{ 125 + .key = key_ptr[0..key_len], 126 + .value = JsonValue{ .val = val }, 127 + }; 128 + } 129 + 130 + pub fn deinit(_: *ObjectIterator) void {} 131 + }; 132 + 133 + pub fn objectIterator(self: JsonValue) ?ObjectIterator { 134 + if (!yyjson.yyjson_is_obj(self.val)) return null; 135 + var iter: yyjson.yyjson_obj_iter = undefined; 136 + if (!yyjson.yyjson_obj_iter_init(self.val, &iter)) return null; 137 + return ObjectIterator{ .iter = iter }; 138 + } 139 + 140 + pub const ArrayIterator = struct { 141 + iter: yyjson.yyjson_arr_iter, 142 + 143 + pub fn next(self: *ArrayIterator) ?JsonValue { 144 + const val = yyjson.yyjson_arr_iter_next(&self.iter) orelse return null; 145 + return JsonValue{ .val = val }; 146 + } 147 + 148 + pub fn deinit(_: *ArrayIterator) void {} 149 + }; 150 + 151 + pub fn arrayIterator(self: JsonValue) ?ArrayIterator { 152 + if (!yyjson.yyjson_is_arr(self.val)) return null; 153 + var iter: yyjson.yyjson_arr_iter = undefined; 154 + if (!yyjson.yyjson_arr_iter_init(self.val, &iter)) return null; 155 + return ArrayIterator{ .iter = iter }; 156 + } 157 + }; 158 + 159 + pub const JsonWriter = struct { 160 + doc: *yyjson.yyjson_mut_doc, 161 + 162 + pub fn init() !JsonWriter { 163 + const doc = yyjson.yyjson_mut_doc_new(null); 164 + if (doc == null) return error.OutOfMemory; 165 + return JsonWriter{ .doc = doc.? 
}; 166 + } 167 + 168 + pub fn deinit(self: *JsonWriter) void { 169 + yyjson.yyjson_mut_doc_free(self.doc); 170 + } 171 + 172 + pub fn createObject(self: *JsonWriter) *yyjson.yyjson_mut_val { 173 + return yyjson.yyjson_mut_obj(self.doc).?; 174 + } 175 + 176 + pub fn createArray(self: *JsonWriter) *yyjson.yyjson_mut_val { 177 + return yyjson.yyjson_mut_arr(self.doc).?; 178 + } 179 + 180 + pub fn createString(self: *JsonWriter, str: []const u8) *yyjson.yyjson_mut_val { 181 + return yyjson.yyjson_mut_strncpy(self.doc, str.ptr, str.len).?; 182 + } 183 + 184 + pub fn createInt(self: *JsonWriter, val: i64) *yyjson.yyjson_mut_val { 185 + return yyjson.yyjson_mut_sint(self.doc, val).?; 186 + } 187 + 188 + pub fn createUint(self: *JsonWriter, val: u64) *yyjson.yyjson_mut_val { 189 + return yyjson.yyjson_mut_uint(self.doc, val).?; 190 + } 191 + 192 + pub fn createBool(self: *JsonWriter, val: bool) *yyjson.yyjson_mut_val { 193 + return yyjson.yyjson_mut_bool(self.doc, val).?; 194 + } 195 + 196 + pub fn createReal(self: *JsonWriter, val: f64) *yyjson.yyjson_mut_val { 197 + return yyjson.yyjson_mut_real(self.doc, val).?; 198 + } 199 + 200 + pub fn createNull(self: *JsonWriter) *yyjson.yyjson_mut_val { 201 + return yyjson.yyjson_mut_null(self.doc).?; 202 + } 203 + 204 + pub fn objectAdd(self: *JsonWriter, obj: *yyjson.yyjson_mut_val, key: []const u8, val: *yyjson.yyjson_mut_val) void { 205 + const key_val = yyjson.yyjson_mut_strncpy(self.doc, key.ptr, key.len); 206 + _ = yyjson.yyjson_mut_obj_add(obj, key_val, val); 207 + } 208 + 209 + pub fn arrayAppend(_: *JsonWriter, arr: *yyjson.yyjson_mut_val, val: *yyjson.yyjson_mut_val) void { 210 + _ = yyjson.yyjson_mut_arr_append(arr, val); 211 + } 212 + 213 + pub fn setRoot(self: *JsonWriter, val: *yyjson.yyjson_mut_val) void { 214 + yyjson.yyjson_mut_doc_set_root(self.doc, val); 215 + } 216 + 217 + pub fn write(self: *JsonWriter, allocator: std.mem.Allocator) ![]u8 { 218 + var len: usize = 0; 219 + const ptr = 
yyjson.yyjson_mut_write(self.doc, yyjson.YYJSON_WRITE_PRETTY_TWO_SPACES, &len);
        if (ptr == null) return error.OutOfMemory;
        defer std.c.free(ptr);

        // Copy out of yyjson's C allocation so the caller owns a Zig-allocated slice.
        const result = try allocator.alloc(u8, len);
        @memcpy(result, ptr[0..len]);
        return result;
    }

    /// Serialize the document straight to `path` (pretty-printed, two-space indent).
    pub fn writeToFile(self: *JsonWriter, path: [:0]const u8) !void {
        const success = yyjson.yyjson_mut_write_file(path.ptr, self.doc, yyjson.YYJSON_WRITE_PRETTY_TWO_SPACES, null, null);
        if (!success) return error.IoError;
    }
};

/// Parsed view of a project's package.json.
/// All strings (names, versions, map keys/values) are heap copies owned by the
/// allocator passed to `parse`; release everything with `deinit`.
pub const PackageJson = struct {
    name: []const u8,
    version: []const u8,
    dependencies: std.StringHashMap([]const u8),
    dev_dependencies: std.StringHashMap([]const u8),
    peer_dependencies: std.StringHashMap([]const u8),
    optional_dependencies: std.StringHashMap([]const u8),
    trusted_dependencies: std.StringHashMap(void),

    /// Parse the package.json at `path`.
    ///
    /// Returns a fully-owned PackageJson. On any allocation/parse failure after
    /// partial construction, everything allocated so far is freed (the original
    /// version leaked here; the `errdefer pkg.deinit` below fixes that).
    pub fn parse(allocator: std.mem.Allocator, path: [:0]const u8) !PackageJson {
        var doc = try JsonDoc.parseFile(path);
        defer doc.deinit();

        const root_val = doc.root();

        var pkg = PackageJson{
            .name = "",
            .version = "",
            .dependencies = std.StringHashMap([]const u8).init(allocator),
            .dev_dependencies = std.StringHashMap([]const u8).init(allocator),
            .peer_dependencies = std.StringHashMap([]const u8).init(allocator),
            .optional_dependencies = std.StringHashMap([]const u8).init(allocator),
            .trusted_dependencies = std.StringHashMap(void).init(allocator),
        };
        // If any later dupe/put fails, release all partially-built state.
        // deinit is safe on a partial value: empty strings are skipped and
        // freshly-initialized maps deinit cleanly.
        errdefer pkg.deinit(allocator);

        if (root_val.getString("name")) |s| {
            pkg.name = try allocator.dupe(u8, s);
        }

        if (root_val.getString("version")) |s| {
            pkg.version = try allocator.dupe(u8, s);
        }

        try parseDeps(allocator, root_val, "dependencies", &pkg.dependencies);
        try parseDeps(allocator, root_val, "devDependencies", &pkg.dev_dependencies);
        try parseDeps(allocator, root_val, "peerDependencies", &pkg.peer_dependencies);
        try parseDeps(allocator, root_val, "optionalDependencies", &pkg.optional_dependencies);

        if (root_val.getArray("trustedDependencies")) |arr| {
            for (0..arr.arrayLen()) |i| {
                // Non-string array entries are silently skipped.
                const name = (arr.arrayGet(i) orelse continue).asString() orelse continue;
                try pkg.trusted_dependencies.put(try allocator.dupe(u8, name), {});
            }
        }

        return pkg;
    }

    /// Copy the string->string JSON object under `key` (if present) into `map`.
    /// Entries whose value is not a string are skipped.
    fn parseDeps(
        allocator: std.mem.Allocator,
        root_val: JsonValue,
        key: [:0]const u8,
        map: *std.StringHashMap([]const u8),
    ) !void {
        if (root_val.getObject(key)) |deps| {
            var iter = deps.objectIterator() orelse return;
            defer iter.deinit();
            while (iter.next()) |entry| {
                const version = entry.value.asString() orelse continue;
                try map.put(try allocator.dupe(u8, entry.key), try allocator.dupe(u8, version));
            }
        }
    }

    /// Free every key and value of a name->version map, then the map itself.
    /// (Replaces four copy-pasted loops in the original deinit.)
    fn freeDepMap(allocator: std.mem.Allocator, map: *std.StringHashMap([]const u8)) void {
        var iter = map.iterator();
        while (iter.next()) |entry| {
            allocator.free(entry.key_ptr.*);
            allocator.free(entry.value_ptr.*);
        }
        map.deinit();
    }

    /// Release every allocation made by `parse`. Safe on a partially-built value.
    pub fn deinit(self: *PackageJson, allocator: std.mem.Allocator) void {
        if (self.name.len > 0) allocator.free(self.name);
        if (self.version.len > 0) allocator.free(self.version);

        freeDepMap(allocator, &self.dependencies);
        freeDepMap(allocator, &self.dev_dependencies);
        freeDepMap(allocator, &self.peer_dependencies);
        freeDepMap(allocator, &self.optional_dependencies);

        var trusted_iter = self.trusted_dependencies.keyIterator();
        while (trusted_iter.next()) |key| {
            allocator.free(key.*);
        }
        self.trusted_dependencies.deinit();
    }
};
+561
src/pkg/linker.zig
//! Package linking: materializes packages from the content cache into
//! node_modules using hard links where possible, macOS clonefile as a second
//! choice, and a plain buffered copy as the portable fallback.

const std = @import("std");
const builtin = @import("builtin");
const json = @import("json.zig");
const debug = @import("debug.zig");

// Packages with at least this many files (per the lockfile hint) are linked
// on multiple threads; smaller packages are linked sequentially.
const PARALLEL_LINK_THRESHOLD = 500;
const LINK_THREAD_COUNT = 8;

/// Create a relative symlink `link_name` -> `target` inside `dir`.
/// Windows goes through the UTF-16 CreateSymbolicLink wrapper; everywhere
/// else this is Dir.symLink. NOTE(review): despite the name, no copy
/// fallback is implemented here — confirm intended behavior.
pub fn createSymlinkOrCopy(dir: std.fs.Dir, target: []const u8, link_name: []const u8) !void {
    if (comptime builtin.os.tag == .windows) {
        try createSymlinkWindows(dir, target, link_name);
    } else try dir.symLink(target, link_name, .{});
}

/// Best-effort absolute symlink creation; all errors are swallowed.
pub fn createSymlinkAbsolute(target: []const u8, link_path: []const u8) void {
    if (comptime builtin.os.tag == .windows) {
        createSymlinkAbsoluteWindows(target, link_path);
    } else std.posix.symlink(target, link_path) catch {};
}

// Windows-only: convert both paths to UTF-16 and create the link relative to
// `dir`. NOTE(review): only target_utf16 gets a 0 terminator — link_utf16 is
// passed as a plain (non-sentinel) slice; also a max-length target would
// write its terminator one past the converted length. Verify both against
// the CreateSymbolicLink wrapper's expectations.
fn createSymlinkWindows(dir: std.fs.Dir, target: []const u8, link_name: []const u8) !void {
    if (comptime builtin.os.tag != .windows) return;
    var target_utf16: [std.fs.max_path_bytes]u16 = undefined;
    var link_utf16: [std.fs.max_path_bytes]u16 = undefined;
    const target_len = try std.unicode.utf8ToUtf16Le(&target_utf16, target);
    const link_len = try std.unicode.utf8ToUtf16Le(&link_utf16, link_name);
    target_utf16[target_len] = 0;

    _ = try std.os.windows.CreateSymbolicLink(
        dir.fd,
        link_utf16[0..link_len],
        target_utf16[0..target_len :0],
        false,
    );
}

// Windows-only, best-effort variant of the above for absolute link paths
// (null parent handle); every failure is ignored.
fn createSymlinkAbsoluteWindows(target: []const u8, link_path: []const u8) void {
    if (comptime builtin.os.tag != .windows) return;
    var target_utf16: [std.fs.max_path_bytes]u16 = undefined;
    var link_utf16: [std.fs.max_path_bytes]u16 = undefined;
    const target_len = std.unicode.utf8ToUtf16Le(&target_utf16, target) catch return;
    const link_len = std.unicode.utf8ToUtf16Le(&link_utf16, link_path) catch return;
    target_utf16[target_len] = 0;

    _ = std.os.windows.CreateSymbolicLink(
        null,
        link_utf16[0..link_len],
        target_utf16[0..target_len :0],
        false,
    ) catch {};
}

/// Error set surfaced by the linker's public operations.
pub const LinkError = error{
    IoError,
    PathNotFound,
    CrossDevice,
    PermissionDenied,
    OutOfMemory,
    PathTooLong,
};

/// Plain (non-atomic) copy of the counters, as produced by LinkStats.snapshot.
pub const StatsSnapshot = struct {
    files_linked: u32,
    files_copied: u32,
    files_cloned: u32,
    bytes_linked: u64,
    bytes_copied: u64,
    dirs_created: u32,
    bins_linked: u32,
    packages_installed: u32,
    packages_skipped: u32,
};

/// Atomic counters updated by the linking threads.
/// NOTE(review): bytes_linked/bytes_copied are declared but never incremented
/// anywhere in this file — either wire them up or drop them.
pub const LinkStats = struct {
    files_linked: std.atomic.Value(u32),
    files_copied: std.atomic.Value(u32),
    files_cloned: std.atomic.Value(u32),
    bytes_linked: std.atomic.Value(u64),
    bytes_copied: std.atomic.Value(u64),
    dirs_created: std.atomic.Value(u32),
    bins_linked: std.atomic.Value(u32),
    packages_installed: std.atomic.Value(u32),
    packages_skipped: std.atomic.Value(u32),

    pub fn init() LinkStats {
        return .{
            .files_linked = std.atomic.Value(u32).init(0),
            .files_copied = std.atomic.Value(u32).init(0),
            .files_cloned = std.atomic.Value(u32).init(0),
            .bytes_linked = std.atomic.Value(u64).init(0),
            .bytes_copied = std.atomic.Value(u64).init(0),
            .dirs_created = std.atomic.Value(u32).init(0),
            .bins_linked = std.atomic.Value(u32).init(0),
            .packages_installed = std.atomic.Value(u32).init(0),
            .packages_skipped = std.atomic.Value(u32).init(0),
        };
    }

    /// Read all counters into a plain struct (each load individually
    /// acquire-ordered; the snapshot as a whole is not atomic).
    pub fn snapshot(self: *const LinkStats) StatsSnapshot {
        return .{
            .files_linked = self.files_linked.load(.acquire),
            .files_copied = self.files_copied.load(.acquire),
            .files_cloned = self.files_cloned.load(.acquire),
            .bytes_linked = self.bytes_linked.load(.acquire),
            .bytes_copied = self.bytes_copied.load(.acquire),
            .dirs_created = self.dirs_created.load(.acquire),
            .bins_linked = self.bins_linked.load(.acquire),
            .packages_installed = self.packages_installed.load(.acquire),
            .packages_skipped = self.packages_skipped.load(.acquire),
        };
    }
};

/// One unit of work for Linker.linkPackage.
pub const PackageLink = struct {
    cache_path: []const u8, // absolute/cwd-relative path into the cache
    node_modules_path: []const u8,
    name: []const u8,
    parent_path: ?[]const u8 = null, // set for nested (deduped-conflict) installs
    file_count: u32 = 0, // hint used to choose parallel vs sequential linking
    has_bin: bool = true,
};

/// Installs packages into a node_modules tree and records statistics.
/// Not safe for concurrent linkPackage calls from multiple threads unless
/// the underlying directories are disjoint (stats are atomic; dir handles
/// and cross_device are shared).
pub const Linker = struct {
    allocator: std.mem.Allocator,
    stats: LinkStats,
    node_modules_dir: ?std.fs.Dir,
    bin_dir: ?std.fs.Dir,
    node_modules_path: []const u8,
    // Set once a hard link fails with EXDEV; all later files skip link()
    // and go straight to clone/copy.
    cross_device: std.atomic.Value(bool),

    pub fn init(allocator: std.mem.Allocator) Linker {
        return .{
            .allocator = allocator,
            .stats = LinkStats.init(),
            .node_modules_dir = null,
            .bin_dir = null,
            .node_modules_path = "",
            .cross_device = std.atomic.Value(bool).init(false),
        };
    }

    pub fn deinit(self: *Linker) void {
        if (self.node_modules_dir) |*d| d.close();
        if (self.bin_dir) |*d| d.close();
        if (self.node_modules_path.len > 0) self.allocator.free(self.node_modules_path);
    }

    /// Point the linker at a node_modules directory, creating it and its
    /// `.bin` subdirectory if needed. Only swaps out previously-held handles
    /// after every new resource is acquired, so failure leaves the old state
    /// intact.
    pub fn setNodeModulesPath(self: *Linker, path: []const u8) !void {
        std.fs.cwd().makePath(path) catch |err| switch (err) {
            error.PathAlreadyExists => {},
            else => return error.IoError,
        };

        var new_nm_dir = try std.fs.cwd().openDir(path, .{});
        errdefer new_nm_dir.close();

        const new_path = try self.allocator.dupe(u8, path);
        errdefer self.allocator.free(new_path);

        const bin_path = try std.fmt.allocPrint(self.allocator, "{s}/.bin", .{path});
        defer self.allocator.free(bin_path);
        std.fs.cwd().makePath(bin_path) catch |err| switch (err) {
            error.PathAlreadyExists => {},
            else => return error.IoError,
        };

        const new_bin_dir = try std.fs.cwd().openDir(bin_path, .{});

        // Everything acquired — now release the old handles and commit.
        if (self.bin_dir) |*d| d.close();
        if (self.node_modules_dir) |*d| d.close();
        if (self.node_modules_path.len > 0) self.allocator.free(self.node_modules_path);

        self.node_modules_dir = new_nm_dir;
        self.node_modules_path = new_path;
        self.bin_dir = new_bin_dir;
    }

    /// Install one package from the cache into node_modules.
    /// Skips (and counts as skipped) if the destination already has a
    /// package.json. Top-level packages with bins also get .bin symlinks.
    pub fn linkPackage(self: *Linker, pkg: PackageLink) !void {
        const node_modules = self.node_modules_dir orelse return error.IoError;

        const install_path = if (pkg.parent_path) |parent|
            try std.fmt.allocPrint(self.allocator, "{s}/node_modules/{s}", .{ parent, pkg.name })
        else
            try self.allocator.dupe(u8, pkg.name);
        defer self.allocator.free(install_path);

        // Fast path: already installed (dest dir exists and has package.json).
        const d = node_modules.openDir(install_path, .{}) catch null;
        if (d) |dir| {
            var dd = dir;
            defer dd.close();
            if (dd.statFile("package.json")) |_| {
                _ = self.stats.packages_skipped.fetchAdd(1, .release);
                return;
            } else |_| {}
        }

        var source_dir = std.fs.cwd().openDir(pkg.cache_path, .{ .iterate = true }) catch {
            return error.PathNotFound;
        };
        defer source_dir.close();

        node_modules.makePath(install_path) catch |err| switch (err) {
            error.PathAlreadyExists => {},
            else => return error.IoError,
        };
        _ = self.stats.dirs_created.fetchAdd(1, .release);

        var dest_dir = node_modules.openDir(install_path, .{}) catch return error.IoError;
        defer dest_dir.close();

        self.linkDirectoryWithHint(source_dir, dest_dir, pkg.file_count, pkg.name) catch |err| return err;

        if (pkg.parent_path == null and pkg.has_bin) try self.linkBinaries(pkg.name);
        _ = self.stats.packages_installed.fetchAdd(1, .release);
    }

    // Read the installed package's package.json "bin" field (object or
    // string form) and create .bin symlinks. All failures along the way are
    // treated as "no bins" — deliberately best-effort.
    fn linkBinaries(self: *Linker, pkg_name: []const u8) !void {
        const bin_dir = self.bin_dir orelse return;
        const node_modules = self.node_modules_dir orelse return;

        var pkg_dir = node_modules.openDir(pkg_name, .{}) catch return;
        defer pkg_dir.close();

        const pkg_json_file = pkg_dir.openFile("package.json", .{}) catch return;
        defer pkg_json_file.close();

        // 1 MiB cap on package.json size.
        const content = pkg_json_file.readToEndAlloc(self.allocator, 1024 * 1024) catch return;
        defer self.allocator.free(content);

        var doc = json.JsonDoc.parse(content) catch return;
        defer doc.deinit();

        const root_val = doc.root();

        if (root_val.getObject("bin")) |bin_obj| {
            // Object form: { "cmd": "path", ... }
            var iter = bin_obj.objectIterator() orelse return;
            while (iter.next()) |entry| {
                const bin_path = entry.value.asString() orelse continue;
                self.createBinSymlink(pkg_name, entry.key, bin_path, bin_dir) catch continue;
            }
        } else if (root_val.getString("bin")) |bin_path| {
            // String form: command name is the package name minus any scope.
            const simple_name = if (std.mem.indexOf(u8, pkg_name, "/")) |slash|
                pkg_name[slash + 1 ..]
            else
                pkg_name;
            self.createBinSymlink(pkg_name, simple_name, bin_path, bin_dir) catch {};
        }
    }

    // Create node_modules/.bin/<cmd_name> -> ../<pkg>/<bin_path>, replacing
    // any existing entry.
    fn createBinSymlink(self: *Linker, pkg_name: []const u8, cmd_name: []const u8, bin_path: []const u8, bin_dir: std.fs.Dir) !void {
        var normalized_path = bin_path;
        if (std.mem.startsWith(u8, normalized_path, "./")) {
            normalized_path = normalized_path[2..];
        }

        const target = try std.fmt.allocPrint(self.allocator, "../{s}/{s}", .{ pkg_name, normalized_path });
        defer self.allocator.free(target);

        bin_dir.deleteFile(cmd_name) catch {};
        try createSymlinkOrCopy(bin_dir, target, cmd_name);

        _ = self.stats.bins_linked.fetchAdd(1, .release);
    }

    // A single file or symlink to materialize; paths are relative to the
    // package root. All three string fields are owned by linkDirectoryParallel.
    const FileWorkItem = struct {
        source_path: []const u8,
        dest_path: []const u8,
        kind: std.fs.Dir.Entry.Kind,
        link_target: ?[]const u8,
    };

    fn linkDirectory(self: *Linker, source: std.fs.Dir, dest: std.fs.Dir) !void {
        try self.linkDirectorySequential(source, dest);
    }

    /// Link `source` into `dest`, choosing the parallel path when the
    /// caller-supplied file_count hint crosses PARALLEL_LINK_THRESHOLD.
    pub fn linkDirectoryWithHint(self: *Linker, source: std.fs.Dir, dest: std.fs.Dir, file_count: u32, name: []const u8) !void {
        if (file_count >= PARALLEL_LINK_THRESHOLD) {
            debug.log(" parallel link: {s} ({d} files)", .{ name, file_count });
            try self.linkDirectoryParallel(source, dest);
        } else try self.linkDirectorySequential(source, dest);
    }

    // Depth-first single-threaded link. Unreadable child dirs and broken
    // symlinks are skipped silently; directory-creation failures abort.
    fn linkDirectorySequential(self: *Linker, source: std.fs.Dir, dest: std.fs.Dir) !void {
        var iter = source.iterate();
        while (try iter.next()) |entry| {
            switch (entry.kind) {
                .directory => {
                    dest.makePath(entry.name) catch |err| switch (err) {
                        error.PathAlreadyExists => {},
                        else => return error.IoError,
                    };
                    _ = self.stats.dirs_created.fetchAdd(1, .release);

                    var child_source = source.openDir(entry.name, .{ .iterate = true }) catch continue;
                    defer child_source.close();

                    var child_dest = dest.openDir(entry.name, .{}) catch continue;
                    defer child_dest.close();

                    try self.linkDirectorySequential(child_source, child_dest);
                },
                .file => try self.linkFile(source, dest, entry.name),
                .sym_link => {
                    var link_buf: [std.fs.max_path_bytes]u8 = undefined;
                    const target = source.readLink(entry.name, &link_buf) catch continue;
                    createSymlinkOrCopy(dest, target, entry.name) catch {};
                },
                else => {},
            }
        }
    }

    // Per-thread view of a chunk of the work-item list. Lives on the parent
    // stack frame; valid only until linkDirectoryParallel returns (threads
    // are joined before it does).
    const ParallelThreadContext = struct {
        linker: *Linker,
        items: []const FileWorkItem,
        source_base: std.fs.Dir,
        dest_base: std.fs.Dir,
    };

    // Two-phase parallel link: (1) walk the tree on this thread, creating
    // all directories and collecting file/symlink work items; (2) split the
    // items into up to LINK_THREAD_COUNT chunks and process them on worker
    // threads. If a spawn fails, that chunk runs inline on this thread.
    fn linkDirectoryParallel(self: *Linker, source: std.fs.Dir, dest: std.fs.Dir) !void {
        var work_items = std.ArrayListUnmanaged(FileWorkItem){};
        defer {
            for (work_items.items) |item| {
                self.allocator.free(item.source_path);
                self.allocator.free(item.dest_path);
                if (item.link_target) |t| self.allocator.free(t);
            }
            work_items.deinit(self.allocator);
        }

        try self.collectWorkItems(source, dest, "", &work_items);
        if (work_items.items.len == 0) return;

        const items_slice = work_items.items;
        // Ceiling division so every item lands in some chunk.
        const chunk_size = (items_slice.len + LINK_THREAD_COUNT - 1) / LINK_THREAD_COUNT;

        var threads: [LINK_THREAD_COUNT]?std.Thread = undefined;
        for (&threads) |*t| t.* = null;
        var contexts: [LINK_THREAD_COUNT]ParallelThreadContext = undefined;

        var offset: usize = 0;
        for (0..LINK_THREAD_COUNT) |i| {
            if (offset >= items_slice.len) break;
            const end = @min(offset + chunk_size, items_slice.len);

            contexts[i] = .{
                .linker = self,
                .items = items_slice[offset..end],
                .source_base = source,
                .dest_base = dest,
            };

            threads[i] = std.Thread.spawn(.{}, processWorkItems, .{&contexts[i]}) catch |err| blk: {
                debug.log("Thread spawn failed for chunk {d}-{d}: {s}", .{ offset, end, @errorName(err) });
                // Degrade gracefully: run this chunk on the current thread.
                processWorkItems(&contexts[i]);
                break :blk null;
            };
            offset = end;
        }

        for (&threads) |*t| if (t.*) |thread| thread.join();
    }

    // Worker body: process one chunk of work items. All per-item errors are
    // swallowed so one bad file never aborts the chunk.
    fn processWorkItems(ctx: *const ParallelThreadContext) void {
        for (ctx.items) |item| {
            switch (item.kind) {
                .file => {
                    const src_dir_path = std.fs.path.dirname(item.source_path) orelse "";
                    const dst_dir_path = std.fs.path.dirname(item.dest_path) orelse "";
                    const filename = std.fs.path.basename(item.source_path);

                    // Only close dirs we opened; the base dirs are borrowed.
                    var src_dir = if (src_dir_path.len > 0)
                        ctx.source_base.openDir(src_dir_path, .{}) catch continue
                    else
                        ctx.source_base;
                    defer if (src_dir_path.len > 0) src_dir.close();

                    var dst_dir = if (dst_dir_path.len > 0)
                        ctx.dest_base.openDir(dst_dir_path, .{}) catch continue
                    else
                        ctx.dest_base;
                    defer if (dst_dir_path.len > 0) dst_dir.close();

                    ctx.linker.linkFile(src_dir, dst_dir, filename) catch {};
                },
                .sym_link => {
                    if (item.link_target) |target| {
                        const dst_dir_path = std.fs.path.dirname(item.dest_path) orelse "";
                        const filename = std.fs.path.basename(item.dest_path);

                        var dst_dir = if (dst_dir_path.len > 0)
                            ctx.dest_base.openDir(dst_dir_path, .{}) catch continue
                        else
                            ctx.dest_base;
                        defer if (dst_dir_path.len > 0) dst_dir.close();
                        createSymlinkOrCopy(dst_dir, target, filename) catch {};
                    }
                },
                else => {},
            }
        }
    }

    // Recursive phase-1 walk: creates directories eagerly and appends
    // file/symlink items. Ownership of rel_path transfers to work_items on
    // the .file/.sym_link append paths; every other path frees it explicitly
    // (plus errdefer for allocation failures mid-iteration).
    fn collectWorkItems(self: *Linker, source: std.fs.Dir, dest: std.fs.Dir, prefix: []const u8, work_items: *std.ArrayListUnmanaged(FileWorkItem)) !void {
        var iter = source.iterate();
        while (try iter.next()) |entry| {
            const rel_path = if (prefix.len > 0)
                try std.fmt.allocPrint(self.allocator, "{s}/{s}", .{ prefix, entry.name })
            else
                try self.allocator.dupe(u8, entry.name);
            errdefer self.allocator.free(rel_path);

            switch (entry.kind) {
                .directory => {
                    dest.makePath(rel_path) catch |err| switch (err) {
                        error.PathAlreadyExists => {},
                        else => {
                            self.allocator.free(rel_path);
                            return error.IoError;
                        },
                    };
                    _ = self.stats.dirs_created.fetchAdd(1, .release);

                    var child_source = source.openDir(entry.name, .{ .iterate = true }) catch {
                        self.allocator.free(rel_path);
                        continue;
                    };
                    defer child_source.close();

                    try self.collectWorkItems(child_source, dest, rel_path, work_items);
                    self.allocator.free(rel_path);
                },
                .file => {
                    try work_items.append(self.allocator, .{
                        .source_path = rel_path,
                        .dest_path = try self.allocator.dupe(u8, rel_path),
                        .kind = .file,
                        .link_target = null,
                    });
                },
                .sym_link => {
                    var link_buf: [std.fs.max_path_bytes]u8 = undefined;
                    const target = source.readLink(entry.name, &link_buf) catch {
                        self.allocator.free(rel_path);
                        continue;
                    };
                    try work_items.append(self.allocator, .{
                        .source_path = rel_path,
                        .dest_path = try self.allocator.dupe(u8, rel_path),
                        .kind = .sym_link,
                        .link_target = try self.allocator.dupe(u8, target),
                    });
                },
                else => self.allocator.free(rel_path),
            }
        }
    }

    // Materialize one file: hard link (non-Windows, until the first EXDEV
    // flips cross_device), then macOS clonefile, then plain copy.
    fn linkFile(self: *Linker, source_dir: std.fs.Dir, dest_dir: std.fs.Dir, name: []const u8) !void {
        dest_dir.deleteFile(name) catch {};

        if (comptime builtin.os.tag != .windows) {
            if (!self.cross_device.load(.acquire)) {
                if (linkAt(source_dir, name, dest_dir, name)) {
                    _ = self.stats.files_linked.fetchAdd(1, .release);
                    return;
                } else |err| {
                    if (err == error.CrossDevice) {
                        // Remember EXDEV process-wide; skip link() from now on.
                        self.cross_device.store(true, .release);
                    } else if (err != error.IoError) return err;
                    // error.IoError falls through to clone/copy.
                }
            }
        }

        if (comptime builtin.os.tag == .macos) {
            if (fclonefileat(source_dir.fd, name, dest_dir.fd, name)) {
                _ = self.stats.files_cloned.fetchAdd(1, .release);
                return;
            } else |_| {}
        }

        try self.copyFile(source_dir, dest_dir, name);
        _ = self.stats.files_copied.fetchAdd(1, .release);
    }

    // Thin wrapper over linkat(2) with stack-allocated NUL-terminated names.
    // Names longer than 255 bytes are rejected as PathTooLong.
    fn linkAt(source_dir: std.fs.Dir, source_name: []const u8, dest_dir: std.fs.Dir, dest_name: []const u8) !void {
        if (comptime builtin.os.tag == .windows) return error.IoError;

        var source_buf: [256]u8 = undefined;
        var dest_buf: [256]u8 = undefined;

        if (source_name.len >= source_buf.len or dest_name.len >= dest_buf.len) {
            return error.PathTooLong;
        }

        @memcpy(source_buf[0..source_name.len], source_name);
        source_buf[source_name.len] = 0;

        @memcpy(dest_buf[0..dest_name.len], dest_name);
        dest_buf[dest_name.len] = 0;

        const source_z: [*:0]const u8 = source_buf[0..source_name.len :0];
        const dest_z: [*:0]const u8 = dest_buf[0..dest_name.len :0];

        const result = std.c.linkat(source_dir.fd, source_z, dest_dir.fd, dest_z, 0);
        if (result != 0) {
            const errno = std.posix.errno(result);
            return switch (errno) {
                .XDEV => error.CrossDevice, // different filesystem
                .PERM, .ACCES => error.PermissionDenied,
                else => error.IoError,
            };
        }
    }

    // macOS fclonefileat(2) via a locally-declared extern; copy-on-write
    // clone of src into dst. Any nonzero return collapses to IoError.
    fn fclonefileat(src_fd: std.posix.fd_t, src_name: []const u8, dst_fd: std.posix.fd_t, dst_name: []const u8) !void {
        var src_buf: [256]u8 = undefined;
        var dst_buf: [256]u8 = undefined;

        if (src_name.len >= src_buf.len or dst_name.len >= dst_buf.len) {
            return error.PathTooLong;
        }

        @memcpy(src_buf[0..src_name.len], src_name);
        src_buf[src_name.len] = 0;

        @memcpy(dst_buf[0..dst_name.len], dst_name);
        dst_buf[dst_name.len] = 0;

        const src_z: [*:0]const u8 = src_buf[0..src_name.len :0];
        const dst_z: [*:0]const u8 = dst_buf[0..dst_name.len :0];

        const fclonefileat_fn = struct {
            extern "c" fn fclonefileat(c_int, [*:0]const u8, c_int, [*:0]const u8, c_uint) c_int;
        }.fclonefileat;

        const result = fclonefileat_fn(src_fd, src_z, dst_fd, dst_z, 0);
        if (result != 0) {
            return error.IoError;
        }
    }

    // Portable fallback: buffered read/write copy, preserving the source
    // file mode on non-Windows targets.
    fn copyFile(self: *Linker, source_dir: std.fs.Dir, dest_dir: std.fs.Dir, name: []const u8) !void {
        _ = self;

        var source = source_dir.openFile(name, .{}) catch return error.IoError;
        defer source.close();

        const stat = source.stat() catch return error.IoError;
        const mode = if (comptime builtin.os.tag != .windows) stat.mode else {};

        var dest = dest_dir.createFile(name, .{ .mode = mode }) catch return error.IoError;
        defer dest.close();

        var buf: [64 * 1024]u8 = undefined;
        while (true) {
            const bytes_read = source.read(&buf) catch return error.IoError;
            if (bytes_read == 0) break;
            dest.writeAll(buf[0..bytes_read]) catch return error.IoError;
        }
    }

    /// Snapshot the linker's counters.
    pub fn getStats(self: *const Linker) StatsSnapshot {
        return self.stats.snapshot();
    }
};
+402
src/pkg/lockfile.zig
//! Binary lockfile: one flat file holding a fixed header, a string table,
//! package and dependency arrays, and an open-addressed hash table for
//! name lookup. Read via mmap on POSIX, via a heap read on Windows.

const std = @import("std");
const builtin = @import("builtin");

pub const MAGIC: u32 = 0x504B474C; // ASCII "PKGL"
pub const VERSION: u32 = 1;

/// Offset+length pair into the lockfile's string table.
pub const StringRef = extern struct {
    offset: u32,
    len: u32,

    /// Resolve against `string_table`. Out-of-range refs degrade to "" or a
    /// truncated slice instead of erroring — defensive against a corrupt file.
    pub fn slice(self: StringRef, string_table: []const u8) []const u8 {
        const offset: usize = self.offset;
        const len: usize = self.len;
        if (offset >= string_table.len) return "";
        const end = @min(offset + len, string_table.len);
        return string_table[offset..end];
    }

    pub const empty: StringRef = .{ .offset = 0, .len = 0 };
};

/// Fixed 64-byte file header; all offsets are absolute byte offsets from the
/// start of the file, all counts are element counts.
pub const Header = extern struct {
    magic: u32 = MAGIC,
    version: u32 = VERSION,
    package_count: u32 = 0,
    dependency_count: u32 = 0,
    string_table_offset: u32 = 0,
    string_table_size: u32 = 0,
    package_array_offset: u32 = 0,
    dependency_array_offset: u32 = 0,
    hash_table_offset: u32 = 0,
    hash_table_size: u32 = 0, // number of HashBucket entries, not bytes
    _reserved: [24]u8 = [_]u8{0} ** 24,

    pub fn validate(self: *const Header) bool {
        return self.magic == MAGIC and self.version == VERSION;
    }
};

pub const PackageFlags = packed struct(u8) {
    dev: bool = false,
    optional: bool = false,
    peer: bool = false,
    bundled: bool = false,
    has_bin: bool = false,
    has_scripts: bool = false,
    direct: bool = false,
    _reserved: u1 = 0,
};

/// One resolved package record. Fixed 136-byte layout (asserted below) so
/// the on-disk array can be read in place.
pub const Package = extern struct {
    name: StringRef,
    version_major: u64,
    version_minor: u64,
    version_patch: u64,
    prerelease: StringRef,
    integrity: [64]u8, // raw digest bytes; hex via integrityHex
    tarball_url: StringRef,
    parent_path: StringRef, // empty for top-level installs
    deps_start: u32, // index into the dependency array
    deps_count: u32,
    flags: PackageFlags,
    _padding: [3]u8 = .{ 0, 0, 0 },

    comptime {
        // Layout guard: a change here breaks every existing lockfile.
        std.debug.assert(@sizeOf(Package) == 136);
    }

    /// Allocate "major.minor.patch" (plus "-prerelease" when present).
    pub fn versionString(self: *const Package, allocator: std.mem.Allocator, string_table: []const u8) ![]u8 {
        const prerelease_str = self.prerelease.slice(string_table);
        if (prerelease_str.len > 0) {
            return std.fmt.allocPrint(allocator, "{d}.{d}.{d}-{s}", .{
                self.version_major,
                self.version_minor,
                self.version_patch,
                prerelease_str,
            });
        }
        return std.fmt.allocPrint(allocator, "{d}.{d}.{d}", .{
            self.version_major,
            self.version_minor,
            self.version_patch,
        });
    }

    /// Lowercase hex rendering of the 64-byte integrity digest.
    pub fn integrityHex(self: *const Package) [128]u8 {
        return std.fmt.bytesToHex(self.integrity, .lower);
    }

    /// Allocate the install path relative to node_modules:
    /// "<parent>/node_modules/<name>" for nested packages, "<name>" otherwise.
    pub fn installPath(self: *const Package, allocator: std.mem.Allocator, string_table: []const u8) ![]u8 {
        const parent = self.parent_path.slice(string_table);
        const name = self.name.slice(string_table);
        if (parent.len > 0) return std.fmt.allocPrint(allocator, "{s}/node_modules/{s}", .{ parent, name });
        return allocator.dupe(u8, name);
    }

    pub fn isNested(self: *const Package) bool {
        return self.parent_path.len > 0;
    }
};

pub const DependencyFlags = packed struct(u8) {
    peer: bool = false,
    dev: bool = false,
    optional: bool = false,
    _reserved: u5 = 0,
};

/// One edge in the dependency graph: resolved package index + the original
/// version constraint string.
pub const Dependency = extern struct {
    package_index: u32,
    constraint: StringRef,
    flags: DependencyFlags = .{},
    _padding: [3]u8 = .{ 0, 0, 0 },
};

/// Open-addressing bucket; package_index == maxInt(u32) marks an empty slot.
pub const HashBucket = extern struct {
    name_hash: u32,
    package_index: u32,
    pub const empty: HashBucket = .{
        .name_hash = 0,
        .package_index = std.math.maxInt(u32),
    };
};

/// Classic djb2 string hash (wrapping arithmetic).
pub fn djb2Hash(str: []const u8) u32 {
    var hash: u32 = 5381;
    for (str) |c| hash = ((hash << 5) +% hash) +% c;
    return hash;
}

/// Read-only view over a lockfile. On POSIX the file is mmap'd and all
/// slices point into the mapping; on Windows it is read into a heap buffer
/// (mutable type only because the allocator hands back non-const memory).
pub const Lockfile = struct {
    data: if (builtin.os.tag == .windows) []align(@alignOf(Header)) u8 else []align(std.heap.page_size_min) const u8,
    header: *const Header,
    string_table: []const u8,
    packages: []const Package,
    dependencies: []const Dependency,
    hash_table: []const HashBucket,

    /// Open and validate the lockfile at `path`. The returned view borrows
    /// the mapping/buffer until close().
    pub fn open(path: []const u8) !Lockfile {
        const file = try std.fs.cwd().openFile(path, .{});
        defer file.close();

        const stat = try file.stat();
        if (stat.size < @sizeOf(Header)) {
            return error.InvalidLockfile;
        }

        if (comptime builtin.os.tag == .windows) {
            const data = try std.heap.c_allocator.alignedAlloc(u8, std.mem.Alignment.fromByteUnits(@alignOf(Header)), stat.size);
            errdefer std.heap.c_allocator.free(data);

            const bytes_read = try file.readAll(data);
            if (bytes_read != stat.size) {
                std.heap.c_allocator.free(data);
                return error.InvalidLockfile;
            }

            return initFromDataWindows(data);
        } else {
            const data = try std.posix.mmap(
                null,
                stat.size,
                std.posix.PROT.READ,
                .{ .TYPE = .PRIVATE },
                file.handle,
                0,
            );

            return initFromData(data);
        }
    }

    // Windows variant of initFromData over a heap buffer.
    // NOTE(review): the bounds checks below sum u32 fields — a crafted file
    // with offsets near maxInt(u32) can wrap and pass; same in initFromData.
    fn initFromDataWindows(data: []align(@alignOf(Header)) u8) !Lockfile {
        if (data.len < @sizeOf(Header)) return error.InvalidLockfile;

        const header: *const Header = @ptrCast(@alignCast(data.ptr));
        if (!header.validate()) return error.InvalidLockfile;

        if (header.string_table_offset + header.string_table_size > data.len or
            header.package_array_offset + header.package_count * @sizeOf(Package) > data.len or
            header.dependency_array_offset + header.dependency_count * @sizeOf(Dependency) > data.len or
            header.hash_table_offset + header.hash_table_size * @sizeOf(HashBucket) > data.len)
        {
            return error.InvalidLockfile;
        }

        return .{
            .data = data,
            .header = header,
            .string_table = data[header.string_table_offset..][0..header.string_table_size],
            .packages = @as([*]const Package, @ptrCast(@alignCast(data.ptr + header.package_array_offset)))[0..header.package_count],
            .dependencies = @as([*]const Dependency, @ptrCast(@alignCast(data.ptr + header.dependency_array_offset)))[0..header.dependency_count],
            .hash_table = @as([*]const HashBucket, @ptrCast(@alignCast(data.ptr + header.hash_table_offset)))[0..header.hash_table_size],
        };
    }

    /// Build a Lockfile view over an already-mapped buffer, validating the
    /// header and that every section fits inside `data`.
    pub fn initFromData(data: []align(std.heap.page_size_min) const u8) !Lockfile {
        if (data.len < @sizeOf(Header)) {
            return error.InvalidLockfile;
        }

        const header: *const Header = @ptrCast(@alignCast(data.ptr));
        if (!header.validate()) {
            return error.InvalidLockfile;
        }

        // NOTE(review): u32 additions here can wrap on a hostile file — see
        // initFromDataWindows.
        if (header.string_table_offset + header.string_table_size > data.len or
            header.package_array_offset + header.package_count * @sizeOf(Package) > data.len or
            header.dependency_array_offset + header.dependency_count * @sizeOf(Dependency) > data.len or
            header.hash_table_offset + header.hash_table_size * @sizeOf(HashBucket) > data.len)
        {
            return error.InvalidLockfile;
        }

        const string_table = data[header.string_table_offset..][0..header.string_table_size];

        const packages_ptr: [*]const Package = @ptrCast(@alignCast(data.ptr + header.package_array_offset));
        const packages = packages_ptr[0..header.package_count];

        const deps_ptr: [*]const Dependency = @ptrCast(@alignCast(data.ptr + header.dependency_array_offset));
        const dependencies = deps_ptr[0..header.dependency_count];

        const hash_ptr: [*]const HashBucket = @ptrCast(@alignCast(data.ptr + header.hash_table_offset));
        const hash_table = hash_ptr[0..header.hash_table_size];

        return .{
            .data = data,
            .header = header,
            .string_table = string_table,
            .packages = packages,
            .dependencies = dependencies,
            .hash_table = hash_table,
        };
    }

    /// Release the mapping (POSIX) or heap buffer (Windows). All slices
    /// derived from this Lockfile are invalid afterwards.
    pub fn close(self: *Lockfile) void {
        if (comptime builtin.os.tag == .windows) {
            std.heap.c_allocator.free(self.data);
        } else std.posix.munmap(self.data);
        self.* = undefined;
    }

    /// Look up a package by exact name via the linear-probed hash table.
    /// Returns null when absent (empty bucket or full table scan).
    pub fn lookupPackage(self: *const Lockfile, name: []const u8) ?*const Package {
        if (self.hash_table.len == 0) return null;

        const hash = djb2Hash(name);
        var index = hash % @as(u32, @intCast(self.hash_table.len));
        var probes: u32 = 0;

        while (probes < self.hash_table.len) : (probes += 1) {
            const bucket = &self.hash_table[index];
            // Empty slot terminates the probe chain.
            if (bucket.package_index == std.math.maxInt(u32)) return null;
            if (bucket.name_hash == hash) {
                // Hash match — confirm with a full string compare.
                const pkg = &self.packages[bucket.package_index];
                const pkg_name = pkg.name.slice(self.string_table);
                if (std.mem.eql(u8, pkg_name, name)) return pkg;
            }
            index = (index + 1) % @as(u32, @intCast(self.hash_table.len));
        }
        return null;
    }

    /// Slice of `pkg`'s dependency records; empty on any out-of-range
    /// deps_start/deps_count (corrupt-file defense).
    pub fn getPackageDeps(self: *const Lockfile, pkg: *const Package) []const Dependency {
        if (pkg.deps_count == 0) return &[_]Dependency{};
        if (pkg.deps_start >= self.dependencies.len or
            pkg.deps_start + pkg.deps_count > self.dependencies.len) return &[_]Dependency{};
        return self.dependencies[pkg.deps_start..][0..pkg.deps_count];
    }
};

/// Incrementally builds a lockfile in memory (with string interning) and
/// serializes it in the binary layout that Lockfile reads.
pub const LockfileWriter = struct {
    allocator: std.mem.Allocator,
    packages: std.ArrayListUnmanaged(Package),
    dependencies: std.ArrayListUnmanaged(Dependency),
    string_builder: std.ArrayListUnmanaged(u8), // the string table being built
    string_offsets: std.StringHashMap(StringRef), // dedupe map: string -> ref

    pub fn init(allocator: std.mem.Allocator) LockfileWriter {
        return .{
            .allocator = allocator,
            .packages = .{},
            .dependencies = .{},
            .string_builder = .{},
            .string_offsets = std.StringHashMap(StringRef).init(allocator),
        };
    }

    pub fn deinit(self: *LockfileWriter) void {
        self.packages.deinit(self.allocator);
        self.dependencies.deinit(self.allocator);
        self.string_builder.deinit(self.allocator);
        // Keys in string_offsets are owned dupes made by internString.
        var key_iter = self.string_offsets.keyIterator();
        while (key_iter.next()) |key| {
            self.allocator.free(key.*);
        }
        self.string_offsets.deinit();
    }

    /// Intern `str` into the string table, returning a stable StringRef.
    /// Identical strings share one table entry; "" maps to StringRef.empty.
    pub fn internString(self: *LockfileWriter, str: []const u8) !StringRef {
        if (str.len == 0) return StringRef.empty;
        if (self.string_offsets.get(str)) |ref| return ref;

        if (self.string_builder.items.len > std.math.maxInt(u32)) return error.StringTableTooLarge;
        if (str.len > std.math.maxInt(u32)) return error.StringTooLarge;

        const offset: u32 = @intCast(self.string_builder.items.len);
        try self.string_builder.appendSlice(self.allocator, str);

        const ref = StringRef{ .offset = offset, .len = @intCast(str.len) };
        // Own a copy for the map key; the table slice above is not stable.
        const stored_str = try self.allocator.dupe(u8, str);

        errdefer self.allocator.free(stored_str);
        try self.string_offsets.put(stored_str, ref);

        return ref;
    }

    /// Append a package record; returns its index for use in Dependency.
    pub fn addPackage(self: *LockfileWriter, pkg: Package) !u32 {
        const index: u32 = @intCast(self.packages.items.len);
        try self.packages.append(self.allocator, pkg);
        return index;
    }

    pub fn addDependency(self: *LockfileWriter, dep: Dependency) !void {
        try self.dependencies.append(self.allocator, dep);
    }

    // Round `offset` up to the next multiple of `alignment`.
    fn alignOffset(offset: u32, alignment: u32) u32 {
        const rem = offset % alignment;
        return if (rem == 0) offset else offset + (alignment - rem);
    }

    /// Serialize header + string table + arrays + hash table to `path`.
    pub fn write(self: *LockfileWriter, path: []const u8) !void {
        const file = try std.fs.cwd().createFile(path, .{});
        defer file.close();

        const header_size: u32 = @sizeOf(Header);
        const string_table_offset = header_size;
        const string_table_size: u32 = @intCast(self.string_builder.items.len);

        const package_array_offset = alignOffset(string_table_offset
+ string_table_size, @alignOf(Package)); 338 + const package_pad_size = package_array_offset - (string_table_offset + string_table_size); 339 + const package_array_size: u32 = @intCast(self.packages.items.len * @sizeOf(Package)); 340 + 341 + const dependency_array_offset = alignOffset(package_array_offset + package_array_size, @alignOf(Dependency)); 342 + const dep_pad_size = dependency_array_offset - (package_array_offset + package_array_size); 343 + const dependency_array_size: u32 = @intCast(self.dependencies.items.len * @sizeOf(Dependency)); 344 + 345 + const hash_table_size: u32 = @intCast(@max(1, self.packages.items.len * 10 / 7)); 346 + var hash_table = try self.allocator.alloc(HashBucket, hash_table_size); 347 + defer self.allocator.free(hash_table); 348 + 349 + for (hash_table) |*bucket| { 350 + bucket.* = HashBucket.empty; 351 + } 352 + 353 + for (self.packages.items, 0..) |pkg, i| { 354 + const name = pkg.name.slice(self.string_builder.items); 355 + const hash = djb2Hash(name); 356 + var index = hash % hash_table_size; 357 + var probes: u32 = 0; 358 + 359 + while (hash_table[index].package_index != std.math.maxInt(u32) and probes < hash_table_size) : (probes += 1) { 360 + index = (index + 1) % hash_table_size; 361 + } 362 + if (probes >= hash_table_size) return error.HashTableFull; 363 + 364 + hash_table[index] = .{ 365 + .name_hash = hash, 366 + .package_index = @intCast(i), 367 + }; 368 + } 369 + 370 + const hash_table_offset = alignOffset(dependency_array_offset + dependency_array_size, @alignOf(HashBucket)); 371 + const hash_pad_size = hash_table_offset - (dependency_array_offset + dependency_array_size); 372 + 373 + const header = Header{ 374 + .package_count = @intCast(self.packages.items.len), 375 + .dependency_count = @intCast(self.dependencies.items.len), 376 + .string_table_offset = string_table_offset, 377 + .string_table_size = string_table_size, 378 + .package_array_offset = package_array_offset, 379 + .dependency_array_offset = 
dependency_array_offset, 380 + .hash_table_offset = hash_table_offset, 381 + .hash_table_size = hash_table_size, 382 + }; 383 + 384 + try file.writeAll(std.mem.asBytes(&header)); 385 + try file.writeAll(self.string_builder.items); 386 + 387 + if (package_pad_size > 0) { 388 + const padding = [_]u8{0} ** 8; 389 + try file.writeAll(padding[0..package_pad_size]); 390 + } try file.writeAll(std.mem.sliceAsBytes(self.packages.items)); 391 + 392 + if (dep_pad_size > 0) { 393 + const padding = [_]u8{0} ** 8; 394 + try file.writeAll(padding[0..dep_pad_size]); 395 + } try file.writeAll(std.mem.sliceAsBytes(self.dependencies.items)); 396 + 397 + if (hash_pad_size > 0) { 398 + const padding = [_]u8{0} ** 8; 399 + try file.writeAll(padding[0..hash_pad_size]); 400 + } try file.writeAll(std.mem.sliceAsBytes(hash_table)); 401 + } 402 + };
+166
src/pkg/metadata.c
··· 1 + #include "yyjson.h" 2 + #include <stdlib.h> 3 + #include <string.h> 4 + 5 + static inline void copy_string_field(yyjson_mut_doc *doc, yyjson_mut_val *dest, yyjson_val *src, const char *key) { 6 + yyjson_val *val = yyjson_obj_get(src, key); 7 + if (val && yyjson_is_str(val)) { 8 + yyjson_mut_obj_add_strncpy(doc, dest, key, yyjson_get_str(val), yyjson_get_len(val)); 9 + } 10 + } 11 + 12 + static inline void copy_string_array(yyjson_mut_doc *doc, yyjson_mut_val *dest, yyjson_val *src, const char *key) { 13 + yyjson_val *arr = yyjson_obj_get(src, key); 14 + if (!arr || !yyjson_is_arr(arr)) return; 15 + 16 + yyjson_mut_val *out_arr = yyjson_mut_arr(doc); 17 + yyjson_val *elem; 18 + size_t idx, max; 19 + yyjson_arr_foreach(arr, idx, max, elem) { 20 + if (yyjson_is_str(elem)) { 21 + yyjson_mut_arr_add_strncpy(doc, out_arr, yyjson_get_str(elem), yyjson_get_len(elem)); 22 + } 23 + } 24 + yyjson_mut_obj_add_val(doc, dest, key, out_arr); 25 + } 26 + 27 + static inline void copy_deps_object(yyjson_mut_doc *doc, yyjson_mut_val *dest, yyjson_val *src, const char *key) { 28 + yyjson_val *deps = yyjson_obj_get(src, key); 29 + if (!deps || !yyjson_is_obj(deps)) return; 30 + 31 + yyjson_mut_val *out_deps = yyjson_mut_obj(doc); 32 + yyjson_val *dep_key, *dep_val; 33 + size_t idx, max; 34 + yyjson_obj_foreach(deps, idx, max, dep_key, dep_val) { 35 + const char *k = yyjson_get_str(dep_key); 36 + size_t klen = yyjson_get_len(dep_key); 37 + 38 + if (yyjson_is_str(dep_val)) { 39 + yyjson_mut_val *mk = yyjson_mut_strncpy(doc, k, klen); 40 + yyjson_mut_val *mv = yyjson_mut_strncpy(doc, yyjson_get_str(dep_val), yyjson_get_len(dep_val)); 41 + yyjson_mut_obj_add(out_deps, mk, mv); 42 + } else if (yyjson_is_obj(dep_val)) { 43 + yyjson_mut_val *nested = yyjson_mut_obj(doc); 44 + yyjson_mut_val *mk = yyjson_mut_strncpy(doc, k, klen); 45 + yyjson_mut_obj_add(out_deps, mk, nested); 46 + 47 + yyjson_val *nk, *nv; 48 + size_t ni, nm; 49 + yyjson_obj_foreach(dep_val, ni, nm, nk, nv) { 50 + 
const char *nks = yyjson_get_str(nk); 51 + size_t nkl = yyjson_get_len(nk); 52 + if (yyjson_is_str(nv)) { 53 + yyjson_mut_val *nmk = yyjson_mut_strncpy(doc, nks, nkl); 54 + yyjson_mut_val *nmv = yyjson_mut_strncpy(doc, yyjson_get_str(nv), yyjson_get_len(nv)); 55 + yyjson_mut_obj_add(nested, nmk, nmv); 56 + } else if (yyjson_is_bool(nv)) { 57 + yyjson_mut_val *nmk = yyjson_mut_strncpy(doc, nks, nkl); 58 + yyjson_mut_obj_add(nested, nmk, yyjson_mut_bool(doc, yyjson_get_bool(nv))); 59 + } 60 + } 61 + } 62 + } 63 + yyjson_mut_obj_add_val(doc, dest, key, out_deps); 64 + } 65 + 66 + static inline void copy_bin_field(yyjson_mut_doc *doc, yyjson_mut_val *dest, yyjson_val *src) { 67 + yyjson_val *bin = yyjson_obj_get(src, "bin"); 68 + if (!bin) return; 69 + 70 + if (yyjson_is_str(bin)) { 71 + yyjson_mut_obj_add_strncpy(doc, dest, "bin", yyjson_get_str(bin), yyjson_get_len(bin)); 72 + } else if (yyjson_is_obj(bin)) { 73 + yyjson_mut_val *out_bin = yyjson_mut_obj(doc); 74 + yyjson_val *bk, *bv; 75 + size_t idx, max; 76 + yyjson_obj_foreach(bin, idx, max, bk, bv) { 77 + if (yyjson_is_str(bv)) { 78 + yyjson_mut_val *mk = yyjson_mut_strncpy(doc, yyjson_get_str(bk), yyjson_get_len(bk)); 79 + yyjson_mut_val *mv = yyjson_mut_strncpy(doc, yyjson_get_str(bv), yyjson_get_len(bv)); 80 + yyjson_mut_obj_add(out_bin, mk, mv); 81 + } 82 + } 83 + yyjson_mut_obj_add_val(doc, dest, "bin", out_bin); 84 + } 85 + } 86 + 87 + static inline void copy_dist(yyjson_mut_doc *doc, yyjson_mut_val *dest, yyjson_val *src) { 88 + yyjson_val *dist = yyjson_obj_get(src, "dist"); 89 + if (!dist || !yyjson_is_obj(dist)) return; 90 + 91 + yyjson_mut_val *out_dist = yyjson_mut_obj(doc); 92 + copy_string_field(doc, out_dist, dist, "tarball"); 93 + copy_string_field(doc, out_dist, dist, "integrity"); 94 + copy_string_field(doc, out_dist, dist, "shasum"); 95 + yyjson_mut_obj_add_val(doc, dest, "dist", out_dist); 96 + } 97 + 98 + char *strip_npm_metadata(const char *json_data, size_t json_len, size_t *out_len) { 99 
+ yyjson_doc *doc = yyjson_read(json_data, json_len, 0); 100 + if (!doc) return NULL; 101 + 102 + yyjson_val *root = yyjson_doc_get_root(doc); 103 + if (!root || !yyjson_is_obj(root)) { 104 + yyjson_doc_free(doc); 105 + return NULL; 106 + } 107 + 108 + yyjson_mut_doc *mut_doc = yyjson_mut_doc_new(NULL); 109 + if (!mut_doc) { 110 + yyjson_doc_free(doc); 111 + return NULL; 112 + } 113 + 114 + yyjson_mut_val *out_root = yyjson_mut_obj(mut_doc); 115 + yyjson_mut_doc_set_root(mut_doc, out_root); 116 + 117 + copy_string_field(mut_doc, out_root, root, "name"); 118 + 119 + yyjson_val *versions = yyjson_obj_get(root, "versions"); 120 + if (versions && yyjson_is_obj(versions)) { 121 + yyjson_mut_val *out_versions = yyjson_mut_obj(mut_doc); 122 + 123 + yyjson_val *vkey, *vval; 124 + size_t idx, max; 125 + yyjson_obj_foreach(versions, idx, max, vkey, vval) { 126 + if (!yyjson_is_obj(vval)) continue; 127 + 128 + yyjson_mut_val *out_ver = yyjson_mut_obj(mut_doc); 129 + yyjson_mut_val *mk = yyjson_mut_strncpy(mut_doc, yyjson_get_str(vkey), yyjson_get_len(vkey)); 130 + yyjson_mut_obj_add(out_versions, mk, out_ver); 131 + 132 + copy_string_field(mut_doc, out_ver, vval, "version"); 133 + copy_deps_object(mut_doc, out_ver, vval, "dependencies"); 134 + copy_deps_object(mut_doc, out_ver, vval, "peerDependencies"); 135 + copy_deps_object(mut_doc, out_ver, vval, "optionalDependencies"); 136 + copy_deps_object(mut_doc, out_ver, vval, "peerDependenciesMeta"); 137 + copy_dist(mut_doc, out_ver, vval); 138 + copy_string_array(mut_doc, out_ver, vval, "os"); 139 + copy_string_array(mut_doc, out_ver, vval, "cpu"); 140 + copy_string_array(mut_doc, out_ver, vval, "libc"); 141 + copy_bin_field(mut_doc, out_ver, vval); 142 + } 143 + yyjson_mut_obj_add_val(mut_doc, out_root, "versions", out_versions); 144 + } 145 + 146 + yyjson_val *dist_tags = yyjson_obj_get(root, "dist-tags"); 147 + if (dist_tags && yyjson_is_obj(dist_tags)) { 148 + yyjson_val *latest = yyjson_obj_get(dist_tags, "latest"); 149 + if 
(latest && yyjson_is_str(latest)) { 150 + yyjson_mut_val *out_tags = yyjson_mut_obj(mut_doc); 151 + yyjson_mut_obj_add_strncpy(mut_doc, out_tags, "latest", yyjson_get_str(latest), yyjson_get_len(latest)); 152 + yyjson_mut_obj_add_val(mut_doc, out_root, "dist-tags", out_tags); 153 + } 154 + } 155 + 156 + char *result = yyjson_mut_write(mut_doc, 0, out_len); 157 + 158 + yyjson_mut_doc_free(mut_doc); 159 + yyjson_doc_free(doc); 160 + 161 + return result; 162 + } 163 + 164 + void strip_metadata_free(char *ptr) { 165 + free(ptr); 166 + }
+94
src/pkg/nghttp2.zig
//! Minimal hand-written bindings for the subset of libnghttp2 used by the
//! HTTP/2 registry client. Declarations mirror nghttp2.h; keep them in
//! sync with the vendored nghttp2 version.

pub const session = opaque {};
pub const session_callbacks = opaque {};

/// Mirrors nghttp2_frame_hd — the header common to every frame type.
pub const frame_hd = extern struct {
    length: usize,
    stream_id: i32,
    type: u8,
    flags: u8,
    reserved: u8,
};

/// Stand-in for nghttp2_frame: only `hd` is accessed from Zig; the
/// per-frame payload union is modeled as opaque padding.
/// NOTE(review): assumes sizeof(nghttp2_frame) - sizeof(nghttp2_frame_hd)
/// fits in 256 bytes on every target ABI — confirm against nghttp2.h.
pub const frame = extern struct {
    hd: frame_hd,
    _pad: [256]u8 = undefined,
};

/// Mirrors nghttp2_nv — one header name/value pair for submit_request.
pub const nv = extern struct {
    name: [*c]u8,
    value: [*c]u8,
    namelen: usize,
    valuelen: usize,
    flags: u8,
};

/// Mirrors nghttp2_settings_entry.
pub const settings_entry = extern struct {
    settings_id: i32,
    value: u32,
};

// Constants copied from nghttp2.h (flag bits, frame type, settings ids,
// and the NGHTTP2_ERR_NOMEM error code).
pub const FLAG_NONE: u8 = 0;
pub const FLAG_END_STREAM: u8 = 0x01;
pub const NV_FLAG_NONE: u8 = 0;
pub const HEADERS: u8 = 0x01;
pub const SETTINGS_MAX_CONCURRENT_STREAMS: i32 = 0x03;
pub const SETTINGS_INITIAL_WINDOW_SIZE: i32 = 0x04;
pub const ERR_NOMEM: isize = -901;

// Callback signatures. All use the C calling convention; the trailing
// ?*anyopaque is the user_data registered at session creation.

/// Called when the session has bytes to send on the wire.
pub const send_callback2 = ?*const fn (
    ?*session,
    [*c]const u8,
    usize,
    c_int,
    ?*anyopaque,
) callconv(.c) isize;

/// Called after a complete frame is received.
pub const on_frame_recv_callback = ?*const fn (
    ?*session,
    *const frame,
    ?*anyopaque,
) callconv(.c) c_int;

/// Called for each DATA chunk (flags, stream id, data, len, user_data).
pub const on_data_chunk_recv_callback = ?*const fn (
    ?*session,
    u8,
    i32,
    [*c]const u8,
    usize,
    ?*anyopaque,
) callconv(.c) c_int;

/// Called once per decoded header (name/len, value/len, flags).
pub const on_header_callback = ?*const fn (
    ?*session,
    *const frame,
    [*c]const u8,
    usize,
    [*c]const u8,
    usize,
    u8,
    ?*anyopaque,
) callconv(.c) c_int;

/// Called when a stream closes (stream id, error code).
pub const on_stream_close_callback = ?*const fn (
    ?*session,
    i32,
    u32,
    ?*anyopaque,
) callconv(.c) c_int;

// Extern entry points; signatures transcribed from nghttp2.h.
pub extern fn nghttp2_session_callbacks_new(**session_callbacks) c_int;
pub extern fn nghttp2_session_callbacks_del(*session_callbacks) void;
pub extern fn nghttp2_session_callbacks_set_send_callback2(*session_callbacks, send_callback2) void;
pub extern fn nghttp2_session_callbacks_set_on_frame_recv_callback(*session_callbacks, on_frame_recv_callback) void;
pub extern fn nghttp2_session_callbacks_set_on_data_chunk_recv_callback(*session_callbacks, on_data_chunk_recv_callback) void;
pub extern fn nghttp2_session_callbacks_set_on_header_callback(*session_callbacks, on_header_callback) void;
pub extern fn nghttp2_session_callbacks_set_on_stream_close_callback(*session_callbacks, on_stream_close_callback) void;
pub extern fn nghttp2_session_client_new(**session, *session_callbacks, ?*anyopaque) c_int;
pub extern fn nghttp2_session_del(*session) void;
pub extern fn nghttp2_session_send(*session) c_int;
pub extern fn nghttp2_session_mem_recv(*session, [*]const u8, usize) isize;
pub extern fn nghttp2_session_want_write(*session) c_int;
pub extern fn nghttp2_session_consume(*session, i32, usize) c_int;
pub extern fn nghttp2_submit_settings(*session, u8, [*]settings_entry, usize) c_int;
pub extern fn nghttp2_submit_request(*session, ?*anyopaque, [*]nv, usize, ?*anyopaque, ?*anyopaque) i32;
pub extern fn nghttp2_submit_window_update(*session, u8, i32, i32) c_int;
+1790
src/pkg/resolver.zig
const std = @import("std");
const builtin = @import("builtin");
const lockfile = @import("lockfile.zig");
const intern = @import("intern.zig");
const fetcher = @import("fetcher.zig");
const json = @import("json.zig");
const debug = @import("debug.zig");
const cache = @import("cache.zig");

/// Errors surfaced by dependency resolution.
pub const ResolveError = error{
    InvalidPackageJson,
    NetworkError,
    NoMatchingVersion,
    CyclicDependency,
    OutOfMemory,
    ParseError,
    IoError,
};

/// A parsed semantic version: major.minor.patch[-prerelease][+build].
/// `prerelease`/`build` are slices into the string passed to `parse`
/// unless the caller re-dupes them (PackageMetadata.parseFromJson does).
pub const Version = struct {
    major: u64,
    minor: u64,
    patch: u64,
    prerelease: ?[]const u8,
    build: ?[]const u8,

    /// Parse "1.2.3", "v1.2.3", "1.2.3-rc.1", "1.2.3-rc.1+build5", ...
    /// Returns error.InvalidVersion (or a parseInt error) on malformed
    /// input; requires all three numeric components.
    pub fn parse(str: []const u8) !Version {
        var remaining = str;

        // Tolerate a leading "v" ("v1.2.3").
        if (remaining.len > 0 and remaining[0] == 'v') {
            remaining = remaining[1..];
        }

        const major_end = std.mem.indexOfScalar(u8, remaining, '.') orelse return error.InvalidVersion;
        const major = try std.fmt.parseInt(u64, remaining[0..major_end], 10);
        remaining = remaining[major_end + 1 ..];

        const minor_end = std.mem.indexOfScalar(u8, remaining, '.') orelse return error.InvalidVersion;
        const minor = try std.fmt.parseInt(u64, remaining[0..minor_end], 10);
        remaining = remaining[minor_end + 1 ..];

        var patch_end = remaining.len;
        var prerelease: ?[]const u8 = null;
        var build: ?[]const u8 = null;

        // "-pre" and "+build" suffixes; "-pre+build" carries both.
        if (std.mem.indexOfScalar(u8, remaining, '-')) |dash| {
            patch_end = dash;
            const after_patch = remaining[dash + 1 ..];
            if (std.mem.indexOfScalar(u8, after_patch, '+')) |plus| {
                prerelease = after_patch[0..plus];
                build = after_patch[plus + 1 ..];
            } else prerelease = after_patch;
        } else if (std.mem.indexOfScalar(u8, remaining, '+')) |plus| {
            patch_end = plus;
            build = remaining[plus + 1 ..];
        }

        const patch = try std.fmt.parseInt(u64, remaining[0..patch_end], 10);

        return .{
            .major = major,
            .minor = minor,
            .patch = patch,
            .prerelease = prerelease,
            .build = build,
        };
    }

    /// Semver precedence: compare numeric fields, then "release sorts
    /// above prerelease", then dot-separated prerelease identifiers.
    /// Build metadata is ignored, per the semver spec.
    pub fn order(a: Version, b: Version) std.math.Order {
        if (a.major != b.major) return std.math.order(a.major, b.major);
        if (a.minor != b.minor) return std.math.order(a.minor, b.minor);
        if (a.patch != b.patch) return std.math.order(a.patch, b.patch);

        if (a.prerelease == null and b.prerelease != null) return .gt;
        if (a.prerelease != null and b.prerelease == null) return .lt;
        if (a.prerelease == null and b.prerelease == null) return .eq;

        return orderPrerelease(a.prerelease.?, b.prerelease.?);
    }

    // Compare dot-separated prerelease identifier lists; a shorter list
    // that is a prefix of the other sorts lower ("rc" < "rc.1").
    fn orderPrerelease(a: []const u8, b: []const u8) std.math.Order {
        var a_rest: []const u8 = a;
        var b_rest: []const u8 = b;

        while (true) {
            const a_end = std.mem.indexOfScalar(u8, a_rest, '.') orelse a_rest.len;
            const b_end = std.mem.indexOfScalar(u8, b_rest, '.') orelse b_rest.len;

            const a_id = a_rest[0..a_end];
            const b_id = b_rest[0..b_end];

            const cmp = compareIdentifier(a_id, b_id);
            if (cmp != .eq) return cmp;

            const a_done = a_end >= a_rest.len;
            const b_done = b_end >= b_rest.len;
            if (a_done and b_done) return .eq;
            if (a_done) return .lt;
            if (b_done) return .gt;

            a_rest = a_rest[a_end + 1 ..];
            b_rest = b_rest[b_end + 1 ..];
        }
    }

    // Numeric identifiers compare numerically and sort below alphanumeric
    // ones; alphanumeric identifiers compare lexically (semver rule 11).
    fn compareIdentifier(a: []const u8, b: []const u8) std.math.Order {
        const a_num = parseNumeric(a);
        const b_num = parseNumeric(b);

        if (a_num != null and b_num != null) {
            return std.math.order(a_num.?, b_num.?);
        }

        if (a_num != null) return .lt;
        if (b_num != null) return .gt;

        return std.mem.order(u8, a, b);
    }

    // Parse an all-digit identifier; null if any non-digit is present.
    fn parseNumeric(s: []const u8) ?u64 {
        if (s.len == 0) return null;
        var val: u64 = 0;
        for (s) |c| {
            if (c < '0' or c > '9') return null;
            val = val * 10 + (c - '0');
        }
        return val;
    }

    /// Render as "M.m.p" or "M.m.p-pre" (build metadata is not emitted).
    /// Caller owns the returned slice.
    pub fn format(self: Version, allocator: std.mem.Allocator) ![]u8 {
        if (self.prerelease) |pre| {
            return std.fmt.allocPrint(allocator, "{d}.{d}.{d}-{s}", .{
                self.major, self.minor, self.patch, pre,
            });
        }
        return std.fmt.allocPrint(allocator, "{d}.{d}.{d}", .{
            self.major, self.minor, self.patch,
        });
    }
};

/// A single simplified npm version range. Known simplifications: of an
/// "a || b" union only the last alternative is honored, and of a
/// space-separated intersection only the first comparator is honored.
pub const Constraint = struct {
    kind: Kind,
    version: Version,

    pub const Kind = enum {
        exact, // 1.2.3
        caret, // ^1.2.3 (>=1.2.3 <2.0.0)
        tilde, // ~1.2.3 (>=1.2.3 <1.3.0)
        gte, // >=1.2.3
        gt, // >1.2.3
        lte, // <=1.2.3
        lt, // <1.2.3
        any, // *
    };

    pub fn parse(str: []const u8) !Constraint {
        if (str.len == 0 or std.mem.eql(u8, str, "*") or std.mem.eql(u8, str, "latest")) {
            return .{ .kind = .any, .version = .{ .major = 0, .minor = 0, .patch = 0, .prerelease = null, .build = null } };
        }

        var remaining = str;
        var kind: Kind = .exact;

        if (std.mem.lastIndexOf(u8, remaining, "||")) |or_idx| {
            remaining = std.mem.trim(u8, remaining[or_idx + 2 ..], " ");
        }

        if (std.mem.indexOf(u8, remaining, " ")) |space| {
            remaining = remaining[0..space];
        }

        if (std.mem.startsWith(u8, remaining, "^")) {
            kind = .caret;
            remaining = remaining[1..];
        } else if (std.mem.startsWith(u8, remaining, "~")) {
            kind = .tilde;
            remaining = remaining[1..];
        } else if (std.mem.startsWith(u8, remaining, ">=")) {
            kind = .gte;
            remaining = remaining[2..];
        } else if (std.mem.startsWith(u8, remaining, ">")) {
            kind = .gt;
            remaining = remaining[1..];
        } else if (std.mem.startsWith(u8, remaining, "<=")) {
            kind = .lte;
            remaining = remaining[2..];
        } else if (std.mem.startsWith(u8, remaining, "<")) {
            kind = .lt;
            remaining = remaining[1..];
        } else if (std.mem.startsWith(u8, remaining, "=")) {
            remaining = remaining[1..];
        }

        // Fix: normalize npm x-ranges ("1.x", "1.2.x", "1.*", "1.x.x") by
        // stripping trailing wildcard segments so the partial-version
        // logic below applies. Previously "1.x" parsed as ~1.0 (only
        // 1.0.z matched) and "1.2.x" failed Version.parse outright.
        while (std.mem.endsWith(u8, remaining, ".x") or
            std.mem.endsWith(u8, remaining, ".X") or
            std.mem.endsWith(u8, remaining, ".*"))
        {
            remaining = remaining[0 .. remaining.len - 2];
        }

        // Partial versions widen like npm ranges: "1" => ^1, "1.2" => ~1.2.
        const dot_count = std.mem.count(u8, remaining, ".");
        if (dot_count == 0) {
            const major = std.fmt.parseInt(u64, remaining, 10) catch return .{
                .kind = .any,
                .version = .{ .major = 0, .minor = 0, .patch = 0, .prerelease = null, .build = null },
            };
            return .{
                .kind = if (kind == .exact) .caret else kind,
                .version = .{ .major = major, .minor = 0, .patch = 0, .prerelease = null, .build = null },
            };
        } else if (dot_count == 1) {
            var parts = std.mem.splitScalar(u8, remaining, '.');
            const major = std.fmt.parseInt(u64, parts.next().?, 10) catch 0;
            const minor = std.fmt.parseInt(u64, parts.next().?, 10) catch 0;
            return .{
                .kind = if (kind == .exact) .tilde else kind,
                .version = .{ .major = major, .minor = minor, .patch = 0, .prerelease = null, .build = null },
            };
        }

        const version = try Version.parse(remaining);
        return .{ .kind = kind, .version = version };
    }

    /// Does version `v` satisfy this constraint?
    pub fn satisfies(self: Constraint, v: Version) bool {
        switch (self.kind) {
            .any => return true,
            .exact => {
                if (v.major != self.version.major or v.minor != self.version.minor or v.patch != self.version.patch) return false;
                if (self.version.prerelease == null and v.prerelease == null) return true;
                if (self.version.prerelease == null or v.prerelease == null) return false;
                return std.mem.eql(u8, self.version.prerelease.?, v.prerelease.?);
            },
            .caret => {
                // ^1.2.3 means >=1.2.3 <2.0.0 (for major > 0)
                // ^0.2.3 means >=0.2.3 <0.3.0 (for major = 0)
                // ^0.0.3 means >=0.0.3 <0.0.4 (for major = 0, minor = 0)
                if (v.order(self.version) == .lt) return false;
                if (self.version.major > 0) {
                    return v.major == self.version.major;
                } else if (self.version.minor > 0) {
                    return v.major == 0 and v.minor == self.version.minor;
                } else {
                    return v.major == 0 and v.minor == 0 and v.patch == self.version.patch;
                }
            },
            .tilde => {
                // ~1.2.3 means >=1.2.3 <1.3.0
                if (v.order(self.version) == .lt) return false;
                return v.major == self.version.major and v.minor == self.version.minor;
            },
            .gte => return v.order(self.version) != .lt,
            .gt => return v.order(self.version) == .gt,
            .lte => return v.order(self.version) != .gt,
            .lt => return v.order(self.version) == .lt,
        }
    }
};

/// One publishable version of a package as extracted from registry
/// metadata. All string fields are owned by `allocator`.
pub const VersionInfo = struct {
    version: Version,
    version_str: []const u8,
    // sha512 digest (64 bytes) or sha1 shasum in the first 20 bytes;
    // all-zero when the registry supplied neither.
    integrity: [64]u8,
    tarball_url: []const u8,
    dependencies: std.StringHashMap([]const u8),
    optional_dependencies: std.StringHashMap([]const u8),
    peer_dependencies: std.StringHashMap([]const u8),
    peer_dependencies_meta: std.StringHashMap(bool),
    // Comma-joined platform filters ("darwin,linux", "!win32"), or null.
    os: ?[]const u8, cpu: ?[]const u8, libc: ?[]const u8,
    bin: std.StringHashMap([]const u8),
    allocator: std.mem.Allocator,

    pub fn deinit(self: *VersionInfo) void {
        self.allocator.free(self.version_str);
        self.allocator.free(self.tarball_url);
        if (self.version.prerelease) |pre| self.allocator.free(pre);
        if (self.version.build) |bld| self.allocator.free(bld);
        var iter = self.dependencies.iterator();
        while (iter.next()) |entry| {
            self.allocator.free(entry.key_ptr.*);
            self.allocator.free(entry.value_ptr.*);
        }
        self.dependencies.deinit();
        var opt_iter = self.optional_dependencies.iterator();
        while (opt_iter.next()) |entry| {
            self.allocator.free(entry.key_ptr.*);
            self.allocator.free(entry.value_ptr.*);
        }
        self.optional_dependencies.deinit();
        var peer_iter = self.peer_dependencies.iterator();
        while (peer_iter.next()) |entry| {
            self.allocator.free(entry.key_ptr.*);
            self.allocator.free(entry.value_ptr.*);
        }
        self.peer_dependencies.deinit();
        var peer_meta_iter = self.peer_dependencies_meta.iterator();
        while (peer_meta_iter.next()) |entry| {
            self.allocator.free(entry.key_ptr.*);
        }
        self.peer_dependencies_meta.deinit();
        if (self.os) |o| self.allocator.free(o);
        if (self.cpu) |c| self.allocator.free(c);
        if (self.libc) |l| self.allocator.free(l);
        var bin_iter = self.bin.iterator();
        while (bin_iter.next()) |entry| {
            self.allocator.free(entry.key_ptr.*);
            self.allocator.free(entry.value_ptr.*);
        }
        self.bin.deinit();
    }

    /// True when this version's os/cpu/libc filters allow the platform
    /// this binary was compiled for (filters are evaluated at comptime
    /// against builtin.os/cpu/abi).
    pub fn matchesPlatform(self: *const VersionInfo) bool {
        const current_os = comptime switch (builtin.os.tag) {
            .macos => "darwin",
            .linux => "linux",
            .windows => "win32",
            .freebsd => "freebsd",
            else => "unknown",
        };

        const current_cpu = comptime switch (builtin.cpu.arch) {
            .aarch64 => "arm64",
            .x86_64 => "x64",
            .x86 => "ia32",
            .arm => "arm",
            else => "unknown",
        };

        const current_libc: ?[]const u8 = comptime if (builtin.os.tag != .linux) null
            else if (builtin.abi == .gnu or builtin.abi == .gnueabi or builtin.abi == .gnueabihf) "glibc"
            else if (builtin.abi == .musl or builtin.abi == .musleabi or builtin.abi == .musleabihf) "musl"
            else null;

        if (self.os) |os_filter| if (!matchesFilter(os_filter, current_os)) return false;
        if (self.cpu) |cpu_filter| if (!matchesFilter(cpu_filter, current_cpu)) return false;

        // libc filters only apply when we know our own libc flavor.
        if (self.libc) |libc_filter| {
            if (current_libc) |libc| if (!matchesFilter(libc_filter, libc)) return false;
        }

        return true;
    }

    // npm-style filter list: "!x" entries exclude; plain entries form an
    // allow-list that only applies when at least one positive entry exists.
    fn matchesFilter(filter: []const u8, value: []const u8) bool {
        var has_positive = false;
        var matches = false;

        var iter = std.mem.splitScalar(u8, filter, ',');
        while (iter.next()) |part| {
            const trimmed = std.mem.trim(u8, part, " ");
            if (trimmed.len == 0) continue;

            if (trimmed[0] == '!') {
                if (std.mem.eql(u8, trimmed[1..], value)) return false;
            } else {
                has_positive = true;
                if (std.mem.eql(u8, trimmed, value)) matches = true;
            }
        }

        return if (has_positive) matches else true;
    }
};

// Copy a JSON object of name -> version-range strings into an owned map.
// Allocation failures skip the entry (best-effort by design).
fn parseDepsMap(
    allocator: std.mem.Allocator,
    maybe_obj: ?std.json.Value,
) std.StringHashMap([]const u8) {
    var map = std.StringHashMap([]const u8).init(allocator);

    const deps_obj = maybe_obj orelse return map;
    if (deps_obj != .object) return map;

    for (deps_obj.object.keys(), deps_obj.object.values()) |dep_name, dep_ver| {
        if (dep_ver != .string) continue;

        const key = allocator.dupe(u8, dep_name) catch continue;
        const val = allocator.dupe(u8, dep_ver.string) catch {
            allocator.free(key);
            continue;
        };

        map.put(key, val) catch {
            allocator.free(key);
            allocator.free(val);
        };
    }

    return map;
}

// Extract peerDependenciesMeta: records only the entries whose "optional"
// flag is true; everything else is treated as a required peer.
fn parsePeerMeta(
    allocator: std.mem.Allocator,
    maybe_obj: ?std.json.Value,
) std.StringHashMap(bool) {
    var map = std.StringHashMap(bool).init(allocator);

    const meta_obj = maybe_obj orelse return map;
    if (meta_obj != .object) return map;

    for (meta_obj.object.keys(), meta_obj.object.values()) |dep_name, meta_val| {
        if (meta_val != .object) continue;

        const is_optional = if (meta_val.object.get("optional")) |opt| (opt == .bool and opt.bool)
            else false;

        if (is_optional) {
            const key = allocator.dupe(u8, dep_name) catch continue;
            map.put(key, true) catch allocator.free(key);
        }
    }
    return map;
}

/// Registry metadata for one package; continues past this excerpt.
pub const PackageMetadata = struct {
    allocator: std.mem.Allocator,
    name: []const u8,
412 + versions: std.ArrayListUnmanaged(VersionInfo), 413 + 414 + pub fn init(allocator: std.mem.Allocator, name: []const u8) !PackageMetadata { 415 + return .{ 416 + .allocator = allocator, 417 + .name = try allocator.dupe(u8, name), 418 + .versions = .{}, 419 + }; 420 + } 421 + 422 + pub fn deinit(self: *PackageMetadata) void { 423 + for (self.versions.items) |*v| { 424 + v.deinit(); 425 + } 426 + self.versions.deinit(self.allocator); 427 + self.allocator.free(self.name); 428 + } 429 + 430 + pub fn parseFromJson(allocator: std.mem.Allocator, json_data: []const u8) !PackageMetadata { 431 + const parsed = std.json.parseFromSlice(std.json.Value, allocator, json_data, .{}) catch { 432 + return error.ParseError; 433 + }; defer parsed.deinit(); 434 + 435 + const root = parsed.value; 436 + if (root != .object) return error.ParseError; 437 + 438 + const name = if (root.object.get("name")) |n| switch (n) { 439 + .string => |s| s, 440 + else => return error.ParseError, 441 + } else return error.ParseError; 442 + 443 + var metadata = try PackageMetadata.init(allocator, name); 444 + errdefer metadata.deinit(); 445 + 446 + const versions_obj = root.object.get("versions") orelse return metadata; 447 + if (versions_obj != .object) return metadata; 448 + 449 + for (versions_obj.object.keys(), versions_obj.object.values()) |version_str, version_data| { 450 + if (version_data != .object) continue; 451 + 452 + var version = Version.parse(version_str) catch continue; 453 + if (version.prerelease) |pre| { 454 + version.prerelease = allocator.dupe(u8, pre) catch null; 455 + } 456 + if (version.build) |bld| { 457 + version.build = allocator.dupe(u8, bld) catch null; 458 + } 459 + const dist = version_data.object.get("dist") orelse continue; 460 + if (dist != .object) continue; 461 + 462 + const tarball = if (dist.object.get("tarball")) |t| switch (t) { 463 + .string => |s| s, 464 + else => continue, 465 + } else continue; 466 + 467 + var integrity: [64]u8 = std.mem.zeroes([64]u8); 468 + 
if (dist.object.get("integrity")) |i| { 469 + if (i == .string) { 470 + const int_str = i.string; 471 + if (std.mem.startsWith(u8, int_str, "sha512-")) { 472 + const b64 = int_str[7..]; 473 + _ = std.base64.standard.Decoder.decode(&integrity, b64) catch {}; 474 + } 475 + } 476 + } else if (dist.object.get("shasum")) |s| { 477 + if (s == .string) { 478 + const hex = s.string; 479 + if (hex.len >= 40) { 480 + for (0..20) |i| integrity[i] = std.fmt.parseInt(u8, hex[i * 2 ..][0..2], 16) catch 0; 481 + } 482 + } 483 + } 484 + 485 + const deps = parseDepsMap(allocator, version_data.object.get("dependencies")); 486 + const opt_deps = parseDepsMap(allocator, version_data.object.get("optionalDependencies")); 487 + const peer_deps = parseDepsMap(allocator, version_data.object.get("peerDependencies")); 488 + const peer_meta = parsePeerMeta(allocator, version_data.object.get("peerDependenciesMeta")); 489 + 490 + var os_filter: ?[]const u8 = null; 491 + var cpu_filter: ?[]const u8 = null; 492 + 493 + if (version_data.object.get("os")) |os_arr| { 494 + if (os_arr == .array) { 495 + var os_buf = std.ArrayListUnmanaged(u8){}; 496 + for (os_arr.array.items, 0..) |item, i| { 497 + if (item == .string) { 498 + if (i > 0) os_buf.append(allocator, ',') catch {}; 499 + os_buf.appendSlice(allocator, item.string) catch {}; 500 + } 501 + } 502 + if (os_buf.items.len > 0) { 503 + os_filter = os_buf.toOwnedSlice(allocator) catch null; 504 + } else os_buf.deinit(allocator); 505 + } 506 + } 507 + 508 + if (version_data.object.get("cpu")) |cpu_arr| { 509 + if (cpu_arr == .array) { 510 + var cpu_buf = std.ArrayListUnmanaged(u8){}; 511 + for (cpu_arr.array.items, 0..) 
|item, i| { 512 + if (item == .string) { 513 + if (i > 0) cpu_buf.append(allocator, ',') catch {}; 514 + cpu_buf.appendSlice(allocator, item.string) catch {}; 515 + } 516 + } 517 + if (cpu_buf.items.len > 0) { 518 + cpu_filter = cpu_buf.toOwnedSlice(allocator) catch null; 519 + } else cpu_buf.deinit(allocator); 520 + } 521 + } 522 + 523 + var libc_filter: ?[]const u8 = null; 524 + if (version_data.object.get("libc")) |libc_arr| { 525 + if (libc_arr == .array) { 526 + var libc_buf = std.ArrayListUnmanaged(u8){}; 527 + for (libc_arr.array.items, 0..) |item, i| { 528 + if (item == .string) { 529 + if (i > 0) libc_buf.append(allocator, ',') catch {}; 530 + libc_buf.appendSlice(allocator, item.string) catch {}; 531 + } 532 + } 533 + if (libc_buf.items.len > 0) { 534 + libc_filter = libc_buf.toOwnedSlice(allocator) catch null; 535 + } else libc_buf.deinit(allocator); 536 + } 537 + } 538 + 539 + var bin = std.StringHashMap([]const u8).init(allocator); 540 + if (version_data.object.get("bin")) |bin_val| { 541 + if (bin_val == .object) { 542 + for (bin_val.object.keys(), bin_val.object.values()) |key, val| { 543 + if (val == .string) bin.put(allocator.dupe(u8, key) 544 + catch continue, allocator.dupe(u8, val.string) catch continue) catch {}; 545 + } 546 + } else if (bin_val == .string) { 547 + const bin_name = allocator.dupe(u8, name) catch continue; 548 + const bin_path = allocator.dupe(u8, bin_val.string) catch { 549 + allocator.free(bin_name); continue; 550 + }; 551 + bin.put(bin_name, bin_path) catch { 552 + allocator.free(bin_name); 553 + allocator.free(bin_path); 554 + }; 555 + } 556 + } 557 + 558 + try metadata.versions.append(allocator, .{ 559 + .version = version, 560 + .version_str = try allocator.dupe(u8, version_str), 561 + .integrity = integrity, 562 + .tarball_url = try allocator.dupe(u8, tarball), 563 + .dependencies = deps, 564 + .optional_dependencies = opt_deps, 565 + .peer_dependencies = peer_deps, 566 + .peer_dependencies_meta = peer_meta, 567 + .os = 
os_filter, .cpu = cpu_filter, .libc = libc_filter,
                .bin = bin, .allocator = allocator,
            });
        }

        return metadata;
    }
};

/// A single package pinned to a concrete version by the resolver.
pub const ResolvedPackage = struct {
    name: intern.InternedString, // interned; not owned, not freed in deinit
    version: Version,
    integrity: [64]u8, // SHA-512 (or zero-padded SHA-1) digest of the tarball
    tarball_url: []const u8, // owned
    dependencies: std.ArrayListUnmanaged(Dep),
    depth: u32, // shallowest depth at which this package was requested
    direct: bool, // true when listed in the root package.json
    parent_path: ?[]const u8, // owned; non-null only for nested (non-hoisted) installs
    has_bin: bool,
    allocator: std.mem.Allocator,

    // NOTE(review): `dev` is declared but never set anywhere in this chunk —
    // confirm whether dev-dependency flagging is implemented elsewhere.
    pub const DepFlags = struct {
        peer: bool = false,
        dev: bool = false,
        optional: bool = false,
    };

    pub const Dep = struct {
        name: intern.InternedString, // interned; not owned
        constraint: []const u8, // owned by the ResolvedPackage
        flags: DepFlags = .{},
    };

    /// Frees owned slices (tarball URL, parent path, dep constraints).
    /// Interned names are owned by the string pool and left alone.
    pub fn deinit(self: *ResolvedPackage) void {
        self.allocator.free(self.tarball_url);
        if (self.parent_path) |p| self.allocator.free(p);
        for (self.dependencies.items) |dep| {
            self.allocator.free(dep.constraint);
        }
        self.dependencies.deinit(self.allocator);
    }

    /// Returns the install path relative to node_modules, caller-owned:
    /// "<parent>/node_modules/<name>" for nested installs, else "<name>".
    pub fn installPath(self: *const ResolvedPackage, allocator: std.mem.Allocator) ![]const u8 {
        if (self.parent_path) |parent| {
            return std.fmt.allocPrint(allocator, "{s}/node_modules/{s}", .{ parent, self.name.slice() });
        }
        return allocator.dupe(u8, self.name.slice());
    }
};

/// Callback invoked each time a package is resolved (or re-resolved).
pub const OnPackageResolvedFn = *const fn (
    pkg: *const ResolvedPackage,
    user_data: ?*anyopaque
) void;

/// Resolves a dependency tree against an npm-style registry, using a
/// shared metadata cache and optional on-disk cache DB.
pub const Resolver = struct {
    allocator: std.mem.Allocator,
    cache_allocator: std.mem.Allocator, // owns long-lived metadata_cache entries
    string_pool: *intern.StringPool,
    http: *fetcher.Fetcher,
    cache_db: ?*cache.CacheDB,
    resolved: std.StringHashMap(*ResolvedPackage), // install-path key -> package
    constraints: std.StringHashMap(std.ArrayListUnmanaged(Constraint)),
    in_progress: std.StringHashMap(void),
    registry_url: []const u8,
    metadata_cache:
*std.StringHashMap(PackageMetadata),
    on_package_resolved: ?OnPackageResolvedFn,
    on_package_resolved_data: ?*anyopaque,

    /// Builds a Resolver over caller-owned collaborators. `registry_url`
    /// and `metadata_cache` are borrowed, not copied.
    pub fn init(
        allocator: std.mem.Allocator,
        cache_allocator: std.mem.Allocator,
        string_pool: *intern.StringPool,
        http: *fetcher.Fetcher,
        cache_db: ?*cache.CacheDB,
        registry_url: []const u8,
        metadata_cache: *std.StringHashMap(PackageMetadata),
    ) Resolver {
        return .{
            .allocator = allocator,
            .cache_allocator = cache_allocator,
            .string_pool = string_pool,
            .http = http,
            .cache_db = cache_db,
            .resolved = std.StringHashMap(*ResolvedPackage).init(allocator),
            .constraints = std.StringHashMap(std.ArrayListUnmanaged(Constraint)).init(allocator),
            .in_progress = std.StringHashMap(void).init(allocator),
            .registry_url = registry_url,
            .metadata_cache = metadata_cache,
            .on_package_resolved = null,
            .on_package_resolved_data = null,
        };
    }

    /// Registers the per-package resolution callback (fired from the
    /// resolveSingle* paths).
    pub fn setOnPackageResolved(self: *Resolver, callback: OnPackageResolvedFn, user_data: ?*anyopaque) void {
        self.on_package_resolved = callback;
        self.on_package_resolved_data = user_data;
    }

    /// Frees all resolved packages, their map keys, and the constraint
    /// lists. The shared metadata_cache is NOT owned and is untouched.
    pub fn deinit(self: *Resolver) void {
        var key_iter = self.resolved.keyIterator();
        while (key_iter.next()) |key| {
            self.allocator.free(key.*);
        }

        var iter = self.resolved.valueIterator();
        while (iter.next()) |pkg| {
            pkg.*.deinit();
            self.allocator.destroy(pkg.*);
        }
        self.resolved.deinit();

        var cons_key_iter = self.constraints.keyIterator();
        while (cons_key_iter.next()) |key| {
            self.allocator.free(key.*);
        }

        var cons_iter = self.constraints.valueIterator();
        while (cons_iter.next()) |list| {
            list.deinit(self.allocator);
        }
        self.constraints.deinit();
        self.in_progress.deinit();
    }

    pub fn
resolveFromPackageJson(self: *Resolver, path: []const u8) !void {
        // Entry point: two-pass resolution driven by a package.json.
        //   Pass 1: BFS over the dependency graph collecting every
        //           (package, constraint, requester, depth) tuple while
        //           streaming registry metadata into metadata_cache.
        //   Then:   compute one "optimal" version per package.
        //   Pass 2: BFS again, materializing ResolvedPackage entries via
        //           resolveSingleWithOptimal.
        const path_z = try self.allocator.dupeZ(u8, path);
        defer self.allocator.free(path_z);

        var pkg_json = try json.PackageJson.parse(self.allocator, path_z);
        defer pkg_json.deinit(self.allocator);

        debug.log("pass 1: collecting constraints", .{});
        var pass1_start: u64 = @intCast(std.time.nanoTimestamp());
        // Warm up tarball connections while metadata is still being fetched.
        self.http.initiateTarballConnectionsAsync();

        const ConstraintInfo = struct {
            constraint: Constraint,
            constraint_str: []const u8, // owned copy, freed in the defer below
            requester: []const u8, // owned copy, freed in the defer below
            depth: u32,
        };

        var all_constraints = std.StringHashMap(std.ArrayListUnmanaged(ConstraintInfo)).init(self.allocator);
        defer {
            var iter = all_constraints.iterator();
            while (iter.next()) |entry| {
                for (entry.value_ptr.items) |info| {
                    self.allocator.free(info.constraint_str);
                    self.allocator.free(info.requester);
                }
                entry.value_ptr.deinit(self.allocator);
                self.allocator.free(entry.key_ptr.*);
            }
            all_constraints.deinit();
        }

        // Work item for pass 1; name/constraint_str borrow from pkg_json or
        // from metadata_cache entries, both of which outlive the queue.
        const CollectItem = struct {
            name: []const u8,
            constraint_str: []const u8,
            requester: []const u8,
            depth: u32,
        };

        var collect_queue = std.ArrayListUnmanaged(CollectItem){};
        defer collect_queue.deinit(self.allocator);

        // De-dupes (name, constraint, requester) tuples across levels.
        var seen_collect = std.StringHashMap(void).init(self.allocator);
        defer {
            var key_iter = seen_collect.keyIterator();
            while (key_iter.next()) |k| self.allocator.free(k.*);
            seen_collect.deinit();
        }

        // Seed pass 1 with the root's dependencies and devDependencies.
        var dep_iter = pkg_json.dependencies.iterator();
        while (dep_iter.next()) |entry| {
            try collect_queue.append(self.allocator, .{
                .name = entry.key_ptr.*,
                .constraint_str = entry.value_ptr.*,
                .requester = "root",
                .depth = 0,
            });
        }

        var dev_iter = pkg_json.dev_dependencies.iterator();
        while (dev_iter.next()) |entry| {
            try collect_queue.append(self.allocator, .{
                .name = entry.key_ptr.*,
                .constraint_str = entry.value_ptr.*,
                .requester = "root",
                .depth = 0,
            });
        }

        var collect_level: u32 = 0;
        while (collect_queue.items.len > 0) {
            debug.log(" pass1 level {d}: {d} packages", .{ collect_level, collect_queue.items.len });

            var to_fetch = std.ArrayListUnmanaged([]const u8){};
            defer to_fetch.deinit(self.allocator);

            // Figure out which packages at this level still need metadata:
            // try the in-memory cache, then the on-disk cache DB, and only
            // then queue a network fetch (de-duplicated by linear scan).
            for (collect_queue.items) |item| {
                if (!self.metadata_cache.contains(item.name)) {
                    var loaded_from_disk = false;
                    if (self.cache_db) |db| {
                        if (db.lookupMetadata(item.name, self.allocator)) |json_data| {
                            const metadata = PackageMetadata.parseFromJson(self.cache_allocator, json_data) catch {
                                self.allocator.free(json_data);
                                continue;
                            };
                            self.allocator.free(json_data);
                            const cache_key = self.cache_allocator.dupe(u8, item.name) catch continue;
                            self.metadata_cache.put(cache_key, metadata) catch {
                                self.cache_allocator.free(cache_key);
                                continue;
                            };
                            loaded_from_disk = true;
                        }
                    }
                    if (!loaded_from_disk) {
                        var already_listed = false;
                        for (to_fetch.items) |f| {
                            if (std.mem.eql(u8, f, item.name)) {
                                already_listed = true;
                                break;
                            }
                        }
                        if (!already_listed) try to_fetch.append(self.allocator, item.name);
                    }
                }
            }

            // Streaming-fetch callback: persist raw metadata to the cache
            // DB, parse it into metadata_cache, then speculatively queue the
            // best-matching version's dependencies for prefetching.
            const StreamContext = struct {
                resolver: *Resolver,
                prefetch_queue: *std.ArrayListUnmanaged([]const u8),
                collect_queue_items: []const CollectItem,
                allocator: std.mem.Allocator,

                fn onMetadata(name: []const u8, data: ?[]const u8, has_error: bool, userdata: ?*anyopaque) void {
                    const ctx: *@This() = @ptrCast(@alignCast(userdata));
                    if (has_error or data == null) return;

                    if (ctx.resolver.cache_db) |db| {
                        db.insertMetadata(name, data.?) catch {};
                    }

                    const metadata = PackageMetadata.parseFromJson(ctx.resolver.cache_allocator, data.?) catch return;
                    const cache_key = ctx.resolver.cache_allocator.dupe(u8, name) catch return;
                    ctx.resolver.metadata_cache.put(cache_key, metadata) catch {
                        ctx.resolver.cache_allocator.free(cache_key);
                        return;
                    };

                    // NOTE(review): `&metadata` points at this stack copy,
                    // not the copy stored in metadata_cache — confirm
                    // selectBestVersion only borrows for the call.
                    for (ctx.collect_queue_items) |item| {
                        if (!std.mem.eql(u8, item.name, name)) continue;

                        const constraint = Constraint.parse(item.constraint_str) catch continue;
                        const best = ctx.resolver.selectBestVersion(&metadata, constraint) orelse continue;
                        if (!best.matchesPlatform()) continue;

                        var dep_it = best.dependencies.iterator();
                        while (dep_it.next()) |entry| {
                            const dep_name = entry.key_ptr.*;
                            if (ctx.resolver.metadata_cache.contains(dep_name)) continue;

                            var already_queued = false;
                            for (ctx.prefetch_queue.items) |q| {
                                if (std.mem.eql(u8, q, dep_name)) {
                                    already_queued = true;
                                    break;
                                }
                            }
                            if (!already_queued) ctx.prefetch_queue.append(ctx.allocator, dep_name) catch {};
                        }
                        break;
                    }
                }
            };

            var next_collect = std.ArrayListUnmanaged(CollectItem){};
            errdefer next_collect.deinit(self.allocator);

            var prefetch_queue = std.ArrayListUnmanaged([]const u8){};
            defer prefetch_queue.deinit(self.allocator);

            if (to_fetch.items.len > 0) {
                var stream_ctx = StreamContext{
                    .resolver = self,
                    .prefetch_queue = &prefetch_queue,
                    .collect_queue_items = collect_queue.items,
                    .allocator = self.allocator,
                };

                try self.http.fetchMetadataStreaming(
                    to_fetch.items,
                    self.allocator,
                    StreamContext.onMetadata,
                    &stream_ctx,
                );

                // Second round for the speculative next-level names; failure
                // here is non-fatal (they'll be fetched next level anyway).
                if (prefetch_queue.items.len > 0) {
                    debug.log(" prefetch: queued {d} next-level packages", .{prefetch_queue.items.len});

                    self.http.fetchMetadataStreaming(
                        prefetch_queue.items,
                        self.allocator,
                        StreamContext.onMetadata,
                        &stream_ctx,
                    ) catch {};
                }
            }

            // Record this level's constraints and expand children into
            // next_collect (regular, platform-matching optional, and
            // non-optional peer dependencies).
            for (collect_queue.items) |item| {
                const seen_key = std.fmt.allocPrint(self.allocator, "{s}@{s}@{s}", .{ item.name, item.constraint_str, item.requester }) catch continue;
                if (seen_collect.contains(seen_key)) {
                    self.allocator.free(seen_key);
                    continue;
                }
                try seen_collect.put(seen_key, {});

                const constraint = Constraint.parse(item.constraint_str) catch continue;
                const gop = try all_constraints.getOrPut(item.name);
                if (!gop.found_existing) {
                    gop.key_ptr.* = try self.allocator.dupe(u8, item.name);
                    gop.value_ptr.* = .{};
                }
                try gop.value_ptr.append(self.allocator, .{
                    .constraint = constraint,
                    .constraint_str = try self.allocator.dupe(u8, item.constraint_str),
                    .requester = try self.allocator.dupe(u8, item.requester),
                    .depth = item.depth,
                });

                if (self.metadata_cache.get(item.name)) |metadata| {
                    const best = self.selectBestVersion(&metadata, constraint) orelse continue;
                    if (!best.matchesPlatform()) continue;

                    var dep_it = best.dependencies.iterator();
                    while (dep_it.next()) |entry| {
                        try next_collect.append(self.allocator, .{
                            .name = entry.key_ptr.*,
                            .constraint_str = entry.value_ptr.*,
                            .requester = item.name,
                            .depth = item.depth + 1,
                        });
                    }

                    // Optional deps are only skipped when we already have
                    // metadata proving no platform-compatible version.
                    var opt_it = best.optional_dependencies.iterator();
                    while (opt_it.next()) |entry| {
                        if (self.metadata_cache.get(entry.key_ptr.*)) |opt_meta| {
                            const opt_con = Constraint.parse(entry.value_ptr.*) catch continue;
                            const opt_best = self.selectBestVersion(&opt_meta, opt_con) orelse continue;
                            if (!opt_best.matchesPlatform()) continue;
                        }
                        try next_collect.append(self.allocator, .{
                            .name = entry.key_ptr.*,
                            .constraint_str = entry.value_ptr.*,
                            .requester = item.name,
                            .depth = item.depth + 1,
                        });
                    }

                    // Peer deps marked in peerDependenciesMeta (i.e.
                    // optional peers) are not walked.
                    var peer_it = best.peer_dependencies.iterator();
                    while (peer_it.next()) |entry| {
                        if (best.peer_dependencies_meta.contains(entry.key_ptr.*)) continue;
                        try next_collect.append(self.allocator, .{
                            .name = entry.key_ptr.*,
                            .constraint_str = entry.value_ptr.*,
                            .requester = item.name,
                            .depth = item.depth + 1,
                        });
                    }
                }
            }

            collect_queue.deinit(self.allocator);
            collect_queue = next_collect;
            collect_level += 1;
        }

        pass1_start = debug.timer("pass 1 complete", pass1_start);
        debug.log(" collected constraints for {d} packages", .{all_constraints.count()});
        debug.log("computing optimal versions", .{});

        // For each package, pick one version: ideally satisfying ALL
        // constraints, otherwise the depth-weighted "satisfies most" pick.
        var optimal_versions = std.StringHashMap(*const VersionInfo).init(self.allocator);
        defer optimal_versions.deinit();

        var pkg_iter = all_constraints.iterator();
        while (pkg_iter.next()) |entry| {
            const pkg_name = entry.key_ptr.*;
            const constraint_list = entry.value_ptr.items;

            if (self.metadata_cache.get(pkg_name)) |metadata| {
                var plain_constraints = try self.allocator.alloc(Constraint, constraint_list.len);
                defer self.allocator.free(plain_constraints);
                for (constraint_list, 0..) |info, i| {
                    plain_constraints[i] = info.constraint;
                }

                if (self.selectBestVersionForConstraints(&metadata, plain_constraints)) |best| {
                    try optimal_versions.put(pkg_name, best);
                } else {
                    const best = self.selectVersionSatisfyingMost(&metadata, constraint_list);
                    if (best) |b| {
                        if (b.version.prerelease) |pre| {
                            debug.log(" {s}: optimal={d}.{d}.{d}-{s} (satisfies {d}/{d} constraints)", .{
                                pkg_name, b.version.major, b.version.minor, b.version.patch, pre,
                                self.countSatisfied(&metadata, b, constraint_list), constraint_list.len,
                            });
                        } else {
                            debug.log(" {s}: optimal={d}.{d}.{d} (satisfies {d}/{d} constraints)", .{
                                pkg_name, b.version.major, b.version.minor, b.version.patch,
                                self.countSatisfied(&metadata, b, constraint_list), constraint_list.len,
                            });
                        }
                        try optimal_versions.put(pkg_name, b);
                    }
                }
            }
        }

        pass1_start = debug.timer("optimal versions computed", pass1_start);
        debug.log("pass 2: resolving with optimal versions", .{});

        const WorkItem = struct {
            name: []const u8,
            constraint: []const u8,
            depth: u32,
            direct: bool,
            parent_name: ?[]const u8, // owned install path of requester, or null for root deps
        };

        var queue = std.ArrayListUnmanaged(WorkItem){};
        defer {
            for (queue.items) |item| if (item.parent_name) |p| self.allocator.free(p);
            queue.deinit(self.allocator);
        }

        // Seed pass 2 with the same root dependencies.
        dep_iter = pkg_json.dependencies.iterator();
        while (dep_iter.next()) |entry| {
            try queue.append(self.allocator, .{
                .name = entry.key_ptr.*,
                .constraint = entry.value_ptr.*,
                .depth = 0,
                .direct = true,
                .parent_name = null,
            });
        }
        dev_iter = pkg_json.dev_dependencies.iterator();
        while (dev_iter.next()) |entry| {
            try queue.append(self.allocator, .{
                .name = entry.key_ptr.*,
                .constraint = entry.value_ptr.*,
                .depth = 0,
                .direct = true,
                .parent_name = null,
            });
        }

        // De-dupes "name@constraint" work items; keys are owned.
        var processed = std.StringHashMap(void).init(self.allocator);
        defer {
            var key_iter = processed.keyIterator();
            while (key_iter.next()) |k| self.allocator.free(k.*);
            processed.deinit();
        }

        var level: u32 = 0;
        while (queue.items.len > 0) {
            const level_start: u64 = @intCast(std.time.nanoTimestamp());
            debug.log(" pass2 level {d}: {d} packages", .{ level, queue.items.len });

            var next_queue = std.ArrayListUnmanaged(WorkItem){};
            errdefer next_queue.deinit(self.allocator);

            for (queue.items) |item| {
                const key = std.fmt.allocPrint(self.allocator, "{s}@{s}", .{ item.name, item.constraint }) catch continue;
                if (processed.contains(key)) {
                    self.allocator.free(key);
                    continue;
                }
                try processed.put(key, {});

                // Resolution failure for one package is logged and skipped,
                // not fatal to the whole install.
                const pkg = self.resolveSingleWithOptimal(item.name, item.constraint, item.depth, item.direct, item.parent_name, &optimal_versions) catch |err| {
                    debug.log(" failed to resolve {s}: {}", .{ item.name, err });
                    continue;
                };

                const pkg_install_path = pkg.installPath(self.allocator) catch continue;
                defer self.allocator.free(pkg_install_path);

                for (pkg.dependencies.items) |dep| {
                    const dep_key = std.fmt.allocPrint(self.allocator, "{s}@{s}", .{ dep.name.slice(), dep.constraint }) catch continue;
                    defer self.allocator.free(dep_key);
                    if (!processed.contains(dep_key)) {
                        try next_queue.append(self.allocator, .{
                            .name = dep.name.slice(),
                            .constraint = dep.constraint,
                            .depth = item.depth + 1,
                            .direct = false,
                            .parent_name = try self.allocator.dupe(u8, pkg_install_path),
                        });
                    }
                }
            }
            _ = debug.timer(" resolve + queue next", level_start);

            // Pump the HTTP fetcher so tarball downloads overlap resolution.
            const completed = self.http.tick();
            if (completed > 0) {
                debug.log(" tarballs: {d} completed, {d} in flight", .{ completed, self.http.pendingTarballCount() });
            }

            for (queue.items) |item| if (item.parent_name) |p| self.allocator.free(p);
            queue.deinit(self.allocator);
            queue = next_queue;
            level += 1;
        }
    }

    /// Counts how many constraints in `constraint_list` (any slice of items
    /// with a `.constraint`) accept `version_info.version`. Debug/logging
    /// helper only.
    fn countSatisfied(_: *Resolver, _: *const PackageMetadata, version_info: *const VersionInfo, constraint_list: anytype) usize {
        var count: usize = 0;
        for (constraint_list) |info| {
            if (info.constraint.satisfies(version_info.version)) count += 1;
        }
        return count;
    }

    /// Fallback picker when no version satisfies every constraint: scores
    /// each platform-compatible version by the constraints it satisfies,
    /// weighting shallow requesters more (1000 / (depth + 1)); ties go to
    /// the higher version. Prerelease versions are only considered when at
    /// least one constraint explicitly targets a prerelease.
    fn selectVersionSatisfyingMost(_: *Resolver, metadata: *const PackageMetadata, constraint_list: anytype) ?*const VersionInfo {
        var best: ?*const VersionInfo = null;
        var best_score: i64 = -1;

        var want_prerelease = false;
        for (constraint_list) |info| {
            if (info.constraint.version.prerelease != null) {
                want_prerelease = true;
                break;
            }
        }

        for (metadata.versions.items) |*v| {
            if (v.version.prerelease != null and !want_prerelease) continue;
            if (!v.matchesPlatform()) continue;

            var score: i64 = 0;
            for (constraint_list) |info| {
                if (info.constraint.satisfies(v.version)) {
                    const weight: i64 = @intCast(1000 / (info.depth + 1));
                    score += weight;
                }
            }

            if (score > best_score or (score == best_score and best != null and v.version.order(best.?.version) == .gt)) {
                best = v;
                best_score = score;
            }
        }

        return best;
    }

    /// Pass-2 resolution of one (name, constraint) pair, preferring the
    /// precomputed entry in `optimal_versions` when it satisfies the
    /// constraint. Returns the hoisted package on a satisfying match, a
    /// nested package under `parent_name` on a version conflict, or errors
    /// (NoMatchingVersion, PlatformMismatch) when nothing fits.
    fn resolveSingleWithOptimal(
        self: *Resolver,
        name: []const u8,
        constraint_str: []const u8,
        depth: u32,
        direct: bool,
        parent_name: ?[]const u8,
        optimal_versions: *std.StringHashMap(*const VersionInfo),
    ) !*ResolvedPackage {
        const constraint = try
Constraint.parse(constraint_str);

        // Already hoisted? Reuse it when the constraint matches; otherwise
        // fall back to a nested install under the requesting parent.
        if (self.resolved.get(name)) |existing_pkg| {
            if (constraint.satisfies(existing_pkg.version)) {
                if (direct) existing_pkg.direct = true;
                if (depth < existing_pkg.depth) existing_pkg.depth = depth;
                return existing_pkg;
            }

            if (parent_name) |parent| {
                var metadata = try self.fetchMetadata(name);
                const nested_best = self.selectBestVersion(&metadata, constraint) orelse return existing_pkg;
                if (!nested_best.matchesPlatform()) return existing_pkg;

                debug.log(" nested: {s}@{d}.{d}.{d} under {s} (hoisted: {d}.{d}.{d})", .{
                    name,
                    nested_best.version.major,
                    nested_best.version.minor,
                    nested_best.version.patch,
                    parent,
                    existing_pkg.version.major,
                    existing_pkg.version.minor,
                    existing_pkg.version.patch,
                });

                return try self.createNestedPackage(name, nested_best, depth, parent);
            }

            // Conflicting root-level constraint with no parent to nest
            // under: silently keep the hoisted version.
            return existing_pkg;
        }

        var metadata = try self.fetchMetadata(name);
        // Prefer the globally computed optimal version when it satisfies
        // this constraint and the current platform; otherwise fall back to
        // the best match for this constraint alone.
        const version_info = blk: {
            if (optimal_versions.get(name)) |optimal| {
                if (constraint.satisfies(optimal.version) and optimal.matchesPlatform()) break :blk optimal;
            }
            break :blk self.selectBestVersion(&metadata, constraint) orelse return error.NoMatchingVersion;
        };

        if (!version_info.matchesPlatform()) return error.PlatformMismatch;

        const cons_gop = try self.constraints.getOrPut(name);
        if (!cons_gop.found_existing) {
            cons_gop.key_ptr.* = try self.allocator.dupe(u8, name);
            cons_gop.value_ptr.* = .{};
        }
        try cons_gop.value_ptr.append(self.allocator, constraint);

        const pkg = try self.allocator.create(ResolvedPackage);
        errdefer self.allocator.destroy(pkg);

        pkg.* = .{
            .name = try self.string_pool.intern(name),
            .version = version_info.version,
            .integrity = version_info.integrity,
            .tarball_url = try self.allocator.dupe(u8, version_info.tarball_url),
            .dependencies = .{},
            .depth = depth,
            .direct = direct,
            .parent_path = null,
            .has_bin = version_info.bin.count() > 0,
            .allocator = self.allocator,
        };

        // NOTE(review): if an append below fails, the errdefers destroy
        // `pkg` and free `name_key` while self.resolved still holds the
        // entry — confirm callers treat resolution errors as fatal for
        // this resolver instance.
        const name_key = try self.allocator.dupe(u8, name);
        errdefer self.allocator.free(name_key);
        try self.resolved.put(name_key, pkg);

        var dep_it = version_info.dependencies.iterator();
        while (dep_it.next()) |entry| {
            try pkg.dependencies.append(self.allocator, .{
                .name = try self.string_pool.intern(entry.key_ptr.*),
                .constraint = try self.allocator.dupe(u8, entry.value_ptr.*),
                .flags = .{},
            });
        }

        // Optional deps: skip only when cached metadata already proves no
        // platform-compatible version exists.
        var opt_it = version_info.optional_dependencies.iterator();
        while (opt_it.next()) |entry| {
            if (self.metadata_cache.get(entry.key_ptr.*)) |opt_meta| {
                const opt_con = Constraint.parse(entry.value_ptr.*) catch continue;
                const opt_best = self.selectBestVersion(&opt_meta, opt_con) orelse continue;
                if (!opt_best.matchesPlatform()) continue;
            }
            try pkg.dependencies.append(self.allocator, .{
                .name = try self.string_pool.intern(entry.key_ptr.*),
                .constraint = try self.allocator.dupe(u8, entry.value_ptr.*),
                .flags = .{ .optional = true },
            });
        }

        // Peer deps listed in peerDependenciesMeta (optional peers) are skipped.
        var peer_it = version_info.peer_dependencies.iterator();
        while (peer_it.next()) |entry| {
            if (version_info.peer_dependencies_meta.contains(entry.key_ptr.*)) continue;
            try pkg.dependencies.append(self.allocator, .{
                .name = try self.string_pool.intern(entry.key_ptr.*),
                .constraint = try self.allocator.dupe(u8, entry.value_ptr.*),
                .flags = .{ .peer = true },
            });
        }

        if (self.on_package_resolved) |callback| {
            callback(pkg, self.on_package_resolved_data);
        }

        return pkg;
    }

    /// Older single-pass resolution path (no precomputed optimal versions):
    /// on a constraint conflict it re-selects a version satisfying ALL
    /// accumulated constraints and mutates the hoisted package in place,
    /// falling back to a nested install under `parent_name`.
    fn resolveSingle(self: *Resolver, name: []const u8, constraint_str: []const u8, depth: u32, direct: bool, parent_name: ?[]const u8) !*ResolvedPackage {
        const constraint = try Constraint.parse(constraint_str);

        if (self.resolved.get(name)) |existing_pkg| {
            if (constraint.satisfies(existing_pkg.version)) {
                if (direct) existing_pkg.direct = true;
                if (depth < existing_pkg.depth) existing_pkg.depth = depth;
                return existing_pkg;
            }

            // Record the new constraint, then try to find one version that
            // satisfies every constraint seen so far for this package.
            const cons_gop = try self.constraints.getOrPut(name);
            if (!cons_gop.found_existing) {
                cons_gop.key_ptr.* = try self.allocator.dupe(u8, name);
                cons_gop.value_ptr.* = .{};
            }
            try cons_gop.value_ptr.append(self.allocator, constraint);

            var metadata = try self.fetchMetadata(name);
            const all_constraints = cons_gop.value_ptr.items;
            const best = self.selectBestVersionForConstraints(&metadata, all_constraints);

            if (best) |b| {
                if (!b.matchesPlatform()) {
                    return error.PlatformMismatch;
                }

                // A different version satisfies everyone: rewrite the
                // hoisted package in place and re-fire the callback.
                if (b.version.order(existing_pkg.version) != .eq) {
                    debug.log(" re-resolve {s}: {d}.{d}.{d} -> {d}.{d}.{d}", .{
                        name,
                        existing_pkg.version.major,
                        existing_pkg.version.minor,
                        existing_pkg.version.patch,
                        b.version.major,
                        b.version.minor,
                        b.version.patch,
                    });

                    existing_pkg.version = b.version;
                    existing_pkg.integrity = b.integrity;
                    self.allocator.free(existing_pkg.tarball_url);
                    existing_pkg.tarball_url = try self.allocator.dupe(u8, b.tarball_url);
                    existing_pkg.has_bin = b.bin.count() > 0;

                    if (self.on_package_resolved) |callback| {
                        callback(existing_pkg, self.on_package_resolved_data);
                    }
                }

                if (direct) existing_pkg.direct = true;
                if (depth < existing_pkg.depth) existing_pkg.depth = depth;
                return
existing_pkg;
            } else {
                // No version satisfies all constraints: nest this one under
                // its parent if possible, otherwise keep the hoisted copy.
                if (parent_name) |parent| {
                    const nested_best = self.selectBestVersion(&metadata, constraint) orelse {
                        debug.log(" nested: no version for {s} {s}", .{ name, constraint_str });
                        return existing_pkg;
                    };

                    if (!nested_best.matchesPlatform()) return existing_pkg;
                    debug.log(" nested: {s}@{d}.{d}.{d} under {s} (hoisted: {d}.{d}.{d})", .{
                        name,
                        nested_best.version.major,
                        nested_best.version.minor,
                        nested_best.version.patch,
                        parent,
                        existing_pkg.version.major,
                        existing_pkg.version.minor,
                        existing_pkg.version.patch,
                    });

                    return try self.createNestedPackage(name, nested_best, depth, parent);
                } else {
                    debug.log(" version conflict for {s}: no version satisfies all constraints", .{name});
                    return existing_pkg;
                }
            }
        }

        // First time seeing this package: record the constraint, pick the
        // best matching version, and materialize a hoisted ResolvedPackage.
        const cons_gop = try self.constraints.getOrPut(name);
        if (!cons_gop.found_existing) {
            cons_gop.key_ptr.* = try self.allocator.dupe(u8, name);
            cons_gop.value_ptr.* = .{};
        }
        try cons_gop.value_ptr.append(self.allocator, constraint);

        var metadata = try self.fetchMetadata(name);
        const best = self.selectBestVersion(&metadata, constraint) orelse {
            return error.NoMatchingVersion;
        };

        if (!best.matchesPlatform()) {
            return error.PlatformMismatch;
        }

        const pkg = try self.allocator.create(ResolvedPackage);
        errdefer self.allocator.destroy(pkg);

        pkg.* = .{
            .name = try self.string_pool.intern(name),
            .version = best.version,
            .integrity = best.integrity,
            .tarball_url = try self.allocator.dupe(u8, best.tarball_url),
            .dependencies = .{},
            .depth = depth,
            .direct = direct,
            .parent_path = null,
            .has_bin = best.bin.count() > 0,
            .allocator = self.allocator,
        };

        // NOTE(review): same hazard as resolveSingleWithOptimal — a failing
        // append below destroys `pkg`/`name_key` via errdefer while
        // self.resolved still references them; verify error paths.
        const name_key = try self.allocator.dupe(u8, name);
        errdefer self.allocator.free(name_key);
        try self.resolved.put(name_key, pkg);

        var dep_iter = best.dependencies.iterator();
        while (dep_iter.next()) |entry| {
            const dep_name = entry.key_ptr.*;
            const dep_constraint = entry.value_ptr.*;

            try pkg.dependencies.append(self.allocator, .{
                .name = try self.string_pool.intern(dep_name),
                .constraint = try self.allocator.dupe(u8, dep_constraint),
                .flags = .{},
            });
        }

        // Optional deps: skip only when cached metadata already proves no
        // platform-compatible version exists.
        var opt_iter = best.optional_dependencies.iterator();
        while (opt_iter.next()) |entry| {
            const dep_name = entry.key_ptr.*;
            const dep_constraint = entry.value_ptr.*;

            if (self.metadata_cache.get(dep_name)) |opt_metadata| {
                const opt_constraint = Constraint.parse(dep_constraint) catch continue;
                const opt_best = self.selectBestVersion(&opt_metadata, opt_constraint) orelse continue;

                if (!opt_best.matchesPlatform()) continue;
            }

            try pkg.dependencies.append(self.allocator, .{
                .name = try self.string_pool.intern(dep_name),
                .constraint = try self.allocator.dupe(u8, dep_constraint),
                .flags = .{ .optional = true },
            });
        }

        // Peer deps listed in peerDependenciesMeta (optional peers) are skipped.
        var peer_iter = best.peer_dependencies.iterator();
        while (peer_iter.next()) |entry| {
            const dep_name = entry.key_ptr.*;
            const dep_constraint = entry.value_ptr.*;

            if (best.peer_dependencies_meta.contains(dep_name)) continue;
            try pkg.dependencies.append(self.allocator, .{
                .name = try self.string_pool.intern(dep_name),
                .constraint = try self.allocator.dupe(u8, dep_constraint),
                .flags = .{ .peer = true },
            });
        }

        if (self.on_package_resolved) |callback| {
            callback(pkg, self.on_package_resolved_data);
        }

        return pkg;
    }

    fn
createNestedPackage(self: *Resolver, name: []const u8, version_info: *const VersionInfo, depth: u32, parent_path: []const u8) !*ResolvedPackage { 1400 + const nested_key = try std.fmt.allocPrint(self.allocator, "{s}/node_modules/{s}", .{ parent_path, name }); 1401 + errdefer self.allocator.free(nested_key); 1402 + 1403 + if (self.resolved.get(nested_key)) |existing| { 1404 + self.allocator.free(nested_key); 1405 + return existing; 1406 + } 1407 + 1408 + const pkg = try self.allocator.create(ResolvedPackage); 1409 + errdefer self.allocator.destroy(pkg); 1410 + 1411 + pkg.* = .{ 1412 + .name = try self.string_pool.intern(name), 1413 + .version = version_info.version, 1414 + .integrity = version_info.integrity, 1415 + .tarball_url = try self.allocator.dupe(u8, version_info.tarball_url), 1416 + .dependencies = .{}, 1417 + .depth = depth, 1418 + .direct = false, 1419 + .parent_path = try self.allocator.dupe(u8, parent_path), 1420 + .has_bin = version_info.bin.count() > 0, 1421 + .allocator = self.allocator, 1422 + }; 1423 + 1424 + try self.resolved.put(nested_key, pkg); 1425 + 1426 + var dep_iter = version_info.dependencies.iterator(); 1427 + while (dep_iter.next()) |entry| { 1428 + try pkg.dependencies.append(self.allocator, .{ 1429 + .name = try self.string_pool.intern(entry.key_ptr.*), 1430 + .constraint = try self.allocator.dupe(u8, entry.value_ptr.*), 1431 + .flags = .{}, 1432 + }); 1433 + } 1434 + 1435 + var opt_iter = version_info.optional_dependencies.iterator(); 1436 + while (opt_iter.next()) |entry| { 1437 + const dep_name = entry.key_ptr.*; 1438 + const dep_constraint = entry.value_ptr.*; 1439 + 1440 + if (self.metadata_cache.get(dep_name)) |opt_metadata| { 1441 + const opt_constraint = Constraint.parse(dep_constraint) catch continue; 1442 + const opt_best = self.selectBestVersion(&opt_metadata, opt_constraint) orelse continue; 1443 + if (!opt_best.matchesPlatform()) continue; 1444 + } 1445 + 1446 + try pkg.dependencies.append(self.allocator, .{ 1447 + .name 
= try self.string_pool.intern(dep_name), 1448 + .constraint = try self.allocator.dupe(u8, dep_constraint), 1449 + .flags = .{ .optional = true }, 1450 + }); 1451 + } 1452 + 1453 + if (self.on_package_resolved) |callback| { 1454 + callback(pkg, self.on_package_resolved_data); 1455 + } 1456 + 1457 + return pkg; 1458 + } 1459 + 1460 + pub fn resolve(self: *Resolver, name: []const u8, constraint_str: []const u8, depth: u32) !*ResolvedPackage { 1461 + if (self.in_progress.contains(name)) { 1462 + return error.CyclicDependency; 1463 + } 1464 + const in_progress_key = try self.allocator.dupe(u8, name); 1465 + try self.in_progress.put(in_progress_key, {}); 1466 + defer { 1467 + _ = self.in_progress.remove(name); 1468 + self.allocator.free(in_progress_key); 1469 + } 1470 + 1471 + const constraint = try Constraint.parse(constraint_str); 1472 + const cons_gop = try self.constraints.getOrPut(name); 1473 + 1474 + if (!cons_gop.found_existing) { 1475 + cons_gop.key_ptr.* = try self.allocator.dupe(u8, name); 1476 + cons_gop.value_ptr.* = .{}; 1477 + } 1478 + try cons_gop.value_ptr.append(self.allocator, constraint); 1479 + 1480 + if (self.resolved.get(name)) |existing_pkg| { 1481 + if (constraint.satisfies(existing_pkg.version)) { 1482 + if (depth == 0) existing_pkg.direct = true; 1483 + if (depth < existing_pkg.depth) existing_pkg.depth = depth; 1484 + return existing_pkg; 1485 + } 1486 + 1487 + var metadata = try self.fetchMetadata(name); 1488 + const all_constraints = cons_gop.value_ptr.items; 1489 + const best = self.selectBestVersionForConstraints(&metadata, all_constraints) orelse { 1490 + debug.log(" version conflict for {s}: no version satisfies all constraints", .{name}); 1491 + return existing_pkg; 1492 + }; 1493 + 1494 + if (best.version.order(existing_pkg.version) != .eq) { 1495 + debug.log(" re-resolve {s}: {d}.{d}.{d} -> {d}.{d}.{d}", .{ 1496 + name, 1497 + existing_pkg.version.major, 1498 + existing_pkg.version.minor, 1499 + existing_pkg.version.patch, 1500 + 
best.version.major, 1501 + best.version.minor, 1502 + best.version.patch, 1503 + }); 1504 + 1505 + existing_pkg.version = best.version; 1506 + existing_pkg.integrity = best.integrity; 1507 + self.allocator.free(existing_pkg.tarball_url); 1508 + existing_pkg.tarball_url = try self.allocator.dupe(u8, best.tarball_url); 1509 + existing_pkg.has_bin = best.bin.count() > 0; 1510 + } 1511 + 1512 + if (depth == 0) existing_pkg.direct = true; 1513 + if (depth < existing_pkg.depth) existing_pkg.depth = depth; 1514 + return existing_pkg; 1515 + } 1516 + 1517 + var metadata = try self.fetchMetadata(name); 1518 + const all_constraints = cons_gop.value_ptr.items; 1519 + const best = self.selectBestVersionForConstraints(&metadata, all_constraints) orelse { 1520 + return error.NoMatchingVersion; 1521 + }; 1522 + 1523 + const pkg = try self.allocator.create(ResolvedPackage); 1524 + errdefer self.allocator.destroy(pkg); 1525 + 1526 + pkg.* = .{ 1527 + .name = try self.string_pool.intern(name), 1528 + .version = best.version, 1529 + .integrity = best.integrity, 1530 + .tarball_url = try self.allocator.dupe(u8, best.tarball_url), 1531 + .dependencies = .{}, 1532 + .depth = depth, 1533 + .direct = (depth == 0), 1534 + .parent_path = null, 1535 + .has_bin = best.bin.count() > 0, 1536 + .allocator = self.allocator, 1537 + }; 1538 + 1539 + const name_key = try self.allocator.dupe(u8, name); 1540 + errdefer self.allocator.free(name_key); 1541 + try self.resolved.put(name_key, pkg); 1542 + 1543 + var dep_iter = best.dependencies.iterator(); 1544 + while (dep_iter.next()) |entry| { 1545 + const dep_name = entry.key_ptr.*; 1546 + const dep_constraint = entry.value_ptr.*; 1547 + 1548 + _ = self.resolve(dep_name, dep_constraint, depth + 1) catch |err| { 1549 + std.log.debug("Skipping dep {s}: {}", .{ dep_name, err }); 1550 + continue; 1551 + }; 1552 + 1553 + try pkg.dependencies.append(self.allocator, .{ 1554 + .name = try self.string_pool.intern(dep_name), 1555 + .constraint = try 
self.allocator.dupe(u8, dep_constraint), 1556 + .flags = .{}, 1557 + }); 1558 + } 1559 + 1560 + var opt_iter = best.optional_dependencies.iterator(); 1561 + while (opt_iter.next()) |entry| { 1562 + const dep_name = entry.key_ptr.*; 1563 + const dep_constraint = entry.value_ptr.*; 1564 + 1565 + const resolved_opt = self.resolve(dep_name, dep_constraint, depth + 1) catch { 1566 + continue; 1567 + }; 1568 + 1569 + const opt_metadata = self.fetchMetadata(dep_name) catch continue; 1570 + const opt_constraint = Constraint.parse(dep_constraint) catch continue; 1571 + const opt_best = self.selectBestVersion(&opt_metadata, opt_constraint) orelse continue; 1572 + 1573 + if (!opt_best.matchesPlatform()) { 1574 + if (self.resolved.fetchRemove(dep_name)) |kv| { 1575 + self.allocator.free(kv.key); 1576 + kv.value.deinit(); 1577 + self.allocator.destroy(kv.value); 1578 + } 1579 + continue; 1580 + } 1581 + 1582 + _ = resolved_opt; 1583 + try pkg.dependencies.append(self.allocator, .{ 1584 + .name = try self.string_pool.intern(dep_name), 1585 + .constraint = try self.allocator.dupe(u8, dep_constraint), 1586 + .flags = .{ .optional = true }, 1587 + }); 1588 + } 1589 + 1590 + var peer_iter = best.peer_dependencies.iterator(); 1591 + while (peer_iter.next()) |entry| { 1592 + const dep_name = entry.key_ptr.*; 1593 + const dep_constraint = entry.value_ptr.*; 1594 + 1595 + if (best.peer_dependencies_meta.contains(dep_name)) continue; 1596 + _ = self.resolve(dep_name, dep_constraint, depth + 1) catch continue; 1597 + try pkg.dependencies.append(self.allocator, .{ 1598 + .name = try self.string_pool.intern(dep_name), 1599 + .constraint = try self.allocator.dupe(u8, dep_constraint), 1600 + .flags = .{ .peer = true }, 1601 + }); 1602 + } 1603 + 1604 + return pkg; 1605 + } 1606 + 1607 + fn fetchMetadata(self: *Resolver, name: []const u8) !PackageMetadata { 1608 + if (self.metadata_cache.get(name)) |cached| { 1609 + debug.log(" metadata: {s} (memory cache)", .{name}); 1610 + return 
PackageMetadata{ 1611 + .allocator = cached.allocator, 1612 + .name = cached.name, 1613 + .versions = cached.versions, 1614 + }; 1615 + } 1616 + 1617 + if (self.cache_db) |db| { 1618 + if (db.lookupMetadata(name, self.allocator)) |json_data| { 1619 + defer self.allocator.free(json_data); 1620 + const metadata = PackageMetadata.parseFromJson(self.cache_allocator, json_data) catch { 1621 + return self.fetchFromNetwork(name); 1622 + }; 1623 + 1624 + debug.log(" metadata: {s} (disk cache)", .{name}); 1625 + const cache_key = try self.cache_allocator.dupe(u8, name); 1626 + try self.metadata_cache.put(cache_key, metadata); 1627 + return self.metadata_cache.get(name).?; 1628 + } 1629 + } 1630 + 1631 + return self.fetchFromNetwork(name); 1632 + } 1633 + 1634 + fn fetchFromNetwork(self: *Resolver, name: []const u8) !PackageMetadata { 1635 + debug.log(" metadata: {s} (fetch)", .{name}); 1636 + const json_data = try self.http.fetchMetadata(name, self.allocator); 1637 + defer self.allocator.free(json_data); 1638 + 1639 + if (self.cache_db) |db| { 1640 + db.insertMetadata(name, json_data) catch {}; 1641 + } 1642 + 1643 + const metadata = try PackageMetadata.parseFromJson(self.cache_allocator, json_data); 1644 + const cache_key = try self.cache_allocator.dupe(u8, name); 1645 + 1646 + try self.metadata_cache.put(cache_key, metadata); 1647 + return self.metadata_cache.get(name).?; 1648 + } 1649 + 1650 + fn selectBestVersion(_: *Resolver, metadata: *const PackageMetadata, constraint: Constraint) ?*const VersionInfo { 1651 + var best: ?*const VersionInfo = null; 1652 + const want_prerelease = constraint.version.prerelease != null; 1653 + 1654 + for (metadata.versions.items) |*v| { 1655 + if (v.version.prerelease != null and !want_prerelease) continue; 1656 + if (constraint.satisfies(v.version)) { 1657 + if (best == null or v.version.order(best.?.version) == .gt) best = v; 1658 + } 1659 + } 1660 + 1661 + if (best != null or constraint.kind != .any) return best; 1662 + 1663 + for 
(metadata.versions.items) |*v| { 1664 + if (!constraint.satisfies(v.version)) continue; 1665 + if (best == null or v.version.order(best.?.version) == .gt) best = v; 1666 + } 1667 + 1668 + return best; 1669 + } 1670 + 1671 + fn selectBestVersionForConstraints(_: *Resolver, metadata: *const PackageMetadata, all_constraints: []const Constraint) ?*const VersionInfo { 1672 + var best: ?*const VersionInfo = null; 1673 + 1674 + var want_prerelease = false; 1675 + var all_any = true; 1676 + for (all_constraints) |c| { 1677 + if (c.version.prerelease != null) want_prerelease = true; 1678 + if (c.kind != .any) all_any = false; 1679 + } 1680 + 1681 + for (metadata.versions.items) |*v| { 1682 + if (v.version.prerelease != null and !want_prerelease) continue; 1683 + 1684 + var satisfies_all = true; 1685 + for (all_constraints) |c| { 1686 + if (!c.satisfies(v.version)) { 1687 + satisfies_all = false; break; 1688 + } 1689 + } 1690 + 1691 + if (satisfies_all) { 1692 + if (best == null or v.version.order(best.?.version) == .gt) best = v; 1693 + } 1694 + } 1695 + 1696 + if (best != null or !all_any) return best; 1697 + 1698 + for (metadata.versions.items) |*v| { 1699 + var satisfies_all = true; 1700 + for (all_constraints) |c| { 1701 + if (!c.satisfies(v.version)) { satisfies_all = false; break; } 1702 + } 1703 + if (satisfies_all and (best == null or v.version.order(best.?.version) == .gt)) best = v; 1704 + } 1705 + 1706 + return best; 1707 + } 1708 + 1709 + pub fn writeLockfile(self: *Resolver, path: []const u8) !void { 1710 + var writer = lockfile.LockfileWriter.init(self.allocator); 1711 + defer writer.deinit(); 1712 + 1713 + var pkg_indices = std.StringHashMap(u32).init(self.allocator); 1714 + defer { 1715 + var key_iter = pkg_indices.keyIterator(); 1716 + while (key_iter.next()) |k| self.allocator.free(k.*); 1717 + pkg_indices.deinit(); 1718 + } 1719 + 1720 + var idx: u32 = 0; 1721 + var iter = self.resolved.valueIterator(); 1722 + while (iter.next()) |pkg_ptr| { 1723 + const 
pkg = pkg_ptr.*; 1724 + const install_path = try pkg.installPath(self.allocator); 1725 + try pkg_indices.put(install_path, idx); 1726 + idx += 1; 1727 + } 1728 + 1729 + iter = self.resolved.valueIterator(); 1730 + while (iter.next()) |pkg_ptr| { 1731 + const pkg = pkg_ptr.*; 1732 + const name_ref = try writer.internString(pkg.name.slice()); 1733 + const url_ref = try writer.internString(pkg.tarball_url); 1734 + const prerelease_ref = if (pkg.version.prerelease) |pre| 1735 + try writer.internString(pre) 1736 + else lockfile.StringRef.empty; 1737 + const parent_ref = if (pkg.parent_path) |parent| 1738 + try writer.internString(parent) 1739 + else lockfile.StringRef.empty; 1740 + 1741 + const deps_start: u32 = @intCast(writer.dependencies.items.len); 1742 + const pkg_install_path = try pkg.installPath(self.allocator); 1743 + defer self.allocator.free(pkg_install_path); 1744 + 1745 + var deps_written: u32 = 0; 1746 + for (pkg.dependencies.items) |dep| { 1747 + const dep_name = dep.name.slice(); 1748 + var found_idx = pkg_indices.get(dep_name); 1749 + 1750 + if (found_idx == null) { 1751 + const nested_path = std.fmt.allocPrint(self.allocator, "{s}/node_modules/{s}", .{ pkg_install_path, dep_name }) catch continue; 1752 + defer self.allocator.free(nested_path); 1753 + found_idx = pkg_indices.get(nested_path); 1754 + } 1755 + 1756 + if (found_idx) |fi| { 1757 + const constraint_ref = try writer.internString(dep.constraint); 1758 + try writer.addDependency(.{ 1759 + .package_index = fi, 1760 + .constraint = constraint_ref, 1761 + .flags = .{ 1762 + .peer = dep.flags.peer, 1763 + .dev = dep.flags.dev, 1764 + .optional = dep.flags.optional, 1765 + }, 1766 + }); deps_written += 1; 1767 + } 1768 + } 1769 + 1770 + _ = try writer.addPackage(.{ 1771 + .name = name_ref, 1772 + .version_major = pkg.version.major, 1773 + .version_minor = pkg.version.minor, 1774 + .version_patch = pkg.version.patch, 1775 + .prerelease = prerelease_ref, 1776 + .integrity = pkg.integrity, 1777 + 
.tarball_url = url_ref, 1778 + .parent_path = parent_ref, 1779 + .deps_start = deps_start, 1780 + .deps_count = deps_written, 1781 + .flags = .{ 1782 + .direct = pkg.direct, 1783 + .has_bin = pkg.has_bin, 1784 + }, 1785 + }); 1786 + } 1787 + 1788 + try writer.write(path); 1789 + } 1790 + };
+2763
src/pkg/root.zig
// Package-manager C ABI surface: exported `pkg_*` functions and the shared
// context type used by the embedding runtime.
const std = @import("std");
const builtin = @import("builtin");

pub const lockfile = @import("lockfile.zig");
pub const cache = @import("cache.zig");
pub const fetcher = @import("fetcher.zig");
pub const extractor = @import("extractor.zig");
pub const linker = @import("linker.zig");
pub const resolver = @import("resolver.zig");
pub const intern = @import("intern.zig");
pub const json = @import("json.zig");
pub const debug = @import("debug.zig");

// All C-ABI entry points allocate through the C allocator.
const global_allocator: std.mem.Allocator = std.heap.c_allocator;

/// Return the user's home directory; caller owns the returned slice.
/// Windows reads USERPROFILE (UTF-16 -> UTF-8); elsewhere reads HOME.
fn getHomeDir(allocator: std.mem.Allocator) ![]const u8 {
    if (builtin.os.tag == .windows) {
        const home_w = std.process.getenvW(
            std.unicode.utf8ToUtf16LeStringLiteral("USERPROFILE")
        ) orelse return error.NoHomeDir;
        // Any UTF-16 decode/alloc failure is reported as a missing home dir.
        return std.unicode.utf16LeToUtf8Alloc(allocator, home_w) catch error.NoHomeDir;
    }
    const home = std.posix.getenv("HOME") orelse return error.NoHomeDir;
    return allocator.dupe(u8, home);
}

/// C-visible status codes returned by every `pkg_*` entry point.
pub const PkgError = enum(c_int) {
    ok = 0,
    out_of_memory = -1,
    invalid_lockfile = -2,
    io_error = -3,
    network_error = -4,
    cache_error = -5,
    extract_error = -6,
    resolve_error = -7,
    invalid_argument = -8,
    not_found = -9,
    integrity_mismatch = -10,
};

/// Optional progress hook: (user_data, phase, current, total, message).
pub const ProgressCallback = ?*const fn (
    user_data: ?*anyopaque,
    phase: Phase,
    current: u32,
    total: u32,
    message: [*:0]const u8,
) callconv(.c) void;

/// Install pipeline phases reported through ProgressCallback.
pub const Phase = enum(c_int) {
    resolving = 0,
    fetching = 1,
    extracting = 2,
    linking = 3,
    caching = 4,
    postinstall = 5,
};

/// C-ABI configuration for pkg_init; all fields have usable defaults.
pub const PkgOptions = extern struct {
    cache_dir: ?[*:0]const u8 = null,
    registry_url: ?[*:0]const u8 = null,
    max_connections: u32 = 6,
    progress_callback: ProgressCallback = null,
    user_data: ?*anyopaque = null,
    verbose: bool = false,
};

/// Aggregate on-disk cache statistics (C ABI).
pub const CacheStats = extern struct {
    total_size: u64,
    db_size: u64,
    package_count: u32,
};

/// One package recorded by the last install, exposed to the embedder.
pub const AddedPackage = extern struct {
    name: [*:0]const u8,
    version: [*:0]const u8,
    direct: bool,
};

/// Counters describing the last install run (C ABI).
pub const InstallResult = extern struct {
    package_count: u32,
    cache_hits: u32,
    cache_misses: u32,
    files_linked: u32,
    files_copied: u32,
    packages_installed: u32,
    packages_skipped: u32,
    elapsed_ms: u64,
};

/// A lifecycle script (e.g. postinstall) discovered for a package.
pub const LifecycleScript = extern struct {
    name: [*:0]const u8,
    script: [*:0]const u8,
};

// Detailed package info for info-style queries (C ABI, internal).
const PkgInfo = extern struct {
    name: [*:0]const u8,
    version: [*:0]const u8,
    description: [*:0]const u8,
    license: [*:0]const u8,
    homepage: [*:0]const u8,
    tarball: [*:0]const u8,
    shasum: [*:0]const u8,
    integrity: [*:0]const u8,
    keywords: [*:0]const u8,
    published: [*:0]const u8,
    dep_count: u32,
    version_count: u32,
    unpacked_size: u64,
};

// A dist-tag -> version pair (C ABI, internal).
const DistTag = extern struct {
    tag: [*:0]const u8,
    version: [*:0]const u8,
};

// A package maintainer entry (C ABI, internal).
const Maintainer = extern struct {
    name: [*:0]const u8,
    email: [*:0]const u8,
};

// A name -> version dependency pair (C ABI, internal).
const Dependency = extern struct {
    name: [*:0]const u8,
    version: [*:0]const u8,
};

/// Long-lived state behind the opaque handle returned by pkg_init.
/// Owns the cache DB, the HTTP fetcher, and all storage backing the
/// C-visible result arrays (added packages, lifecycle scripts, info).
pub const PkgContext = struct {
    allocator: std.mem.Allocator,
    arena_state: std.heap.ArenaAllocator, // per-install scratch, reset each run
    string_pool: intern.StringPool,
    cache_db: ?*cache.CacheDB,
    http: ?*fetcher.Fetcher,
    options: PkgOptions,
    last_error: ?[:0]u8, // most recent error message, surfaced via the C API
    cache_dir: []const u8,
    metadata_cache: std.StringHashMap(resolver.PackageMetadata),
    last_install_result: InstallResult,
    added_packages: std.ArrayListUnmanaged(AddedPackage),
    added_packages_storage: std.ArrayListUnmanaged([:0]u8), // owns the strings above
    lifecycle_scripts: std.ArrayListUnmanaged(LifecycleScript),
    lifecycle_scripts_storage: std.ArrayListUnmanaged([:0]u8),
    info_dist_tags: std.ArrayListUnmanaged(DistTag),
    info_maintainers: std.ArrayListUnmanaged(Maintainer),
    info_dependencies: std.ArrayListUnmanaged(Dependency),
    info_storage: std.ArrayListUnmanaged([:0]u8),

    /// Allocate and initialize a context: resolve the cache directory, open
    /// the cache database, and construct the HTTP fetcher.
    pub fn init(allocator: std.mem.Allocator, options: PkgOptions) !*PkgContext {
        const ctx = try allocator.create(PkgContext);
        errdefer allocator.destroy(ctx);

        // NOTE(review): when options.cache_dir is unset, getDefaultCacheDir
        // allocates a string that is duped into ctx.cache_dir below but never
        // freed itself — looks like a small one-time leak; confirm.
        const cache_path = if (options.cache_dir) |dir| std.mem.span(dir)
        else try getDefaultCacheDir(allocator);

        ctx.* = .{
            .allocator = allocator,
            .arena_state = std.heap.ArenaAllocator.init(allocator),
            .string_pool = intern.StringPool.init(allocator),
            .cache_db = null,
            .http = null,
            .options = options,
            .last_error = null,
            .cache_dir = try allocator.dupe(u8, cache_path),
            .metadata_cache = std.StringHashMap(resolver.PackageMetadata).init(allocator),
            .last_install_result = .{
                .package_count = 0,
                .cache_hits = 0,
                .cache_misses = 0,
                .files_linked = 0,
                .files_copied = 0,
                .packages_installed = 0,
                .packages_skipped = 0,
                .elapsed_ms = 0
            },
            .added_packages = .{},
            .added_packages_storage = .{},
            .lifecycle_scripts = .{},
            .lifecycle_scripts_storage = .{},
            .info_dist_tags = .{},
            .info_maintainers = .{},
            .info_dependencies = .{},
            .info_storage = .{},
        };

        debug.enabled = options.verbose;
        debug.log("init: cache_dir={s}", .{ctx.cache_dir});
        ctx.cache_db = cache.CacheDB.open(ctx.cache_dir) catch |err| {
            ctx.setErrorFmt("Failed to open cache database: {}", .{err});
            return error.CacheError;
        };

        debug.log("init: cache database opened", .{});
        const registry = if (options.registry_url) |url|
            std.mem.span(url)
        else
            "registry.npmjs.org";

        ctx.http = fetcher.Fetcher.init(allocator, registry) catch |err| {
            ctx.setErrorFmt("Failed to initialize fetcher: {}", .{err});
            return error.NetworkError;
        };
        debug.log("init: http fetcher ready, registry={s}", .{registry});

        return ctx;
    }

    /// Tear down everything the context owns, then free the context itself.
    pub fn deinit(self: *PkgContext) void {
        if (self.http) |h| h.deinit();
        if (self.cache_db) |db| db.close();
        var meta_iter = self.metadata_cache.valueIterator();
        while (meta_iter.next()) |meta| meta.deinit();
        self.metadata_cache.deinit();
        self.string_pool.deinit();
        self.arena_state.deinit();
        if (self.last_error) |e| self.allocator.free(e);
        for (self.added_packages_storage.items) |s| self.allocator.free(s);
        self.added_packages_storage.deinit(self.allocator);
        self.added_packages.deinit(self.allocator);
        for (self.lifecycle_scripts_storage.items) |s| self.allocator.free(s);
        self.lifecycle_scripts_storage.deinit(self.allocator);
        self.lifecycle_scripts.deinit(self.allocator);
        for (self.info_storage.items) |s| self.allocator.free(s);
        self.info_storage.deinit(self.allocator);
        self.info_dist_tags.deinit(self.allocator);
        self.info_maintainers.deinit(self.allocator);
        self.info_dependencies.deinit(self.allocator);
        self.allocator.free(self.cache_dir);
        self.allocator.destroy(self);
    }

    /// Record a formatted error message, replacing any previous one.
    /// Allocation failure degrades to "no message" rather than erroring.
    pub fn setErrorFmt(self: *PkgContext, comptime fmt: []const u8, args: anytype) void {
        if (self.last_error) |e| self.allocator.free(e);
        self.last_error = std.fmt.allocPrintSentinel(self.allocator, fmt, args, 0) catch null;
    }

    /// Record a fixed error message, replacing any previous one.
    pub fn setError(self: *PkgContext, msg: []const u8) void {
        if (self.last_error) |e| self.allocator.free(e);
        self.last_error = self.allocator.dupeZ(u8, msg) catch null;
    }

    // Default cache location: <home>/.ant/pkg
    fn getDefaultCacheDir(allocator: std.mem.Allocator) ![]const u8 {
        const home = try getHomeDir(allocator);
        defer allocator.free(home);
        return std.fmt.allocPrint(allocator, "{s}/.ant/pkg", .{home});
    }

    // Forward progress to the embedder's callback, if one is registered.
    fn reportProgress(self: *PkgContext, phase: Phase, current: u32, total: u32, message: [:0]const u8) void {
        if (self.options.progress_callback) |cb| {
            cb(self.options.user_data, phase, current, total, message.ptr);
        }
    }

    // Free and reset the added-packages result arrays.
    fn clearAddedPackages(self: *PkgContext) void {
        for (self.added_packages_storage.items) |s| self.allocator.free(s);
        self.added_packages_storage.clearRetainingCapacity();
        self.added_packages.clearRetainingCapacity();
    }

    // Free and reset the lifecycle-script result arrays.
    fn clearLifecycleScripts(self: *PkgContext) void {
        for (self.lifecycle_scripts_storage.items) |s| self.allocator.free(s);
        self.lifecycle_scripts_storage.clearRetainingCapacity();
        self.lifecycle_scripts.clearRetainingCapacity();
    }

    // Free and reset the package-info result arrays.
    fn clearInfo(self: *PkgContext) void {
        for (self.info_storage.items) |s| self.allocator.free(s);
        self.info_storage.clearRetainingCapacity();
        self.info_dist_tags.clearRetainingCapacity();
        self.info_maintainers.clearRetainingCapacity();
        self.info_dependencies.clearRetainingCapacity();
    }

    // Copy `str` into context-owned storage and return a C pointer to it.
    fn storeInfoString(self: *PkgContext, str: []const u8) ![*:0]const u8 {
        const z = try self.allocator.dupeZ(u8, str);
        try self.info_storage.append(self.allocator, z);
        return z.ptr;
    }

    // Record a lifecycle script; both strings are copied into owned storage.
    fn addLifecycleScript(self: *PkgContext, name: []const u8, script: []const u8) !void {
        const name_z = try self.allocator.dupeZ(u8, name);
        errdefer self.allocator.free(name_z);
        const script_z = try self.allocator.dupeZ(u8, script);
        errdefer self.allocator.free(script_z);

        try self.lifecycle_scripts_storage.append(self.allocator, name_z);
        try self.lifecycle_scripts_storage.append(self.allocator, script_z);
        try self.lifecycle_scripts.append(self.allocator, .{
            .name = name_z.ptr,
            .script =
script_z.ptr,
        });
    }

    // Record a package in the added-packages results (deduplicated by name).
    fn addPackageToResults(self: *PkgContext, name: []const u8, version: []const u8, direct: bool) !void {
        for (self.added_packages.items) |pkg| {
            if (std.mem.eql(u8, std.mem.span(pkg.name), name)) return;
        }

        const name_z = try self.allocator.dupeZ(u8, name);
        errdefer self.allocator.free(name_z);
        const version_z = try self.allocator.dupeZ(u8, version);
        errdefer self.allocator.free(version_z);

        try self.added_packages_storage.append(self.allocator, name_z);
        try self.added_packages_storage.append(self.allocator, version_z);
        try self.added_packages.append(self.allocator, .{
            .name = name_z.ptr,
            .version = version_z.ptr,
            .direct = direct,
        });
    }

    /// Install every package in the lockfile into node_modules: link cache
    /// hits straight from the content cache, stream-fetch and extract cache
    /// misses, then record aggregate statistics in `last_install_result`.
    pub fn install(self: *PkgContext, lockfile_path: []const u8, node_modules_path: []const u8) !void {
        _ = self.arena_state.reset(.retain_capacity);
        const arena_alloc = self.arena_state.allocator();

        self.clearAddedPackages();

        var timer = std.time.Timer.start() catch return error.OutOfMemory;
        var stage_start: u64 = @intCast(std.time.nanoTimestamp());

        debug.log("install start: lockfile={s} node_modules={s}", .{ lockfile_path, node_modules_path });

        var lf = lockfile.Lockfile.open(lockfile_path) catch {
            self.setError("Failed to open lockfile");
            return error.InvalidLockfile;
        };
        defer lf.close();

        const pkg_count = lf.header.package_count;
        stage_start = debug.timer("lockfile open", stage_start);
        debug.log(" packages in lockfile: {d}", .{pkg_count});

        // Batch-probe the content cache by integrity hash.
        var integrities = try arena_alloc.alloc([64]u8, pkg_count);
        for (lf.packages, 0..) |pkg, i| {
            integrities[i] = pkg.integrity;
        }

        const db = self.cache_db orelse return error.CacheError;
        var cache_hits = try db.batchLookup(integrities, arena_alloc);
        defer cache_hits.deinit();
        stage_start = debug.timer("cache lookup", stage_start);

        var hit_set = std.AutoHashMap(u32, u32).init(arena_alloc);
        for (cache_hits.items) |hit| {
            try hit_set.put(hit.index, hit.file_count);
        }

        // Every package index not found in the cache must be fetched.
        var misses = std.ArrayListUnmanaged(u32){};
        for (0..pkg_count) |i| {
            if (!hit_set.contains(@intCast(i))) {
                try misses.append(arena_alloc, @intCast(i));
            }
        }
        debug.log(" cache hits: {d}, misses: {d}", .{ cache_hits.items.len, misses.items.len });

        var pkg_linker = linker.Linker.init(self.allocator);
        defer pkg_linker.deinit();
        try pkg_linker.setNodeModulesPath(node_modules_path);

        // Cache hits: link straight out of the content cache.
        for (cache_hits.items, 0..) |hit, i| {
            const pkg = &lf.packages[hit.index];
            const pkg_name = pkg.name.slice(lf.string_table);
            const cache_path = try db.getPackagePath(&pkg.integrity, arena_alloc);
            const parent_path = pkg.parent_path.slice(lf.string_table);

            const msg = std.fmt.allocPrintSentinel(arena_alloc, "{s}", .{pkg_name}, 0) catch continue;
            self.reportProgress(.linking, @intCast(i), @intCast(cache_hits.items.len), msg);

            try pkg_linker.linkPackage(.{
                .cache_path = cache_path,
                .node_modules_path = node_modules_path,
                .name = pkg_name,
                .parent_path = if (parent_path.len > 0) parent_path else null,
                .file_count = hit.file_count,
                .has_bin = pkg.flags.has_bin,
            });

            // Only direct dependencies are surfaced in the added-packages list.
            if (pkg.flags.direct) {
                const version_str = std.fmt.allocPrint(arena_alloc, "{d}.{d}.{d}", .{
                    pkg.version_major,
                    pkg.version_minor,
                    pkg.version_patch,
                }) catch continue;
                self.addPackageToResults(pkg_name, version_str, true) catch {};
            }
        }
        stage_start = debug.timer("link cache hits", stage_start);

        if (misses.items.len > 0) {
            const http = self.http orelse return error.NetworkError;
            // Per-fetch state shared with the streaming callbacks below.
            const PkgExtractCtx = struct {
                ext: *extractor.Extractor,
                pkg_idx: u32,
                integrity: [64]u8,
                cache_path: []const u8,
                pkg_name: []const u8,
                version_str: []const u8,
                direct: bool,
                parent_path: ?[]const u8,
                has_bin: bool,
                completed: bool,
                has_error: bool,
            };

            var extract_contexts = try arena_alloc.alloc(PkgExtractCtx, misses.items.len);
            var valid_count: usize = 0;

            debug.log("queuing {d} tarball fetches...", .{misses.items.len});

            for (misses.items, 0..) |pkg_idx, i| {
                const pkg = &lf.packages[pkg_idx];
                const pkg_name = pkg.name.slice(lf.string_table);
                const tarball_url = pkg.tarball_url.slice(lf.string_table);
                const version_str = pkg.versionString(arena_alloc, lf.string_table) catch continue;
                const cache_path = db.getPackagePath(&pkg.integrity, arena_alloc) catch continue;

                const msg = std.fmt.allocPrintSentinel(arena_alloc, "{s}", .{pkg_name}, 0) catch continue;
                self.reportProgress(.fetching, @intCast(i), @intCast(misses.items.len), msg);

                if (self.options.verbose) {
                    debug.log(" queue: {s}@{s}", .{ pkg_name, version_str });
                }

                const ext = extractor.Extractor.init(self.allocator, cache_path) catch continue;
                const parent_path_str = pkg.parent_path.slice(lf.string_table);

                extract_contexts[valid_count] = .{
                    .ext = ext,
                    .pkg_idx = pkg_idx,
                    .integrity = pkg.integrity,
                    .cache_path = cache_path,
                    .pkg_name = pkg_name,
                    .version_str = version_str,
                    .direct = pkg.flags.direct,
                    .parent_path = if (parent_path_str.len > 0) parent_path_str else null,
                    .has_bin = pkg.flags.has_bin,
                    .completed = false,
                    .has_error = false,
                };

                // Stream tarball bytes straight into the extractor as they arrive.
                http.fetchTarball(tarball_url, fetcher.StreamHandler.init(
                    struct {
                        fn onData(data: []const u8, user_data: ?*anyopaque) void {
                            const ctx: *PkgExtractCtx = @ptrCast(@alignCast(user_data));
                            ctx.ext.feedCompressed(data) catch {
                                ctx.has_error = true;
                            };
                        }
                    }.onData,
                    struct {
                        fn onComplete(_: u16, user_data: ?*anyopaque) void {
                            const ctx: *PkgExtractCtx = @ptrCast(@alignCast(user_data));
                            ctx.completed = true;
                        }
                    }.onComplete,
                    struct {
                        fn onError(_: fetcher.FetchError, user_data: ?*anyopaque) void {
                            const ctx: *PkgExtractCtx = @ptrCast(@alignCast(user_data));
                            ctx.has_error = true;
                            ctx.completed = true;
                        }
                    }.onError,
                    &extract_contexts[valid_count],
                )) catch continue;

                valid_count += 1;
            }

            stage_start = debug.timer("queue fetches", stage_start);
            debug.log("running event loop for {d} fetches...", .{valid_count});

            // Drive all queued fetches to completion on the event loop.
            http.run() catch {};
            stage_start = debug.timer("fetch + extract", stage_start);

            var success_count: usize = 0;
            var error_count: usize = 0;
            for (extract_contexts[0..valid_count], 0..)
|*ctx, i| { 476 + defer ctx.ext.deinit(); 477 + 478 + if (ctx.has_error) { 479 + error_count += 1; 480 + debug.log(" error: {s}", .{ctx.pkg_name}); 481 + continue; 482 + } 483 + success_count += 1; 484 + 485 + const msg = std.fmt.allocPrintSentinel(arena_alloc, "{s}", .{ctx.pkg_name}, 0) catch continue; 486 + self.reportProgress(.linking, @intCast(i), @intCast(valid_count), msg); 487 + 488 + const stats = ctx.ext.stats(); 489 + if (stats.files >= 100) { 490 + debug.log(" extracted {s}: {d} files, {d} bytes", .{ ctx.pkg_name, stats.files, stats.bytes }); 491 + } 492 + 493 + db.insert(&.{ 494 + .integrity = ctx.integrity, 495 + .path = ctx.cache_path, 496 + .unpacked_size = stats.bytes, 497 + .file_count = stats.files, 498 + .cached_at = std.time.timestamp(), 499 + }, ctx.pkg_name, ctx.version_str) catch continue; 500 + 501 + self.addPackageToResults(ctx.pkg_name, ctx.version_str, ctx.direct) catch {}; 502 + 503 + pkg_linker.linkPackage(.{ 504 + .cache_path = ctx.cache_path, 505 + .node_modules_path = node_modules_path, 506 + .name = ctx.pkg_name, 507 + .parent_path = ctx.parent_path, 508 + .file_count = stats.files, 509 + .has_bin = ctx.has_bin, 510 + }) catch {}; 511 + } 512 + stage_start = debug.timer("cache insert + link misses", stage_start); 513 + debug.log(" fetched: {d} success, {d} errors", .{ success_count, error_count }); 514 + } 515 + 516 + db.sync(); 517 + stage_start = debug.timer("cache sync", stage_start); 518 + 519 + const link_stats = pkg_linker.getStats(); 520 + self.last_install_result = .{ 521 + .package_count = pkg_count, 522 + .cache_hits = @intCast(cache_hits.items.len), 523 + .cache_misses = @intCast(misses.items.len), 524 + .files_linked = link_stats.files_linked, 525 + .files_copied = link_stats.files_copied, 526 + .packages_installed = link_stats.packages_installed, 527 + .packages_skipped = link_stats.packages_skipped, 528 + .elapsed_ms = timer.read() / 1_000_000, 529 + }; 530 + } 531 + }; 532 + 533 + export fn pkg_init(options: *const 
PkgOptions) ?*PkgContext {
    // Tail of pkg_init: construct the context, reporting any failure as null.
    return PkgContext.init(global_allocator, options.*) catch null;
}

/// Destroy a context previously returned by pkg_init. Safe to call with null.
export fn pkg_free(ctx: ?*PkgContext) void {
    const context = ctx orelse return;
    context.deinit();
}

/// Install packages from an existing lockfile, then run trusted postinstall
/// scripts declared in package.json. Postinstall is best-effort: a missing or
/// unparsable package.json after a successful install still returns .ok.
export fn pkg_install(
    ctx: ?*PkgContext,
    package_json_path: [*:0]const u8,
    lockfile_path: [*:0]const u8,
    node_modules_path: [*:0]const u8,
) PkgError {
    const context = ctx orelse return .invalid_argument;

    // Per-call scratch space: recycle the arena's capacity instead of freeing.
    _ = context.arena_state.reset(.retain_capacity);
    const scratch = context.arena_state.allocator();

    context.install(
        std.mem.span(lockfile_path),
        std.mem.span(node_modules_path),
    ) catch |err| switch (err) {
        error.InvalidLockfile => return .invalid_lockfile,
        error.CacheError => return .cache_error,
        error.NetworkError => return .network_error,
        error.OutOfMemory => return .out_of_memory,
        else => return .io_error,
    };

    var pkg_json = json.PackageJson.parse(scratch, std.mem.span(package_json_path)) catch return .ok;
    defer pkg_json.deinit(scratch);
    if (pkg_json.trusted_dependencies.count() > 0) {
        runTrustedPostinstall(context, &pkg_json.trusted_dependencies, std.mem.span(node_modules_path), scratch);
    }

    return .ok;
}

/// Copy the result of the most recent install into out.
export fn pkg_get_install_result(ctx: ?*PkgContext, out: *InstallResult) PkgError {
    const context = ctx orelse return .invalid_argument;
    out.* = context.last_install_result;
    return .ok;
}

/// Number of packages added by the last operation (0 for a null context).
export fn pkg_get_added_count(ctx: ?*const PkgContext) u32 {
    if (ctx) |context| return @intCast(context.added_packages.items.len);
    return 0;
}

/// Fetch one added-package record by index.
export fn pkg_get_added_package(ctx: ?*const PkgContext, index: u32, out: *AddedPackage) PkgError {
    const context = ctx orelse return .invalid_argument;
    const items = context.added_packages.items;
    if (index >= items.len) return .invalid_argument;
    out.* = items[index];
    return .ok;
}

export fn
pkg_discover_lifecycle_scripts(
    ctx: ?*PkgContext,
    node_modules_path: [*:0]const u8,
) PkgError {
    const c = ctx orelse return .invalid_argument;
    c.clearLifecycleScripts();

    const nm_path = std.mem.span(node_modules_path);
    var nm_dir = std.fs.cwd().openDir(nm_path, .{ .iterate = true }) catch return .io_error;
    defer nm_dir.close();

    // Walk node_modules one level deep; "@scope" directories get a second level.
    var iter = nm_dir.iterate();
    while (iter.next() catch null) |entry| {
        if (entry.kind != .directory) continue;
        if (entry.name[0] == '@') {
            var scope_dir = nm_dir.openDir(entry.name, .{ .iterate = true }) catch continue;
            defer scope_dir.close();
            var scope_iter = scope_dir.iterate();
            while (scope_iter.next() catch null) |scoped_entry| {
                if (scoped_entry.kind != .directory) continue;
                // entry.name already includes the leading '@'.
                const full_name = std.fmt.allocPrint(c.allocator, "{s}/{s}", .{ entry.name, scoped_entry.name }) catch continue;
                defer c.allocator.free(full_name);
                // NOTE(review): full_name is freed on scope exit; this assumes
                // addLifecycleScript copies the name — confirm.
                discoverPackageScript(c, full_name, scope_dir, scoped_entry.name);
            }
        } else {
            discoverPackageScript(c, entry.name, nm_dir, entry.name);
        }
    }

    return .ok;
}

/// Record a postinstall/install script for one package directory, unless the
/// ".postinstall" marker file shows the script has already run.
/// (The unused nm_path parameter of the original was removed; this function
/// is file-private and both call sites are updated above.)
fn discoverPackageScript(ctx: *PkgContext, pkg_name: []const u8, parent_dir: std.fs.Dir, dir_name: []const u8) void {
    var pkg_dir = parent_dir.openDir(dir_name, .{}) catch return;
    defer pkg_dir.close();

    // Marker present -> already ran. Any stat error other than "not found"
    // means the directory is unreadable; skip it.
    if (pkg_dir.access(".postinstall", .{})) |_| {
        return;
    } else |err| {
        if (err != error.FileNotFound) return;
    }

    const pkg_json = pkg_dir.openFile("package.json", .{}) catch return;
    defer pkg_json.close();

    const content = pkg_json.readToEndAlloc(ctx.allocator, 1024 * 1024) catch return;
    defer ctx.allocator.free(content);

    var doc = json.JsonDoc.parse(content) catch return;
    defer doc.deinit();

    const root = doc.root();
    const scripts = root.getObject("scripts") orelse return;
    const script = scripts.getString("postinstall") orelse
        scripts.getString("install") orelse return;

    // Deliberate skip, logged for consistency with runTrustedPostinstall.
    if (std.mem.eql(u8, pkg_name, "esbuild")) {
        debug.log("ignoring esbuild lifecycle scripts", .{});
        return;
    }
    ctx.addLifecycleScript(pkg_name, script) catch return;
}

/// Number of lifecycle scripts found by pkg_discover_lifecycle_scripts.
export fn pkg_get_lifecycle_script_count(ctx: ?*const PkgContext) u32 {
    const c = ctx orelse return 0;
    return @intCast(c.lifecycle_scripts.items.len);
}

/// Fetch one discovered lifecycle script by index.
export fn pkg_get_lifecycle_script(ctx: ?*const PkgContext, index: u32, out: *LifecycleScript) PkgError {
    const c = ctx orelse return .invalid_argument;
    if (index >= c.lifecycle_scripts.items.len) return .invalid_argument;
    out.* = c.lifecycle_scripts.items[index];
    return .ok;
}

/// Run postinstall scripts for an explicit caller-supplied allow-list of
/// package names.
export fn pkg_run_postinstall(
    ctx: ?*PkgContext,
    node_modules_path: [*:0]const u8,
    package_names: [*]const [*:0]const u8,
    count: u32,
) PkgError {
    const c = ctx orelse return .invalid_argument;
    _ = c.arena_state.reset(.retain_capacity);
    const arena_alloc = c.arena_state.allocator();

    var trusted = std.StringHashMap(void).init(arena_alloc);
    for (0..count) |i| {
        trusted.put(std.mem.span(package_names[i]), {}) catch continue;
    }

    runTrustedPostinstall(c, &trusted, std.mem.span(node_modules_path), arena_alloc);
    return .ok;
}

/// Add package names to the "trustedDependencies" array of a package.json,
/// creating the array if needed. Existing entries are kept, duplicates are
/// skipped, and the file is rewritten pretty-printed.
export fn pkg_add_trusted_dependencies(
    package_json_path: [*:0]const u8,
    package_names: [*]const [*:0]const u8,
    count: u32,
) PkgError {
    const allocator = std.heap.c_allocator;
    const path = std.mem.span(package_json_path);
    const path_z = package_json_path;

    debug.log("[trust] pkg_add_trusted_dependencies: path={s} count={d}", .{ path, count });

    // Read the whole file up front and close the handle immediately. The
    // original opened .read_write and kept the handle open across
    // yyjson_mut_write_file on the same path, which can fail on platforms
    // with mandatory sharing (Windows); we only ever read through it.
    const content = blk: {
        const file = std.fs.cwd().openFile(path, .{}) catch |err| {
            debug.log("[trust] failed to open file: {}", .{err});
            return .io_error;
        };
        defer file.close();
        break :blk file.readToEndAlloc(allocator, 10 * 1024 * 1024) catch |err| {
            debug.log("[trust] failed to read file: {}", .{err});
            return .io_error;
        };
    };
    defer allocator.free(content);

    debug.log("[trust] read {d} bytes from package.json", .{content.len});

    const doc = json.yyjson.yyjson_read(content.ptr, content.len, 0);
    if (doc == null) {
        debug.log("[trust] failed to parse JSON", .{});
        return .io_error;
    }
    defer json.yyjson.yyjson_doc_free(doc);

    // yyjson read-documents are immutable; mutate a copy.
    const mdoc = json.yyjson.yyjson_doc_mut_copy(doc, null);
    if (mdoc == null) {
        debug.log("[trust] failed to create mutable doc", .{});
        return .out_of_memory;
    }
    defer json.yyjson.yyjson_mut_doc_free(mdoc);

    const root = json.yyjson.yyjson_mut_doc_get_root(mdoc);
    if (root == null) {
        debug.log("[trust] failed to get root", .{});
        return .io_error;
    }

    var trusted_arr = json.yyjson.yyjson_mut_obj_get(root, "trustedDependencies");
    if (trusted_arr == null) {
        debug.log("[trust] creating new trustedDependencies array", .{});
        trusted_arr = json.yyjson.yyjson_mut_arr(mdoc);
        if (trusted_arr == null) {
            debug.log("[trust] failed to create array", .{});
            return .out_of_memory;
        }
        _ = json.yyjson.yyjson_mut_obj_add_val(mdoc, root, "trustedDependencies", trusted_arr);
    } else {
        debug.log("[trust] trustedDependencies array already exists", .{});
    }

    // yyjson_mut_str does not copy the string, so every name inserted below
    // must stay alive until after the write.
    var string_copies = std.ArrayListUnmanaged([:0]u8){};
    defer {
        for (string_copies.items) |s| allocator.free(s);
        string_copies.deinit(allocator);
    }

    var added: u32 = 0;
    for (0..count) |i| {
        const pkg_name = std.mem.span(package_names[i]);

        // Linear duplicate scan; the array is expected to be small.
        var exists = false;
        var iter = json.yyjson.yyjson_mut_arr_iter{};
        _ = json.yyjson.yyjson_mut_arr_iter_init(trusted_arr, &iter);
        while (json.yyjson.yyjson_mut_arr_iter_next(&iter)) |val| {
            if (json.yyjson.yyjson_mut_is_str(val)) {
                const existing = json.yyjson.yyjson_mut_get_str(val);
                if (existing != null and std.mem.eql(u8, std.mem.span(existing.?), pkg_name)) {
                    exists = true;
                    break;
                }
            }
        }

        if (!exists) {
            const name_copy = allocator.dupeZ(u8, pkg_name) catch continue;
            string_copies.append(allocator, name_copy) catch {
                allocator.free(name_copy);
                continue;
            };
            const val = json.yyjson.yyjson_mut_str(mdoc, name_copy.ptr);
            if (val != null) {
                _ = json.yyjson.yyjson_mut_arr_append(trusted_arr, val);
                added += 1;
                debug.log("[trust] added {s}", .{pkg_name});
            }
        } else {
            debug.log("[trust] {s} already in trustedDependencies", .{pkg_name});
        }
    }
    debug.log("[trust] added {d} packages, writing file", .{added});

    var write_err: json.yyjson.yyjson_write_err = undefined;
    const flags = json.yyjson.YYJSON_WRITE_PRETTY_TWO_SPACES | json.yyjson.YYJSON_WRITE_ESCAPE_UNICODE;
    const written = json.yyjson.yyjson_mut_write_file(path_z, mdoc, flags, null, &write_err);
    if (!written) {
        const msg = if (write_err.msg) |m| std.mem.span(m) else "unknown";
        debug.log("[trust] failed to write file: code={d} msg={s}", .{ write_err.code, msg });
        return .io_error;
    }

    debug.log("[trust] successfully wrote package.json", .{});
    return .ok;
}

/// Per-tarball state shared between the HTTP fetcher callbacks and the
/// post-download cache-insert/link phase.
const InterleavedExtractCtx = struct {
    ext: *extractor.Extractor,
    integrity: [64]u8,
    cache_path: []const u8,
    pkg_name: []const u8,
    version_str: []const u8,
    direct: bool,
    parent_path: ?[]const u8,
    has_bin: bool,
    completed: bool,
    has_error: bool,
    queued: bool,
    parent: *InterleavedContext,
};

const InterleavedContext =
struct {
    allocator: std.mem.Allocator, // long-lived allocator (extractors)
    arena_alloc: std.mem.Allocator, // per-install scratch arena
    db: *cache.CacheDB,
    http: *fetcher.Fetcher,
    pkg_ctx: *PkgContext,
    extract_contexts: std.ArrayListUnmanaged(*InterleavedExtractCtx),
    // De-duplicates fetch work per content hash.
    queued_integrities: std.AutoHashMap([64]u8, void),
    callbacks_received: usize,
    integrity_duplicates: usize,
    cache_hits: usize,
    tarballs_queued: usize,
    tarballs_completed: std.atomic.Value(u32),

    fn init(allocator: std.mem.Allocator, arena_alloc: std.mem.Allocator, db: *cache.CacheDB, http: *fetcher.Fetcher, pkg_ctx: *PkgContext) InterleavedContext {
        return .{
            .allocator = allocator,
            .arena_alloc = arena_alloc,
            .db = db,
            .http = http,
            .pkg_ctx = pkg_ctx,
            .extract_contexts = .{},
            .queued_integrities = std.AutoHashMap([64]u8, void).init(arena_alloc),
            .callbacks_received = 0,
            .integrity_duplicates = 0,
            .cache_hits = 0,
            .tarballs_queued = 0,
            .tarballs_completed = std.atomic.Value(u32).init(0),
        };
    }

    fn deinit(self: *InterleavedContext) void {
        self.extract_contexts.deinit(self.arena_alloc);
        self.queued_integrities.deinit();
    }

    /// Resolver callback: as soon as a package version is pinned, start its
    /// tarball download — unless that content hash is already cached or
    /// already queued.
    fn onPackageResolved(pkg: *const resolver.ResolvedPackage, user_data: ?*anyopaque) void {
        const self: *InterleavedContext = @ptrCast(@alignCast(user_data));
        self.callbacks_received += 1;

        const pkg_name = pkg.name.slice();
        // Total is unknown while resolving, so current is reported as total.
        const current: u32 = @intCast(self.callbacks_received);
        const msg = std.fmt.allocPrintSentinel(self.arena_alloc, "{s}", .{pkg_name}, 0) catch return;
        self.pkg_ctx.reportProgress(.resolving, current, current, msg);

        if (self.queued_integrities.contains(pkg.integrity)) {
            self.integrity_duplicates += 1;
            return;
        }
        self.queued_integrities.put(pkg.integrity, {}) catch return;

        if (self.db.hasIntegrity(&pkg.integrity)) {
            self.cache_hits += 1;
            return;
        }
        self.tarballs_queued += 1;

        const cache_path = self.db.getPackagePath(&pkg.integrity, self.arena_alloc) catch return;
        const version_str = std.fmt.allocPrint(self.arena_alloc, "{d}.{d}.{d}", .{
            pkg.version.major, pkg.version.minor, pkg.version.patch,
        }) catch return;

        const ext = extractor.Extractor.init(self.allocator, cache_path) catch return;
        const ctx = self.arena_alloc.create(InterleavedExtractCtx) catch {
            ext.deinit();
            return;
        };

        ctx.* = .{
            .ext = ext,
            .integrity = pkg.integrity,
            .cache_path = cache_path,
            .pkg_name = pkg.name.slice(),
            .version_str = version_str,
            .direct = pkg.direct,
            .parent_path = pkg.parent_path,
            .has_bin = pkg.has_bin,
            .completed = false,
            .has_error = false,
            .queued = false,
            .parent = self,
        };

        self.http.fetchTarball(pkg.tarball_url, fetcher.StreamHandler.init(
            struct {
                fn onData(data: []const u8, ud: ?*anyopaque) void {
                    const c: *InterleavedExtractCtx = @ptrCast(@alignCast(ud));
                    c.ext.feedCompressed(data) catch {
                        c.has_error = true;
                    };
                }
            }.onData,
            struct {
                fn onComplete(_: u16, ud: ?*anyopaque) void {
                    const c: *InterleavedExtractCtx = @ptrCast(@alignCast(ud));
                    c.completed = true;

                    const completed = c.parent.tarballs_completed.fetchAdd(1, .monotonic) + 1;
                    // NOTE(review): tarballs_queued is a plain usize written by
                    // the resolver; assumes no new queueing races with this
                    // callback — confirm the fetch loop's threading model.
                    const total: u32 = @intCast(c.parent.tarballs_queued);

                    // Fix: the original wrote the 0 sentinel at msg_buf[len]
                    // into a [256]u8 buffer, which indexes out of bounds when
                    // the name is exactly 256 bytes. bufPrintZ reserves the
                    // sentinel slot itself.
                    var msg_buf: [257]u8 = undefined;
                    const msg = std.fmt.bufPrintZ(&msg_buf, "{s}", .{c.pkg_name}) catch return;
                    c.parent.pkg_ctx.reportProgress(.fetching, completed, total, msg);
                }
            }.onComplete,
            struct {
                fn onError(_: fetcher.FetchError, ud: ?*anyopaque) void {
                    const c: *InterleavedExtractCtx = @ptrCast(@alignCast(ud));
                    c.has_error = true;
                    c.completed = true;
                }
            }.onError,
            ctx,
        )) catch {
            ext.deinit();
            self.arena_alloc.destroy(ctx);
            return;
        };

        ctx.queued = true;
        self.extract_contexts.append(self.arena_alloc, ctx) catch return;

        debug.log(" queued tarball: {s}@{s}", .{ pkg.name.slice(), version_str });
    }
};

/// Resolve dependencies from package.json and install them, overlapping
/// tarball downloads with resolution and cache-hit linking. Phases:
/// resolve (+queue fetches) -> link cache hits while downloads finish ->
/// write lockfile -> batch cache insert -> parallel link of downloads ->
/// trusted postinstall.
export fn pkg_resolve_and_install(
    ctx: ?*PkgContext,
    package_json_path: [*:0]const u8,
    lockfile_path: [*:0]const u8,
    node_modules_path: [*:0]const u8,
) PkgError {
    const c = ctx orelse return .invalid_argument;
    _ = c.arena_state.reset(.retain_capacity);
    const arena_alloc = c.arena_state.allocator();

    // NOTE(review): Timer.start failure is mapped to .out_of_memory, which is
    // misleading (it fails on unsupported clocks); kept for ABI stability.
    var timer = std.time.Timer.start() catch return .out_of_memory;
    var stage_start: u64 = @intCast(std.time.nanoTimestamp());

    debug.log("resolve+install (interleaved): package_json={s} lockfile={s} node_modules={s}", .{
        std.mem.span(package_json_path),
        std.mem.span(lockfile_path),
        std.mem.span(node_modules_path),
    });

    const http = c.http orelse return .network_error;
    http.resetMetaClients();
    const db = c.cache_db orelse return .cache_error;

    const pkg_json_path_z = arena_alloc.dupeZ(u8, std.mem.span(package_json_path)) catch return .out_of_memory;
    var pkg_json = json.PackageJson.parse(arena_alloc, pkg_json_path_z) catch {
        c.setError("Failed to parse package.json");
        return .io_error;
    };
    defer pkg_json.deinit(arena_alloc);

    if (pkg_json.trusted_dependencies.count() > 0) {
        debug.log(" trusted dependencies: {d}", .{pkg_json.trusted_dependencies.count()});
    }

    var interleaved = InterleavedContext.init(c.allocator, arena_alloc, db, http, c);
    defer interleaved.deinit();

    var res = resolver.Resolver.init(
        arena_alloc,
        c.allocator,
        &c.string_pool,
        http,
        db,
        if (c.options.registry_url) |url| std.mem.span(url) else "https://registry.npmjs.org",
        &c.metadata_cache,
    );
    defer res.deinit();

    // Each resolved package immediately queues its tarball (see callback).
    res.setOnPackageResolved(InterleavedContext.onPackageResolved, &interleaved);
    res.resolveFromPackageJson(std.mem.span(package_json_path)) catch |err| {
        c.setErrorFmt("Failed to resolve dependencies: {}", .{err});
        return .resolve_error;
    };

    stage_start = debug.timer("resolve + queue tarballs", stage_start);
    debug.log(" resolved {d} packages, callbacks={d} (dupes={d}), cache hits={d}, queued={d}", .{
        res.resolved.count(),
        interleaved.callbacks_received,
        interleaved.integrity_duplicates,
        interleaved.cache_hits,
        interleaved.tarballs_queued,
    });

    // Record direct dependencies in the result list.
    var direct_iter = res.resolved.valueIterator();
    while (direct_iter.next()) |pkg_ptr| {
        const pkg = pkg_ptr.*;
        if (pkg.direct) {
            const version_str = std.fmt.allocPrint(arena_alloc, "{d}.{d}.{d}", .{
                pkg.version.major,
                pkg.version.minor,
                pkg.version.patch,
            }) catch continue;
            c.addPackageToResults(pkg.name.slice(), version_str, true) catch {};
        }
    }

    var pkg_linker = linker.Linker.init(c.allocator);
    defer pkg_linker.deinit();
    pkg_linker.setNodeModulesPath(std.mem.span(node_modules_path)) catch return .io_error;

    // Fix: the original scanned extract_contexts linearly for every resolved
    // package (O(n^2)); build a hash set of in-flight downloads once instead.
    var download_set = std.AutoHashMap([64]u8, void).init(arena_alloc);
    for (interleaved.extract_contexts.items) |ext_ctx| {
        download_set.put(ext_ctx.integrity, {}) catch {};
    }

    // Collect link jobs for everything already present in the cache.
    var cache_hit_jobs = std.ArrayListUnmanaged(linker.PackageLink){};
    var pkg_iter = res.resolved.valueIterator();
    while (pkg_iter.next()) |pkg_ptr| {
        const pkg = pkg_ptr.*;
        if (download_set.contains(pkg.integrity)) continue;

        var cache_entry = db.lookup(&pkg.integrity) orelse continue;
        defer cache_entry.deinit();
        const cache_path = arena_alloc.dupe(u8, cache_entry.path) catch continue;

        cache_hit_jobs.append(arena_alloc, .{
            .cache_path = cache_path,
            .node_modules_path = std.mem.span(node_modules_path),
            .name = pkg.name.slice(),
            .parent_path = pkg.parent_path,
            .file_count = cache_entry.file_count,
            .has_bin = pkg.has_bin,
        }) catch continue;
    }

    // Finish pending downloads on a helper thread while we link cache hits;
    // fall back to running the loop synchronously if spawning fails.
    var tarball_thread: ?std.Thread = null;
    if (interleaved.tarballs_queued > 0) {
        debug.log("finishing {d} tarball downloads (pending={d})...", .{
            interleaved.tarballs_queued,
            http.pending.items.len,
        });
        tarball_thread = std.Thread.spawn(.{}, struct {
            fn work(h: *fetcher.Fetcher) void {
                h.run() catch {};
            }
        }.work, .{http}) catch |err| blk: {
            debug.log("warning: failed to spawn tarball thread: {}, running synchronously", .{err});
            http.run() catch {};
            break :blk null;
        };
    }

    var linked_count: usize = 0;
    for (cache_hit_jobs.items, 0..) |job, i| {
        const msg = std.fmt.allocPrintSentinel(arena_alloc, "{s}", .{job.name}, 0) catch continue;
        c.reportProgress(.linking, @intCast(i), @intCast(cache_hit_jobs.items.len), msg);
        pkg_linker.linkPackage(job) catch continue;
        linked_count += 1;
    }

    if (tarball_thread) |t| {
        t.join();
        stage_start = debug.timer("finish tarballs + link cache hits", stage_start);
    } else stage_start = debug.timer("link cache hits", stage_start);
    debug.log(" linked {d} from cache", .{linked_count});

    res.writeLockfile(std.mem.span(lockfile_path)) catch |err| {
        c.setErrorFmt("Failed to write lockfile: {}", .{err});
        return .io_error;
    };
    stage_start = debug.timer("write lockfile", stage_start);

    var success_count: usize = 0;
    var error_count: usize = 0;

    const LinkJobWithSize = struct {
        job: linker.PackageLink,
        size: u64,
    };

    // Gather cache entries and link jobs for every completed download.
    var cache_entries = std.ArrayListUnmanaged(cache.CacheDB.NamedCacheEntry){};
    var link_jobs = std.ArrayListUnmanaged(LinkJobWithSize){};
    const current_time = std.time.timestamp();
    const nm_path = std.mem.span(node_modules_path);

    for (interleaved.extract_contexts.items) |ext_ctx| {
        if (ext_ctx.has_error or !ext_ctx.completed) {
            error_count += 1;
            debug.log(" error: {s}", .{ext_ctx.pkg_name});
            continue;
        }
        success_count += 1;

        const stats = ext_ctx.ext.stats();
        cache_entries.append(arena_alloc, .{
            .entry = .{
                .integrity = ext_ctx.integrity,
                .path = ext_ctx.cache_path,
                .unpacked_size = stats.bytes,
                .file_count = stats.files,
                .cached_at = current_time,
            },
            .name = ext_ctx.pkg_name,
            .version = ext_ctx.version_str,
        }) catch continue;

        link_jobs.append(arena_alloc, .{
            .job = .{
                .cache_path = ext_ctx.cache_path,
                .node_modules_path = nm_path,
                .name = ext_ctx.pkg_name,
                .parent_path = ext_ctx.parent_path,
                .file_count = stats.files,
                .has_bin = ext_ctx.has_bin,
            },
            .size = stats.bytes,
        }) catch continue;

        c.addPackageToResults(ext_ctx.pkg_name, ext_ctx.version_str, ext_ctx.direct) catch {};
    }

    for (cache_entries.items, 0..) |entry, i| {
        const msg = std.fmt.allocPrintSentinel(arena_alloc, "{s}", .{entry.name}, 0) catch continue;
        c.reportProgress(.caching, @intCast(i), @intCast(cache_entries.items.len), msg);
    }
    db.batchInsertNamed(cache_entries.items) catch {};
    stage_start = debug.timer("cache insert (batch)", stage_start);

    const total_jobs: u32 = @intCast(link_jobs.items.len);
    var link_counter = std.atomic.Value(u32).init(0);
    const LARGE_LINK_BYTES: u64 = 2 * 1024 * 1024;

    // Sort descending by unpacked size so the largest packages start first.
    std.sort.heap(LinkJobWithSize, link_jobs.items, {}, struct {
        fn lessThan(_: void, a: LinkJobWithSize, b: LinkJobWithSize) bool {
            return a.size > b.size;
        }
    }.lessThan);

    var split_idx: usize = link_jobs.items.len;
    for (link_jobs.items, 0..) |job, i| {
        if (job.size < LARGE_LINK_BYTES) {
            split_idx = i;
            break;
        }
    }

    const large_jobs = link_jobs.items[0..split_idx];
    const small_jobs = link_jobs.items[split_idx..];
    const phases = [_][]const LinkJobWithSize{ large_jobs, small_jobs };

    var slow_link_count = std.atomic.Value(u32).init(0);
    var max_link_ms = std.atomic.Value(u64).init(0);
    var slow_link_names = std.ArrayListUnmanaged([]const u8){};
    defer slow_link_names.deinit(c.allocator);
    var slow_link_lock = std.Thread.Mutex{};

    for (phases) |phase_jobs| {
        if (phase_jobs.len == 0) continue;
        const num_threads = @min(8, phase_jobs.len);
        // NOTE(review): this logs for any phase whose length equals
        // large_jobs.len, so it can fire twice when both phases are the same
        // size — kept as-is.
        if (c.options.verbose and phase_jobs.len == large_jobs.len) {
            debug.log(" linking large packages first ({d} items)", .{phase_jobs.len});
        }
        if (num_threads > 1 and phase_jobs.len > 4) {
            var threads: [8]?std.Thread = .{null} ** 8;
            const jobs_per_thread = (phase_jobs.len + num_threads - 1) / num_threads;

            for (0..num_threads) |t| {
                const start_idx = t * jobs_per_thread;
                const end_idx = @min(start_idx + jobs_per_thread, phase_jobs.len);
                if (start_idx >= end_idx) break;

                threads[t] = std.Thread.spawn(.{}, struct {
                    fn work(lnk: *linker.Linker, jobs: []const LinkJobWithSize, pkg_ctx: *PkgContext, total: u32, counter: *std.atomic.Value(u32), slow_count: *std.atomic.Value(u32), max_ms: *std.atomic.Value(u64), names: *std.ArrayListUnmanaged([]const u8), lock: *std.Thread.Mutex, alloc: std.mem.Allocator) void {
                        for (jobs) |job_with_size| {
                            const job = job_with_size.job;
                            const current = counter.fetchAdd(1, .monotonic) + 1;
                            // Fix: same sentinel off-by-one as the fetch
                            // callback — use bufPrintZ with room for the 0.
                            var msg_buf: [257]u8 = undefined;
                            const msg = std.fmt.bufPrintZ(&msg_buf, "{s}", .{job.name}) catch continue;
                            pkg_ctx.reportProgress(.linking, current, total, msg);
                            const start = std.time.nanoTimestamp();
                            lnk.linkPackage(job) catch {};
                            const delta = std.time.nanoTimestamp() - start;
                            const elapsed_ms: u64 = if (delta < 0) 0 else @intCast(@as(u128, @intCast(delta)) / 1_000_000);
                            if (elapsed_ms > 100) {
                                _ = slow_count.fetchAdd(1, .monotonic);
                                lock.lock();
                                const entry = std.fmt.allocPrint(alloc, "{s} {d}ms", .{ job.name, elapsed_ms }) catch null;
                                if (entry) |val| {
                                    names.append(alloc, val) catch {};
                                }
                                lock.unlock();
                                // CAS loop keeps the running maximum.
                                var current_max = max_ms.load(.monotonic);
                                while (elapsed_ms > current_max) : (current_max = max_ms.load(.monotonic)) {
                                    if (max_ms.cmpxchgWeak(current_max, elapsed_ms, .monotonic, .monotonic) == null) break;
                                }
                            }
                        }
                    }
                }.work, .{ &pkg_linker, phase_jobs[start_idx..end_idx], c, total_jobs, &link_counter, &slow_link_count, &max_link_ms, &slow_link_names, &slow_link_lock, c.allocator }) catch null;
            }

            for (&threads) |*t| {
                if (t.*) |thread| thread.join();
            }
        } else {
            for (phase_jobs) |job_with_size| {
                const job = job_with_size.job;
                const current = link_counter.fetchAdd(1, .monotonic) + 1;
                const msg = std.fmt.allocPrintSentinel(arena_alloc, "{s}", .{job.name}, 0) catch continue;
                c.reportProgress(.linking, current, total_jobs, msg);
                const start = std.time.nanoTimestamp();
                pkg_linker.linkPackage(job) catch {};
                // Fix: the original cast both i128 timestamps straight to u64
                // and subtracted, which underflows if the clock steps
                // backwards; use the same guarded math as the threaded path.
                const delta = std.time.nanoTimestamp() - start;
                const elapsed_ms: u64 = if (delta < 0) 0 else @intCast(@as(u128, @intCast(delta)) / 1_000_000);
                if (elapsed_ms > 100 and c.options.verbose) {
                    debug.log(" link slow: {s} {d}ms", .{ job.name, elapsed_ms });
                }
            }
        }
    }

    if (c.options.verbose) {
        debug.log(" link slow (>100ms): {d} max={d}ms", .{ slow_link_count.load(.monotonic), max_link_ms.load(.monotonic) });
        for (slow_link_names.items) |entry| {
            debug.log("   link slow: {s}", .{entry});
        }
    }

    for (slow_link_names.items) |entry| c.allocator.free(entry);
    stage_start = debug.timer("link downloads (parallel)", stage_start);

    debug.log(" downloaded: {d} success, {d} errors", .{ success_count, error_count });
    for (interleaved.extract_contexts.items) |ext_ctx| ext_ctx.ext.deinit();

    db.sync();
    _ = debug.timer("cache sync", stage_start);

    const link_stats = pkg_linker.getStats();
    c.last_install_result = .{
        .package_count = @intCast(res.resolved.count()),
        .cache_hits = @intCast(interleaved.cache_hits),
        .cache_misses = @intCast(interleaved.tarballs_queued),
        .files_linked = link_stats.files_linked,
        .files_copied = link_stats.files_copied,
        .packages_installed = link_stats.packages_installed,
        .packages_skipped = link_stats.packages_skipped,
        .elapsed_ms = timer.read() / 1_000_000,
    };

    debug.log("total: {d} packages in {d}ms", .{ res.resolved.count(), c.last_install_result.elapsed_ms });

    if (pkg_json.trusted_dependencies.count() > 0) {
        runTrustedPostinstall(c, &pkg_json.trusted_dependencies, std.mem.span(node_modules_path), arena_alloc);
    }

    return .ok;
}

/// One queued postinstall child process and its outcome.
const PostinstallJob = struct {
    pkg_name: []const u8,
    pkg_dir: []const u8,
    script: []const u8,
    child: ?std.process.Child = null,
    exit_code: ?u8 = null,
    stderr: ?[]const u8 = null,
    failed: bool = false,
};

fn runTrustedPostinstall(
    ctx: *PkgContext,
    trusted: *std.StringHashMap(void),
    node_modules_path: []const u8,
    allocator: std.mem.Allocator,
) void {
    var env_map = std.process.getEnvMap(allocator) catch return;
    defer env_map.deinit();

    const cwd = std.fs.cwd();
    const
abs_nm_path = cwd.realpathAlloc(allocator, node_modules_path) catch return; 1275 + defer allocator.free(abs_nm_path); 1276 + 1277 + const bin_path = std.fmt.allocPrint(allocator, "{s}/.bin", .{abs_nm_path}) catch return; 1278 + defer allocator.free(bin_path); 1279 + 1280 + const current_path = env_map.get("PATH") orelse ""; 1281 + const new_path = if (builtin.os.tag == .windows) 1282 + std.fmt.allocPrint(allocator, "{s};{s}", .{ bin_path, current_path }) catch return 1283 + else 1284 + std.fmt.allocPrint(allocator, "{s}:{s}", .{ bin_path, current_path }) catch return; 1285 + defer allocator.free(new_path); 1286 + 1287 + env_map.put("PATH", new_path) catch return; 1288 + 1289 + var jobs = std.ArrayListUnmanaged(PostinstallJob){}; 1290 + defer { 1291 + for (jobs.items) |*job| if (job.stderr) |s| allocator.free(s); 1292 + jobs.deinit(allocator); 1293 + } 1294 + 1295 + var key_iter = trusted.keyIterator(); 1296 + while (key_iter.next()) |pkg_name_ptr| { 1297 + const pkg_name = pkg_name_ptr.*; 1298 + 1299 + const pkg_json_path = std.fmt.allocPrint(allocator, "{s}/{s}/package.json", .{ 1300 + node_modules_path, pkg_name, 1301 + }) catch continue; 1302 + defer allocator.free(pkg_json_path); 1303 + 1304 + const file = std.fs.cwd().openFile(pkg_json_path, .{}) catch continue; 1305 + defer file.close(); 1306 + 1307 + const content = file.readToEndAlloc(allocator, 1024 * 1024) catch continue; 1308 + defer allocator.free(content); 1309 + 1310 + var doc = json.JsonDoc.parse(content) catch continue; 1311 + defer doc.deinit(); 1312 + 1313 + const root = doc.root(); 1314 + 1315 + if (root.getObject("scripts")) |scripts| { 1316 + const script = scripts.getString("postinstall") orelse 1317 + scripts.getString("install") orelse continue; 1318 + 1319 + if (std.mem.eql(u8, pkg_name, "esbuild")) { 1320 + debug.log("ignoring esbuild lifecycle scripts", .{}); 1321 + continue; 1322 + } 1323 + 1324 + const pkg_dir = std.fmt.allocPrint(allocator, "{s}/{s}", .{ 1325 + node_modules_path, 
pkg_name, 1326 + }) catch continue; 1327 + 1328 + const marker_path = std.fmt.allocPrint(allocator, "{s}/.postinstall", .{pkg_dir}) catch continue; 1329 + defer allocator.free(marker_path); 1330 + if (std.fs.cwd().access(marker_path, .{})) |_| { 1331 + debug.log("postinstall already done: {s}", .{pkg_name}); 1332 + allocator.free(pkg_dir); 1333 + continue; 1334 + } else |_| {} 1335 + 1336 + jobs.append(allocator, .{ 1337 + .pkg_name = pkg_name, 1338 + .pkg_dir = pkg_dir, 1339 + .script = allocator.dupe(u8, script) catch continue, 1340 + }) catch continue; 1341 + } 1342 + } 1343 + 1344 + if (jobs.items.len == 0) return; 1345 + for (jobs.items) |job| debug.log("starting postinstall: {s}", .{job.pkg_name}); 1346 + 1347 + for (jobs.items, 0..) |*job, i| { 1348 + const msg = std.fmt.allocPrintSentinel(allocator, "{s}", .{job.pkg_name}, 0) catch continue; 1349 + ctx.reportProgress(.postinstall, @intCast(i), @intCast(jobs.items.len), msg); 1350 + debug.log("running postinstall: {s}", .{job.pkg_name}); 1351 + 1352 + const shell_argv: []const []const u8 = if (builtin.os.tag == .windows) 1353 + &[_][]const u8{ "cmd", "/c", job.script } 1354 + else 1355 + &[_][]const u8{ "sh", "-c", job.script }; 1356 + 1357 + var child = std.process.Child.init(shell_argv, allocator); 1358 + child.cwd = job.pkg_dir; 1359 + child.env_map = &env_map; 1360 + child.stderr_behavior = .Pipe; 1361 + child.stdout_behavior = .Pipe; 1362 + 1363 + child.spawn() catch { 1364 + job.failed = true; 1365 + continue; 1366 + }; 1367 + job.child = child; 1368 + } 1369 + 1370 + var scripts_run: u32 = 0; 1371 + for (jobs.items) |*job| { 1372 + if (job.child) |*child| { 1373 + var stdout_buf: std.ArrayList(u8) = .empty; 1374 + var stderr_buf: std.ArrayList(u8) = .empty; 1375 + 1376 + child.collectOutput(allocator, &stdout_buf, &stderr_buf, 1024 * 1024) catch {}; 1377 + 1378 + const term = child.wait() catch { 1379 + stdout_buf.deinit(allocator); 1380 + stderr_buf.deinit(allocator); 1381 + job.failed = true; 1382 + 
continue; 1383 + }; 1384 + 1385 + if (stdout_buf.items.len > 0) { 1386 + var line_iter = std.mem.splitScalar(u8, stdout_buf.items, '\n'); 1387 + while (line_iter.next()) |line| { 1388 + if (line.len > 0) debug.log(" {s}: {s}", .{ job.pkg_name, line }); 1389 + } 1390 + } stdout_buf.deinit(allocator); 1391 + 1392 + switch (term) { 1393 + .Exited => |code| { 1394 + if (code != 0) { 1395 + job.exit_code = code; 1396 + job.stderr = if (stderr_buf.items.len > 0) stderr_buf.toOwnedSlice(allocator) catch null else null; 1397 + debug.log(" postinstall failed for {s}: exit code {d}", .{ job.pkg_name, code }); 1398 + if (job.stderr) |s| { 1399 + if (s.len > 0) debug.log(" stderr: {s}", .{s}); 1400 + } 1401 + } else { 1402 + stderr_buf.deinit(allocator); 1403 + scripts_run += 1; 1404 + const marker_path = std.fmt.allocPrint(allocator, "{s}/.postinstall", .{job.pkg_dir}) catch continue; 1405 + defer allocator.free(marker_path); 1406 + if (std.fs.cwd().createFile(marker_path, .{})) |f| f.close() else |_| {} 1407 + } 1408 + }, 1409 + .Signal => |sig| { 1410 + job.failed = true; 1411 + debug.log(" postinstall killed by signal {d}: {s}", .{ sig, job.pkg_name }); 1412 + stderr_buf.deinit(allocator); 1413 + }, 1414 + else => { 1415 + job.failed = true; 1416 + stderr_buf.deinit(allocator); 1417 + }, 1418 + } 1419 + } 1420 + } 1421 + 1422 + for (jobs.items) |job| { 1423 + allocator.free(job.pkg_dir); 1424 + allocator.free(job.script); 1425 + } 1426 + 1427 + if (scripts_run > 0) debug.log("ran {d} postinstall scripts", .{scripts_run}); 1428 + } 1429 + 1430 + export fn pkg_add( 1431 + ctx: ?*PkgContext, 1432 + package_json_path: [*:0]const u8, 1433 + package_spec: [*:0]const u8, 1434 + dev: bool, 1435 + ) PkgError { 1436 + const c = ctx orelse return .invalid_argument; 1437 + _ = c.arena_state.reset(.retain_capacity); 1438 + const arena_alloc = c.arena_state.allocator(); 1439 + 1440 + const pkg_json_str = std.mem.span(package_json_path); 1441 + const spec_str = 
std.mem.span(package_spec); 1442 + 1443 + var pkg_name: []const u8 = spec_str; 1444 + var version_constraint: []const u8 = "latest"; 1445 + 1446 + if (std.mem.indexOf(u8, spec_str, "@")) |at_idx| { 1447 + if (at_idx == 0) { 1448 + if (std.mem.indexOfPos(u8, spec_str, 1, "@")) |second_at| { 1449 + pkg_name = spec_str[0..second_at]; 1450 + version_constraint = spec_str[second_at + 1 ..]; 1451 + } 1452 + } else { 1453 + pkg_name = spec_str[0..at_idx]; 1454 + version_constraint = spec_str[at_idx + 1 ..]; 1455 + } 1456 + } 1457 + 1458 + const http = c.http orelse return .network_error; 1459 + var res = resolver.Resolver.init( 1460 + arena_alloc, 1461 + c.allocator, 1462 + &c.string_pool, 1463 + http, 1464 + c.cache_db, 1465 + if (c.options.registry_url) |url| std.mem.span(url) else "https://registry.npmjs.org", 1466 + &c.metadata_cache, 1467 + ); defer res.deinit(); 1468 + 1469 + const resolved_pkg = res.resolve(pkg_name, version_constraint, 0) catch |err| { 1470 + c.setErrorFmt("Failed to resolve {s}: {}", .{ pkg_name, err }); 1471 + return .resolve_error; 1472 + }; 1473 + 1474 + const content = blk: { 1475 + const file = std.fs.cwd().openFile(pkg_json_str, .{ .mode = .read_only }) catch |err| { 1476 + if (err == error.FileNotFound) break :blk "{}"; 1477 + c.setError("Failed to open package.json"); 1478 + return .io_error; 1479 + }; 1480 + defer file.close(); 1481 + break :blk file.readToEndAlloc(arena_alloc, 10 * 1024 * 1024) catch { 1482 + c.setError("Failed to read package.json"); 1483 + return .io_error; 1484 + }; 1485 + }; 1486 + 1487 + const parsed = std.json.parseFromSlice(std.json.Value, arena_alloc, content, .{}) catch { 1488 + c.setError("Failed to parse package.json"); 1489 + return .invalid_argument; 1490 + }; defer parsed.deinit(); 1491 + 1492 + if (parsed.value != .object) { 1493 + c.setError("Invalid package.json format"); 1494 + return .invalid_argument; 1495 + } 1496 + 1497 + const version_str = resolved_pkg.version.format(arena_alloc) catch { 1498 + 
return .out_of_memory; 1499 + }; 1500 + 1501 + const version_with_caret = std.fmt.allocPrint(arena_alloc, "^{s}", .{version_str}) catch { 1502 + return .out_of_memory; 1503 + }; 1504 + 1505 + const target_key = if (dev) "devDependencies" else "dependencies"; 1506 + 1507 + var deps = if (parsed.value.object.get(target_key)) |d| 1508 + if (d == .object) d.object else std.json.ObjectMap.init(arena_alloc) 1509 + else std.json.ObjectMap.init(arena_alloc); 1510 + 1511 + deps.put(pkg_name, .{ .string = version_with_caret }) catch { 1512 + return .out_of_memory; 1513 + }; 1514 + 1515 + var writer = json.JsonWriter.init() catch { 1516 + return .out_of_memory; 1517 + }; defer writer.deinit(); 1518 + 1519 + const root_obj = writer.createObject(); 1520 + writer.setRoot(root_obj); 1521 + 1522 + var found_target = false; 1523 + for (parsed.value.object.keys(), parsed.value.object.values()) |key, value| { 1524 + if (std.mem.eql(u8, key, target_key)) { 1525 + found_target = true; 1526 + const deps_obj = writer.createObject(); 1527 + var dep_iter = deps.iterator(); 1528 + while (dep_iter.next()) |entry| { 1529 + if (entry.value_ptr.* == .string) { 1530 + writer.objectAdd(deps_obj, entry.key_ptr.*, writer.createString(entry.value_ptr.string)); 1531 + } 1532 + } writer.objectAdd(root_obj, key, deps_obj); 1533 + } else { 1534 + const json_val = jsonValueToMut(&writer, value) catch continue; 1535 + writer.objectAdd(root_obj, key, json_val); 1536 + } 1537 + } 1538 + 1539 + if (!found_target) { 1540 + const deps_obj = writer.createObject(); 1541 + writer.objectAdd(deps_obj, pkg_name, writer.createString(version_with_caret)); 1542 + writer.objectAdd(root_obj, target_key, deps_obj); 1543 + } 1544 + 1545 + const pkg_json_z = arena_alloc.dupeZ(u8, pkg_json_str) catch { 1546 + return .out_of_memory; 1547 + }; 1548 + 1549 + writer.writeToFile(pkg_json_z) catch { 1550 + c.setError("Failed to write package.json"); 1551 + return .io_error; 1552 + }; 1553 + 1554 + return .ok; 1555 + } 1556 + 1557 
/// Recursively converts a parsed `std.json.Value` into a mutable yyjson node
/// owned by `writer`, so an existing document can be round-tripped.
fn jsonValueToMut(writer: *json.JsonWriter, value: std.json.Value) !*json.yyjson.yyjson_mut_val {
    switch (value) {
        .null => return writer.createNull(),
        .bool => |b| return writer.createBool(b),
        .integer => |i| return writer.createInt(i),
        .float => |f| return writer.createReal(f),
        .string => |s| return writer.createString(s),
        // Numbers that did not fit integer/float are kept as their raw text.
        .number_string => |s| return writer.createString(s),
        .array => |arr| {
            const out_arr = writer.createArray();
            for (arr.items) |element| {
                writer.arrayAppend(out_arr, try jsonValueToMut(writer, element));
            }
            return out_arr;
        },
        .object => |obj| {
            const out_obj = writer.createObject();
            for (obj.keys(), obj.values()) |obj_key, obj_val| {
                writer.objectAdd(out_obj, obj_key, try jsonValueToMut(writer, obj_val));
            }
            return out_obj;
        },
    }
}

/// C ABI: remove `package_name` from every dependency section of the
/// package.json at `package_json_path`, rewriting the file in place.
/// Returns `.not_found` when the package is absent from all sections.
export fn pkg_remove(
    ctx: ?*PkgContext,
    package_json_path: [*:0]const u8,
    package_name: [*:0]const u8,
) PkgError {
    const c = ctx orelse return .invalid_argument;
    _ = c.arena_state.reset(.retain_capacity);
    const arena_alloc = c.arena_state.allocator();

    const pkg_json_str = std.mem.span(package_json_path);
    const name_str = std.mem.span(package_name);

    const content = std.fs.cwd().readFileAlloc(arena_alloc, pkg_json_str, 10 * 1024 * 1024) catch {
        c.setError("Failed to read package.json");
        return .io_error;
    };

    const parsed = std.json.parseFromSlice(std.json.Value, arena_alloc, content, .{}) catch {
        c.setError("Failed to parse package.json");
        return .invalid_argument;
    };
    defer parsed.deinit();

    if (parsed.value != .object) {
        c.setError("Invalid package.json format");
        return .invalid_argument;
    }

    const dep_keys = [_][]const u8{
        "dependencies",
        "devDependencies",
        "peerDependencies",
        "optionalDependencies",
    };

    // First pass: confirm the package exists somewhere before rewriting.
    const found = found: {
        for (dep_keys) |dep_key| {
            const section = parsed.value.object.get(dep_key) orelse continue;
            if (section != .object) continue;
            if (section.object.get(name_str) != null) break :found true;
        }
        break :found false;
    };

    if (!found) {
        c.setErrorFmt("Package {s} not found in dependencies", .{name_str});
        return .not_found;
    }

    // Second pass: rebuild the document, copying every key but filtering the
    // removed name out of each dependency section.
    var writer = json.JsonWriter.init() catch return .out_of_memory;
    defer writer.deinit();

    const root_obj = writer.createObject();
    writer.setRoot(root_obj);

    for (parsed.value.object.keys(), parsed.value.object.values()) |key, value| {
        const is_dep_section = for (dep_keys) |dk| {
            if (std.mem.eql(u8, key, dk)) break true;
        } else false;

        if (!is_dep_section or value != .object) {
            const copied = jsonValueToMut(&writer, value) catch continue;
            writer.objectAdd(root_obj, key, copied);
            continue;
        }

        const filtered = writer.createObject();
        for (value.object.keys(), value.object.values()) |dep_name, dep_val| {
            if (std.mem.eql(u8, dep_name, name_str)) continue;
            const copied_dep = jsonValueToMut(&writer, dep_val) catch continue;
            writer.objectAdd(filtered, dep_name, copied_dep);
        }
        writer.objectAdd(root_obj, key, filtered);
    }

    const pkg_json_z = arena_alloc.dupeZ(u8, pkg_json_str) catch return .out_of_memory;
    writer.writeToFile(pkg_json_z) catch {
        c.setError("Failed to write package.json");
        return .io_error;
    };

    return .ok;
}

/// C ABI: last error message for the context, or a generic fallback.
export fn pkg_error_string(ctx: ?*const PkgContext) [*:0]const u8 {
    if (ctx) |c| if (c.last_error) |e| return e.ptr;
    return "Unknown error";
}

/// C ABI: flush the on-disk package cache, if one is configured.
export fn pkg_cache_sync(ctx: ?*PkgContext) void {
    if (ctx) |c| if (c.cache_db) |db| db.sync();
}

/// C ABI: fill `out` with cache size/entry statistics.
export fn pkg_cache_stats(ctx: ?*PkgContext, out: *CacheStats) PkgError {
    const c = ctx orelse return .invalid_argument;
    const db = c.cache_db orelse return .cache_error;

    const stats = db.stats() catch return .cache_error;
    out.* = .{
        .total_size = stats.cache_size,
        .db_size = stats.db_size,
        .package_count = @intCast(stats.entries),
    };

    return .ok;
}

/// C ABI: evict cache entries older than `max_age_days`; returns the number
/// pruned, or a negative PkgError value on failure.
export fn pkg_cache_prune(ctx: ?*PkgContext, max_age_days: u32) i32 {
    const c = ctx orelse return @intFromEnum(PkgError.invalid_argument);
    const db = c.cache_db orelse return @intFromEnum(PkgError.cache_error);

    const pruned = db.prune(max_age_days) catch return @intFromEnum(PkgError.cache_error);
    return @intCast(pruned);
}

/// C ABI: resolve `<node_modules>/.bin/<bin_name>` to an absolute real path,
/// NUL-terminated into `out_path`. Returns the path length, or -1 when the
/// binary is missing or the buffer is too small.
export fn pkg_get_bin_path(
    node_modules_path: [*:0]const u8,
    bin_name: [*:0]const u8,
    out_path: [*]u8,
    out_path_len: usize,
) c_int {
    const nm_path = std.mem.span(node_modules_path);
    const name = std.mem.span(bin_name);

    var candidate_buf: [std.fs.max_path_bytes]u8 = undefined;
    const candidate = std.fmt.bufPrint(&candidate_buf, "{s}/.bin/{s}", .{ nm_path, name }) catch return -1;

    std.fs.cwd().access(candidate, .{}) catch return -1;

    // Follow symlinks so callers get the actual executable location.
    var resolved_buf: [std.fs.max_path_bytes]u8 = undefined;
    const resolved = std.fs.cwd().realpath(candidate, &resolved_buf) catch return -1;

    if (resolved.len >= out_path_len) return -1;

    @memcpy(out_path[0..resolved.len], resolved);
    out_path[resolved.len] = 0;

    return @intCast(resolved.len);
}

/// C ABI: invoke `callback` once per entry (file or symlink) in
/// `<node_modules>/.bin`; returns the entry count, -1 if the dir is missing.
/// Names longer than 255 bytes are counted but not reported.
export fn pkg_list_bins(
    node_modules_path: [*:0]const u8,
    callback: ?*const fn ([*:0]const u8, ?*anyopaque) callconv(.c) void,
    user_data: ?*anyopaque,
) c_int {
    const nm_path = std.mem.span(node_modules_path);

    var dir_path_buf: [std.fs.max_path_bytes]u8 = undefined;
    const bin_dir_path = std.fmt.bufPrint(&dir_path_buf, "{s}/.bin", .{nm_path}) catch return -1;

    var dir = std.fs.cwd().openDir(bin_dir_path, .{ .iterate = true }) catch return -1;
    defer dir.close();

    var count: c_int = 0;
    var it = dir.iterate();
    while (it.next() catch null) |entry| {
        if (entry.kind != .sym_link and entry.kind != .file) continue;
        if (callback) |cb| {
            var name_buf: [256]u8 = undefined;
            if (entry.name.len < name_buf.len) {
                @memcpy(name_buf[0..entry.name.len], entry.name);
                name_buf[entry.name.len] = 0;
                cb(@ptrCast(&name_buf), user_data);
            }
        }
        count += 1;
    }

    return count;
}
/// C ABI: report the bin names declared by one installed package.
/// A "bin" object yields one callback per key; a "bin" string yields the
/// unscoped package name. Returns the bin count, -1 on read/parse failure.
export fn pkg_list_package_bins(
    node_modules_path: [*:0]const u8,
    package_name: [*:0]const u8,
    callback: ?*const fn ([*:0]const u8, ?*anyopaque) callconv(.c) void,
    user_data: ?*anyopaque,
) c_int {
    const nm_path = std.mem.span(node_modules_path);
    const pkg_name = std.mem.span(package_name);

    var path_buf: [std.fs.max_path_bytes]u8 = undefined;
    const pkg_json_path = std.fmt.bufPrint(&path_buf, "{s}/{s}/package.json", .{ nm_path, pkg_name }) catch return -1;

    const file = std.fs.cwd().openFile(pkg_json_path, .{}) catch return -1;
    defer file.close();

    const content = file.readToEndAlloc(global_allocator, 1024 * 1024) catch return -1;
    defer global_allocator.free(content);

    var doc = json.JsonDoc.parse(content) catch return -1;
    defer doc.deinit();

    const root_val = doc.root();
    var count: c_int = 0;

    if (root_val.getObject("bin")) |bin_obj| {
        var it = bin_obj.objectIterator() orelse return 0;
        while (it.next()) |entry| {
            if (callback) |cb| {
                var name_buf: [256]u8 = undefined;
                if (entry.key.len < name_buf.len) {
                    @memcpy(name_buf[0..entry.key.len], entry.key);
                    name_buf[entry.key.len] = 0;
                    cb(@ptrCast(&name_buf), user_data);
                }
            }
            count += 1;
        }
    } else if (root_val.getString("bin")) |_| {
        // "bin": "<path>" — the bin name is the package name sans scope.
        const simple_name = if (std.mem.indexOf(u8, pkg_name, "/")) |slash|
            pkg_name[slash + 1 ..]
        else
            pkg_name;

        if (callback) |cb| {
            var name_buf: [256]u8 = undefined;
            if (simple_name.len < name_buf.len) {
                @memcpy(name_buf[0..simple_name.len], simple_name);
                name_buf[simple_name.len] = 0;
                cb(@ptrCast(&name_buf), user_data);
            }
        }
        count = 1;
    }

    return count;
}

/// C ABI: copy the command for `script_name` from the manifest's "scripts"
/// into `out_script` (NUL-terminated). For "start" with no explicit script,
/// falls back to `ant <main>` or `ant server.js` (npm-style default).
/// Returns the script length, or -1 when not found / buffer too small.
export fn pkg_get_script(
    package_json_path: [*:0]const u8,
    script_name: [*:0]const u8,
    out_script: [*]u8,
    out_script_len: usize,
) c_int {
    const allocator = global_allocator;
    const name = std.mem.span(script_name);

    var doc = json.JsonDoc.parseFile(std.mem.span(package_json_path)) catch return -1;
    defer doc.deinit();
    const root_val = doc.root();

    if (root_val.getObject("scripts")) |scripts_obj| {
        if (scripts_obj.getString(name)) |script| {
            if (script.len >= out_script_len) return -1;
            @memcpy(out_script[0..script.len], script);
            out_script[script.len] = 0;
            return @intCast(script.len);
        }
    }

    if (std.mem.eql(u8, name, "start")) {
        if (root_val.getString("main")) |main_file| {
            const script = std.fmt.allocPrint(allocator, "ant {s}", .{main_file}) catch return -1;
            defer allocator.free(script);
            if (script.len >= out_script_len) return -1;
            @memcpy(out_script[0..script.len], script);
            out_script[script.len] = 0;
            return @intCast(script.len);
        }

        if (std.fs.cwd().access("server.js", .{})) |_| {
            const script = "ant server.js";
            if (script.len >= out_script_len) return -1;
            @memcpy(out_script[0..script.len], script);
            out_script[script.len] = 0;
            return @intCast(script.len);
        } else |_| {}
    }

    return -1;
}

/// Exit status of a script run, mirrored to C callers.
pub const ScriptResult = extern struct {
    exit_code: c_int,
    signal: c_int,
};

/// Runs `script` (optionally with `extra_args` appended) through the platform
/// shell with the given environment, blocking until it exits.
fn runScriptCommand(
    allocator: std.mem.Allocator,
    script: []const u8,
    extra_args: ?[*:0]const u8,
    env_map: *std.process.EnvMap,
) !ScriptResult {
    const final_script = blk: {
        if (extra_args) |args| {
            const args_str = std.mem.span(args);
            if (args_str.len > 0) {
                break :blk try std.fmt.allocPrint(allocator, "{s} {s}", .{ script, args_str });
            }
        }
        break :blk try allocator.dupe(u8, script);
    };
    defer allocator.free(final_script);

    const script_z = try allocator.dupeZ(u8, final_script);
    defer allocator.free(script_z);

    const shell_argv: []const []const u8 = if (builtin.os.tag == .windows)
        &[_][]const u8{ "cmd", "/c", script_z }
    else
        &[_][]const u8{ "sh", "-c", script_z };

    var child = std.process.Child.init(shell_argv, allocator);
    child.env_map = env_map;

    try child.spawn();
    const term = try child.wait();

    switch (term) {
        .Exited => |code| return .{ .exit_code = code, .signal = 0 },
        .Signal => |sig| return .{ .exit_code = -1, .signal = @intCast(sig) },
        else => return .{ .exit_code = -1, .signal = 0 },
    }
}

/// C ABI: run a package.json script with npm-compatible semantics:
/// pre<name> / <name> / post<name> ordering (later stages skipped after a
/// non-zero exit), `.bin` prepended to PATH, and npm_* environment variables
/// populated. `result`, when non-null, receives the last stage's status.
export fn pkg_run_script(
    package_json_path: [*:0]const u8,
    script_name: [*:0]const u8,
    node_modules_path: [*:0]const u8,
    extra_args: ?[*:0]const u8,
    result: ?*ScriptResult,
) PkgError {
    const allocator = global_allocator;
    const name = std.mem.span(script_name);

    var doc = json.JsonDoc.parseFile(std.mem.span(package_json_path)) catch return .io_error;
    defer doc.deinit();
    const root_val = doc.root();

    var script_buf: [8192]u8 = undefined;
    const script_len = pkg_get_script(package_json_path, script_name, &script_buf, script_buf.len);

    if (script_len < 0) return .not_found;
    const script = script_buf[0..@intCast(script_len)];

    // Look up optional pre/post hooks.
    var pre_script: ?[]const u8 = null;
    var post_script: ?[]const u8 = null;

    if (root_val.getObject("scripts")) |scripts_obj| {
        var pre_key_buf: [256]u8 = undefined;
        var post_key_buf: [256]u8 = undefined;

        const pre_key = std.fmt.bufPrintZ(&pre_key_buf, "pre{s}", .{name}) catch null;
        const post_key = std.fmt.bufPrintZ(&post_key_buf, "post{s}", .{name}) catch null;

        if (pre_key) |pk| pre_script = scripts_obj.getString(pk);
        if (post_key) |pk| post_script = scripts_obj.getString(pk);
    }

    var env_map = std.process.getEnvMap(allocator) catch return .out_of_memory;
    defer env_map.deinit();

    // Prepend node_modules/.bin to PATH (realpath best-effort).
    const nm_path = std.mem.span(node_modules_path);
    const cwd = std.fs.cwd();
    const abs_nm_path = cwd.realpathAlloc(allocator, nm_path) catch nm_path;
    defer if (abs_nm_path.ptr != nm_path.ptr) allocator.free(abs_nm_path);

    const bin_path = std.fmt.allocPrint(allocator, "{s}/.bin", .{abs_nm_path}) catch return .out_of_memory;
    defer allocator.free(bin_path);

    const current_path = env_map.get("PATH") orelse "";
    const new_path = if (builtin.os.tag == .windows)
        std.fmt.allocPrint(allocator, "{s};{s}", .{ bin_path, current_path }) catch return .out_of_memory
    else
        std.fmt.allocPrint(allocator, "{s}:{s}", .{ bin_path, current_path }) catch return .out_of_memory;
    defer allocator.free(new_path);

    env_map.put("PATH", new_path) catch return .out_of_memory;
    env_map.put("npm_lifecycle_event", name) catch {};

    // Expose "config" entries as npm_package_config_* variables.
    if (root_val.getObject("config")) |config_obj| {
        if (config_obj.objectIterator()) |it| {
            var config_iter = it;
            while (config_iter.next()) |entry| {
                if (entry.value.asString()) |value| {
                    const env_key = std.fmt.allocPrint(allocator, "npm_package_config_{s}", .{entry.key}) catch continue;
                    defer allocator.free(env_key);
                    env_map.put(env_key, value) catch {};
                }
            }
        }
    }

    if (root_val.getString("name")) |pkg_name| env_map.put("npm_package_name", pkg_name) catch {};
    if (root_val.getString("version")) |pkg_version| env_map.put("npm_package_version", pkg_version) catch {};

    if (pre_script) |pre| {
        const pre_event = std.fmt.allocPrint(allocator, "pre{s}", .{name}) catch name;
        defer if (pre_event.ptr != name.ptr) allocator.free(pre_event);
        env_map.put("npm_lifecycle_event", pre_event) catch {};
        const pre_result = runScriptCommand(allocator, pre, null, &env_map) catch return .io_error;
        if (pre_result.exit_code != 0) {
            // Pre-hook failure short-circuits the main script, npm-style.
            if (result) |r| r.* = pre_result;
            return .ok;
        }
    }

    env_map.put("npm_lifecycle_event", name) catch {};
    const main_result = runScriptCommand(allocator, script, extra_args, &env_map) catch return .io_error;

    if (main_result.exit_code != 0) {
        if (result) |r| r.* = main_result;
        return .ok;
    }

    if (post_script) |post| {
        const post_event = std.fmt.allocPrint(allocator, "post{s}", .{name}) catch name;
        defer if (post_event.ptr != name.ptr) allocator.free(post_event);
        env_map.put("npm_lifecycle_event", post_event) catch {};
        const post_result = runScriptCommand(allocator, post, null, &env_map) catch return .io_error;
        if (result) |r| r.* = post_result;
        return .ok;
    }

    if (result) |r| r.* = main_result;
    return .ok;
}
/// Bit-flags describing how a dependency edge relates to the dependent.
pub const DepType = packed struct(u8) {
    peer: bool = false,
    dev: bool = false,
    optional: bool = false,
    direct: bool = false,
    _reserved: u4 = 0,
};

/// Callback shape for dependency-walk reporting (name, version, constraint,
/// edge flags, opaque user data).
pub const DepCallback = ?*const fn (
    name: [*:0]const u8,
    version: [*:0]const u8,
    constraint: [*:0]const u8,
    dep_type: DepType,
    user_data: ?*anyopaque,
) callconv(.c) void;

/// Summary answer for "why is this package installed?".
pub const WhyInfo = extern struct {
    target_version: [64]u8,
    found: bool,
    is_peer: bool,
    is_dev: bool,
    is_direct: bool,
};

/// C ABI: scan the lockfile for `package_name` and fill `out` with its
/// installed version and dev/direct/peer flags. Returns 0 on success,
/// -1 when the lockfile cannot be opened or a version can't be formatted.
export fn pkg_why_info(
    lockfile_path: [*:0]const u8,
    package_name: [*:0]const u8,
    out: *WhyInfo,
) c_int {
    var lf = lockfile.Lockfile.open(std.mem.span(lockfile_path)) catch return -1;
    defer lf.close();

    const target_name = std.mem.span(package_name);

    // Start from "not found" and accumulate facts across all packages.
    out.* = .{
        .target_version = undefined,
        .found = false,
        .is_peer = false,
        .is_dev = false,
        .is_direct = false,
    };
    @memset(&out.target_version, 0);

    for (lf.packages) |*pkg| {
        const pkg_name = pkg.name.slice(lf.string_table);
        if (std.mem.eql(u8, pkg_name, target_name)) {
            const ver_str = pkg.versionString(global_allocator, lf.string_table) catch return -1;
            defer global_allocator.free(ver_str);
            if (ver_str.len < out.target_version.len) {
                @memcpy(out.target_version[0..ver_str.len], ver_str);
                out.target_version[ver_str.len] = 0;
            }
            out.found = true;
            out.is_dev = pkg.flags.dev;
            out.is_direct = pkg.flags.direct;
        }

        // Any inbound peer edge marks the target as a peer dependency.
        for (lf.getPackageDeps(pkg)) |dep| {
            const dep_pkg = &lf.packages[dep.package_index];
            const dep_name = dep_pkg.name.slice(lf.string_table);
            if (std.mem.eql(u8, dep_name, target_name) and dep.flags.peer) {
                out.is_peer = true;
            }
        }
    }
    return 0;
}
/// C ABI: report every lockfile package that depends on `package_name`, one
/// callback per dependent, plus a synthetic "package.json" entry when the
/// target is a direct dependency. Returns the dependent count, -1 when the
/// lockfile cannot be opened. Entries whose strings exceed the fixed buffers
/// are counted but not reported.
export fn pkg_why(
    lockfile_path: [*:0]const u8,
    package_name: [*:0]const u8,
    callback: DepCallback,
    user_data: ?*anyopaque,
) c_int {
    const lf = lockfile.Lockfile.open(std.mem.span(lockfile_path)) catch return -1;
    defer @constCast(&lf).close();

    const target_name = std.mem.span(package_name);
    var count: c_int = 0;

    var name_buf: [512]u8 = undefined;
    var ver_buf: [64]u8 = undefined;
    var constraint_buf: [128]u8 = undefined;

    // Pass 1: inbound edges — packages whose dependency list names the target.
    for (lf.packages) |*pkg| {
        for (lf.getPackageDeps(pkg)) |dep| {
            const dep_pkg = &lf.packages[dep.package_index];
            const dep_name = dep_pkg.name.slice(lf.string_table);
            if (!std.mem.eql(u8, dep_name, target_name)) continue;

            const pkg_name = pkg.name.slice(lf.string_table);
            const constraint = dep.constraint.slice(lf.string_table);

            if (callback) |cb| {
                if (pkg_name.len < name_buf.len) {
                    @memcpy(name_buf[0..pkg_name.len], pkg_name);
                    name_buf[pkg_name.len] = 0;

                    const ver_str = pkg.versionString(global_allocator, lf.string_table) catch continue;
                    defer global_allocator.free(ver_str);
                    if (ver_str.len < ver_buf.len) {
                        @memcpy(ver_buf[0..ver_str.len], ver_str);
                        ver_buf[ver_str.len] = 0;

                        if (constraint.len < constraint_buf.len) {
                            @memcpy(constraint_buf[0..constraint.len], constraint);
                            constraint_buf[constraint.len] = 0;

                            const dep_type = DepType{
                                .peer = dep.flags.peer,
                                .dev = dep.flags.dev or pkg.flags.dev,
                                .optional = dep.flags.optional,
                                .direct = pkg.flags.direct,
                            };
                            cb(@ptrCast(&name_buf), @ptrCast(&ver_buf), @ptrCast(&constraint_buf), dep_type, user_data);
                        }
                    }
                }
            }
            count += 1;
        }
    }

    // Pass 2: the project manifest itself, for direct dependencies.
    for (lf.packages) |*pkg| {
        const pkg_name = pkg.name.slice(lf.string_table);
        if (!(std.mem.eql(u8, pkg_name, target_name) and pkg.flags.direct)) continue;

        if (callback) |cb| {
            const direct_str = "package.json";
            var direct_buf: [16]u8 = undefined;
            @memcpy(direct_buf[0..direct_str.len], direct_str);
            direct_buf[direct_str.len] = 0;

            var empty_buf: [1]u8 = .{0};
            var dep_buf: [16]u8 = undefined;
            const constraint_str = "dependencies";
            @memcpy(dep_buf[0..constraint_str.len], constraint_str);
            dep_buf[constraint_str.len] = 0;

            const dep_type = DepType{
                .peer = false,
                .dev = pkg.flags.dev,
                .optional = false,
                .direct = true,
            };
            cb(@ptrCast(&direct_buf), @ptrCast(&empty_buf), @ptrCast(&dep_buf), dep_type, user_data);
        }
        count += 1;
    }

    return count;
}

/// C ABI: invoke `callback` with (name, command) for every entry in the
/// manifest's "scripts" object. Returns the script count, -1 on parse failure,
/// 0 when there is no scripts object. Oversized entries are counted only.
export fn pkg_list_scripts(
    package_json_path: [*:0]const u8,
    callback: ?*const fn ([*:0]const u8, [*:0]const u8, ?*anyopaque) callconv(.c) void,
    user_data: ?*anyopaque,
) c_int {
    var doc = json.JsonDoc.parseFile(std.mem.span(package_json_path)) catch return -1;
    defer doc.deinit();

    const root_val = doc.root();
    const scripts_obj = root_val.getObject("scripts") orelse return 0;

    var it = scripts_obj.objectIterator() orelse return 0;
    defer it.deinit();

    var count: c_int = 0;
    while (it.next()) |entry| {
        if (callback) |cb| {
            var name_buf: [256]u8 = undefined;
            var cmd_buf: [4096]u8 = undefined;

            if (entry.key.len < name_buf.len) {
                @memcpy(name_buf[0..entry.key.len], entry.key);
                name_buf[entry.key.len] = 0;

                if (entry.value.asString()) |cmd| {
                    if (cmd.len < cmd_buf.len) {
                        @memcpy(cmd_buf[0..cmd.len], cmd);
                        cmd_buf[cmd.len] = 0;
                        cb(@ptrCast(&name_buf), @ptrCast(&cmd_buf), user_data);
                    }
                }
            }
        }
        count += 1;
    }

    return count;
}

/// C ABI: fetch registry metadata for `package_spec` (name or name@version)
/// and populate `out` plus the context's dist-tag/maintainer/dependency lists
/// (read back via the pkg_info_* accessors). Strings are interned through
/// `storeInfoString` so they outlive the arena reset.
export fn pkg_info(
    ctx: ?*PkgContext,
    package_spec: [*:0]const u8,
    out: *PkgInfo,
) PkgError {
    const c = ctx orelse return .invalid_argument;
    c.clearInfo();
    _ = c.arena_state.reset(.retain_capacity);
    const arena = c.arena_state.allocator();

    const http = c.http orelse return .network_error;
    const spec = std.mem.span(package_spec);

    // "@scope/name" has its "@" at index 0, so a version suffix is only split
    // off when the last "@" is not the leading scope marker.
    var name: []const u8 = spec;
    var requested_version: ?[]const u8 = null;
    if (std.mem.lastIndexOf(u8, spec, "@")) |at_idx| {
        if (at_idx > 0) {
            name = spec[0..at_idx];
            requested_version = spec[at_idx + 1 ..];
        }
    }

    const data = http.fetchMetadataFull(name, true, c.allocator) catch |err| {
        c.setErrorFmt("Failed to fetch package info: {}", .{err});
        return .network_error;
    };
    defer c.allocator.free(data);

    var doc = json.JsonDoc.parse(data) catch {
        c.setError("Failed to parse package metadata");
        return .resolve_error;
    };
    defer doc.deinit();
    const root = doc.root();

    const versions_obj = root.getObject("versions") orelse {
        c.setError("No versions found");
        return .not_found;
    };

    var versions_iter = versions_obj.objectIterator() orelse return .resolve_error;
    defer versions_iter.deinit();
    var version_count: u32 = 0;
    while (versions_iter.next()) |_| version_count += 1;

    // Requested version wins; otherwise use the "latest" dist-tag.
    var version_str: []const u8 = "";
    if (requested_version) |rv| {
        version_str = rv;
    } else if (root.getObject("dist-tags")) |tags| {
        version_str = tags.getString("latest") orelse "";
    }

    const version_z = arena.dupeZ(u8, version_str) catch return .out_of_memory;

    const version_obj = versions_obj.getObject(version_z) orelse {
        c.setErrorFmt("Version {s} not found", .{version_str});
        return .not_found;
    };

    var dep_count: u32 = 0;
    if (version_obj.getObject("dependencies")) |deps| {
        var deps_iter = deps.objectIterator() orelse return .resolve_error;
        defer deps_iter.deinit();
        while (deps_iter.next()) |entry| {
            dep_count += 1;
            if (entry.value.asString()) |ver| {
                c.info_dependencies.append(c.allocator, .{
                    .name = c.storeInfoString(entry.key) catch continue,
                    .version = c.storeInfoString(ver) catch continue,
                }) catch continue;
            }
        }
    }

    const dist = version_obj.getObject("dist");
    const unpacked_size: u64 = if (dist) |d| @as(u64, @intCast(d.getInt("unpackedSize") orelse 0)) else 0;

    // Flatten the keywords array into a single comma-separated string.
    var keywords_buf = std.ArrayListUnmanaged(u8){};
    defer keywords_buf.deinit(c.allocator);
    if (version_obj.getArray("keywords")) |kw_arr| {
        var kw_iter = kw_arr.arrayIterator() orelse return .resolve_error;
        defer kw_iter.deinit();
        var first = true;
        while (kw_iter.next()) |kw_val| {
            if (kw_val.asString()) |kw| {
                if (!first) keywords_buf.appendSlice(c.allocator, ", ") catch {};
                keywords_buf.appendSlice(c.allocator, kw) catch {};
                first = false;
            }
        }
    }

    out.* = .{
        .name = c.storeInfoString(root.getString("name") orelse name) catch return .out_of_memory,
        .version = c.storeInfoString(version_str) catch return .out_of_memory,
        .description = c.storeInfoString(version_obj.getString("description") orelse "") catch return .out_of_memory,
        .license = c.storeInfoString(version_obj.getString("license") orelse "") catch return .out_of_memory,
        .homepage = c.storeInfoString(version_obj.getString("homepage") orelse "") catch return .out_of_memory,
        .tarball = c.storeInfoString(if (dist) |d| d.getString("tarball") orelse "" else "") catch return .out_of_memory,
        .shasum = c.storeInfoString(if (dist) |d| d.getString("shasum") orelse "" else "") catch return .out_of_memory,
        .integrity = c.storeInfoString(if (dist) |d| d.getString("integrity") orelse "" else "") catch return .out_of_memory,
        .keywords = c.storeInfoString(keywords_buf.items) catch return .out_of_memory,
        .published = c.storeInfoString(if (root.getObject("time")) |t| t.getString(version_z) orelse "" else "") catch return .out_of_memory,
        .dep_count = dep_count,
        .version_count = version_count,
        .unpacked_size = unpacked_size,
    };

    if (root.getObject("dist-tags")) |tags| {
        var tags_iter = tags.objectIterator() orelse return .ok;
        defer tags_iter.deinit();
        while (tags_iter.next()) |entry| {
            if (entry.value.asString()) |ver| {
                c.info_dist_tags.append(c.allocator, .{
                    .tag = c.storeInfoString(entry.key) catch continue,
                    .version = c.storeInfoString(ver) catch continue,
                }) catch continue;
            }
        }
    }

    if (root.getArray("maintainers")) |maint_arr| {
        var maint_iter = maint_arr.arrayIterator() orelse return .ok;
        defer maint_iter.deinit();
        while (maint_iter.next()) |maint_val| {
            const maint_name = maint_val.getString("name") orelse continue;
            const maint_email = maint_val.getString("email") orelse "";
            c.info_maintainers.append(c.allocator, .{
                .name = c.storeInfoString(maint_name) catch continue,
                .email = c.storeInfoString(maint_email) catch continue,
            }) catch continue;
        }
    }

    return .ok;
}
catch return .out_of_memory; 2240 + 2241 + const version_obj = versions_obj.getObject(version_z) orelse { 2242 + c.setErrorFmt("Version {s} not found", .{version_str}); 2243 + return .not_found; 2244 + }; 2245 + 2246 + var dep_count: u32 = 0; 2247 + if (version_obj.getObject("dependencies")) |deps| { 2248 + var deps_iter = deps.objectIterator() orelse return .resolve_error; 2249 + defer deps_iter.deinit(); 2250 + while (deps_iter.next()) |entry| { 2251 + dep_count += 1; 2252 + if (entry.value.asString()) |ver| { 2253 + c.info_dependencies.append(c.allocator, .{ 2254 + .name = c.storeInfoString(entry.key) catch continue, 2255 + .version = c.storeInfoString(ver) catch continue, 2256 + }) catch continue; 2257 + } 2258 + } 2259 + } 2260 + 2261 + const dist = version_obj.getObject("dist"); 2262 + const unpacked_size: u64 = if (dist) |d| @as(u64, @intCast(d.getInt("unpackedSize") orelse 0)) else 0; 2263 + 2264 + var keywords_buf = std.ArrayListUnmanaged(u8){}; 2265 + defer keywords_buf.deinit(c.allocator); 2266 + if (version_obj.getArray("keywords")) |kw_arr| { 2267 + var kw_iter = kw_arr.arrayIterator() orelse return .resolve_error; 2268 + defer kw_iter.deinit(); 2269 + var first = true; 2270 + while (kw_iter.next()) |kw_val| { 2271 + if (kw_val.asString()) |kw| { 2272 + if (!first) keywords_buf.appendSlice(c.allocator, ", ") catch {}; 2273 + keywords_buf.appendSlice(c.allocator, kw) catch {}; 2274 + first = false; 2275 + } 2276 + } 2277 + } 2278 + 2279 + out.* = .{ 2280 + .name = c.storeInfoString(root.getString("name") orelse name) catch return .out_of_memory, 2281 + .version = c.storeInfoString(version_str) catch return .out_of_memory, 2282 + .description = c.storeInfoString(version_obj.getString("description") orelse "") catch return .out_of_memory, 2283 + .license = c.storeInfoString(version_obj.getString("license") orelse "") catch return .out_of_memory, 2284 + .homepage = c.storeInfoString(version_obj.getString("homepage") orelse "") catch return .out_of_memory, 
// NOTE(review): this chunk begins mid-function. The lines below are the tail of a
// package-info routine whose header is above this view: the remaining fields of the
// result struct, then collection of dist-tags and maintainers into context arrays.
        .tarball = c.storeInfoString(if (dist) |d| d.getString("tarball") orelse "" else "") catch return .out_of_memory,
        .shasum = c.storeInfoString(if (dist) |d| d.getString("shasum") orelse "" else "") catch return .out_of_memory,
        .integrity = c.storeInfoString(if (dist) |d| d.getString("integrity") orelse "" else "") catch return .out_of_memory,
        .keywords = c.storeInfoString(keywords_buf.items) catch return .out_of_memory,
        // Publish timestamp is looked up in the registry's "time" object keyed by version.
        .published = c.storeInfoString(if (root.getObject("time")) |t| t.getString(version_z) orelse "" else "") catch return .out_of_memory,
        .dep_count = dep_count,
        .version_count = version_count,
        .unpacked_size = unpacked_size,
    };

    // Collect dist-tags (e.g. "latest" -> version) into c.info_dist_tags.
    // Entries whose value is not a string, or whose interning fails, are skipped.
    if (root.getObject("dist-tags")) |tags| {
        var tags_iter = tags.objectIterator() orelse return .ok;
        defer tags_iter.deinit();
        while (tags_iter.next()) |entry| {
            if (entry.value.asString()) |ver| {
                c.info_dist_tags.append(c.allocator, .{
                    .tag = c.storeInfoString(entry.key) catch continue,
                    .version = c.storeInfoString(ver) catch continue,
                }) catch continue;
            }
        }
    }

    // Collect maintainers; a maintainer without a "name" is skipped, email is optional.
    if (root.getArray("maintainers")) |maint_arr| {
        var maint_iter = maint_arr.arrayIterator() orelse return .ok;
        defer maint_iter.deinit();
        while (maint_iter.next()) |maint_val| {
            const maint_name = maint_val.getString("name") orelse continue;
            const maint_email = maint_val.getString("email") orelse "";
            c.info_maintainers.append(c.allocator, .{
                .name = c.storeInfoString(maint_name) catch continue,
                .email = c.storeInfoString(maint_email) catch continue,
            }) catch continue;
        }
    }

    return .ok;
}

// Number of dist-tags collected by the last info query; 0 for a null context.
export fn pkg_info_dist_tag_count(ctx: ?*const PkgContext) u32 {
    const c = ctx orelse return 0;
    return @intCast(c.info_dist_tags.items.len);
}

// Copy dist-tag `index` into `out`. Returns .invalid_argument for a null
// context or an out-of-range index.
export fn pkg_info_get_dist_tag(ctx: ?*const PkgContext, index: u32, out: *DistTag) PkgError {
    const c = ctx orelse return .invalid_argument;
    if (index >= c.info_dist_tags.items.len) return .invalid_argument;
    out.* = c.info_dist_tags.items[index];
    return .ok;
}

// Number of maintainers collected by the last info query; 0 for a null context.
export fn pkg_info_maintainer_count(ctx: ?*const PkgContext) u32 {
    const c = ctx orelse return 0;
    return @intCast(c.info_maintainers.items.len);
}

// Copy maintainer `index` into `out`; .invalid_argument on null context or bad index.
export fn pkg_info_get_maintainer(ctx: ?*const PkgContext, index: u32, out: *Maintainer) PkgError {
    const c = ctx orelse return .invalid_argument;
    if (index >= c.info_maintainers.items.len) return .invalid_argument;
    out.* = c.info_maintainers.items[index];
    return .ok;
}

// Number of dependencies collected by the last info query; 0 for a null context.
export fn pkg_info_dependency_count(ctx: ?*const PkgContext) u32 {
    const c = ctx orelse return 0;
    return @intCast(c.info_dependencies.items.len);
}

// Copy dependency `index` into `out`; .invalid_argument on null context or bad index.
export fn pkg_info_get_dependency(ctx: ?*const PkgContext, index: u32, out: *Dependency) PkgError {
    const c = ctx orelse return .invalid_argument;
    if (index >= c.info_dependencies.items.len) return .invalid_argument;
    out.* = c.info_dependencies.items[index];
    return .ok;
}

// Install `package_spec` (name[@version], scoped "@org/name[@ver]" supported) into a
// per-package temp tree under <cache_dir>/exec/<name>, then write the resolved
// absolute path of its node_modules/.bin/<bin> entry into out_bin_path
// (NUL-terminated; .io_error if it does not fit in out_bin_path_len).
// The temp tree is reused across calls and rebuilt when older than 24h.
export fn pkg_exec_temp(
    ctx: ?*PkgContext,
    package_spec: [*:0]const u8,
    out_bin_path: [*]u8,
    out_bin_path_len: usize,
) PkgError {
    const c = ctx orelse return .invalid_argument;
    // Scratch allocations below live until the next arena reset on this context.
    _ = c.arena_state.reset(.retain_capacity);
    const arena_alloc = c.arena_state.allocator();

    const spec_str = std.mem.span(package_spec);

    var pkg_name: []const u8 = spec_str;
    var bin_name: []const u8 = spec_str;
    var version_constraint: []const u8 = "latest";

    // Split "name@ver". A leading '@' means a scoped package, so the version
    // separator is the second '@' (if any).
    if (std.mem.indexOf(u8, spec_str, "@")) |at_idx| {
        if (at_idx == 0) {
            if (std.mem.indexOfPos(u8, spec_str, 1, "@")) |second_at| {
                pkg_name = spec_str[0..second_at];
                version_constraint = spec_str[second_at + 1 ..];
            }
        } else {
            pkg_name = spec_str[0..at_idx];
            version_constraint = spec_str[at_idx + 1 ..];
        }
    }

    // Default binary name is the last path segment ("@org/tool" -> "tool").
    if (std.mem.lastIndexOfScalar(u8, pkg_name, '/')) |slash| {
        bin_name = pkg_name[slash + 1 ..];
    } else {
        bin_name = pkg_name;
    }

    const exec_base = std.fmt.allocPrint(arena_alloc, "{s}/exec", .{c.cache_dir}) catch return .out_of_memory;
    const temp_nm_path = std.fmt.allocPrint(arena_alloc, "{s}/{s}", .{exec_base, pkg_name}) catch return .out_of_memory;
    const temp_pkg_json = std.fmt.allocPrint(arena_alloc, "{s}/package.json", .{temp_nm_path}) catch return .out_of_memory;
    const temp_nm_dir = std.fmt.allocPrint(arena_alloc, "{s}/node_modules", .{temp_nm_path}) catch return .out_of_memory;
    const temp_lockfile = std.fmt.allocPrint(arena_alloc, "{s}/ant.lockb", .{temp_nm_path}) catch return .out_of_memory;

    // Best-effort eviction: if a cached exec tree for this package is older than
    // 24h (by mtime), delete it so we reinstall fresh. All failures are ignored.
    if (std.fs.cwd().openDir(exec_base, .{ .iterate = true })) |dir| {
        var d = dir;
        defer d.close();

        const stat = d.statFile(pkg_name) catch null;
        if (stat) |s| {
            const now: i128 = std.time.nanoTimestamp();
            const mtime: i128 = s.mtime;
            const age_ns = now - mtime;
            const hours_24_ns: i128 = 24 * 60 * 60 * 1_000_000_000;

            if (age_ns > hours_24_ns) {
                debug.log("exec: cleaning stale cache for {s} (age: {d}h)", .{
                    pkg_name, @divFloor(age_ns, 60 * 60 * 1_000_000_000),
                });
                d.deleteTree(pkg_name) catch {};
            }
        }
    } else |_| {}

    std.fs.cwd().makePath(temp_nm_path) catch {};

    // Synthesize a one-dependency package.json for the resolver.
    // NOTE(review): pkg_name/version_constraint are interpolated unescaped; a spec
    // containing '"' or '\' would produce invalid JSON — confirm upstream validation.
    const pkg_json_content = std.fmt.allocPrint(arena_alloc,
        \\{{"dependencies":{{"{s}":"{s}"}}}}
    , .{pkg_name, version_constraint}) catch return .out_of_memory;

    const pkg_json_file = std.fs.cwd().createFile(temp_pkg_json, .{}) catch {
        c.setError("Failed to create temp package.json");
        return .io_error;
    };
    pkg_json_file.writeAll(pkg_json_content) catch {
        pkg_json_file.close();
        c.setError("Failed to write temp package.json");
        return .io_error;
    };
    pkg_json_file.close();

    const http = c.http orelse return .network_error;
    const db = c.cache_db orelse return .cache_error;

    // Interleaved resolve + tarball download: the resolver callback queues fetches
    // as packages resolve; http.run() below drains them.
    var interleaved = InterleavedContext.init(c.allocator, arena_alloc, db, http, c);
    defer interleaved.deinit();

    var res = resolver.Resolver.init(
        arena_alloc,
        c.allocator,
        &c.string_pool,
        http,
        db,
        if (c.options.registry_url) |url| std.mem.span(url) else "https://registry.npmjs.org",
        &c.metadata_cache,
    );
    defer res.deinit();

    res.setOnPackageResolved(InterleavedContext.onPackageResolved, &interleaved);
    res.resolveFromPackageJson(temp_pkg_json) catch |err| {
        c.setErrorFmt("Failed to resolve {s}: {}", .{ pkg_name, err });
        return .resolve_error;
    };

    debug.log("exec: resolved {d} packages, queued {d} tarballs", .{
        interleaved.callbacks_received, interleaved.tarballs_queued,
    });

    http.run() catch {};

    var pkg_linker = linker.Linker.init(c.allocator);
    defer pkg_linker.deinit();

    pkg_linker.setNodeModulesPath(temp_nm_dir) catch |err| {
        c.setErrorFmt("Failed to set up exec directory: {}", .{err});
        return .io_error;
    };

    // Link freshly extracted tarballs, recording each in the cache db first.
    // Per-package failures are skipped, not fatal.
    for (interleaved.extract_contexts.items) |ectx| {
        defer ectx.ext.deinit();
        if (ectx.has_error) continue;

        const stats = ectx.ext.stats();
        db.insert(&.{
            .integrity = ectx.integrity,
            .path = ectx.cache_path,
            .unpacked_size = stats.bytes,
            .file_count = stats.files,
            .cached_at = std.time.timestamp(),
        }, ectx.pkg_name, ectx.version_str) catch continue;

        pkg_linker.linkPackage(.{
            .cache_path = ectx.cache_path,
            .node_modules_path = temp_nm_dir,
            .name = ectx.pkg_name,
            .parent_path = ectx.parent_path,
            .file_count = stats.files,
            .has_bin = ectx.has_bin,
        }) catch continue;
    }

    // Link packages already present in the cache (no tarball was fetched for them).
    var resolved_iter = res.resolved.valueIterator();
    while (resolved_iter.next()) |pkg_ptr| {
        const pkg = pkg_ptr.*;
        if (db.hasIntegrity(&pkg.integrity)) {
            const pkg_cache_path = db.getPackagePath(&pkg.integrity, arena_alloc) catch continue;
            pkg_linker.linkPackage(.{
                .cache_path = pkg_cache_path,
                .node_modules_path = temp_nm_dir,
                .name = pkg.name.slice(),
                .parent_path = pkg.parent_path,
                .file_count = 0,
                .has_bin = pkg.has_bin,
            }) catch continue;
        }
    }

    res.writeLockfile(temp_lockfile) catch {};

    // Every resolved package is treated as trusted for postinstall scripts here,
    // since the user explicitly asked to execute this package.
    var trusted = std.StringHashMap(void).init(arena_alloc);
    var resolved_iter2 = res.resolved.valueIterator();
    while (resolved_iter2.next()) |pkg_ptr| {
        trusted.put(pkg_ptr.*.name.slice(), {}) catch continue;
    }
    runTrustedPostinstall(c, &trusted, temp_nm_dir, arena_alloc);

    var bin_path_buf: [std.fs.max_path_bytes]u8 = undefined;
    const bin_link_path = std.fmt.bufPrint(&bin_path_buf, "{s}/.bin/{s}", .{ temp_nm_dir, bin_name }) catch return .io_error;

    debug.log("exec: looking for bin at {s}", .{bin_link_path});

    std.fs.cwd().access(bin_link_path, .{}) catch {
        c.setErrorFmt("Binary '{s}' not found in package", .{bin_name});
        return .not_found;
    };

    // Resolve the .bin symlink to an absolute path for the caller.
    var real_path_buf: [std.fs.max_path_bytes]u8 = undefined;
    const real_path = std.fs.cwd().realpath(bin_link_path, &real_path_buf) catch return .io_error;

    // Need room for the trailing NUL as well.
    if (real_path.len >= out_bin_path_len) return .io_error;

    @memcpy(out_bin_path[0..real_path.len], real_path);
    out_bin_path[real_path.len] = 0;

    return .ok;
}

// Returns the global install root "$HOME/.ant/pkg/global". Caller frees.
fn getGlobalDir(allocator: std.mem.Allocator) ![]const u8 {
    const home = try getHomeDir(allocator);
    defer allocator.free(home);
    return std.fmt.allocPrint(allocator, "{s}/.ant/pkg/global", .{home});
}

// Returns the global bin directory "$HOME/.ant/bin". Caller frees.
fn getGlobalBinDir(allocator: std.mem.Allocator) ![]const u8 {
    const home = try getHomeDir(allocator);
    defer allocator.free(home);
    return std.fmt.allocPrint(allocator, "{s}/.ant/bin", .{home});
}

// Create <global_dir>/package.json with an empty dependency map if absent.
fn ensureGlobalPackageJson(allocator: std.mem.Allocator, global_dir: []const u8) !void {
    const pkg_json_path = try std.fmt.allocPrint(allocator, "{s}/package.json", .{global_dir});
    defer allocator.free(pkg_json_path);

    std.fs.cwd().access(pkg_json_path, .{}) catch {
        std.fs.cwd().makePath(global_dir) catch {};
        const file = try std.fs.cwd().createFile(pkg_json_path, .{});
        defer file.close();
        try file.writeAll("{\"dependencies\":{}}\n");
    };
}

// Read the installed package's package.json "bin" field and symlink each declared
// binary into the global bin dir. Best-effort: any failure silently aborts.
fn linkGlobalBins(allocator: std.mem.Allocator, nm_path: []const u8, pkg_name: []const u8) void {
    const bin_dir = getGlobalBinDir(allocator) catch return;
    defer allocator.free(bin_dir);

    std.fs.cwd().makePath(bin_dir) catch return;

    // NOTE(review): despite the name, this is the package's install dir, not its bin dir.
    const pkg_bin_dir = std.fmt.allocPrint(allocator, "{s}/{s}", .{nm_path, pkg_name}) catch return;
    defer allocator.free(pkg_bin_dir);

    const pkg_json_path = std.fmt.allocPrint(allocator, "{s}/package.json", .{pkg_bin_dir}) catch return;
    defer allocator.free(pkg_json_path);

    const content = std.fs.cwd().readFileAlloc(allocator, pkg_json_path, 1024 * 1024) catch return;
    defer allocator.free(content);

    const parsed = std.json.parseFromSlice(std.json.Value, allocator, content, .{}) catch return;
    defer parsed.deinit();

    const bin_val = parsed.value.object.get("bin") orelse return;

    switch (bin_val) {
        // "bin": "path" — single binary named after the package's last path segment.
        .string => |s| {
            const base_name = if (std.mem.lastIndexOfScalar(u8, pkg_name, '/')) |idx|
                pkg_name[idx + 1..] else pkg_name;
            linkSingleBin(allocator, bin_dir, nm_path, pkg_name, base_name, s);
        },
        // "bin": {"name": "path", ...} — one link per entry.
        .object => |obj| {
            for (obj.keys(), obj.values()) |bin_name, path_val| {
                if (path_val == .string) linkSingleBin(allocator, bin_dir, nm_path, pkg_name, bin_name, path_val.string);
            }
        },
        else => {},
    }
}

// Symlink <bin_dir>/<bin_name> -> <nm_path>/<pkg_name>/<bin_rel_path>,
// replacing any existing link. Best-effort.
fn linkSingleBin(allocator: std.mem.Allocator, bin_dir: []const u8, nm_path: []const u8, pkg_name: []const u8, bin_name: []const u8, bin_rel_path: []const u8) void {
    const target = std.fmt.allocPrint(allocator, "{s}/{s}/{s}", .{nm_path, pkg_name, bin_rel_path}) catch return;
    defer allocator.free(target);

    const link_path = std.fmt.allocPrint(allocator, "{s}/{s}", .{bin_dir, bin_name}) catch return;
    defer allocator.free(link_path);

    std.fs.cwd().deleteFile(link_path) catch {};
    linker.createSymlinkAbsolute(target, link_path);

    debug.log("linked global bin: {s} -> {s}", .{link_path, target});
}

// Remove every symlink in the global bin dir whose target path contains
// "/<pkg_name>/" or ends with "/<pkg_name>". Best-effort.
fn unlinkGlobalBins(allocator: std.mem.Allocator, pkg_name: []const u8) void {
    const bin_dir = getGlobalBinDir(allocator) catch return;
    defer allocator.free(bin_dir);

    var dir = std.fs.cwd().openDir(bin_dir, .{ .iterate = true }) catch return;
    defer dir.close();

    var iter = dir.iterate();
    while (iter.next() catch null) |entry| {
        if (entry.kind != .sym_link) continue;

        var target_buf: [std.fs.max_path_bytes]u8 = undefined;
        const target = dir.readLink(entry.name, &target_buf) catch continue;

        // NOTE(review): these patterns are loop-invariant and could be built once
        // before the loop.
        const pattern = std.fmt.allocPrint(allocator, "/{s}/", .{pkg_name}) catch continue;
        defer allocator.free(pattern);
        const pattern_end = std.fmt.allocPrint(allocator, "/{s}", .{pkg_name}) catch continue;
        defer allocator.free(pattern_end);

        if (std.mem.indexOf(u8, target, pattern) != null or std.mem.endsWith(u8, target, pattern_end)) {
            dir.deleteFile(entry.name) catch continue;
            debug.log("unlinked global bin: {s}", .{entry.name});
        }
    }
}

// Globally install `package_spec`: add it to the global package.json, resolve and
// install into the global node_modules, then symlink its binaries into $HOME/.ant/bin.
export fn pkg_add_global(
    ctx: ?*PkgContext,
    package_spec: [*:0]const u8,
) PkgError {
    const c = ctx orelse return .invalid_argument;
    const allocator = c.allocator;

    const global_dir = getGlobalDir(allocator) catch {
        c.setError("HOME not set");
        return .invalid_argument;
    };
    defer allocator.free(global_dir);

    ensureGlobalPackageJson(allocator, global_dir) catch {
        c.setError("Failed to create global package.json");
        return .io_error;
    };

    const pkg_json_path = std.fmt.allocPrintSentinel(allocator, "{s}/package.json", .{global_dir}, 0) catch return .out_of_memory;
    defer allocator.free(pkg_json_path);
    const lockfile_path = std.fmt.allocPrintSentinel(allocator, "{s}/ant.lockb", .{global_dir}, 0) catch return .out_of_memory;
    defer allocator.free(lockfile_path);
    const nm_path = std.fmt.allocPrintSentinel(allocator, "{s}/node_modules", .{global_dir}, 0) catch return .out_of_memory;
    defer allocator.free(nm_path);

    // Strip any version suffix to get the bare package name (scoped specs keep
    // their leading "@org/" part); same parsing as pkg_exec_temp.
    const spec_str = std.mem.span(package_spec);
    var pkg_name: []const u8 = spec_str;

    if (std.mem.indexOf(u8, spec_str, "@")) |at_idx| {
        if (at_idx == 0) {
            if (std.mem.indexOfPos(u8, spec_str, 1, "@")) |second_at| {
                pkg_name = spec_str[0..second_at];
            }
        } else {
            pkg_name = spec_str[0..at_idx];
        }
    }

    const add_result = pkg_add(c, pkg_json_path.ptr, package_spec, false);
    if (add_result != .ok) return add_result;

    const install_result = pkg_resolve_and_install(c, pkg_json_path.ptr, lockfile_path.ptr, nm_path.ptr);
    if (install_result != .ok) return install_result;

    linkGlobalBins(allocator, nm_path, pkg_name);

    return .ok;
}

// Globally uninstall `package_name`: unlink its binaries, drop it from the global
// package.json, and re-run the install to prune the global node_modules.
export fn pkg_remove_global(
    ctx: ?*PkgContext,
    package_name: [*:0]const u8,
) PkgError {
    const c = ctx orelse return .invalid_argument;
    const allocator = c.allocator;

    const global_dir = getGlobalDir(allocator) catch {
        c.setError("HOME not set");
        return .invalid_argument;
    };
    defer allocator.free(global_dir);

    const pkg_json_path = std.fmt.allocPrintSentinel(allocator, "{s}/package.json", .{global_dir}, 0) catch return .out_of_memory;
    defer allocator.free(pkg_json_path);
    const lockfile_path = std.fmt.allocPrintSentinel(allocator, "{s}/ant.lockb", .{global_dir}, 0) catch return .out_of_memory;
    defer allocator.free(lockfile_path);
    const nm_path = std.fmt.allocPrintSentinel(allocator, "{s}/node_modules", .{global_dir}, 0) catch return .out_of_memory;
    defer allocator.free(nm_path);

    const name_str = std.mem.span(package_name);

    // Bins are unlinked before removal so the symlink targets still exist for matching.
    unlinkGlobalBins(allocator, name_str);

    // NOTE(review): the two checks below are equivalent to
    // `if (remove_result != .ok) return remove_result;`.
    const remove_result = pkg_remove(c, pkg_json_path.ptr, package_name);
    if (remove_result != .ok and remove_result != .not_found) return remove_result;
    if (remove_result == .not_found) return .not_found;

    const install_result = pkg_resolve_and_install(c, pkg_json_path.ptr, lockfile_path.ptr, nm_path.ptr);
    if (install_result != .ok) return install_result;

    return .ok;
}

// Enumerate globally installed packages: for each dependency in the global
// package.json, read its installed package.json for the concrete version and
// invoke `callback(name, version, user_data)`. Version is "?" when unreadable.
export fn pkg_list_global(
    ctx: ?*PkgContext,
    callback: ?*const fn (name: [*:0]const u8, version: [*:0]const u8, user_data: ?*anyopaque) callconv(.c) void,
    user_data: ?*anyopaque,
) PkgError {
    const c = ctx orelse return .invalid_argument;
    _ = c.arena_state.reset(.retain_capacity);
    const arena_alloc = c.arena_state.allocator();

    const global_dir = getGlobalDir(arena_alloc) catch return .invalid_argument;
    const pkg_json_path = std.fmt.allocPrint(arena_alloc, "{s}/package.json", .{global_dir}) catch return .out_of_memory;
    const nm_path = std.fmt.allocPrint(arena_alloc, "{s}/node_modules", .{global_dir}) catch return .out_of_memory;

    const content = std.fs.cwd().readFileAlloc(arena_alloc, pkg_json_path, 1024 * 1024) catch return .not_found;

    const parsed = std.json.parseFromSlice(std.json.Value, arena_alloc, content, .{}) catch return .invalid_argument;
    defer parsed.deinit();

    const deps = parsed.value.object.get("dependencies") orelse return .ok;
    if (deps != .object) return .ok;

    const cb = callback orelse return .ok;

    for (deps.object.keys()) |dep_name| {
        const dep_pkg_json = std.fmt.allocPrint(arena_alloc, "{s}/{s}/package.json", .{nm_path, dep_name}) catch continue;

        const dep_content = std.fs.cwd().readFileAlloc(arena_alloc, dep_pkg_json, 256 * 1024) catch continue;
        const dep_parsed = std.json.parseFromSlice(std.json.Value, arena_alloc, dep_content, .{}) catch continue;
        defer dep_parsed.deinit();

        const version = if (dep_parsed.value.object.get("version")) |v|
            if (v == .string) v.string else "?" else "?";

        // Duplicate into NUL-terminated buffers for the C callback.
        const name_z = arena_alloc.dupeZ(u8, dep_name) catch continue;
        const version_z = arena_alloc.dupeZ(u8, version) catch continue;

        cb(name_z.ptr, version_z.ptr, user_data);
    }

    return .ok;
}
+14
src/pkg/tlsuv.zig
··· 1 + const uv = @import("uv.zig"); 2 + 3 + pub const stream_t = extern struct { 4 + data: ?*anyopaque = null, 5 + _pad: [1024]u8 = undefined, 6 + }; 7 + 8 + pub extern fn tlsuv_stream_init(*uv.loop_t, *stream_t, ?*anyopaque) c_int; 9 + pub extern fn tlsuv_stream_set_hostname(*stream_t, [*:0]const u8) c_int; 10 + pub extern fn tlsuv_stream_set_protocols(*stream_t, c_int, [*]const [*:0]const u8) c_int; 11 + pub extern fn tlsuv_stream_connect(*uv.connect_t, *stream_t, [*:0]const u8, c_int, uv.connect_cb) c_int; 12 + pub extern fn tlsuv_stream_read_start(*stream_t, uv.alloc_cb, uv.read_cb) c_int; 13 + pub extern fn tlsuv_stream_write(*uv.write_t, *stream_t, *uv.buf_t, uv.write_cb) c_int; 14 + pub extern fn tlsuv_stream_close(*stream_t, uv.close_cb) c_int;
+31
src/pkg/uv.zig
··· 1 + pub const loop_t = opaque {}; 2 + pub const handle_t = opaque {}; 3 + pub const stream_t = opaque {}; 4 + 5 + pub const buf_t = extern struct { 6 + base: [*c]u8, 7 + len: usize, 8 + }; 9 + 10 + pub const connect_t = extern struct { 11 + data: ?*anyopaque = null, 12 + _pad: [256]u8 = undefined, 13 + }; 14 + 15 + pub const write_t = extern struct { 16 + data: ?*anyopaque = null, 17 + _pad: [256]u8 = undefined, 18 + }; 19 + 20 + pub const RUN_DEFAULT: c_int = 0; 21 + pub const RUN_ONCE: c_int = 1; 22 + pub const RUN_NOWAIT: c_int = 2; 23 + 24 + pub const connect_cb = ?*const fn (*connect_t, c_int) callconv(.c) void; 25 + pub const close_cb = ?*const fn (*handle_t) callconv(.c) void; 26 + pub const alloc_cb = ?*const fn (*handle_t, usize, *buf_t) callconv(.c) void; 27 + pub const read_cb = ?*const fn (*stream_t, isize, *const buf_t) callconv(.c) void; 28 + pub const write_cb = ?*const fn (*write_t, c_int) callconv(.c) void; 29 + 30 + pub extern fn uv_default_loop() *loop_t; 31 + pub extern fn uv_run(*loop_t, c_int) c_int;
+1 -1
src/repl.c
··· 520 520 521 521 while (1) { 522 522 const char *prompt = multiline_buf ? "| " : "> "; 523 - printf("%s", prompt); 523 + fputs(prompt, stdout); 524 524 fflush(stdout); 525 525 526 526 ctrl_c_pressed = 0;
-1
src/tools/npm-shrinkwrap.json
··· 4 4 "requires": true, 5 5 "packages": { 6 6 "": { 7 - "name": "tools", 8 7 "dependencies": { 9 8 "esbuild": "^0.27.2" 10 9 }
+28 -1
src/utils.c
··· 3 3 4 4 #include <string.h> 5 5 #include <stdint.h> 6 + #include <pthread.h> 6 7 #include <argtable3.h> 8 + 9 + static char ant_semver_buf[32]; 10 + static pthread_once_t ant_semver_once = PTHREAD_ONCE_INIT; 11 + 12 + static void ant_semver_init(void) { 13 + const char *s = ANT_VERSION; 14 + int d = 0, i = 0; 15 + while (s[i] && d < 3 && i < 31) { 16 + if (s[i] == '.') d++; 17 + ant_semver_buf[i] = s[i]; i++; 18 + } 19 + ant_semver_buf[i - (d == 3)] = '\0'; 20 + } 21 + 22 + const char *ant_semver(void) { 23 + pthread_once(&ant_semver_once, ant_semver_init); 24 + return ant_semver_buf; 25 + } 7 26 8 27 uint64_t hash_key(const char *key, size_t len) { 9 28 uint64_t hash = 14695981039346656037ULL; ··· 73 92 " /_/" RESET " by @themackabu\n" 74 93 RESET; 75 94 76 - printf("%s", logo); 95 + fputs(logo, stdout); 77 96 78 97 printf("%s (released %s, %ld%s ago)\n", 79 98 ANT_VERSION, ··· 85 104 arg_freetable(argtable, ARGTABLE_COUNT); 86 105 87 106 return EXIT_SUCCESS; 107 + } 108 + 109 + void *try_oom(size_t size) { 110 + void *p = malloc(size); 111 + if (!p) { 112 + fputs("Error: out of memory\n", stderr); 113 + exit(EXIT_FAILURE); 114 + } return p; 88 115 }
+14
vendor/lmdb.wrap
··· 1 + [wrap-file] 2 + directory = openldap-LMDB_0.9.33 3 + source_url = https://git.openldap.org/openldap/openldap/-/archive/LMDB_0.9.33/openldap-LMDB_0.9.33.tar.bz2 4 + source_filename = openldap-LMDB_0.9.33.tar.bz2 5 + source_hash = d19d52725800177b89d235161c0af8ae8b2932207e3c9eb87e95b61f1925206d 6 + patch_filename = lmdb_0.9.33-1_patch.zip 7 + patch_url = https://wrapdb.mesonbuild.com/v2/lmdb_0.9.33-1/get_patch 8 + patch_hash = a3ef1b01fafc4f4dea30eb8c9a586b55c8c56ad1e8fc4ba3a389ca3fc29a3276 9 + source_fallback_url = https://github.com/mesonbuild/wrapdb/releases/download/lmdb_0.9.33-1/openldap-LMDB_0.9.33.tar.bz2 10 + wrapdb_version = 0.9.33-1 11 + 12 + [provide] 13 + lmdb = lmdb_dep 14 + program_names = mdb_stat, mdb_copy, mdb_dump, mdb_load
+13
vendor/nghttp2.wrap
··· 1 + [wrap-file] 2 + directory = nghttp2-1.62.1 3 + source_url = https://github.com/nghttp2/nghttp2/releases/download/v1.62.1/nghttp2-1.62.1.tar.xz 4 + source_filename = nghttp2-1.62.1.tar.xz 5 + source_hash = 2345d4dc136fda28ce243e0bb21f2e7e8ef6293d62c799abbf6f633a6887af72 6 + patch_filename = nghttp2_1.62.1-3_patch.zip 7 + patch_url = https://wrapdb.mesonbuild.com/v2/nghttp2_1.62.1-3/get_patch 8 + patch_hash = 4b53c0b0b89825ba61e542d9696b13f52ba33c80a7b6577d3d843aedf9572b1c 9 + source_fallback_url = https://github.com/mesonbuild/wrapdb/releases/download/nghttp2_1.62.1-3/nghttp2-1.62.1.tar.xz 10 + wrapdb_version = 1.62.1-3 11 + 12 + [provide] 13 + dependency_names = libnghttp2