# Kubernetes Operator for Tangled Spindles

init commit · Evan Jarrett · 6c6437b2 · +9808
**.devcontainer/devcontainer.json** (+25)

```json
{
  "name": "Kubebuilder DevContainer",
  "image": "golang:1.24",
  "features": {
    "ghcr.io/devcontainers/features/docker-in-docker:2": {},
    "ghcr.io/devcontainers/features/git:1": {}
  },

  "runArgs": ["--network=host"],

  "customizations": {
    "vscode": {
      "settings": {
        "terminal.integrated.shell.linux": "/bin/bash"
      },
      "extensions": [
        "ms-kubernetes-tools.vscode-kubernetes-tools",
        "ms-azuretools.vscode-docker"
      ]
    }
  },

  "onCreateCommand": "bash .devcontainer/post-install.sh"
}
```

**.devcontainer/post-install.sh** (+23)

```bash
#!/bin/bash
set -x

curl -Lo ./kind https://kind.sigs.k8s.io/dl/latest/kind-linux-amd64
chmod +x ./kind
mv ./kind /usr/local/bin/kind

curl -L -o kubebuilder https://go.kubebuilder.io/dl/latest/linux/amd64
chmod +x kubebuilder
mv kubebuilder /usr/local/bin/

KUBECTL_VERSION=$(curl -L -s https://dl.k8s.io/release/stable.txt)
curl -LO "https://dl.k8s.io/release/$KUBECTL_VERSION/bin/linux/amd64/kubectl"
chmod +x kubectl
mv kubectl /usr/local/bin/kubectl

docker network create -d=bridge --subnet=172.19.0.0/24 kind

kind version
kubebuilder version
docker --version
go version
kubectl version --client
```

**.dockerignore** (+3)

```
# More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file
# Ignore build and test binaries.
bin/
```

**.github/workflows/lint.yml** (+23)

```yaml
name: Lint

on:
  push:
  pull_request:

jobs:
  lint:
    name: Run on Ubuntu
    runs-on: ubuntu-latest
    steps:
      - name: Clone the code
        uses: actions/checkout@v4

      - name: Setup Go
        uses: actions/setup-go@v5
        with:
          go-version-file: go.mod

      - name: Run linter
        uses: golangci/golangci-lint-action@v8
        with:
          version: v2.1.0
```

**.github/workflows/test-e2e.yml** (+32)

```yaml
name: E2E Tests

on:
  push:
  pull_request:

jobs:
  test-e2e:
    name: Run on Ubuntu
    runs-on: ubuntu-latest
    steps:
      - name: Clone the code
        uses: actions/checkout@v4

      - name: Setup Go
        uses: actions/setup-go@v5
        with:
          go-version-file: go.mod

      - name: Install the latest version of kind
        run: |
          curl -Lo ./kind https://kind.sigs.k8s.io/dl/latest/kind-linux-amd64
          chmod +x ./kind
          sudo mv ./kind /usr/local/bin/kind

      - name: Verify kind installation
        run: kind version

      - name: Running Test e2e
        run: |
          go mod tidy
          make test-e2e
```

**.github/workflows/test.yml** (+23)

```yaml
name: Tests

on:
  push:
  pull_request:

jobs:
  test:
    name: Run on Ubuntu
    runs-on: ubuntu-latest
    steps:
      - name: Clone the code
        uses: actions/checkout@v4

      - name: Setup Go
        uses: actions/setup-go@v5
        with:
          go-version-file: go.mod

      - name: Running Tests
        run: |
          go mod tidy
          make test
```

**.gitignore** (+27)

```
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
bin/*
Dockerfile.cross

# Test binary, built with `go test -c`
*.test

# Output of the go coverage tool, specifically when used with LiteIDE
*.out

# Go workspace file
go.work

# Kubernetes Generated files - skip generated files, except for vendored files
!vendor/**/zz_generated.*

# editor and IDE paraphernalia
.idea
.vscode
*.swp
*.swo
*~
```

**.golangci.yml** (+52)

```yaml
version: "2"
run:
  allow-parallel-runners: true
linters:
  default: none
  enable:
    - copyloopvar
    - dupl
    - errcheck
    - ginkgolinter
    - goconst
    - gocyclo
    - govet
    - ineffassign
    - lll
    - misspell
    - nakedret
    - prealloc
    - revive
    - staticcheck
    - unconvert
    - unparam
    - unused
  settings:
    revive:
      rules:
        - name: comment-spacings
        - name: import-shadowing
  exclusions:
    generated: lax
    rules:
      - linters:
          - lll
        path: api/*
      - linters:
          - dupl
          - lll
        path: internal/*
    paths:
      - third_party$
      - builtin$
      - examples$
formatters:
  enable:
    - gofmt
    - goimports
  exclusions:
    generated: lax
    paths:
      - third_party$
      - builtin$
      - examples$
```

**.tangled/workflows/workflow-amd64.yaml** (+21)

```yaml
engine: kubernetes
image: golang:1.24-bookworm
architecture: amd64

steps:
  - name: download dependencies
    command: |
      go mod download

  - name: run tests
    command: |
      make test

  - name: build manager binary
    command: |
      make build

  - name: verify build artifacts
    command: |
      ls -lh bin/
      file bin/manager
```

**.tangled/workflows/workflow-arm64.yaml** (+21)

```yaml
engine: kubernetes
image: golang:1.24-bookworm
architecture: arm64

steps:
  - name: download dependencies
    command: |
      go mod download

  - name: run tests
    command: |
      make test

  - name: build manager binary
    command: |
      make build

  - name: verify build artifacts
    command: |
      ls -lh bin/
      file bin/manager
```

**ARCHITECTURE.md** (+360)

# Loom Architecture

Loom is a Kubernetes operator that runs tangled.org Spindle with a Kubernetes-native execution engine.

## Overview

```
┌─────────────────────────────────────────────┐
│              Loom Operator Pod              │
│                                             │
│   ┌────────────────────────────────────┐    │
│   │ Controller Manager                 │    │
│   │  - Watches SpindleSet CRD          │    │
│   │  - Monitors Kubernetes Jobs        │    │
│   │  - Reports status to spindle DB    │    │
│   │  - Creates Service endpoints       │    │
│   └────────────────────────────────────┘    │
│                                             │
│   ┌────────────────────────────────────┐    │
│   │ Embedded Spindle Server            │    │
│   │                                    │    │
│   │  HTTP Endpoints:                   │    │
│   │  - GET /events (WebSocket)         │    │
│   │  - GET /logs/{knot}/{rkey}/{name}  │    │
│   │  - POST /xrpc/sh.tangled.repo.*    │    │
│   │                                    │    │
│   │  Components:                       │    │
│   │  - EventConsumer (knot firehose)   │    │
│   │  - Database (SQLite)               │    │
│   │  - Queue (job queue)               │    │
│   │  - Vault (secrets manager)         │    │
│   │  - KubernetesEngine ──────────┐    │    │
│   └───────────────────────────────│────┘    │
└───────────────────────────────────│─────────┘
                                    │ creates

                     ┌──────────────────────────┐
                     │     Kubernetes Jobs      │
                     │   (one per workflow)     │
                     │                          │
                     │ ┌──────────────────────┐ │
                     │ │ Init Container:      │ │
                     │ │  - Clone repo        │ │
                     │ │  - Checkout commit   │ │
                     │ └──────────────────────┘ │
                     │                          │
                     │ ┌──────────────────────┐ │
                     │ │ Main Container:      │ │
                     │ │  - Execute all steps │ │
                     │ │    in sequence       │ │
                     │ └──────────────────────┘ │
                     └──────────────────────────┘
```

## Components

### Loom Operator

The Loom operator is a standard Kubernetes controller that:
1. Watches `SpindleSet` custom resources
2. Embeds a spindle server instance
3. Creates Kubernetes Services to expose spindle HTTP endpoints
4. Monitors Job status and reports to spindle's database

### Embedded Spindle Server

Instead of deploying separate spindle server instances, Loom **embeds** the spindle server:
- Runs in the same process as the controller
- Uses `spindle.New()` to initialize with `KubernetesEngine`
- Handles all spindle functionality: WebSocket connections, XRPC endpoints, database, queue, etc.

### KubernetesEngine

A custom engine implementation that uses Kubernetes Jobs instead of Docker containers.

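The lifecycle methods contrasted in the next section map directly onto the Kubernetes API. Below is a minimal sketch of that shape, assuming the three method names used throughout this document (`SetupWorkflow`, `RunStep`, `DestroyWorkflow`); the real Engine interface lives in `tangled.org/core/spindle/models` and its signatures may differ.

```go
package engine

import (
	"context"

	batchv1 "k8s.io/api/batch/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// kubernetesEngineSketch illustrates the Job-per-workflow shape described
// in this document. Names are illustrative, not the project's actual types.
type kubernetesEngineSketch struct {
	clientset kubernetes.Interface
	namespace string
}

// SetupWorkflow creates one Job that runs every step of the workflow.
func (e *kubernetesEngineSketch) SetupWorkflow(ctx context.Context, job *batchv1.Job) error {
	_, err := e.clientset.BatchV1().Jobs(e.namespace).Create(ctx, job, metav1.CreateOptions{})
	return err
}

// RunStep is a no-op: every step is already baked into the Job's script.
func (e *kubernetesEngineSketch) RunStep(ctx context.Context, stepIdx int) error {
	return nil
}

// DestroyWorkflow deletes the Job; the TTL controller also cleans up
// finished Jobs automatically.
func (e *kubernetesEngineSketch) DestroyWorkflow(ctx context.Context, name string) error {
	policy := metav1.DeletePropagationBackground
	return e.clientset.BatchV1().Jobs(e.namespace).Delete(ctx, name,
		metav1.DeleteOptions{PropagationPolicy: &policy})
}
```
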
## Execution Model: KubernetesEngine vs NixeryEngine

### NixeryEngine Pattern (Docker)

```
┌─────────────────────────────────────────────┐
│ SetupWorkflow()                             │
│  - docker create <image> cat                │
│  - docker start <container>                 │
│  - Keep container running                   │
└─────────────────────────────────────────────┘

┌─────────────────────────────────────────────┐
│ For each step:                              │
│   RunStep()                                 │
│   - docker exec <container> bash -c <cmd>   │
│   - Stream logs to WorkflowLogger           │
│   - Wait for completion                     │
│   - Return exit code                        │
└─────────────────────────────────────────────┘

┌─────────────────────────────────────────────┐
│ DestroyWorkflow()                           │
│  - docker stop <container>                  │
│  - docker rm <container>                    │
└─────────────────────────────────────────────┘
```

**Key characteristics:**
- One long-running container per workflow
- Steps executed sequentially via `docker exec`
- Spindle controls step execution timing
- Fine-grained logging per step

### KubernetesEngine Pattern (Jobs)

```
┌─────────────────────────────────────────────┐
│ SetupWorkflow()                             │
│  - Generate bash script with ALL steps      │
│  - Create Kubernetes Job                    │
│    - Init container: clone repo             │
│    - Main container: execute script         │
│  - Job runs to completion                   │
└─────────────────────────────────────────────┘

┌─────────────────────────────────────────────┐
│ RunStep() - NO-OP                           │
│ (Steps are already running in the Job)      │
└─────────────────────────────────────────────┘

┌─────────────────────────────────────────────┐
│ DestroyWorkflow()                           │
│  - Delete Kubernetes Job                    │
│  - TTL controller auto-cleans after 1 hour  │
└─────────────────────────────────────────────┘
```

**Key characteristics:**
- One Kubernetes Job per workflow
- All steps baked into a single bash script
- Job runs autonomously after creation
- Logs captured as a single stream per Job

## Why Different Execution Models?

### Docker: exec pattern is lightweight
- `docker exec` is a cheap operation
- Easy to run commands in existing container
- Low overhead for sequential execution

### Kubernetes: exec pattern is heavyweight
- `kubectl exec` involves API calls, auth, streaming setup
- Each exec requires network round-trips
- Kubernetes is designed for declarative workloads, not imperative step-by-step

### Jobs are idiomatic Kubernetes
- Jobs are purpose-built for batch workloads
- Native features: status tracking, TTL cleanup, retry policies
- Declarative: describe the work, Kubernetes handles execution
- Better isolation: each workflow is a separate Job

### Script generation is efficient
- Single script with all steps: `BuildStepExecutionScript()`
- GitHub Actions-compatible environment passing
- Error handling built into script
- Timestamps and step boundaries in logs
- No coordinator overhead

## Trade-offs

### What we gain with Jobs:
✅ Idiomatic Kubernetes
✅ Simpler implementation
✅ Better isolation per workflow
✅ Native Job status and cleanup
✅ No persistent connections needed
✅ Works with standard Kubernetes features

### What we lose vs Docker exec:
❌ No per-step control from spindle
❌ Can't pause/resume between steps
❌ Logs are bundled (not separated by step)
❌ `RunStep()` is a no-op
❌ Can't dynamically change step execution

### Is this okay?

**Yes!** The execution model difference is intentional and appropriate for Kubernetes. We still:
- Run all steps in order
- Handle errors correctly
- Pass environment between steps
- Report status to spindle DB
- Stream logs
- Support secrets

The interface contract with spindle's Engine is satisfied, just with a different implementation strategy.

## Step Script Generation

See `pkg/jobbuilder/script_builder.go` for how we generate the bash script.

### Features:
- **GitHub Actions compatibility**: `GITHUB_ENV`, `GITHUB_PATH`, `GITHUB_OUTPUT`
- **Environment passing**: Steps can set variables for later steps
- **Error handling**: Script exits on first failure
- **Timestamps**: Every step logs start/end times
- **Step boundaries**: Clear markers in logs

### Example generated script:

```bash
#!/bin/bash
set -e
set -o pipefail

# Setup GITHUB_ENV, GITHUB_PATH, GITHUB_OUTPUT
export GITHUB_ENV=/tmp/github/env
export GITHUB_PATH=/tmp/github/path
export GITHUB_OUTPUT=/tmp/github/output

# Helper functions
add_to_env() { echo "$1=$2" >> $GITHUB_ENV; export "$1"="$2"; }
add_to_path() { echo "$1" >> $GITHUB_PATH; export PATH="$1:$PATH"; }
load_github_env() { source "$GITHUB_ENV" 2>/dev/null || true; }

##############################################################################
# Step 1: Build
##############################################################################
echo "2025-11-06T15:00:00.000Z ===== Starting Step 1: Build ====="
load_github_env
cd /tangled/workspace
go build ./... || { echo "Step 'Build' failed"; exit 1; }
echo "2025-11-06T15:00:10.000Z ===== Completed Step 1: Build ====="

##############################################################################
# Step 2: Test
##############################################################################
echo "2025-11-06T15:00:10.000Z ===== Starting Step 2: Test ====="
load_github_env
cd /tangled/workspace
go test ./... || { echo "Step 'Test' failed"; exit 1; }
echo "2025-11-06T15:00:20.000Z ===== Completed Step 2: Test ====="

exit 0
```

## Configuration

Loom reads spindle configuration from environment variables (via `tangled.org/core/spindle/config`):

```bash
SPINDLE_SERVER_LISTEN_ADDR=0.0.0.0:6555
SPINDLE_SERVER_DB_PATH=/data/spindle.db
SPINDLE_SERVER_HOSTNAME=loom.example.com
SPINDLE_SERVER_OWNER=did:web:example.com
SPINDLE_SERVER_QUEUE_SIZE=100
SPINDLE_SERVER_MAX_JOB_COUNT=2
SPINDLE_SERVER_LOG_DIR=/var/log/spindle
SPINDLE_SERVER_SECRETS_PROVIDER=sqlite
```

These are typically set in the Loom Deployment manifest.

## Status Reporting

Status flows through spindle's event system:

1. **Job status changes** detected by controller
2. **Write to spindle DB**: `db.StatusPending/Running/Success/Failed()`
3. **Broadcast via Notifier**: `notifier.NotifyAll()`
4. **Stream to subscribers**: Knots connected to `/events` WebSocket receive updates

This follows spindle's standard pattern - no custom status reporter needed.

## Networking

### Internal (Cluster)
- Spindle HTTP server listens on `:6555` inside operator pod
- Controller creates a Kubernetes Service to expose it
- Service name: `<spindleset-name>-spindle`
- Endpoints available at: `http://<service>.<namespace>.svc.cluster.local:6555`

### External Access
Options for external access:
1. **NodePort**: Change Service type to expose on node IP
2. **LoadBalancer**: Create cloud load balancer
3. **Ingress**: Route external traffic to Service
4. **Port forwarding**: For testing: `kubectl port-forward svc/spindleset-sample-spindle 6555:6555`

## Multi-Architecture Support

Jobs can target specific node architectures using NodeAffinity:

```yaml
spec:
  template:
    architecture: arm64  # or amd64
```

The jobbuilder generates NodeAffinity rules:
```yaml
affinity:
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/arch
              operator: In
              values: [arm64]
```

## Future Enhancements

### 1. Structured Logs Per Step
Have the Job script POST progress updates:
```bash
curl -X POST $SPINDLE_STATUS_URL -d '{"step": 1, "status": "start"}'
go build ./...
curl -X POST $SPINDLE_STATUS_URL -d '{"step": 1, "status": "complete"}'
```

### 2. Step-level Artifacts
Mount a PVC for step outputs:
```yaml
volumes:
  - name: artifacts
    persistentVolumeClaim:
      claimName: workflow-artifacts
```

### 3. Parallel Steps
Support running independent steps in parallel (separate containers in same pod).

### 4. Dynamic Step Execution
For advanced use cases, implement the exec pattern:
- Create long-running Pod (not Job)
- Implement `RunStep()` with `kubectl exec`
- Trade efficiency for flexibility

## Comparison with Other Engines

| Feature | NixeryEngine | KubernetesEngine |
|---------|--------------|------------------|
| Runtime | Docker | Kubernetes |
| Isolation | Container | Job (Pod) |
| Step execution | Sequential exec | Baked script |
| RunStep() | Active | No-op |
| Overhead | Low | Medium (K8s API) |
| Idiomatic | Yes (Docker) | Yes (K8s) |
| Scalability | Limited | High |
| Multi-tenancy | Shared host | Cluster-native |

## References

- **Spindle Core**: `/home/data/core/spindle/`
- **NixeryEngine**: `/home/data/core/spindle/engines/nixery/engine.go`
- **KubernetesEngine**: `/home/data/loom/internal/engine/kubernetes_engine.go`
- **Job Builder**: `/home/data/loom/pkg/jobbuilder/`
- **Script Builder**: `/home/data/loom/pkg/jobbuilder/script_builder.go`

**CLAUDE.md** (+339)

# Working with Claude Code on Loom

This document describes how Claude Code was used to develop the Loom operator and provides guidelines for continuing development with AI assistance.

## Project Context

**What is Loom?**
Loom is a Kubernetes operator that coordinates tangled.org Spindles - ephemeral CI/CD runners inspired by GitHub's Actions Runner Controller (ARC). It enables running pipeline workflows in Kubernetes in response to events from tangled.org knots.

**Key Characteristics:**
- Built with operator-sdk and Kubebuilder
- Integrates with tangled.org's AT Protocol-based event system
- Reuses components from `tangled.org/core/spindle`
- Implements a Kubernetes-native execution engine

## Development Approach

### Research Phase
Claude Code researched three key areas before implementation:

1. **GitHub Actions Runner Controller (ARC)**
   - Studied three-tier controller architecture
   - Learned JIT token patterns for security
   - Understood ephemeral runner lifecycle
   - Reviewed auto-scaling mechanisms

2. **Chainguard Kaniko Fork**
   - Investigated rootless container builds
   - Understood security benefits
   - Evaluated integration patterns (deferred for MVP)

3. **tangled.org Spindles**
   - Analyzed existing implementation in `/home/data/core/spindle`
   - Studied Engine interface and models
   - Understood WebSocket event ingestion
   - Reviewed Nixery integration (simplified for MVP)

### Architectural Decisions

**Simplifications Made:**
1. **No Nixery for MVP**: Use standard Docker images instead of dynamic Nix-based images
2. **Ephemeral Jobs**: Scale-to-zero approach, one Job per pipeline
3. **Kubernetes-native logging**: Stream logs via K8s API instead of disk-based
4. **Direct WebSocket**: Maintain persistent connection to knot (not polling)

**Code Reuse Strategy:**
- Import models and interfaces from `tangled.org/core/spindle`
- Reuse WebSocket client logic
- Implement new `KubernetesEngine` for Job-based execution
- Avoid duplicating existing functionality

## Claude Code Usage Guidelines

### When to Use Claude Code

**Good Use Cases:**
- Implementing boilerplate (CRDs, controllers, builders)
- Generating Kubernetes manifests and RBAC
- Creating test scaffolding
- Refactoring for clarity
- Adding logging and metrics
- Updating documentation

**Not Recommended:**
- Security-critical authentication logic (review carefully)
- Complex AT Protocol interactions (defer to existing code)
- Performance-critical paths (benchmark first)

### Prompting Best Practices

**Provide Context:**
```
I want to add [feature]. We're using the KubernetesEngine from
internal/engine/kubernetes_engine.go which implements the Engine
interface from tangled.org/core/spindle/models. The feature should
[specific behavior].
```

**Reference Existing Code:**
```
Look at how core/spindle/engines/nixery/engine.go handles [X].
We need similar logic in our KubernetesEngine but adapted for
Kubernetes Jobs instead of Docker containers.
```

**Specify Constraints:**
```
Implement [feature] but:
1. Reuse the existing Job builder pattern
2. Add Prometheus metrics
3. Follow the error handling pattern from the controller
4. Don't break multi-arch support
```

### Common Patterns in This Project

**1. Job Creation Pattern**
```go
// Jobs are owned by SpindleSet for automatic cleanup
ctrl.SetControllerReference(spindleSet, job, r.Scheme)

// Jobs labeled for querying
labels := map[string]string{
    "loom.j5t.io/spindleset": spindleSet.Name,
    "loom.j5t.io/workflow":   workflowName,
}
```

**2. Status Update Pattern**
```go
// Always update status in defer to ensure updates even on errors
defer func() {
    if err := r.Status().Update(ctx, spindleSet); err != nil {
        log.Error(err, "Failed to update SpindleSet status")
    }
}()
```

**3. Multi-Arch Node Targeting**
```go
// Architecture specified in workflow, translated to node affinity
affinity := &corev1.Affinity{
    NodeAffinity: &corev1.NodeAffinity{
        RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
            NodeSelectorTerms: []corev1.NodeSelectorTerm{{
                MatchExpressions: []corev1.NodeSelectorRequirement{{
                    Key:      "kubernetes.io/arch",
                    Operator: corev1.NodeSelectorOpIn,
                    Values:   []string{workflow.Architecture},
                }},
            }},
        },
    },
}
```

**4. Metrics Pattern**
```go
// Register metrics in init()
func init() {
    metrics.Registry.MustRegister(runningSpindles, completedSpindles)
}

// Update metrics in controller
runningSpindles.Set(float64(spindleSet.Status.RunningJobs))
```

## Development Workflow

### Standard Development Cycle

1. **Plan**: Update `PLAN.md` with new feature/phase
2. **Implement**: Generate code with Claude Code
3. **Test**: Run unit tests (`make test`)
4. **Generate**: Update manifests (`make manifests`)
5. **Deploy**: Test in cluster (`make deploy`)
6. **Iterate**: Fix issues, update docs

### Key Commands

```bash
# Generate CRDs and code
make manifests generate

# Run tests
make test

# Build operator
make build

# Deploy to cluster
make deploy IMG=<your-registry>/loom:tag

# Run locally (for debugging)
make install run

# Clean up
make undeploy
```

### Testing Strategy

**Unit Tests:**
- Mock Kubernetes clients
- Test Job builder logic
- Test affinity generation
- Test script builder

**Integration Tests:**
- Deploy to test cluster (kind/k3s)
- Create SpindleSet CR
- Simulate pipeline events
- Verify Jobs created correctly
- Check logs and status

**Manual Testing:**
- Deploy to real cluster with mixed amd64/arm64 nodes
- Connect to real tangled.org knot
- Trigger actual pipeline runs
- Monitor with Prometheus/Grafana

## Code Organization

### Package Structure

```
loom/
├── api/v1alpha1/        # CRD types (SpindleSet)
├── internal/
│   ├── controller/      # Reconciliation logic
│   └── engine/          # KubernetesEngine implementation
├── pkg/
│   ├── ingester/        # WebSocket client
│   ├── jobbuilder/      # Job template generation
│   └── knot/            # Knot API client
└── config/              # Kubernetes manifests
```

### Import Guidelines

**From tangled.org/core:**
```go
// Models and interfaces
import "tangled.org/core/spindle/models"
import "tangled.org/core/api/tangled"

// Adapt, don't import directly:
// - WebSocket client (pkg/ingester)
// - Status reporting (pkg/knot)
```

**Kubernetes:**
```go
import batchv1 "k8s.io/api/batch/v1"
import corev1 "k8s.io/api/core/v1"
import ctrl "sigs.k8s.io/controller-runtime"
```

## Common Tasks

### Adding a New Field to SpindleSet

1. Edit `api/v1alpha1/spindleset_types.go`
2. Add field with proper kubebuilder markers (see the sketch at the end of this document)
3. Run `make manifests generate`
4. Update sample CRs in `config/samples/`
5. Update controller logic to use field
6. Add tests for new behavior

### Changing Job Template

1. Edit `pkg/jobbuilder/job_template.go`
2. Update Job generation logic
3. Run unit tests (`make test`)
4. Deploy and test (`make deploy`)
5. Update documentation if needed

### Adding Prometheus Metrics

1. Define metric in controller file
2. Register in `init()` function
3. Update metric in reconciliation loop
4. Add metric to Prometheus config
5. Document in `PLAN.md` metrics section

## Troubleshooting

### Common Issues

**Issue: CRD not updating**
```bash
# Regenerate and reinstall
make manifests
make install
```

**Issue: Controller not reconciling**
```bash
# Check logs
kubectl logs -n loom-system deployment/loom-controller-manager

# Check RBAC
kubectl auth can-i create jobs --as=system:serviceaccount:loom-system:loom-controller-manager
```

**Issue: Jobs stuck pending**
```bash
# Check job spec
kubectl get job <job-name> -o yaml

# Check events
kubectl describe job <job-name>

# Check node availability
kubectl get nodes -L kubernetes.io/arch
```

**Issue: WebSocket disconnecting**
```bash
# Check SpindleSet status
kubectl get spindleset -o wide

# Check controller logs for connection errors
kubectl logs -n loom-system deployment/loom-controller-manager | grep -i websocket
```

## Contributing

### Before Asking Claude Code for Help

1. Read the existing code in the affected area
2. Check `PLAN.md` for architectural context
3. Review similar implementations in the codebase
4. Check tangled.org/core for reusable components

### After Getting Code from Claude Code

1. Review generated code for correctness
2. Run tests (`make test`)
3. Check for proper error handling
4. Verify metrics are updated
5. Update documentation
6. Test in real cluster

## Resources

- **Kubebuilder Book**: https://book.kubebuilder.io/
- **Controller Runtime**: https://pkg.go.dev/sigs.k8s.io/controller-runtime
- **GitHub ARC**: https://github.com/actions/actions-runner-controller
- **tangled.org Core**: /home/data/core/
- **Operator SDK**: https://sdk.operatorframework.io/

## Notes

- This project prioritizes simplicity over completeness for MVP
- Code reuse from tangled.org/core is preferred over reimplementation
- Multi-architecture support is a first-class concern
- Prometheus metrics are essential, not optional
- Security (RBAC, secrets) should be carefully reviewed

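To make step 2 of "Adding a New Field to SpindleSet" concrete, here is a minimal sketch using standard kubebuilder markers. The field shown borrows `maxConcurrentJobs` from the sample CR; the exact fields of the project's real `SpindleSetSpec` may differ.

```go
package v1alpha1

// Illustrative snippet only: the markers below are standard kubebuilder
// validation/defaulting markers, not a copy of the project's schema.
type SpindleSetSpec struct {
	// MaxConcurrentJobs caps how many workflow Jobs run at once.
	// +kubebuilder:validation:Minimum=1
	// +kubebuilder:default=2
	// +optional
	MaxConcurrentJobs int32 `json:"maxConcurrentJobs,omitempty"`
}
```

After adding a field like this, `make manifests generate` regenerates the CRD schema and deepcopy code so the markers take effect (step 3 above).
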
**CONFIGURATION.md** (+500)

# Loom Configuration Guide

Loom is configured via environment variables that are passed to the embedded spindle server.

## Quick Start

1. Create namespace: `kubectl create namespace loom-system`
2. Apply CRDs: `kubectl apply -f config/crd/bases/`
3. Apply RBAC: `kubectl apply -f config/rbac/`
4. Deploy operator with config (see below)
5. Create SpindleSet: `kubectl apply -f config/samples/`

## Required Environment Variables

These must be set in the Loom operator Deployment:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: loom-controller-manager
  namespace: loom-system
spec:
  replicas: 1
  selector:
    matchLabels:
      control-plane: controller-manager
  template:
    metadata:
      labels:
        control-plane: controller-manager
    spec:
      containers:
        - name: manager
          image: loom:latest
          env:
            # Required: Spindle server configuration
            - name: SPINDLE_SERVER_LISTEN_ADDR
              value: "0.0.0.0:6555"

            - name: SPINDLE_SERVER_DB_PATH
              value: "/data/spindle.db"

            - name: SPINDLE_SERVER_HOSTNAME
              value: "loom.example.com"  # Change to your domain

            - name: SPINDLE_SERVER_OWNER
              value: "did:web:example.com"  # Change to your DID

            - name: SPINDLE_SERVER_JETSTREAM_ENDPOINT
              value: "wss://jetstream1.us-west.bsky.network/subscribe"

            # Optional: Adjust queue settings
            - name: SPINDLE_SERVER_QUEUE_SIZE
              value: "100"

            - name: SPINDLE_SERVER_MAX_JOB_COUNT
              value: "2"  # Max concurrent workflows

            # Optional: Log directory (not used much in K8s)
            - name: SPINDLE_SERVER_LOG_DIR
              value: "/var/log/spindle"

            # Optional: Development mode
            - name: SPINDLE_SERVER_DEV
              value: "false"

            # Optional: Secrets provider (default: sqlite)
            - name: SPINDLE_SERVER_SECRETS_PROVIDER
              value: "sqlite"

          ports:
            - containerPort: 6555
              name: http
              protocol: TCP

          volumeMounts:
            - name: data
              mountPath: /data

      volumes:
        - name: data
          emptyDir: {}  # Or use PersistentVolume for persistence
```

## Environment Variable Reference

### SPINDLE_SERVER_LISTEN_ADDR
**Required**: Yes
**Default**: `0.0.0.0:6555`
**Description**: Address and port for spindle HTTP server
**Example**: `0.0.0.0:6555`

The spindle HTTP server exposes:
- `GET /events` - WebSocket for status updates
- `GET /logs/{knot}/{rkey}/{name}` - WebSocket for logs
- `POST /xrpc/*` - XRPC endpoints

### SPINDLE_SERVER_DB_PATH
**Required**: Yes
**Default**: `spindle.db`
**Description**: Path to SQLite database file
**Example**: `/data/spindle.db`

**Important**: Use a persistent volume if you want to retain data across pod restarts.

### SPINDLE_SERVER_HOSTNAME
**Required**: Yes
**Default**: None
**Description**: Hostname for this spindle instance (used in DID)
**Example**: `loom.example.com`

This is used to construct the spindle's DID: `did:web:<hostname>`

### SPINDLE_SERVER_OWNER
**Required**: Yes
**Default**: None
**Description**: DID of the spindle owner
**Example**: `did:web:example.com` or `did:plc:abc123...`

The owner has full control over the spindle instance.

### SPINDLE_SERVER_JETSTREAM_ENDPOINT
**Required**: Yes
**Default**: `wss://jetstream1.us-west.bsky.network/subscribe`
**Description**: Bluesky jetstream endpoint for ingesting member/repo records
**Example**: `wss://jetstream1.us-west.bsky.network/subscribe`

Used to watch for:
- `sh.tangled.spindleMember` records
- `sh.tangled.repo` records
- `sh.tangled.repoCollaborator` records

### SPINDLE_SERVER_QUEUE_SIZE
**Required**: No
**Default**: `100`
**Description**: Maximum number of jobs that can be queued
**Example**: `100`

If queue is full, new pipeline events will be rejected.

### SPINDLE_SERVER_MAX_JOB_COUNT
**Required**: No
**Default**: `2`
**Description**: Maximum number of workflows running concurrently
**Example**: `5`

Controls how many Kubernetes Jobs will run in parallel.

### SPINDLE_SERVER_LOG_DIR
**Required**: No
**Default**: `/var/log/spindle`
**Description**: Directory for workflow logs (not heavily used in Kubernetes)
**Example**: `/var/log/spindle`

KubernetesEngine logs are primarily captured via Kubernetes pod logs, not local files.

### SPINDLE_SERVER_DEV
**Required**: No
**Default**: `false`
**Description**: Enable development mode
**Example**: `true`

Development mode may skip some validations or enable verbose logging.

### SPINDLE_SERVER_SECRETS_PROVIDER
**Required**: No
**Default**: `sqlite`
**Description**: Secrets storage backend
**Options**: `sqlite`, `openbao`

#### SQLite Provider (default)
Stores secrets in the SQLite database.

No additional configuration needed.

#### OpenBao Provider
Stores secrets in OpenBao vault.

**Additional environment variables required:**
```yaml
- name: SPINDLE_SERVER_SECRETS_PROVIDER
  value: "openbao"

- name: SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR
  value: "http://openbao:8200"

- name: SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT
  value: "spindle"  # Default mount path
```

## Persistence

### SQLite Database

The SQLite database contains:
- Repos being watched
- Spindle members
- Pipeline events
- Status history
- Secrets (if using sqlite provider)

**For production, use a PersistentVolume:**

```yaml
volumes:
  - name: data
    persistentVolumeClaim:
      claimName: spindle-data

---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: spindle-data
  namespace: loom-system
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
```

### Log Directory

Not critical for Kubernetes deployments since logs are captured via pod logs.

You can omit the log directory volume or use emptyDir.

## Service Configuration

The controller automatically creates a Kubernetes Service to expose the spindle HTTP server:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: spindleset-sample-spindle
  namespace: default
spec:
  selector:
    control-plane: controller-manager  # Selects Loom operator pod
  ports:
    - name: http
      port: 6555
      targetPort: 6555
  type: ClusterIP
```

### External Access

**Option 1: NodePort**
```yaml
spec:
  type: NodePort
  ports:
    - name: http
      port: 6555
      targetPort: 6555
      nodePort: 30655
```

**Option 2: LoadBalancer**
```yaml
spec:
  type: LoadBalancer
  ports:
    - name: http
      port: 6555
      targetPort: 6555
```

**Option 3: Ingress**
```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: spindle-ingress
  namespace: loom-system
spec:
  rules:
    - host: loom.example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: spindleset-sample-spindle
                port:
                  number: 6555
```

**Option 4: Port Forward (Testing)**
```bash
kubectl port-forward -n loom-system svc/spindleset-sample-spindle 6555:6555
```

## SpindleSet CRD

The SpindleSet resource configures workflow execution (not spindle server config):

```yaml
apiVersion: loom.j5t.io/v1alpha1
kind: SpindleSet
metadata:
  name: spindleset-sample
  namespace: default
spec:
  # URL of the knot this spindle serves (currently not used)
  knotUrl: https://knot1.tangled.sh

  # Kubernetes secret containing auth credentials (currently not used)
  knotAuthSecret: spindle-auth

  # Maximum concurrent workflows (not enforced yet - uses SPINDLE_SERVER_MAX_JOB_COUNT)
  maxConcurrentJobs: 5

  # Template for Kubernetes Jobs
  template:
    # Resource limits for workflow pods
    resources:
      requests:
        cpu: "500m"
        memory: "1Gi"
      limits:
        cpu: "2"
        memory: "4Gi"

    # Node selector for scheduling
    nodeSelector:
      disktype: ssd

    # Tolerations for taints
    tolerations:
      - key: workload
        operator: Equal
        value: ci
        effect: NoSchedule

    # Additional affinity rules (merged with architecture affinity)
    affinity:
      nodeAffinity:
        preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            preference:
              matchExpressions:
                - key: node-role.kubernetes.io/worker
                  operator: In
                  values: ["true"]
```

**Note**: Currently the SpindleSet is mainly used to trigger Service creation. The spindle server configuration comes from environment variables in the operator Deployment.

## Complete Deployment Example

See `config/manager/manager.yaml` for the full operator deployment manifest.

Minimal example:

```yaml
apiVersion: v1
kind: Namespace
metadata:
  name: loom-system

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: loom-controller-manager
  namespace: loom-system

---
# RBAC manifests from config/rbac/...
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: loom-controller-manager
  namespace: loom-system
spec:
  replicas: 1
  selector:
    matchLabels:
      control-plane: controller-manager
  template:
    metadata:
      labels:
        control-plane: controller-manager
    spec:
      serviceAccountName: loom-controller-manager
      containers:
        - name: manager
          image: loom:latest
          command:
            - /manager
          env:
            - name: SPINDLE_SERVER_LISTEN_ADDR
              value: "0.0.0.0:6555"
            - name: SPINDLE_SERVER_DB_PATH
              value: "/data/spindle.db"
            - name: SPINDLE_SERVER_HOSTNAME
              value: "loom.example.com"
            - name: SPINDLE_SERVER_OWNER
              value: "did:web:example.com"
            - name: SPINDLE_SERVER_JETSTREAM_ENDPOINT
              value: "wss://jetstream1.us-west.bsky.network/subscribe"
            - name: SPINDLE_SERVER_QUEUE_SIZE
              value: "100"
            - name: SPINDLE_SERVER_MAX_JOB_COUNT
              value: "2"
          ports:
            - containerPort: 6555
              name: http
            - containerPort: 8081
              name: healthz
          livenessProbe:
            httpGet:
              path: /healthz
              port: healthz
          readinessProbe:
            httpGet:
              path: /readyz
              port: healthz
          volumeMounts:
            - name: data
              mountPath: /data
      volumes:
        - name: data
          emptyDir: {}

---
apiVersion: loom.j5t.io/v1alpha1
kind: SpindleSet
metadata:
  name: default-spindle
  namespace: loom-system
spec:
  knotUrl: https://knot1.tangled.sh
  maxConcurrentJobs: 5
```

## Testing Connectivity

### 1. Check Service Created
```bash
kubectl get svc -A | grep spindle
```

Should show: `default-spindle-spindle`

### 2. Port Forward to Access Locally
```bash
kubectl port-forward -n loom-system svc/default-spindle-spindle 6555:6555
```

### 3. Test /events WebSocket
```bash
websocat ws://localhost:6555/events
```

Should connect and start streaming events (or stay connected waiting for events).

### 4. Check Logs
```bash
kubectl logs -n loom-system -l control-plane=controller-manager
```

Should show:
- "spindle server initialized successfully"
- "starting spindle HTTP server"
- "Spindle HTTP service created successfully"

## Troubleshooting

### "spindle server error: address already in use"
Port 6555 is already bound. Check if another process is using it or if you have multiple operator replicas.

### "failed to load spindle config: SPINDLE_SERVER_HOSTNAME is required"
Missing required environment variable. Check your Deployment manifest.

### Service not created
Check controller logs for errors. Ensure RBAC permissions for Services are granted.

### Jobs not creating
- Check spindle can reach knot WebSocket
- Verify EventConsumer is running (logs should show "starting knot event consumer")
- Check if repos are registered in spindle database

### Database locked errors
SQLite can't be shared across multiple pods. Set `replicas: 1` or use a different database solution.

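For the config-related errors above, it helps to confirm which variables the manager container actually received. A quick check using stock kubectl, with the deployment name used throughout this guide:

```bash
# List the env vars configured on the manager Deployment
kubectl set env deployment/loom-controller-manager -n loom-system --list

# Expect the required keys to be present, e.g.:
# SPINDLE_SERVER_HOSTNAME=loom.example.com
# SPINDLE_SERVER_OWNER=did:web:example.com
```
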
## Next Steps

- [Architecture Overview](./ARCHITECTURE.md) - Understand how Loom works
- [Workflow Configuration](./WORKFLOWS.md) - Write workflow YAML files
- [Upstream Improvements](./TANGLED.md) - Contribute to tangled.org/core

**Dockerfile** (+34)

```dockerfile
# Build the manager binary
FROM golang:1.24 AS builder
ARG TARGETOS
ARG TARGETARCH

WORKDIR /workspace
# Copy the core module (for local replace directive)
COPY core/ core/
# Copy the Go Modules manifests
COPY loom/go.mod loom/go.mod
COPY loom/go.sum loom/go.sum
# cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
WORKDIR /workspace/loom
RUN go mod download

# Copy the go source
COPY loom/cmd/main.go cmd/main.go
COPY loom/api/ api/
COPY loom/internal/ internal/
COPY loom/pkg/ pkg/

# Build
# CGO is required for go-sqlite3
RUN CGO_ENABLED=1 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager cmd/main.go

# Use distroless base image with glibc (not static) since we need CGO
# Refer to https://github.com/GoogleContainerTools/distroless for more details
FROM gcr.io/distroless/base-debian12:nonroot
WORKDIR /
COPY --from=builder /workspace/loom/manager .
USER 65532:65532

ENTRYPOINT ["/manager"]
```

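Because the Dockerfile copies both `core/` and `loom/`, the build context must be the parent directory that contains both checkouts. A sketch of the build commands under that assumed layout (image tags are placeholders):

```bash
# Run from the directory that contains core/ and loom/:
#   parent/
#   ├── core/   # tangled.org/core checkout (local replace target)
#   └── loom/   # this repository
docker build -f loom/Dockerfile -t loom:latest .

# Multi-arch note: CGO is enabled, so cross-compiling generally needs a
# C toolchain per target; building under emulation with buildx avoids that.
docker buildx build -f loom/Dockerfile --platform linux/arm64 -t loom:arm64 .
```
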
**Kubernetes Operator for Github Actions Runner.md** (+1066)

# Kubernetes Operator for Github Actions Runner
Created at 2025-11-03 05:24:53 UTC

> @ 2025-11-03 05:24:53 UTC
>
> I want to implement a kubernetes operator similar to github actions runner controller. I want to primarily focus on the logic for spawning a "runner" container via jobs and polling logs as it's processing. I also want to make sure i can build docker containers within these runners. And the jobs should have logic to target nodes based on os architecture. This is supposed to be a rough poc, i already have go logic that works for reading a yaml file and figuring out what workflow to run. It just needs to send that job/step to a job in kubes

<details><summary>Thinking</summary>

The user is asking for help implementing a Kubernetes operator similar to GitHub Actions runner controller. They want to:
1. Spawn runner containers via Jobs
2. Poll logs as it's processing
3. Build docker containers within runners
4. Target nodes based on OS architecture
5. This is a rough POC and they already have Go logic to read YAML and figure out what workflow to run

I'll provide a comprehensive answer covering the implementation of these requirements.

</details>

I'll help you implement a Kubernetes operator for spawning runner containers similar to GitHub Actions Runner Controller. Here's a comprehensive approach for your POC:

## **Kubernetes Job Spawning Logic**

Here's the core Go code structure for creating and managing Jobs:

```go
package operator

import (
	"context"
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

type RunnerController struct {
	clientset *kubernetes.Clientset
	namespace string
}

func (rc *RunnerController) CreateRunnerJob(workflowID, stepName, stepCommand string, arch string) (*batchv1.Job, error) {
	job := &batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("runner-%s-%s", workflowID, stepName),
			Namespace: rc.namespace,
			Labels: map[string]string{
				"workflow-id": workflowID,
				"step-name":   stepName,
				"type":        "runner",
			},
		},
		Spec: batchv1.JobSpec{
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						"workflow-id": workflowID,
						"step-name":   stepName,
					},
				},
				Spec: corev1.PodSpec{
					RestartPolicy: corev1.RestartPolicyNever,
					NodeSelector: map[string]string{
						"kubernetes.io/arch": arch, // amd64, arm64, etc.
					},
					Containers: []corev1.Container{
						{
							Name:    "runner",
							Image:   "docker:dind", // Docker in Docker image
							Command: []string{"/bin/sh", "-c"},
							Args:    []string{stepCommand},
							Env: []corev1.EnvVar{
								{
									Name:  "DOCKER_HOST",
									Value: "tcp://localhost:2375",
								},
							},
							VolumeMounts: []corev1.VolumeMount{
								{
									Name:      "docker-graph-storage",
									MountPath: "/var/lib/docker",
								},
							},
						},
						{
							Name:    "dind-daemon",
							Image:   "docker:dind",
							Command: []string{"dockerd"},
							Args: []string{
								"--host=tcp://0.0.0.0:2375",
								"--storage-driver=overlay2",
							},
							SecurityContext: &corev1.SecurityContext{
								Privileged: &[]bool{true}[0],
							},
							VolumeMounts: []corev1.VolumeMount{
								{
									Name:      "docker-graph-storage",
									MountPath: "/var/lib/docker",
								},
							},
						},
					},
					Volumes: []corev1.Volume{
						{
							Name: "docker-graph-storage",
							VolumeSource: corev1.VolumeSource{
								EmptyDir: &corev1.EmptyDirVolumeSource{},
							},
						},
					},
				},
			},
		},
	}

	return rc.clientset.BatchV1().Jobs(rc.namespace).Create(context.TODO(), job, metav1.CreateOptions{})
}
```

## **Docker-in-Docker Configuration**

For building Docker containers within your runners, you have two main approaches [^6][^2]:

### **Option 1: Docker-in-Docker (DinD)**
The above example uses a sidecar container approach with the `docker:dind` image. This provides full Docker functionality but requires **privileged mode**.

### **Option 2: Kaniko (Rootless Alternative)**
For better security, consider using Kaniko for building images without Docker daemon:

```go
// Alternative container spec for Kaniko
{
	Name:  "runner",
	Image: "gcr.io/kaniko-project/executor:latest",
	Args: []string{
		"--dockerfile=Dockerfile",
		"--context=git://github.com/your-repo.git",
		"--destination=your-registry/image:tag",
	},
}
```

## **Log Polling Implementation**

Here's how to poll and stream logs from your runner Jobs [^1][^3]:

```go
import (
	"bufio"
	"context"
	"fmt"
	"io"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func (rc *RunnerController) PollJobLogs(jobName string) error {
	// Wait for pod to be created
	time.Sleep(2 * time.Second)

	// Get pods for the job
	labelSelector := fmt.Sprintf("job-name=%s", jobName)
	pods, err := rc.clientset.CoreV1().Pods(rc.namespace).List(
		context.TODO(),
		metav1.ListOptions{LabelSelector: labelSelector},
	)
	if err != nil || len(pods.Items) == 0 {
		return fmt.Errorf("no pods found for job %s", jobName)
	}

	podName := pods.Items[0].Name

	// Stream logs
	req := rc.clientset.CoreV1().Pods(rc.namespace).GetLogs(podName, &corev1.PodLogOptions{
		Follow:     true,
		Container:  "runner",
		Timestamps: true,
	})

	stream, err := req.Stream(context.TODO())
	if err != nil {
		return err
	}
	defer stream.Close()

	// Read and process logs
	reader := bufio.NewReader(stream)
	for {
		line, err := reader.ReadString('\n')
		if err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		// Process log line (send to channel, write to file, etc.)
		fmt.Print(line)
	}

	return nil
}
```

## **Architecture-Based Node Targeting**

To target specific node architectures, use **nodeSelector** or **nodeAffinity** [^1]:

```go
// Enhanced node selection with affinity
Spec: corev1.PodSpec{
	Affinity: &corev1.Affinity{
		NodeAffinity: &corev1.NodeAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
				NodeSelectorTerms: []corev1.NodeSelectorTerm{
					{
						MatchExpressions: []corev1.NodeSelectorRequirement{
							{
								Key:      "kubernetes.io/arch",
								Operator: corev1.NodeSelectorOpIn,
								Values:   []string{arch}, // "amd64", "arm64"
							},
							{
								Key:      "kubernetes.io/os",
								Operator: corev1.NodeSelectorOpIn,
								Values:   []string{"linux"},
							},
						},
					},
				},
			},
		},
	},
	// ... rest of pod spec
}
```

## **Job Monitoring and Cleanup**

Add logic to monitor job completion and cleanup:

```go
func (rc *RunnerController) WaitForJobCompletion(jobName string, timeout time.Duration) (*batchv1.Job, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			return nil, fmt.Errorf("job %s timed out", jobName)
		case <-ticker.C:
			job, err := rc.clientset.BatchV1().Jobs(rc.namespace).Get(
				context.TODO(), jobName, metav1.GetOptions{},
			)
			if err != nil {
				return nil, err
			}

			if job.Status.Succeeded > 0 {
				return job, nil
			}
			if job.Status.Failed > 0 {
				return job, fmt.Errorf("job %s failed", jobName)
			}
		}
	}
}

func (rc *RunnerController) CleanupJob(jobName string) error {
	propagationPolicy := metav1.DeletePropagationBackground
	return rc.clientset.BatchV1().Jobs(rc.namespace).Delete(
		context.TODO(),
		jobName,
		metav1.DeleteOptions{
			PropagationPolicy: &propagationPolicy,
		},
	)
}
```

## **Integration with Your Workflow Parser**

Here's how to tie it all together with your existing YAML parser:

```go
type WorkflowExecutor struct {
	controller *RunnerController
}

func (we *WorkflowExecutor) ExecuteWorkflow(workflow *WorkflowSpec) error {
	for _, step := range workflow.Steps {
		// Create job for step
		job, err := we.controller.CreateRunnerJob(
			workflow.ID,
			step.Name,
			step.Command,
			step.Architecture, // from your YAML
		)
		if err != nil {
			return err
		}

		// Start log polling in goroutine
		go we.controller.PollJobLogs(job.Name)

		// Wait for completion
		if _, err := we.controller.WaitForJobCompletion(job.Name, 30*time.Minute); err != nil {
			return err
		}

		// Optional: cleanup
		if workflow.CleanupOnComplete {
			we.controller.CleanupJob(job.Name)
		}
	}
	return nil
}
```

## **Key Considerations**

- **Security**: Running Docker-in-Docker requires privileged containers. Consider using Kaniko or Buildah for rootless builds
- **Resource Management**: Set appropriate resource limits on Jobs to prevent resource exhaustion
- **Persistent Storage**: For caching Docker layers between builds, consider using PersistentVolumes instead of EmptyDir
- **RBAC**: Ensure your operator has proper ServiceAccount permissions to create/delete Jobs and read logs

This POC gives you a solid foundation similar to the GitHub Actions Runner Controller [^4][^5], with the core functionality you need for spawning runners, building containers, and monitoring execution.

_References_:
[^1]: [Jobs | Kubernetes](https://kubernetes.io/docs/concepts/workloads/controllers/job/)
[^2]: [Jenkins: running workers in Kubernetes and Docker images build](https://dev.to/setevoy/jenkins-running-workers-in-kubernetes-and-docker-images-build-1lef)
[^3]: [kubectl logs - Kubernetes](https://kubernetes.io/docs/reference/kubectl/generated/kubectl_logs/)
[^4]: [Kubernetes controller for GitHub Actions self-hosted runners](https://github.com/actions/actions-runner-controller)
[^5]: [GitHub - evryfs/github-actions-runner-operator: K8S operator for...](https://github.com/evryfs/github-actions-runner-operator)
[^6]: [Automating Docker Builds in Kubernetes | by Sebastien Goasguen](https://sebgoa.medium.com/automating-docker-builds-in-kubernetes-15ef047795ab)

> @ 2025-11-03 05:37:24 UTC
>
> Yeah i think i want to support buildah primarily. Not to say we can't have ways to run alternative images later. Why would i want to choose node affinity over nodeselector?

<details><summary>Thinking</summary>

The user is asking about why they would choose node affinity over nodeSelector, and they want to use Buildah primarily for container builds. Let me provide a comprehensive answer about the differences between nodeSelector and nodeAffinity, and then update the code examples to use Buildah instead of Docker-in-Docker.

Based on the research provided:
1. NodeSelector is simpler but less flexible
2. NodeAffinity provides more advanced scheduling options
3. Buildah can run rootless, which is better for security

Let me structure a response that addresses both points clearly.

</details>

Great choice on **Buildah**! It's much more secure since it can run rootless without requiring privileged containers. Let me explain the nodeAffinity vs nodeSelector decision and provide updated code for Buildah.

## **NodeSelector vs NodeAffinity: When to Choose Which**

### **NodeSelector**
- **Simple equality-based selection** [^4][^2]
- Only supports "AND" logic between labels
- More concise for basic requirements
- Example: `kubernetes.io/arch: amd64`

### **NodeAffinity**
NodeAffinity offers **significantly more flexibility** that you'll likely want for a runner controller [^2][^1]:

1. **Complex expressions**: Use operators like `In`, `NotIn`, `Exists`, `DoesNotExist`, `Gt`, `Lt`
2. **Soft preferences**: Use `preferredDuringSchedulingIgnoredDuringExecution` for optional requirements
3. **Multiple criteria**: Combine multiple conditions with OR logic
**Weighted preferences**: Assign weights to preferred nodes
376 + 
377 + For your runner controller, **nodeAffinity is better** because:
378 + - You can handle fallback scenarios (prefer arm64 but accept amd64)
379 + - You can exclude certain nodes (avoid nodes with specific taints)
380 + - You can combine architecture with other requirements (GPU nodes, specific OS versions)
381 + 
382 + ## **Buildah-Based Runner Implementation**
383 + 
384 + Here's the updated implementation using Buildah:
385 + 
386 + ```go
387 + package operator
388 + 
389 + import (
390 +     "context"
391 +     "fmt"
392 +     batchv1 "k8s.io/api/batch/v1"
393 +     corev1 "k8s.io/api/core/v1"
394 +     "k8s.io/apimachinery/pkg/api/resource" // needed for resource.MustParse below
395 +     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
396 +     "k8s.io/client-go/kubernetes"
397 + )
398 + type BuildahRunnerConfig struct {
399 +     Registry      string
400 +     RegistryAuth  string // base64 encoded auth
401 +     StorageDriver string // overlay, vfs
402 +     Architecture  string // amd64, arm64
403 + }
404 + 
405 + func (rc *RunnerController) CreateBuildahRunnerJob(
406 +     workflowID, stepName string,
407 +     buildContext string,
408 +     config BuildahRunnerConfig,
409 + ) (*batchv1.Job, error) {
410 + 
411 +     job := &batchv1.Job{
412 +         ObjectMeta: metav1.ObjectMeta{
413 +             Name:      fmt.Sprintf("runner-%s-%s", workflowID, stepName),
414 +             Namespace: rc.namespace,
415 +             Labels: map[string]string{
416 +                 "workflow-id": workflowID,
417 +                 "step-name":   stepName,
418 +                 "builder":     "buildah",
419 +             },
420 +         },
421 +         Spec: batchv1.JobSpec{
422 +             BackoffLimit: &[]int32{2}[0],
423 +             Template: corev1.PodTemplateSpec{
424 +                 ObjectMeta: metav1.ObjectMeta{
425 +                     Labels: map[string]string{
426 +                         "workflow-id": workflowID,
427 +                         "step-name":   stepName,
428 +                     },
429 +                     Annotations: map[string]string{
430 +                         "container.apparmor.security.beta.kubernetes.io/buildah": "unconfined",
431 +                     },
432 +                 },
433 +                 Spec: corev1.PodSpec{
434 +                     RestartPolicy: corev1.RestartPolicyNever,
435 + 
436 +                     // Using NodeAffinity for flexible architecture targeting
437 +                     Affinity: &corev1.Affinity{
438 +                         NodeAffinity: &corev1.NodeAffinity{
439 +                             RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
440 +                                 NodeSelectorTerms: []corev1.NodeSelectorTerm{
441 +                                     {
442 +                                         MatchExpressions: []corev1.NodeSelectorRequirement{
443 +                                             {
444 +                                                 Key:      "kubernetes.io/arch",
445 +                                                 Operator: corev1.NodeSelectorOpIn,
446 +                                                 Values:   []string{config.Architecture},
447 +                                             },
448 +                                         },
449 +                                     },
450 +                                 },
451 +                             },
452 +                             // Optional: prefer dedicated runner nodes when available
453 +                             PreferredDuringSchedulingIgnoredDuringExecution: []corev1.PreferredSchedulingTerm{
454 +                                 {
455 +                                     Weight: 100,
456 +                                     Preference: corev1.NodeSelectorTerm{
457 +                                         MatchExpressions: []corev1.NodeSelectorRequirement{
458 +                                             {
459 +                                                 Key:      "node-role.kubernetes.io/runner",
460 +                                                 Operator: corev1.NodeSelectorOpExists,
461 +                                             },
462 +                                         },
463 +                                     },
464 +                                 },
465 +                             },
466 +                         },
467 +                     },
468 + 
469 +                     Containers: []corev1.Container{
470 +                         {
471 +                             Name:    "buildah",
472 +                             Image:   "quay.io/buildah/stable:latest",
473 +                             Command: []string{"/bin/bash", "-c"},
474 +                             Args: []string{fmt.Sprintf(`
475 + set -e
476 + echo "Starting build process..."
477 + 
478 + # Clone or prepare build context
479 + %s
480 + 
481 + # Build the image
482 + buildah bud \
483 +   --storage-driver=%s \
484 +   --format=docker \
485 +   --tls-verify=false \
486 +   -t %s/%s:%s \
487 +   -f Dockerfile . 
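# NOTE: --tls-verify=false above is only sensible for local/dev registries;
# keep TLS verification enabled against any real registry.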
488 + 489 + # Push to registry (if configured) 490 + if [ -n "$REGISTRY_AUTH" ]; then 491 + echo "$REGISTRY_AUTH" | base64 -d > /tmp/auth.json 492 + buildah push \ 493 + --authfile /tmp/auth.json \ 494 + --tls-verify=false \ 495 + %s/%s:%s 496 + rm /tmp/auth.json 497 + fi 498 + 499 + echo "Build completed successfully" 500 + `, 501 + buildContext, 502 + config.StorageDriver, 503 + config.Registry, workflowID, stepName, 504 + config.Registry, workflowID, stepName, 505 + )}, 506 + 507 + SecurityContext: &corev1.SecurityContext{ 508 + // Buildah can run rootless! 509 + RunAsUser: &[]int64{1000}[0], 510 + RunAsGroup: &[]int64{1000}[0], 511 + Capabilities: &corev1.Capabilities{ 512 + Add: []corev1.Capability{ 513 + "SETUID", 514 + "SETGID", 515 + }, 516 + }, 517 + }, 518 + 519 + Env: []corev1.EnvVar{ 520 + { 521 + Name: "STORAGE_DRIVER", 522 + Value: config.StorageDriver, 523 + }, 524 + { 525 + Name: "REGISTRY_AUTH", 526 + Value: config.RegistryAuth, 527 + }, 528 + { 529 + Name: "BUILDAH_ISOLATION", 530 + Value: "chroot", 531 + }, 532 + }, 533 + 534 + Resources: corev1.ResourceRequirements{ 535 + Requests: corev1.ResourceList{ 536 + corev1.ResourceCPU: resource.MustParse("500m"), 537 + corev1.ResourceMemory: resource.MustParse("1Gi"), 538 + }, 539 + Limits: corev1.ResourceList{ 540 + corev1.ResourceCPU: resource.MustParse("2"), 541 + corev1.ResourceMemory: resource.MustParse("4Gi"), 542 + }, 543 + }, 544 + 545 + VolumeMounts: []corev1.VolumeMount{ 546 + { 547 + Name: "buildah-storage", 548 + MountPath: "/var/lib/containers", 549 + }, 550 + }, 551 + }, 552 + }, 553 + 554 + Volumes: []corev1.Volume{ 555 + { 556 + Name: "buildah-storage", 557 + VolumeSource: corev1.VolumeSource{ 558 + EmptyDir: &corev1.EmptyDirVolumeSource{ 559 + Medium: corev1.StorageMediumMemory, // Use RAM for speed 560 + }, 561 + }, 562 + }, 563 + }, 564 + }, 565 + }, 566 + }, 567 + } 568 + 569 + return rc.clientset.BatchV1().Jobs(rc.namespace).Create(context.TODO(), job, metav1.CreateOptions{}) 570 + } 571 + ``` 572 + 573 + ## **Supporting Multiple Build Tools with Strategy Pattern** 574 + 575 + Since you mentioned supporting alternative images later, here's an extensible approach: 576 + 577 + ```go 578 + type BuilderStrategy interface { 579 + CreateJob(workflowID, stepName string, config BuildConfig) *batchv1.Job 580 + GetImage() string 581 + GetSecurityContext() *corev1.SecurityContext 582 + } 583 + 584 + type BuildahStrategy struct{} 585 + type KanikoStrategy struct{} 586 + type DockerStrategy struct{} 587 + 588 + func (b *BuildahStrategy) GetSecurityContext() *corev1.SecurityContext { 589 + return &corev1.SecurityContext{ 590 + RunAsUser: &[]int64{1000}[0], 591 + RunAsGroup: &[]int64{1000}[0], 592 + Capabilities: &corev1.Capabilities{ 593 + Add: []corev1.Capability{"SETUID", "SETGID"}, 594 + }, 595 + } 596 + } 597 + 598 + func (k *KanikoStrategy) GetSecurityContext() *corev1.SecurityContext { 599 + return &corev1.SecurityContext{ 600 + RunAsUser: &[]int64{0}[0], // Kaniko needs root but doesn't need privileged 601 + } 602 + } 603 + 604 + // Factory pattern for builder selection 605 + func GetBuilderStrategy(builderType string) BuilderStrategy { 606 + switch builderType { 607 + case "kaniko": 608 + return &KanikoStrategy{} 609 + case "docker": 610 + return &DockerStrategy{} 611 + default: 612 + return &BuildahStrategy{} // Default to Buildah 613 + } 614 + } 615 + ``` 616 + 617 + ## **Advanced NodeAffinity Example** 618 + 619 + Here's a more sophisticated nodeAffinity setup that showcases why it's superior for complex 
requirements: 620 + 621 + ```go 622 + Affinity: &corev1.Affinity{ 623 + NodeAffinity: &corev1.NodeAffinity{ 624 + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ 625 + NodeSelectorTerms: []corev1.NodeSelectorTerm{ 626 + { 627 + MatchExpressions: []corev1.NodeSelectorRequirement{ 628 + { 629 + Key: "kubernetes.io/arch", 630 + Operator: corev1.NodeSelectorOpIn, 631 + Values: []string{"amd64", "arm64"}, // Either architecture 632 + }, 633 + { 634 + Key: "node.kubernetes.io/instance-type", 635 + Operator: corev1.NodeSelectorOpNotIn, 636 + Values: []string{"t2.micro", "t2.small"}, // Exclude small instances 637 + }, 638 + }, 639 + }, 640 + }, 641 + }, 642 + PreferredDuringSchedulingIgnoredDuringExecution: []corev1.PreferredSchedulingTerm{ 643 + { 644 + Weight: 100, 645 + Preference: corev1.NodeSelectorTerm{ 646 + MatchExpressions: []corev1.NodeSelectorRequirement{ 647 + { 648 + Key: "kubernetes.io/arch", 649 + Operator: corev1.NodeSelectorOpIn, 650 + Values: []string{config.PreferredArch}, // Prefer specific arch 651 + }, 652 + }, 653 + }, 654 + }, 655 + { 656 + Weight: 50, 657 + Preference: corev1.NodeSelectorTerm{ 658 + MatchExpressions: []corev1.NodeSelectorRequirement{ 659 + { 660 + Key: "workload-type", 661 + Operator: corev1.NodeSelectorOpIn, 662 + Values: []string{"build-optimized"}, 663 + }, 664 + }, 665 + }, 666 + }, 667 + }, 668 + }, 669 + } 670 + ``` 671 + 672 + **Buildah** is an excellent choice for your runner controller because it: 673 + - Runs without root privileges (better security) [^5][^3] 674 + - Doesn't require a daemon 675 + - Has lower resource overhead 676 + - Supports multiple storage drivers 677 + - Works well in Kubernetes environments 678 + 679 + The combination of Buildah with nodeAffinity gives you a secure, flexible runner system that can intelligently place workloads across your cluster. 680 + 681 + 682 + _References_: 683 + [^1]: [Mastering Node Placement in Kubernetes: Node Selectors vs. Node Affinity](https://medium.com/@rahulbansod519/mastering-node-placement-in-kubernetes-node-selectors-vs-node-affinity-f6a24cda0ddd) 684 + [^2]: [Node Selector vs Node Affinity in Kubernetes - Medium](https://medium.com/@reach2shristi.81/node-selector-vs-node-affinity-in-kubernetes-3292e9552aa0) 685 + [^3]: [Buildah and Podman Relationship | buildah.io](https://buildah.io/blogs/2018/10/31/podman-buildah-relationship.html) 686 + [^4]: [Kubernetes: Node Selector and Node Affinity | by Claire Lee | Medium](https://yuminlee2.medium.com/kubernetes-node-selector-and-node-affinity-ecb3a4d69165) 687 + [^5]: [Building a Buildah container image for Kubernetes](https://buildah.io/blogs/2018/03/01/building-buildah-container-image-for-kubernetes.html) 688 + 689 + > @ 2025-11-03 05:43:40 UTC 690 + > 691 + > How does GitHub handle isolation? I believe each step in a job is the same runner, but different jobs are isolated? This code appears to make a single step into a job? Is there shared context between steps? Ie environment variables, cached files etc? 692 + 693 + <details><summary>Thinking</summary> 694 + 695 + The user is asking about GitHub's isolation model and noticing that my code creates a separate Kubernetes Job for each step, which is different from how GitHub Actions works. They're right - GitHub Actions runs all steps in a job on the same runner with shared context, while different jobs are isolated. Let me explain this architecture and provide updated code that better mimics GitHub's model. 696 + 697 + </details> 698 + 699 + Excellent observation! 
You're absolutely right - **GitHub Actions runs all steps within a job on the same runner**, sharing context between steps. Your current code creates separate Kubernetes Jobs for each step, which breaks this model. Let me explain GitHub's architecture and provide a better implementation.
700 + 
701 + ## **GitHub Actions Isolation Model**
702 + 
703 + ### **How GitHub Actually Works:**
704 + - **Job Level**: Each job gets its own runner (isolated environment) [^1]
705 + - **Step Level**: All steps within a job execute sequentially on the same runner [^1]
706 + - **Shared Context**: Steps share:
707 +   - Filesystem/workspace
708 +   - Environment variables
709 +   - Docker volumes [^4]
710 +   - Network context
711 +   - Installed tools/dependencies
712 + 
713 + ### **Isolation Boundaries:**
714 + - **Between Jobs**: Complete isolation (different runners/pods)
715 + - **Between Steps**: No isolation (same container/environment)
716 + - **Between Workflows**: Complete isolation
717 + 
718 + ## **Corrected Implementation: Job = Pod, Steps = Sequential Commands**
719 + 
720 + Here's a redesigned approach that properly mimics GitHub's model:
721 + 
722 + ```go
723 + package operator
724 + 
725 + import (
726 +     "context"
727 +     "fmt"
728 +     "strings" // used by the script builder below
729 + 
730 +     batchv1 "k8s.io/api/batch/v1"
731 +     corev1 "k8s.io/api/core/v1"
732 +     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
733 + )
734 + 
735 + type WorkflowJob struct {
736 +     ID           string
737 +     Name         string
738 +     Steps        []Step
739 +     Architecture string
740 +     Environment  map[string]string
741 + }
742 + 
743 + type Step struct {
744 +     Name    string
745 +     Command string
746 +     Env     map[string]string
747 +     Uses    string // For action references
748 + }
749 + 
750 + func (rc *RunnerController) CreateJobRunner(job WorkflowJob) (*batchv1.Job, error) {
751 +     // Build a single script that executes all steps sequentially
752 +     runnerScript := rc.buildRunnerScript(job.Steps, job.Environment)
753 + 
754 +     k8sJob := &batchv1.Job{
755 +         ObjectMeta: metav1.ObjectMeta{
756 +             Name:      fmt.Sprintf("runner-job-%s", job.ID),
757 +             Namespace: rc.namespace,
758 +             Labels: map[string]string{
759 +                 "workflow-id": job.ID,
760 +                 "job-name":    job.Name,
761 +                 "type":        "github-runner",
762 +             },
763 +         },
764 +         Spec: batchv1.JobSpec{
765 +             BackoffLimit: &[]int32{0}[0], // No retries for workflow jobs
766 +             Template: corev1.PodTemplateSpec{
767 +                 Spec: corev1.PodSpec{
768 +                     RestartPolicy: corev1.RestartPolicyNever,
769 + 
770 +                     // Single init container to set up workspace
771 +                     InitContainers: []corev1.Container{
772 +                         {
773 +                             Name:    "setup-workspace",
774 +                             Image:   "alpine/git:latest",
775 +                             Command: []string{"/bin/sh", "-c"},
776 +                             Args: []string{`
777 + # Clone repository (REPO_URL and GITHUB_SHA are expected to be injected via the pod env)
778 + git clone ${REPO_URL} /workspace || true
779 + cd /workspace
780 + git checkout ${GITHUB_SHA:-main}
781 + `},
782 +                             VolumeMounts: []corev1.VolumeMount{
783 +                                 {
784 +                                     Name:      "workspace",
785 +                                     MountPath: "/workspace",
786 +                                 },
787 +                             },
788 +                         },
789 +                     },
790 + 
791 +                     // Main runner container executes ALL steps
792 +                     Containers: []corev1.Container{
793 +                         {
794 +                             Name:    "runner",
795 +                             Image:   "quay.io/buildah/stable:latest",
796 +                             Command: []string{"/bin/bash", "-c"},
797 +                             Args:    []string{runnerScript},
798 + 
799 +                             WorkingDir: "/workspace",
800 + 
801 +                             Env: rc.buildEnvironment(job), // injects REPO_URL, GITHUB_SHA, and job-level env (helper not shown)
802 + 
803 +                             VolumeMounts: []corev1.VolumeMount{
804 +                                 {
805 +                                     Name:      "workspace",
806 +                                     MountPath: "/workspace",
807 +                                 },
808 +                                 {
809 +                                     Name:      "step-outputs",
810 +                                     MountPath: "/tmp/step-outputs",
811 +                                 },
812 +                                 {
813 +                                     Name: 
"github-env", 814 + MountPath: "/tmp/github", 815 + }, 816 + { 817 + Name: "buildah-storage", 818 + MountPath: "/var/lib/containers", 819 + }, 820 + }, 821 + 822 + SecurityContext: &corev1.SecurityContext{ 823 + RunAsUser: &[]int64{1000}[0], 824 + RunAsGroup: &[]int64{1000}[0], 825 + }, 826 + }, 827 + }, 828 + 829 + Volumes: []corev1.Volume{ 830 + { 831 + Name: "workspace", 832 + VolumeSource: corev1.VolumeSource{ 833 + EmptyDir: &corev1.EmptyDirVolumeSource{}, 834 + }, 835 + }, 836 + { 837 + Name: "step-outputs", 838 + VolumeSource: corev1.VolumeSource{ 839 + EmptyDir: &corev1.EmptyDirVolumeSource{}, 840 + }, 841 + }, 842 + { 843 + Name: "github-env", 844 + VolumeSource: corev1.VolumeSource{ 845 + EmptyDir: &corev1.EmptyDirVolumeSource{}, 846 + }, 847 + }, 848 + { 849 + Name: "buildah-storage", 850 + VolumeSource: corev1.VolumeSource{ 851 + EmptyDir: &corev1.EmptyDirVolumeSource{ 852 + Medium: corev1.StorageMediumMemory, 853 + }, 854 + }, 855 + }, 856 + }, 857 + 858 + NodeSelector: map[string]string{ 859 + "kubernetes.io/arch": job.Architecture, 860 + }, 861 + }, 862 + }, 863 + }, 864 + } 865 + 866 + return rc.clientset.BatchV1().Jobs(rc.namespace).Create(context.TODO(), k8sJob, metav1.CreateOptions{}) 867 + } 868 + ``` 869 + 870 + ## **Step Execution Script Builder** 871 + 872 + This critical function creates a bash script that maintains context between steps: 873 + 874 + ```go 875 + func (rc *RunnerController) buildRunnerScript(steps []Step, jobEnv map[string]string) string { 876 + var script strings.Builder 877 + 878 + script.WriteString(`#!/bin/bash 879 + set -e 880 + 881 + # GitHub Actions compatible environment setup 882 + export GITHUB_ENV=/tmp/github/env 883 + export GITHUB_PATH=/tmp/github/path 884 + export GITHUB_OUTPUT=/tmp/github/output 885 + export GITHUB_STEP_SUMMARY=/tmp/github/step-summary.md 886 + 887 + # Initialize files 888 + touch $GITHUB_ENV $GITHUB_PATH $GITHUB_OUTPUT $GITHUB_STEP_SUMMARY 889 + 890 + # Helper functions matching GitHub Actions 891 + add_to_env() { 892 + echo "$1=$2" >> $GITHUB_ENV 893 + export $1="$2" 894 + } 895 + 896 + add_to_path() { 897 + echo "$1" >> $GITHUB_PATH 898 + export PATH="$1:$PATH" 899 + } 900 + 901 + set_output() { 902 + echo "$1=$2" >> $GITHUB_OUTPUT 903 + } 904 + 905 + # Function to load environment between steps 906 + load_github_env() { 907 + if [ -f "$GITHUB_ENV" ]; then 908 + set -a 909 + source $GITHUB_ENV 910 + set +a 911 + fi 912 + if [ -f "$GITHUB_PATH" ]; then 913 + while IFS= read -r line; do 914 + export PATH="$line:$PATH" 915 + done < $GITHUB_PATH 916 + fi 917 + } 918 + 919 + `) 920 + 921 + // Add each step 922 + for i, step := range steps { 923 + script.WriteString(fmt.Sprintf(` 924 + echo "==== Starting Step: %s ====" 925 + load_github_env 926 + 927 + # Step %d environment variables 928 + `, step.Name, i+1)) 929 + 930 + // Add step-specific environment variables 931 + for k, v := range step.Env { 932 + script.WriteString(fmt.Sprintf("export %s='%s'\n", k, v)) 933 + } 934 + 935 + // Execute step command 936 + script.WriteString(fmt.Sprintf(` 937 + # Execute step 938 + ( 939 + %s 940 + ) || { 941 + echo "Step '%s' failed with exit code $?" 
942 + exit 1 943 + } 944 + 945 + echo "==== Completed Step: %s ====" 946 + `, step.Command, step.Name, step.Name)) 947 + } 948 + 949 + return script.String() 950 + } 951 + ``` 952 + 953 + ## **Shared Context Implementation** 954 + 955 + Here's how to properly share context between steps like GitHub Actions does [^2][^3]: 956 + 957 + ```go 958 + type StepOutput struct { 959 + StepName string 960 + Outputs map[string]string 961 + } 962 + 963 + // Enhanced step structure with outputs 964 + type EnhancedStep struct { 965 + Step 966 + ID string // For referencing outputs 967 + Outputs []string // Expected output keys 968 + } 969 + 970 + func (rc *RunnerController) buildEnhancedRunnerScript(steps []EnhancedStep) string { 971 + var script strings.Builder 972 + 973 + // ... (previous setup code) ... 974 + 975 + for i, step := range steps { 976 + script.WriteString(fmt.Sprintf(` 977 + # Step: %s (id: %s) 978 + STEP_OUTPUT_FILE="/tmp/step-outputs/%s.json" 979 + 980 + # Run step and capture outputs 981 + ( 982 + %s 983 + 984 + # Save step outputs if any were set 985 + if [ -f "$GITHUB_OUTPUT" ]; then 986 + # Parse and save outputs for this step 987 + python3 -c " 988 + import json 989 + outputs = {} 990 + with open('$GITHUB_OUTPUT', 'r') as f: 991 + for line in f: 992 + if '=' in line: 993 + key, value = line.strip().split('=', 1) 994 + outputs[key] = value 995 + with open('$STEP_OUTPUT_FILE', 'w') as f: 996 + json.dump(outputs, f) 997 + " 998 + fi 999 + ) 1000 + 1001 + # Make outputs available as environment variables for next steps 1002 + if [ -f "$STEP_OUTPUT_FILE" ]; then 1003 + export STEP_%s_OUTPUTS=$(cat $STEP_OUTPUT_FILE) 1004 + fi 1005 + `, step.Name, step.ID, step.ID, step.Command, strings.ToUpper(step.ID))) 1006 + } 1007 + 1008 + return script.String() 1009 + } 1010 + ``` 1011 + 1012 + ## **Multi-Container Pattern for Complex Workflows** 1013 + 1014 + For workflows that need services (like databases), use a multi-container pod: 1015 + 1016 + ```go 1017 + // For jobs that need service containers (like GitHub Actions services) 1018 + func (rc *RunnerController) CreateJobWithServices(job WorkflowJob, services []ServiceContainer) (*batchv1.Job, error) { 1019 + containers := []corev1.Container{ 1020 + // Main runner container 1021 + { 1022 + Name: "runner", 1023 + // ... (as before) 1024 + }, 1025 + } 1026 + 1027 + // Add service containers that run alongside the main runner 1028 + for _, svc := range services { 1029 + containers = append(containers, corev1.Container{ 1030 + Name: svc.Name, 1031 + Image: svc.Image, 1032 + Env: convertToEnvVar(svc.Environment), 1033 + Ports: svc.Ports, 1034 + // Services run for the lifetime of the job 1035 + Command: svc.Command, 1036 + }) 1037 + } 1038 + 1039 + // Update pod spec with all containers 1040 + // ... 
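// (sketch) assuming the elided code assembles k8sJob with the shared volumes
// and the combined container list, submit it exactly as in CreateJobRunner:
return rc.clientset.BatchV1().Jobs(rc.namespace).Create(context.TODO(), k8sJob, metav1.CreateOptions{})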
1041 + } 1042 + ``` 1043 + 1044 + ## **Key Differences from Original Implementation** 1045 + 1046 + | Aspect | GitHub Actions Model | Your Original Code | Corrected Approach | 1047 + |--------|---------------------|-------------------|-------------------| 1048 + | **Job Mapping** | Job = Runner Instance | Step = K8s Job | Job = K8s Job/Pod | 1049 + | **Step Execution** | Sequential in same environment | Separate pods | Sequential in same container | 1050 + | **Filesystem** | Shared across steps | Isolated per step | Shared via volumes | 1051 + | **Environment Variables** | Persistent across steps | Isolated | Persistent via GITHUB_ENV | 1052 + | **Docker/Buildah Cache** | Shared in job | Lost between steps | Shared via volume | 1053 + 1054 + This corrected approach properly mimics GitHub Actions' execution model while maintaining the isolation boundaries that make sense in Kubernetes. 1055 + 1056 + 1057 + _References_: 1058 + [^1]: [Understanding GitHub Actions](https://docs.github.com/articles/getting-started-with-github-actions) 1059 + [^2]: [Store information in variables - GitHub Docs](https://docs.github.com/en/actions/how-tos/write-workflows/choose-what-workflows-do/use-variables) 1060 + [^3]: [Contexts - GitHub Docs](https://docs.github.com/en/actions/concepts/workflows-and-actions/contexts) 1061 + [^4]: [Workflow syntax for GitHub Actions](https://docs.github.com/actions/using-workflows/workflow-syntax-for-github-actions) 1062 + 1063 + 1064 + --- 1065 + 1066 + _Generated by [Kagi Assistant](https://kagi.com/assistant)_
+362
Makefile
··· 1 + # VERSION defines the project version for the bundle. 2 + # Update this value when you upgrade the version of your project. 3 + # To re-generate a bundle for another specific version without changing the standard setup, you can: 4 + # - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2) 5 + # - use environment variables to overwrite this value (e.g export VERSION=0.0.2) 6 + VERSION ?= 0.0.1 7 + 8 + # CHANNELS define the bundle channels used in the bundle. 9 + # Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable") 10 + # To re-generate a bundle for other specific channels without changing the standard setup, you can: 11 + # - use the CHANNELS as arg of the bundle target (e.g make bundle CHANNELS=candidate,fast,stable) 12 + # - use environment variables to overwrite this value (e.g export CHANNELS="candidate,fast,stable") 13 + ifneq ($(origin CHANNELS), undefined) 14 + BUNDLE_CHANNELS := --channels=$(CHANNELS) 15 + endif 16 + 17 + # DEFAULT_CHANNEL defines the default channel used in the bundle. 18 + # Add a new line here if you would like to change its default config. (E.g DEFAULT_CHANNEL = "stable") 19 + # To re-generate a bundle for any other default channel without changing the default setup, you can: 20 + # - use the DEFAULT_CHANNEL as arg of the bundle target (e.g make bundle DEFAULT_CHANNEL=stable) 21 + # - use environment variables to overwrite this value (e.g export DEFAULT_CHANNEL="stable") 22 + ifneq ($(origin DEFAULT_CHANNEL), undefined) 23 + BUNDLE_DEFAULT_CHANNEL := --default-channel=$(DEFAULT_CHANNEL) 24 + endif 25 + BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL) 26 + 27 + # IMAGE_TAG_BASE defines the docker.io namespace and part of the image name for remote images. 28 + # This variable is used to construct full image tags for bundle and catalog images. 29 + # 30 + # For example, running 'make bundle-build bundle-push catalog-build catalog-push' will build and push both 31 + # j5t.io/loom-bundle:$VERSION and j5t.io/loom-catalog:$VERSION. 32 + IMAGE_TAG_BASE ?= j5t.io/loom 33 + 34 + # BUNDLE_IMG defines the image:tag used for the bundle. 35 + # You can use it as an arg. (E.g make bundle-build BUNDLE_IMG=<some-registry>/<project-name-bundle>:<tag>) 36 + BUNDLE_IMG ?= $(IMAGE_TAG_BASE)-bundle:v$(VERSION) 37 + 38 + # BUNDLE_GEN_FLAGS are the flags passed to the operator-sdk generate bundle command 39 + BUNDLE_GEN_FLAGS ?= -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS) 40 + 41 + # USE_IMAGE_DIGESTS defines if images are resolved via tags or digests 42 + # You can enable this value if you would like to use SHA Based Digests 43 + # To enable set flag to true 44 + USE_IMAGE_DIGESTS ?= false 45 + ifeq ($(USE_IMAGE_DIGESTS), true) 46 + BUNDLE_GEN_FLAGS += --use-image-digests 47 + endif 48 + 49 + # Set the Operator SDK version to use. By default, what is installed on the system is used. 50 + # This is useful for CI or a project to utilize a specific version of the operator-sdk toolkit. 51 + OPERATOR_SDK_VERSION ?= v1.41.1 52 + # Image URL to use all building/pushing image targets 53 + IMG ?= controller:latest 54 + 55 + # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) 56 + ifeq (,$(shell go env GOBIN)) 57 + GOBIN=$(shell go env GOPATH)/bin 58 + else 59 + GOBIN=$(shell go env GOBIN) 60 + endif 61 + 62 + # CONTAINER_TOOL defines the container tool to be used for building images. 
63 + # Be aware that the target commands are only tested with Docker which is 64 + # scaffolded by default. However, you might want to replace it to use other 65 + # tools. (i.e. podman) 66 + CONTAINER_TOOL ?= docker 67 + 68 + # Setting SHELL to bash allows bash commands to be executed by recipes. 69 + # Options are set to exit when a recipe line exits non-zero or a piped command fails. 70 + SHELL = /usr/bin/env bash -o pipefail 71 + .SHELLFLAGS = -ec 72 + 73 + .PHONY: all 74 + all: build 75 + 76 + ##@ General 77 + 78 + # The help target prints out all targets with their descriptions organized 79 + # beneath their categories. The categories are represented by '##@' and the 80 + # target descriptions by '##'. The awk command is responsible for reading the 81 + # entire set of makefiles included in this invocation, looking for lines of the 82 + # file as xyz: ## something, and then pretty-format the target and help. Then, 83 + # if there's a line with ##@ something, that gets pretty-printed as a category. 84 + # More info on the usage of ANSI control characters for terminal formatting: 85 + # https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters 86 + # More info on the awk command: 87 + # http://linuxcommand.org/lc3_adv_awk.php 88 + 89 + .PHONY: help 90 + help: ## Display this help. 91 + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) 92 + 93 + ##@ Development 94 + 95 + .PHONY: manifests 96 + manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. 97 + $(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases 98 + 99 + .PHONY: generate 100 + generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. 101 + $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." 102 + 103 + .PHONY: fmt 104 + fmt: ## Run go fmt against code. 105 + go fmt ./... 106 + 107 + .PHONY: vet 108 + vet: ## Run go vet against code. 109 + go vet ./... 110 + 111 + .PHONY: test 112 + test: manifests generate fmt vet setup-envtest ## Run tests. 113 + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test $$(go list ./... | grep -v /e2e) -coverprofile cover.out 114 + 115 + # TODO(user): To use a different vendor for e2e tests, modify the setup under 'tests/e2e'. 116 + # The default setup assumes Kind is pre-installed and builds/loads the Manager Docker image locally. 117 + # CertManager is installed by default; skip with: 118 + # - CERT_MANAGER_INSTALL_SKIP=true 119 + KIND_CLUSTER ?= loom-test-e2e 120 + 121 + .PHONY: setup-test-e2e 122 + setup-test-e2e: ## Set up a Kind cluster for e2e tests if it does not exist 123 + @command -v $(KIND) >/dev/null 2>&1 || { \ 124 + echo "Kind is not installed. Please install Kind manually."; \ 125 + exit 1; \ 126 + } 127 + @case "$$($(KIND) get clusters)" in \ 128 + *"$(KIND_CLUSTER)"*) \ 129 + echo "Kind cluster '$(KIND_CLUSTER)' already exists. Skipping creation." ;; \ 130 + *) \ 131 + echo "Creating Kind cluster '$(KIND_CLUSTER)'..."; \ 132 + $(KIND) create cluster --name $(KIND_CLUSTER) ;; \ 133 + esac 134 + 135 + .PHONY: test-e2e 136 + test-e2e: setup-test-e2e manifests generate fmt vet ## Run the e2e tests. Expected an isolated environment using Kind. 
137 + KIND_CLUSTER=$(KIND_CLUSTER) go test ./test/e2e/ -v -ginkgo.v 138 + $(MAKE) cleanup-test-e2e 139 + 140 + .PHONY: cleanup-test-e2e 141 + cleanup-test-e2e: ## Tear down the Kind cluster used for e2e tests 142 + @$(KIND) delete cluster --name $(KIND_CLUSTER) 143 + 144 + .PHONY: lint 145 + lint: golangci-lint ## Run golangci-lint linter 146 + $(GOLANGCI_LINT) run 147 + 148 + .PHONY: lint-fix 149 + lint-fix: golangci-lint ## Run golangci-lint linter and perform fixes 150 + $(GOLANGCI_LINT) run --fix 151 + 152 + .PHONY: lint-config 153 + lint-config: golangci-lint ## Verify golangci-lint linter configuration 154 + $(GOLANGCI_LINT) config verify 155 + 156 + ##@ Build 157 + 158 + .PHONY: build 159 + build: manifests generate fmt vet ## Build manager binary. 160 + go build -o bin/manager cmd/main.go 161 + 162 + .PHONY: run 163 + run: manifests generate fmt vet ## Run a controller from your host. 164 + go run ./cmd/main.go 165 + 166 + # If you wish to build the manager image targeting other platforms you can use the --platform flag. 167 + # (i.e. docker build --platform linux/arm64). However, you must enable docker buildKit for it. 168 + # More info: https://docs.docker.com/develop/develop-images/build_enhancements/ 169 + .PHONY: docker-build 170 + docker-build: ## Build docker image with the manager. 171 + cd .. && $(CONTAINER_TOOL) build -f loom/Dockerfile -t ${IMG} . 172 + 173 + .PHONY: docker-push 174 + docker-push: ## Push docker image with the manager. 175 + $(CONTAINER_TOOL) push ${IMG} 176 + 177 + # PLATFORMS defines the target platforms for the manager image be built to provide support to multiple 178 + # architectures. (i.e. make docker-buildx IMG=myregistry/mypoperator:0.0.1). To use this option you need to: 179 + # - be able to use docker buildx. More info: https://docs.docker.com/build/buildx/ 180 + # - have enabled BuildKit. More info: https://docs.docker.com/develop/develop-images/build_enhancements/ 181 + # - be able to push the image to your registry (i.e. if you do not set a valid value via IMG=<myregistry/image:<tag>> then the export will fail) 182 + # To adequately provide solutions that are compatible with multiple platforms, you should consider using this option. 183 + PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le 184 + .PHONY: docker-buildx 185 + docker-buildx: ## Build and push docker image for the manager for cross-platform support 186 + # copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile 187 + sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross 188 + - $(CONTAINER_TOOL) buildx create --name loom-builder 189 + $(CONTAINER_TOOL) buildx use loom-builder 190 + - cd .. && $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f loom/Dockerfile.cross . 191 + - $(CONTAINER_TOOL) buildx rm loom-builder 192 + rm Dockerfile.cross 193 + 194 + .PHONY: build-installer 195 + build-installer: manifests generate kustomize ## Generate a consolidated YAML with CRDs and deployment. 196 + mkdir -p dist 197 + cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} 198 + $(KUSTOMIZE) build config/default > dist/install.yaml 199 + 200 + ##@ Deployment 201 + 202 + ifndef ignore-not-found 203 + ignore-not-found = false 204 + endif 205 + 206 + .PHONY: install 207 + install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. 
208 + $(KUSTOMIZE) build config/crd | $(KUBECTL) apply -f - 209 + 210 + .PHONY: uninstall 211 + uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. 212 + $(KUSTOMIZE) build config/crd | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f - 213 + 214 + .PHONY: deploy 215 + deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. 216 + cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} 217 + $(KUSTOMIZE) build config/default | $(KUBECTL) apply -f - 218 + 219 + .PHONY: undeploy 220 + undeploy: kustomize ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. 221 + $(KUSTOMIZE) build config/default | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f - 222 + 223 + ##@ Dependencies 224 + 225 + ## Location to install dependencies to 226 + LOCALBIN ?= $(shell pwd)/bin 227 + $(LOCALBIN): 228 + mkdir -p $(LOCALBIN) 229 + 230 + ## Tool Binaries 231 + KUBECTL ?= kubectl 232 + KIND ?= kind 233 + KUSTOMIZE ?= $(LOCALBIN)/kustomize 234 + CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen 235 + ENVTEST ?= $(LOCALBIN)/setup-envtest 236 + GOLANGCI_LINT = $(LOCALBIN)/golangci-lint 237 + 238 + ## Tool Versions 239 + KUSTOMIZE_VERSION ?= v5.6.0 240 + CONTROLLER_TOOLS_VERSION ?= v0.18.0 241 + #ENVTEST_VERSION is the version of controller-runtime release branch to fetch the envtest setup script (i.e. release-0.20) 242 + ENVTEST_VERSION ?= $(shell go list -m -f "{{ .Version }}" sigs.k8s.io/controller-runtime | awk -F'[v.]' '{printf "release-%d.%d", $$2, $$3}') 243 + #ENVTEST_K8S_VERSION is the version of Kubernetes to use for setting up ENVTEST binaries (i.e. 1.31) 244 + ENVTEST_K8S_VERSION ?= $(shell go list -m -f "{{ .Version }}" k8s.io/api | awk -F'[v.]' '{printf "1.%d", $$3}') 245 + GOLANGCI_LINT_VERSION ?= v2.1.0 246 + 247 + .PHONY: kustomize 248 + kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. 249 + $(KUSTOMIZE): $(LOCALBIN) 250 + $(call go-install-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v5,$(KUSTOMIZE_VERSION)) 251 + 252 + .PHONY: controller-gen 253 + controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. 254 + $(CONTROLLER_GEN): $(LOCALBIN) 255 + $(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen,$(CONTROLLER_TOOLS_VERSION)) 256 + 257 + .PHONY: setup-envtest 258 + setup-envtest: envtest ## Download the binaries required for ENVTEST in the local bin directory. 259 + @echo "Setting up envtest binaries for Kubernetes version $(ENVTEST_K8S_VERSION)..." 260 + @$(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path || { \ 261 + echo "Error: Failed to set up envtest binaries for version $(ENVTEST_K8S_VERSION)."; \ 262 + exit 1; \ 263 + } 264 + 265 + .PHONY: envtest 266 + envtest: $(ENVTEST) ## Download setup-envtest locally if necessary. 267 + $(ENVTEST): $(LOCALBIN) 268 + $(call go-install-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest,$(ENVTEST_VERSION)) 269 + 270 + .PHONY: golangci-lint 271 + golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary. 
272 + $(GOLANGCI_LINT): $(LOCALBIN) 273 + $(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/v2/cmd/golangci-lint,$(GOLANGCI_LINT_VERSION)) 274 + 275 + # go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist 276 + # $1 - target path with name of binary 277 + # $2 - package url which can be installed 278 + # $3 - specific version of package 279 + define go-install-tool 280 + @[ -f "$(1)-$(3)" ] || { \ 281 + set -e; \ 282 + package=$(2)@$(3) ;\ 283 + echo "Downloading $${package}" ;\ 284 + rm -f $(1) || true ;\ 285 + GOBIN=$(LOCALBIN) go install $${package} ;\ 286 + mv $(1) $(1)-$(3) ;\ 287 + } ;\ 288 + ln -sf $(1)-$(3) $(1) 289 + endef 290 + 291 + .PHONY: operator-sdk 292 + OPERATOR_SDK ?= $(LOCALBIN)/operator-sdk 293 + operator-sdk: ## Download operator-sdk locally if necessary. 294 + ifeq (,$(wildcard $(OPERATOR_SDK))) 295 + ifeq (, $(shell which operator-sdk 2>/dev/null)) 296 + @{ \ 297 + set -e ;\ 298 + mkdir -p $(dir $(OPERATOR_SDK)) ;\ 299 + OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \ 300 + curl -sSLo $(OPERATOR_SDK) https://github.com/operator-framework/operator-sdk/releases/download/$(OPERATOR_SDK_VERSION)/operator-sdk_$${OS}_$${ARCH} ;\ 301 + chmod +x $(OPERATOR_SDK) ;\ 302 + } 303 + else 304 + OPERATOR_SDK = $(shell which operator-sdk) 305 + endif 306 + endif 307 + 308 + .PHONY: bundle 309 + bundle: manifests kustomize operator-sdk ## Generate bundle manifests and metadata, then validate generated files. 310 + $(OPERATOR_SDK) generate kustomize manifests -q 311 + cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG) 312 + $(KUSTOMIZE) build config/manifests | $(OPERATOR_SDK) generate bundle $(BUNDLE_GEN_FLAGS) 313 + $(OPERATOR_SDK) bundle validate ./bundle 314 + 315 + .PHONY: bundle-build 316 + bundle-build: ## Build the bundle image. 317 + $(CONTAINER_TOOL) build -f bundle.Dockerfile -t $(BUNDLE_IMG) . 318 + 319 + .PHONY: bundle-push 320 + bundle-push: ## Push the bundle image. 321 + $(MAKE) docker-push IMG=$(BUNDLE_IMG) 322 + 323 + .PHONY: opm 324 + OPM = $(LOCALBIN)/opm 325 + opm: ## Download opm locally if necessary. 326 + ifeq (,$(wildcard $(OPM))) 327 + ifeq (,$(shell which opm 2>/dev/null)) 328 + @{ \ 329 + set -e ;\ 330 + mkdir -p $(dir $(OPM)) ;\ 331 + OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \ 332 + curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.55.0/$${OS}-$${ARCH}-opm ;\ 333 + chmod +x $(OPM) ;\ 334 + } 335 + else 336 + OPM = $(shell which opm) 337 + endif 338 + endif 339 + 340 + # A comma-separated list of bundle images (e.g. make catalog-build BUNDLE_IMGS=example.com/operator-bundle:v0.1.0,example.com/operator-bundle:v0.2.0). 341 + # These images MUST exist in a registry and be pull-able. 342 + BUNDLE_IMGS ?= $(BUNDLE_IMG) 343 + 344 + # The image tag given to the resulting catalog image (e.g. make catalog-build CATALOG_IMG=example.com/operator-catalog:v0.2.0). 345 + CATALOG_IMG ?= $(IMAGE_TAG_BASE)-catalog:v$(VERSION) 346 + 347 + # Set CATALOG_BASE_IMG to an existing catalog image tag to add $BUNDLE_IMGS to that image. 348 + ifneq ($(origin CATALOG_BASE_IMG), undefined) 349 + FROM_INDEX_OPT := --from-index $(CATALOG_BASE_IMG) 350 + endif 351 + 352 + # Build a catalog image by adding bundle images to an empty catalog using the operator package manager tool, 'opm'. 353 + # This recipe invokes 'opm' in 'semver' bundle add mode. 
For more information on add modes, see: 354 + # https://github.com/operator-framework/community-operators/blob/7f1438c/docs/packaging-operator.md#updating-your-existing-operator 355 + .PHONY: catalog-build 356 + catalog-build: opm ## Build a catalog image. 357 + $(OPM) index add --container-tool $(CONTAINER_TOOL) --mode semver --tag $(CATALOG_IMG) --bundles $(BUNDLE_IMGS) $(FROM_INDEX_OPT) 358 + 359 + # Push the catalog image. 360 + .PHONY: catalog-push 361 + catalog-push: ## Push a catalog image. 362 + $(MAKE) docker-push IMG=$(CATALOG_IMG)
+416
PLAN.md
··· 1 + # Loom Kubernetes Operator - Implementation Plan 2 + 3 + ## Project Overview 4 + 5 + Loom is a Kubernetes operator for coordinating tangled.org Spindles - ephemeral CI/CD runners that execute pipelines in response to events from tangled.org knots. Inspired by GitHub's Actions Runner Controller (ARC) but adapted for tangled.org's AT Protocol-based, event-driven architecture. 6 + 7 + ## Architecture 8 + 9 + ### Core Principles 10 + - **Ephemeral Spindles**: One Kubernetes Job per pipeline event (scale-to-zero) 11 + - **Event-Driven**: WebSocket connection to tangled.org knot for pipeline events 12 + - **Code Reuse**: Leverage `tangled.org/core/spindle` for WebSocket, models, interfaces 13 + - **Simple Images**: Use standard Docker images (golang:1.24, node:20, etc.) - no Nixery for MVP 14 + - **Multi-Arch Support**: Schedule jobs on amd64/arm64 nodes based on workflow specification 15 + - **New Component**: Kubernetes-native Engine that spawns Jobs instead of Docker containers 16 + 17 + ### Key Components 18 + 19 + 1. **SpindleSet CRD**: Configures connection to tangled.org knot and job templates 20 + 2. **SpindleSet Controller**: Maintains WebSocket connection, handles pipeline events 21 + 3. **KubernetesEngine**: Implements tangled.org's Engine interface for Kubernetes Jobs 22 + 4. **Job Builder**: Generates Job specs with multi-arch node affinity 23 + 5. **Log Streamer**: Streams pod logs to knot via Kubernetes API 24 + 6. **Status Reporter**: Reports workflow status back to tangled.org 25 + 26 + --- 27 + 28 + ## Phase 1: CRD Design & Basic Structure 29 + 30 + ### SpindleSet CRD 31 + ```yaml 32 + apiVersion: loom.j5t.io/v1alpha1 33 + kind: SpindleSet 34 + metadata: 35 + name: tangled-org-spindle 36 + spec: 37 + # Knot configuration 38 + knotUrl: https://tangled.org/@org/repo 39 + knotAuthSecret: spindle-auth # Secret with auth token 40 + 41 + # Scaling configuration 42 + maxConcurrentJobs: 10 43 + 44 + # Default template (can be overridden by workflow) 45 + template: 46 + resources: 47 + requests: 48 + cpu: 500m 49 + memory: 1Gi 50 + limits: 51 + cpu: 2 52 + memory: 4Gi 53 + 54 + # Node targeting defaults 55 + nodeSelector: {} 56 + tolerations: [] 57 + affinity: {} 58 + ``` 59 + 60 + ### Status Fields 61 + - `conditions`: Standard Kubernetes conditions 62 + - `pendingJobs`, `runningJobs`: Current job counts 63 + - `completedJobs`, `failedJobs`: Cumulative counters 64 + - `webSocketConnected`: WebSocket connection status 65 + - `lastEventTime`: Last received event timestamp 66 + 67 + --- 68 + 69 + ## Phase 2: Kubernetes Engine Implementation 70 + 71 + ### Workflow File Format 72 + ```yaml 73 + # In tangled.org repository's .tangled/pipeline.yaml 74 + image: golang:1.24-bookworm 75 + architecture: amd64 # or arm64 76 + 77 + steps: 78 + - name: run tests 79 + command: | 80 + go test -v ./... 
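      # all steps share one pod and workspace, so later steps can reuse what this step produced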
81 + 82 + - name: build binary 83 + command: | 84 + go build -o app ./cmd 85 + ``` 86 + 87 + ### Job Pod Structure 88 + - **Init container**: Clone repository from tangled.org 89 + - **Main container**: 90 + - Image: `{workflow.image}` (e.g., `golang:1.24-bookworm`) 91 + - Platform: `linux/{architecture}` 92 + - Execute all steps sequentially 93 + - **Volumes**: 94 + - `/tangled/workspace` - Shared workspace (emptyDir) 95 + - `/tmp/step-outputs` - Step output communication 96 + - `/tmp/github` - GITHUB_ENV-style env passing 97 + - **Node Affinity**: Based on `architecture` field 98 + 99 + ### Multi-Architecture Support 100 + ```go 101 + func (e *KubernetesEngine) buildJobAffinity(arch string) *corev1.Affinity { 102 + return &corev1.Affinity{ 103 + NodeAffinity: &corev1.NodeAffinity{ 104 + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ 105 + NodeSelectorTerms: []corev1.NodeSelectorTerm{ 106 + { 107 + MatchExpressions: []corev1.NodeSelectorRequirement{ 108 + { 109 + Key: "kubernetes.io/arch", 110 + Operator: corev1.NodeSelectorOpIn, 111 + Values: []string{arch}, // amd64 or arm64 112 + }, 113 + }, 114 + }, 115 + }, 116 + }, 117 + }, 118 + } 119 + } 120 + ``` 121 + 122 + ### Step Execution Model 123 + Generate bash script that executes all steps sequentially: 124 + - GitHub Actions-compatible environment variables (`GITHUB_ENV`, `GITHUB_OUTPUT`) 125 + - Environment passing between steps 126 + - Error handling and exit on failure 127 + - Step-level logging with timestamps 128 + 129 + --- 130 + 131 + ## Phase 3: WebSocket Integration & Event Handling 132 + 133 + ### WebSocket Client (Reuse from core/spindle) 134 + - Connect to `{knotUrl}/spindle/events` 135 + - Handle cursor-based backfill for missed events 136 + - Subscribe to live `sh.tangled.pipeline` events 137 + - Exponential backoff on connection failures 138 + 139 + ### Event Handler → Job Creation 140 + 1. Parse pipeline event payload 141 + 2. Extract workflow definition, repo, commit SHA 142 + 3. Create Kubernetes Job with: 143 + - Correct architecture node affinity 144 + - Image from workflow spec 145 + - Steps as bash script 146 + - Owner reference to SpindleSet (for cleanup) 147 + 4. 
Label Job with pipeline metadata 148 + 149 + ### SpindleSet Controller Reconciliation 150 + - Establish WebSocket connection to knot 151 + - Subscribe to pipeline events 152 + - Create Jobs on event received 153 + - Monitor running Jobs 154 + - Update SpindleSet status 155 + - Handle connection failures 156 + 157 + --- 158 + 159 + ## Phase 4: Status Reporting & Observability 160 + 161 + ### Job Status Tracking 162 + Watch Job events via Kubernetes API: 163 + - Job created → Report "running" to knot 164 + - Job succeeded → Report "success" to knot 165 + - Job failed → Report "failure" with error to knot 166 + - Job timeout → Report "timeout" to knot 167 + 168 + ### Status Reporting to Knot 169 + Reuse `spindle/db` status update patterns: 170 + - `StatusRunning()` - When Job starts 171 + - `StatusSuccess()` - When Job succeeds 172 + - `StatusFailed()` - When Job fails with error message 173 + - `StatusTimeout()` - When Job exceeds timeout 174 + 175 + ### Prometheus Metrics 176 + ```go 177 + loom_pending_spindles // Gauge: jobs pending 178 + loom_running_spindles // Gauge: jobs running 179 + loom_completed_spindles_total // Counter: total completed 180 + loom_failed_spindles_total // Counter: total failed 181 + loom_pipeline_duration_seconds // Histogram: execution duration 182 + ``` 183 + 184 + Exposed via controller-runtime's metrics server. 185 + 186 + --- 187 + 188 + ## Phase 5: Log Streaming via Kubernetes API 189 + 190 + ### Implementation 191 + ```go 192 + func (e *KubernetesEngine) StreamLogsToKnot(ctx context.Context, jobName string, knotClient *KnotClient) { 193 + // 1. Get pod for job 194 + // 2. Stream logs via K8s API 195 + // 3. Forward each line to knot in real-time 196 + } 197 + ``` 198 + 199 + ### Log Format 200 + Send to knot in tangled.org spindle format: 201 + ```json 202 + { 203 + "kind": "data", // or "control" 204 + "content": "test output line", 205 + "stepId": 0, 206 + "stepKind": "user" 207 + } 208 + ``` 209 + 210 + --- 211 + 212 + ## Phase 6: Testing & Deployment 213 + 214 + ### Unit Tests 215 + - Job template generation with different architectures 216 + - Node affinity generation (amd64 vs arm64) 217 + - Step script builder 218 + - Mock WebSocket client 219 + 220 + ### Integration Tests 221 + ```go 222 + // Test with real cluster 223 + func TestE2E_SimpleGoPipeline(t *testing.T) { 224 + // 1. Deploy SpindleSet CR 225 + // 2. Send test pipeline event 226 + // 3. Verify Job created on correct arch node 227 + // 4. Wait for completion 228 + // 5. 
Check logs streamed to knot 229 + } 230 + ``` 231 + 232 + ### Manual Testing 233 + ```bash 234 + # Deploy operator 235 + make deploy IMG=ghcr.io/you/loom:v0.1.0 236 + 237 + # Create SpindleSet 238 + kubectl apply -f config/samples/spindleset_sample.yaml 239 + 240 + # Push code to tangled.org with .tangled/pipeline.yaml 241 + 242 + # Watch Jobs 243 + kubectl get jobs -l loom.j5t.io/spindleset=test-spindle -w 244 + 245 + # Check pod placement 246 + kubectl get pods -o wide 247 + 248 + # View logs 249 + kubectl logs -f job/runner-<hash> 250 + ``` 251 + 252 + --- 253 + 254 + ## File Structure 255 + 256 + ``` 257 + loom/ 258 + ├── api/v1alpha1/ 259 + │ ├── spindleset_types.go # SpindleSet CRD 260 + │ └── groupversion_info.go 261 + 262 + ├── internal/ 263 + │ ├── controller/ 264 + │ │ └── spindleset_controller.go # Main reconciliation loop 265 + │ │ 266 + │ └── engine/ 267 + │ └── kubernetes_engine.go # K8s-native Engine implementation 268 + 269 + ├── pkg/ 270 + │ ├── ingester/ 271 + │ │ └── websocket.go # WebSocket client (adapted from core) 272 + │ │ 273 + │ ├── jobbuilder/ 274 + │ │ ├── job_template.go # Generate Job specs 275 + │ │ ├── affinity.go # Multi-arch node affinity 276 + │ │ └── script_builder.go # Step execution script 277 + │ │ 278 + │ └── knot/ 279 + │ └── client.go # Knot API client for status/logs 280 + 281 + ├── config/ 282 + │ ├── crd/ # Generated CRD manifests 283 + │ ├── rbac/ # RBAC for Job CRUD 284 + │ └── samples/ 285 + │ └── spindleset_sample.yaml 286 + 287 + └── cmd/main.go # Operator entrypoint 288 + ``` 289 + 290 + --- 291 + 292 + ## Dependencies 293 + 294 + ### From tangled.org/core 295 + ```go 296 + import ( 297 + "tangled.org/core/spindle/models" // Engine interface 298 + "tangled.org/core/spindle/config" // Config models 299 + "tangled.org/core/api/tangled" // Pipeline types 300 + // Adapt WebSocket logic from spindle/stream.go, ingester.go 301 + ) 302 + ``` 303 + 304 + ### Kubernetes 305 + ```go 306 + import ( 307 + batchv1 "k8s.io/api/batch/v1" 308 + corev1 "k8s.io/api/core/v1" 309 + "sigs.k8s.io/controller-runtime/pkg/client" 310 + ) 311 + ``` 312 + 313 + ### Metrics 314 + ```go 315 + import ( 316 + "github.com/prometheus/client_golang/prometheus" 317 + "sigs.k8s.io/controller-runtime/pkg/metrics" 318 + ) 319 + ``` 320 + 321 + --- 322 + 323 + ## Implementation Order 324 + 325 + 1. ✅ Create SpindleSet CRD (API types, generate manifests) 326 + 2. ⏳ Implement Job builder (template generation, multi-arch affinity) 327 + 3. ⏳ Implement KubernetesEngine (Engine interface for K8s Jobs) 328 + 4. ⏳ Import WebSocket client (adapt from core/spindle) 329 + 5. ⏳ Implement SpindleSet controller (reconciliation + event handling) 330 + 6. ⏳ Add Job status monitoring (watch Jobs, report to knot) 331 + 7. ⏳ Add log streaming (K8s API → knot) 332 + 8. ⏳ Add Prometheus metrics (instrument controller) 333 + 9. ⏳ Testing (unit + integration tests) 334 + 10. ⏳ Documentation (usage guide, architecture diagrams) 335 + 336 + --- 337 + 338 + ## MVP Scope 339 + 340 + ### Include ✅ 341 + - SpindleSet CRD with knot configuration 342 + - WebSocket connection to knot 343 + - Kubernetes Job creation per pipeline event 344 + - Multi-architecture support (amd64/arm64 node targeting) 345 + - Standard Docker images (golang:1.24, node:20, etc.) 
346 + - Sequential step execution in single pod 347 + - Log streaming from K8s pods to knot via K8s API 348 + - Status reporting to knot (success/failure/timeout) 349 + - Prometheus metrics 350 + 351 + ### Exclude (Future Enhancements) ❌ 352 + - Nixery integration (add later) 353 + - Kaniko/Buildah for container builds 354 + - Persistent Nix store caching 355 + - Multi-knot support 356 + - Advanced auto-scaling policies 357 + - Service containers (DB sidecars) 358 + - Matrix builds 359 + 360 + --- 361 + 362 + ## Key Design Decisions 363 + 364 + 1. **Ephemeral Jobs**: Scale-to-zero, one Job per pipeline event 365 + 2. **Simple Images**: Use any Docker Hub image, no Nixery complexity for MVP 366 + 3. **Multi-Arch Native**: Use Kubernetes node affinity for amd64/arm64 targeting 367 + 4. **All steps in one pod**: GitHub Actions model (shared filesystem/env) 368 + 5. **K8s API for logs**: Stream pod logs to knot, no disk-based logging needed 369 + 6. **Reuse spindle models**: Maintain compatibility, adapt only execution layer 370 + 7. **Prometheus metrics**: Standard observability from day one 371 + 372 + --- 373 + 374 + ## Future Enhancements 375 + 376 + ### Phase 7: Nixery Integration 377 + - Detect `dependencies.nixpkgs` in workflow spec 378 + - Generate Nixery image URL dynamically 379 + - Support both standard images and Nixery 380 + - Implement Nix store caching (PVC) 381 + 382 + ### Phase 8: Advanced Features 383 + - Kaniko/Buildah integration for container builds 384 + - Service containers (like GitHub Actions services) 385 + - Matrix builds (multiple arch/version combinations) 386 + - Caching strategies (build cache, dependencies) 387 + - Advanced auto-scaling (predictive scaling) 388 + 389 + ### Phase 9: Multi-Tenancy 390 + - Multiple SpindleSets per cluster 391 + - Resource quotas per SpindleSet 392 + - Network policies for isolation 393 + - Multi-knot support (one operator, many knots) 394 + 395 + --- 396 + 397 + ## Success Criteria 398 + 399 + **MVP is complete when:** 400 + 1. SpindleSet CRD can be deployed to cluster 401 + 2. WebSocket connection to tangled.org knot established 402 + 3. Pipeline events trigger Job creation 403 + 4. Jobs execute on correct architecture nodes 404 + 5. Logs stream back to knot in real-time 405 + 6. Status updates sent to knot (success/failure) 406 + 7. Prometheus metrics exposed 407 + 8. Basic integration test passes 408 + 409 + **Production-ready when:** 410 + 1. Full test coverage (unit + integration) 411 + 2. Error handling and retry logic robust 412 + 3. Documentation complete 413 + 4. Helm chart available 414 + 5. Multi-arch container images published 415 + 6. Performance benchmarked 416 + 7. Security review completed
+23
PROJECT
··· 1 + # Code generated by tool. DO NOT EDIT. 2 + # This file is used to track the info used to scaffold your project 3 + # and allow the plugins properly work. 4 + # More info: https://book.kubebuilder.io/reference/project-config.html 5 + domain: j5t.io 6 + layout: 7 + - go.kubebuilder.io/v4 8 + plugins: 9 + manifests.sdk.operatorframework.io/v2: {} 10 + scorecard.sdk.operatorframework.io/v2: {} 11 + projectName: loom 12 + repo: tangled.org/evan.jarrett.net/loom 13 + resources: 14 + - api: 15 + crdVersion: v1 16 + namespaced: true 17 + controller: true 18 + domain: j5t.io 19 + group: loom 20 + kind: SpindleSet 21 + path: tangled.org/evan.jarrett.net/loom/api/v1alpha1 22 + version: v1alpha1 23 + version: "3"
+135
README.md
··· 1 + # loom
2 + // TODO(user): Add simple overview of use/purpose
3 + 
4 + ## Description
5 + // TODO(user): An in-depth paragraph about your project and overview of use
6 + 
7 + ## Getting Started
8 + 
9 + ### Prerequisites
10 + - go version v1.24.0+
11 + - docker version 17.03+
12 + - kubectl version v1.11.3+
13 + - Access to a Kubernetes v1.11.3+ cluster
14 + 
15 + ### To Deploy on the cluster
16 + **Build and push your image to the location specified by `IMG`:**
17 + 
18 + ```sh
19 + make docker-build docker-push IMG=<some-registry>/loom:tag
20 + ```
21 + 
22 + **NOTE:** This image must be published to the personal registry you specified,
23 + and the working environment needs permission to pull it.
24 + If the commands above fail, verify your access to the registry.
25 + 
26 + **Install the CRDs into the cluster:**
27 + 
28 + ```sh
29 + make install
30 + ```
31 + 
32 + **Deploy the Manager to the cluster with the image specified by `IMG`:**
33 + 
34 + ```sh
35 + make deploy IMG=<some-registry>/loom:tag
36 + ```
37 + 
38 + > **NOTE**: If you encounter RBAC errors, you may need to grant yourself cluster-admin
39 + privileges or be logged in as admin.
40 + 
41 + **Create instances of your solution**
42 + You can apply the samples (examples) from config/samples:
43 + 
44 + ```sh
45 + kubectl apply -k config/samples/
46 + ```
47 + 
48 + > **NOTE**: Ensure that the samples have sensible default values before testing.
49 + 
50 + ### To Uninstall
51 + **Delete the instances (CRs) from the cluster:**
52 + 
53 + ```sh
54 + kubectl delete -k config/samples/
55 + ```
56 + 
57 + **Delete the APIs (CRDs) from the cluster:**
58 + 
59 + ```sh
60 + make uninstall
61 + ```
62 + 
63 + **Undeploy the controller from the cluster:**
64 + 
65 + ```sh
66 + make undeploy
67 + ```
68 + 
69 + ## Project Distribution
70 + 
71 + The following options are available for releasing and providing this solution to users.
72 + 
73 + ### By providing a bundle with all YAML files
74 + 
75 + 1. Build the installer for the image built and published in the registry:
76 + 
77 + ```sh
78 + make build-installer IMG=<some-registry>/loom:tag
79 + ```
80 + 
81 + **NOTE:** The makefile target mentioned above generates an 'install.yaml'
82 + file in the dist directory. This file contains all the resources built
83 + with Kustomize, which are necessary to install this project without its
84 + dependencies.
85 + 
86 + 2. Using the installer
87 + 
88 + Users can just run 'kubectl apply -f <URL for YAML BUNDLE>' to install
89 + the project, e.g.:
90 + 
91 + ```sh
92 + kubectl apply -f https://raw.githubusercontent.com/<org>/loom/<tag or branch>/dist/install.yaml
93 + ```
94 + 
95 + ### By providing a Helm Chart
96 + 
97 + 1. Build the chart using the optional helm plugin
98 + 
99 + ```sh
100 + operator-sdk edit --plugins=helm/v1-alpha
101 + ```
102 + 
103 + 2. A chart is generated under 'dist/chart', and users
104 + can obtain this solution from there.
105 + 
106 + **NOTE:** If you change the project, you need to update the Helm Chart
107 + using the same command above to sync the latest changes. Furthermore,
108 + if you create webhooks, you need to use the above command with
109 + the '--force' flag, and ensure that any custom configuration
110 + previously added to 'dist/chart/values.yaml' or 'dist/chart/manager/manager.yaml'
111 + is re-applied afterwards.
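
For example, once the chart has been generated, users could install it straight from the repository checkout (the release name and namespace below are illustrative):

```sh
helm install loom dist/chart --namespace loom-system --create-namespace
```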
112 + 113 + ## Contributing 114 + // TODO(user): Add detailed information on how you would like others to contribute to this project 115 + 116 + **NOTE:** Run `make help` for more information on all potential `make` targets 117 + 118 + More information can be found via the [Kubebuilder Documentation](https://book.kubebuilder.io/introduction.html) 119 + 120 + ## License 121 + 122 + Copyright 2025 Evan Jarrett. 123 + 124 + Licensed under the Apache License, Version 2.0 (the "License"); 125 + you may not use this file except in compliance with the License. 126 + You may obtain a copy of the License at 127 + 128 + http://www.apache.org/licenses/LICENSE-2.0 129 + 130 + Unless required by applicable law or agreed to in writing, software 131 + distributed under the License is distributed on an "AS IS" BASIS, 132 + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 133 + See the License for the specific language governing permissions and 134 + limitations under the License. 135 +
+431
TANGLED.md
··· 1 + # Upstream Improvements for tangled.org/core 2 + 3 + This document tracks issues and proposed improvements for the upstream `tangled.org/core` repository that would benefit Loom and other spindle implementations. 4 + 5 + ## Critical Issues 6 + 7 + ### 1. Spindle Server Constructor Not Exported 8 + 9 + **Current State:** 10 + - `spindle.Run()` creates an unexported `Spindle` struct internally 11 + - Hardcodes `nixery.New()` engine initialization 12 + - Blocks on `http.ListenAndServe()` at the end 13 + - No way to inject custom engines or access spindle components 14 + 15 + **Proposed Solution:** 16 + ```go 17 + // spindle/server.go 18 + 19 + // Spindle should be exported 20 + type Spindle struct { 21 + jc *jetstream.JetstreamClient 22 + db *db.DB 23 + e *rbac.Enforcer 24 + l *slog.Logger 25 + n *notifier.Notifier 26 + engs map[string]models.Engine 27 + jq *queue.Queue 28 + cfg *config.Config 29 + ks *eventconsumer.Consumer 30 + res *idresolver.Resolver 31 + vault secrets.Manager 32 + } 33 + 34 + // New creates a spindle server with custom engines 35 + func New(ctx context.Context, cfg *config.Config, engines map[string]models.Engine) (*Spindle, error) { 36 + // All the initialization logic currently in Run() 37 + // But accept engines as parameter instead of hardcoding nixery 38 + 39 + return &Spindle{...}, nil 40 + } 41 + 42 + // Start starts the HTTP server (blocking) 43 + func (s *Spindle) Start(ctx context.Context) error { 44 + s.l.Info("starting spindle server", "address", s.cfg.Server.ListenAddr) 45 + return http.ListenAndServe(s.cfg.Server.ListenAddr, s.Router()) 46 + } 47 + 48 + // Run is a convenience function for standard spindle operation 49 + func Run(ctx context.Context) error { 50 + cfg, err := config.Load(ctx) 51 + if err != nil { 52 + return err 53 + } 54 + 55 + // Create default engines (nixery) 56 + nixeryEng, err := nixery.New(ctx, cfg) 57 + if err != nil { 58 + return err 59 + } 60 + 61 + s, err := New(ctx, cfg, map[string]models.Engine{ 62 + "nixery": nixeryEng, 63 + }) 64 + if err != nil { 65 + return err 66 + } 67 + 68 + return s.Start(ctx) 69 + } 70 + ``` 71 + 72 + **Benefits:** 73 + - Custom engine implementations (KubernetesEngine, DockerEngine, etc.) 74 + - Embedding spindle in other applications 75 + - Testing with mock engines 76 + - Multiple engines in single spindle instance 77 + 78 + **Use Case in Loom:** 79 + ```go 80 + // Loom can now use spindle properly 81 + s, err := spindle.New(ctx, cfg, map[string]models.Engine{ 82 + "kubernetes": kubeEngine, 83 + }) 84 + 85 + // Start in goroutine 86 + go s.Start(ctx) 87 + 88 + // Access components for controller 89 + controller := &SpindleSetReconciler{ 90 + DB: s.DB(), 91 + Queue: s.Queue(), 92 + Engines: s.Engines(), 93 + } 94 + ``` 95 + 96 + --- 97 + 98 + ## Helpful Interface Additions 99 + 100 + ### 2. Engine Configuration Interface 101 + 102 + **Issue:** 103 + Engines have different configuration needs but no standard way to express requirements. 
104 + 105 + **Proposed:** 106 + ```go 107 + // models/engine.go 108 + 109 + type EngineConfigurable interface { 110 + // ConfigSchema returns JSON schema for engine-specific config 111 + ConfigSchema() map[string]interface{} 112 + 113 + // ApplyConfig updates engine with workflow-specific config 114 + ApplyConfig(config map[string]interface{}) error 115 + } 116 + ``` 117 + 118 + **Benefit:** 119 + - Workflows can specify engine requirements 120 + - Dynamic engine selection based on config 121 + - Validation of engine compatibility 122 + 123 + --- 124 + 125 + ### 3. Repo Storage Backend Interface 126 + 127 + **Issue:** 128 + Database operations for repos are tightly coupled to SQLite implementation. 129 + 130 + **Proposed:** 131 + ```go 132 + // spindle/storage/storage.go 133 + 134 + type RepoStorage interface { 135 + AddRepo(knot, owner, name string) error 136 + GetRepo(knot, owner, name string) (*Repo, error) 137 + ListRepos() ([]*Repo, error) 138 + RemoveRepo(knot, owner, name string) error 139 + Knots() ([]string, error) 140 + } 141 + 142 + // Allow injection of custom storage 143 + func New(ctx context.Context, cfg *config.Config, storage RepoStorage, ...) (*Spindle, error) 144 + ``` 145 + 146 + **Benefit:** 147 + - Different storage backends (PostgreSQL, etcd, etc.) 148 + - Better testing with mock storage 149 + - Cloud-native deployments 150 + 151 + --- 152 + 153 + ### 4. Event Consumer Plugin Interface 154 + 155 + **Issue:** 156 + EventConsumer is hardcoded to consume from knots. Other event sources require forking. 157 + 158 + **Proposed:** 159 + ```go 160 + // eventconsumer/source.go 161 + 162 + type SourceProvider interface { 163 + // Discover returns available event sources 164 + Discover(ctx context.Context) ([]Source, error) 165 + 166 + // Watch notifies when new sources appear 167 + Watch(ctx context.Context) (<-chan Source, error) 168 + } 169 + 170 + // Allow registration of source providers 171 + func (c *Consumer) RegisterProvider(name string, provider SourceProvider) error 172 + ``` 173 + 174 + **Benefit:** 175 + - GitHub webhook sources 176 + - GitLab sources 177 + - Custom CI/CD integrations 178 + - Kubernetes events as sources 179 + 180 + --- 181 + 182 + ### 5. Status Reporter Interface 183 + 184 + **Issue:** 185 + Status is written to DB + notifier, but no abstraction for other status destinations. 186 + 187 + **Proposed:** 188 + ```go 189 + // spindle/status/status.go 190 + 191 + type StatusReporter interface { 192 + ReportPending(ctx context.Context, wid models.WorkflowId) error 193 + ReportRunning(ctx context.Context, wid models.WorkflowId) error 194 + ReportSuccess(ctx context.Context, wid models.WorkflowId) error 195 + ReportFailed(ctx context.Context, wid models.WorkflowId, err string, exitCode int64) error 196 + ReportTimeout(ctx context.Context, wid models.WorkflowId) error 197 + } 198 + 199 + // Allow multiple reporters 200 + func (s *Spindle) AddStatusReporter(reporter StatusReporter) 201 + 202 + // Default implementation uses DB + Notifier 203 + type DBStatusReporter struct { ... } 204 + ``` 205 + 206 + **Benefit:** 207 + - Send status to external systems (Prometheus, Datadog, etc.) 208 + - Custom status aggregation 209 + - Multi-destination status broadcasting 210 + 211 + --- 212 + 213 + ### 6. Secrets Backend Plugin Interface 214 + 215 + **Issue:** 216 + Only SQLite and OpenBao supported, no plugin system. 
217 + 218 + **Proposed:** 219 + ```go 220 + // spindle/secrets/plugin.go 221 + 222 + type SecretProvider interface { 223 + Name() string 224 + // Existing Manager interface methods... 225 + } 226 + 227 + // Registry for secret providers 228 + var providers = make(map[string]func(*config.Config) (Manager, error)) 229 + 230 + func RegisterProvider(name string, factory func(*config.Config) (Manager, error)) { 231 + providers[name] = factory 232 + } 233 + 234 + // In config loading 235 + func CreateSecretsManager(cfg *config.Config) (Manager, error) { 236 + factory, ok := providers[cfg.Server.Secrets.Provider] 237 + if !ok { 238 + return nil, fmt.Errorf("unknown provider: %s", cfg.Server.Secrets.Provider) 239 + } 240 + return factory(cfg) 241 + } 242 + ``` 243 + 244 + **Benefit:** 245 + - AWS Secrets Manager 246 + - HashiCorp Vault 247 + - Kubernetes Secrets 248 + - Custom secret backends 249 + 250 + --- 251 + 252 + ### 7. Workflow Logger Interface 253 + 254 + **Issue:** 255 + WorkflowLogger writes to disk at hardcoded `/var/log/spindle/`. No abstraction. 256 + 257 + **Proposed:** 258 + ```go 259 + // models/logger.go 260 + 261 + type LogDestination interface { 262 + Write(workflowId WorkflowId, stepIdx int, line string) error 263 + Read(workflowId WorkflowId, stepIdx int) ([]string, error) 264 + Stream(workflowId WorkflowId) (<-chan LogLine, error) 265 + } 266 + 267 + type WorkflowLogger struct { 268 + destinations []LogDestination 269 + } 270 + 271 + // Built-in implementations 272 + type FileLogDestination struct { ... } 273 + type S3LogDestination struct { ... } 274 + type DatabaseLogDestination struct { ... } 275 + ``` 276 + 277 + **Benefit:** 278 + - Cloud storage for logs (S3, GCS, Azure Blob) 279 + - Database log storage 280 + - Log streaming services (Loki, ElasticSearch) 281 + - Multiple destinations 282 + 283 + --- 284 + 285 + ### 8. Pipeline Validation Interface 286 + 287 + **Issue:** 288 + No pre-execution validation of pipeline definitions. 289 + 290 + **Proposed:** 291 + ```go 292 + // models/validator.go 293 + 294 + type PipelineValidator interface { 295 + Validate(pipeline *tangled.Pipeline) error 296 + } 297 + 298 + type EngineValidator interface { 299 + ValidateWorkflow(workflow *Workflow) error 300 + } 301 + 302 + // Register validators 303 + func (s *Spindle) AddValidator(v PipelineValidator) 304 + ``` 305 + 306 + **Benefit:** 307 + - Catch errors before execution 308 + - Custom validation rules 309 + - Policy enforcement (no privileged containers, resource limits, etc.) 310 + 311 + --- 312 + 313 + ## Configuration Improvements 314 + 315 + ### 9. Config File Support 316 + 317 + **Current:** Only environment variables 318 + **Proposed:** Support YAML/JSON config files in addition to env vars 319 + 320 + ```go 321 + // Load from file or env 322 + func Load(ctx context.Context) (*Config, error) { 323 + if path := os.Getenv("SPINDLE_CONFIG_FILE"); path != "" { 324 + return LoadFromFile(path) 325 + } 326 + return LoadFromEnv(ctx) 327 + } 328 + ``` 329 + 330 + --- 331 + 332 + ### 10. 
Engine-Specific Config Sections 333 + 334 + **Current:** Only NixeryPipelines config 335 + **Proposed:** Generic engine config map 336 + 337 + ```go 338 + type Config struct { 339 + Server Server 340 + EngineConfig map[string]map[string]interface{} // engine name -> config 341 + } 342 + ``` 343 + 344 + Example: 345 + ```yaml 346 + spindle: 347 + server: 348 + hostname: spindle.example.com 349 + engines: 350 + nixery: 351 + nixery: nixery.tangled.sh 352 + workflow_timeout: 5m 353 + kubernetes: 354 + namespace: default 355 + default_service_account: spindle-runner 356 + ``` 357 + 358 + --- 359 + 360 + ## Testing Utilities 361 + 362 + ### 11. Mock Engine Implementation 363 + 364 + **Proposed:** Include a mock engine for testing 365 + 366 + ```go 367 + // testing/mockengine.go 368 + 369 + type MockEngine struct { 370 + InitFunc func(...) (*models.Workflow, error) 371 + SetupFunc func(...) error 372 + RunStepFunc func(...) error 373 + DestroyFunc func(...) error 374 + } 375 + 376 + func (m *MockEngine) InitWorkflow(...) (*models.Workflow, error) { 377 + if m.InitFunc != nil { 378 + return m.InitFunc(...) 379 + } 380 + return &models.Workflow{}, nil 381 + } 382 + // ... etc 383 + ``` 384 + 385 + --- 386 + 387 + ## Documentation Needs 388 + 389 + ### 12. Engine Implementation Guide 390 + 391 + Documentation for implementing custom engines: 392 + - Required interface methods 393 + - Lifecycle expectations 394 + - Error handling patterns 395 + - Testing strategies 396 + - Example implementations 397 + 398 + ### 13. Deployment Patterns 399 + 400 + Documentation for different deployment scenarios: 401 + - Single server 402 + - High availability 403 + - Kubernetes operator (Loom) 404 + - Docker Compose 405 + - Systemd service 406 + 407 + --- 408 + 409 + ## Priority for Loom 410 + 411 + **High Priority:** 412 + 1. ✅ **Spindle Server Constructor** - Blocks Loom implementation completely 413 + 2. Engine Configuration Interface - Needed for proper Kubernetes config 414 + 415 + **Medium Priority:** 416 + 3. Secrets Backend Plugin - For Kubernetes secrets integration 417 + 4. Workflow Logger Interface - For pod log streaming 418 + 5. Status Reporter Interface - For custom status handling 419 + 420 + **Low Priority:** 421 + 6. Other interfaces - Nice to have, workarounds exist 422 + 423 + --- 424 + 425 + ## Contributing Back 426 + 427 + Once Loom is working, we should: 428 + 1. Extract generic patterns from KubernetesEngine 429 + 2. Propose these interfaces upstream 430 + 3. Submit PRs for critical issues (#1) 431 + 4. Share deployment patterns and documentation
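To make the intent of the secrets plugin registry (#6) concrete, here is a sketch of how a Kubernetes-backed provider could register itself under the proposed API. Nothing in this block exists upstream yet: `kubeSecretsManager` and the `"kubernetes"` provider name are illustrative placeholders, and the factory signature simply mirrors the `RegisterProvider` proposal above.

```go
// Sketch only: registration against the secrets plugin registry proposed in #6.
package kubesecrets

import (
	"tangled.org/core/spindle/config"
	"tangled.org/core/spindle/secrets"
)

// kubeSecretsManager would implement the existing secrets.Manager interface
// on top of Kubernetes Secret objects (method bodies elided from this sketch).
type kubeSecretsManager struct{}

func init() {
	// Factory signature matches the proposed RegisterProvider; the registry
	// would later construct this backend when the config selects "kubernetes".
	secrets.RegisterProvider("kubernetes", func(cfg *config.Config) (secrets.Manager, error) {
		return &kubeSecretsManager{}, nil
	})
}
```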
+36
api/v1alpha1/groupversion_info.go
··· 1 + /* 2 + Copyright 2025 Evan Jarrett. 3 + 4 + Licensed under the Apache License, Version 2.0 (the "License"); 5 + you may not use this file except in compliance with the License. 6 + You may obtain a copy of the License at 7 + 8 + http://www.apache.org/licenses/LICENSE-2.0 9 + 10 + Unless required by applicable law or agreed to in writing, software 11 + distributed under the License is distributed on an "AS IS" BASIS, 12 + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 + See the License for the specific language governing permissions and 14 + limitations under the License. 15 + */ 16 + 17 + // Package v1alpha1 contains API Schema definitions for the loom v1alpha1 API group. 18 + // +kubebuilder:object:generate=true 19 + // +groupName=loom.j5t.io 20 + package v1alpha1 21 + 22 + import ( 23 + "k8s.io/apimachinery/pkg/runtime/schema" 24 + "sigs.k8s.io/controller-runtime/pkg/scheme" 25 + ) 26 + 27 + var ( 28 + // GroupVersion is group version used to register these objects. 29 + GroupVersion = schema.GroupVersion{Group: "loom.j5t.io", Version: "v1alpha1"} 30 + 31 + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. 32 + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} 33 + 34 + // AddToScheme adds the types in this group-version to the given scheme. 35 + AddToScheme = SchemeBuilder.AddToScheme 36 + )
+136
api/v1alpha1/spindleset_types.go
··· 1 + /* 2 + Copyright 2025 Evan Jarrett. 3 + 4 + Licensed under the Apache License, Version 2.0 (the "License"); 5 + you may not use this file except in compliance with the License. 6 + You may obtain a copy of the License at 7 + 8 + http://www.apache.org/licenses/LICENSE-2.0 9 + 10 + Unless required by applicable law or agreed to in writing, software 11 + distributed under the License is distributed on an "AS IS" BASIS, 12 + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 + See the License for the specific language governing permissions and 14 + limitations under the License. 15 + */ 16 + 17 + package v1alpha1 18 + 19 + import ( 20 + corev1 "k8s.io/api/core/v1" 21 + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 22 + ) 23 + 24 + // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 25 + // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 26 + 27 + // SpindleSetSpec defines the desired state of SpindleSet. 28 + type SpindleSetSpec struct { 29 + // KnotUrl is the URL of the tangled.org knot to connect to (e.g., https://tangled.org/@org/repo). 30 + // +kubebuilder:validation:Required 31 + // +kubebuilder:validation:Pattern=`^https?://.*` 32 + KnotUrl string `json:"knotUrl"` 33 + 34 + // KnotAuthSecret is the name of the Secret containing authentication credentials for the knot. 35 + // The secret should contain a key "token" with the auth token. 36 + // +kubebuilder:validation:Required 37 + KnotAuthSecret string `json:"knotAuthSecret"` 38 + 39 + // MaxConcurrentJobs is the maximum number of concurrent spindle jobs that can run. 40 + // Defaults to 10 if not specified. 41 + // +kubebuilder:default=10 42 + // +kubebuilder:validation:Minimum=1 43 + // +kubebuilder:validation:Maximum=100 44 + // +optional 45 + MaxConcurrentJobs int32 `json:"maxConcurrentJobs,omitempty"` 46 + 47 + // Template is the default pod template configuration for spindle jobs. 48 + // Individual workflows can override these settings. 49 + // +optional 50 + Template SpindleTemplate `json:"template,omitempty"` 51 + } 52 + 53 + // SpindleTemplate defines the pod template configuration for spindle jobs. 54 + type SpindleTemplate struct { 55 + // Resources defines the compute resource requirements for spindle jobs. 56 + // +optional 57 + Resources corev1.ResourceRequirements `json:"resources,omitempty"` 58 + 59 + // NodeSelector is a selector which must be true for the pod to fit on a node. 60 + // +optional 61 + NodeSelector map[string]string `json:"nodeSelector,omitempty"` 62 + 63 + // Tolerations allows pods to schedule onto nodes with matching taints. 64 + // +optional 65 + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` 66 + 67 + // Affinity defines scheduling constraints for spindle job pods. 68 + // +optional 69 + Affinity *corev1.Affinity `json:"affinity,omitempty"` 70 + } 71 + 72 + // SpindleSetStatus defines the observed state of SpindleSet. 73 + type SpindleSetStatus struct { 74 + // Conditions represent the latest available observations of the SpindleSet's state. 75 + // +optional 76 + // +patchMergeKey=type 77 + // +patchStrategy=merge 78 + // +listType=map 79 + // +listMapKey=type 80 + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` 81 + 82 + // PendingJobs is the number of spindle jobs currently pending. 83 + // +optional 84 + PendingJobs int32 `json:"pendingJobs,omitempty"` 85 + 86 + // RunningJobs is the number of spindle jobs currently running. 
87 + // +optional 88 + RunningJobs int32 `json:"runningJobs,omitempty"` 89 + 90 + // CompletedJobs is the total number of spindle jobs that have completed successfully. 91 + // +optional 92 + CompletedJobs int32 `json:"completedJobs,omitempty"` 93 + 94 + // FailedJobs is the total number of spindle jobs that have failed. 95 + // +optional 96 + FailedJobs int32 `json:"failedJobs,omitempty"` 97 + 98 + // WebSocketConnected indicates whether the WebSocket connection to the knot is active. 99 + // +optional 100 + WebSocketConnected bool `json:"webSocketConnected,omitempty"` 101 + 102 + // LastEventTime is the timestamp of the last pipeline event received from the knot. 103 + // +optional 104 + LastEventTime *metav1.Time `json:"lastEventTime,omitempty"` 105 + } 106 + 107 + // +kubebuilder:object:root=true 108 + // +kubebuilder:subresource:status 109 + // +kubebuilder:printcolumn:name="Knot URL",type=string,JSONPath=`.spec.knotUrl` 110 + // +kubebuilder:printcolumn:name="Connected",type=boolean,JSONPath=`.status.webSocketConnected` 111 + // +kubebuilder:printcolumn:name="Running",type=integer,JSONPath=`.status.runningJobs` 112 + // +kubebuilder:printcolumn:name="Completed",type=integer,JSONPath=`.status.completedJobs` 113 + // +kubebuilder:printcolumn:name="Failed",type=integer,JSONPath=`.status.failedJobs` 114 + // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" 115 + 116 + // SpindleSet is the Schema for the spindlesets API. 117 + type SpindleSet struct { 118 + metav1.TypeMeta `json:",inline"` 119 + metav1.ObjectMeta `json:"metadata,omitempty"` 120 + 121 + Spec SpindleSetSpec `json:"spec,omitempty"` 122 + Status SpindleSetStatus `json:"status,omitempty"` 123 + } 124 + 125 + // +kubebuilder:object:root=true 126 + 127 + // SpindleSetList contains a list of SpindleSet. 128 + type SpindleSetList struct { 129 + metav1.TypeMeta `json:",inline"` 130 + metav1.ListMeta `json:"metadata,omitempty"` 131 + Items []SpindleSet `json:"items"` 132 + } 133 + 134 + func init() { 135 + SchemeBuilder.Register(&SpindleSet{}, &SpindleSetList{}) 136 + }
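As a quick illustration of how these types are consumed, here is a sketch of creating a SpindleSet programmatically with a controller-runtime client. It assumes a cluster reachable through the usual kubeconfig loading rules; the knot URL and Secret name are placeholders:

```go
package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	loomv1alpha1 "tangled.org/evan.jarrett.net/loom/api/v1alpha1"
)

func main() {
	// Build a client whose scheme knows about the loom types.
	scheme := runtime.NewScheme()
	_ = loomv1alpha1.AddToScheme(scheme)

	c, err := client.New(ctrl.GetConfigOrDie(), client.Options{Scheme: scheme})
	if err != nil {
		panic(err)
	}

	ss := &loomv1alpha1.SpindleSet{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"},
		Spec: loomv1alpha1.SpindleSetSpec{
			KnotUrl:           "https://tangled.org/@org/repo", // placeholder
			KnotAuthSecret:    "knot-auth",                     // placeholder Secret name
			MaxConcurrentJobs: 5,
		},
	}
	if err := c.Create(context.Background(), ss); err != nil {
		panic(err)
	}
}
```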
+163
api/v1alpha1/zz_generated.deepcopy.go
··· 1 + //go:build !ignore_autogenerated 2 + 3 + /* 4 + Copyright 2025 Evan Jarrett. 5 + 6 + Licensed under the Apache License, Version 2.0 (the "License"); 7 + you may not use this file except in compliance with the License. 8 + You may obtain a copy of the License at 9 + 10 + http://www.apache.org/licenses/LICENSE-2.0 11 + 12 + Unless required by applicable law or agreed to in writing, software 13 + distributed under the License is distributed on an "AS IS" BASIS, 14 + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 + See the License for the specific language governing permissions and 16 + limitations under the License. 17 + */ 18 + 19 + // Code generated by controller-gen. DO NOT EDIT. 20 + 21 + package v1alpha1 22 + 23 + import ( 24 + "k8s.io/api/core/v1" 25 + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 26 + runtime "k8s.io/apimachinery/pkg/runtime" 27 + ) 28 + 29 + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 30 + func (in *SpindleSet) DeepCopyInto(out *SpindleSet) { 31 + *out = *in 32 + out.TypeMeta = in.TypeMeta 33 + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) 34 + in.Spec.DeepCopyInto(&out.Spec) 35 + in.Status.DeepCopyInto(&out.Status) 36 + } 37 + 38 + // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpindleSet. 39 + func (in *SpindleSet) DeepCopy() *SpindleSet { 40 + if in == nil { 41 + return nil 42 + } 43 + out := new(SpindleSet) 44 + in.DeepCopyInto(out) 45 + return out 46 + } 47 + 48 + // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 49 + func (in *SpindleSet) DeepCopyObject() runtime.Object { 50 + if c := in.DeepCopy(); c != nil { 51 + return c 52 + } 53 + return nil 54 + } 55 + 56 + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 57 + func (in *SpindleSetList) DeepCopyInto(out *SpindleSetList) { 58 + *out = *in 59 + out.TypeMeta = in.TypeMeta 60 + in.ListMeta.DeepCopyInto(&out.ListMeta) 61 + if in.Items != nil { 62 + in, out := &in.Items, &out.Items 63 + *out = make([]SpindleSet, len(*in)) 64 + for i := range *in { 65 + (*in)[i].DeepCopyInto(&(*out)[i]) 66 + } 67 + } 68 + } 69 + 70 + // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpindleSetList. 71 + func (in *SpindleSetList) DeepCopy() *SpindleSetList { 72 + if in == nil { 73 + return nil 74 + } 75 + out := new(SpindleSetList) 76 + in.DeepCopyInto(out) 77 + return out 78 + } 79 + 80 + // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 81 + func (in *SpindleSetList) DeepCopyObject() runtime.Object { 82 + if c := in.DeepCopy(); c != nil { 83 + return c 84 + } 85 + return nil 86 + } 87 + 88 + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 89 + func (in *SpindleSetSpec) DeepCopyInto(out *SpindleSetSpec) { 90 + *out = *in 91 + in.Template.DeepCopyInto(&out.Template) 92 + } 93 + 94 + // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpindleSetSpec. 95 + func (in *SpindleSetSpec) DeepCopy() *SpindleSetSpec { 96 + if in == nil { 97 + return nil 98 + } 99 + out := new(SpindleSetSpec) 100 + in.DeepCopyInto(out) 101 + return out 102 + } 103 + 104 + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
105 + func (in *SpindleSetStatus) DeepCopyInto(out *SpindleSetStatus) { 106 + *out = *in 107 + if in.Conditions != nil { 108 + in, out := &in.Conditions, &out.Conditions 109 + *out = make([]metav1.Condition, len(*in)) 110 + for i := range *in { 111 + (*in)[i].DeepCopyInto(&(*out)[i]) 112 + } 113 + } 114 + if in.LastEventTime != nil { 115 + in, out := &in.LastEventTime, &out.LastEventTime 116 + *out = (*in).DeepCopy() 117 + } 118 + } 119 + 120 + // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpindleSetStatus. 121 + func (in *SpindleSetStatus) DeepCopy() *SpindleSetStatus { 122 + if in == nil { 123 + return nil 124 + } 125 + out := new(SpindleSetStatus) 126 + in.DeepCopyInto(out) 127 + return out 128 + } 129 + 130 + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 131 + func (in *SpindleTemplate) DeepCopyInto(out *SpindleTemplate) { 132 + *out = *in 133 + in.Resources.DeepCopyInto(&out.Resources) 134 + if in.NodeSelector != nil { 135 + in, out := &in.NodeSelector, &out.NodeSelector 136 + *out = make(map[string]string, len(*in)) 137 + for key, val := range *in { 138 + (*out)[key] = val 139 + } 140 + } 141 + if in.Tolerations != nil { 142 + in, out := &in.Tolerations, &out.Tolerations 143 + *out = make([]v1.Toleration, len(*in)) 144 + for i := range *in { 145 + (*in)[i].DeepCopyInto(&(*out)[i]) 146 + } 147 + } 148 + if in.Affinity != nil { 149 + in, out := &in.Affinity, &out.Affinity 150 + *out = new(v1.Affinity) 151 + (*in).DeepCopyInto(*out) 152 + } 153 + } 154 + 155 + // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpindleTemplate. 156 + func (in *SpindleTemplate) DeepCopy() *SpindleTemplate { 157 + if in == nil { 158 + return nil 159 + } 160 + out := new(SpindleTemplate) 161 + in.DeepCopyInto(out) 162 + return out 163 + }
+302
cmd/main.go
··· 1 + /* 2 + Copyright 2025 Evan Jarrett. 3 + 4 + Licensed under the Apache License, Version 2.0 (the "License"); 5 + you may not use this file except in compliance with the License. 6 + You may obtain a copy of the License at 7 + 8 + http://www.apache.org/licenses/LICENSE-2.0 9 + 10 + Unless required by applicable law or agreed to in writing, software 11 + distributed under the License is distributed on an "AS IS" BASIS, 12 + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 + See the License for the specific language governing permissions and 14 + limitations under the License. 15 + */ 16 + 17 + package main 18 + 19 + import ( 20 + "context" 21 + "crypto/tls" 22 + "flag" 23 + "fmt" 24 + "os" 25 + "path/filepath" 26 + 27 + // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) 28 + // to ensure that exec-entrypoint and run can make use of them. 29 + _ "k8s.io/client-go/plugin/pkg/client/auth" 30 + 31 + "k8s.io/apimachinery/pkg/runtime" 32 + utilruntime "k8s.io/apimachinery/pkg/util/runtime" 33 + clientgoscheme "k8s.io/client-go/kubernetes/scheme" 34 + ctrl "sigs.k8s.io/controller-runtime" 35 + "sigs.k8s.io/controller-runtime/pkg/certwatcher" 36 + "sigs.k8s.io/controller-runtime/pkg/healthz" 37 + "sigs.k8s.io/controller-runtime/pkg/log/zap" 38 + "sigs.k8s.io/controller-runtime/pkg/metrics/filters" 39 + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" 40 + "sigs.k8s.io/controller-runtime/pkg/webhook" 41 + 42 + "tangled.org/core/spindle" 43 + "tangled.org/core/spindle/config" 44 + "tangled.org/core/spindle/models" 45 + 46 + loomv1alpha1 "tangled.org/evan.jarrett.net/loom/api/v1alpha1" 47 + "tangled.org/evan.jarrett.net/loom/internal/controller" 48 + "tangled.org/evan.jarrett.net/loom/internal/engine" 49 + // +kubebuilder:scaffold:imports 50 + ) 51 + 52 + var ( 53 + scheme = runtime.NewScheme() 54 + setupLog = ctrl.Log.WithName("setup") 55 + ) 56 + 57 + func init() { 58 + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) 59 + 60 + utilruntime.Must(loomv1alpha1.AddToScheme(scheme)) 61 + // +kubebuilder:scaffold:scheme 62 + } 63 + 64 + // initializeSpindle creates a spindle server with KubernetesEngine 65 + func initializeSpindle(ctx context.Context, cfg *config.Config, mgr ctrl.Manager) (*spindle.Spindle, error) { 66 + // Initialize Kubernetes engine 67 + // Namespace and template will be configured by controller based on SpindleSet CRD 68 + kubeEngine := engine.NewKubernetesEngine(mgr.GetClient(), "default", loomv1alpha1.SpindleTemplate{}) 69 + 70 + // Create engines map with kubernetes engine 71 + engines := map[string]models.Engine{ 72 + "kubernetes": kubeEngine, 73 + } 74 + 75 + // Use spindle's New() function to create spindle server with our custom engine 76 + s, err := spindle.New(ctx, cfg, engines) 77 + if err != nil { 78 + return nil, fmt.Errorf("failed to create spindle: %w", err) 79 + } 80 + 81 + return s, nil 82 + } 83 + 84 + // nolint:gocyclo 85 + func main() { 86 + var metricsAddr string 87 + var metricsCertPath, metricsCertName, metricsCertKey string 88 + var webhookCertPath, webhookCertName, webhookCertKey string 89 + var enableLeaderElection bool 90 + var probeAddr string 91 + var secureMetrics bool 92 + var enableHTTP2 bool 93 + var tlsOpts []func(*tls.Config) 94 + flag.StringVar(&metricsAddr, "metrics-bind-address", "0", "The address the metrics endpoint binds to. 
"+ 95 + "Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.") 96 + flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") 97 + flag.BoolVar(&enableLeaderElection, "leader-elect", false, 98 + "Enable leader election for controller manager. "+ 99 + "Enabling this will ensure there is only one active controller manager.") 100 + flag.BoolVar(&secureMetrics, "metrics-secure", true, 101 + "If set, the metrics endpoint is served securely via HTTPS. Use --metrics-secure=false to use HTTP instead.") 102 + flag.StringVar(&webhookCertPath, "webhook-cert-path", "", "The directory that contains the webhook certificate.") 103 + flag.StringVar(&webhookCertName, "webhook-cert-name", "tls.crt", "The name of the webhook certificate file.") 104 + flag.StringVar(&webhookCertKey, "webhook-cert-key", "tls.key", "The name of the webhook key file.") 105 + flag.StringVar(&metricsCertPath, "metrics-cert-path", "", 106 + "The directory that contains the metrics server certificate.") 107 + flag.StringVar(&metricsCertName, "metrics-cert-name", "tls.crt", "The name of the metrics server certificate file.") 108 + flag.StringVar(&metricsCertKey, "metrics-cert-key", "tls.key", "The name of the metrics server key file.") 109 + flag.BoolVar(&enableHTTP2, "enable-http2", false, 110 + "If set, HTTP/2 will be enabled for the metrics and webhook servers") 111 + opts := zap.Options{ 112 + Development: true, 113 + } 114 + opts.BindFlags(flag.CommandLine) 115 + flag.Parse() 116 + 117 + ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) 118 + 119 + // if the enable-http2 flag is false (the default), http/2 should be disabled 120 + // due to its vulnerabilities. More specifically, disabling http/2 will 121 + // prevent from being vulnerable to the HTTP/2 Stream Cancellation and 122 + // Rapid Reset CVEs. For more information see: 123 + // - https://github.com/advisories/GHSA-qppj-fm5r-hxr3 124 + // - https://github.com/advisories/GHSA-4374-p667-p6c8 125 + disableHTTP2 := func(c *tls.Config) { 126 + setupLog.Info("disabling http/2") 127 + c.NextProtos = []string{"http/1.1"} 128 + } 129 + 130 + if !enableHTTP2 { 131 + tlsOpts = append(tlsOpts, disableHTTP2) 132 + } 133 + 134 + // Create watchers for metrics and webhooks certificates 135 + var metricsCertWatcher, webhookCertWatcher *certwatcher.CertWatcher 136 + 137 + // Initial webhook TLS options 138 + webhookTLSOpts := tlsOpts 139 + 140 + if len(webhookCertPath) > 0 { 141 + setupLog.Info("Initializing webhook certificate watcher using provided certificates", 142 + "webhook-cert-path", webhookCertPath, "webhook-cert-name", webhookCertName, "webhook-cert-key", webhookCertKey) 143 + 144 + var err error 145 + webhookCertWatcher, err = certwatcher.New( 146 + filepath.Join(webhookCertPath, webhookCertName), 147 + filepath.Join(webhookCertPath, webhookCertKey), 148 + ) 149 + if err != nil { 150 + setupLog.Error(err, "Failed to initialize webhook certificate watcher") 151 + os.Exit(1) 152 + } 153 + 154 + webhookTLSOpts = append(webhookTLSOpts, func(config *tls.Config) { 155 + config.GetCertificate = webhookCertWatcher.GetCertificate 156 + }) 157 + } 158 + 159 + webhookServer := webhook.NewServer(webhook.Options{ 160 + TLSOpts: webhookTLSOpts, 161 + }) 162 + 163 + // Metrics endpoint is enabled in 'config/default/kustomization.yaml'. The Metrics options configure the server. 
164 + // More info: 165 + // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.21.0/pkg/metrics/server 166 + // - https://book.kubebuilder.io/reference/metrics.html 167 + metricsServerOptions := metricsserver.Options{ 168 + BindAddress: metricsAddr, 169 + SecureServing: secureMetrics, 170 + TLSOpts: tlsOpts, 171 + } 172 + 173 + if secureMetrics { 174 + // FilterProvider is used to protect the metrics endpoint with authn/authz. 175 + // These configurations ensure that only authorized users and service accounts 176 + // can access the metrics endpoint. The RBAC rules are configured in 'config/rbac/kustomization.yaml'. More info: 177 + // https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.21.0/pkg/metrics/filters#WithAuthenticationAndAuthorization 178 + metricsServerOptions.FilterProvider = filters.WithAuthenticationAndAuthorization 179 + } 180 + 181 + // If the certificate is not specified, controller-runtime will automatically 182 + // generate self-signed certificates for the metrics server. While convenient for development and testing, 183 + // this setup is not recommended for production. 184 + // 185 + // TODO(user): If you enable certManager, uncomment the following lines: 186 + // - [METRICS-WITH-CERTS] at config/default/kustomization.yaml to generate and use certificates 187 + // managed by cert-manager for the metrics server. 188 + // - [PROMETHEUS-WITH-CERTS] at config/prometheus/kustomization.yaml for TLS certification. 189 + if len(metricsCertPath) > 0 { 190 + setupLog.Info("Initializing metrics certificate watcher using provided certificates", 191 + "metrics-cert-path", metricsCertPath, "metrics-cert-name", metricsCertName, "metrics-cert-key", metricsCertKey) 192 + 193 + var err error 194 + metricsCertWatcher, err = certwatcher.New( 195 + filepath.Join(metricsCertPath, metricsCertName), 196 + filepath.Join(metricsCertPath, metricsCertKey), 197 + ) 198 + if err != nil { 199 + setupLog.Error(err, "Failed to initialize metrics certificate watcher") 200 + os.Exit(1) 201 + } 202 + 203 + metricsServerOptions.TLSOpts = append(metricsServerOptions.TLSOpts, func(config *tls.Config) { 204 + config.GetCertificate = metricsCertWatcher.GetCertificate 205 + }) 206 + } 207 + 208 + // Create context for spindle initialization 209 + ctx := context.Background() 210 + 211 + // Load spindle configuration from environment 212 + spindleCfg, err := config.Load(ctx) 213 + if err != nil { 214 + setupLog.Error(err, "failed to load spindle config") 215 + os.Exit(1) 216 + } 217 + 218 + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ 219 + Scheme: scheme, 220 + Metrics: metricsServerOptions, 221 + WebhookServer: webhookServer, 222 + HealthProbeBindAddress: probeAddr, 223 + LeaderElection: enableLeaderElection, 224 + LeaderElectionID: "d9c2f6b8.j5t.io", 225 + // LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily 226 + // when the Manager ends. This requires the binary to immediately end when the 227 + // Manager is stopped, otherwise, this setting is unsafe. Setting this significantly 228 + // speeds up voluntary leader transitions as the new leader doesn't have to wait 229 + // LeaseDuration time first. 230 + // 231 + // In the default scaffold provided, the program ends immediately after 232 + // the manager stops, so it would be fine to enable this option. However, 233 + // if you are doing, or intend to do, any operation such as performing cleanups 234 + // after the manager stops, then its usage might be unsafe.
235 + // LeaderElectionReleaseOnCancel: true, 236 + }) 237 + if err != nil { 238 + setupLog.Error(err, "unable to start manager") 239 + os.Exit(1) 240 + } 241 + 242 + // Initialize spindle server with KubernetesEngine 243 + s, err := initializeSpindle(ctx, spindleCfg, mgr) 244 + if err != nil { 245 + setupLog.Error(err, "failed to initialize spindle") 246 + os.Exit(1) 247 + } 248 + defer s.Queue().Stop() 249 + 250 + setupLog.Info("spindle server initialized successfully") 251 + 252 + // Start spindle HTTP server in background 253 + go func() { 254 + setupLog.Info("starting spindle HTTP server", "address", spindleCfg.Server.ListenAddr) 255 + if err := s.Start(ctx); err != nil { 256 + setupLog.Error(err, "spindle HTTP server error") 257 + } 258 + }() 259 + 260 + // Setup controller with spindle components 261 + if err := (&controller.SpindleSetReconciler{ 262 + Client: mgr.GetClient(), 263 + Scheme: mgr.GetScheme(), 264 + Config: mgr.GetConfig(), 265 + Spindle: s, 266 + }).SetupWithManager(mgr); err != nil { 267 + setupLog.Error(err, "unable to create controller", "controller", "SpindleSet") 268 + os.Exit(1) 269 + } 270 + // +kubebuilder:scaffold:builder 271 + 272 + if metricsCertWatcher != nil { 273 + setupLog.Info("Adding metrics certificate watcher to manager") 274 + if err := mgr.Add(metricsCertWatcher); err != nil { 275 + setupLog.Error(err, "unable to add metrics certificate watcher to manager") 276 + os.Exit(1) 277 + } 278 + } 279 + 280 + if webhookCertWatcher != nil { 281 + setupLog.Info("Adding webhook certificate watcher to manager") 282 + if err := mgr.Add(webhookCertWatcher); err != nil { 283 + setupLog.Error(err, "unable to add webhook certificate watcher to manager") 284 + os.Exit(1) 285 + } 286 + } 287 + 288 + if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { 289 + setupLog.Error(err, "unable to set up health check") 290 + os.Exit(1) 291 + } 292 + if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { 293 + setupLog.Error(err, "unable to set up ready check") 294 + os.Exit(1) 295 + } 296 + 297 + setupLog.Info("starting manager") 298 + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { 299 + setupLog.Error(err, "problem running manager") 300 + os.Exit(1) 301 + } 302 + }
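For local development, the manager can be run directly with the flags defined above. A sketch, assuming a kubeconfig pointing at a cluster and whatever spindle environment configuration `config.Load` expects already exported:

```sh
# Run the operator locally: secure metrics on :8443, probes on :8081,
# leader election enabled (flags defined in cmd/main.go above).
go run ./cmd/main.go \
  --metrics-bind-address=:8443 \
  --health-probe-bind-address=:8081 \
  --leader-elect
```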
+1212
config/crd/bases/loom.j5t.io_spindlesets.yaml
··· 1 + --- 2 + apiVersion: apiextensions.k8s.io/v1 3 + kind: CustomResourceDefinition 4 + metadata: 5 + annotations: 6 + controller-gen.kubebuilder.io/version: v0.18.0 7 + name: spindlesets.loom.j5t.io 8 + spec: 9 + group: loom.j5t.io 10 + names: 11 + kind: SpindleSet 12 + listKind: SpindleSetList 13 + plural: spindlesets 14 + singular: spindleset 15 + scope: Namespaced 16 + versions: 17 + - additionalPrinterColumns: 18 + - jsonPath: .spec.knotUrl 19 + name: Knot URL 20 + type: string 21 + - jsonPath: .status.webSocketConnected 22 + name: Connected 23 + type: boolean 24 + - jsonPath: .status.runningJobs 25 + name: Running 26 + type: integer 27 + - jsonPath: .status.completedJobs 28 + name: Completed 29 + type: integer 30 + - jsonPath: .status.failedJobs 31 + name: Failed 32 + type: integer 33 + - jsonPath: .metadata.creationTimestamp 34 + name: Age 35 + type: date 36 + name: v1alpha1 37 + schema: 38 + openAPIV3Schema: 39 + description: SpindleSet is the Schema for the spindlesets API. 40 + properties: 41 + apiVersion: 42 + description: |- 43 + APIVersion defines the versioned schema of this representation of an object. 44 + Servers should convert recognized schemas to the latest internal value, and 45 + may reject unrecognized values. 46 + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources 47 + type: string 48 + kind: 49 + description: |- 50 + Kind is a string value representing the REST resource this object represents. 51 + Servers may infer this from the endpoint the client submits requests to. 52 + Cannot be updated. 53 + In CamelCase. 54 + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds 55 + type: string 56 + metadata: 57 + type: object 58 + spec: 59 + description: SpindleSetSpec defines the desired state of SpindleSet. 60 + properties: 61 + knotAuthSecret: 62 + description: |- 63 + KnotAuthSecret is the name of the Secret containing authentication credentials for the knot. 64 + The secret should contain a key "token" with the auth token. 65 + type: string 66 + knotUrl: 67 + description: KnotUrl is the URL of the tangled.org knot to connect 68 + to (e.g., https://tangled.org/@org/repo). 69 + pattern: ^https?://.* 70 + type: string 71 + maxConcurrentJobs: 72 + default: 10 73 + description: |- 74 + MaxConcurrentJobs is the maximum number of concurrent spindle jobs that can run. 75 + Defaults to 10 if not specified. 76 + format: int32 77 + maximum: 100 78 + minimum: 1 79 + type: integer 80 + template: 81 + description: |- 82 + Template is the default pod template configuration for spindle jobs. 83 + Individual workflows can override these settings. 84 + properties: 85 + affinity: 86 + description: Affinity defines scheduling constraints for spindle 87 + job pods. 88 + properties: 89 + nodeAffinity: 90 + description: Describes node affinity scheduling rules for 91 + the pod. 92 + properties: 93 + preferredDuringSchedulingIgnoredDuringExecution: 94 + description: |- 95 + The scheduler will prefer to schedule pods to nodes that satisfy 96 + the affinity expressions specified by this field, but it may choose 97 + a node that violates one or more of the expressions. The node that is 98 + most preferred is the one with the greatest sum of weights, i.e. 
99 + for each node that meets all of the scheduling requirements (resource 100 + request, requiredDuringScheduling affinity expressions, etc.), 101 + compute a sum by iterating through the elements of this field and adding 102 + "weight" to the sum if the node matches the corresponding matchExpressions; the 103 + node(s) with the highest sum are the most preferred. 104 + items: 105 + description: |- 106 + An empty preferred scheduling term matches all objects with implicit weight 0 107 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). 108 + properties: 109 + preference: 110 + description: A node selector term, associated with 111 + the corresponding weight. 112 + properties: 113 + matchExpressions: 114 + description: A list of node selector requirements 115 + by node's labels. 116 + items: 117 + description: |- 118 + A node selector requirement is a selector that contains values, a key, and an operator 119 + that relates the key and values. 120 + properties: 121 + key: 122 + description: The label key that the selector 123 + applies to. 124 + type: string 125 + operator: 126 + description: |- 127 + Represents a key's relationship to a set of values. 128 + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 129 + type: string 130 + values: 131 + description: |- 132 + An array of string values. If the operator is In or NotIn, 133 + the values array must be non-empty. If the operator is Exists or DoesNotExist, 134 + the values array must be empty. If the operator is Gt or Lt, the values 135 + array must have a single element, which will be interpreted as an integer. 136 + This array is replaced during a strategic merge patch. 137 + items: 138 + type: string 139 + type: array 140 + x-kubernetes-list-type: atomic 141 + required: 142 + - key 143 + - operator 144 + type: object 145 + type: array 146 + x-kubernetes-list-type: atomic 147 + matchFields: 148 + description: A list of node selector requirements 149 + by node's fields. 150 + items: 151 + description: |- 152 + A node selector requirement is a selector that contains values, a key, and an operator 153 + that relates the key and values. 154 + properties: 155 + key: 156 + description: The label key that the selector 157 + applies to. 158 + type: string 159 + operator: 160 + description: |- 161 + Represents a key's relationship to a set of values. 162 + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 163 + type: string 164 + values: 165 + description: |- 166 + An array of string values. If the operator is In or NotIn, 167 + the values array must be non-empty. If the operator is Exists or DoesNotExist, 168 + the values array must be empty. If the operator is Gt or Lt, the values 169 + array must have a single element, which will be interpreted as an integer. 170 + This array is replaced during a strategic merge patch. 171 + items: 172 + type: string 173 + type: array 174 + x-kubernetes-list-type: atomic 175 + required: 176 + - key 177 + - operator 178 + type: object 179 + type: array 180 + x-kubernetes-list-type: atomic 181 + type: object 182 + x-kubernetes-map-type: atomic 183 + weight: 184 + description: Weight associated with matching the 185 + corresponding nodeSelectorTerm, in the range 1-100. 
186 + format: int32 187 + type: integer 188 + required: 189 + - preference 190 + - weight 191 + type: object 192 + type: array 193 + x-kubernetes-list-type: atomic 194 + requiredDuringSchedulingIgnoredDuringExecution: 195 + description: |- 196 + If the affinity requirements specified by this field are not met at 197 + scheduling time, the pod will not be scheduled onto the node. 198 + If the affinity requirements specified by this field cease to be met 199 + at some point during pod execution (e.g. due to an update), the system 200 + may or may not try to eventually evict the pod from its node. 201 + properties: 202 + nodeSelectorTerms: 203 + description: Required. A list of node selector terms. 204 + The terms are ORed. 205 + items: 206 + description: |- 207 + A null or empty node selector term matches no objects. The requirements of 208 + them are ANDed. 209 + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. 210 + properties: 211 + matchExpressions: 212 + description: A list of node selector requirements 213 + by node's labels. 214 + items: 215 + description: |- 216 + A node selector requirement is a selector that contains values, a key, and an operator 217 + that relates the key and values. 218 + properties: 219 + key: 220 + description: The label key that the selector 221 + applies to. 222 + type: string 223 + operator: 224 + description: |- 225 + Represents a key's relationship to a set of values. 226 + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 227 + type: string 228 + values: 229 + description: |- 230 + An array of string values. If the operator is In or NotIn, 231 + the values array must be non-empty. If the operator is Exists or DoesNotExist, 232 + the values array must be empty. If the operator is Gt or Lt, the values 233 + array must have a single element, which will be interpreted as an integer. 234 + This array is replaced during a strategic merge patch. 235 + items: 236 + type: string 237 + type: array 238 + x-kubernetes-list-type: atomic 239 + required: 240 + - key 241 + - operator 242 + type: object 243 + type: array 244 + x-kubernetes-list-type: atomic 245 + matchFields: 246 + description: A list of node selector requirements 247 + by node's fields. 248 + items: 249 + description: |- 250 + A node selector requirement is a selector that contains values, a key, and an operator 251 + that relates the key and values. 252 + properties: 253 + key: 254 + description: The label key that the selector 255 + applies to. 256 + type: string 257 + operator: 258 + description: |- 259 + Represents a key's relationship to a set of values. 260 + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 261 + type: string 262 + values: 263 + description: |- 264 + An array of string values. If the operator is In or NotIn, 265 + the values array must be non-empty. If the operator is Exists or DoesNotExist, 266 + the values array must be empty. If the operator is Gt or Lt, the values 267 + array must have a single element, which will be interpreted as an integer. 268 + This array is replaced during a strategic merge patch. 
269 + items: 270 + type: string 271 + type: array 272 + x-kubernetes-list-type: atomic 273 + required: 274 + - key 275 + - operator 276 + type: object 277 + type: array 278 + x-kubernetes-list-type: atomic 279 + type: object 280 + x-kubernetes-map-type: atomic 281 + type: array 282 + x-kubernetes-list-type: atomic 283 + required: 284 + - nodeSelectorTerms 285 + type: object 286 + x-kubernetes-map-type: atomic 287 + type: object 288 + podAffinity: 289 + description: Describes pod affinity scheduling rules (e.g. 290 + co-locate this pod in the same node, zone, etc. as some 291 + other pod(s)). 292 + properties: 293 + preferredDuringSchedulingIgnoredDuringExecution: 294 + description: |- 295 + The scheduler will prefer to schedule pods to nodes that satisfy 296 + the affinity expressions specified by this field, but it may choose 297 + a node that violates one or more of the expressions. The node that is 298 + most preferred is the one with the greatest sum of weights, i.e. 299 + for each node that meets all of the scheduling requirements (resource 300 + request, requiredDuringScheduling affinity expressions, etc.), 301 + compute a sum by iterating through the elements of this field and adding 302 + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the 303 + node(s) with the highest sum are the most preferred. 304 + items: 305 + description: The weights of all of the matched WeightedPodAffinityTerm 306 + fields are added per-node to find the most preferred 307 + node(s) 308 + properties: 309 + podAffinityTerm: 310 + description: Required. A pod affinity term, associated 311 + with the corresponding weight. 312 + properties: 313 + labelSelector: 314 + description: |- 315 + A label query over a set of resources, in this case pods. 316 + If it's null, this PodAffinityTerm matches with no Pods. 317 + properties: 318 + matchExpressions: 319 + description: matchExpressions is a list 320 + of label selector requirements. The requirements 321 + are ANDed. 322 + items: 323 + description: |- 324 + A label selector requirement is a selector that contains values, a key, and an operator that 325 + relates the key and values. 326 + properties: 327 + key: 328 + description: key is the label key 329 + that the selector applies to. 330 + type: string 331 + operator: 332 + description: |- 333 + operator represents a key's relationship to a set of values. 334 + Valid operators are In, NotIn, Exists and DoesNotExist. 335 + type: string 336 + values: 337 + description: |- 338 + values is an array of string values. If the operator is In or NotIn, 339 + the values array must be non-empty. If the operator is Exists or DoesNotExist, 340 + the values array must be empty. This array is replaced during a strategic 341 + merge patch. 342 + items: 343 + type: string 344 + type: array 345 + x-kubernetes-list-type: atomic 346 + required: 347 + - key 348 + - operator 349 + type: object 350 + type: array 351 + x-kubernetes-list-type: atomic 352 + matchLabels: 353 + additionalProperties: 354 + type: string 355 + description: |- 356 + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels 357 + map is equivalent to an element of matchExpressions, whose key field is "key", the 358 + operator is "In", and the values array contains only "value". The requirements are ANDed. 
359 + type: object 360 + type: object 361 + x-kubernetes-map-type: atomic 362 + matchLabelKeys: 363 + description: |- 364 + MatchLabelKeys is a set of pod label keys to select which pods will 365 + be taken into consideration. The keys are used to lookup values from the 366 + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` 367 + to select the group of existing pods which pods will be taken into consideration 368 + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming 369 + pod labels will be ignored. The default value is empty. 370 + The same key is forbidden to exist in both matchLabelKeys and labelSelector. 371 + Also, matchLabelKeys cannot be set when labelSelector isn't set. 372 + items: 373 + type: string 374 + type: array 375 + x-kubernetes-list-type: atomic 376 + mismatchLabelKeys: 377 + description: |- 378 + MismatchLabelKeys is a set of pod label keys to select which pods will 379 + be taken into consideration. The keys are used to lookup values from the 380 + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` 381 + to select the group of existing pods which pods will be taken into consideration 382 + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming 383 + pod labels will be ignored. The default value is empty. 384 + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. 385 + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 386 + items: 387 + type: string 388 + type: array 389 + x-kubernetes-list-type: atomic 390 + namespaceSelector: 391 + description: |- 392 + A label query over the set of namespaces that the term applies to. 393 + The term is applied to the union of the namespaces selected by this field 394 + and the ones listed in the namespaces field. 395 + null selector and null or empty namespaces list means "this pod's namespace". 396 + An empty selector ({}) matches all namespaces. 397 + properties: 398 + matchExpressions: 399 + description: matchExpressions is a list 400 + of label selector requirements. The requirements 401 + are ANDed. 402 + items: 403 + description: |- 404 + A label selector requirement is a selector that contains values, a key, and an operator that 405 + relates the key and values. 406 + properties: 407 + key: 408 + description: key is the label key 409 + that the selector applies to. 410 + type: string 411 + operator: 412 + description: |- 413 + operator represents a key's relationship to a set of values. 414 + Valid operators are In, NotIn, Exists and DoesNotExist. 415 + type: string 416 + values: 417 + description: |- 418 + values is an array of string values. If the operator is In or NotIn, 419 + the values array must be non-empty. If the operator is Exists or DoesNotExist, 420 + the values array must be empty. This array is replaced during a strategic 421 + merge patch. 422 + items: 423 + type: string 424 + type: array 425 + x-kubernetes-list-type: atomic 426 + required: 427 + - key 428 + - operator 429 + type: object 430 + type: array 431 + x-kubernetes-list-type: atomic 432 + matchLabels: 433 + additionalProperties: 434 + type: string 435 + description: |- 436 + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels 437 + map is equivalent to an element of matchExpressions, whose key field is "key", the 438 + operator is "In", and the values array contains only "value". The requirements are ANDed. 
439 + type: object 440 + type: object 441 + x-kubernetes-map-type: atomic 442 + namespaces: 443 + description: |- 444 + namespaces specifies a static list of namespace names that the term applies to. 445 + The term is applied to the union of the namespaces listed in this field 446 + and the ones selected by namespaceSelector. 447 + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 448 + items: 449 + type: string 450 + type: array 451 + x-kubernetes-list-type: atomic 452 + topologyKey: 453 + description: |- 454 + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching 455 + the labelSelector in the specified namespaces, where co-located is defined as running on a node 456 + whose value of the label with key topologyKey matches that of any node on which any of the 457 + selected pods is running. 458 + Empty topologyKey is not allowed. 459 + type: string 460 + required: 461 + - topologyKey 462 + type: object 463 + weight: 464 + description: |- 465 + weight associated with matching the corresponding podAffinityTerm, 466 + in the range 1-100. 467 + format: int32 468 + type: integer 469 + required: 470 + - podAffinityTerm 471 + - weight 472 + type: object 473 + type: array 474 + x-kubernetes-list-type: atomic 475 + requiredDuringSchedulingIgnoredDuringExecution: 476 + description: |- 477 + If the affinity requirements specified by this field are not met at 478 + scheduling time, the pod will not be scheduled onto the node. 479 + If the affinity requirements specified by this field cease to be met 480 + at some point during pod execution (e.g. due to a pod label update), the 481 + system may or may not try to eventually evict the pod from its node. 482 + When there are multiple elements, the lists of nodes corresponding to each 483 + podAffinityTerm are intersected, i.e. all terms must be satisfied. 484 + items: 485 + description: |- 486 + Defines a set of pods (namely those matching the labelSelector 487 + relative to the given namespace(s)) that this pod should be 488 + co-located (affinity) or not co-located (anti-affinity) with, 489 + where co-located is defined as running on a node whose value of 490 + the label with key <topologyKey> matches that of any node on which 491 + a pod of the set of pods is running 492 + properties: 493 + labelSelector: 494 + description: |- 495 + A label query over a set of resources, in this case pods. 496 + If it's null, this PodAffinityTerm matches with no Pods. 497 + properties: 498 + matchExpressions: 499 + description: matchExpressions is a list of label 500 + selector requirements. The requirements are 501 + ANDed. 502 + items: 503 + description: |- 504 + A label selector requirement is a selector that contains values, a key, and an operator that 505 + relates the key and values. 506 + properties: 507 + key: 508 + description: key is the label key that 509 + the selector applies to. 510 + type: string 511 + operator: 512 + description: |- 513 + operator represents a key's relationship to a set of values. 514 + Valid operators are In, NotIn, Exists and DoesNotExist. 515 + type: string 516 + values: 517 + description: |- 518 + values is an array of string values. If the operator is In or NotIn, 519 + the values array must be non-empty. If the operator is Exists or DoesNotExist, 520 + the values array must be empty. This array is replaced during a strategic 521 + merge patch. 
522 + items: 523 + type: string 524 + type: array 525 + x-kubernetes-list-type: atomic 526 + required: 527 + - key 528 + - operator 529 + type: object 530 + type: array 531 + x-kubernetes-list-type: atomic 532 + matchLabels: 533 + additionalProperties: 534 + type: string 535 + description: |- 536 + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels 537 + map is equivalent to an element of matchExpressions, whose key field is "key", the 538 + operator is "In", and the values array contains only "value". The requirements are ANDed. 539 + type: object 540 + type: object 541 + x-kubernetes-map-type: atomic 542 + matchLabelKeys: 543 + description: |- 544 + MatchLabelKeys is a set of pod label keys to select which pods will 545 + be taken into consideration. The keys are used to lookup values from the 546 + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` 547 + to select the group of existing pods which pods will be taken into consideration 548 + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming 549 + pod labels will be ignored. The default value is empty. 550 + The same key is forbidden to exist in both matchLabelKeys and labelSelector. 551 + Also, matchLabelKeys cannot be set when labelSelector isn't set. 552 + items: 553 + type: string 554 + type: array 555 + x-kubernetes-list-type: atomic 556 + mismatchLabelKeys: 557 + description: |- 558 + MismatchLabelKeys is a set of pod label keys to select which pods will 559 + be taken into consideration. The keys are used to lookup values from the 560 + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` 561 + to select the group of existing pods which pods will be taken into consideration 562 + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming 563 + pod labels will be ignored. The default value is empty. 564 + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. 565 + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 566 + items: 567 + type: string 568 + type: array 569 + x-kubernetes-list-type: atomic 570 + namespaceSelector: 571 + description: |- 572 + A label query over the set of namespaces that the term applies to. 573 + The term is applied to the union of the namespaces selected by this field 574 + and the ones listed in the namespaces field. 575 + null selector and null or empty namespaces list means "this pod's namespace". 576 + An empty selector ({}) matches all namespaces. 577 + properties: 578 + matchExpressions: 579 + description: matchExpressions is a list of label 580 + selector requirements. The requirements are 581 + ANDed. 582 + items: 583 + description: |- 584 + A label selector requirement is a selector that contains values, a key, and an operator that 585 + relates the key and values. 586 + properties: 587 + key: 588 + description: key is the label key that 589 + the selector applies to. 590 + type: string 591 + operator: 592 + description: |- 593 + operator represents a key's relationship to a set of values. 594 + Valid operators are In, NotIn, Exists and DoesNotExist. 595 + type: string 596 + values: 597 + description: |- 598 + values is an array of string values. If the operator is In or NotIn, 599 + the values array must be non-empty. If the operator is Exists or DoesNotExist, 600 + the values array must be empty. This array is replaced during a strategic 601 + merge patch. 
602 + items: 603 + type: string 604 + type: array 605 + x-kubernetes-list-type: atomic 606 + required: 607 + - key 608 + - operator 609 + type: object 610 + type: array 611 + x-kubernetes-list-type: atomic 612 + matchLabels: 613 + additionalProperties: 614 + type: string 615 + description: |- 616 + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels 617 + map is equivalent to an element of matchExpressions, whose key field is "key", the 618 + operator is "In", and the values array contains only "value". The requirements are ANDed. 619 + type: object 620 + type: object 621 + x-kubernetes-map-type: atomic 622 + namespaces: 623 + description: |- 624 + namespaces specifies a static list of namespace names that the term applies to. 625 + The term is applied to the union of the namespaces listed in this field 626 + and the ones selected by namespaceSelector. 627 + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 628 + items: 629 + type: string 630 + type: array 631 + x-kubernetes-list-type: atomic 632 + topologyKey: 633 + description: |- 634 + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching 635 + the labelSelector in the specified namespaces, where co-located is defined as running on a node 636 + whose value of the label with key topologyKey matches that of any node on which any of the 637 + selected pods is running. 638 + Empty topologyKey is not allowed. 639 + type: string 640 + required: 641 + - topologyKey 642 + type: object 643 + type: array 644 + x-kubernetes-list-type: atomic 645 + type: object 646 + podAntiAffinity: 647 + description: Describes pod anti-affinity scheduling rules 648 + (e.g. avoid putting this pod in the same node, zone, etc. 649 + as some other pod(s)). 650 + properties: 651 + preferredDuringSchedulingIgnoredDuringExecution: 652 + description: |- 653 + The scheduler will prefer to schedule pods to nodes that satisfy 654 + the anti-affinity expressions specified by this field, but it may choose 655 + a node that violates one or more of the expressions. The node that is 656 + most preferred is the one with the greatest sum of weights, i.e. 657 + for each node that meets all of the scheduling requirements (resource 658 + request, requiredDuringScheduling anti-affinity expressions, etc.), 659 + compute a sum by iterating through the elements of this field and adding 660 + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the 661 + node(s) with the highest sum are the most preferred. 662 + items: 663 + description: The weights of all of the matched WeightedPodAffinityTerm 664 + fields are added per-node to find the most preferred 665 + node(s) 666 + properties: 667 + podAffinityTerm: 668 + description: Required. A pod affinity term, associated 669 + with the corresponding weight. 670 + properties: 671 + labelSelector: 672 + description: |- 673 + A label query over a set of resources, in this case pods. 674 + If it's null, this PodAffinityTerm matches with no Pods. 675 + properties: 676 + matchExpressions: 677 + description: matchExpressions is a list 678 + of label selector requirements. The requirements 679 + are ANDed. 680 + items: 681 + description: |- 682 + A label selector requirement is a selector that contains values, a key, and an operator that 683 + relates the key and values. 684 + properties: 685 + key: 686 + description: key is the label key 687 + that the selector applies to. 
688 + type: string 689 + operator: 690 + description: |- 691 + operator represents a key's relationship to a set of values. 692 + Valid operators are In, NotIn, Exists and DoesNotExist. 693 + type: string 694 + values: 695 + description: |- 696 + values is an array of string values. If the operator is In or NotIn, 697 + the values array must be non-empty. If the operator is Exists or DoesNotExist, 698 + the values array must be empty. This array is replaced during a strategic 699 + merge patch. 700 + items: 701 + type: string 702 + type: array 703 + x-kubernetes-list-type: atomic 704 + required: 705 + - key 706 + - operator 707 + type: object 708 + type: array 709 + x-kubernetes-list-type: atomic 710 + matchLabels: 711 + additionalProperties: 712 + type: string 713 + description: |- 714 + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels 715 + map is equivalent to an element of matchExpressions, whose key field is "key", the 716 + operator is "In", and the values array contains only "value". The requirements are ANDed. 717 + type: object 718 + type: object 719 + x-kubernetes-map-type: atomic 720 + matchLabelKeys: 721 + description: |- 722 + MatchLabelKeys is a set of pod label keys to select which pods will 723 + be taken into consideration. The keys are used to lookup values from the 724 + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` 725 + to select the group of existing pods which pods will be taken into consideration 726 + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming 727 + pod labels will be ignored. The default value is empty. 728 + The same key is forbidden to exist in both matchLabelKeys and labelSelector. 729 + Also, matchLabelKeys cannot be set when labelSelector isn't set. 730 + items: 731 + type: string 732 + type: array 733 + x-kubernetes-list-type: atomic 734 + mismatchLabelKeys: 735 + description: |- 736 + MismatchLabelKeys is a set of pod label keys to select which pods will 737 + be taken into consideration. The keys are used to lookup values from the 738 + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` 739 + to select the group of existing pods which pods will be taken into consideration 740 + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming 741 + pod labels will be ignored. The default value is empty. 742 + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. 743 + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 744 + items: 745 + type: string 746 + type: array 747 + x-kubernetes-list-type: atomic 748 + namespaceSelector: 749 + description: |- 750 + A label query over the set of namespaces that the term applies to. 751 + The term is applied to the union of the namespaces selected by this field 752 + and the ones listed in the namespaces field. 753 + null selector and null or empty namespaces list means "this pod's namespace". 754 + An empty selector ({}) matches all namespaces. 755 + properties: 756 + matchExpressions: 757 + description: matchExpressions is a list 758 + of label selector requirements. The requirements 759 + are ANDed. 760 + items: 761 + description: |- 762 + A label selector requirement is a selector that contains values, a key, and an operator that 763 + relates the key and values. 764 + properties: 765 + key: 766 + description: key is the label key 767 + that the selector applies to. 
768 + type: string 769 + operator: 770 + description: |- 771 + operator represents a key's relationship to a set of values. 772 + Valid operators are In, NotIn, Exists and DoesNotExist. 773 + type: string 774 + values: 775 + description: |- 776 + values is an array of string values. If the operator is In or NotIn, 777 + the values array must be non-empty. If the operator is Exists or DoesNotExist, 778 + the values array must be empty. This array is replaced during a strategic 779 + merge patch. 780 + items: 781 + type: string 782 + type: array 783 + x-kubernetes-list-type: atomic 784 + required: 785 + - key 786 + - operator 787 + type: object 788 + type: array 789 + x-kubernetes-list-type: atomic 790 + matchLabels: 791 + additionalProperties: 792 + type: string 793 + description: |- 794 + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels 795 + map is equivalent to an element of matchExpressions, whose key field is "key", the 796 + operator is "In", and the values array contains only "value". The requirements are ANDed. 797 + type: object 798 + type: object 799 + x-kubernetes-map-type: atomic 800 + namespaces: 801 + description: |- 802 + namespaces specifies a static list of namespace names that the term applies to. 803 + The term is applied to the union of the namespaces listed in this field 804 + and the ones selected by namespaceSelector. 805 + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 806 + items: 807 + type: string 808 + type: array 809 + x-kubernetes-list-type: atomic 810 + topologyKey: 811 + description: |- 812 + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching 813 + the labelSelector in the specified namespaces, where co-located is defined as running on a node 814 + whose value of the label with key topologyKey matches that of any node on which any of the 815 + selected pods is running. 816 + Empty topologyKey is not allowed. 817 + type: string 818 + required: 819 + - topologyKey 820 + type: object 821 + weight: 822 + description: |- 823 + weight associated with matching the corresponding podAffinityTerm, 824 + in the range 1-100. 825 + format: int32 826 + type: integer 827 + required: 828 + - podAffinityTerm 829 + - weight 830 + type: object 831 + type: array 832 + x-kubernetes-list-type: atomic 833 + requiredDuringSchedulingIgnoredDuringExecution: 834 + description: |- 835 + If the anti-affinity requirements specified by this field are not met at 836 + scheduling time, the pod will not be scheduled onto the node. 837 + If the anti-affinity requirements specified by this field cease to be met 838 + at some point during pod execution (e.g. due to a pod label update), the 839 + system may or may not try to eventually evict the pod from its node. 840 + When there are multiple elements, the lists of nodes corresponding to each 841 + podAffinityTerm are intersected, i.e. all terms must be satisfied. 842 + items: 843 + description: |- 844 + Defines a set of pods (namely those matching the labelSelector 845 + relative to the given namespace(s)) that this pod should be 846 + co-located (affinity) or not co-located (anti-affinity) with, 847 + where co-located is defined as running on a node whose value of 848 + the label with key <topologyKey> matches that of any node on which 849 + a pod of the set of pods is running 850 + properties: 851 + labelSelector: 852 + description: |- 853 + A label query over a set of resources, in this case pods. 
854 + If it's null, this PodAffinityTerm matches with no Pods. 855 + properties: 856 + matchExpressions: 857 + description: matchExpressions is a list of label 858 + selector requirements. The requirements are 859 + ANDed. 860 + items: 861 + description: |- 862 + A label selector requirement is a selector that contains values, a key, and an operator that 863 + relates the key and values. 864 + properties: 865 + key: 866 + description: key is the label key that 867 + the selector applies to. 868 + type: string 869 + operator: 870 + description: |- 871 + operator represents a key's relationship to a set of values. 872 + Valid operators are In, NotIn, Exists and DoesNotExist. 873 + type: string 874 + values: 875 + description: |- 876 + values is an array of string values. If the operator is In or NotIn, 877 + the values array must be non-empty. If the operator is Exists or DoesNotExist, 878 + the values array must be empty. This array is replaced during a strategic 879 + merge patch. 880 + items: 881 + type: string 882 + type: array 883 + x-kubernetes-list-type: atomic 884 + required: 885 + - key 886 + - operator 887 + type: object 888 + type: array 889 + x-kubernetes-list-type: atomic 890 + matchLabels: 891 + additionalProperties: 892 + type: string 893 + description: |- 894 + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels 895 + map is equivalent to an element of matchExpressions, whose key field is "key", the 896 + operator is "In", and the values array contains only "value". The requirements are ANDed. 897 + type: object 898 + type: object 899 + x-kubernetes-map-type: atomic 900 + matchLabelKeys: 901 + description: |- 902 + MatchLabelKeys is a set of pod label keys to select which pods will 903 + be taken into consideration. The keys are used to lookup values from the 904 + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` 905 + to select the group of existing pods which pods will be taken into consideration 906 + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming 907 + pod labels will be ignored. The default value is empty. 908 + The same key is forbidden to exist in both matchLabelKeys and labelSelector. 909 + Also, matchLabelKeys cannot be set when labelSelector isn't set. 910 + items: 911 + type: string 912 + type: array 913 + x-kubernetes-list-type: atomic 914 + mismatchLabelKeys: 915 + description: |- 916 + MismatchLabelKeys is a set of pod label keys to select which pods will 917 + be taken into consideration. The keys are used to lookup values from the 918 + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` 919 + to select the group of existing pods which pods will be taken into consideration 920 + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming 921 + pod labels will be ignored. The default value is empty. 922 + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. 923 + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 924 + items: 925 + type: string 926 + type: array 927 + x-kubernetes-list-type: atomic 928 + namespaceSelector: 929 + description: |- 930 + A label query over the set of namespaces that the term applies to. 931 + The term is applied to the union of the namespaces selected by this field 932 + and the ones listed in the namespaces field. 933 + null selector and null or empty namespaces list means "this pod's namespace". 
934 + An empty selector ({}) matches all namespaces. 935 + properties: 936 + matchExpressions: 937 + description: matchExpressions is a list of label 938 + selector requirements. The requirements are 939 + ANDed. 940 + items: 941 + description: |- 942 + A label selector requirement is a selector that contains values, a key, and an operator that 943 + relates the key and values. 944 + properties: 945 + key: 946 + description: key is the label key that 947 + the selector applies to. 948 + type: string 949 + operator: 950 + description: |- 951 + operator represents a key's relationship to a set of values. 952 + Valid operators are In, NotIn, Exists and DoesNotExist. 953 + type: string 954 + values: 955 + description: |- 956 + values is an array of string values. If the operator is In or NotIn, 957 + the values array must be non-empty. If the operator is Exists or DoesNotExist, 958 + the values array must be empty. This array is replaced during a strategic 959 + merge patch. 960 + items: 961 + type: string 962 + type: array 963 + x-kubernetes-list-type: atomic 964 + required: 965 + - key 966 + - operator 967 + type: object 968 + type: array 969 + x-kubernetes-list-type: atomic 970 + matchLabels: 971 + additionalProperties: 972 + type: string 973 + description: |- 974 + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels 975 + map is equivalent to an element of matchExpressions, whose key field is "key", the 976 + operator is "In", and the values array contains only "value". The requirements are ANDed. 977 + type: object 978 + type: object 979 + x-kubernetes-map-type: atomic 980 + namespaces: 981 + description: |- 982 + namespaces specifies a static list of namespace names that the term applies to. 983 + The term is applied to the union of the namespaces listed in this field 984 + and the ones selected by namespaceSelector. 985 + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 986 + items: 987 + type: string 988 + type: array 989 + x-kubernetes-list-type: atomic 990 + topologyKey: 991 + description: |- 992 + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching 993 + the labelSelector in the specified namespaces, where co-located is defined as running on a node 994 + whose value of the label with key topologyKey matches that of any node on which any of the 995 + selected pods is running. 996 + Empty topologyKey is not allowed. 997 + type: string 998 + required: 999 + - topologyKey 1000 + type: object 1001 + type: array 1002 + x-kubernetes-list-type: atomic 1003 + type: object 1004 + type: object 1005 + nodeSelector: 1006 + additionalProperties: 1007 + type: string 1008 + description: NodeSelector is a selector which must be true for 1009 + the pod to fit on a node. 1010 + type: object 1011 + resources: 1012 + description: Resources defines the compute resource requirements 1013 + for spindle jobs. 1014 + properties: 1015 + claims: 1016 + description: |- 1017 + Claims lists the names of resources, defined in spec.resourceClaims, 1018 + that are used by this container. 1019 + 1020 + This is an alpha field and requires enabling the 1021 + DynamicResourceAllocation feature gate. 1022 + 1023 + This field is immutable. It can only be set for containers. 1024 + items: 1025 + description: ResourceClaim references one entry in PodSpec.ResourceClaims. 
1026 + properties: 1027 + name: 1028 + description: |- 1029 + Name must match the name of one entry in pod.spec.resourceClaims of 1030 + the Pod where this field is used. It makes that resource available 1031 + inside a container. 1032 + type: string 1033 + request: 1034 + description: |- 1035 + Request is the name chosen for a request in the referenced claim. 1036 + If empty, everything from the claim is made available, otherwise 1037 + only the result of this request. 1038 + type: string 1039 + required: 1040 + - name 1041 + type: object 1042 + type: array 1043 + x-kubernetes-list-map-keys: 1044 + - name 1045 + x-kubernetes-list-type: map 1046 + limits: 1047 + additionalProperties: 1048 + anyOf: 1049 + - type: integer 1050 + - type: string 1051 + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ 1052 + x-kubernetes-int-or-string: true 1053 + description: |- 1054 + Limits describes the maximum amount of compute resources allowed. 1055 + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ 1056 + type: object 1057 + requests: 1058 + additionalProperties: 1059 + anyOf: 1060 + - type: integer 1061 + - type: string 1062 + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ 1063 + x-kubernetes-int-or-string: true 1064 + description: |- 1065 + Requests describes the minimum amount of compute resources required. 1066 + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, 1067 + otherwise to an implementation-defined value. Requests cannot exceed Limits. 1068 + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ 1069 + type: object 1070 + type: object 1071 + tolerations: 1072 + description: Tolerations allows pods to schedule onto nodes with 1073 + matching taints. 1074 + items: 1075 + description: |- 1076 + The pod this Toleration is attached to tolerates any taint that matches 1077 + the triple <key,value,effect> using the matching operator <operator>. 1078 + properties: 1079 + effect: 1080 + description: |- 1081 + Effect indicates the taint effect to match. Empty means match all taint effects. 1082 + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. 1083 + type: string 1084 + key: 1085 + description: |- 1086 + Key is the taint key that the toleration applies to. Empty means match all taint keys. 1087 + If the key is empty, operator must be Exists; this combination means to match all values and all keys. 1088 + type: string 1089 + operator: 1090 + description: |- 1091 + Operator represents a key's relationship to the value. 1092 + Valid operators are Exists and Equal. Defaults to Equal. 1093 + Exists is equivalent to wildcard for value, so that a pod can 1094 + tolerate all taints of a particular category. 1095 + type: string 1096 + tolerationSeconds: 1097 + description: |- 1098 + TolerationSeconds represents the period of time the toleration (which must be 1099 + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, 1100 + it is not set, which means tolerate the taint forever (do not evict). Zero and 1101 + negative values will be treated as 0 (evict immediately) by the system. 1102 + format: int64 1103 + type: integer 1104 + value: 1105 + description: |- 1106 + Value is the taint value the toleration matches to. 
1107 + If the operator is Exists, the value should be empty, otherwise just a regular string. 1108 + type: string 1109 + type: object 1110 + type: array 1111 + type: object 1112 + required: 1113 + - knotAuthSecret 1114 + - knotUrl 1115 + type: object 1116 + status: 1117 + description: SpindleSetStatus defines the observed state of SpindleSet. 1118 + properties: 1119 + completedJobs: 1120 + description: CompletedJobs is the total number of spindle jobs that 1121 + have completed successfully. 1122 + format: int32 1123 + type: integer 1124 + conditions: 1125 + description: Conditions represent the latest available observations 1126 + of the SpindleSet's state. 1127 + items: 1128 + description: Condition contains details for one aspect of the current 1129 + state of this API Resource. 1130 + properties: 1131 + lastTransitionTime: 1132 + description: |- 1133 + lastTransitionTime is the last time the condition transitioned from one status to another. 1134 + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 1135 + format: date-time 1136 + type: string 1137 + message: 1138 + description: |- 1139 + message is a human readable message indicating details about the transition. 1140 + This may be an empty string. 1141 + maxLength: 32768 1142 + type: string 1143 + observedGeneration: 1144 + description: |- 1145 + observedGeneration represents the .metadata.generation that the condition was set based upon. 1146 + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date 1147 + with respect to the current state of the instance. 1148 + format: int64 1149 + minimum: 0 1150 + type: integer 1151 + reason: 1152 + description: |- 1153 + reason contains a programmatic identifier indicating the reason for the condition's last transition. 1154 + Producers of specific condition types may define expected values and meanings for this field, 1155 + and whether the values are considered a guaranteed API. 1156 + The value should be a CamelCase string. 1157 + This field may not be empty. 1158 + maxLength: 1024 1159 + minLength: 1 1160 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ 1161 + type: string 1162 + status: 1163 + description: status of the condition, one of True, False, Unknown. 1164 + enum: 1165 + - "True" 1166 + - "False" 1167 + - Unknown 1168 + type: string 1169 + type: 1170 + description: type of condition in CamelCase or in foo.example.com/CamelCase. 1171 + maxLength: 316 1172 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ 1173 + type: string 1174 + required: 1175 + - lastTransitionTime 1176 + - message 1177 + - reason 1178 + - status 1179 + - type 1180 + type: object 1181 + type: array 1182 + x-kubernetes-list-map-keys: 1183 + - type 1184 + x-kubernetes-list-type: map 1185 + failedJobs: 1186 + description: FailedJobs is the total number of spindle jobs that have 1187 + failed. 1188 + format: int32 1189 + type: integer 1190 + lastEventTime: 1191 + description: LastEventTime is the timestamp of the last pipeline event 1192 + received from the knot. 1193 + format: date-time 1194 + type: string 1195 + pendingJobs: 1196 + description: PendingJobs is the number of spindle jobs currently pending. 1197 + format: int32 1198 + type: integer 1199 + runningJobs: 1200 + description: RunningJobs is the number of spindle jobs currently running. 
1201 + format: int32 1202 + type: integer 1203 + webSocketConnected: 1204 + description: WebSocketConnected indicates whether the WebSocket connection 1205 + to the knot is active. 1206 + type: boolean 1207 + type: object 1208 + type: object 1209 + served: true 1210 + storage: true 1211 + subresources: 1212 + status: {}
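For orientation, a minimal sketch of how the scheduling fields validated by the schema above (template.nodeSelector, template.tolerations, template.affinity) might be set on a SpindleSet. The node label, taint, and pod label below are hypothetical placeholders, not values the operator defines:

apiVersion: loom.j5t.io/v1alpha1
kind: SpindleSet
metadata:
  name: spindleset-scheduling-example
spec:
  knotUrl: https://knot1.tangled.sh
  knotAuthSecret: spindle-auth
  template:
    # Hypothetical label: run job pods only on nodes marked for CI.
    nodeSelector:
      node-role.example.com/ci: "true"
    # Hypothetical taint: allow job pods onto dedicated CI nodes.
    tolerations:
      - key: dedicated
        operator: Equal
        value: ci
        effect: NoSchedule
    # Prefer spreading job pods across nodes (the pod label is hypothetical).
    affinity:
      podAntiAffinity:
        preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchLabels:
                  app.kubernetes.io/name: spindle-job
              topologyKey: kubernetes.io/hostname

Anti-affinity is only a preference here, so jobs still schedule when the cluster has fewer nodes than concurrent jobs.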
+16
config/crd/kustomization.yaml
··· 1 + # This kustomization.yaml is not intended to be run by itself, 2 + # since it depends on service name and namespace that are out of this kustomize package. 3 + # It should be run by config/default 4 + resources: 5 + - bases/loom.j5t.io_spindlesets.yaml 6 + # +kubebuilder:scaffold:crdkustomizeresource 7 + 8 + patches: 9 + # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. 10 + # patches here are for enabling the conversion webhook for each CRD 11 + # +kubebuilder:scaffold:crdkustomizewebhookpatch 12 + 13 + # [WEBHOOK] To enable webhook, uncomment the following section 14 + # the following config is for teaching kustomize how to do kustomization for CRDs. 15 + #configurations: 16 + #- kustomizeconfig.yaml
+19
config/crd/kustomizeconfig.yaml
··· 1 + # This file teaches kustomize how to substitute name and namespace references in CRDs 2 + nameReference: 3 + - kind: Service 4 + version: v1 5 + fieldSpecs: 6 + - kind: CustomResourceDefinition 7 + version: v1 8 + group: apiextensions.k8s.io 9 + path: spec/conversion/webhook/clientConfig/service/name 10 + 11 + namespace: 12 + - kind: CustomResourceDefinition 13 + version: v1 14 + group: apiextensions.k8s.io 15 + path: spec/conversion/webhook/clientConfig/service/namespace 16 + create: false 17 + 18 + varReference: 19 + - path: metadata/annotations
+30
config/default/cert_metrics_manager_patch.yaml
··· 1 + # This patch adds the args, volumes, and ports to allow the manager to use the metrics-server certs. 2 + 3 + # Add the volumeMount for the metrics-server certs 4 + - op: add 5 + path: /spec/template/spec/containers/0/volumeMounts/- 6 + value: 7 + mountPath: /tmp/k8s-metrics-server/metrics-certs 8 + name: metrics-certs 9 + readOnly: true 10 + 11 + # Add the --metrics-cert-path argument for the metrics server 12 + - op: add 13 + path: /spec/template/spec/containers/0/args/- 14 + value: --metrics-cert-path=/tmp/k8s-metrics-server/metrics-certs 15 + 16 + # Add the metrics-server certs volume configuration 17 + - op: add 18 + path: /spec/template/spec/volumes/- 19 + value: 20 + name: metrics-certs 21 + secret: 22 + secretName: metrics-server-cert 23 + optional: false 24 + items: 25 + - key: ca.crt 26 + path: ca.crt 27 + - key: tls.crt 28 + path: tls.crt 29 + - key: tls.key 30 + path: tls.key
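This patch assumes a Secret named metrics-server-cert already exists in the manager's namespace; nothing in this commit creates it. A hedged sketch of a cert-manager Certificate that could produce it, following the commented [CERTMANAGER] replacement hints in config/default/kustomization.yaml (the issuer name is an assumption, and the dnsNames entries are the placeholders those replacements would substitute):

apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: metrics-certs  # the name targeted by the commented replacements in config/default/kustomization.yaml
  namespace: system
spec:
  dnsNames:
    # Placeholders: the [CERTMANAGER] replacements substitute the metrics
    # Service name and namespace into these two entries.
    - SERVICE_NAME.SERVICE_NAMESPACE.svc
    - SERVICE_NAME.SERVICE_NAMESPACE.svc.cluster.local
  issuerRef:
    kind: Issuer
    name: selfsigned-issuer  # assumed self-signed Issuer; any cluster Issuer works
  secretName: metrics-server-cert  # must match the secretName mounted by the patch above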
+234
config/default/kustomization.yaml
··· 1 + # Adds namespace to all resources. 2 + namespace: loom-system 3 + 4 + # Value of this field is prepended to the 5 + # names of all resources, e.g. a deployment named 6 + # "wordpress" becomes "alices-wordpress". 7 + # Note that it should also match with the prefix (text before '-') of the namespace 8 + # field above. 9 + namePrefix: loom- 10 + 11 + # Labels to add to all resources and selectors. 12 + #labels: 13 + #- includeSelectors: true 14 + # pairs: 15 + # someName: someValue 16 + 17 + resources: 18 + - ../crd 19 + - ../rbac 20 + - ../manager 21 + # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 22 + # crd/kustomization.yaml 23 + #- ../webhook 24 + # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. 25 + #- ../certmanager 26 + # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. 27 + #- ../prometheus 28 + # [METRICS] Expose the controller manager metrics service. 29 + - metrics_service.yaml 30 + # [NETWORK POLICY] Protect the /metrics endpoint and Webhook Server with NetworkPolicy. 31 + # Only Pod(s) running a namespace labeled with 'metrics: enabled' will be able to gather the metrics. 32 + # Only CR(s) which requires webhooks and are applied on namespaces labeled with 'webhooks: enabled' will 33 + # be able to communicate with the Webhook Server. 34 + #- ../network-policy 35 + 36 + # Uncomment the patches line if you enable Metrics 37 + patches: 38 + # [METRICS] The following patch will enable the metrics endpoint using HTTPS and the port :8443. 39 + # More info: https://book.kubebuilder.io/reference/metrics 40 + - path: manager_metrics_patch.yaml 41 + target: 42 + kind: Deployment 43 + 44 + # Uncomment the patches line if you enable Metrics and CertManager 45 + # [METRICS-WITH-CERTS] To enable metrics protected with certManager, uncomment the following line. 46 + # This patch will protect the metrics with certManager self-signed certs. 47 + #- path: cert_metrics_manager_patch.yaml 48 + # target: 49 + # kind: Deployment 50 + 51 + # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 52 + # crd/kustomization.yaml 53 + #- path: manager_webhook_patch.yaml 54 + # target: 55 + # kind: Deployment 56 + 57 + # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. 58 + # Uncomment the following replacements to add the cert-manager CA injection annotations 59 + #replacements: 60 + # - source: # Uncomment the following block to enable certificates for metrics 61 + # kind: Service 62 + # version: v1 63 + # name: controller-manager-metrics-service 64 + # fieldPath: metadata.name 65 + # targets: 66 + # - select: 67 + # kind: Certificate 68 + # group: cert-manager.io 69 + # version: v1 70 + # name: metrics-certs 71 + # fieldPaths: 72 + # - spec.dnsNames.0 73 + # - spec.dnsNames.1 74 + # options: 75 + # delimiter: '.' 76 + # index: 0 77 + # create: true 78 + # - select: # Uncomment the following to set the Service name for TLS config in Prometheus ServiceMonitor 79 + # kind: ServiceMonitor 80 + # group: monitoring.coreos.com 81 + # version: v1 82 + # name: controller-manager-metrics-monitor 83 + # fieldPaths: 84 + # - spec.endpoints.0.tlsConfig.serverName 85 + # options: 86 + # delimiter: '.' 
87 + # index: 0 88 + # create: true 89 + # 90 + # - source: 91 + # kind: Service 92 + # version: v1 93 + # name: controller-manager-metrics-service 94 + # fieldPath: metadata.namespace 95 + # targets: 96 + # - select: 97 + # kind: Certificate 98 + # group: cert-manager.io 99 + # version: v1 100 + # name: metrics-certs 101 + # fieldPaths: 102 + # - spec.dnsNames.0 103 + # - spec.dnsNames.1 104 + # options: 105 + # delimiter: '.' 106 + # index: 1 107 + # create: true 108 + # - select: # Uncomment the following to set the Service namespace for TLS in Prometheus ServiceMonitor 109 + # kind: ServiceMonitor 110 + # group: monitoring.coreos.com 111 + # version: v1 112 + # name: controller-manager-metrics-monitor 113 + # fieldPaths: 114 + # - spec.endpoints.0.tlsConfig.serverName 115 + # options: 116 + # delimiter: '.' 117 + # index: 1 118 + # create: true 119 + # 120 + # - source: # Uncomment the following block if you have any webhook 121 + # kind: Service 122 + # version: v1 123 + # name: webhook-service 124 + # fieldPath: .metadata.name # Name of the service 125 + # targets: 126 + # - select: 127 + # kind: Certificate 128 + # group: cert-manager.io 129 + # version: v1 130 + # name: serving-cert 131 + # fieldPaths: 132 + # - .spec.dnsNames.0 133 + # - .spec.dnsNames.1 134 + # options: 135 + # delimiter: '.' 136 + # index: 0 137 + # create: true 138 + # - source: 139 + # kind: Service 140 + # version: v1 141 + # name: webhook-service 142 + # fieldPath: .metadata.namespace # Namespace of the service 143 + # targets: 144 + # - select: 145 + # kind: Certificate 146 + # group: cert-manager.io 147 + # version: v1 148 + # name: serving-cert 149 + # fieldPaths: 150 + # - .spec.dnsNames.0 151 + # - .spec.dnsNames.1 152 + # options: 153 + # delimiter: '.' 154 + # index: 1 155 + # create: true 156 + # 157 + # - source: # Uncomment the following block if you have a ValidatingWebhook (--programmatic-validation) 158 + # kind: Certificate 159 + # group: cert-manager.io 160 + # version: v1 161 + # name: serving-cert # This name should match the one in certificate.yaml 162 + # fieldPath: .metadata.namespace # Namespace of the certificate CR 163 + # targets: 164 + # - select: 165 + # kind: ValidatingWebhookConfiguration 166 + # fieldPaths: 167 + # - .metadata.annotations.[cert-manager.io/inject-ca-from] 168 + # options: 169 + # delimiter: '/' 170 + # index: 0 171 + # create: true 172 + # - source: 173 + # kind: Certificate 174 + # group: cert-manager.io 175 + # version: v1 176 + # name: serving-cert 177 + # fieldPath: .metadata.name 178 + # targets: 179 + # - select: 180 + # kind: ValidatingWebhookConfiguration 181 + # fieldPaths: 182 + # - .metadata.annotations.[cert-manager.io/inject-ca-from] 183 + # options: 184 + # delimiter: '/' 185 + # index: 1 186 + # create: true 187 + # 188 + # - source: # Uncomment the following block if you have a DefaultingWebhook (--defaulting ) 189 + # kind: Certificate 190 + # group: cert-manager.io 191 + # version: v1 192 + # name: serving-cert 193 + # fieldPath: .metadata.namespace # Namespace of the certificate CR 194 + # targets: 195 + # - select: 196 + # kind: MutatingWebhookConfiguration 197 + # fieldPaths: 198 + # - .metadata.annotations.[cert-manager.io/inject-ca-from] 199 + # options: 200 + # delimiter: '/' 201 + # index: 0 202 + # create: true 203 + # - source: 204 + # kind: Certificate 205 + # group: cert-manager.io 206 + # version: v1 207 + # name: serving-cert 208 + # fieldPath: .metadata.name 209 + # targets: 210 + # - select: 211 + # kind: 
MutatingWebhookConfiguration 212 + # fieldPaths: 213 + # - .metadata.annotations.[cert-manager.io/inject-ca-from] 214 + # options: 215 + # delimiter: '/' 216 + # index: 1 217 + # create: true 218 + # 219 + # - source: # Uncomment the following block if you have a ConversionWebhook (--conversion) 220 + # kind: Certificate 221 + # group: cert-manager.io 222 + # version: v1 223 + # name: serving-cert 224 + # fieldPath: .metadata.namespace # Namespace of the certificate CR 225 + # targets: # Do not remove or uncomment the following scaffold marker; required to generate code for target CRD. 226 + # +kubebuilder:scaffold:crdkustomizecainjectionns 227 + # - source: 228 + # kind: Certificate 229 + # group: cert-manager.io 230 + # version: v1 231 + # name: serving-cert 232 + # fieldPath: .metadata.name 233 + # targets: # Do not remove or uncomment the following scaffold marker; required to generate code for target CRD. 234 + # +kubebuilder:scaffold:crdkustomizecainjectionname
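Note how the namespace and namePrefix transformers above compound with names that already carry a loom- prefix: the Service declared as loom-spindle-service in config/manager/service.yaml (below) renders as loom-loom-spindle-service, which is exactly the backend name the HTTPRoute in config/gateway references. An illustrative excerpt of the rendered output:

# Illustrative excerpt of `kustomize build config/default` output:
apiVersion: v1
kind: Service
metadata:
  name: loom-loom-spindle-service  # namePrefix "loom-" + declared name "loom-spindle-service"
  namespace: loom-system           # namespace transformer replaces "system"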
+4
config/default/manager_metrics_patch.yaml
··· 1 + # This patch adds the args to allow exposing the metrics endpoint using HTTPS 2 + - op: add 3 + path: /spec/template/spec/containers/0/args/0 4 + value: --metrics-bind-address=:8443
+18
config/default/metrics_service.yaml
··· 1 + apiVersion: v1 2 + kind: Service 3 + metadata: 4 + labels: 5 + control-plane: controller-manager 6 + app.kubernetes.io/name: loom 7 + app.kubernetes.io/managed-by: kustomize 8 + name: controller-manager-metrics-service 9 + namespace: system 10 + spec: 11 + ports: 12 + - name: https 13 + port: 8443 14 + protocol: TCP 15 + targetPort: 8443 16 + selector: 17 + control-plane: controller-manager 18 + app.kubernetes.io/name: loom
+15
config/gateway/httproute.yaml
··· 1 + apiVersion: gateway.networking.k8s.io/v1 2 + kind: HTTPRoute 3 + metadata: 4 + name: loom-tls-route 5 + namespace: loom-system 6 + spec: 7 + parentRefs: 8 + - name: jarrett-net 9 + namespace: gateway 10 + hostnames: 11 + - loom.jarrett.net 12 + rules: 13 + - backendRefs: 14 + - name: loom-loom-spindle-service 15 + port: 6555
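The route attaches via parentRefs to a Gateway named jarrett-net in the gateway namespace, which this commit does not create. A hedged sketch of a compatible Gateway; the gateway class and TLS Secret names are assumptions that depend on the cluster's installed Gateway controller:

apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
  name: jarrett-net
  namespace: gateway
spec:
  gatewayClassName: example-gateway-class  # assumption: set to the installed controller's class
  listeners:
    - name: https
      protocol: HTTPS
      port: 443
      hostname: "*.jarrett.net"
      tls:
        mode: Terminate
        certificateRefs:
          - name: jarrett-net-tls  # hypothetical TLS Secret
      allowedRoutes:
        namespaces:
          from: All  # must admit HTTPRoutes from loom-system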
+9
config/manager/kustomization.yaml
··· 1 + resources: 2 + - manager.yaml 3 + - service.yaml 4 + apiVersion: kustomize.config.k8s.io/v1beta1 5 + kind: Kustomization 6 + images: 7 + - name: controller 8 + newName: atcr.io/evan.jarrett.net/loom 9 + newTag: v0.0.1
+102
config/manager/manager.yaml
··· 1 + apiVersion: v1 2 + kind: Namespace 3 + metadata: 4 + labels: 5 + control-plane: controller-manager 6 + app.kubernetes.io/name: loom 7 + app.kubernetes.io/managed-by: kustomize 8 + name: system 9 + --- 10 + apiVersion: apps/v1 11 + kind: Deployment 12 + metadata: 13 + name: controller-manager 14 + namespace: system 15 + labels: 16 + control-plane: controller-manager 17 + app.kubernetes.io/name: loom 18 + app.kubernetes.io/managed-by: kustomize 19 + spec: 20 + selector: 21 + matchLabels: 22 + control-plane: controller-manager 23 + app.kubernetes.io/name: loom 24 + replicas: 1 25 + template: 26 + metadata: 27 + annotations: 28 + kubectl.kubernetes.io/default-container: manager 29 + labels: 30 + control-plane: controller-manager 31 + app.kubernetes.io/name: loom 32 + spec: 33 + # Target amd64 nodes only (image built for amd64) 34 + affinity: 35 + nodeAffinity: 36 + requiredDuringSchedulingIgnoredDuringExecution: 37 + nodeSelectorTerms: 38 + - matchExpressions: 39 + - key: kubernetes.io/arch 40 + operator: In 41 + values: 42 + - amd64 43 + - key: kubernetes.io/os 44 + operator: In 45 + values: 46 + - linux 47 + securityContext: 48 + # Projects are configured by default to adhere to the "restricted" Pod Security Standards. 49 + # This ensures that deployments meet the highest security requirements for Kubernetes. 50 + # For more details, see: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted 51 + runAsNonRoot: true 52 + seccompProfile: 53 + type: RuntimeDefault 54 + containers: 55 + - command: 56 + - /manager 57 + args: 58 + - --leader-elect 59 + - --health-probe-bind-address=:8081 60 + image: controller:latest 61 + imagePullPolicy: Always 62 + name: manager 63 + env: 64 + - name: SPINDLE_SERVER_HOSTNAME 65 + value: "loom.jarrett.net" 66 + - name: SPINDLE_SERVER_OWNER 67 + value: "did:plc:pddp4xt5lgnv2qsegbzzs4xg" 68 + - name: SPINDLE_SERVER_DB_PATH 69 + value: "/tmp/spindle.db" 70 + - name: SPINDLE_SERVER_LOG_DIR 71 + value: "/tmp/spindle-logs" 72 + ports: [] 73 + securityContext: 74 + allowPrivilegeEscalation: false 75 + capabilities: 76 + drop: 77 + - "ALL" 78 + livenessProbe: 79 + httpGet: 80 + path: /healthz 81 + port: 8081 82 + initialDelaySeconds: 15 83 + periodSeconds: 20 84 + readinessProbe: 85 + httpGet: 86 + path: /readyz 87 + port: 8081 88 + initialDelaySeconds: 5 89 + periodSeconds: 10 90 + # TODO(user): Configure the resources accordingly based on the project requirements. 91 + # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ 92 + resources: 93 + limits: 94 + cpu: 500m 95 + memory: 128Mi 96 + requests: 97 + cpu: 10m 98 + memory: 64Mi 99 + volumeMounts: [] 100 + volumes: [] 101 + serviceAccountName: controller-manager 102 + terminationGracePeriodSeconds: 10
+19
config/manager/service.yaml
··· 1 + --- 2 + apiVersion: v1 3 + kind: Service 4 + metadata: 5 + name: loom-spindle-service 6 + namespace: system 7 + labels: 8 + app.kubernetes.io/name: loom 9 + app.kubernetes.io/managed-by: kustomize 10 + spec: 11 + selector: 12 + control-plane: controller-manager 13 + app.kubernetes.io/name: loom 14 + ports: 15 + - name: spindle-http 16 + port: 6555 17 + protocol: TCP 18 + targetPort: 6555 19 + type: ClusterIP
+28
config/manifests/kustomization.yaml
··· 1 + # These resources constitute the fully configured set of manifests 2 + # used to generate the 'manifests/' directory in a bundle. 3 + resources: 4 + - bases/loom.clusterserviceversion.yaml 5 + - ../default 6 + - ../samples 7 + - ../scorecard 8 + 9 + # [WEBHOOK] To enable webhooks, uncomment all the sections with [WEBHOOK] prefix. 10 + # Do NOT uncomment sections with prefix [CERTMANAGER], as OLM does not support cert-manager. 11 + # These patches remove the unnecessary "cert" volume and its manager container volumeMount. 12 + #patches: 13 + #- target: 14 + # group: apps 15 + # version: v1 16 + # kind: Deployment 17 + # name: controller-manager 18 + # namespace: system 19 + # patch: |- 20 + # # Remove the manager container's "cert" volumeMount, since OLM will create and mount a set of certs. 21 + # # Update the indices in this path if adding or removing containers/volumeMounts in the manager's Deployment. 22 + # - op: remove 23 + 24 + # path: /spec/template/spec/containers/0/volumeMounts/0 25 + # # Remove the "cert" volume, since OLM will create and mount a set of certs. 26 + # # Update the indices in this path if adding or removing volumes in the manager's Deployment. 27 + # - op: remove 28 + # path: /spec/template/spec/volumes/0
+27
config/network-policy/allow-metrics-traffic.yaml
··· 1 + # This NetworkPolicy allows ingress traffic 2 + # from Pods running in namespaces labeled with 'metrics: enabled'. Only Pods in those 3 + # namespaces are able to gather data from the metrics endpoint. 4 + apiVersion: networking.k8s.io/v1 5 + kind: NetworkPolicy 6 + metadata: 7 + labels: 8 + app.kubernetes.io/name: loom 9 + app.kubernetes.io/managed-by: kustomize 10 + name: allow-metrics-traffic 11 + namespace: system 12 + spec: 13 + podSelector: 14 + matchLabels: 15 + control-plane: controller-manager 16 + app.kubernetes.io/name: loom 17 + policyTypes: 18 + - Ingress 19 + ingress: 20 + # This allows ingress traffic from any namespace with the label metrics: enabled 21 + - from: 22 + - namespaceSelector: 23 + matchLabels: 24 + metrics: enabled # Only from namespaces with this label 25 + ports: 26 + - port: 8443 27 + protocol: TCP
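Under this policy, a scraper only reaches port 8443 if its namespace carries the metrics: enabled label; for example (the namespace name is hypothetical):

apiVersion: v1
kind: Namespace
metadata:
  name: monitoring  # hypothetical namespace running the scraper
  labels:
    metrics: enabled  # admits this namespace's Pods through the policy above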
+2
config/network-policy/kustomization.yaml
··· 1 + resources: 2 + - allow-metrics-traffic.yaml
+11
config/prometheus/kustomization.yaml
··· 1 + resources: 2 + - monitor.yaml 3 + 4 + # [PROMETHEUS-WITH-CERTS] The following patch configures the ServiceMonitor in ../prometheus 5 + # to securely reference certificates created and managed by cert-manager. 6 + # Additionally, ensure that you uncomment the [METRICS-WITH-CERTS] patch under config/default/kustomization.yaml 7 + # to mount the "metrics-server-cert" secret in the Manager Deployment. 8 + #patches: 9 + # - path: monitor_tls_patch.yaml 10 + # target: 11 + # kind: ServiceMonitor
+27
config/prometheus/monitor.yaml
··· 1 + # Prometheus Monitor Service (Metrics) 2 + apiVersion: monitoring.coreos.com/v1 3 + kind: ServiceMonitor 4 + metadata: 5 + labels: 6 + control-plane: controller-manager 7 + app.kubernetes.io/name: loom 8 + app.kubernetes.io/managed-by: kustomize 9 + name: controller-manager-metrics-monitor 10 + namespace: system 11 + spec: 12 + endpoints: 13 + - path: /metrics 14 + port: https # Ensure this is the name of the port that exposes HTTPS metrics 15 + scheme: https 16 + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 17 + tlsConfig: 18 + # TODO(user): The option insecureSkipVerify: true is not recommended for production since it disables 19 + # certificate verification, exposing the system to potential man-in-the-middle attacks. 20 + # For production environments, it is recommended to use cert-manager for automatic TLS certificate management. 21 + # To apply this configuration, enable cert-manager and use the patch located at config/prometheus/monitor_tls_patch.yaml, 22 + # which securely references the certificate from the 'metrics-server-cert' secret. 23 + insecureSkipVerify: true 24 + selector: 25 + matchLabels: 26 + control-plane: controller-manager 27 + app.kubernetes.io/name: loom
+19
config/prometheus/monitor_tls_patch.yaml
··· 1 + # Patch for Prometheus ServiceMonitor to enable secure TLS configuration 2 + # using certificates managed by cert-manager 3 + - op: replace 4 + path: /spec/endpoints/0/tlsConfig 5 + value: 6 + # SERVICE_NAME and SERVICE_NAMESPACE will be substituted by kustomize 7 + serverName: SERVICE_NAME.SERVICE_NAMESPACE.svc 8 + insecureSkipVerify: false 9 + ca: 10 + secret: 11 + name: metrics-server-cert 12 + key: ca.crt 13 + cert: 14 + secret: 15 + name: metrics-server-cert 16 + key: tls.crt 17 + keySecret: 18 + name: metrics-server-cert 19 + key: tls.key
+28
config/rbac/kustomization.yaml
··· 1 + resources: 2 + # All RBAC will be applied under this service account in 3 + # the deployment namespace. You may comment out this resource 4 + # if your manager will use a service account that exists at 5 + # runtime. Be sure to update RoleBinding and ClusterRoleBinding 6 + # subjects if changing service account names. 7 + - service_account.yaml 8 + - role.yaml 9 + - role_binding.yaml 10 + - leader_election_role.yaml 11 + - leader_election_role_binding.yaml 12 + # The following RBAC configurations are used to protect 13 + # the metrics endpoint with authn/authz. These configurations 14 + # ensure that only authorized users and service accounts 15 + # can access the metrics endpoint. Comment the following 16 + # permissions if you want to disable this protection. 17 + # More info: https://book.kubebuilder.io/reference/metrics.html 18 + - metrics_auth_role.yaml 19 + - metrics_auth_role_binding.yaml 20 + - metrics_reader_role.yaml 21 + # For each CRD, "Admin", "Editor" and "Viewer" roles are scaffolded by 22 + # default, aiding admins in cluster management. Those roles are 23 + # not used by loom itself. You can comment out the following lines 24 + # if you do not want these helpers to be installed with your project. 25 + - spindleset_admin_role.yaml 26 + - spindleset_editor_role.yaml 27 + - spindleset_viewer_role.yaml 28 +
+40
config/rbac/leader_election_role.yaml
··· 1 + # permissions to do leader election. 2 + apiVersion: rbac.authorization.k8s.io/v1 3 + kind: Role 4 + metadata: 5 + labels: 6 + app.kubernetes.io/name: loom 7 + app.kubernetes.io/managed-by: kustomize 8 + name: leader-election-role 9 + rules: 10 + - apiGroups: 11 + - "" 12 + resources: 13 + - configmaps 14 + verbs: 15 + - get 16 + - list 17 + - watch 18 + - create 19 + - update 20 + - patch 21 + - delete 22 + - apiGroups: 23 + - coordination.k8s.io 24 + resources: 25 + - leases 26 + verbs: 27 + - get 28 + - list 29 + - watch 30 + - create 31 + - update 32 + - patch 33 + - delete 34 + - apiGroups: 35 + - "" 36 + resources: 37 + - events 38 + verbs: 39 + - create 40 + - patch
+15
config/rbac/leader_election_role_binding.yaml
··· 1 + apiVersion: rbac.authorization.k8s.io/v1 2 + kind: RoleBinding 3 + metadata: 4 + labels: 5 + app.kubernetes.io/name: loom 6 + app.kubernetes.io/managed-by: kustomize 7 + name: leader-election-rolebinding 8 + roleRef: 9 + apiGroup: rbac.authorization.k8s.io 10 + kind: Role 11 + name: leader-election-role 12 + subjects: 13 + - kind: ServiceAccount 14 + name: controller-manager 15 + namespace: system
+17
config/rbac/metrics_auth_role.yaml
··· 1 + apiVersion: rbac.authorization.k8s.io/v1 2 + kind: ClusterRole 3 + metadata: 4 + name: metrics-auth-role 5 + rules: 6 + - apiGroups: 7 + - authentication.k8s.io 8 + resources: 9 + - tokenreviews 10 + verbs: 11 + - create 12 + - apiGroups: 13 + - authorization.k8s.io 14 + resources: 15 + - subjectaccessreviews 16 + verbs: 17 + - create
+12
config/rbac/metrics_auth_role_binding.yaml
··· 1 + apiVersion: rbac.authorization.k8s.io/v1 2 + kind: ClusterRoleBinding 3 + metadata: 4 + name: metrics-auth-rolebinding 5 + roleRef: 6 + apiGroup: rbac.authorization.k8s.io 7 + kind: ClusterRole 8 + name: metrics-auth-role 9 + subjects: 10 + - kind: ServiceAccount 11 + name: controller-manager 12 + namespace: system
+9
config/rbac/metrics_reader_role.yaml
··· 1 + apiVersion: rbac.authorization.k8s.io/v1 2 + kind: ClusterRole 3 + metadata: 4 + name: metrics-reader 5 + rules: 6 + - nonResourceURLs: 7 + - "/metrics" 8 + verbs: 9 + - get
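metrics-reader only defines the permission; nothing in this commit binds it to an identity. A hedged sketch granting it to a scraper's ServiceAccount (all names below are hypothetical):

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: metrics-reader-prometheus  # hypothetical
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: metrics-reader
subjects:
  - kind: ServiceAccount
    name: prometheus-k8s   # hypothetical scraper service account
    namespace: monitoring  # hypothetical

The bearerTokenFile in config/prometheus/monitor.yaml supplies that ServiceAccount's token when scraping, which is the identity the metrics-auth TokenReview/SubjectAccessReview checks evaluate.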
+77
config/rbac/role.yaml
··· 1 + --- 2 + apiVersion: rbac.authorization.k8s.io/v1 3 + kind: ClusterRole 4 + metadata: 5 + name: manager-role 6 + rules: 7 + - apiGroups: 8 + - "" 9 + resources: 10 + - pods 11 + - secrets 12 + verbs: 13 + - get 14 + - list 15 + - watch 16 + - apiGroups: 17 + - "" 18 + resources: 19 + - pods/log 20 + verbs: 21 + - get 22 + - apiGroups: 23 + - "" 24 + resources: 25 + - services 26 + verbs: 27 + - create 28 + - delete 29 + - get 30 + - list 31 + - patch 32 + - update 33 + - watch 34 + - apiGroups: 35 + - batch 36 + resources: 37 + - jobs 38 + verbs: 39 + - create 40 + - delete 41 + - get 42 + - list 43 + - patch 44 + - update 45 + - watch 46 + - apiGroups: 47 + - batch 48 + resources: 49 + - jobs/status 50 + verbs: 51 + - get 52 + - apiGroups: 53 + - loom.j5t.io 54 + resources: 55 + - spindlesets 56 + verbs: 57 + - create 58 + - delete 59 + - get 60 + - list 61 + - patch 62 + - update 63 + - watch 64 + - apiGroups: 65 + - loom.j5t.io 66 + resources: 67 + - spindlesets/finalizers 68 + verbs: 69 + - update 70 + - apiGroups: 71 + - loom.j5t.io 72 + resources: 73 + - spindlesets/status 74 + verbs: 75 + - get 76 + - patch 77 + - update
+15
config/rbac/role_binding.yaml
··· 1 + apiVersion: rbac.authorization.k8s.io/v1 2 + kind: ClusterRoleBinding 3 + metadata: 4 + labels: 5 + app.kubernetes.io/name: loom 6 + app.kubernetes.io/managed-by: kustomize 7 + name: manager-rolebinding 8 + roleRef: 9 + apiGroup: rbac.authorization.k8s.io 10 + kind: ClusterRole 11 + name: manager-role 12 + subjects: 13 + - kind: ServiceAccount 14 + name: controller-manager 15 + namespace: system
+10
config/rbac/service_account.yaml
··· 1 + apiVersion: v1 2 + kind: ServiceAccount 3 + metadata: 4 + labels: 5 + app.kubernetes.io/name: loom 6 + app.kubernetes.io/managed-by: kustomize 7 + name: controller-manager 8 + namespace: system 9 + imagePullSecrets: 10 + - name: atcr-login
+27
config/rbac/spindleset_admin_role.yaml
··· 1 + # This rule is not used by the project loom itself. 2 + # It is provided to allow the cluster admin to help manage permissions for users. 3 + # 4 + # Grants full permissions ('*') over the loom.j5t.io API group. 5 + # This role is intended for users authorized to modify roles and bindings within the cluster, 6 + # enabling them to delegate specific permissions to other users or groups as needed. 7 + 8 + apiVersion: rbac.authorization.k8s.io/v1 9 + kind: ClusterRole 10 + metadata: 11 + labels: 12 + app.kubernetes.io/name: loom 13 + app.kubernetes.io/managed-by: kustomize 14 + name: spindleset-admin-role 15 + rules: 16 + - apiGroups: 17 + - loom.j5t.io 18 + resources: 19 + - spindlesets 20 + verbs: 21 + - '*' 22 + - apiGroups: 23 + - loom.j5t.io 24 + resources: 25 + - spindlesets/status 26 + verbs: 27 + - get
+33
config/rbac/spindleset_editor_role.yaml
··· 1 + # This rule is not used by the project loom itself. 2 + # It is provided to allow the cluster admin to help manage permissions for users. 3 + # 4 + # Grants permissions to create, update, and delete resources within the loom.j5t.io API group. 5 + # This role is intended for users who need to manage these resources 6 + # but should not control RBAC or manage permissions for others. 7 + 8 + apiVersion: rbac.authorization.k8s.io/v1 9 + kind: ClusterRole 10 + metadata: 11 + labels: 12 + app.kubernetes.io/name: loom 13 + app.kubernetes.io/managed-by: kustomize 14 + name: spindleset-editor-role 15 + rules: 16 + - apiGroups: 17 + - loom.j5t.io 18 + resources: 19 + - spindlesets 20 + verbs: 21 + - create 22 + - delete 23 + - get 24 + - list 25 + - patch 26 + - update 27 + - watch 28 + - apiGroups: 29 + - loom.j5t.io 30 + resources: 31 + - spindlesets/status 32 + verbs: 33 + - get
+29
config/rbac/spindleset_viewer_role.yaml
··· 1 + # This rule is not used by the project loom itself. 2 + # It is provided to allow the cluster admin to help manage permissions for users. 3 + # 4 + # Grants read-only access to loom.j5t.io resources. 5 + # This role is intended for users who need visibility into these resources 6 + # without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. 7 + 8 + apiVersion: rbac.authorization.k8s.io/v1 9 + kind: ClusterRole 10 + metadata: 11 + labels: 12 + app.kubernetes.io/name: loom 13 + app.kubernetes.io/managed-by: kustomize 14 + name: spindleset-viewer-role 15 + rules: 16 + - apiGroups: 17 + - loom.j5t.io 18 + resources: 19 + - spindlesets 20 + verbs: 21 + - get 22 + - list 23 + - watch 24 + - apiGroups: 25 + - loom.j5t.io 26 + resources: 27 + - spindlesets/status 28 + verbs: 29 + - get
+4
config/samples/kustomization.yaml
··· 1 + ## Append samples of your project ## 2 + resources: 3 + - loom_v1alpha1_spindleset.yaml 4 + # +kubebuilder:scaffold:manifestskustomizesamples
+35
config/samples/loom_v1alpha1_spindleset.yaml
··· 1 + apiVersion: loom.j5t.io/v1alpha1 2 + kind: SpindleSet 3 + metadata: 4 + labels: 5 + app.kubernetes.io/name: loom 6 + app.kubernetes.io/managed-by: kustomize 7 + name: spindleset-sample 8 + spec: 9 + # URL of the tangled.org knot to connect to 10 + knotUrl: https://knot1.tangled.sh 11 + 12 + # Name of the Secret containing the auth token (key: token) 13 + knotAuthSecret: spindle-auth 14 + 15 + # Maximum number of concurrent spindle jobs (default: 10) 16 + maxConcurrentJobs: 5 17 + 18 + # Default template for spindle job pods 19 + template: 20 + resources: 21 + requests: 22 + cpu: "500m" 23 + memory: "1Gi" 24 + limits: 25 + cpu: "2" 26 + memory: "4Gi" 27 + 28 + # Optional: Node selector for pod placement 29 + nodeSelector: {} 30 + 31 + # Optional: Tolerations for scheduling 32 + tolerations: [] 33 + 34 + # Optional: Pod affinity/anti-affinity rules 35 + affinity: {}
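The sample references a Secret named spindle-auth that this commit does not create; per the comment above, the auth token is expected under the key token. A hedged sketch (the namespace and token value are placeholders):

apiVersion: v1
kind: Secret
metadata:
  name: spindle-auth
  namespace: loom-system  # assumes the SpindleSet is created in the operator's namespace
type: Opaque
stringData:
  token: "<knot-auth-token>"  # placeholder; supply the real knot token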
+7
config/scorecard/bases/config.yaml
···
apiVersion: scorecard.operatorframework.io/v1alpha3
kind: Configuration
metadata:
  name: config
stages:
- parallel: true
  tests: []
+18
config/scorecard/kustomization.yaml
···
resources:
- bases/config.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
patches:
- path: patches/basic.config.yaml
  target:
    group: scorecard.operatorframework.io
    kind: Configuration
    name: config
    version: v1alpha3
- path: patches/olm.config.yaml
  target:
    group: scorecard.operatorframework.io
    kind: Configuration
    name: config
    version: v1alpha3
# +kubebuilder:scaffold:patches
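Both patch files target the same Configuration object; kustomize only applies a patch when the `group`, `kind`, `name`, and `version` in `target` all match the base resource, which is why those four fields are repeated verbatim for each entry. Once the project is packaged as an OLM bundle (not part of this commit), the assembled configuration would typically be exercised with operator-sdk, for example:

```sh
# Illustrative invocation; assumes an OLM bundle has been built under ./bundle.
operator-sdk scorecard ./bundle --selector=suite=basic
```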
+10
config/scorecard/patches/basic.config.yaml
···
- op: add
  path: /stages/0/tests/-
  value:
    entrypoint:
    - scorecard-test
    - basic-check-spec
    image: quay.io/operator-framework/scorecard-test:v1.41.1
    labels:
      suite: basic
      test: basic-check-spec-test
+50
config/scorecard/patches/olm.config.yaml
···
- op: add
  path: /stages/0/tests/-
  value:
    entrypoint:
    - scorecard-test
    - olm-bundle-validation
    image: quay.io/operator-framework/scorecard-test:v1.41.1
    labels:
      suite: olm
      test: olm-bundle-validation-test
- op: add
  path: /stages/0/tests/-
  value:
    entrypoint:
    - scorecard-test
    - olm-crds-have-validation
    image: quay.io/operator-framework/scorecard-test:v1.41.1
    labels:
      suite: olm
      test: olm-crds-have-validation-test
- op: add
  path: /stages/0/tests/-
  value:
    entrypoint:
    - scorecard-test
    - olm-crds-have-resources
    image: quay.io/operator-framework/scorecard-test:v1.41.1
    labels:
      suite: olm
      test: olm-crds-have-resources-test
- op: add
  path: /stages/0/tests/-
  value:
    entrypoint:
    - scorecard-test
    - olm-spec-descriptors
    image: quay.io/operator-framework/scorecard-test:v1.41.1
    labels:
      suite: olm
      test: olm-spec-descriptors-test
- op: add
  path: /stages/0/tests/-
  value:
    entrypoint:
    - scorecard-test
    - olm-status-descriptors
    image: quay.io/operator-framework/scorecard-test:v1.41.1
    labels:
      suite: olm
      test: olm-status-descriptors-test
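Every entry here is a JSON6902 `add` op whose path ends in `/-`, i.e. "append to the `tests` array of stage 0". Applied on top of the base's empty `tests: []`, `kustomize build config/scorecard` therefore yields one stage with six tests (the basic check plus these five OLM checks), roughly:

```yaml
stages:
- parallel: true
  tests:
  - entrypoint: [scorecard-test, basic-check-spec]
    image: quay.io/operator-framework/scorecard-test:v1.41.1
    labels: {suite: basic, test: basic-check-spec-test}
  - entrypoint: [scorecard-test, olm-bundle-validation]
    image: quay.io/operator-framework/scorecard-test:v1.41.1
    labels: {suite: olm, test: olm-bundle-validation-test}
  # ...followed by the remaining four olm-* tests in the same shape
```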
+200
go.mod
···
module tangled.org/evan.jarrett.net/loom

go 1.24.4

toolchain go1.24.9

require (
	github.com/onsi/ginkgo/v2 v2.23.4
	github.com/onsi/gomega v1.37.0
	gopkg.in/yaml.v3 v3.0.1
	k8s.io/api v0.33.0
	k8s.io/apimachinery v0.33.0
	k8s.io/client-go v0.33.0
	sigs.k8s.io/controller-runtime v0.21.0
	tangled.org/core v1.10.0-alpha
)

require (
	cel.dev/expr v0.23.0 // indirect
	github.com/Blank-Xu/sql-adapter v1.1.1 // indirect
	github.com/Microsoft/go-winio v0.6.2 // indirect
	github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
	github.com/avast/retry-go/v4 v4.6.1 // indirect
	github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/blang/semver/v4 v4.0.0 // indirect
	github.com/bluesky-social/indigo v0.0.0-20251003000214-3259b215110e // indirect
	github.com/bluesky-social/jetstream v0.0.0-20241210005130-ea96859b93d1 // indirect
	github.com/bmatcuk/doublestar/v4 v4.9.1 // indirect
	github.com/carlmjohnson/versioninfo v0.22.5 // indirect
	github.com/casbin/casbin/v2 v2.103.0 // indirect
	github.com/casbin/govaluate v1.3.0 // indirect
	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
	github.com/cespare/xxhash/v2 v2.3.0 // indirect
	github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect
	github.com/charmbracelet/lipgloss v1.1.0 // indirect
	github.com/charmbracelet/log v0.4.2 // indirect
	github.com/charmbracelet/x/ansi v0.8.0 // indirect
	github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd // indirect
	github.com/charmbracelet/x/term v0.2.1 // indirect
	github.com/containerd/errdefs v1.0.0 // indirect
	github.com/containerd/errdefs/pkg v0.3.0 // indirect
	github.com/cyphar/filepath-securejoin v0.4.1 // indirect
	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
	github.com/distribution/reference v0.6.0 // indirect
	github.com/docker/docker v28.2.2+incompatible // indirect
	github.com/docker/go-connections v0.5.0 // indirect
	github.com/docker/go-units v0.5.0 // indirect
	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
	github.com/evanphx/json-patch/v5 v5.9.11 // indirect
	github.com/felixge/httpsnoop v1.0.4 // indirect
	github.com/fsnotify/fsnotify v1.7.0 // indirect
	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
	github.com/go-chi/chi/v5 v5.2.0 // indirect
	github.com/go-git/go-git/v5 v5.14.0 // indirect
	github.com/go-jose/go-jose/v3 v3.0.4 // indirect
	github.com/go-logfmt/logfmt v0.6.0 // indirect
	github.com/go-logr/logr v1.4.3 // indirect
	github.com/go-logr/stdr v1.2.2 // indirect
	github.com/go-logr/zapr v1.3.0 // indirect
	github.com/go-openapi/jsonpointer v0.21.0 // indirect
	github.com/go-openapi/jsonreference v0.20.2 // indirect
	github.com/go-openapi/swag v0.23.0 // indirect
	github.com/go-redis/cache/v9 v9.0.0 // indirect
	github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
	github.com/goccy/go-json v0.10.5 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang-jwt/jwt/v5 v5.2.3 // indirect
	github.com/google/btree v1.1.3 // indirect
	github.com/google/cel-go v0.23.2 // indirect
	github.com/google/gnostic-models v0.6.9 // indirect
	github.com/google/go-cmp v0.7.0 // indirect
	github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a // indirect
	github.com/google/uuid v1.6.0 // indirect
	github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
	github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
	github.com/hashicorp/errwrap v1.1.0 // indirect
	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
	github.com/hashicorp/go-multierror v1.1.1 // indirect
	github.com/hashicorp/go-retryablehttp v0.7.8 // indirect
	github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0 // indirect
	github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
	github.com/hashicorp/go-sockaddr v1.0.7 // indirect
	github.com/hashicorp/golang-lru v1.0.2 // indirect
	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
	github.com/hashicorp/hcl v1.0.1-vault-7 // indirect
	github.com/hpcloud/tail v1.0.0 // indirect
	github.com/inconshreveable/mousetrap v1.1.0 // indirect
	github.com/ipfs/bbloom v0.0.4 // indirect
	github.com/ipfs/boxo v0.33.0 // indirect
	github.com/ipfs/go-block-format v0.2.2 // indirect
	github.com/ipfs/go-cid v0.5.0 // indirect
	github.com/ipfs/go-datastore v0.8.2 // indirect
	github.com/ipfs/go-ipfs-blockstore v1.3.1 // indirect
	github.com/ipfs/go-ipfs-ds-help v1.1.1 // indirect
	github.com/ipfs/go-ipld-cbor v0.2.1 // indirect
	github.com/ipfs/go-ipld-format v0.6.2 // indirect
	github.com/ipfs/go-log v1.0.5 // indirect
	github.com/ipfs/go-log/v2 v2.6.0 // indirect
	github.com/ipfs/go-metrics-interface v0.3.0 // indirect
	github.com/josharian/intern v1.0.0 // indirect
	github.com/json-iterator/go v1.1.12 // indirect
	github.com/klauspost/compress v1.18.0 // indirect
	github.com/klauspost/cpuid/v2 v2.3.0 // indirect
	github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
	github.com/mailru/easyjson v0.7.7 // indirect
	github.com/mattn/go-isatty v0.0.20 // indirect
	github.com/mattn/go-runewidth v0.0.16 // indirect
	github.com/mattn/go-sqlite3 v1.14.24 // indirect
	github.com/minio/sha256-simd v1.0.1 // indirect
	github.com/mitchellh/mapstructure v1.5.0 // indirect
	github.com/moby/docker-image-spec v1.3.1 // indirect
	github.com/moby/sys/sequential v0.6.0 // indirect
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.2 // indirect
	github.com/mr-tron/base58 v1.2.0 // indirect
	github.com/muesli/termenv v0.16.0 // indirect
	github.com/multiformats/go-base32 v0.1.0 // indirect
	github.com/multiformats/go-base36 v0.2.0 // indirect
	github.com/multiformats/go-multibase v0.2.0 // indirect
	github.com/multiformats/go-multihash v0.2.3 // indirect
	github.com/multiformats/go-varint v0.0.7 // indirect
	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
	github.com/openbao/openbao/api/v2 v2.3.0 // indirect
	github.com/opencontainers/go-digest v1.0.0 // indirect
	github.com/opencontainers/image-spec v1.1.1 // indirect
	github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b // indirect
	github.com/pjbgf/sha1cd v0.3.2 // indirect
	github.com/pkg/errors v0.9.1 // indirect
	github.com/polydawn/refmt v0.89.1-0.20221221234430-40501e09de1f // indirect
	github.com/prometheus/client_golang v1.22.0 // indirect
	github.com/prometheus/client_model v0.6.2 // indirect
	github.com/prometheus/common v0.64.0 // indirect
	github.com/prometheus/procfs v0.16.1 // indirect
	github.com/redis/go-redis/v9 v9.7.3 // indirect
	github.com/rivo/uniseg v0.4.7 // indirect
	github.com/ryanuber/go-glob v1.0.0 // indirect
	github.com/sethvargo/go-envconfig v1.1.0 // indirect
	github.com/spaolacci/murmur3 v1.1.0 // indirect
	github.com/spf13/cobra v1.8.1 // indirect
	github.com/spf13/pflag v1.0.5 // indirect
	github.com/stoewer/go-strcase v1.3.0 // indirect
	github.com/vmihailenco/go-tinylfu v0.2.2 // indirect
	github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect
	github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
	github.com/whyrusleeping/cbor-gen v0.3.1 // indirect
	github.com/x448/float16 v0.8.4 // indirect
	github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
	gitlab.com/yawning/secp256k1-voi v0.0.0-20230925100816-f2616030848b // indirect
	gitlab.com/yawning/tuplehash v0.0.0-20230713102510-df83abbf9a02 // indirect
	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 // indirect
	go.opentelemetry.io/otel v1.37.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect
	go.opentelemetry.io/otel/metric v1.37.0 // indirect
	go.opentelemetry.io/otel/sdk v1.37.0 // indirect
	go.opentelemetry.io/otel/trace v1.37.0 // indirect
	go.opentelemetry.io/proto/otlp v1.6.0 // indirect
	go.uber.org/atomic v1.11.0 // indirect
	go.uber.org/automaxprocs v1.6.0 // indirect
	go.uber.org/multierr v1.11.0 // indirect
	go.uber.org/zap v1.27.0 // indirect
	golang.org/x/crypto v0.41.0 // indirect
	golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b // indirect
	golang.org/x/net v0.43.0 // indirect
	golang.org/x/oauth2 v0.30.0 // indirect
	golang.org/x/sync v0.17.0 // indirect
	golang.org/x/sys v0.35.0 // indirect
	golang.org/x/term v0.34.0 // indirect
	golang.org/x/text v0.29.0 // indirect
	golang.org/x/time v0.12.0 // indirect
	golang.org/x/tools v0.36.0 // indirect
	golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
	gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
	google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
	google.golang.org/grpc v1.73.0 // indirect
	google.golang.org/protobuf v1.36.6 // indirect
	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
	gopkg.in/fsnotify.v1 v1.4.7 // indirect
	gopkg.in/inf.v0 v0.9.1 // indirect
	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
	k8s.io/apiextensions-apiserver v0.33.0 // indirect
	k8s.io/apiserver v0.33.0 // indirect
	k8s.io/component-base v0.33.0 // indirect
	k8s.io/klog/v2 v2.130.1 // indirect
	k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
	lukechampine.com/blake3 v1.4.1 // indirect
	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect
	sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
	sigs.k8s.io/randfill v1.0.0 // indirect
	sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
	sigs.k8s.io/yaml v1.4.0 // indirect
)

// Use local tangled.org/core for development
replace tangled.org/core => ../core
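The trailing `replace` directive pins `tangled.org/core` to a sibling checkout, so a build on a machine without `../core` present will fail to resolve the module. To build against the published v1.10.0-alpha instead, the directive can be dropped with stock Go tooling (nothing in this commit scripts this; the commands are just the standard invocation):

```sh
go mod edit -dropreplace=tangled.org/core
go mod tidy
```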
+689
go.sum
···
(go.sum: 689 lines of machine-generated module checksums pinning the dependencies declared in go.mod; the listing is truncated mid-file)
v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 543 + golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 544 + golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 545 + golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 546 + golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 547 + golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 548 + golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 549 + golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 550 + golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 551 + golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 552 + golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 553 + golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 554 + golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 555 + golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 556 + golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 557 + golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 558 + golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 559 + golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 560 + golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 561 + golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 562 + golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 563 + golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= 564 + golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= 565 + golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 566 + golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 567 + golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 568 + golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= 569 + golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= 570 + golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= 571 + golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= 572 + golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= 573 + golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= 574 + golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= 575 + golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= 576 + golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 577 + golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 578 + golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 579 + 
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= 580 + golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= 581 + golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= 582 + golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= 583 + golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= 584 + golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= 585 + golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= 586 + golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= 587 + golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= 588 + golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= 589 + golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= 590 + golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 591 + golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= 592 + golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= 593 + golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= 594 + golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= 595 + golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 596 + golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 597 + golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 598 + golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= 599 + golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= 600 + golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= 601 + golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= 602 + golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= 603 + golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= 604 + golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= 605 + golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= 606 + golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= 607 + golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= 608 + golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 609 + golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 610 + golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 611 + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 612 + golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= 613 + golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= 614 + gomodules.xyz/jsonpatch/v2 v2.4.0 
h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= 615 + gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= 616 + google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY= 617 + google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc= 618 + google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE= 619 + google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= 620 + google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= 621 + google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= 622 + google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= 623 + google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= 624 + google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= 625 + google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= 626 + google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= 627 + google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 628 + google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= 629 + google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 630 + google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= 631 + google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= 632 + google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= 633 + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 634 + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 635 + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 636 + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 637 + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 638 + gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= 639 + gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= 640 + gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= 641 + gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= 642 + gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= 643 + gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= 644 + gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= 645 + gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= 646 + gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= 647 + gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 648 + gopkg.in/yaml.v2 v2.2.4/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 649 + gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 650 + gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= 651 + gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 652 + gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 653 + gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 654 + gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= 655 + gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= 656 + honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= 657 + k8s.io/api v0.33.0 h1:yTgZVn1XEe6opVpP1FylmNrIFWuDqe2H0V8CT5gxfIU= 658 + k8s.io/api v0.33.0/go.mod h1:CTO61ECK/KU7haa3qq8sarQ0biLq2ju405IZAd9zsiM= 659 + k8s.io/apiextensions-apiserver v0.33.0 h1:d2qpYL7Mngbsc1taA4IjJPRJ9ilnsXIrndH+r9IimOs= 660 + k8s.io/apiextensions-apiserver v0.33.0/go.mod h1:VeJ8u9dEEN+tbETo+lFkwaaZPg6uFKLGj5vyNEwwSzc= 661 + k8s.io/apimachinery v0.33.0 h1:1a6kHrJxb2hs4t8EE5wuR/WxKDwGN1FKH3JvDtA0CIQ= 662 + k8s.io/apimachinery v0.33.0/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= 663 + k8s.io/apiserver v0.33.0 h1:QqcM6c+qEEjkOODHppFXRiw/cE2zP85704YrQ9YaBbc= 664 + k8s.io/apiserver v0.33.0/go.mod h1:EixYOit0YTxt8zrO2kBU7ixAtxFce9gKGq367nFmqI8= 665 + k8s.io/client-go v0.33.0 h1:UASR0sAYVUzs2kYuKn/ZakZlcs2bEHaizrrHUZg0G98= 666 + k8s.io/client-go v0.33.0/go.mod h1:kGkd+l/gNGg8GYWAPr0xF1rRKvVWvzh9vmZAMXtaKOg= 667 + k8s.io/component-base v0.33.0 h1:Ot4PyJI+0JAD9covDhwLp9UNkUja209OzsJ4FzScBNk= 668 + k8s.io/component-base v0.33.0/go.mod h1:aXYZLbw3kihdkOPMDhWbjGCO6sg+luw554KP51t8qCU= 669 + k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= 670 + k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= 671 + k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= 672 + k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= 673 + k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= 674 + k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= 675 + lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg= 676 + lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo= 677 + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM= 678 + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= 679 + sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8= 680 + sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM= 681 + sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= 682 + sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= 683 + sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= 684 + sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= 685 + sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= 686 + sigs.k8s.io/structured-merge-diff/v4 v4.6.0 
h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= 687 + sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= 688 + sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= 689 + sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
+15
hack/boilerplate.go.txt
··· 1 + /* 2 + Copyright 2025 Evan Jarrett. 3 + 4 + Licensed under the Apache License, Version 2.0 (the "License"); 5 + you may not use this file except in compliance with the License. 6 + You may obtain a copy of the License at 7 + 8 + http://www.apache.org/licenses/LICENSE-2.0 9 + 10 + Unless required by applicable law or agreed to in writing, software 11 + distributed under the License is distributed on an "AS IS" BASIS, 12 + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 + See the License for the specific language governing permissions and 14 + limitations under the License. 15 + */
+434
internal/controller/spindleset_controller.go
···
/*
Copyright 2025 Evan Jarrett.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controller

import (
	"context"
	"fmt"
	"sync"
	"time"

	"tangled.org/core/spindle"
	"tangled.org/core/spindle/models"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/rest"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/log"

	loomv1alpha1 "tangled.org/evan.jarrett.net/loom/api/v1alpha1"
)

// SpindleSetReconciler reconciles a SpindleSet object
type SpindleSetReconciler struct {
	client.Client
	Scheme  *runtime.Scheme
	Config  *rest.Config
	Spindle *spindle.Spindle

	// Track the last status reported to the spindle DB for each Job,
	// keyed by "namespace/name", so each transition is reported only once
	watchedJobs sync.Map // map[string]string
}

// +kubebuilder:rbac:groups=loom.j5t.io,resources=spindlesets,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=loom.j5t.io,resources=spindlesets/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=loom.j5t.io,resources=spindlesets/finalizers,verbs=update
// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=batch,resources=jobs/status,verbs=get
// +kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=pods/log,verbs=get
// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete

// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
//
// The Reconcile function handles:
// - Fetching the SpindleSet resource
// - Managing the WebSocket connection to the tangled.org knot
// - Monitoring spindle Jobs and updating status
// - Cleaning up completed Jobs
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.21.0/pkg/reconcile
func (r *SpindleSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	logger := log.FromContext(ctx)

	// Fetch the SpindleSet instance
	spindleSet := &loomv1alpha1.SpindleSet{}
	if err := r.Get(ctx, req.NamespacedName, spindleSet); err != nil {
		if apierrors.IsNotFound(err) {
			// SpindleSet was deleted, nothing to do
			logger.Info("SpindleSet resource not found, likely deleted")
			return ctrl.Result{}, nil
		}
		logger.Error(err, "Failed to get SpindleSet")
		return ctrl.Result{}, err
	}

	logger.Info("Reconciling SpindleSet", "name", spindleSet.Name, "knotUrl", spindleSet.Spec.KnotUrl)

	// Add finalizer if not present
	if !controllerutil.ContainsFinalizer(spindleSet, "loom.j5t.io/finalizer") {
		controllerutil.AddFinalizer(spindleSet, "loom.j5t.io/finalizer")
		if err := r.Update(ctx, spindleSet); err != nil {
			return ctrl.Result{}, err
		}
	}

	// Handle deletion
	if !spindleSet.DeletionTimestamp.IsZero() {
		return r.handleDeletion(ctx, spindleSet)
	}

	// Ensure spindle HTTP Service exists
	if err := r.ensureSpindleService(ctx, spindleSet); err != nil {
		logger.Error(err, "Failed to ensure spindle service")
		// Don't return error - we'll retry on next reconcile
	}

	// Monitor Job statuses and report to spindle DB
	if err := r.monitorJobStatuses(ctx, spindleSet); err != nil {
		logger.Error(err, "Failed to monitor job statuses")
		// Don't return error - we'll retry on next reconcile
	}

	// Update status based on current Jobs
	if err := r.updateStatus(ctx, spindleSet); err != nil {
		logger.Error(err, "Failed to update status")
		return ctrl.Result{}, err
	}

	// Requeue after 30 seconds to update status
	return ctrl.Result{RequeueAfter: 30 * time.Second}, nil
}

// handleDeletion handles cleanup when SpindleSet is being deleted
func (r *SpindleSetReconciler) handleDeletion(ctx context.Context, spindleSet *loomv1alpha1.SpindleSet) (ctrl.Result, error) {
	logger := log.FromContext(ctx)
	logger.Info("Handling SpindleSet deletion")

	// List all Jobs owned by this SpindleSet
	jobList := &batchv1.JobList{}
	if err := r.List(ctx, jobList, client.InNamespace(spindleSet.Namespace), client.MatchingLabels{
		"loom.j5t.io/spindleset": spindleSet.Name,
	}); err != nil {
		logger.Error(err, "Failed to list Jobs for cleanup")
		return ctrl.Result{}, err
	}

	// Delete all Jobs
	for _, job := range jobList.Items {
		logger.Info("Deleting Job", "jobName", job.Name)
		if err := r.Delete(ctx, &job); client.IgnoreNotFound(err) != nil {
			logger.Error(err, "Failed to delete Job", "jobName", job.Name)
			return ctrl.Result{}, err
		}
	}

	// Remove finalizer
	controllerutil.RemoveFinalizer(spindleSet, "loom.j5t.io/finalizer")
	if err := r.Update(ctx, spindleSet); err != nil {
		return ctrl.Result{}, err
	}

	logger.Info("SpindleSet cleanup complete")
	return ctrl.Result{}, nil
}

// updateStatus updates the SpindleSet status based on current Jobs
func (r *SpindleSetReconciler) updateStatus(ctx context.Context, spindleSet *loomv1alpha1.SpindleSet) error {
	logger := log.FromContext(ctx)

	// List all Jobs owned by this SpindleSet
	jobList := &batchv1.JobList{}
	if err := r.List(ctx, jobList, client.InNamespace(spindleSet.Namespace), client.MatchingLabels{
		"loom.j5t.io/spindleset": spindleSet.Name,
	}); err != nil {
		return fmt.Errorf("failed to list Jobs: %w", err)
	}

	// Count Jobs by status
	var pending, running, completed, failed int32
	for _, job := range jobList.Items {
		if job.Status.Active > 0 {
			running++
		} else if job.Status.Succeeded > 0 {
			completed++
		} else if job.Status.Failed > 0 {
			failed++
		} else {
			pending++
		}
	}

	// Update status
	statusChanged := false
	if spindleSet.Status.PendingJobs != pending {
		spindleSet.Status.PendingJobs = pending
		statusChanged = true
	}
	if spindleSet.Status.RunningJobs != running {
		spindleSet.Status.RunningJobs = running
		statusChanged = true
	}
	if spindleSet.Status.CompletedJobs != completed {
		spindleSet.Status.CompletedJobs = completed
		statusChanged = true
	}
	if spindleSet.Status.FailedJobs != failed {
		spindleSet.Status.FailedJobs = failed
		statusChanged = true
	}

	// Update conditions
	readyCondition := metav1.Condition{
		Type:               "Ready",
		Status:             metav1.ConditionTrue,
		Reason:             "SpindleSetReady",
		Message:            "SpindleSet is ready to process pipeline events",
		LastTransitionTime: metav1.Now(),
		ObservedGeneration: spindleSet.Generation,
	}

	// Check if condition needs to be updated
	existingCondition := findCondition(spindleSet.Status.Conditions, "Ready")
	if existingCondition == nil || existingCondition.Status != readyCondition.Status {
		setCondition(&spindleSet.Status.Conditions, readyCondition)
		statusChanged = true
	}

	// Only update if something changed
	if statusChanged {
		logger.Info("Updating SpindleSet status",
			"pending", pending,
			"running", running,
			"completed", completed,
			"failed", failed)

		if err := r.Status().Update(ctx, spindleSet); err != nil {
			return fmt.Errorf("failed to update SpindleSet status: %w", err)
		}
	}

	return nil
}

// findCondition finds a condition in the list by type
func findCondition(conditions []metav1.Condition, conditionType string) *metav1.Condition {
	for i := range conditions {
		if conditions[i].Type == conditionType {
			return &conditions[i]
		}
	}
	return nil
}

// setCondition adds or updates a condition in the list.
// append handles a nil slice, so no explicit initialization is needed.
func setCondition(conditions *[]metav1.Condition, newCondition metav1.Condition) {
	for i := range *conditions {
		if (*conditions)[i].Type == newCondition.Type {
			(*conditions)[i] = newCondition
			return
		}
	}

	*conditions = append(*conditions, newCondition)
}

// ensureSpindleService ensures a Service exists to expose the spindle HTTP server
func (r *SpindleSetReconciler) ensureSpindleService(ctx context.Context, spindleSet *loomv1alpha1.SpindleSet) error {
	logger := log.FromContext(ctx)

	// Service name based on SpindleSet
	serviceName := fmt.Sprintf("%s-spindle", spindleSet.Name)

	// Check if Service already exists
	existingService := &corev1.Service{}
	err := r.Get(ctx, client.ObjectKey{
		Name:      serviceName,
		Namespace: spindleSet.Namespace,
	}, existingService)

	if err == nil {
		// Service already exists
		logger.V(1).Info("Spindle service already exists", "service", serviceName)
		return nil
	}

	if !apierrors.IsNotFound(err) {
		return fmt.Errorf("failed to get service: %w", err)
	}

	// Create new Service
	service := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      serviceName,
			Namespace: spindleSet.Namespace,
			Labels: map[string]string{
				"loom.j5t.io/component":  "spindle-http",
				"loom.j5t.io/spindleset": spindleSet.Name,
			},
		},
		Spec: corev1.ServiceSpec{
			Selector: map[string]string{
				// Select the loom operator pod
				// This assumes the operator deployment has this label
				"control-plane": "controller-manager",
			},
			Ports: []corev1.ServicePort{
				{
					Name:       "http",
					Protocol:   corev1.ProtocolTCP,
					Port:       6555, // Default spindle HTTP port
					TargetPort: intstr.FromInt(6555),
				},
			},
			Type: corev1.ServiceTypeClusterIP,
		},
	}

	// Set SpindleSet as owner
	if err := controllerutil.SetControllerReference(spindleSet, service, r.Scheme); err != nil {
		return fmt.Errorf("failed to set controller reference: %w", err)
	}

	logger.Info("Creating spindle HTTP service", "service", serviceName, "port", 6555)
	if err := r.Create(ctx, service); err != nil {
		return fmt.Errorf("failed to create service: %w", err)
	}

	logger.Info("Spindle HTTP service created successfully", "service", serviceName)
	return nil
}

// monitorJobStatuses monitors Jobs and reports status changes to spindle DB
func (r *SpindleSetReconciler) monitorJobStatuses(ctx context.Context, spindleSet *loomv1alpha1.SpindleSet) error {
	logger := log.FromContext(ctx)

	// List all spindle Jobs in the namespace
	jobList := &batchv1.JobList{}
	if err := r.List(ctx, jobList, client.InNamespace(spindleSet.Namespace), client.MatchingLabels{
		"loom.j5t.io/component": "spindle",
	}); err != nil {
		return fmt.Errorf("failed to list Jobs: %w", err)
	}

	// Check each Job's status
	for _, job := range jobList.Items {
		jobKey := fmt.Sprintf("%s/%s", job.Namespace, job.Name)

		// Extract workflow ID from Job labels
		pipelineID := job.Labels["loom.j5t.io/pipeline-id"]
		workflowName := job.Labels["loom.j5t.io/workflow"]
		knot := job.Annotations["loom.j5t.io/knot"]

		if pipelineID == "" || workflowName == "" {
			continue
		}

		wid := models.WorkflowId{
			PipelineId: models.PipelineId{
				Knot: knot,
				Rkey: pipelineID,
			},
			Name: workflowName,
		}

		// Determine current status
		var currentStatus string
		if job.Status.Active > 0 {
			currentStatus = "running"
		} else if job.Status.Succeeded > 0 {
			currentStatus = "succeeded"
		} else if job.Status.Failed > 0 {
			currentStatus = "failed"
		} else {
			currentStatus = "pending"
		}

		// Only report transitions: skip Jobs whose status has not changed
		// since the last reconcile
		prev, exists := r.watchedJobs.Load(jobKey)
		if prevStatus, _ := prev.(string); exists && prevStatus == currentStatus {
			continue
		}

		switch currentStatus {
		case "pending":
			logger.Info("Reporting pending status", "job", job.Name, "workflow", workflowName)
			if err := r.Spindle.DB().StatusPending(wid, r.Spindle.Notifier()); err != nil {
				logger.Error(err, "Failed to report pending status")
			}
		case "running":
			logger.Info("Reporting running status", "job", job.Name, "workflow", workflowName)
			if err := r.Spindle.DB().StatusRunning(wid, r.Spindle.Notifier()); err != nil {
				logger.Error(err, "Failed to report running status")
			}
		case "succeeded":
			logger.Info("Reporting success status", "job", job.Name, "workflow", workflowName)
			if err := r.Spindle.DB().StatusSuccess(wid, r.Spindle.Notifier()); err != nil {
				logger.Error(err, "Failed to report success status")
			}
		case "failed":
			logger.Info("Reporting failed status", "job", job.Name, "workflow", workflowName)
			// Try to get error message from Job conditions
			errorMsg := "Job failed"
			for _, cond := range job.Status.Conditions {
				if cond.Type == batchv1.JobFailed {
					errorMsg = cond.Message
					break
				}
			}
			exitCode := int64(1)
			if err := r.Spindle.DB().StatusFailed(wid, errorMsg, exitCode, r.Spindle.Notifier()); err != nil {
				logger.Error(err, "Failed to report failed status")
			}
		}

		// Remember the last reported status. Terminal statuses stay in the
		// map so completed Jobs (retained by their TTL) are not re-reported.
		r.watchedJobs.Store(jobKey, currentStatus)
	}

	return nil
}

// SetupWithManager sets up the controller with the Manager.
func (r *SpindleSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&loomv1alpha1.SpindleSet{}).
		Owns(&batchv1.Job{}).    // Watch Jobs owned by SpindleSet
		Owns(&corev1.Service{}). // Watch Services owned by SpindleSet
		Named("spindleset").
		Complete(r)
}
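The findCondition/setCondition helpers above re-implement logic that k8s.io/apimachinery already ships. A minimal sketch of the same Ready-condition update using the upstream helpers (the updateReadyCondition wrapper is hypothetical, not part of this commit):

```go
package controller

import (
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// updateReadyCondition mirrors the findCondition/setCondition pair using
// meta.SetStatusCondition, which also touches LastTransitionTime only when
// the status actually flips. It returns true if the condition list changed.
func updateReadyCondition(conditions *[]metav1.Condition, generation int64) bool {
	return meta.SetStatusCondition(conditions, metav1.Condition{
		Type:               "Ready",
		Status:             metav1.ConditionTrue,
		Reason:             "SpindleSetReady",
		Message:            "SpindleSet is ready to process pipeline events",
		ObservedGeneration: generation,
	})
}
```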
+84
internal/controller/spindleset_controller_test.go
··· 1 + /* 2 + Copyright 2025 Evan Jarrett. 3 + 4 + Licensed under the Apache License, Version 2.0 (the "License"); 5 + you may not use this file except in compliance with the License. 6 + You may obtain a copy of the License at 7 + 8 + http://www.apache.org/licenses/LICENSE-2.0 9 + 10 + Unless required by applicable law or agreed to in writing, software 11 + distributed under the License is distributed on an "AS IS" BASIS, 12 + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 + See the License for the specific language governing permissions and 14 + limitations under the License. 15 + */ 16 + 17 + package controller 18 + 19 + import ( 20 + "context" 21 + 22 + . "github.com/onsi/ginkgo/v2" 23 + . "github.com/onsi/gomega" 24 + "k8s.io/apimachinery/pkg/api/errors" 25 + "k8s.io/apimachinery/pkg/types" 26 + "sigs.k8s.io/controller-runtime/pkg/reconcile" 27 + 28 + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 29 + 30 + loomv1alpha1 "tangled.org/evan.jarrett.net/loom/api/v1alpha1" 31 + ) 32 + 33 + var _ = Describe("SpindleSet Controller", func() { 34 + Context("When reconciling a resource", func() { 35 + const resourceName = "test-resource" 36 + 37 + ctx := context.Background() 38 + 39 + typeNamespacedName := types.NamespacedName{ 40 + Name: resourceName, 41 + Namespace: "default", // TODO(user):Modify as needed 42 + } 43 + spindleset := &loomv1alpha1.SpindleSet{} 44 + 45 + BeforeEach(func() { 46 + By("creating the custom resource for the Kind SpindleSet") 47 + err := k8sClient.Get(ctx, typeNamespacedName, spindleset) 48 + if err != nil && errors.IsNotFound(err) { 49 + resource := &loomv1alpha1.SpindleSet{ 50 + ObjectMeta: metav1.ObjectMeta{ 51 + Name: resourceName, 52 + Namespace: "default", 53 + }, 54 + // TODO(user): Specify other spec details if needed. 55 + } 56 + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) 57 + } 58 + }) 59 + 60 + AfterEach(func() { 61 + // TODO(user): Cleanup logic after each test, like removing the resource instance. 62 + resource := &loomv1alpha1.SpindleSet{} 63 + err := k8sClient.Get(ctx, typeNamespacedName, resource) 64 + Expect(err).NotTo(HaveOccurred()) 65 + 66 + By("Cleanup the specific resource instance SpindleSet") 67 + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) 68 + }) 69 + It("should successfully reconcile the resource", func() { 70 + By("Reconciling the created resource") 71 + controllerReconciler := &SpindleSetReconciler{ 72 + Client: k8sClient, 73 + Scheme: k8sClient.Scheme(), 74 + } 75 + 76 + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ 77 + NamespacedName: typeNamespacedName, 78 + }) 79 + Expect(err).NotTo(HaveOccurred()) 80 + // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. 81 + // Example: If you expect a certain status condition after reconciliation, verify it here. 82 + }) 83 + }) 84 + })
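The scaffold's final TODO asks for more specific assertions. One possible sketch, to be placed inside the It block above; it assumes the reconciler reaches updateStatus in the envtest environment and sets the Ready condition:

```go
// Fetch the reconciled resource and assert the Ready condition that
// updateStatus sets. findCondition comes from the same controller package.
updated := &loomv1alpha1.SpindleSet{}
Expect(k8sClient.Get(ctx, typeNamespacedName, updated)).To(Succeed())
ready := findCondition(updated.Status.Conditions, "Ready")
Expect(ready).NotTo(BeNil())
Expect(ready.Status).To(Equal(metav1.ConditionTrue))
```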
+116
internal/controller/suite_test.go
··· 1 + /* 2 + Copyright 2025 Evan Jarrett. 3 + 4 + Licensed under the Apache License, Version 2.0 (the "License"); 5 + you may not use this file except in compliance with the License. 6 + You may obtain a copy of the License at 7 + 8 + http://www.apache.org/licenses/LICENSE-2.0 9 + 10 + Unless required by applicable law or agreed to in writing, software 11 + distributed under the License is distributed on an "AS IS" BASIS, 12 + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 + See the License for the specific language governing permissions and 14 + limitations under the License. 15 + */ 16 + 17 + package controller 18 + 19 + import ( 20 + "context" 21 + "os" 22 + "path/filepath" 23 + "testing" 24 + 25 + . "github.com/onsi/ginkgo/v2" 26 + . "github.com/onsi/gomega" 27 + 28 + "k8s.io/client-go/kubernetes/scheme" 29 + "k8s.io/client-go/rest" 30 + "sigs.k8s.io/controller-runtime/pkg/client" 31 + "sigs.k8s.io/controller-runtime/pkg/envtest" 32 + logf "sigs.k8s.io/controller-runtime/pkg/log" 33 + "sigs.k8s.io/controller-runtime/pkg/log/zap" 34 + 35 + loomv1alpha1 "tangled.org/evan.jarrett.net/loom/api/v1alpha1" 36 + // +kubebuilder:scaffold:imports 37 + ) 38 + 39 + // These tests use Ginkgo (BDD-style Go testing framework). Refer to 40 + // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 41 + 42 + var ( 43 + ctx context.Context 44 + cancel context.CancelFunc 45 + testEnv *envtest.Environment 46 + cfg *rest.Config 47 + k8sClient client.Client 48 + ) 49 + 50 + func TestControllers(t *testing.T) { 51 + RegisterFailHandler(Fail) 52 + 53 + RunSpecs(t, "Controller Suite") 54 + } 55 + 56 + var _ = BeforeSuite(func() { 57 + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) 58 + 59 + ctx, cancel = context.WithCancel(context.TODO()) 60 + 61 + var err error 62 + err = loomv1alpha1.AddToScheme(scheme.Scheme) 63 + Expect(err).NotTo(HaveOccurred()) 64 + 65 + // +kubebuilder:scaffold:scheme 66 + 67 + By("bootstrapping test environment") 68 + testEnv = &envtest.Environment{ 69 + CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, 70 + ErrorIfCRDPathMissing: true, 71 + } 72 + 73 + // Retrieve the first found binary directory to allow running tests from IDEs 74 + if getFirstFoundEnvTestBinaryDir() != "" { 75 + testEnv.BinaryAssetsDirectory = getFirstFoundEnvTestBinaryDir() 76 + } 77 + 78 + // cfg is defined in this file globally. 79 + cfg, err = testEnv.Start() 80 + Expect(err).NotTo(HaveOccurred()) 81 + Expect(cfg).NotTo(BeNil()) 82 + 83 + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) 84 + Expect(err).NotTo(HaveOccurred()) 85 + Expect(k8sClient).NotTo(BeNil()) 86 + }) 87 + 88 + var _ = AfterSuite(func() { 89 + By("tearing down the test environment") 90 + cancel() 91 + err := testEnv.Stop() 92 + Expect(err).NotTo(HaveOccurred()) 93 + }) 94 + 95 + // getFirstFoundEnvTestBinaryDir locates the first binary in the specified path. 96 + // ENVTEST-based tests depend on specific binaries, usually located in paths set by 97 + // controller-runtime. When running tests directly (e.g., via an IDE) without using 98 + // Makefile targets, the 'BinaryAssetsDirectory' must be explicitly configured. 99 + // 100 + // This function streamlines the process by finding the required binaries, similar to 101 + // setting the 'KUBEBUILDER_ASSETS' environment variable. To ensure the binaries are 102 + // properly set up, run 'make setup-envtest' beforehand. 
103 + func getFirstFoundEnvTestBinaryDir() string { 104 + basePath := filepath.Join("..", "..", "bin", "k8s") 105 + entries, err := os.ReadDir(basePath) 106 + if err != nil { 107 + logf.Log.Error(err, "Failed to read directory", "path", basePath) 108 + return "" 109 + } 110 + for _, entry := range entries { 111 + if entry.IsDir() { 112 + return filepath.Join(basePath, entry.Name()) 113 + } 114 + } 115 + return "" 116 + }
+306
internal/engine/kubernetes_engine.go
··· 1 + package engine 2 + 3 + import ( 4 + "context" 5 + "fmt" 6 + "time" 7 + 8 + "gopkg.in/yaml.v3" 9 + batchv1 "k8s.io/api/batch/v1" 10 + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 11 + "sigs.k8s.io/controller-runtime/pkg/client" 12 + "sigs.k8s.io/controller-runtime/pkg/log" 13 + 14 + "tangled.org/core/api/tangled" 15 + "tangled.org/core/spindle/models" 16 + "tangled.org/core/spindle/secrets" 17 + 18 + loomv1alpha1 "tangled.org/evan.jarrett.net/loom/api/v1alpha1" 19 + "tangled.org/evan.jarrett.net/loom/pkg/jobbuilder" 20 + ) 21 + 22 + // KubernetesEngine implements the spindle Engine interface for Kubernetes Jobs. 23 + type KubernetesEngine struct { 24 + client client.Client 25 + namespace string 26 + template loomv1alpha1.SpindleTemplate 27 + 28 + // Track created jobs for cleanup 29 + jobs map[string]*batchv1.Job 30 + 31 + // Store current knot for Job annotations 32 + currentKnot string 33 + } 34 + 35 + // NewKubernetesEngine creates a new Kubernetes-based spindle engine. 36 + func NewKubernetesEngine(client client.Client, namespace string, template loomv1alpha1.SpindleTemplate) *KubernetesEngine { 37 + return &KubernetesEngine{ 38 + client: client, 39 + namespace: namespace, 40 + template: template, 41 + jobs: make(map[string]*batchv1.Job), 42 + } 43 + } 44 + 45 + // WorkflowSpec is the structure of the workflow YAML file. 46 + type WorkflowSpec struct { 47 + Image string `yaml:"image"` 48 + Architecture string `yaml:"architecture"` 49 + Steps []WorkflowStepSpec `yaml:"steps"` 50 + When []WorkflowWhenSpec `yaml:"when,omitempty"` 51 + Environment map[string]string `yaml:"environment,omitempty"` 52 + Dependencies *WorkflowDependenciesSpec `yaml:"dependencies,omitempty"` 53 + } 54 + 55 + type WorkflowStepSpec struct { 56 + Name string `yaml:"name"` 57 + Command string `yaml:"command"` 58 + Environment map[string]string `yaml:"environment,omitempty"` 59 + } 60 + 61 + type WorkflowWhenSpec struct { 62 + Event []string `yaml:"event,omitempty"` 63 + Branch []string `yaml:"branch,omitempty"` 64 + } 65 + 66 + type WorkflowDependenciesSpec struct { 67 + Nixpkgs []string `yaml:"nixpkgs,omitempty"` 68 + } 69 + 70 + // SimpleStep implements the models.Step interface. 71 + type SimpleStep struct { 72 + StepName string 73 + StepCommand string 74 + StepKind models.StepKind 75 + } 76 + 77 + func (s SimpleStep) Name() string { 78 + return s.StepName 79 + } 80 + 81 + func (s SimpleStep) Command() string { 82 + return s.StepCommand 83 + } 84 + 85 + func (s SimpleStep) Kind() models.StepKind { 86 + return s.StepKind 87 + } 88 + 89 + // InitWorkflow parses the workflow YAML and initializes a Workflow model. 
90 + func (e *KubernetesEngine) InitWorkflow(twf tangled.Pipeline_Workflow, tpl tangled.Pipeline) (*models.Workflow, error) { 91 + // Parse the Raw YAML 92 + var spec WorkflowSpec 93 + if err := yaml.Unmarshal([]byte(twf.Raw), &spec); err != nil { 94 + return nil, fmt.Errorf("failed to parse workflow YAML: %w", err) 95 + } 96 + 97 + // Validate required fields 98 + if spec.Image == "" { 99 + return nil, fmt.Errorf("workflow must specify an 'image' field") 100 + } 101 + 102 + // Default architecture to amd64 if not specified 103 + if spec.Architecture == "" { 104 + spec.Architecture = "amd64" 105 + } 106 + 107 + // Convert steps to models.Step interface 108 + steps := make([]models.Step, 0, len(spec.Steps)) 109 + for _, stepSpec := range spec.Steps { 110 + steps = append(steps, SimpleStep{ 111 + StepName: stepSpec.Name, 112 + StepCommand: stepSpec.Command, 113 + StepKind: models.StepKindUser, 114 + }) 115 + } 116 + 117 + // Store the parsed spec in Data field for later use 118 + workflowData := map[string]interface{}{ 119 + "spec": spec, 120 + "triggerRepo": tpl.TriggerMetadata.Repo, 121 + "cloneOpts": twf.Clone, 122 + } 123 + 124 + workflow := &models.Workflow{ 125 + Steps: steps, 126 + Name: twf.Name, 127 + Data: workflowData, 128 + } 129 + 130 + return workflow, nil 131 + } 132 + 133 + // SetupWorkflow creates the Kubernetes Job for the workflow. 134 + func (e *KubernetesEngine) SetupWorkflow(ctx context.Context, wid models.WorkflowId, wf *models.Workflow) error { 135 + logger := log.FromContext(ctx).WithValues("workflow", wid.Name, "pipeline", wid.PipelineId.Rkey) 136 + 137 + // Extract workflow data 138 + workflowData, ok := wf.Data.(map[string]interface{}) 139 + if !ok { 140 + return fmt.Errorf("invalid workflow data type") 141 + } 142 + 143 + spec, ok := workflowData["spec"].(WorkflowSpec) 144 + if !ok { 145 + return fmt.Errorf("workflow spec not found in data") 146 + } 147 + 148 + triggerRepo, ok := workflowData["triggerRepo"].(*tangled.Pipeline_TriggerRepo) 149 + if !ok { 150 + return fmt.Errorf("trigger repo not found in data") 151 + } 152 + 153 + // Determine commit SHA based on trigger type 154 + // For now, assume we're handling push events with NewSha 155 + // TODO: Handle other trigger types (PR, manual) 156 + commitSHA := "" // Need to extract from trigger metadata 157 + 158 + // Build repository clone URL 159 + repoURL := fmt.Sprintf("https://%s/%s/%s", triggerRepo.Knot, triggerRepo.Did, triggerRepo.Repo) 160 + 161 + // Store knot for status reporting 162 + e.currentKnot = triggerRepo.Knot 163 + 164 + // Convert workflow steps to jobbuilder format 165 + jobSteps := make([]jobbuilder.WorkflowStep, 0, len(wf.Steps)) 166 + for _, step := range wf.Steps { 167 + jobSteps = append(jobSteps, jobbuilder.WorkflowStep{ 168 + Name: step.Name(), 169 + Command: step.Command(), 170 + Env: nil, // TODO: Extract from step if available 171 + }) 172 + } 173 + 174 + // Build Job configuration 175 + jobConfig := jobbuilder.WorkflowConfig{ 176 + WorkflowName: wf.Name, 177 + PipelineID: wid.PipelineId.Rkey, // Use rkey as pipeline ID 178 + Image: spec.Image, 179 + Architecture: spec.Architecture, 180 + Steps: jobSteps, 181 + RepoURL: repoURL, 182 + CommitSHA: commitSHA, 183 + Secrets: nil, // TODO: Handle secrets 184 + Template: e.template, 185 + Namespace: e.namespace, 186 + Knot: e.currentKnot, 187 + } 188 + 189 + // Create the Job 190 + job, err := jobbuilder.BuildJob(jobConfig) 191 + if err != nil { 192 + return fmt.Errorf("failed to build job: %w", err) 193 + } 194 + 195 + // Create the Job 
in Kubernetes 196 + logger.Info("Creating Kubernetes Job", "jobName", job.Name) 197 + if err := e.client.Create(ctx, job); err != nil { 198 + return fmt.Errorf("failed to create Kubernetes Job: %w", err) 199 + } 200 + 201 + // Track the job for cleanup 202 + e.jobs[wid.String()] = job 203 + 204 + logger.Info("Kubernetes Job created successfully", "jobName", job.Name) 205 + return nil 206 + } 207 + 208 + // WorkflowTimeout returns the maximum time a workflow is allowed to run. 209 + func (e *KubernetesEngine) WorkflowTimeout() time.Duration { 210 + // Default timeout: 1 hour 211 + return 1 * time.Hour 212 + } 213 + 214 + // DestroyWorkflow cleans up the Kubernetes Job after completion. 215 + func (e *KubernetesEngine) DestroyWorkflow(ctx context.Context, wid models.WorkflowId) error { 216 + logger := log.FromContext(ctx).WithValues("workflow", wid.Name, "pipeline", wid.PipelineId.Rkey) 217 + 218 + job, exists := e.jobs[wid.String()] 219 + if !exists { 220 + logger.Info("No job found to destroy") 221 + return nil 222 + } 223 + 224 + // Delete the Job 225 + // Note: Job has TTLSecondsAfterFinished set, so it will auto-cleanup 226 + // This is just for explicit cleanup if needed 227 + logger.Info("Cleaning up Kubernetes Job", "jobName", job.Name) 228 + 229 + // Delete with propagation policy to remove pods 230 + deletePolicy := metav1.DeletePropagationForeground 231 + deleteOptions := &client.DeleteOptions{ 232 + PropagationPolicy: &deletePolicy, 233 + } 234 + 235 + if err := e.client.Delete(ctx, job, deleteOptions); err != nil { 236 + // Ignore not found errors (job may have already been deleted by TTL) 237 + if client.IgnoreNotFound(err) != nil { 238 + return fmt.Errorf("failed to delete Kubernetes Job: %w", err) 239 + } 240 + } 241 + 242 + // Remove from tracking map 243 + delete(e.jobs, wid.String()) 244 + 245 + logger.Info("Kubernetes Job cleaned up successfully") 246 + return nil 247 + } 248 + 249 + // RunStep waits for the Job to complete and streams logs. 250 + // For Kubernetes engine, all steps run in a single Job, but we need to wait for completion. 
251 + func (e *KubernetesEngine) RunStep(ctx context.Context, wid models.WorkflowId, w *models.Workflow, idx int, secrets []secrets.UnlockedSecret, wfLogger *models.WorkflowLogger) error { 252 + logger := log.FromContext(ctx).WithValues("workflow", wid.Name, "pipeline", wid.PipelineId.Rkey, "step", idx) 253 + 254 + // Only wait on the first step - the Job runs all steps together 255 + if idx != 0 { 256 + return nil 257 + } 258 + 259 + job, exists := e.jobs[wid.String()] 260 + if !exists { 261 + return fmt.Errorf("no job found for workflow") 262 + } 263 + 264 + logger.Info("Waiting for Kubernetes Job to complete", "jobName", job.Name) 265 + 266 + // Wait for the Job to complete (with timeout from WorkflowTimeout) 267 + timeout := time.After(e.WorkflowTimeout()) 268 + ticker := time.NewTicker(5 * time.Second) 269 + defer ticker.Stop() 270 + 271 + for { 272 + select { 273 + case <-ctx.Done(): 274 + return ctx.Err() 275 + case <-timeout: 276 + return fmt.Errorf("job timed out after %v", e.WorkflowTimeout()) 277 + case <-ticker.C: 278 + // Check Job status 279 + currentJob := &batchv1.Job{} 280 + err := e.client.Get(ctx, client.ObjectKey{ 281 + Namespace: job.Namespace, 282 + Name: job.Name, 283 + }, currentJob) 284 + if err != nil { 285 + return fmt.Errorf("failed to get job status: %w", err) 286 + } 287 + 288 + // Check if Job completed 289 + if currentJob.Status.Succeeded > 0 { 290 + logger.Info("Kubernetes Job completed successfully") 291 + return nil 292 + } 293 + 294 + // Check if Job failed 295 + if currentJob.Status.Failed > 0 { 296 + logger.Error(nil, "Kubernetes Job failed") 297 + return fmt.Errorf("job failed") 298 + } 299 + 300 + // Still running, continue waiting 301 + } 302 + } 303 + } 304 + 305 + // Ensure KubernetesEngine implements the Engine interface 306 + var _ models.Engine = (*KubernetesEngine)(nil)
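RunStep's hand-rolled select/ticker loop can also be expressed with apimachinery's polling helper. A sketch under the same 5-second interval and WorkflowTimeout bound (waitForJob is hypothetical, not part of this commit):

```go
package engine

import (
	"context"
	"fmt"
	"time"

	batchv1 "k8s.io/api/batch/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// waitForJob polls the Job every 5s until it succeeds, fails, or the
// workflow timeout elapses, mirroring the loop in RunStep.
func (e *KubernetesEngine) waitForJob(ctx context.Context, job *batchv1.Job) error {
	return wait.PollUntilContextTimeout(ctx, 5*time.Second, e.WorkflowTimeout(), true,
		func(ctx context.Context) (bool, error) {
			current := &batchv1.Job{}
			if err := e.client.Get(ctx, client.ObjectKeyFromObject(job), current); err != nil {
				return false, fmt.Errorf("failed to get job status: %w", err)
			}
			if current.Status.Succeeded > 0 {
				return true, nil // done
			}
			if current.Status.Failed > 0 {
				return false, fmt.Errorf("job failed")
			}
			return false, nil // still running, keep polling
		})
}
```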
+88
pkg/jobbuilder/affinity.go
··· 1 + package jobbuilder 2 + 3 + import ( 4 + corev1 "k8s.io/api/core/v1" 5 + ) 6 + 7 + // BuildArchitectureAffinity creates node affinity rules for the specified architecture. 8 + // This ensures jobs are scheduled on nodes that match the requested architecture (amd64/arm64). 9 + func BuildArchitectureAffinity(architecture string) *corev1.Affinity { 10 + if architecture == "" { 11 + // Default to amd64 if not specified 12 + architecture = "amd64" 13 + } 14 + 15 + return &corev1.Affinity{ 16 + NodeAffinity: &corev1.NodeAffinity{ 17 + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ 18 + NodeSelectorTerms: []corev1.NodeSelectorTerm{ 19 + { 20 + MatchExpressions: []corev1.NodeSelectorRequirement{ 21 + { 22 + Key: "kubernetes.io/arch", 23 + Operator: corev1.NodeSelectorOpIn, 24 + Values: []string{architecture}, 25 + }, 26 + }, 27 + }, 28 + }, 29 + }, 30 + }, 31 + } 32 + } 33 + 34 + // MergeAffinity merges architecture-based affinity with user-provided affinity. 35 + // Architecture affinity takes precedence as a hard requirement. 36 + func MergeAffinity(archAffinity, userAffinity *corev1.Affinity) *corev1.Affinity { 37 + if userAffinity == nil { 38 + return archAffinity 39 + } 40 + 41 + merged := archAffinity.DeepCopy() 42 + 43 + // Merge pod affinity if user specified it 44 + if userAffinity.PodAffinity != nil { 45 + merged.PodAffinity = userAffinity.PodAffinity.DeepCopy() 46 + } 47 + 48 + // Merge pod anti-affinity if user specified it 49 + if userAffinity.PodAntiAffinity != nil { 50 + merged.PodAntiAffinity = userAffinity.PodAntiAffinity.DeepCopy() 51 + } 52 + 53 + // Merge node affinity - architecture requirement is preserved, user preferences are added 54 + if userAffinity.NodeAffinity != nil { 55 + if merged.NodeAffinity == nil { 56 + merged.NodeAffinity = &corev1.NodeAffinity{} 57 + } 58 + 59 + // Add user's preferred scheduling terms 60 + if userAffinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil { 61 + merged.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution = 62 + userAffinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution 63 + } 64 + 65 + // Merge required node selector terms (AND operation) 66 + if userAffinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil { 67 + for _, userTerm := range userAffinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms { 68 + // Append user's match expressions to architecture term 69 + if len(merged.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms) > 0 { 70 + merged.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions = 71 + append( 72 + merged.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions, 73 + userTerm.MatchExpressions..., 74 + ) 75 + if len(userTerm.MatchFields) > 0 { 76 + merged.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields = 77 + append( 78 + merged.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields, 79 + userTerm.MatchFields..., 80 + ) 81 + } 82 + } 83 + } 84 + } 85 + } 86 + 87 + return merged 88 + }
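A quick usage sketch of the two helpers together; the zone key/value and the exampleAffinity function are illustrative only:

```go
package jobbuilder

import (
	corev1 "k8s.io/api/core/v1"
)

// exampleAffinity pins a workflow to arm64 nodes (the hard requirement from
// BuildArchitectureAffinity) while keeping a user-supplied soft preference
// for a zone, merged via MergeAffinity.
func exampleAffinity() *corev1.Affinity {
	user := &corev1.Affinity{
		NodeAffinity: &corev1.NodeAffinity{
			PreferredDuringSchedulingIgnoredDuringExecution: []corev1.PreferredSchedulingTerm{{
				Weight: 50,
				Preference: corev1.NodeSelectorTerm{
					MatchExpressions: []corev1.NodeSelectorRequirement{{
						Key:      "topology.kubernetes.io/zone",
						Operator: corev1.NodeSelectorOpIn,
						Values:   []string{"us-east-1a"},
					}},
				},
			}},
		},
	}
	return MergeAffinity(BuildArchitectureAffinity("arm64"), user)
}
```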
+294
pkg/jobbuilder/job_template.go
··· 1 + package jobbuilder 2 + 3 + import ( 4 + "fmt" 5 + 6 + batchv1 "k8s.io/api/batch/v1" 7 + corev1 "k8s.io/api/core/v1" 8 + "k8s.io/apimachinery/pkg/api/resource" 9 + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 + 11 + loomv1alpha1 "tangled.org/evan.jarrett.net/loom/api/v1alpha1" 12 + ) 13 + 14 + // WorkflowConfig contains all the information needed to create a spindle Job. 15 + type WorkflowConfig struct { 16 + // WorkflowName is the name of the workflow (used in Job name and labels) 17 + WorkflowName string 18 + 19 + // PipelineID is a unique identifier for this pipeline run 20 + PipelineID string 21 + 22 + // Image is the container image to use for execution 23 + Image string 24 + 25 + // Architecture is the target architecture (amd64, arm64) 26 + Architecture string 27 + 28 + // Steps are the workflow steps to execute 29 + Steps []WorkflowStep 30 + 31 + // RepoURL is the git repository URL to clone 32 + RepoURL string 33 + 34 + // CommitSHA is the git commit to checkout 35 + CommitSHA string 36 + 37 + // Secrets contains auth tokens or other secrets needed 38 + Secrets []string 39 + 40 + // Knot is the tangled.org knot URL 41 + Knot string 42 + 43 + // Template is the SpindleSet template to apply 44 + Template loomv1alpha1.SpindleTemplate 45 + 46 + // Namespace is the Kubernetes namespace for the Job 47 + Namespace string 48 + } 49 + 50 + // BuildJob creates a Kubernetes Job specification for running a spindle workflow. 51 + func BuildJob(config WorkflowConfig) (*batchv1.Job, error) { 52 + if config.WorkflowName == "" { 53 + return nil, fmt.Errorf("workflow name is required") 54 + } 55 + if config.Image == "" { 56 + return nil, fmt.Errorf("container image is required") 57 + } 58 + if config.PipelineID == "" { 59 + return nil, fmt.Errorf("pipeline ID is required") 60 + } 61 + 62 + // Generate the step execution script 63 + script := BuildStepExecutionScript(config.Steps) 64 + 65 + // Build architecture-based node affinity 66 + archAffinity := BuildArchitectureAffinity(config.Architecture) 67 + 68 + // Merge with user-provided affinity from template 69 + finalAffinity := MergeAffinity(archAffinity, config.Template.Affinity) 70 + 71 + // Default resources if not specified 72 + resources := config.Template.Resources 73 + if resources.Requests == nil && resources.Limits == nil { 74 + resources = corev1.ResourceRequirements{ 75 + Requests: corev1.ResourceList{ 76 + corev1.ResourceCPU: resource.MustParse("500m"), 77 + corev1.ResourceMemory: resource.MustParse("1Gi"), 78 + }, 79 + Limits: corev1.ResourceList{ 80 + corev1.ResourceCPU: resource.MustParse("2"), 81 + corev1.ResourceMemory: resource.MustParse("4Gi"), 82 + }, 83 + } 84 + } 85 + 86 + // Job name: spindle-{pipelineID}-{workflowName} (truncated if needed) 87 + jobName := fmt.Sprintf("spindle-%s-%s", config.PipelineID, config.WorkflowName) 88 + if len(jobName) > 63 { 89 + // Kubernetes names must be 63 chars or less 90 + jobName = jobName[:63] 91 + } 92 + 93 + // Labels for the Job 94 + labels := map[string]string{ 95 + "loom.j5t.io/component": "spindle", 96 + "loom.j5t.io/pipeline-id": config.PipelineID, 97 + "loom.j5t.io/workflow": config.WorkflowName, 98 + } 99 + 100 + // Backoff limit: don't retry failed jobs 101 + backoffLimit := int32(0) 102 + ttlAfterFinished := int32(3600) // Clean up Jobs after 1 hour 103 + 104 + job := &batchv1.Job{ 105 + ObjectMeta: metav1.ObjectMeta{ 106 + Name: jobName, 107 + Namespace: config.Namespace, 108 + Labels: labels, 109 + Annotations: map[string]string{ 110 + "loom.j5t.io/repo-url": 
config.RepoURL, 111 + "loom.j5t.io/commit-sha": config.CommitSHA, 112 + "loom.j5t.io/architecture": config.Architecture, 113 + "loom.j5t.io/knot": config.Knot, 114 + }, 115 + }, 116 + Spec: batchv1.JobSpec{ 117 + BackoffLimit: &backoffLimit, 118 + TTLSecondsAfterFinished: &ttlAfterFinished, 119 + Template: corev1.PodTemplateSpec{ 120 + ObjectMeta: metav1.ObjectMeta{ 121 + Labels: labels, 122 + }, 123 + Spec: corev1.PodSpec{ 124 + RestartPolicy: corev1.RestartPolicyNever, 125 + SecurityContext: &corev1.PodSecurityContext{ 126 + RunAsNonRoot: &[]bool{true}[0], 127 + SeccompProfile: &corev1.SeccompProfile{ 128 + Type: corev1.SeccompProfileTypeRuntimeDefault, 129 + }, 130 + }, 131 + 132 + // Init container: clone repository 133 + InitContainers: []corev1.Container{ 134 + { 135 + Name: "clone-repo", 136 + Image: "alpine/git:latest", 137 + Command: []string{"/bin/sh", "-c"}, 138 + SecurityContext: &corev1.SecurityContext{ 139 + AllowPrivilegeEscalation: &[]bool{false}[0], 140 + RunAsNonRoot: &[]bool{true}[0], 141 + Capabilities: &corev1.Capabilities{ 142 + Drop: []corev1.Capability{"ALL"}, 143 + }, 144 + SeccompProfile: &corev1.SeccompProfile{ 145 + Type: corev1.SeccompProfileTypeRuntimeDefault, 146 + }, 147 + }, 148 + Args: []string{ 149 + fmt.Sprintf(` 150 + set -e 151 + echo "Cloning repository..." 152 + git clone %s /tangled/workspace || { 153 + echo "Failed to clone repository" 154 + exit 1 155 + } 156 + cd /tangled/workspace 157 + echo "Checking out commit %s..." 158 + git checkout %s || { 159 + echo "Failed to checkout commit" 160 + exit 1 161 + } 162 + echo "Repository ready" 163 + `, config.RepoURL, config.CommitSHA, config.CommitSHA), 164 + }, 165 + VolumeMounts: []corev1.VolumeMount{ 166 + { 167 + Name: "workspace", 168 + MountPath: "/tangled/workspace", 169 + }, 170 + }, 171 + }, 172 + }, 173 + 174 + // Main container: execute workflow steps 175 + Containers: []corev1.Container{ 176 + { 177 + Name: "runner", 178 + Image: config.Image, 179 + Command: []string{"/bin/bash", "-c"}, 180 + Args: []string{script}, 181 + WorkingDir: "/tangled/workspace", 182 + 183 + SecurityContext: &corev1.SecurityContext{ 184 + AllowPrivilegeEscalation: &[]bool{false}[0], 185 + RunAsNonRoot: &[]bool{true}[0], 186 + Capabilities: &corev1.Capabilities{ 187 + Drop: []corev1.Capability{"ALL"}, 188 + }, 189 + SeccompProfile: &corev1.SeccompProfile{ 190 + Type: corev1.SeccompProfileTypeRuntimeDefault, 191 + }, 192 + }, 193 + 194 + Resources: resources, 195 + 196 + VolumeMounts: []corev1.VolumeMount{ 197 + { 198 + Name: "workspace", 199 + MountPath: "/tangled/workspace", 200 + }, 201 + { 202 + Name: "step-outputs", 203 + MountPath: "/tmp/step-outputs", 204 + }, 205 + { 206 + Name: "github-env", 207 + MountPath: "/tmp/github", 208 + }, 209 + }, 210 + 211 + Env: buildEnvironmentVariables(config), 212 + }, 213 + }, 214 + 215 + // Volumes 216 + Volumes: []corev1.Volume{ 217 + { 218 + Name: "workspace", 219 + VolumeSource: corev1.VolumeSource{ 220 + EmptyDir: &corev1.EmptyDirVolumeSource{}, 221 + }, 222 + }, 223 + { 224 + Name: "step-outputs", 225 + VolumeSource: corev1.VolumeSource{ 226 + EmptyDir: &corev1.EmptyDirVolumeSource{}, 227 + }, 228 + }, 229 + { 230 + Name: "github-env", 231 + VolumeSource: corev1.VolumeSource{ 232 + EmptyDir: &corev1.EmptyDirVolumeSource{}, 233 + }, 234 + }, 235 + }, 236 + 237 + // Node targeting 238 + NodeSelector: config.Template.NodeSelector, 239 + Tolerations: config.Template.Tolerations, 240 + Affinity: finalAffinity, 241 + }, 242 + }, 243 + }, 244 + } 245 + 246 + return job, nil 247 
+ } 248 + 249 + // buildEnvironmentVariables creates the environment variables for the runner container. 250 + func buildEnvironmentVariables(config WorkflowConfig) []corev1.EnvVar { 251 + env := []corev1.EnvVar{ 252 + { 253 + Name: "TANGLED_WORKFLOW", 254 + Value: config.WorkflowName, 255 + }, 256 + { 257 + Name: "TANGLED_PIPELINE_ID", 258 + Value: config.PipelineID, 259 + }, 260 + { 261 + Name: "TANGLED_REPO_URL", 262 + Value: config.RepoURL, 263 + }, 264 + { 265 + Name: "TANGLED_COMMIT_SHA", 266 + Value: config.CommitSHA, 267 + }, 268 + { 269 + Name: "TANGLED_ARCHITECTURE", 270 + Value: config.Architecture, 271 + }, 272 + { 273 + Name: "CI", 274 + Value: "true", 275 + }, 276 + } 277 + 278 + // Add secret references if provided 279 + for i, secretName := range config.Secrets { 280 + env = append(env, corev1.EnvVar{ 281 + Name: fmt.Sprintf("TANGLED_SECRET_%d", i), 282 + ValueFrom: &corev1.EnvVarSource{ 283 + SecretKeyRef: &corev1.SecretKeySelector{ 284 + LocalObjectReference: corev1.LocalObjectReference{ 285 + Name: secretName, 286 + }, 287 + Key: "value", 288 + }, 289 + }, 290 + }) 291 + } 292 + 293 + return env 294 + }
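For orientation, a hedged sketch of driving BuildJob end to end; all values are illustrative, and the empty SpindleTemplate relies on the resource defaults above. Two caveats worth noting: the 63-character truncation of the Job name can leave a trailing "-", which the API server rejects, so callers may want to trim trailing hyphens after truncating; and since the pod security context sets RunAsNonRoot, it is worth verifying that the alpine/git init image does not resolve to UID 0, as the kubelet refuses to start such containers.

package jobbuilder

import (
	loomv1alpha1 "tangled.org/evan.jarrett.net/loom/api/v1alpha1"
)

// exampleBuildJob sketches a minimal invocation; every value is illustrative.
func exampleBuildJob() error {
	job, err := BuildJob(WorkflowConfig{
		WorkflowName: "build",
		PipelineID:   "abc123",
		Image:        "golang:1.24",
		Architecture: "arm64",
		RepoURL:      "https://tangled.org/example/repo", // hypothetical repo
		CommitSHA:    "0123abcd",
		Namespace:    "loom-system",
		Steps: []WorkflowStep{
			{Name: "test", Command: "go test ./..."},
		},
		Template: loomv1alpha1.SpindleTemplate{}, // falls back to 500m/1Gi requests, 2/4Gi limits
	})
	if err != nil {
		return err
	}
	_ = job // hand off to the cluster client, e.g. a controller-runtime Create call
	return nil
}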
+129
pkg/jobbuilder/script_builder.go
··· 1 + package jobbuilder
2 +
3 + import (
4 + 	"fmt"
5 + 	"strings"
6 + )
7 +
8 + // WorkflowStep represents a single step in a workflow.
9 + type WorkflowStep struct {
10 + 	Name    string
11 + 	Command string
12 + 	Env     map[string]string
13 + }
14 +
15 + // BuildStepExecutionScript generates a bash script that executes all workflow steps sequentially.
16 + // The script implements GitHub Actions-compatible environment passing between steps.
17 + func BuildStepExecutionScript(steps []WorkflowStep) string {
18 + 	var script strings.Builder
19 +
20 + 	// Script header with error handling
21 + 	script.WriteString(`#!/bin/bash
22 + set -e
23 + set -o pipefail
24 +
25 + `)
26 +
27 + 	// GitHub Actions-compatible environment setup
28 + 	script.WriteString(`# GitHub Actions-compatible environment setup
29 + export GITHUB_ENV=/tmp/github/env
30 + export GITHUB_PATH=/tmp/github/path
31 + export GITHUB_OUTPUT=/tmp/github/output
32 + export GITHUB_STEP_SUMMARY=/tmp/github/step-summary.md
33 +
34 + # Initialize files
35 + mkdir -p /tmp/github
36 + touch $GITHUB_ENV $GITHUB_PATH $GITHUB_OUTPUT $GITHUB_STEP_SUMMARY
37 +
38 + # Helper functions for environment passing
39 + add_to_env() {
40 +   echo "$1=$2" >> $GITHUB_ENV
41 +   export "$1"="$2"
42 + }
43 +
44 + add_to_path() {
45 +   echo "$1" >> $GITHUB_PATH
46 +   export PATH="$1:$PATH"
47 + }
48 +
49 + set_output() {
50 +   echo "$1=$2" >> $GITHUB_OUTPUT
51 + }
52 +
53 + # Load environment variables set by previous steps
54 + load_github_env() {
55 +   if [ -f "$GITHUB_ENV" ]; then
56 +     set -a
57 +     source "$GITHUB_ENV" 2>/dev/null || true
58 +     set +a
59 +   fi
60 +   if [ -f "$GITHUB_PATH" ]; then
61 +     while IFS= read -r line; do
62 +       if [ -n "$line" ]; then
63 +         export PATH="$line:$PATH"
64 +       fi
65 +     done < "$GITHUB_PATH"
66 +   fi
67 + }
68 +
69 + # Timestamp helper (%N for sub-second precision requires GNU date)
70 + timestamp() {
71 +   date -u +"%Y-%m-%dT%H:%M:%S.%3NZ"
72 + }
73 +
74 + `)
75 +
76 + 	// Generate code for each step
77 + 	for i, step := range steps {
78 + 		script.WriteString(fmt.Sprintf(`
79 + ##############################################################################
80 + # Step %d: %s
81 + ##############################################################################
82 + echo ""
83 + echo "$(timestamp) ===== Starting Step %d: %s ====="
84 + echo ""
85 +
86 + # Load environment from previous steps
87 + load_github_env
88 +
89 + `, i+1, step.Name, i+1, step.Name))
90 +
91 + 		// Set step-specific environment variables
92 + 		if len(step.Env) > 0 {
93 + 			script.WriteString("# Step-specific environment variables\n")
94 + 			for k, v := range step.Env {
95 + 				// Escape single quotes so the value survives the single-quoted export
96 + 				escapedValue := EscapeShellString(v)
97 + 				script.WriteString(fmt.Sprintf("export %s='%s'\n", k, escapedValue))
98 + 			}
99 + 			script.WriteString("\n")
100 + 		}
101 +
102 + 		// Execute the step command (capture rc before the $(timestamp) substitution can clobber $?)
103 + 		script.WriteString("# Execute step command\n")
104 + 		script.WriteString("(\n")
105 + 		script.WriteString("  cd /tangled/workspace\n")
106 + 		script.WriteString("  " + step.Command + "\n")
107 + 		script.WriteString(") || {\n")
108 + 		script.WriteString(fmt.Sprintf("  rc=$?; echo \"$(timestamp) Step '%s' failed with exit code $rc\"\n", step.Name))
109 + 		script.WriteString("  exit $rc\n")
110 + 		script.WriteString("}\n\n")
111 +
112 + 		script.WriteString(fmt.Sprintf("echo \"$(timestamp) ===== Completed Step %d: %s =====\"\n", i+1, step.Name))
113 + 	}
114 +
115 + 	// Script footer
116 + 	script.WriteString(`
117 + echo ""
118 + echo "$(timestamp) ===== All steps completed successfully ====="
119 + exit 0
120 + `)
121 +
122 + 	return script.String()
123 + }
124 +
125 + // EscapeShellString escapes a string for safe use inside single quotes in shell scripts.
126 + func EscapeShellString(s string) string {
127 + 	// Replace each single quote with the sequence '\''
128 + 	return strings.ReplaceAll(s, "'", "'\\''")
129 + }
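To illustrate what the builder emits, a small sketch; the step name, command, and env var are illustrative, and the commented output is approximate:

package jobbuilder

import "fmt"

// exampleScript prints the generated script for a single step.
func exampleScript() {
	script := BuildStepExecutionScript([]WorkflowStep{
		{Name: "vet", Command: "go vet ./...", Env: map[string]string{"GOFLAGS": "-mod=mod"}},
	})
	fmt.Println(script)
	// After the header and helper functions, the per-step section comes out roughly as:
	//   export GOFLAGS='-mod=mod'
	//   (
	//     cd /tangled/workspace
	//     go vet ./...
	//   ) || {
	//     rc=$?; echo "$(timestamp) Step 'vet' failed with exit code $rc"
	//     exit $rc
	//   }
}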
+89
test/e2e/e2e_suite_test.go
··· 1 + /*
2 + Copyright 2025 Evan Jarrett.
3 +
4 + Licensed under the Apache License, Version 2.0 (the "License");
5 + you may not use this file except in compliance with the License.
6 + You may obtain a copy of the License at
7 +
8 +     http://www.apache.org/licenses/LICENSE-2.0
9 +
10 + Unless required by applicable law or agreed to in writing, software
11 + distributed under the License is distributed on an "AS IS" BASIS,
12 + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 + See the License for the specific language governing permissions and
14 + limitations under the License.
15 + */
16 +
17 + package e2e
18 +
19 + import (
20 + 	"fmt"
21 + 	"os"
22 + 	"os/exec"
23 + 	"testing"
24 +
25 + 	. "github.com/onsi/ginkgo/v2"
26 + 	. "github.com/onsi/gomega"
27 +
28 + 	"tangled.org/evan.jarrett.net/loom/test/utils"
29 + )
30 +
31 + var (
32 + 	// Optional Environment Variables:
33 + 	// - CERT_MANAGER_INSTALL_SKIP=true: Skips CertManager installation during test setup.
34 + 	// These variables are useful if CertManager is already installed, avoiding
35 + 	// re-installation and conflicts.
36 + 	skipCertManagerInstall = os.Getenv("CERT_MANAGER_INSTALL_SKIP") == "true"
37 + 	// isCertManagerAlreadyInstalled will be set to true when CertManager CRDs are found on the cluster
38 + 	isCertManagerAlreadyInstalled = false
39 +
40 + 	// projectImage is the name of the image that will be built and loaded
41 + 	// with the code source changes to be tested.
42 + 	projectImage = "example.com/loom:v0.0.1"
43 + )
44 +
45 + // TestE2E runs the end-to-end (e2e) test suite for the project. These tests execute in an isolated,
46 + // temporary environment to validate project changes, and are intended to be used in CI jobs.
47 + // The default setup requires Kind, builds/loads the Manager Docker image locally, and installs
48 + // CertManager.
49 + func TestE2E(t *testing.T) {
50 + 	RegisterFailHandler(Fail)
51 + 	_, _ = fmt.Fprintf(GinkgoWriter, "Starting loom integration test suite\n")
52 + 	RunSpecs(t, "e2e suite")
53 + }
54 +
55 + var _ = BeforeSuite(func() {
56 + 	By("building the manager(Operator) image")
57 + 	cmd := exec.Command("make", "docker-build", fmt.Sprintf("IMG=%s", projectImage))
58 + 	_, err := utils.Run(cmd)
59 + 	ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to build the manager(Operator) image")
60 +
61 + 	// TODO(user): If you want to change the e2e test vendor from Kind, ensure the image is
62 + 	// built and available before running the tests. Also, remove the following block.
63 + 	By("loading the manager(Operator) image on Kind")
64 + 	err = utils.LoadImageToKindClusterWithName(projectImage)
65 + 	ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to load the manager(Operator) image into Kind")
66 +
67 + 	// The e2e tests are intended to run on a temporary cluster that is created and destroyed for testing.
68 + 	// To prevent errors when tests run in environments with CertManager already installed,
69 + 	// we check for its presence before execution.
70 + 	// Set up CertManager before the suite if installation is not skipped and it is not already installed
71 + 	if !skipCertManagerInstall {
72 + 		By("checking if cert manager is installed already")
73 + 		isCertManagerAlreadyInstalled = utils.IsCertManagerCRDsInstalled()
74 + 		if !isCertManagerAlreadyInstalled {
75 + 			_, _ = fmt.Fprintf(GinkgoWriter, "Installing CertManager...\n")
76 + 			Expect(utils.InstallCertManager()).To(Succeed(), "Failed to install CertManager")
77 + 		} else {
78 + 			_, _ = fmt.Fprintf(GinkgoWriter, "WARNING: CertManager is already installed. Skipping installation...\n")
79 + 		}
80 + 	}
81 + })
82 +
83 + var _ = AfterSuite(func() {
84 + 	// Tear down CertManager after the suite if installation was not skipped and it was not already installed
85 + 	if !skipCertManagerInstall && !isCertManagerAlreadyInstalled {
86 + 		_, _ = fmt.Fprintf(GinkgoWriter, "Uninstalling CertManager...\n")
87 + 		utils.UninstallCertManager()
88 + 	}
89 + })
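When the suite targets a cluster that already has cert-manager, setting CERT_MANAGER_INSTALL_SKIP=true when invoking the e2e suite (e.g. `CERT_MANAGER_INSTALL_SKIP=true make test-e2e`) skips the install/uninstall cycle entirely; similarly, the KIND_CLUSTER variable, read by test/utils when loading the image, selects a non-default kind cluster name.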
+329
test/e2e/e2e_test.go
··· 1 + /*
2 + Copyright 2025 Evan Jarrett.
3 +
4 + Licensed under the Apache License, Version 2.0 (the "License");
5 + you may not use this file except in compliance with the License.
6 + You may obtain a copy of the License at
7 +
8 +     http://www.apache.org/licenses/LICENSE-2.0
9 +
10 + Unless required by applicable law or agreed to in writing, software
11 + distributed under the License is distributed on an "AS IS" BASIS,
12 + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 + See the License for the specific language governing permissions and
14 + limitations under the License.
15 + */
16 +
17 + package e2e
18 +
19 + import (
20 + 	"encoding/json"
21 + 	"fmt"
22 + 	"os"
23 + 	"os/exec"
24 + 	"path/filepath"
25 + 	"time"
26 +
27 + 	. "github.com/onsi/ginkgo/v2"
28 + 	. "github.com/onsi/gomega"
29 +
30 + 	"tangled.org/evan.jarrett.net/loom/test/utils"
31 + )
32 +
33 + // namespace in which the project is deployed
34 + const namespace = "loom-system"
35 +
36 + // serviceAccountName created for the project
37 + const serviceAccountName = "loom-controller-manager"
38 +
39 + // metricsServiceName is the name of the metrics service of the project
40 + const metricsServiceName = "loom-controller-manager-metrics-service"
41 +
42 + // metricsRoleBindingName is the name of the RBAC role binding that will be created to allow access to the metrics data
43 + const metricsRoleBindingName = "loom-metrics-binding"
44 +
45 + var _ = Describe("Manager", Ordered, func() {
46 + 	var controllerPodName string
47 +
48 + 	// Before running the tests, set up the environment by creating the namespace,
49 + 	// enforcing the restricted security policy on the namespace, installing the CRDs,
50 + 	// and deploying the controller.
51 + 	BeforeAll(func() {
52 + 		By("creating manager namespace")
53 + 		cmd := exec.Command("kubectl", "create", "ns", namespace)
54 + 		_, err := utils.Run(cmd)
55 + 		Expect(err).NotTo(HaveOccurred(), "Failed to create namespace")
56 +
57 + 		By("labeling the namespace to enforce the restricted security policy")
58 + 		cmd = exec.Command("kubectl", "label", "--overwrite", "ns", namespace,
59 + 			"pod-security.kubernetes.io/enforce=restricted")
60 + 		_, err = utils.Run(cmd)
61 + 		Expect(err).NotTo(HaveOccurred(), "Failed to label namespace with restricted policy")
62 +
63 + 		By("installing CRDs")
64 + 		cmd = exec.Command("make", "install")
65 + 		_, err = utils.Run(cmd)
66 + 		Expect(err).NotTo(HaveOccurred(), "Failed to install CRDs")
67 +
68 + 		By("deploying the controller-manager")
69 + 		cmd = exec.Command("make", "deploy", fmt.Sprintf("IMG=%s", projectImage))
70 + 		_, err = utils.Run(cmd)
71 + 		Expect(err).NotTo(HaveOccurred(), "Failed to deploy the controller-manager")
72 + 	})
73 +
74 + 	// After all tests have been executed, clean up by undeploying the controller, uninstalling the CRDs,
75 + 	// and deleting the namespace.
76 + 	AfterAll(func() {
77 + 		By("cleaning up the curl pod for metrics")
78 + 		cmd := exec.Command("kubectl", "delete", "pod", "curl-metrics", "-n", namespace)
79 + 		_, _ = utils.Run(cmd)
80 +
81 + 		By("undeploying the controller-manager")
82 + 		cmd = exec.Command("make", "undeploy")
83 + 		_, _ = utils.Run(cmd)
84 +
85 + 		By("uninstalling CRDs")
86 + 		cmd = exec.Command("make", "uninstall")
87 + 		_, _ = utils.Run(cmd)
88 +
89 + 		By("removing manager namespace")
90 + 		cmd = exec.Command("kubectl", "delete", "ns", namespace)
91 + 		_, _ = utils.Run(cmd)
92 + 	})
93 +
94 + 	// After each test, check for failures and collect logs, events,
95 + 	// and pod descriptions for debugging.
96 + AfterEach(func() { 97 + specReport := CurrentSpecReport() 98 + if specReport.Failed() { 99 + By("Fetching controller manager pod logs") 100 + cmd := exec.Command("kubectl", "logs", controllerPodName, "-n", namespace) 101 + controllerLogs, err := utils.Run(cmd) 102 + if err == nil { 103 + _, _ = fmt.Fprintf(GinkgoWriter, "Controller logs:\n %s", controllerLogs) 104 + } else { 105 + _, _ = fmt.Fprintf(GinkgoWriter, "Failed to get Controller logs: %s", err) 106 + } 107 + 108 + By("Fetching Kubernetes events") 109 + cmd = exec.Command("kubectl", "get", "events", "-n", namespace, "--sort-by=.lastTimestamp") 110 + eventsOutput, err := utils.Run(cmd) 111 + if err == nil { 112 + _, _ = fmt.Fprintf(GinkgoWriter, "Kubernetes events:\n%s", eventsOutput) 113 + } else { 114 + _, _ = fmt.Fprintf(GinkgoWriter, "Failed to get Kubernetes events: %s", err) 115 + } 116 + 117 + By("Fetching curl-metrics logs") 118 + cmd = exec.Command("kubectl", "logs", "curl-metrics", "-n", namespace) 119 + metricsOutput, err := utils.Run(cmd) 120 + if err == nil { 121 + _, _ = fmt.Fprintf(GinkgoWriter, "Metrics logs:\n %s", metricsOutput) 122 + } else { 123 + _, _ = fmt.Fprintf(GinkgoWriter, "Failed to get curl-metrics logs: %s", err) 124 + } 125 + 126 + By("Fetching controller manager pod description") 127 + cmd = exec.Command("kubectl", "describe", "pod", controllerPodName, "-n", namespace) 128 + podDescription, err := utils.Run(cmd) 129 + if err == nil { 130 + fmt.Println("Pod description:\n", podDescription) 131 + } else { 132 + fmt.Println("Failed to describe controller pod") 133 + } 134 + } 135 + }) 136 + 137 + SetDefaultEventuallyTimeout(2 * time.Minute) 138 + SetDefaultEventuallyPollingInterval(time.Second) 139 + 140 + Context("Manager", func() { 141 + It("should run successfully", func() { 142 + By("validating that the controller-manager pod is running as expected") 143 + verifyControllerUp := func(g Gomega) { 144 + // Get the name of the controller-manager pod 145 + cmd := exec.Command("kubectl", "get", 146 + "pods", "-l", "control-plane=controller-manager", 147 + "-o", "go-template={{ range .items }}"+ 148 + "{{ if not .metadata.deletionTimestamp }}"+ 149 + "{{ .metadata.name }}"+ 150 + "{{ \"\\n\" }}{{ end }}{{ end }}", 151 + "-n", namespace, 152 + ) 153 + 154 + podOutput, err := utils.Run(cmd) 155 + g.Expect(err).NotTo(HaveOccurred(), "Failed to retrieve controller-manager pod information") 156 + podNames := utils.GetNonEmptyLines(podOutput) 157 + g.Expect(podNames).To(HaveLen(1), "expected 1 controller pod running") 158 + controllerPodName = podNames[0] 159 + g.Expect(controllerPodName).To(ContainSubstring("controller-manager")) 160 + 161 + // Validate the pod's status 162 + cmd = exec.Command("kubectl", "get", 163 + "pods", controllerPodName, "-o", "jsonpath={.status.phase}", 164 + "-n", namespace, 165 + ) 166 + output, err := utils.Run(cmd) 167 + g.Expect(err).NotTo(HaveOccurred()) 168 + g.Expect(output).To(Equal("Running"), "Incorrect controller-manager pod status") 169 + } 170 + Eventually(verifyControllerUp).Should(Succeed()) 171 + }) 172 + 173 + It("should ensure the metrics endpoint is serving metrics", func() { 174 + By("creating a ClusterRoleBinding for the service account to allow access to metrics") 175 + cmd := exec.Command("kubectl", "create", "clusterrolebinding", metricsRoleBindingName, 176 + "--clusterrole=loom-metrics-reader", 177 + fmt.Sprintf("--serviceaccount=%s:%s", namespace, serviceAccountName), 178 + ) 179 + _, err := utils.Run(cmd) 180 + Expect(err).NotTo(HaveOccurred(), "Failed 
to create ClusterRoleBinding") 181 + 182 + By("validating that the metrics service is available") 183 + cmd = exec.Command("kubectl", "get", "service", metricsServiceName, "-n", namespace) 184 + _, err = utils.Run(cmd) 185 + Expect(err).NotTo(HaveOccurred(), "Metrics service should exist") 186 + 187 + By("getting the service account token") 188 + token, err := serviceAccountToken() 189 + Expect(err).NotTo(HaveOccurred()) 190 + Expect(token).NotTo(BeEmpty()) 191 + 192 + By("waiting for the metrics endpoint to be ready") 193 + verifyMetricsEndpointReady := func(g Gomega) { 194 + cmd := exec.Command("kubectl", "get", "endpoints", metricsServiceName, "-n", namespace) 195 + output, err := utils.Run(cmd) 196 + g.Expect(err).NotTo(HaveOccurred()) 197 + g.Expect(output).To(ContainSubstring("8443"), "Metrics endpoint is not ready") 198 + } 199 + Eventually(verifyMetricsEndpointReady).Should(Succeed()) 200 + 201 + By("verifying that the controller manager is serving the metrics server") 202 + verifyMetricsServerStarted := func(g Gomega) { 203 + cmd := exec.Command("kubectl", "logs", controllerPodName, "-n", namespace) 204 + output, err := utils.Run(cmd) 205 + g.Expect(err).NotTo(HaveOccurred()) 206 + g.Expect(output).To(ContainSubstring("controller-runtime.metrics\tServing metrics server"), 207 + "Metrics server not yet started") 208 + } 209 + Eventually(verifyMetricsServerStarted).Should(Succeed()) 210 + 211 + By("creating the curl-metrics pod to access the metrics endpoint") 212 + cmd = exec.Command("kubectl", "run", "curl-metrics", "--restart=Never", 213 + "--namespace", namespace, 214 + "--image=curlimages/curl:latest", 215 + "--overrides", 216 + fmt.Sprintf(`{ 217 + "spec": { 218 + "containers": [{ 219 + "name": "curl", 220 + "image": "curlimages/curl:latest", 221 + "command": ["/bin/sh", "-c"], 222 + "args": ["curl -v -k -H 'Authorization: Bearer %s' https://%s.%s.svc.cluster.local:8443/metrics"], 223 + "securityContext": { 224 + "allowPrivilegeEscalation": false, 225 + "capabilities": { 226 + "drop": ["ALL"] 227 + }, 228 + "runAsNonRoot": true, 229 + "runAsUser": 1000, 230 + "seccompProfile": { 231 + "type": "RuntimeDefault" 232 + } 233 + } 234 + }], 235 + "serviceAccount": "%s" 236 + } 237 + }`, token, metricsServiceName, namespace, serviceAccountName)) 238 + _, err = utils.Run(cmd) 239 + Expect(err).NotTo(HaveOccurred(), "Failed to create curl-metrics pod") 240 + 241 + By("waiting for the curl-metrics pod to complete.") 242 + verifyCurlUp := func(g Gomega) { 243 + cmd := exec.Command("kubectl", "get", "pods", "curl-metrics", 244 + "-o", "jsonpath={.status.phase}", 245 + "-n", namespace) 246 + output, err := utils.Run(cmd) 247 + g.Expect(err).NotTo(HaveOccurred()) 248 + g.Expect(output).To(Equal("Succeeded"), "curl pod in wrong status") 249 + } 250 + Eventually(verifyCurlUp, 5*time.Minute).Should(Succeed()) 251 + 252 + By("getting the metrics by checking curl-metrics logs") 253 + metricsOutput := getMetricsOutput() 254 + Expect(metricsOutput).To(ContainSubstring( 255 + "controller_runtime_reconcile_total", 256 + )) 257 + }) 258 + 259 + // +kubebuilder:scaffold:e2e-webhooks-checks 260 + 261 + // TODO: Customize the e2e test suite with scenarios specific to your project. 
262 + 		// Consider applying sample/CR(s) and checking their status and/or verifying
263 + 		// the reconciliation by using the metrics, e.g.:
264 + 		// metricsOutput := getMetricsOutput()
265 + 		// Expect(metricsOutput).To(ContainSubstring(
266 + 		//    fmt.Sprintf(`controller_runtime_reconcile_total{controller="%s",result="success"} 1`,
267 + 		//    strings.ToLower(<Kind>),
268 + 		// ))
269 + 	})
270 + })
271 +
272 + // serviceAccountToken returns a token for the specified service account in the given namespace.
273 + // It uses the Kubernetes TokenRequest API to generate a token by directly sending a request
274 + // and parsing the resulting token from the API response.
275 + func serviceAccountToken() (string, error) {
276 + 	const tokenRequestRawString = `{
277 + 		"apiVersion": "authentication.k8s.io/v1",
278 + 		"kind": "TokenRequest"
279 + 	}`
280 +
281 + 	// Temporary file to store the token request
282 + 	secretName := fmt.Sprintf("%s-token-request", serviceAccountName)
283 + 	tokenRequestFile := filepath.Join("/tmp", secretName)
284 + 	err := os.WriteFile(tokenRequestFile, []byte(tokenRequestRawString), os.FileMode(0o644))
285 + 	if err != nil {
286 + 		return "", err
287 + 	}
288 +
289 + 	var out string
290 + 	verifyTokenCreation := func(g Gomega) {
291 + 		// Execute kubectl command to create the token
292 + 		cmd := exec.Command("kubectl", "create", "--raw", fmt.Sprintf(
293 + 			"/api/v1/namespaces/%s/serviceaccounts/%s/token",
294 + 			namespace,
295 + 			serviceAccountName,
296 + 		), "-f", tokenRequestFile)
297 +
298 + 		output, err := cmd.CombinedOutput()
299 + 		g.Expect(err).NotTo(HaveOccurred())
300 +
301 + 		// Parse the JSON output to extract the token
302 + 		var token tokenRequest
303 + 		err = json.Unmarshal(output, &token)
304 + 		g.Expect(err).NotTo(HaveOccurred())
305 +
306 + 		out = token.Status.Token
307 + 	}
308 + 	Eventually(verifyTokenCreation).Should(Succeed())
309 +
310 + 	return out, err
311 + }
312 +
313 + // getMetricsOutput retrieves and returns the logs from the curl pod used to access the metrics endpoint.
314 + func getMetricsOutput() string {
315 + 	By("getting the curl-metrics logs")
316 + 	cmd := exec.Command("kubectl", "logs", "curl-metrics", "-n", namespace)
317 + 	metricsOutput, err := utils.Run(cmd)
318 + 	Expect(err).NotTo(HaveOccurred(), "Failed to retrieve logs from curl pod")
319 + 	Expect(metricsOutput).To(ContainSubstring("< HTTP/1.1 200 OK"))
320 + 	return metricsOutput
321 + }
322 +
323 + // tokenRequest is a simplified representation of the Kubernetes TokenRequest API response,
324 + // containing only the token field that we need to extract.
325 + type tokenRequest struct {
326 + 	Status struct {
327 + 		Token string `json:"token"`
328 + 	} `json:"status"`
329 + }
+254
test/utils/utils.go
··· 1 + /*
2 + Copyright 2025 Evan Jarrett.
3 +
4 + Licensed under the Apache License, Version 2.0 (the "License");
5 + you may not use this file except in compliance with the License.
6 + You may obtain a copy of the License at
7 +
8 +     http://www.apache.org/licenses/LICENSE-2.0
9 +
10 + Unless required by applicable law or agreed to in writing, software
11 + distributed under the License is distributed on an "AS IS" BASIS,
12 + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 + See the License for the specific language governing permissions and
14 + limitations under the License.
15 + */
16 +
17 + package utils
18 +
19 + import (
20 + 	"bufio"
21 + 	"bytes"
22 + 	"fmt"
23 + 	"os"
24 + 	"os/exec"
25 + 	"strings"
26 +
27 + 	. "github.com/onsi/ginkgo/v2" // nolint:revive,staticcheck
28 + )
29 +
30 + const (
31 + 	prometheusOperatorVersion = "v0.77.1"
32 + 	prometheusOperatorURL     = "https://github.com/prometheus-operator/prometheus-operator/" +
33 + 		"releases/download/%s/bundle.yaml"
34 +
35 + 	certmanagerVersion = "v1.16.3"
36 + 	certmanagerURLTmpl = "https://github.com/cert-manager/cert-manager/releases/download/%s/cert-manager.yaml"
37 + )
38 +
39 + func warnError(err error) {
40 + 	_, _ = fmt.Fprintf(GinkgoWriter, "warning: %v\n", err)
41 + }
42 +
43 + // Run executes the provided command within this context
44 + func Run(cmd *exec.Cmd) (string, error) {
45 + 	dir, _ := GetProjectDir()
46 + 	cmd.Dir = dir
47 +
48 + 	if err := os.Chdir(cmd.Dir); err != nil {
49 + 		_, _ = fmt.Fprintf(GinkgoWriter, "chdir dir: %q\n", err)
50 + 	}
51 +
52 + 	cmd.Env = append(os.Environ(), "GO111MODULE=on")
53 + 	command := strings.Join(cmd.Args, " ")
54 + 	_, _ = fmt.Fprintf(GinkgoWriter, "running: %q\n", command)
55 + 	output, err := cmd.CombinedOutput()
56 + 	if err != nil {
57 + 		return string(output), fmt.Errorf("%q failed with error %q: %w", command, string(output), err)
58 + 	}
59 +
60 + 	return string(output), nil
61 + }
62 +
63 + // InstallPrometheusOperator installs the Prometheus Operator, used to export the enabled metrics.
64 + func InstallPrometheusOperator() error {
65 + 	url := fmt.Sprintf(prometheusOperatorURL, prometheusOperatorVersion)
66 + 	cmd := exec.Command("kubectl", "create", "-f", url)
67 + 	_, err := Run(cmd)
68 + 	return err
69 + }
70 +
71 + // UninstallPrometheusOperator uninstalls the Prometheus Operator
72 + func UninstallPrometheusOperator() {
73 + 	url := fmt.Sprintf(prometheusOperatorURL, prometheusOperatorVersion)
74 + 	cmd := exec.Command("kubectl", "delete", "-f", url)
75 + 	if _, err := Run(cmd); err != nil {
76 + 		warnError(err)
77 + 	}
78 + }
79 +
80 + // IsPrometheusCRDsInstalled checks if any Prometheus CRDs are installed
81 + // by verifying the existence of key CRDs related to Prometheus.
82 + func IsPrometheusCRDsInstalled() bool {
83 + 	// List of common Prometheus CRDs
84 + 	prometheusCRDs := []string{
85 + 		"prometheuses.monitoring.coreos.com",
86 + 		"prometheusrules.monitoring.coreos.com",
87 + 		"prometheusagents.monitoring.coreos.com",
88 + 	}
89 +
90 + 	cmd := exec.Command("kubectl", "get", "crds", "-o", "custom-columns=NAME:.metadata.name")
91 + 	output, err := Run(cmd)
92 + 	if err != nil {
93 + 		return false
94 + 	}
95 + 	crdList := GetNonEmptyLines(output)
96 + 	for _, crd := range prometheusCRDs {
97 + 		for _, line := range crdList {
98 + 			if strings.Contains(line, crd) {
99 + 				return true
100 + 			}
101 + 		}
102 + 	}
103 +
104 + 	return false
105 + }
106 +
107 + // UninstallCertManager uninstalls the cert manager bundle
108 + func UninstallCertManager() {
109 + 	url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion)
110 + 	cmd := exec.Command("kubectl", "delete", "-f", url)
111 + 	if _, err := Run(cmd); err != nil {
112 + 		warnError(err)
113 + 	}
114 + }
115 +
116 + // InstallCertManager installs the cert manager bundle.
117 + func InstallCertManager() error {
118 + 	url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion)
119 + 	cmd := exec.Command("kubectl", "apply", "-f", url)
120 + 	if _, err := Run(cmd); err != nil {
121 + 		return err
122 + 	}
123 + 	// Wait for cert-manager-webhook to be ready, which can take time if cert-manager
124 + 	// was re-installed after uninstalling on a cluster.
125 + 	cmd = exec.Command("kubectl", "wait", "deployment.apps/cert-manager-webhook",
126 + 		"--for", "condition=Available",
127 + 		"--namespace", "cert-manager",
128 + 		"--timeout", "5m",
129 + 	)
130 +
131 + 	_, err := Run(cmd)
132 + 	return err
133 + }
134 +
135 + // IsCertManagerCRDsInstalled checks if any Cert Manager CRDs are installed
136 + // by verifying the existence of key CRDs related to Cert Manager.
137 + func IsCertManagerCRDsInstalled() bool {
138 + 	// List of common Cert Manager CRDs
139 + 	certManagerCRDs := []string{
140 + 		"certificates.cert-manager.io",
141 + 		"issuers.cert-manager.io",
142 + 		"clusterissuers.cert-manager.io",
143 + 		"certificaterequests.cert-manager.io",
144 + 		"orders.acme.cert-manager.io",
145 + 		"challenges.acme.cert-manager.io",
146 + 	}
147 +
148 + 	// Execute the kubectl command to get all CRDs
149 + 	cmd := exec.Command("kubectl", "get", "crds")
150 + 	output, err := Run(cmd)
151 + 	if err != nil {
152 + 		return false
153 + 	}
154 +
155 + 	// Check if any of the Cert Manager CRDs are present
156 + 	crdList := GetNonEmptyLines(output)
157 + 	for _, crd := range certManagerCRDs {
158 + 		for _, line := range crdList {
159 + 			if strings.Contains(line, crd) {
160 + 				return true
161 + 			}
162 + 		}
163 + 	}
164 +
165 + 	return false
166 + }
167 +
168 + // LoadImageToKindClusterWithName loads a local docker image into the kind cluster
169 + func LoadImageToKindClusterWithName(name string) error {
170 + 	cluster := "kind"
171 + 	if v, ok := os.LookupEnv("KIND_CLUSTER"); ok {
172 + 		cluster = v
173 + 	}
174 + 	kindOptions := []string{"load", "docker-image", name, "--name", cluster}
175 + 	cmd := exec.Command("kind", kindOptions...)
176 + 	_, err := Run(cmd)
177 + 	return err
178 + }
179 +
180 + // GetNonEmptyLines splits the given command output on line breaks
181 + // and ignores the empty lines in it.
182 + func GetNonEmptyLines(output string) []string {
183 + 	var res []string
184 + 	elements := strings.Split(output, "\n")
185 + 	for _, element := range elements {
186 + 		if element != "" {
187 + 			res = append(res, element)
188 + 		}
189 + 	}
190 +
191 + 	return res
192 + }
193 +
194 + // GetProjectDir returns the directory where the project is located
195 + func GetProjectDir() (string, error) {
196 + 	wd, err := os.Getwd()
197 + 	if err != nil {
198 + 		return wd, fmt.Errorf("failed to get current working directory: %w", err)
199 + 	}
200 + 	wd = strings.ReplaceAll(wd, "/test/e2e", "")
201 + 	return wd, nil
202 + }
203 +
204 + // UncommentCode searches for target in the file and removes the comment prefix
205 + // from the target content. The target content may span multiple lines.
206 + func UncommentCode(filename, target, prefix string) error {
207 + 	// false positive
208 + 	// nolint:gosec
209 + 	content, err := os.ReadFile(filename)
210 + 	if err != nil {
211 + 		return fmt.Errorf("failed to read file %q: %w", filename, err)
212 + 	}
213 + 	strContent := string(content)
214 +
215 + 	idx := strings.Index(strContent, target)
216 + 	if idx < 0 {
217 + 		return fmt.Errorf("unable to find the code %q to be uncommented", target)
218 + 	}
219 +
220 + 	out := new(bytes.Buffer)
221 + 	_, err = out.Write(content[:idx])
222 + 	if err != nil {
223 + 		return fmt.Errorf("failed to write to output: %w", err)
224 + 	}
225 +
226 + 	scanner := bufio.NewScanner(bytes.NewBufferString(target))
227 + 	if !scanner.Scan() {
228 + 		return nil
229 + 	}
230 + 	for {
231 + 		if _, err = out.WriteString(strings.TrimPrefix(scanner.Text(), prefix)); err != nil {
232 + 			return fmt.Errorf("failed to write to output: %w", err)
233 + 		}
234 + 		// Avoid writing a newline in case the previous line was the last in target.
235 + 		if !scanner.Scan() {
236 + 			break
237 + 		}
238 + 		if _, err = out.WriteString("\n"); err != nil {
239 + 			return fmt.Errorf("failed to write to output: %w", err)
240 + 		}
241 + 	}
242 +
243 + 	if _, err = out.Write(content[idx+len(target):]); err != nil {
244 + 		return fmt.Errorf("failed to write to output: %w", err)
245 + 	}
246 +
247 + 	// false positive
248 + 	// nolint:gosec
249 + 	if err = os.WriteFile(filename, out.Bytes(), 0644); err != nil {
250 + 		return fmt.Errorf("failed to write file %q: %w", filename, err)
251 + 	}
252 +
253 + 	return nil
254 + }
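UncommentCode is typically used to flip optional kustomize blocks on during tests. A hedged example, where the file path and content reflect the usual kubebuilder layout rather than anything in this commit:

package utils_test

import "tangled.org/evan.jarrett.net/loom/test/utils"

// exampleUncomment enables a commented-out kustomize resource, e.g. turning
//   #- ../certmanager
// into
//   - ../certmanager
func exampleUncomment() error {
	return utils.UncommentCode(
		"config/default/kustomization.yaml", // hypothetical path
		"#- ../certmanager",                 // target text to find
		"#",                                 // prefix stripped from each line of the target
	)
}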