Monorepo for Tangled
0
fork

Configure Feed

Select the types of activity you want to include in your feed.

spindle/qemu,nix: initial qemu engine impl using ssh command execution

Signed-off-by: dawn <90008@gaze.systems>

dawn c5883388 36685bea

+1248 -88
+56 -3
cmd/spindle/main.go
··· 2 2 3 3 import ( 4 4 "context" 5 + "fmt" 5 6 "log/slog" 6 7 "os" 7 8 9 + "github.com/urfave/cli/v3" 8 10 tlog "tangled.org/core/log" 9 11 "tangled.org/core/spindle" 12 + "tangled.org/core/spindle/config" 13 + "tangled.org/core/spindle/engines/qemu" 14 + "tangled.org/core/spindle/engines/qemu/bakers" 10 15 ) 11 16 12 17 func main() { 18 + cmd := &cli.Command{ 19 + Name: "spindle", 20 + Usage: "spindle continuous integration runner", 21 + Commands: []*cli.Command{ 22 + Command(), 23 + SetupQemuCommand(), 24 + BakeImageCommand(), 25 + }, 26 + DefaultCommand: "run", 27 + } 28 + 13 29 logger := tlog.New("spindle") 14 30 slog.SetDefault(logger) 15 31 16 32 ctx := context.Background() 17 33 ctx = tlog.IntoContext(ctx, logger) 18 34 19 - err := spindle.Run(ctx) 20 - if err != nil { 21 - logger.Error("error running spindle", "error", err) 35 + if err := cmd.Run(ctx, os.Args); err != nil { 36 + logger.Error(err.Error()) 22 37 os.Exit(-1) 23 38 } 24 39 } 40 + 41 + func Command() *cli.Command { 42 + return &cli.Command{ 43 + Name: "run", 44 + Usage: "run the spindle server", 45 + Action: func(ctx context.Context, cmd *cli.Command) error { 46 + return spindle.Run(ctx) 47 + }, 48 + } 49 + } 50 + 51 + func SetupQemuCommand() *cli.Command { 52 + return &cli.Command{ 53 + Name: "setup-qemu", 54 + Usage: "downloads and prepares default qemu engine images", 55 + Action: func(ctx context.Context, cmd *cli.Command) error { 56 + return qemu.SetupDefaultImages(ctx) 57 + }, 58 + } 59 + } 60 + 61 + func BakeImageCommand() *cli.Command { 62 + return &cli.Command{ 63 + Name: "bake-image", 64 + Usage: "(needs root) prepare a downloaded qemu image (e.g. extract kernel/initrd etc.)", 65 + Action: func(ctx context.Context, cmd *cli.Command) error { 66 + cfg, err := config.Load(ctx) 67 + if err != nil { 68 + return fmt.Errorf("loading config: %w", err) 69 + } 70 + name := cmd.Args().First() 71 + if name == "" { 72 + return fmt.Errorf("name argument (e.g. 
'alpine') is required") 73 + } 74 + return bakers.PrepareImage(ctx, cfg, name) 75 + }, 76 + } 77 + }
+3
flake.nix
··· 197 197 pkgs.redis 198 198 pkgs.worker-build 199 199 pkgs.cargo-generate 200 + pkgs.qemu 201 + pkgs.cloud-utils 202 + pkgs.cdrkit 200 203 (fenix.packages.${system}.combine [ 201 204 fenix.packages.${system}.stable.cargo 202 205 fenix.packages.${system}.stable.rustc
+4 -8
go.mod
··· 4 4 5 5 require ( 6 6 github.com/Blank-Xu/sql-adapter v1.1.1 7 + github.com/adrg/frontmatter v0.2.0 7 8 github.com/alecthomas/assert/v2 v2.11.0 8 9 github.com/alecthomas/chroma/v2 v2.23.1 9 10 github.com/avast/retry-go/v4 v4.6.1 10 11 github.com/aws/aws-sdk-go-v2 v1.41.4 12 + github.com/aws/aws-sdk-go-v2/config v1.32.12 11 13 github.com/aws/aws-sdk-go-v2/credentials v1.19.12 12 14 github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1 13 15 github.com/blevesearch/bleve/v2 v2.5.3 ··· 22 24 github.com/cyphar/filepath-securejoin v0.4.1 23 25 github.com/dgraph-io/ristretto v0.2.0 24 26 github.com/did-method-plc/go-didplc v0.2.2 27 + github.com/digitalocean/go-qemu v0.0.0-20250212194115-ee9b0668d242 25 28 github.com/docker/docker v28.2.2+incompatible 26 29 github.com/dustin/go-humanize v1.0.1 27 30 github.com/gliderlabs/ssh v0.3.8 28 31 github.com/go-chi/chi/v5 v5.2.0 29 32 github.com/go-enry/go-enry/v2 v2.9.2 30 33 github.com/go-git/go-git/v5 v5.14.0 31 - github.com/goki/freetype v1.0.5 32 34 github.com/google/uuid v1.6.0 33 35 github.com/gorilla/feeds v1.2.0 34 36 github.com/gorilla/sessions v1.4.0 ··· 68 70 github.com/Microsoft/go-winio v0.6.2 // indirect 69 71 github.com/ProtonMail/go-crypto v1.3.0 // indirect 70 72 github.com/RoaringBitmap/roaring/v2 v2.4.5 // indirect 71 - github.com/adrg/frontmatter v0.2.0 // indirect 72 73 github.com/alecthomas/repr v0.5.2 // indirect 73 74 github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be // indirect 74 75 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.7 // indirect 75 - github.com/aws/aws-sdk-go-v2/config v1.32.12 // indirect 76 76 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.20 // indirect 77 77 github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.20 // indirect 78 78 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.20 // indirect ··· 122 122 github.com/containerd/log v0.1.0 // indirect 123 123 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect 124 124 
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect 125 + github.com/digitalocean/go-libvirt v0.0.0-20220804181439-8648fbde413e // indirect 125 126 github.com/distribution/reference v0.6.0 // indirect 126 127 github.com/dlclark/regexp2 v1.11.5 // indirect 127 128 github.com/docker/go-connections v0.5.0 // indirect ··· 227 228 go.opentelemetry.io/auto/sdk v1.2.1 // indirect 228 229 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 // indirect 229 230 go.opentelemetry.io/otel v1.40.0 // indirect 230 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 // indirect 231 231 go.opentelemetry.io/otel/metric v1.40.0 // indirect 232 232 go.opentelemetry.io/otel/trace v1.40.0 // indirect 233 - go.opentelemetry.io/proto/otlp v1.9.0 // indirect 234 233 go.uber.org/atomic v1.11.0 // indirect 235 234 go.uber.org/multierr v1.11.0 // indirect 236 235 go.uber.org/zap v1.27.1 // indirect ··· 240 239 golang.org/x/sys v0.41.0 // indirect 241 240 golang.org/x/text v0.34.0 // indirect 242 241 golang.org/x/time v0.12.0 // indirect 243 - google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 // indirect 244 - google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 // indirect 245 - google.golang.org/grpc v1.78.0 // indirect 246 242 google.golang.org/protobuf v1.36.11 // indirect 247 243 gopkg.in/fsnotify.v1 v1.4.7 // indirect 248 244 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
+8 -32
go.sum
··· 25 25 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= 26 26 github.com/avast/retry-go/v4 v4.6.1 h1:VkOLRubHdisGrHnTu89g08aQEWEgRU7LVEop3GbIcMk= 27 27 github.com/avast/retry-go/v4 v4.6.1/go.mod h1:V6oF8njAwxJ5gRo1Q7Cxab24xs5NCWZBeaHHBklR8mA= 28 - github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU= 29 - github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0= 30 28 github.com/aws/aws-sdk-go-v2 v1.41.4 h1:10f50G7WyU02T56ox1wWXq+zTX9I1zxG46HYuG1hH/k= 31 29 github.com/aws/aws-sdk-go-v2 v1.41.4/go.mod h1:mwsPRE8ceUUpiTgF7QmQIJ7lgsKUPQOUl3o72QBrE1o= 32 - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 h1:489krEF9xIGkOaaX3CE/Be2uWjiXrkCH6gUX+bZA/BU= 33 - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4/go.mod h1:IOAPF6oT9KCsceNTvvYMNHy0+kMF8akOjeDvPENWxp4= 34 30 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.7 h1:3kGOqnh1pPeddVa/E37XNTaWJ8W6vrbYV9lJEkCnhuY= 35 31 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.7/go.mod h1:lyw7GFp3qENLh7kwzf7iMzAxDn+NzjXEAGjKS2UOKqI= 36 32 github.com/aws/aws-sdk-go-v2/config v1.32.12 h1:O3csC7HUGn2895eNrLytOJQdoL2xyJy0iYXhoZ1OmP0= 37 33 github.com/aws/aws-sdk-go-v2/config v1.32.12/go.mod h1:96zTvoOFR4FURjI+/5wY1vc1ABceROO4lWgWJuxgy0g= 38 - github.com/aws/aws-sdk-go-v2/credentials v1.19.9 h1:sWvTKsyrMlJGEuj/WgrwilpoJ6Xa1+KhIpGdzw7mMU8= 39 - github.com/aws/aws-sdk-go-v2/credentials v1.19.9/go.mod h1:+J44MBhmfVY/lETFiKI+klz0Vym2aCmIjqgClMmW82w= 40 34 github.com/aws/aws-sdk-go-v2/credentials v1.19.12 h1:oqtA6v+y5fZg//tcTWahyN9PEn5eDU/Wpvc2+kJ4aY8= 41 35 github.com/aws/aws-sdk-go-v2/credentials v1.19.12/go.mod h1:U3R1RtSHx6NB0DvEQFGyf/0sbrpJrluENHdPy1j/3TE= 42 36 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.20 h1:zOgq3uezl5nznfoK3ODuqbhVg1JzAGDUhXOsU0IDCAo= 43 37 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.20/go.mod 
h1:z/MVwUARehy6GAg/yQ1GO2IMl0k++cu1ohP9zo887wE= 44 - github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U= 45 - github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ= 46 38 github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.20 h1:CNXO7mvgThFGqOFgbNAP2nol2qAWBOGfqR/7tQlvLmc= 47 39 github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.20/go.mod h1:oydPDJKcfMhgfcgBUZaG+toBbwy8yPWubJXBVERtI4o= 48 - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik= 49 - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17/go.mod h1:EhG22vHRrvF8oXSTYStZhJc1aUgKtnJe+aOiFEV90cM= 50 40 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.20 h1:tN6W/hg+pkM+tf9XDkWUbDEjGLb+raoBMFsTodcoYKw= 51 41 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.20/go.mod h1:YJ898MhD067hSHA6xYCx5ts/jEd8BSOLtQDL3iZsvbc= 52 42 github.com/aws/aws-sdk-go-v2/internal/ini v1.8.6 h1:qYQ4pzQ2Oz6WpQ8T3HvGHnZydA72MnLuFK9tJwmrbHw= 53 43 github.com/aws/aws-sdk-go-v2/internal/ini v1.8.6/go.mod h1:O3h0IK87yXci+kg6flUKzJnWeziQUKciKrLjcatSNcY= 54 - github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17 h1:JqcdRG//czea7Ppjb+g/n4o8i/R50aTBHkA7vu0lK+k= 55 - github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17/go.mod h1:CO+WeGmIdj/MlPel2KwID9Gt7CNq4M65HUfBW97liM0= 56 44 github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.21 h1:SwGMTMLIlvDNyhMteQ6r8IJSBPlRdXX5d4idhIGbkXA= 57 45 github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.21/go.mod h1:UUxgWxofmOdAMuqEsSppbDtGKLfR04HGsD0HXzvhI1k= 58 - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E= 59 - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow= 60 46 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7 h1:5EniKhLZe4xzL7a+fU3C2tfUN4nWIqlLesfrjkuPFTY= 
61 47 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7/go.mod h1:x0nZssQ3qZSnIcePWLvcoFisRXJzcTVvYpAAdYX8+GI= 62 - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8 h1:Z5EiPIzXKewUQK0QTMkutjiaPVeVYXX7KIqhXu/0fXs= 63 - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8/go.mod h1:FsTpJtvC4U1fyDXk7c71XoDv3HlRm8V3NiYLeYLh5YE= 64 48 github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.12 h1:qtJZ70afD3ISKWnoX3xB0J2otEqu3LqicRcDBqsj0hQ= 65 49 github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.12/go.mod h1:v2pNpJbRNl4vEUWEh5ytQok0zACAKfdmKS51Hotc3pQ= 66 - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 h1:RuNSMoozM8oXlgLG/n6WLaFGoea7/CddrCfIiSA+xdY= 67 - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17/go.mod h1:F2xxQ9TZz5gDWsclCtPQscGpP0VUOc8RqgFM3vDENmU= 68 50 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.20 h1:2HvVAIq+YqgGotK6EkMf+KIEqTISmTYh5zLpYyeTo1Y= 69 51 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.20/go.mod h1:V4X406Y666khGa8ghKmphma/7C0DAtEQYhkq9z4vpbk= 70 - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17 h1:bGeHBsGZx0Dvu/eJC0Lh9adJa3M1xREcndxLNZlve2U= 71 - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17/go.mod h1:dcW24lbU0CzHusTE8LLHhRLI42ejmINN8Lcr22bwh/g= 72 52 github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.20 h1:siU1A6xjUZ2N8zjTHSXFhB9L/2OY8Dqs0xXiLjF30jA= 73 53 github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.20/go.mod h1:4TLZCmVJDM3FOu5P5TJP0zOlu9zWgDWU7aUxWbr+rcw= 74 - github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0 h1:oeu8VPlOre74lBA/PMhxa5vewaMIMmILM+RraSyB8KA= 75 - github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0/go.mod h1:5jggDlZ2CLQhwJBiZJb4vfk4f0GxWdEDruWKEJ1xOdo= 76 54 github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1 h1:csi9NLpFZXb9fxY7rS1xVzgPRGMt7MSNWeQ6eo247kE= 77 55 github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1/go.mod 
h1:qXVal5H0ChqXP63t6jze5LmFalc7+ZE7wOdLtZ0LCP0= 78 56 github.com/aws/aws-sdk-go-v2/service/signin v1.0.8 h1:0GFOLzEbOyZABS3PhYfBIx2rNBACYcKty+XGkTgw1ow= ··· 83 61 github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.17/go.mod h1:Al9fFsXjv4KfbzQHGe6V4NZSZQXecFcvaIF4e70FoRA= 84 62 github.com/aws/aws-sdk-go-v2/service/sts v1.41.9 h1:Cng+OOwCHmFljXIxpEVXAGMnBia8MSU6Ch5i9PgBkcU= 85 63 github.com/aws/aws-sdk-go-v2/service/sts v1.41.9/go.mod h1:LrlIndBDdjA/EeXeyNBle+gyCwTlizzW5ycgWnvIxkk= 86 - github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk= 87 - github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0= 88 64 github.com/aws/smithy-go v1.24.2 h1:FzA3bu/nt/vDvmnkg+R8Xl46gmzEDam6mZ1hzmwXFng= 89 65 github.com/aws/smithy-go v1.24.2/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= 90 66 github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= ··· 205 181 github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= 206 182 github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= 207 183 github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= 208 - github.com/did-method-plc/go-didplc v0.0.0-20250716171643-635da8b4e038 h1:AGh+Vn9fXhf9eo8erG1CK4+LACduPo64P1OICQLDv88= 209 - github.com/did-method-plc/go-didplc v0.0.0-20250716171643-635da8b4e038/go.mod h1:ddIXqTTSXWtj5kMsHAPj8SvbIx2GZdAkBFgFa6e6+CM= 210 184 github.com/did-method-plc/go-didplc v0.2.2 h1:53HFhTT8NCAeFmZ6fdIZCf3PGDvj7A3cDjzOOEqn5XM= 211 185 github.com/did-method-plc/go-didplc v0.2.2/go.mod h1:bKdJ21irnwNHgVLWWL32zUWqZueXYbJRUcxplZghByo= 186 + github.com/digitalocean/go-libvirt v0.0.0-20220804181439-8648fbde413e h1:SCnqm8SjSa0QqRxXbo5YY//S+OryeJioe17nK+iDZpg= 187 + github.com/digitalocean/go-libvirt v0.0.0-20220804181439-8648fbde413e/go.mod 
h1:o129ljs6alsIQTc8d6eweihqpmmrbxZ2g1jhgjhPykI= 188 + github.com/digitalocean/go-qemu v0.0.0-20250212194115-ee9b0668d242 h1:rh6rt8pF5U4iyQ86h6lRDenJoX4ht2wFnZXB9ogIrIM= 189 + github.com/digitalocean/go-qemu v0.0.0-20250212194115-ee9b0668d242/go.mod h1:LGHUtlhsY4vRGM6AHejEQKVI5e3eHbSylMHwTSpQtVw= 212 190 github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= 213 191 github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= 214 192 github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= ··· 280 258 github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= 281 259 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= 282 260 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= 283 - github.com/goki/freetype v1.0.5 h1:yi2lQeUhXnBgSMqYd0vVmPw6RnnfIeTP3N4uvaJXd7A= 284 - github.com/goki/freetype v1.0.5/go.mod h1:wKmKxddbzKmeci9K96Wknn5kjTWLyfC8tKOqAFbEX8E= 285 261 github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= 286 262 github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= 287 263 github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= ··· 333 309 github.com/gorilla/sessions v1.4.0/go.mod h1:FLWm50oby91+hl7p/wRxDth9bWSuk0qVL2emc7lT5ik= 334 310 github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= 335 311 github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= 336 - github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU= 337 - github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs= 312 + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.8 
h1:NpbJl/eVbvrGE0MJ6X16X9SAifesl6Fwxg/YmCvubRI= 313 + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.8/go.mod h1:mi7YA+gCzVem12exXy46ZespvGtX/lZmD/RLnQhVW7U= 338 314 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= 339 315 github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= 340 316 github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= ··· 636 612 go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= 637 613 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 h1:QKdN8ly8zEMrByybbQgv8cWBcdAarwmIPZ6FThrWXJs= 638 614 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0/go.mod h1:bTdK1nhqF76qiPoCCdyFIV+N/sRHYXYCTQc+3VCi3MI= 639 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F04bJHUlztTsNGJ2l+6he8c+y/b//eR0jjjemT4= 640 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4= 615 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 h1:wVZXIWjQSeSmMoxF74LzAnpVQOAFDo3pPji9Y4SOFKc= 616 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0/go.mod h1:khvBS2IggMFNwZK/6lEeHg/W57h/IX6J4URh57fuI40= 641 617 go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= 642 618 go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= 643 619 go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8=
+6 -18
nix/gomod2nix.toml
··· 238 238 [mod."github.com/did-method-plc/go-didplc"] 239 239 version = "v0.2.2" 240 240 hash = "sha256-TF5vdW1U2q5F23ELmbqhdvpNgDQjBs9i/CWJlzqWNKs=" 241 + [mod."github.com/digitalocean/go-libvirt"] 242 + version = "v0.0.0-20220804181439-8648fbde413e" 243 + hash = "sha256-xgRZCefeUNM76M9ht5FCgCf4dnwAdeP/r+FZlJ2gmeY=" 244 + [mod."github.com/digitalocean/go-qemu"] 245 + version = "v0.0.0-20250212194115-ee9b0668d242" 246 + hash = "sha256-AzY84aq9CqGqYwmGkKoi4PX08E/hRQC46mrZwKSMwXE=" 241 247 [mod."github.com/distribution/reference"] 242 248 version = "v0.6.0" 243 249 hash = "sha256-gr4tL+qz4jKyAtl8LINcxMSanztdt+pybj1T+2ulQv4=" ··· 314 320 [mod."github.com/gogo/protobuf"] 315 321 version = "v1.3.2" 316 322 hash = "sha256-pogILFrrk+cAtb0ulqn9+gRZJ7sGnnLLdtqITvxvG6c=" 317 - [mod."github.com/goki/freetype"] 318 - version = "v1.0.5" 319 - hash = "sha256-8ILVMx5w1/nV88RZPoG45QJ0jH1YEPJGLpZQdBJFqIs=" 320 323 [mod."github.com/golang-jwt/jwt/v5"] 321 324 version = "v5.3.0" 322 325 hash = "sha256-VdN9Eo74ncMFJEVUSiJ1VRPMbC09FdVGno8wAReseXU=" ··· 654 657 [mod."go.opentelemetry.io/otel"] 655 658 version = "v1.40.0" 656 659 hash = "sha256-Cu9ZCLMAd9kGsmpnvoyqwm0IkF4Uk6Xo+8OsP9l+wUQ=" 657 - [mod."go.opentelemetry.io/otel/exporters/otlp/otlptrace"] 658 - version = "v1.40.0" 659 - hash = "sha256-eu4tFL7b8o9eyzchxAtU/UQX9hIZ5jq7Z67GRCZq8Kw=" 660 660 [mod."go.opentelemetry.io/otel/metric"] 661 661 version = "v1.40.0" 662 662 hash = "sha256-+84fJNAYQ0A5DzH1YjHXCo162GgB7r19PsTHiAP9C8k=" 663 663 [mod."go.opentelemetry.io/otel/trace"] 664 664 version = "v1.40.0" 665 665 hash = "sha256-oPA3DfQ5vXhlVkBPksu/kKYBSrssefq4vtnd4vm9K8w=" 666 - [mod."go.opentelemetry.io/proto/otlp"] 667 - version = "v1.9.0" 668 - hash = "sha256-qO+oKCbSRzyNv0jBpQTiHRaI50bLrWRyyvf6lYWvjPc=" 669 666 [mod."go.uber.org/atomic"] 670 667 version = "v1.11.0" 671 668 hash = "sha256-TyYws/cSPVqYNffFX0gbDml1bD4bBGcysrUWU7mHPIY=" ··· 705 702 [mod."golang.org/x/xerrors"] 706 703 version = "v0.0.0-20240903120638-7835f813f4da" 
707 704 hash = "sha256-bE7CcrnAvryNvM26ieJGXqbAtuLwHaGcmtVMsVnksqo=" 708 - [mod."google.golang.org/genproto/googleapis/api"] 709 - version = "v0.0.0-20260209200024-4cfbd4190f57" 710 - hash = "sha256-2C7DZwLpDDdmUhVUcRDaotbtkhQFOQ9a1SsdVC8lOqc=" 711 - [mod."google.golang.org/genproto/googleapis/rpc"] 712 - version = "v0.0.0-20260209200024-4cfbd4190f57" 713 - hash = "sha256-gdgUw1LzgVOrarF1cGBUI9uoaR/d6lur2RwxUDKnOZA=" 714 - [mod."google.golang.org/grpc"] 715 - version = "v1.78.0" 716 - hash = "sha256-oKsu3+Eae5tpFOZ9K2ZzYh1FgdYdEnEIB1C+UIxSD+E=" 717 705 [mod."google.golang.org/protobuf"] 718 706 version = "v1.36.11" 719 707 hash = "sha256-7W+6jntfI/awWL3JP6yQedxqP5S9o3XvPgJ2XxxsIeE="
+55 -1
nix/modules/spindle.nix
··· 1 1 { 2 2 config, 3 3 lib, 4 + pkgs, 4 5 ... 5 6 }: let 6 7 cfg = config.services.tangled.spindle; ··· 98 99 }; 99 100 100 101 pipelines = { 102 + # todo(dawn): move these into their own pipeline configs eventually 101 103 nixery = mkOption { 102 104 type = types.str; 103 105 default = "nixery.tangled.sh"; # note: this is *not* on tangled.org yet ··· 115 117 default = "tangled-logs"; 116 118 description = "S3 bucket for workflow logs"; 117 119 }; 120 + 121 + qemu = { 122 + imageDir = mkOption { 123 + type = types.str; 124 + default = "/var/lib/spindle/images"; 125 + description = "Directory containing QEMU images"; 126 + }; 127 + overlayDir = mkOption { 128 + type = types.str; 129 + default = "/tmp"; 130 + description = "Directory to store QEMU temporary overlay files"; 131 + }; 132 + defaultImage = mkOption { 133 + type = types.str; 134 + default = "ubuntu-24.04"; 135 + description = "Default image to use if none is specified in workflow"; 136 + }; 137 + enableKVM = mkOption { 138 + type = types.bool; 139 + default = true; 140 + description = "Enable KVM hardware acceleration"; 141 + }; 142 + memory = mkOption { 143 + type = types.str; 144 + default = "2048"; 145 + description = "Amount of RAM to assign per-VM (supports formats like 512 or 2G)"; 146 + }; 147 + smp = mkOption { 148 + type = types.int; 149 + default = 2; 150 + description = "Number of CPU cores to assign per-VM"; 151 + }; 152 + }; 118 153 }; 119 154 120 155 environmentFile = mkOption { ··· 133 168 }; 134 169 }; 135 170 136 - config = mkIf cfg.enable { 171 + config = let 172 + deps = [pkgs.cdrkit pkgs.qemu]; 173 + in mkIf cfg.enable { 174 + environment.systemPackages = [ 175 + (pkgs.writeShellScriptBin "spindle" '' 176 + export PATH="${lib.makeBinPath deps}:$PATH" 177 + ${lib.optionalString (cfg.environmentFile != null) "set -a; source ${cfg.environmentFile}; set +a"} 178 + ${lib.concatMapStringsSep "\n" (e: "export ${e}") config.systemd.services.spindle.serviceConfig.Environment} 179 + exec 
${cfg.package}/bin/spindle "$@" 180 + '') 181 + ]; 182 + 137 183 virtualisation.docker.enable = true; 138 184 139 185 systemd.services.spindle = { 140 186 description = "spindle service"; 141 187 after = ["network.target" "docker.service"]; 142 188 wantedBy = ["multi-user.target"]; 189 + path = deps; 143 190 serviceConfig = { 144 191 LogsDirectory = "spindle"; 145 192 StateDirectory = "spindle"; ··· 160 207 "SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=${cfg.server.secrets.openbao.mount}" 161 208 "SPINDLE_NIXERY_PIPELINES_NIXERY=${cfg.pipelines.nixery}" 162 209 "SPINDLE_NIXERY_PIPELINES_WORKFLOW_TIMEOUT=${cfg.pipelines.workflowTimeout}" 210 + "SPINDLE_QEMU_PIPELINES_IMAGE_DIR=${cfg.pipelines.qemu.imageDir}" 211 + "SPINDLE_QEMU_PIPELINES_OVERLAY_DIR=${cfg.pipelines.qemu.overlayDir}" 212 + "SPINDLE_QEMU_PIPELINES_DEFAULT_IMAGE=${cfg.pipelines.qemu.defaultImage}" 213 + "SPINDLE_QEMU_PIPELINES_ENABLE_KVM=${lib.boolToString cfg.pipelines.qemu.enableKVM}" 214 + "SPINDLE_QEMU_PIPELINES_MEMORY=${cfg.pipelines.qemu.memory}" 215 + "SPINDLE_QEMU_PIPELINES_SMP=${toString cfg.pipelines.qemu.smp}" 216 + "SPINDLE_QEMU_PIPELINES_WORKFLOW_TIMEOUT=${cfg.pipelines.workflowTimeout}" 163 217 "SPINDLE_S3_LOG_BUCKET=${cfg.pipelines.logBucket}" 164 218 ]; 165 219 ExecStart = "${cfg.package}/bin/spindle";
+21 -3
nix/vm.nix
··· 4 4 hostSystem, 5 5 self, 6 6 }: let 7 + lib = nixpkgs.lib; 8 + 7 9 envVar = name: let 8 10 var = builtins.getEnv name; 9 11 in ··· 19 21 20 22 plcUrl = envVarOr "TANGLED_VM_PLC_URL" "https://plc.directory"; 21 23 jetstream = envVarOr "TANGLED_VM_JETSTREAM_ENDPOINT" "wss://jetstream1.us-west.bsky.network/subscribe"; 24 + 25 + checkFile = value: path: if builtins.pathExists path then lib.hasPrefix value (builtins.readFile path) else false; 26 + _nestedVirt = 27 + (checkFile "1" /sys/module/kvm_amd/parameters/nested) 28 + || (checkFile "Y" /sys/module/kvm_intel/parameters/nested); 29 + nestedVirtWarning = '' 30 + KVM nested virtualisation is not enabled on this host. 31 + You should enable it if you can for better performance when testing the QEMU spindle engine! 32 + ''; 33 + nestedVirt = lib.warnIf (!_nestedVirt) nestedVirtWarning _nestedVirt; 22 34 in 23 - nixpkgs.lib.nixosSystem { 35 + lib.nixosSystem { 24 36 inherit system; 25 37 modules = [ 26 38 self.nixosModules.knot ··· 36 48 host.pkgs = import nixpkgs {system = hostSystem;}; 37 49 38 50 graphics = false; 39 - memorySize = 2048; 40 - diskSize = 10 * 1024; 51 + memorySize = 3072; 52 + diskSize = 20 * 1024; 41 53 cores = 2; 54 + qemu.options = lib.optionals nestedVirt ["-enable-kvm" "-cpu host"]; 55 + 42 56 forwardPorts = [ 43 57 # ssh 44 58 { ··· 134 148 135 149 pipelines = { 136 150 logBucket = envVarOr "SPINDLE_S3_LOG_BUCKET" ""; 151 + qemu = { 152 + memory = "1024"; 153 + smp = 2; 154 + }; 137 155 }; 138 156 }; 139 157 services.postgresql = {
+11
spindle/config/config.go
··· 45 45 LogBucket string `env:"LOG_BUCKET"` 46 46 } 47 47 48 + type QemuPipelines struct { 49 + ImageDir string `env:"IMAGE_DIR, required"` 50 + OverlayDir string `env:"OVERLAY_DIR, default="` // where qemu snapshot overlays will live 51 + DefaultImage string `env:"DEFAULT_IMAGE, default=ubuntu-24.04"` 52 + EnableKVM bool `env:"ENABLE_KVM, default=true"` 53 + Memory string `env:"MEMORY, default=2048"` 54 + SMP int `env:"SMP, default=2"` 55 + WorkflowTimeout string `env:"WORKFLOW_TIMEOUT, default=5m"` 56 + } 57 + 48 58 type Config struct { 49 59 Server Server `env:",prefix=SPINDLE_SERVER_"` 50 60 NixeryPipelines NixeryPipelines `env:",prefix=SPINDLE_NIXERY_PIPELINES_"` 61 + QemuPipelines QemuPipelines `env:",prefix=SPINDLE_QEMU_PIPELINES_"` 51 62 S3 S3 `env:",prefix=SPINDLE_S3_"` 52 63 } 53 64
+102
spindle/engines/qemu/bakers/alpine.go
··· 1 + package bakers 2 + 3 + import ( 4 + "context" 5 + "encoding/json" 6 + "fmt" 7 + "io" 8 + "os" 9 + "os/exec" 10 + "path/filepath" 11 + 12 + "tangled.org/core/log" 13 + ) 14 + 15 + type AlpineBaker struct { 16 + nbd *NBDManager 17 + } 18 + 19 + func copyFile(src, dst string) error { 20 + in, err := os.Open(src) 21 + if err != nil { 22 + return err 23 + } 24 + defer in.Close() 25 + 26 + out, err := os.Create(dst) 27 + if err != nil { 28 + return err 29 + } 30 + defer out.Close() 31 + 32 + if _, err := io.Copy(out, in); err != nil { 33 + return err 34 + } 35 + return out.Sync() 36 + } 37 + 38 + func (p *AlpineBaker) Prepare(ctx context.Context, dir string) error { 39 + l := log.FromContext(ctx) 40 + diskPath := filepath.Join(dir, "disk.qcow2") 41 + 42 + nbd, err := p.nbd.Connect(ctx, l, diskPath) 43 + if err != nil { 44 + return err 45 + } 46 + defer nbd.Close() 47 + 48 + // alpine images have no partition table, so we mount the base device directly 49 + mntDir, err := nbd.Mount(ctx, nbd.Device()) 50 + if err != nil { 51 + return err 52 + } 53 + defer nbd.Unmount(mntDir) 54 + 55 + resolvDest := filepath.Join(mntDir, "etc/resolv.conf") 56 + if err := os.WriteFile(resolvDest, nil, 0644); err != nil { 57 + return fmt.Errorf("creating resolv.conf mount point: %w", err) 58 + } 59 + if out, err := exec.CommandContext(ctx, "mount", "--bind", "/etc/resolv.conf", resolvDest).CombinedOutput(); err != nil { 60 + return fmt.Errorf("bind mount resolv.conf: %s: %w", string(out), err) 61 + } 62 + defer nbd.Unmount(resolvDest) 63 + 64 + l.Info("installing packages...") 65 + if out, err := exec.CommandContext(ctx, "chroot", mntDir, "/sbin/apk", "add", "--no-cache", "git", "curl").CombinedOutput(); err != nil { 66 + return fmt.Errorf("installing packages: %s: %w", string(out), err) 67 + } 68 + 69 + l.Info("extracting kernel and initrd...") 70 + bootDir := filepath.Join(mntDir, "boot") 71 + kernelSource := filepath.Join(bootDir, "vmlinuz-virt") 72 + if _, err := 
os.Stat(kernelSource); os.IsNotExist(err) { 73 + return fmt.Errorf("kernel 'vmlinuz-virt' not found in %s", bootDir) 74 + } 75 + initrdSource := filepath.Join(bootDir, "initramfs-virt") 76 + if _, err := os.Stat(initrdSource); os.IsNotExist(err) { 77 + return fmt.Errorf("initrd 'initramfs-virt' not found in %s", bootDir) 78 + } 79 + 80 + if err := copyFile(kernelSource, filepath.Join(dir, "kernel")); err != nil { 81 + return fmt.Errorf("extracting kernel: %w", err) 82 + } 83 + if err := copyFile(initrdSource, filepath.Join(dir, "initrd")); err != nil { 84 + return fmt.Errorf("extracting initrd: %w", err) 85 + } 86 + 87 + configPath := filepath.Join(dir, "config.json") 88 + metadata := ImageMetadata{ 89 + Cmdline: "root=/dev/vda rootfstype=ext4 rw console=ttyS0 modules=sd-mod,usb-storage,ext4,virtio_pci,virtio_blk", 90 + Shell: "/bin/ash", 91 + } 92 + b, err := json.MarshalIndent(metadata, "", " ") 93 + if err != nil { 94 + return fmt.Errorf("marshaling image metadata: %w", err) 95 + } 96 + if err := os.WriteFile(configPath, b, 0644); err != nil { 97 + return fmt.Errorf("writing image metadata: %w", err) 98 + } 99 + 100 + l.Info("alpine preparation complete") 101 + return nil 102 + }
+59
spindle/engines/qemu/bakers/baker.go
··· 1 + package bakers 2 + 3 + import ( 4 + "context" 5 + "fmt" 6 + "os" 7 + "os/exec" 8 + "path/filepath" 9 + "strings" 10 + 11 + "tangled.org/core/log" 12 + "tangled.org/core/spindle/config" 13 + ) 14 + 15 + type ImageMetadata struct { 16 + Cmdline string `json:"cmdline"` 17 + Shell string `json:"shell"` 18 + } 19 + 20 + type ImageBaker interface { 21 + Prepare(ctx context.Context, imageDir string) error 22 + } 23 + 24 + var ( 25 + nbd = &NBDManager{} 26 + preparers = map[string]ImageBaker{ 27 + "alpine": &AlpineBaker{nbd: nbd}, 28 + "ubuntu": &UbuntuBaker{}, 29 + } 30 + ) 31 + 32 + func PrepareImage(ctx context.Context, cfg *config.Config, name string) error { 33 + name = strings.ToLower(name) 34 + 35 + imageDir := filepath.Join(cfg.QemuPipelines.ImageDir, name) 36 + if _, err := os.Stat(imageDir); os.IsNotExist(err) { 37 + return fmt.Errorf("image directory %s does not exist; did you run 'spindle setup'?", imageDir) 38 + } 39 + 40 + l := log.FromContext(ctx) 41 + l.Info("preparing image", "name", name) 42 + 43 + diskPath := filepath.Join(imageDir, "disk.qcow2") 44 + // todo(dawn): this should be configurable 45 + // cloud-init will expand the partitions itself! so we don't have to do anything else 46 + l.Info("resizing disk to 8G...", "path", diskPath) 47 + if out, err := exec.CommandContext(ctx, "qemu-img", "resize", diskPath, "8G").CombinedOutput(); err != nil { 48 + return fmt.Errorf("qemu-img resize: %s: %w", string(out), err) 49 + } 50 + 51 + // dispatch based on the prefix (e.g. 'alpine' from 'alpine-3.23') 52 + parts := strings.Split(name, "-") 53 + if preparer, ok := preparers[parts[0]]; ok { 54 + l.Info("running preparer", "type", parts[0]) 55 + return preparer.Prepare(ctx, imageDir) 56 + } 57 + 58 + return nil 59 + }
+91
spindle/engines/qemu/bakers/nbd.go
··· 1 + package bakers 2 + 3 + import ( 4 + "context" 5 + "fmt" 6 + "log/slog" 7 + "os" 8 + "os/exec" 9 + "sync" 10 + "time" 11 + ) 12 + 13 + type NBDManager struct { 14 + mu sync.Mutex 15 + } 16 + 17 + type NBDHandle struct { 18 + mgr *NBDManager 19 + device string 20 + l *slog.Logger 21 + } 22 + 23 + func (n *NBDManager) Connect(ctx context.Context, l *slog.Logger, diskPath string) (*NBDHandle, error) { 24 + n.mu.Lock() 25 + 26 + if _, err := os.Stat("/dev/nbd0"); os.IsNotExist(err) { 27 + l.Info("nbd module not loaded, trying to load it...") 28 + if out, err := exec.Command("modprobe", "nbd", "max_part=8").CombinedOutput(); err != nil { 29 + n.mu.Unlock() 30 + return nil, fmt.Errorf("modprobe nbd: %s: %w", string(out), err) 31 + } 32 + } 33 + 34 + l.Info("connecting disk via nbd...", "disk", diskPath) 35 + if out, err := exec.CommandContext(ctx, "qemu-nbd", "--connect=/dev/nbd0", diskPath).CombinedOutput(); err != nil { 36 + n.mu.Unlock() 37 + return nil, fmt.Errorf("qemu-nbd connect: %s: %w", string(out), err) 38 + } 39 + 40 + waitCtx, cancel := context.WithTimeout(ctx, 3*time.Second) 41 + defer cancel() 42 + for { 43 + if _, err := os.Stat("/dev/nbd0"); err == nil { 44 + break 45 + } 46 + select { 47 + case <-waitCtx.Done(): 48 + n.mu.Unlock() 49 + return nil, fmt.Errorf("timeout waiting for /dev/nbd0 to appear") 50 + case <-time.After(50 * time.Millisecond): 51 + } 52 + } 53 + 54 + return &NBDHandle{mgr: n, device: "/dev/nbd0", l: l}, nil 55 + } 56 + 57 + // mounts the device node specified 58 + func (h *NBDHandle) Mount(ctx context.Context, device string) (string, error) { 59 + mntDir, err := os.MkdirTemp("", "spindle-mnt-*") 60 + if err != nil { 61 + return "", err 62 + } 63 + 64 + h.l.Info("mounting nbd device...", "device", device) 65 + if out, err := exec.CommandContext(ctx, "mount", device, mntDir).CombinedOutput(); err != nil { 66 + os.RemoveAll(mntDir) 67 + return "", fmt.Errorf("mount %s: %s: %w", device, string(out), err) 68 + } 69 + 70 + return 
mntDir, nil 71 + } 72 + 73 + func (h *NBDHandle) Unmount(mntDir string) { 74 + if out, err := exec.Command("umount", mntDir).CombinedOutput(); err != nil { 75 + h.l.Error("umount failed", "error", err, "output", string(out)) 76 + } 77 + os.RemoveAll(mntDir) 78 + } 79 + 80 + // returns the base node (/dev/nbd0) 81 + func (h *NBDHandle) Device() string { 82 + return h.device 83 + } 84 + 85 + // disconnects the nbd and releases the lock 86 + func (h *NBDHandle) Close() { 87 + defer h.mgr.mu.Unlock() 88 + if out, err := exec.Command("qemu-nbd", "--disconnect", h.device).CombinedOutput(); err != nil { 89 + h.l.Error("qemu-nbd disconnect failed", "error", err, "output", string(out)) 90 + } 91 + }
+36
spindle/engines/qemu/bakers/ubuntu.go
··· 1 + package bakers 2 + 3 + import ( 4 + "context" 5 + "encoding/json" 6 + "fmt" 7 + "os" 8 + "path/filepath" 9 + 10 + "tangled.org/core/log" 11 + ) 12 + 13 + type UbuntuBaker struct{} 14 + 15 + func (p *UbuntuBaker) Prepare(ctx context.Context, dir string) error { 16 + l := log.FromContext(ctx) 17 + 18 + _, kerr := os.Stat(filepath.Join(dir, "kernel")) 19 + _, ierr := os.Stat(filepath.Join(dir, "initrd")) 20 + // ubuntu preparer assumes kernel and initrd are already downloaded 21 + if os.IsNotExist(kerr) || os.IsNotExist(ierr) { 22 + return fmt.Errorf("missing 'kernel' and/or 'initrd' in %s; please use ubuntu cloud images that provide separate kernel/initrd files (see 'unpacked/' on cloud-images.ubuntu.com)", dir) 23 + } 24 + 25 + configPath := filepath.Join(dir, "config.json") 26 + metadata := ImageMetadata{ 27 + Cmdline: "root=LABEL=cloudimg-rootfs rw console=ttyS0", 28 + Shell: "/bin/bash", 29 + } 30 + if b, err := json.MarshalIndent(metadata, "", " "); err == nil { 31 + _ = os.WriteFile(configPath, b, 0644) 32 + } 33 + 34 + l.Info("ubuntu configuration created") 35 + return nil 36 + }
+42
spindle/engines/qemu/cloudinit.go
··· 1 + package qemu 2 + 3 + import ( 4 + "fmt" 5 + "os" 6 + "os/exec" 7 + "path/filepath" 8 + ) 9 + 10 + func generateSeedISO(dir string, enginePubKey string) error { 11 + metaData := "instance-id: spindle-vm\nlocal-hostname: spindle\n" 12 + userData := fmt.Sprintf(`#cloud-config 13 + users: 14 + - name: build 15 + sudo: ALL=(ALL) NOPASSWD:ALL 16 + shell: /bin/sh 17 + ssh_authorized_keys: 18 + - %s 19 + `, enginePubKey) 20 + 21 + metaDataPath := filepath.Join(dir, "meta-data") 22 + if err := os.WriteFile(metaDataPath, []byte(metaData), 0o644); err != nil { 23 + return fmt.Errorf("writing meta-data: %w", err) 24 + } 25 + 26 + userDataPath := filepath.Join(dir, "user-data") 27 + if err := os.WriteFile(userDataPath, []byte(userData), 0o644); err != nil { 28 + return fmt.Errorf("writing user-data: %w", err) 29 + } 30 + 31 + // nocloud source expects volid "cidata" with joliet and rock ridge extensions 32 + cmd := exec.Command("genisoimage", "-output", filepath.Join(dir, "cloud-init.iso"), "-volid", "cidata", "-joliet", "-rock", "meta-data", "user-data") 33 + cmd.Dir = dir 34 + cmd.Stdout = os.Stdout 35 + cmd.Stderr = os.Stderr 36 + 37 + if err := cmd.Run(); err != nil { 38 + return fmt.Errorf("running genisoimage: %w", err) 39 + } 40 + 41 + return nil 42 + }
+493
spindle/engines/qemu/engine.go
package qemu

import (
	"context"
	"crypto/ed25519"
	"crypto/rand"
	"encoding/json"
	"fmt"
	"log/slog"
	"net"
	"os"
	"os/exec"
	"path/filepath"
	"sync"
	"time"

	"github.com/digitalocean/go-qemu/qmp"
	"golang.org/x/crypto/ssh"
	"gopkg.in/yaml.v3"

	"tangled.org/core/api/tangled"
	"tangled.org/core/log"
	"tangled.org/core/spindle/config"
	"tangled.org/core/spindle/engine"
	"tangled.org/core/spindle/engines/qemu/bakers"
	"tangled.org/core/spindle/models"
	"tangled.org/core/spindle/secrets"
)

// ResolvedImage holds the on-disk artifact paths and boot metadata for a
// named image directory, as discovered by resolveImage.
type ResolvedImage struct {
	kernel  string
	initrd  string
	disk    string
	cmdline string
	shell   string
}

// cleanupFunc releases one resource acquired while setting up a workflow.
type cleanupFunc func(context.Context) error

// Engine runs workflow steps inside a qemu virtual machine, executing each
// step over ssh as the "build" user provisioned through cloud-init.
type Engine struct {
	l   *slog.Logger
	cfg *config.Config
	// cleanupMu guards cleanup; cleanup maps a workflow id to the list of
	// release funcs registered during setup, executed LIFO on destroy.
	cleanupMu sync.Mutex
	cleanup   map[string][]cleanupFunc
}

// New constructs a qemu Engine bound to the given config; the logger is
// taken from ctx.
func New(ctx context.Context, cfg *config.Config) (*Engine, error) {
	return &Engine{
		l:       log.FromContext(ctx).With("component", "engine.qemu"),
		cfg:     cfg,
		cleanup: make(map[string][]cleanupFunc),
	}, nil
}

// Step is the qemu engine's models.Step implementation: a named shell
// command plus per-step environment.
type Step struct {
	name        string
	kind        models.StepKind
	command     string
	environment map[string]string
}

func (s Step) Name() string          { return s.name }
func (s Step) Command() string       { return s.command }
func (s Step) Kind() models.StepKind { return s.kind }

// setupSteps accumulates system steps that run before the user's steps.
type setupSteps []models.Step

func (ss *setupSteps) addStep(step models.Step) { *ss = append(*ss, step) }

// InitWorkflow parses the raw workflow manifest, prepends the repo clone
// step, resolves the requested image, and stores the resolved image paths in
// the workflow's Data as a vmState (no VM is started yet).
// NOTE(review): *tpl.TriggerMetadata is dereferenced unchecked — confirm the
// caller guarantees it is non-nil.
func (e *Engine) InitWorkflow(twf tangled.Pipeline_Workflow, tpl tangled.Pipeline) (*models.Workflow, error) {
	swf := &models.Workflow{}
	var dwf manifestWorkflow

	if err := yaml.Unmarshal([]byte(twf.Raw), &dwf); err != nil {
		return nil, err
	}

	for _, dstep := range dwf.Steps {
		swf.Steps = append(swf.Steps, Step{
			name:        dstep.Name,
			kind:        models.StepKindUser,
			command:     dstep.Command,
			environment: dstep.Environment,
		})
	}
	swf.Name = twf.Name
	swf.Environment = dwf.Environment

	setup := &setupSteps{}
	setup.addStep(models.BuildCloneStep(twf, *tpl.TriggerMetadata, e.cfg.Server.Dev))

	swf.Steps = append(*setup, swf.Steps...)

	img, err := e.resolveImage(dwf.Image)
	if err != nil {
		return nil, err
	}

	swf.Data = vmState{
		kernel:  img.kernel,
		initrd:  img.initrd,
		disk:    img.disk,
		cmdline: img.cmdline,
		shell:   img.shell,
	}

	return swf, nil
}

// discover and resolve kernel, initrd, and disk from an image subfolder.
// disk.qcow2 and a configured shell are mandatory; a kernel additionally
// requires initrd and cmdline (direct-boot), otherwise the VM boots via BIOS.
func (e *Engine) resolveImage(name string) (ResolvedImage, error) {
	var img ResolvedImage
	if name == "" {
		name = e.cfg.QemuPipelines.DefaultImage
	}
	if name == "" {
		return img, fmt.Errorf("no image specified in workflow and SPINDLE_QEMU_PIPELINES_DEFAULT_IMAGE is not set")
	}

	imageDir := filepath.Join(e.cfg.QemuPipelines.ImageDir, name)
	kernelPath := filepath.Join(imageDir, "kernel")
	initrdPath := filepath.Join(imageDir, "initrd")
	diskPath := filepath.Join(imageDir, "disk.qcow2")
	configPath := filepath.Join(imageDir, "config.json")

	if _, err := os.Stat(diskPath); err == nil {
		img.disk = diskPath
	}
	if _, err := os.Stat(kernelPath); err == nil {
		img.kernel = kernelPath
	}
	if _, err := os.Stat(initrdPath); err == nil {
		img.initrd = initrdPath
	}
	// config.json is produced by the image baker; a malformed file is
	// treated the same as a missing one and caught by the checks below.
	if b, err := os.ReadFile(configPath); err == nil {
		var meta bakers.ImageMetadata
		if err := json.Unmarshal(b, &meta); err == nil {
			if meta.Cmdline != "" {
				img.cmdline = meta.Cmdline
			}
			if meta.Shell != "" {
				img.shell = meta.Shell
			}
		}
	}

	if img.disk == "" {
		return img, fmt.Errorf("missing 'disk.qcow2' in %s", imageDir)
	}
	if img.kernel != "" && (img.initrd == "" || img.cmdline == "") {
		return img, fmt.Errorf("kernel requires initrd and cmdline, but 'initrd' and/or 'cmdline' is missing for %s", name)
	}
	if img.shell == "" {
		return img, fmt.Errorf("shell is not configured for %s", name)
	}
	return img, nil
}

// SetupWorkflow boots the VM for a workflow: it generates a one-off ssh
// keypair, builds a cloud-init seed iso carrying the public key, launches
// qemu with a user-net port forward to guest sshd, confirms the guest is
// running over QMP, and waits until ssh is reachable. Live handles are
// stored back into wf.Data; teardown actions are registered per-resource via
// registerCleanup and executed by DestroyWorkflow.
func (e *Engine) SetupWorkflow(ctx context.Context, wid models.WorkflowId, wf *models.Workflow, wfLogger models.WorkflowLogger) error {
	l := e.l.With("workflow", wid)
	l.Info("setting up qemu workflow")

	setupStep := Step{name: "qemu vm setup", kind: models.StepKindSystem}
	setupStepIdx := -1

	wfLogger.ControlWriter(setupStepIdx, setupStep, models.StepStatusStart).Write([]byte{0})
	defer wfLogger.ControlWriter(setupStepIdx, setupStep, models.StepStatusEnd).Write([]byte{0})

	// generate ed25519 keypair for the engine to ssh into the guest
	pubKey, privKey, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		return fmt.Errorf("generating keypair: %w", err)
	}

	sshSigner, err := ssh.NewSignerFromKey(privKey)
	if err != nil {
		return err
	}
	sshPubKey, err := ssh.NewPublicKey(pubKey)
	if err != nil {
		return err
	}
	enginePubKeyStr := string(ssh.MarshalAuthorizedKey(sshPubKey))

	// the tempdir is configurable since some systems may have tmpfs as /tmp,
	// which is not ideal if a workflow uses a lot of space.
	targetTempDir := e.cfg.QemuPipelines.OverlayDir
	if targetTempDir == "" {
		targetTempDir = os.TempDir()
	}

	tempDir, err := os.MkdirTemp(targetTempDir, "qemu-wf-*")
	if err != nil {
		return err
	}
	e.registerCleanup(wid, func(ctx context.Context) error {
		return os.RemoveAll(tempDir)
	})

	state := wf.Data.(vmState)

	// generate nocloud seed iso containing engine's authorized_keys
	if err := generateSeedISO(tempDir, enginePubKeyStr); err != nil {
		return fmt.Errorf("generating seed iso: %w", err)
	}

	// note: there is an inherent TOCTOU race here between releasing the port
	// and qemu binding it. nothing we can do about this in userspace without
	// privilege, but in practice it's rarely an issue.
	nl, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return fmt.Errorf("finding free port: %w", err)
	}
	sshPort := nl.Addr().(*net.TCPAddr).Port
	nl.Close()

	qmpSock := filepath.Join(tempDir, "qmp.sock")
	// todo(dawn): ideally would be nice if we used qemu with the microvm enabled here...
	// but that is not compatible with cloud-init since it expects real hw enumeration...
	// and we would not be able to use standard cloud images, which is kind of annoying.
	// we also have to manage a virtiofsd process for the filesystem, instead of having to
	// manage a qcow overlay (see https://ubuntu.com/server/docs/explanation/virtualisation/qemu-microvm/).
	// we would also need to be able to have some scripts for generating our own images
	// (though we should already do this anyway since some like alpine don't provide
	// "cloud ready" image files, at least with kernel and initrd)
	argv := []string{
		// todo(dawn): ideally probably have "tiers" and let the spindle reconcile the tier using
		// what the user wants and what the user has exposed to them by the spindle operator?
		"-m", e.cfg.QemuPipelines.Memory, "-smp", fmt.Sprintf("%d", e.cfg.QemuPipelines.SMP),
		"-display", "none", "-nodefaults", "-no-user-config",
		// use snapshot=on to do copy-on-write without having us manage qcow overlays manually
		"-drive", fmt.Sprintf("file=%s,media=disk,snapshot=on,if=virtio", state.disk),
		"-drive", fmt.Sprintf("file=%s,media=cdrom", filepath.Join(tempDir, "cloud-init.iso")),
		"-netdev", fmt.Sprintf("user,id=net0,hostfwd=tcp:127.0.0.1:%d-:22", sshPort),
		"-device", "virtio-net-pci,netdev=net0",
		"-qmp", fmt.Sprintf("unix:%s,server,nowait", qmpSock),
		"-monitor", "none",
	}

	// in dev mode, mirror the guest serial console on our stdio for debugging
	if e.cfg.Server.Dev {
		argv = append(argv, "-serial", "stdio")
	} else {
		argv = append(argv, "-serial", "none")
	}

	// support booting using qemu bios still, but otherwise we make it faster!
	// in case someone wants to do this for whatever reason...
	if state.kernel != "" {
		argv = append(argv, "-kernel", state.kernel)
		if state.initrd != "" {
			argv = append(argv, "-initrd", state.initrd)
		}
		if state.cmdline != "" {
			argv = append(argv, "-append", state.cmdline)
		}
	} else {
		// if we are booting with bios, we need to tell it to boot from the disk
		argv = append(argv, "-boot", "order=c")
	}

	// kvm is only used when both requested in config and actually usable
	enableKVM := e.cfg.QemuPipelines.EnableKVM
	if _, err := os.Stat("/dev/kvm"); err != nil {
		if enableKVM {
			l.Warn("kvm was requested but /dev/kvm is not accessible; falling back to software emulation", "error", err)
		}
		enableKVM = false
	}
	if enableKVM {
		argv = append(argv, "-enable-kvm", "-cpu", "host")
	}

	// todo(dawn): same with above, we assume x86_64 here, but should allow other archs,
	// probably just auto detect as a default
	qemuCmd := exec.Command("qemu-system-x86_64", argv...)
	qemuCmd.Env = append(os.Environ(), "TMPDIR="+tempDir)
	qemuCmd.Stdout = os.Stdout
	qemuCmd.Stderr = os.Stderr

	startBoot := time.Now()
	if err := qemuCmd.Start(); err != nil {
		return fmt.Errorf("starting qemu: %w", err)
	}

	var mon *qmp.SocketMonitor
	qmpCtx, cancelQmp := context.WithTimeout(ctx, 10*time.Second)
	defer cancelQmp()

	// retry qmp connect until the emulator is ready with its unix socket
	for {
		mon, err = qmp.NewSocketMonitor("unix", qmpSock, 2*time.Second)
		if err == nil {
			if err = mon.Connect(); err == nil {
				break
			}
		}
		select {
		case <-qmpCtx.Done():
			_ = qemuCmd.Process.Kill()
			return fmt.Errorf("qmp connect timeout: %w", err)
		case <-time.After(10 * time.Millisecond):
		}
	}

	// check if the guest is running
	raw, err := mon.Run([]byte(`{"execute":"query-status"}`))
	if err != nil {
		_ = qemuCmd.Process.Kill()
		_ = mon.Disconnect()
		return fmt.Errorf("qmp query-status failed: %w", err)
	}
	var resp map[string]any
	if err := json.Unmarshal(raw, &resp); err != nil {
		_ = qemuCmd.Process.Kill()
		_ = mon.Disconnect()
		return fmt.Errorf("qmp query-status parse failed: %w", err)
	}
	status, _ := resp["return"].(map[string]any)["status"].(string)
	l.Info("qemu guest status", "status", status)
	if status != "running" {
		_ = qemuCmd.Process.Kill()
		_ = mon.Disconnect()
		return fmt.Errorf("qemu guest not running (status: %s)", status)
	}

	// host key checking is intentionally disabled: the guest is a fresh,
	// locally-spawned VM reachable only via the loopback port forward.
	sshConfig := &ssh.ClientConfig{
		User:            "build",
		Auth:            []ssh.AuthMethod{ssh.PublicKeys(sshSigner)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
		Timeout:         5 * time.Second,
	}

	bootCtx, cancelBoot := context.WithTimeout(ctx, 90*time.Second)
	defer cancelBoot()

	// backoff until the guest finishes booting and starts sshd
	var sshClient *ssh.Client
	for {
		sshClient, err = ssh.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", sshPort), sshConfig)
		if err == nil {
			bootDuration := time.Since(startBoot).Round(time.Millisecond)
			l.Debug("vm booted and ssh ready", "elapsed", bootDuration)
			break
		}
		select {
		case <-bootCtx.Done():
			_ = qemuCmd.Process.Kill()
			_ = mon.Disconnect()
			return ErrBootTimeout
		case <-time.After(100 * time.Millisecond):
		}
	}

	e.registerCleanup(wid, func(ctx context.Context) error {
		_ = sshClient.Close()

		// graceful powerdown so guest can sync filesystem
		_, _ = mon.Run([]byte(`{"execute": "system_powerdown"}`))

		done := make(chan error, 1)
		go func() {
			done <- qemuCmd.Wait()
		}()
		select {
		case <-done:
		case <-time.After(time.Second):
			_ = qemuCmd.Process.Kill()
			// drain Wait after kill to avoid zombie
			<-done
		}

		_ = mon.Disconnect()
		return nil
	})

	// NOTE(review): cmdline is not carried into the new vmState; nothing
	// after setup appears to read it, but confirm that is intentional.
	wf.Data = vmState{
		process:   qemuCmd.Process,
		qmpMon:    mon,
		sshClient: sshClient,
		sshPort:   sshPort,
		tempDir:   tempDir,
		kernel:    state.kernel,
		initrd:    state.initrd,
		disk:      state.disk,
		shell:     state.shell,
	}

	return nil
}

// RunStep executes one workflow step in the guest over ssh, streaming stdout
// and stderr to wfLogger. Environment variables (workflow-level, secrets,
// then step-level) are prepended to the command line as VAR=value pairs. On
// ctx cancellation the remote process is SIGKILLed and engine.ErrTimedOut is
// returned; a non-zero exit maps to engine.ErrWorkflowFailed.
func (e *Engine) RunStep(ctx context.Context, wid models.WorkflowId, w *models.Workflow, idx int, secrets []secrets.UnlockedSecret, wfLogger models.WorkflowLogger) error {
	state := w.Data.(vmState)
	step := w.Steps[idx]

	var envStr string
	appendEnv := func(k, v string) {
		// we pass the env vars to shell instead of through ssh
		// (we would need to make sshd config have AcceptEnv in seed iso but that kinda sucks
		// because that would reload sshd during boot, which is not ideal, so we can just do this.)
		// todo(dawn): make images be prepared with sshd configured
		// NOTE(review): only the value is shellescaped — a key containing
		// shell metacharacters would break or inject into the command line;
		// confirm keys are validated upstream.
		envStr += fmt.Sprintf("%s=%s ", k, shellescape(v))
	}

	for k, v := range w.Environment {
		appendEnv(k, v)
	}
	for _, s := range secrets {
		appendEnv(s.Key, s.Value)
	}
	if s, ok := step.(Step); ok {
		for k, v := range s.environment {
			appendEnv(k, v)
		}
	}

	session, err := state.sshClient.NewSession()
	if err != nil {
		return fmt.Errorf("ssh new session: %w", err)
	}
	defer session.Close()

	stdoutPipe, err := session.StdoutPipe()
	if err != nil {
		return fmt.Errorf("ssh stdout pipe: %w", err)
	}
	stderrPipe, err := session.StderrPipe()
	if err != nil {
		return fmt.Errorf("ssh stderr pipe: %w", err)
	}

	tailDone := make(chan error, 1)
	go func() {
		tailDone <- streamLogs(ctx, stdoutPipe, stderrPipe, idx, wfLogger)
	}()

	// execute with the image's specified shell
	cmd := fmt.Sprintf("%s%s -c %s", envStr, state.shell, shellescape(step.Command()))
	if err := session.Start(cmd); err != nil {
		return fmt.Errorf("session start: %w", err)
	}

	// wait for the output streams to close (command finished) or for the
	// step deadline to fire, whichever comes first
	select {
	case err := <-tailDone:
		if err != nil {
			e.l.Warn("log streaming error", "workflow", wid, "step", idx, "error", err)
		}
	case <-ctx.Done():
		_ = session.Signal(ssh.SIGKILL)
		<-tailDone
		return engine.ErrTimedOut
	}

	if err := session.Wait(); err != nil {
		return engine.ErrWorkflowFailed
	}
	return nil
}

// DestroyWorkflow runs all cleanups registered for the workflow. Errors are
// logged but do not stop the remaining cleanups.
func (e *Engine) DestroyWorkflow(ctx context.Context, wid models.WorkflowId) error {
	fns := e.drainCleanups(wid)

	// cleanups must be executed LIFO to respect resource dependencies
	// (e.g. process shutdown before tempdir removal)
	for i := len(fns) - 1; i >= 0; i-- {
		if err := fns[i](ctx); err != nil {
			e.l.Error("failed to cleanup workflow resource", "workflowId", wid, "error", err)
		}
	}
	return nil
}

// WorkflowTimeout returns the configured per-workflow timeout, defaulting to
// 5 minutes when the config value is missing or unparsable.
func (e *Engine) WorkflowTimeout() time.Duration {
	d, err := time.ParseDuration(e.cfg.QemuPipelines.WorkflowTimeout)
	if err != nil {
		return 5 * time.Minute
	}
	return d
}

// registerCleanup appends a release func for the workflow; executed LIFO by
// DestroyWorkflow.
func (e *Engine) registerCleanup(wid models.WorkflowId, fn cleanupFunc) {
	e.cleanupMu.Lock()
	defer e.cleanupMu.Unlock()
	key := wid.String()
	e.cleanup[key] = append(e.cleanup[key], fn)
}

// drainCleanups removes and returns the workflow's cleanup funcs so they run
// exactly once.
func (e *Engine) drainCleanups(wid models.WorkflowId) []cleanupFunc {
	e.cleanupMu.Lock()
	defer e.cleanupMu.Unlock()
	key := wid.String()
	fns := e.cleanup[key]
	delete(e.cleanup, key)
	return fns
}
+8
spindle/engines/qemu/errors.go
··· 1 + package qemu 2 + 3 + import "errors" 4 + 5 + var ( 6 + ErrOOMKilled = errors.New("container died due to OOM kill") 7 + ErrBootTimeout = errors.New("timed out waiting for VM to boot") 8 + )
+35
spindle/engines/qemu/logs.go
··· 1 + package qemu 2 + 3 + import ( 4 + "context" 5 + "io" 6 + "strings" 7 + 8 + "golang.org/x/sync/errgroup" 9 + "tangled.org/core/spindle/models" 10 + ) 11 + 12 + // shellescape safely escapes a string for bash -c 13 + func shellescape(str string) string { 14 + return "'" + strings.ReplaceAll(str, "'", `'"'"'`) + "'" 15 + } 16 + 17 + // streamLogs copies stdout and stderr from the SSH session to the workflow logger 18 + func streamLogs(ctx context.Context, stdoutPipe io.Reader, stderrPipe io.Reader, idx int, wfLogger models.WorkflowLogger) error { 19 + if wfLogger == nil { 20 + return nil 21 + } 22 + 23 + g, _ := errgroup.WithContext(ctx) 24 + 25 + g.Go(func() error { 26 + _, err := io.Copy(wfLogger.DataWriter(idx, "stdout"), stdoutPipe) 27 + return err 28 + }) 29 + g.Go(func() error { 30 + _, err := io.Copy(wfLogger.DataWriter(idx, "stderr"), stderrPipe) 31 + return err 32 + }) 33 + 34 + return g.Wait() 35 + }
+33
spindle/engines/qemu/models.go
package qemu

import (
	"os"

	"github.com/digitalocean/go-qemu/qmp"
	"golang.org/x/crypto/ssh"
)

// vmState is the per-workflow engine state carried in models.Workflow.Data.
// After InitWorkflow it holds only the resolved image paths; SetupWorkflow
// replaces it with one that additionally carries the live VM handles.
type vmState struct {
	process   *os.Process        // running qemu process (set by SetupWorkflow)
	qmpMon    *qmp.SocketMonitor // QMP monitor connection to the emulator
	sshClient *ssh.Client        // ssh connection into the guest "build" user
	sshPort   int                // host loopback port forwarded to guest :22
	tempDir   string             // scratch dir holding seed iso + qmp socket

	kernel  string // kernel path; empty means BIOS boot from disk
	initrd  string // initrd path; only meaningful when kernel is set
	disk    string // qcow2 root disk path (always set)
	cmdline string // kernel command line; only meaningful when kernel is set
	shell   string // guest shell used to run step commands
}

// manifestWorkflow is the yaml shape of a workflow manifest as understood by
// the qemu engine.
type manifestWorkflow struct {
	Image string `yaml:"image"` // image subfolder name; falls back to config default
	Steps []struct {
		Name        string            `yaml:"name"`
		Command     string            `yaml:"command"`
		Environment map[string]string `yaml:"environment"`
	} `yaml:"steps"`
	Dependencies map[string][]string `yaml:"dependencies"`
	Environment  map[string]string   `yaml:"environment"`
}
+141
spindle/engines/qemu/setup.go
··· 1 + package qemu 2 + 3 + import ( 4 + "context" 5 + "fmt" 6 + "io" 7 + "net/http" 8 + "os" 9 + "path/filepath" 10 + 11 + "golang.org/x/sync/errgroup" 12 + "tangled.org/core/log" 13 + "tangled.org/core/spindle/config" 14 + "tangled.org/core/spindle/engines/qemu/bakers" 15 + ) 16 + 17 + type DefaultImage struct { 18 + disk string 19 + kernel string 20 + initrd string 21 + } 22 + 23 + // todo(dawn): we should be able to just pull this from a json file or something 24 + var defaultImages = map[string]DefaultImage{ 25 + // alpine doesnt have files that will let us bypass bios :( 26 + // we would ideally have our own image generation step... 27 + "alpine-3.23": { 28 + disk: "https://dl-cdn.alpinelinux.org/alpine/v3.23/releases/cloud/nocloud_alpine-3.23.3-x86_64-bios-tiny-r0.qcow2", 29 + kernel: "", 30 + initrd: "", 31 + }, 32 + "ubuntu-24.04": { 33 + disk: "https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img", 34 + kernel: "https://cloud-images.ubuntu.com/noble/current/unpacked/noble-server-cloudimg-amd64-vmlinuz-generic", 35 + initrd: "https://cloud-images.ubuntu.com/noble/current/unpacked/noble-server-cloudimg-amd64-initrd-generic", 36 + }, 37 + } 38 + 39 + func downloadFile(ctx context.Context, url string, dest string) error { 40 + req, err := http.NewRequestWithContext(ctx, "GET", url, nil) 41 + if err != nil { 42 + return err 43 + } 44 + 45 + resp, err := http.DefaultClient.Do(req) 46 + if err != nil { 47 + return err 48 + } 49 + defer resp.Body.Close() 50 + 51 + if resp.StatusCode != http.StatusOK { 52 + return fmt.Errorf("bad status: %s", resp.Status) 53 + } 54 + 55 + tmp := dest + ".tmp" 56 + out, err := os.Create(tmp) 57 + if err != nil { 58 + return err 59 + } 60 + defer os.Remove(tmp) // no-op if rename succeeds 61 + 62 + if _, err = io.Copy(out, resp.Body); err != nil { 63 + out.Close() 64 + return err 65 + } 66 + if err := out.Sync(); err != nil { 67 + out.Close() 68 + return err 69 + } 70 + if err := out.Close(); err != nil { 
71 + return err 72 + } 73 + return os.Rename(tmp, dest) 74 + } 75 + 76 + func SetupDefaultImages(ctx context.Context) error { 77 + cfg, err := config.Load(ctx) 78 + if err != nil { 79 + return fmt.Errorf("loading config: %w", err) 80 + } 81 + 82 + imageDir := cfg.QemuPipelines.ImageDir 83 + if imageDir == "" { 84 + return fmt.Errorf("SPINDLE_QEMU_PIPELINES_IMAGE_DIR must be set") 85 + } 86 + 87 + l := log.FromContext(ctx) 88 + l.Info("setting up default qemu images", "imageDir", imageDir) 89 + 90 + if err := os.MkdirAll(imageDir, 0755); err != nil { 91 + return err 92 + } 93 + 94 + g, ctx := errgroup.WithContext(ctx) 95 + g.SetLimit(4) 96 + 97 + for name, img := range defaultImages { 98 + g.Go(func() error { 99 + dir := filepath.Join(imageDir, name) 100 + if err := os.MkdirAll(dir, 0755); err != nil { 101 + return err 102 + } 103 + 104 + diskPath := filepath.Join(dir, "disk.qcow2") 105 + if _, err := os.Stat(diskPath); err == nil { 106 + l.Info("image already exists, skipping", "name", name) 107 + return nil 108 + } 109 + 110 + l.Info("downloading image...", "name", name) 111 + if err := downloadFile(ctx, img.disk, diskPath); err != nil { 112 + return fmt.Errorf("downloading %s disk: %w", name, err) 113 + } 114 + 115 + if img.kernel != "" { 116 + if err := downloadFile(ctx, img.kernel, filepath.Join(dir, "kernel")); err != nil { 117 + return fmt.Errorf("downloading %s kernel: %w", name, err) 118 + } 119 + } 120 + 121 + if img.initrd != "" { 122 + if err := downloadFile(ctx, img.initrd, filepath.Join(dir, "initrd")); err != nil { 123 + return fmt.Errorf("downloading %s initrd: %w", name, err) 124 + } 125 + } 126 + 127 + if err := bakers.PrepareImage(ctx, cfg, name); err != nil { 128 + return fmt.Errorf("preparing image %s: %w", name, err) 129 + } 130 + 131 + return nil 132 + }) 133 + } 134 + 135 + if err := g.Wait(); err != nil { 136 + return err 137 + } 138 + 139 + l.Info("setup complete") 140 + return nil 141 + }
+18 -11
spindle/models/clone.go
··· 55 55 } 56 56 } 57 57 58 - repoURL := BuildRepoURL(tr.Repo, dev) 58 + repoURL := BuildRepoURL(tr.Repo) 59 + 60 + if dev { 61 + repoURL = RewriteLocalhost(repoURL, twf.Engine) 62 + } 59 63 60 64 var cloneOpts tangled.Pipeline_CloneOpts 61 65 if twf.Clone != nil { ··· 102 106 } 103 107 104 108 // BuildRepoURL constructs the repository URL from repo metadata. 105 - func BuildRepoURL(repo *tangled.Pipeline_TriggerRepo, devMode bool) string { 109 + func BuildRepoURL(repo *tangled.Pipeline_TriggerRepo) string { 106 110 if repo == nil { 107 111 return "" 108 112 } 109 113 110 114 scheme := "https://" 111 - if devMode { 112 - scheme = "http://" 113 - } 114 - 115 115 // Get host from knot 116 116 host := repo.Knot 117 117 118 - // In dev mode, replace localhost with host.docker.internal for Docker networking 119 - if devMode && strings.Contains(host, "localhost") { 120 - host = strings.ReplaceAll(host, "localhost", "host.docker.internal") 121 - } 122 - 123 118 switch { 124 119 case repo.RepoDid != nil: 125 120 return fmt.Sprintf("%s%s/%s", scheme, host, *repo.RepoDid) ··· 128 123 default: 129 124 return "" 130 125 } 126 + } 127 + 128 + // RewriteLocalhost adapts a localhost repository URL for container/VM environments 129 + func RewriteLocalhost(repoURL string, engine string) string { 130 + if !strings.Contains(repoURL, "localhost") { 131 + return repoURL 132 + } 133 + repoURL = strings.ReplaceAll(repoURL, "https://", "http://") 134 + if engine == "qemu" { 135 + return strings.ReplaceAll(repoURL, "localhost", "10.0.2.2") 136 + } 137 + return strings.ReplaceAll(repoURL, "localhost", "host.docker.internal") 131 138 } 132 139 133 140 // buildFetchArgs constructs the arguments for git fetch based on clone options
+2 -2
spindle/models/pipeline_env.go
··· 10 10 11 11 // PipelineEnvVars extracts environment variables from pipeline trigger metadata. 12 12 // These are framework-provided variables that are injected into workflow steps. 13 - func PipelineEnvVars(tr *tangled.Pipeline_TriggerMetadata, pipelineId PipelineId, devMode bool) map[string]string { 13 + func PipelineEnvVars(tr *tangled.Pipeline_TriggerMetadata, pipelineId PipelineId) map[string]string { 14 14 if tr == nil { 15 15 return nil 16 16 } ··· 33 33 env["TANGLED_REPO_REPO_DID"] = *tr.Repo.RepoDid 34 34 } 35 35 env["TANGLED_REPO_DEFAULT_BRANCH"] = tr.Repo.DefaultBranch 36 - env["TANGLED_REPO_URL"] = BuildRepoURL(tr.Repo, devMode) 36 + env["TANGLED_REPO_URL"] = BuildRepoURL(tr.Repo) 37 37 } 38 38 39 39 switch workflow.TriggerKind(tr.Kind) {
+9 -9
spindle/models/pipeline_env_test.go
··· 26 26 Knot: "example.com", 27 27 Rkey: "123123", 28 28 } 29 - env := PipelineEnvVars(tr, id, false) 29 + env := PipelineEnvVars(tr, id) 30 30 31 31 // Check standard CI variable 32 32 if env["CI"] != "true" { ··· 88 88 Knot: "example.com", 89 89 Rkey: "123123", 90 90 } 91 - env := PipelineEnvVars(tr, id, false) 91 + env := PipelineEnvVars(tr, id) 92 92 93 93 if env["TANGLED_REF"] != "refs/tags/v1.2.3" { 94 94 t.Errorf("Expected TANGLED_REF='refs/tags/v1.2.3', got '%s'", env["TANGLED_REF"]) ··· 120 120 Knot: "example.com", 121 121 Rkey: "123123", 122 122 } 123 - env := PipelineEnvVars(tr, id, false) 123 + env := PipelineEnvVars(tr, id) 124 124 125 125 // Check ref variables for PR 126 126 if env["TANGLED_REF"] != "refs/heads/feature-branch" { ··· 175 175 Knot: "example.com", 176 176 Rkey: "123123", 177 177 } 178 - env := PipelineEnvVars(tr, id, false) 178 + env := PipelineEnvVars(tr, id) 179 179 180 180 // Check manual input variables 181 181 if env["TANGLED_INPUT_VERSION"] != "1.0.0" { ··· 211 211 Knot: "example.com", 212 212 Rkey: "123123", 213 213 } 214 - env := PipelineEnvVars(tr, id, true) 214 + env := PipelineEnvVars(tr, id) 215 215 216 - // Dev mode should use http:// and replace localhost with host.docker.internal 217 - expectedURL := "http://host.docker.internal:3000/did:plc:user123/my-repo" 216 + // In pristine mode, it should be natively https://localhost 217 + expectedURL := "https://localhost:3000/did:plc:user123/my-repo" 218 218 if env["TANGLED_REPO_URL"] != expectedURL { 219 219 t.Errorf("Expected TANGLED_REPO_URL='%s', got '%s'", expectedURL, env["TANGLED_REPO_URL"]) 220 220 } ··· 225 225 Knot: "example.com", 226 226 Rkey: "123123", 227 227 } 228 - env := PipelineEnvVars(nil, id, false) 228 + env := PipelineEnvVars(nil, id) 229 229 230 230 if env != nil { 231 231 t.Error("Expected nil env for nil trigger") ··· 246 246 Knot: "example.com", 247 247 Rkey: "123123", 248 248 } 249 - env := PipelineEnvVars(tr, id, false) 249 + env := 
PipelineEnvVars(tr, id) 250 250 251 251 // Should still have repo variables 252 252 if env["TANGLED_REPO_KNOT"] != "example.com" {
+15 -1
spindle/server.go
··· 23 23 "tangled.org/core/spindle/db" 24 24 "tangled.org/core/spindle/engine" 25 25 "tangled.org/core/spindle/engines/nixery" 26 + "tangled.org/core/spindle/engines/qemu" 26 27 "tangled.org/core/spindle/models" 27 28 "tangled.org/core/spindle/queue" 28 29 "tangled.org/core/spindle/secrets" ··· 250 251 return err 251 252 } 252 253 254 + qemuEng, err := qemu.New(ctx, cfg) 255 + if err != nil { 256 + return err 257 + } 258 + 253 259 s, err := New(ctx, cfg, map[string]models.Engine{ 254 260 "nixery": nixeryEng, 261 + "qemu": qemuEng, 255 262 }) 256 263 if err != nil { 257 264 return err ··· 337 344 workflows := make(map[models.Engine][]models.Workflow) 338 345 339 346 // Build pipeline environment variables once for all workflows 340 - pipelineEnv := models.PipelineEnvVars(tpl.TriggerMetadata, pipelineId, s.cfg.Server.Dev) 347 + pipelineEnv := models.PipelineEnvVars(tpl.TriggerMetadata, pipelineId) 341 348 342 349 for _, w := range tpl.Workflows { 343 350 if w != nil { ··· 370 377 ewf.Environment = make(map[string]string) 371 378 } 372 379 maps.Copy(ewf.Environment, pipelineEnv) 380 + 381 + // if in dev mode we have to replace localhost with the correct host alias 382 + if s.cfg.Server.Dev { 383 + if repoUrl, ok := ewf.Environment["TANGLED_REPO_URL"]; ok { 384 + ewf.Environment["TANGLED_REPO_URL"] = models.RewriteLocalhost(repoUrl, w.Engine) 385 + } 386 + } 373 387 374 388 workflows[eng] = append(workflows[eng], *ewf) 375 389