Stitch any CI into Tangled

abstract providers

+228 -123

knot.go (+14 -120)
···
    "encoding/json"
    "fmt"
    "log/slog"
-   "time"

    "tangled.org/core/api/tangled"
    "tangled.org/core/eventconsumer"
···
type knotConsumer struct {
    c   *eventconsumer.Consumer
    log *slog.Logger
-   // br is how we publish synthesized sh.tangled.pipeline.status
-   // records back out to /events subscribers. Today it's driven by
-   // the fake-job stand-in in process(); once we hook up Buildkite,
-   // the webhook handler will be the primary publisher.
-   br *broker
+
+   // provider dispatches each incoming pipeline trigger to whatever
+   // backend actually runs it (today: the fake provider; tomorrow:
+   // Buildkite). The consumer doesn't care which — it just hands
+   // over the decoded record and lets the provider publish status
+   // records back through its own broker connection.
+   provider Provider
}

// Compile-time interface conformance check.
···
// restart is harmless. When we start translating triggers into real
// Buildkite builds, this should switch to a SQLite-backed cursor store
// to avoid duplicate builds.
-func startKnotConsumer(ctx context.Context, cfg config, st *store, br *broker) (*knotConsumer, error) {
+func startKnotConsumer(ctx context.Context, cfg config, st *store, provider Provider) (*knotConsumer, error) {
    logger := loggerFrom(ctx).With("component", "knotconsumer")

    knots, err := st.KnotsForSpindle(ctx, cfg.Hostname)
···
        return nil, fmt.Errorf("load known knots: %w", err)
    }

-   kc := &knotConsumer{log: logger, br: br}
+   kc := &knotConsumer{log: logger, provider: provider}

    ccfg := eventconsumer.NewConsumerConfig()
    ccfg.Logger = logger
···
            "workflows", len(p.Workflows),
        )

-       // Stand-in for the real Buildkite integration. Spawn one fake
-       // job per workflow so the /events fan-out has something to
-       // emit and the appview can show progress end to end. We hand
-       // each goroutine the worker ctx (app-scoped) so they survive
-       // process() returning but exit cleanly on shutdown.
-       k.spawnFakeJobs(ctx, src.Key(), msg.Rkey, p.Workflows)
+       // Hand the trigger to whichever Provider was configured.
+       // Spawn is non-blocking — it fans out into provider-owned
+       // goroutines so this worker can move on to the next event.
+       // The provider keeps ctx around for shutdown coordination.
+       k.provider.Spawn(ctx, src.Key(), msg.Rkey, p.Workflows)

    default:
        // Knots may publish other record types over the same stream; we
···

    return nil
}
-
-// fakeJob constants. Pulled out so it's obvious where the timing
-// numbers come from, and trivially adjustable when we want to dial the
-// fake up or down.
-const (
-   // fakeJobDuration is the wall-clock length of a fake run. Total
-   // publishes per workflow = (fakeJobDuration / fakeJobInterval) + 1
-   // (one final "success").
-   fakeJobDuration = 30 * time.Second
-   // fakeJobInterval is how often we emit a "running" heartbeat.
-   fakeJobInterval = 5 * time.Second
-)
-
-// spawnFakeJobs starts a goroutine per workflow. They each emit a
-// stream of sh.tangled.pipeline.status records via the broker until
-// either the fake duration elapses (success) or ctx is cancelled.
-//
-// This is a deliberate stand-in: it lets us validate the entire
-// jetstream → knot → broker → /events → appview pipeline before the
-// real Buildkite plumbing is in place.
-func (k *knotConsumer) spawnFakeJobs(ctx context.Context, knot, pipelineRkey string, workflows []*tangled.Pipeline_Workflow) {
-   if len(workflows) == 0 {
-       // Nothing to fake — without a workflow name there's no valid
-       // pipeline.status record to publish.
-       k.log.Warn("pipeline has no workflows; skipping fake run",
-           "knot", knot, "rkey", pipelineRkey,
-       )
-       return
-   }
-   for _, wf := range workflows {
-       if wf == nil || wf.Name == "" {
-           continue
-       }
-       go k.runFakeJob(ctx, knot, pipelineRkey, wf.Name)
-   }
-}
-
-// runFakeJob emits a "running" status every fakeJobInterval for
-// fakeJobDuration, then a final "success". It returns early if ctx is
-// cancelled (shutdown) — without doing a final publish, since we'd be
-// writing to a broker whose store may be closing.
-func (k *knotConsumer) runFakeJob(ctx context.Context, knot, pipelineRkey, workflow string) {
-   // pipelineURI is what the appview parses out of the status record
-   // to associate it with the originating pipeline. Format mirrors
-   // what the upstream spindle emits: at://did:web:<knot>/<nsid>/<rkey>
-   // — the appview strips the did:web: prefix and uses the hostname
-   // as the knot identifier.
-   pipelineURI := fmt.Sprintf("at://did:web:%s/%s/%s",
-       knot, tangled.PipelineNSID, pipelineRkey,
-   )
-
-   logger := k.log.With(
-       "knot", knot,
-       "pipeline_rkey", pipelineRkey,
-       "workflow", workflow,
-   )
-
-   // Heartbeat phase. seq doubles as a per-workflow disambiguator in
-   // the synthesized status rkey so multiple fakes don't collide.
-   deadline := time.Now().Add(fakeJobDuration)
-   seq := 0
-   for time.Now().Before(deadline) {
-       if err := k.publishStatus(ctx, pipelineURI, workflow, "running", seq); err != nil {
-           logger.Error("publish fake running status", "err", err, "seq", seq)
-           return
-       }
-       seq++
-       select {
-       case <-ctx.Done():
-           logger.Debug("fake job cancelled mid-run", "seq", seq)
-           return
-       case <-time.After(fakeJobInterval):
-       }
-   }
-
-   // Terminal status. Marked as "success" using the upstream
-   // StatusKind enum's success label (see tangled.org/core/spindle/models).
-   if err := k.publishStatus(ctx, pipelineURI, workflow, "success", seq); err != nil {
-       logger.Error("publish fake success status", "err", err, "seq", seq)
-       return
-   }
-   logger.Info("fake job complete")
-}
-
-// publishStatus assembles a tangled.PipelineStatus, marshals it, and
-// hands it to the broker for persistence + fan-out. The rkey we mint
-// is purely synthetic — it just needs to be unique across our event
-// log; the appview keys its rows on (spindle, rkey).
-func (k *knotConsumer) publishStatus(ctx context.Context, pipelineURI, workflow, status string, seq int) error {
-   rec := tangled.PipelineStatus{
-       LexiconTypeID: tangled.PipelineStatusNSID,
-       Pipeline:      pipelineURI,
-       Workflow:      workflow,
-       Status:        status,
-       CreatedAt:     time.Now().UTC().Format(time.RFC3339),
-   }
-   body, err := json.Marshal(rec)
-   if err != nil {
-       return fmt.Errorf("marshal pipeline.status: %w", err)
-   }
-   rkey := fmt.Sprintf("fake-%d-%s-%d", time.Now().UnixNano(), workflow, seq)
-   if _, err := k.br.Publish(ctx, rkey, tangled.PipelineStatusNSID, body); err != nil {
-       return fmt.Errorf("publish pipeline.status: %w", err)
-   }
-   return nil
-}
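Note: with the fake-job machinery extracted, the consumer depends only on the Provider interface, which makes process() easy to exercise in isolation. A minimal sketch of a test double, assuming the same package and imports as knot.go; stubProvider is hypothetical and not part of this change:

// stubProvider is a hypothetical test double: it satisfies Provider by
// recording each Spawn call instead of running anything, so a test can
// assert on exactly what the consumer dispatched.
type stubProvider struct {
    mu    sync.Mutex
    calls []string // one "<knot>/<rkey>" entry per trigger
}

var _ Provider = (*stubProvider)(nil)

func (s *stubProvider) Spawn(ctx context.Context, knot, pipelineRkey string, workflows []*tangled.Pipeline_Workflow) {
    s.mu.Lock()
    defer s.mu.Unlock()
    s.calls = append(s.calls, knot+"/"+pipelineRkey)
}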
main.go (+11 -3)
···
    // them to publish synthetic status events at startup.
    br := newBroker(st)

+   // Provider that turns Tangled pipeline triggers into
+   // pipeline.status events. The fake provider stands in for a real
+   // CI integration: it emits synthetic running/success heartbeats
+   // over the broker so the entire jetstream → knot → /events flow
+   // is exercisable end-to-end. Swap this for a Buildkite-backed
+   // implementation once that lands.
+   provider := newFakeProvider(br, logger)
+
    // Start the knot event-stream consumer first so the jetstream
    // loop has somewhere to register newly-observed knots into. It
-   // gets the broker so its (currently fake) pipeline runner can
-   // publish sh.tangled.pipeline.status events back out via /events.
-   knots, err := startKnotConsumer(ctx, cfg, st, br)
+   // gets the provider so each incoming pipeline trigger has
+   // something to dispatch to.
+   knots, err := startKnotConsumer(ctx, cfg, st, provider)
    if err != nil {
        logger.Error("failed to start knot consumer", "err", err)
        os.Exit(1)
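Note: this construction site is the single point where the backend gets chosen. One plausible shape for the eventual selection, purely speculative; neither a cfg.Provider field nor newBuildkiteProvider exists in this change:

// Hypothetical wiring: pick the provider from config instead of
// hard-coding the fake. cfg.Provider and newBuildkiteProvider are
// placeholders for whatever the real integration introduces.
var provider Provider
switch cfg.Provider {
case "buildkite":
    provider = newBuildkiteProvider(br, logger)
default:
    provider = newFakeProvider(br, logger)
}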
provider.go (+46)
···
+package main
+
+// Provider is the abstraction over "the thing that turns a Tangled
+// pipeline trigger into pipeline.status events". It exists so the rest
+// of tack can stay agnostic to whether a given trigger is dispatched to
+// Buildkite, run by a stub for testing, or anything else we plug in later.
+
+import (
+   "context"
+
+   "tangled.org/core/api/tangled"
+)
+
+// Provider dispatches a Tangled pipeline trigger to whatever backend
+// actually runs the workflows.
+//
+// Implementations are responsible for publishing
+// sh.tangled.pipeline.status records back through whatever channel
+// they were constructed with.
+type Provider interface {
+   // Spawn kicks off a pipeline run for every workflow in workflows.
+   //
+   // It MUST be non-blocking: the caller is the eventconsumer worker
+   // that's shared across all knot subscriptions, so per-pipeline
+   // work has to live on its own goroutine. A typical implementation
+   // fans out into a goroutine per workflow and returns immediately.
+   //
+   // ctx is the consumer's app-scoped context (lives until shutdown,
+   // not just for the duration of one event). Implementations are
+   // expected to honour cancellation: in-flight runs should wind
+   // down without issuing further publishes once ctx is done.
+   //
+   // knot is the knot hostname the trigger arrived on; it's the
+   // authority half of the pipeline ATURI that pipeline.status
+   // records reference. pipelineRkey is the trigger record's rkey
+   // on that knot. workflows is the unmodified slice from the
+   // decoded sh.tangled.pipeline record; implementations should
+   // tolerate nil entries and zero-length names defensively, since
+   // the lexicon doesn't enforce either.
+   Spawn(
+       ctx context.Context,
+       knot string,
+       pipelineRkey string,
+       workflows []*tangled.Pipeline_Workflow,
+   )
+}
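Note: the contract is small enough that the minimal conforming implementation fits in a few lines. An illustrative sketch (nopProvider is not part of this change; it assumes the same package plus a log/slog import) showing how the non-blocking requirement is trivially satisfied when there is no work to fan out:

// nopProvider is an illustrative, minimal Provider: it drops every
// trigger on the floor. Spawn returns immediately, so the non-blocking
// contract holds, and there are no goroutines to wind down when ctx
// is cancelled.
type nopProvider struct{ log *slog.Logger }

var _ Provider = (*nopProvider)(nil)

func (n *nopProvider) Spawn(ctx context.Context, knot, pipelineRkey string, workflows []*tangled.Pipeline_Workflow) {
    n.log.Debug("dropping pipeline trigger",
        "knot", knot, "rkey", pipelineRkey, "workflows", len(workflows))
}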
provider_fake.go (+157)
···
+package main
+
+// fakeProvider is a stand-in Provider implementation: it doesn't talk
+// to any external CI. For each workflow in a triggered pipeline it
+// spawns a goroutine that emits a fixed-cadence stream of
+// sh.tangled.pipeline.status records — "running" every five seconds
+// for thirty seconds, then a final "success" — through the broker.
+//
+// The point is to exercise the entire trigger → broker → /events →
+// appview path end-to-end before any real CI integration exists. Once
+// the Buildkite provider lands, this one stays around as a reference
+// implementation and as the test double of choice when a test wants
+// "something that publishes plausible status updates" without the
+// timing weight of real builds.
+
+import (
+   "context"
+   "encoding/json"
+   "fmt"
+   "log/slog"
+   "time"
+
+   "tangled.org/core/api/tangled"
+)
+
+// Fake-job timing knobs. Pulled out as constants so it's obvious
+// where the numbers come from and they can be tuned independently of
+// the rest of the file. Total publishes per workflow =
+// (fakeJobDuration / fakeJobInterval) heartbeats + 1 final success.
+const (
+   fakeJobDuration = 30 * time.Second
+   fakeJobInterval = 5 * time.Second
+)
+
+// fakeProvider implements Provider against the in-process broker.
+type fakeProvider struct {
+   br  *broker
+   log *slog.Logger
+}
+
+// Compile-time interface check — keeps the fake honest if Provider
+// ever gains additional methods.
+var _ Provider = (*fakeProvider)(nil)
+
+// newFakeProvider constructs a fakeProvider bound to br. The provided
+// logger is annotated with component=provider so its output stands
+// apart from the knot-consumer / jetstream noise.
+func newFakeProvider(br *broker, log *slog.Logger) *fakeProvider {
+   return &fakeProvider{
+       br:  br,
+       log: log.With("component", "provider", "kind", "fake"),
+   }
+}
+
+// Spawn satisfies Provider. It kicks off one runWorkflow goroutine per
+// workflow, returning immediately so the eventconsumer worker that
+// invoked us isn't blocked. Goroutines inherit ctx (app-scoped) and
+// will exit early on cancellation.
+func (p *fakeProvider) Spawn(
+   ctx context.Context,
+   knot string,
+   pipelineRkey string,
+   workflows []*tangled.Pipeline_Workflow,
+) {
+   if len(workflows) == 0 {
+       // Without a workflow name there's no valid pipeline.status
+       // record to publish. Log loudly enough that an operator
+       // staring at the logs can tell the trigger arrived but
+       // produced no fake activity.
+       p.log.Warn("pipeline has no workflows; skipping fake run",
+           "knot", knot, "rkey", pipelineRkey,
+       )
+       return
+   }
+   for _, wf := range workflows {
+       // Defensive: the lexicon allows pointer entries and doesn't
+       // enforce non-empty names. We can't publish a status for an
+       // unnamed workflow, so just skip it.
+       if wf == nil || wf.Name == "" {
+           continue
+       }
+       go p.runWorkflow(ctx, knot, pipelineRkey, wf.Name)
+   }
+}
+
+// runWorkflow emits a "running" status every fakeJobInterval until
+// fakeJobDuration elapses, then a final "success". On ctx
+// cancellation it returns without issuing the terminal publish — the
+// broker's underlying store may already be closing during shutdown.
+func (p *fakeProvider) runWorkflow(ctx context.Context, knot, pipelineRkey, workflow string) {
+   // pipelineURI is what the appview parses out of the status record
+   // to associate it back with the originating pipeline. Format
+   // mirrors the upstream spindle's emission:
+   // at://did:web:<knot>/<nsid>/<rkey>. The appview strips the
+   // did:web: prefix and treats the remainder as the knot identifier.
+   pipelineURI := fmt.Sprintf("at://did:web:%s/%s/%s",
+       knot, tangled.PipelineNSID, pipelineRkey,
+   )
+
+   logger := p.log.With(
+       "knot", knot,
+       "pipeline_rkey", pipelineRkey,
+       "workflow", workflow,
+   )
+
+   // Heartbeat phase. seq doubles as a per-workflow disambiguator
+   // in the synthesized status rkey so concurrent fakes (across
+   // workflows or pipelines) don't collide.
+   deadline := time.Now().Add(fakeJobDuration)
+   seq := 0
+   for time.Now().Before(deadline) {
+       if err := p.publishStatus(ctx, pipelineURI, workflow, "running", seq); err != nil {
+           logger.Error("publish fake running status", "err", err, "seq", seq)
+           return
+       }
+       seq++
+       select {
+       case <-ctx.Done():
+           logger.Debug("fake job cancelled mid-run", "seq", seq)
+           return
+       case <-time.After(fakeJobInterval):
+       }
+   }
+
+   // Terminal publish. "success" matches the upstream StatusKind
+   // enum (see tangled.org/core/spindle/models) — the appview
+   // routes status strings through that same enum.
+   if err := p.publishStatus(ctx, pipelineURI, workflow, "success", seq); err != nil {
+       logger.Error("publish fake success status", "err", err, "seq", seq)
+       return
+   }
+   logger.Info("fake job complete")
+}
+
+// publishStatus assembles a tangled.PipelineStatus record, marshals
+// it, and pushes it through the broker. The synthesized rkey just
+// needs to be unique within our event log; the appview keys its rows
+// on (spindle, rkey) so we mix in time + workflow + sequence to avoid
+// collisions across concurrent workflows on the same pipeline.
+func (p *fakeProvider) publishStatus(ctx context.Context, pipelineURI, workflow, status string, seq int) error {
+   rec := tangled.PipelineStatus{
+       LexiconTypeID: tangled.PipelineStatusNSID,
+       Pipeline:      pipelineURI,
+       Workflow:      workflow,
+       Status:        status,
+       CreatedAt:     time.Now().UTC().Format(time.RFC3339),
+   }
+   body, err := json.Marshal(rec)
+   if err != nil {
+       return fmt.Errorf("marshal pipeline.status: %w", err)
+   }
+   rkey := fmt.Sprintf("fake-%d-%s-%d", time.Now().UnixNano(), workflow, seq)
+   if _, err := p.br.Publish(ctx, rkey, tangled.PipelineStatusNSID, body); err != nil {
+       return fmt.Errorf("publish pipeline.status: %w", err)
+   }
+   return nil
+}
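Note: for contrast with the fake, one possible shape for a real CI-backed provider's Spawn, heavily hedged: ciProvider and triggerBuild are placeholders invented for this sketch, not anything this change (or any Buildkite SDK) defines, and status records would flow back through a webhook handler rather than from these goroutines:

// ciProvider is a hypothetical sketch of a CI-backed Provider.
// triggerBuild stands in for whatever API call the real integration
// ends up making; it is stubbed out here only so the sketch is
// self-contained.
type ciProvider struct {
    br  *broker
    log *slog.Logger
}

var _ Provider = (*ciProvider)(nil)

func (p *ciProvider) Spawn(ctx context.Context, knot, pipelineRkey string, workflows []*tangled.Pipeline_Workflow) {
    for _, wf := range workflows {
        if wf == nil || wf.Name == "" {
            continue // same defensive skip as the fake
        }
        // One goroutine per workflow keeps Spawn non-blocking; the
        // trigger call may involve slow network I/O.
        go func(name string) {
            if err := p.triggerBuild(ctx, knot, pipelineRkey, name); err != nil {
                p.log.Error("trigger build", "err", err, "workflow", name)
            }
        }(wf.Name)
    }
}

// triggerBuild is a placeholder for the eventual CI API call.
func (p *ciProvider) triggerBuild(ctx context.Context, knot, pipelineRkey, workflow string) error {
    return fmt.Errorf("not implemented")
}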