Stitch any CI into Tangled

fake jobs

+125 -4
knot.go (+121 -2)
···
     "encoding/json"
     "fmt"
     "log/slog"
+    "time"

     "tangled.org/core/api/tangled"
     "tangled.org/core/eventconsumer"
···
 type knotConsumer struct {
     c   *eventconsumer.Consumer
     log *slog.Logger
+    // br is how we publish synthesized sh.tangled.pipeline.status
+    // records back out to /events subscribers. Today it's driven by
+    // the fake-job stand-in in process(); once we hook up Buildkite,
+    // the webhook handler will be the primary publisher.
+    br *broker
 }

 // Compile-time interface conformance check.
···
 // restart is harmless. When we start translating triggers into real
 // Buildkite builds, this should switch to a SQLite-backed cursor store
 // to avoid duplicate builds.
-func startKnotConsumer(ctx context.Context, cfg config, st *store) (*knotConsumer, error) {
+func startKnotConsumer(ctx context.Context, cfg config, st *store, br *broker) (*knotConsumer, error) {
     logger := loggerFrom(ctx).With("component", "knotconsumer")

     knots, err := st.KnotsForSpindle(ctx, cfg.Hostname)
···
         return nil, fmt.Errorf("load known knots: %w", err)
     }

-    kc := &knotConsumer{log: logger}
+    kc := &knotConsumer{log: logger, br: br}

     ccfg := eventconsumer.NewConsumerConfig()
     ccfg.Logger = logger
···
             "workflows", len(p.Workflows),
         )

+        // Stand-in for the real Buildkite integration. Spawn one fake
+        // job per workflow so the /events fan-out has something to
+        // emit and the appview can show progress end to end. We hand
+        // each goroutine the worker ctx (app-scoped) so they survive
+        // process() returning but exit cleanly on shutdown.
+        k.spawnFakeJobs(ctx, src.Key(), msg.Rkey, p.Workflows)
+
     default:
         // Knots may publish other record types over the same stream; we
         // don't care about them yet. Debug-only so it's available when
···

     return nil
 }
+
+// fakeJob constants. Pulled out so it's obvious where the timing
+// numbers come from, and trivially adjustable when we want to dial the
+// fake up or down.
+const (
+    // fakeJobDuration is the wall-clock length of a fake run. Total
+    // publishes per workflow = (fakeJobDuration / fakeJobInterval) + 1
+    // (one final "success").
+    fakeJobDuration = 30 * time.Second
+    // fakeJobInterval is how often we emit a "running" heartbeat.
+    fakeJobInterval = 5 * time.Second
+)
+
+// spawnFakeJobs starts a goroutine per workflow. They each emit a
+// stream of sh.tangled.pipeline.status records via the broker until
+// either the fake duration elapses (success) or ctx is cancelled.
+//
+// This is a deliberate stand-in: it lets us validate the entire
+// jetstream → knot → broker → /events → appview pipeline before the
+// real Buildkite plumbing is in place.
+func (k *knotConsumer) spawnFakeJobs(ctx context.Context, knot, pipelineRkey string, workflows []*tangled.Pipeline_Workflow) {
+    if len(workflows) == 0 {
+        // Nothing to fake — without a workflow name there's no valid
+        // pipeline.status record to publish.
+        k.log.Warn("pipeline has no workflows; skipping fake run",
+            "knot", knot, "rkey", pipelineRkey,
+        )
+        return
+    }
+    for _, wf := range workflows {
+        if wf == nil || wf.Name == "" {
+            continue
+        }
+        go k.runFakeJob(ctx, knot, pipelineRkey, wf.Name)
+    }
+}
+
+// runFakeJob emits a "running" status every fakeJobInterval for
+// fakeJobDuration, then a final "success". It returns early if ctx is
+// cancelled (shutdown) — without doing a final publish, since we'd be
+// writing to a broker whose store may be closing.
+func (k *knotConsumer) runFakeJob(ctx context.Context, knot, pipelineRkey, workflow string) {
+    // pipelineURI is what the appview parses out of the status record
+    // to associate it with the originating pipeline. Format mirrors
+    // what the upstream spindle emits: at://did:web:<knot>/<nsid>/<rkey>
+    // — the appview strips the did:web: prefix and uses the hostname
+    // as the knot identifier.
+    pipelineURI := fmt.Sprintf("at://did:web:%s/%s/%s",
+        knot, tangled.PipelineNSID, pipelineRkey,
+    )
+
+    logger := k.log.With(
+        "knot", knot,
+        "pipeline_rkey", pipelineRkey,
+        "workflow", workflow,
+    )
+
+    // Heartbeat phase. seq doubles as a per-workflow disambiguator in
+    // the synthesized status rkey so multiple fakes don't collide.
+    deadline := time.Now().Add(fakeJobDuration)
+    seq := 0
+    for time.Now().Before(deadline) {
+        if err := k.publishStatus(ctx, pipelineURI, workflow, "running", seq); err != nil {
+            logger.Error("publish fake running status", "err", err, "seq", seq)
+            return
+        }
+        seq++
+        select {
+        case <-ctx.Done():
+            logger.Debug("fake job cancelled mid-run", "seq", seq)
+            return
+        case <-time.After(fakeJobInterval):
+        }
+    }
+
+    // Terminal status. Marked as "success" using the upstream
+    // StatusKind enum's success label (see tangled.org/core/spindle/models).
+    if err := k.publishStatus(ctx, pipelineURI, workflow, "success", seq); err != nil {
+        logger.Error("publish fake success status", "err", err, "seq", seq)
+        return
+    }
+    logger.Info("fake job complete")
+}
+
+// publishStatus assembles a tangled.PipelineStatus, marshals it, and
+// hands it to the broker for persistence + fan-out. The rkey we mint
+// is purely synthetic — it just needs to be unique across our event
+// log; the appview keys its rows on (spindle, rkey).
+func (k *knotConsumer) publishStatus(ctx context.Context, pipelineURI, workflow, status string, seq int) error {
+    rec := tangled.PipelineStatus{
+        LexiconTypeID: tangled.PipelineStatusNSID,
+        Pipeline:      pipelineURI,
+        Workflow:      workflow,
+        Status:        status,
+        CreatedAt:     time.Now().UTC().Format(time.RFC3339),
+    }
+    body, err := json.Marshal(rec)
+    if err != nil {
+        return fmt.Errorf("marshal pipeline.status: %w", err)
+    }
+    rkey := fmt.Sprintf("fake-%d-%s-%d", time.Now().UnixNano(), workflow, seq)
+    if _, err := k.br.Publish(ctx, rkey, tangled.PipelineStatusNSID, body); err != nil {
+        return fmt.Errorf("publish pipeline.status: %w", err)
+    }
+    return nil
+}
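
A quick aside on the synthesized URIs: the standalone sketch below (not part of the diff) round-trips the at://did:web:<knot>/<nsid>/<rkey> format that runFakeJob builds, the same way the comment in runFakeJob describes the appview doing it. The hostname, collection, and rkey used here are placeholder values; in real records the collection string is whatever tangled.PipelineNSID resolves to.

// Sketch only; nothing here ships with this change.
package main

import (
    "fmt"
    "strings"
)

// splitPipelineURI undoes the fmt.Sprintf in runFakeJob: strip the
// "at://did:web:" prefix, then split the remainder into the knot
// hostname, the collection NSID, and the record key.
func splitPipelineURI(uri string) (knot, collection, rkey string, err error) {
    rest, ok := strings.CutPrefix(uri, "at://did:web:")
    if !ok {
        return "", "", "", fmt.Errorf("not a did:web pipeline URI: %q", uri)
    }
    parts := strings.SplitN(rest, "/", 3)
    if len(parts) != 3 {
        return "", "", "", fmt.Errorf("malformed pipeline URI: %q", uri)
    }
    return parts[0], parts[1], parts[2], nil
}

func main() {
    // Placeholder values for illustration only.
    knot, collection, rkey, err := splitPipelineURI(
        "at://did:web:knot.example.org/sh.tangled.pipeline/3kabc123",
    )
    if err != nil {
        panic(err)
    }
    fmt.Println(knot, collection, rkey)
    // knot.example.org sh.tangled.pipeline 3kabc123
}

And for scale: with the constants as committed, each workflow's fake run amounts to 30s / 5s = 6 "running" heartbeats plus one terminal "success", i.e. seven sh.tangled.pipeline.status records per workflow per pipeline.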
main.go (+4 -2)
···
     br := newBroker(st)

     // Start the knot event-stream consumer first so the jetstream
-    // loop has somewhere to register newly-observed knots into.
-    knots, err := startKnotConsumer(ctx, cfg, st)
+    // loop has somewhere to register newly-observed knots into. It
+    // gets the broker so its (currently fake) pipeline runner can
+    // publish sh.tangled.pipeline.status events back out via /events.
+    knots, err := startKnotConsumer(ctx, cfg, st, br)
     if err != nil {
         logger.Error("failed to start knot consumer", "err", err)
         os.Exit(1)
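
The knot.go comments above say the eventual Buildkite webhook handler will become the primary publisher through this same broker. Purely as a hypothetical shape (the handler name and the decodeBuildkiteEvent helper are invented for illustration; only br.Publish's signature and the PipelineStatus fields are taken from this change, and the snippet assumes it lives in the same package with the usual net/http, encoding/json, fmt, and time imports), it could sit next to this wiring and reuse the exact same publish path:

// Hypothetical sketch, not part of this commit. decodeBuildkiteEvent is
// a made-up stand-in for the real webhook payload parsing.
func buildkiteStatusHandler(br *broker) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        pipelineURI, workflow, status, err := decodeBuildkiteEvent(r)
        if err != nil {
            http.Error(w, err.Error(), http.StatusBadRequest)
            return
        }
        rec := tangled.PipelineStatus{
            LexiconTypeID: tangled.PipelineStatusNSID,
            Pipeline:      pipelineURI,
            Workflow:      workflow,
            Status:        status, // e.g. "running" or "success", per the StatusKind labels
            CreatedAt:     time.Now().UTC().Format(time.RFC3339),
        }
        body, err := json.Marshal(rec)
        if err != nil {
            http.Error(w, err.Error(), http.StatusInternalServerError)
            return
        }
        rkey := fmt.Sprintf("bk-%d", time.Now().UnixNano())
        if _, err := br.Publish(r.Context(), rkey, tangled.PipelineStatusNSID, body); err != nil {
            http.Error(w, err.Error(), http.StatusInternalServerError)
            return
        }
        w.WriteHeader(http.StatusAccepted)
    }
}

Until then, the fake jobs in knot.go exercise the same Publish call, so swapping in real statuses later shouldn't need to touch the broker or the /events side at all.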