···5151//
5252// The logger is pulled from ctx (see log.go); falls back to slog.Default()
5353// if none is attached.
5454-func startJetstream(ctx context.Context, cfg config, st *store) error {
5454+func startJetstream(ctx context.Context, cfg config, st *store, knots KnotConsumer) error {
5555 logger := loggerFrom(ctx).With("component", "jetstream")
56565757 // `wantedCollections` is a server-side filter: jetstream will only send
···6969 clientCfg.WebsocketURL = cfg.JetstreamURL
7070 clientCfg.WantedCollections = collections
71717272- // The handler closes over `st` and `logger` so the scheduler signature
7373- // stays plain `func(ctx, *Event) error` — no need for a method
7474- // receiver or a global.
7272+ // The handler closes over `st`, `knots` and the spindle hostname so
7373+ // the scheduler signature stays plain `func(ctx, *Event) error` and
7474+ // applyCommit can hand the knot consumer new sources as soon as
7575+ // matching repo records arrive.
7576 handler := func(ctx context.Context, evt *jsmodels.Event) error {
7676- return handleJetstreamEvent(ctx, st, evt)
7777+ return handleJetstreamEvent(ctx, st, knots, cfg.Hostname, evt)
7778 }
78797980 // Re-attach the component-scoped logger so handler — which the
···136137// applies the event to the store and advances the persisted cursor. Any
137138// returned error is logged by the scheduler but does not tear down the
138139// connection — the next event will retry the cursor write implicitly.
139139-func handleJetstreamEvent(ctx context.Context, st *store, evt *jsmodels.Event) error {
140140+func handleJetstreamEvent(ctx context.Context, st *store, knots KnotConsumer, hostname string, evt *jsmodels.Event) error {
140141 // We only care about commits, which are the actual record CRUD
141142 // operations on a user's PDS. Account/identity events are ignored
142143 // for now; if we ever care about handle changes we can add them.
···148149 // Dispatch on collection. Unknown collections shouldn't happen given
149150 // our wantedCollections filter, but be defensive — jetstream may
150151 // send schema changes ahead of us updating the filter.
151151- if err := applyCommit(ctx, st, evt); err != nil {
152152+ if err := applyCommit(ctx, st, knots, hostname, evt); err != nil {
152153 logger.Error("apply commit",
153154 "err", err,
154155 "did", evt.Did,
···175176176177// applyCommit routes a commit to the right store mutation based on its
177178// collection NSID and operation.
178178-func applyCommit(ctx context.Context, st *store, evt *jsmodels.Event) error {
179179+func applyCommit(ctx context.Context, st *store, knots KnotConsumer, hostname string, evt *jsmodels.Event) error {
179180 c := evt.Commit
180181 switch c.Collection {
181182 case tangled.SpindleMemberNSID:
182183 return applySpindleMember(ctx, st, evt.Did, c)
183184 case tangled.RepoNSID:
184184- return applyRepo(ctx, st, evt.Did, c)
185185+ return applyRepo(ctx, st, knots, hostname, evt.Did, c)
185186 case tangled.RepoCollaboratorNSID:
186187 return applyRepoCollaborator(ctx, st, evt.Did, c)
187188 default:
···207208 return nil
208209}
209210210210-func applyRepo(ctx context.Context, st *store, did string, c *jsmodels.Commit) error {
211211+func applyRepo(ctx context.Context, st *store, knots KnotConsumer, hostname string, did string, c *jsmodels.Commit) error {
211212 switch c.Operation {
212213 case jsOpCreate, jsOpUpdate:
213214 var rec tangled.Repo
214215 if err := json.Unmarshal(c.Record, &rec); err != nil {
215216 return fmt.Errorf("decode repo: %w", err)
216217 }
217217- return st.UpsertRepo(ctx, did, c.RKey,
218218+ if err := st.UpsertRepo(ctx, did, c.RKey,
218219 rec.Knot, rec.Name,
219220 deref(rec.Spindle), deref(rec.RepoDid),
220221 rec.CreatedAt,
221221- )
222222+ ); err != nil {
223223+ return err
224224+ }
225225+226226+ // If this repo just declared us as its spindle, start (or
227227+ // continue) listening to its knot for pipeline triggers. The
228228+ // knot consumer dedupes on its own so this is safe to call
229229+ // even on update events that don't change the spindle field.
230230+ if knots != nil && rec.Spindle != nil && *rec.Spindle == hostname && rec.Knot != "" {
231231+ knots.AddKnot(ctx, rec.Knot)
232232+ }
233233+234234+ return nil
222235 case jsOpDelete:
236236+ // We don't unsubscribe from the knot here: other repos may
237237+ // still want us to watch it. A periodic reconciliation pass
238238+ // (not yet implemented) is the right place to drop unused
239239+ // subscriptions.
223240 return st.DeleteRepo(ctx, did, c.RKey)
224241 }
225242 return nil
+62-8
jetstream_test.go
···6666 TimeUS: 100,
6767 Kind: jsmodels.EventKindAccount,
6868 }
6969- if err := handleJetstreamEvent(ctx, s, evt); err != nil {
6969+ if err := handleJetstreamEvent(ctx, s, nil, "", evt); err != nil {
7070 t.Fatalf("handle: %v", err)
7171 }
7272 got, err := s.LoadCursor(ctx)
···9090 CreatedAt: "2026-01-01T00:00:00Z",
9191 }
9292 evt := commitEvent(12345, "did:plc:owner", tangled.SpindleMemberNSID, jsOpCreate, "rk1", rec)
9393- if err := handleJetstreamEvent(ctx, s, evt); err != nil {
9393+ if err := handleJetstreamEvent(ctx, s, nil, "", evt); err != nil {
9494 t.Fatalf("handle: %v", err)
9595 }
9696···118118 t.Fatalf("seed: %v", err)
119119 }
120120 evt := commitEvent(99, "did:plc:owner", tangled.SpindleMemberNSID, jsOpDelete, "rk1", nil)
121121- if err := handleJetstreamEvent(ctx, s, evt); err != nil {
121121+ if err := handleJetstreamEvent(ctx, s, nil, "", evt); err != nil {
122122 t.Fatalf("handle: %v", err)
123123 }
124124 if n := countRows(t, s, "spindle_members"); n != 0 {
···144144 CreatedAt: "2026-01-01T00:00:00Z",
145145 }
146146 evt := commitEvent(7, "did:plc:owner", tangled.RepoNSID, jsOpCreate, "repo1", rec)
147147- if err := handleJetstreamEvent(ctx, s, evt); err != nil {
147147+ if err := handleJetstreamEvent(ctx, s, nil, "", evt); err != nil {
148148 t.Fatalf("handle: %v", err)
149149 }
150150···168168 CreatedAt: "2026-01-01T00:00:00Z",
169169 }
170170 evt2 := commitEvent(8, "did:plc:owner", tangled.RepoNSID, jsOpCreate, "repo2", rec2)
171171- if err := handleJetstreamEvent(ctx, s, evt2); err != nil {
171171+ if err := handleJetstreamEvent(ctx, s, nil, "", evt2); err != nil {
172172 t.Fatalf("handle nil-optionals: %v", err)
173173 }
174174 err = s.db.QueryRowContext(ctx,
···197197 CreatedAt: "2026-01-01T00:00:00Z",
198198 }
199199 evt := commitEvent(55, "did:plc:owner", tangled.RepoCollaboratorNSID, jsOpCreate, "c1", rec)
200200- if err := handleJetstreamEvent(ctx, s, evt); err != nil {
200200+ if err := handleJetstreamEvent(ctx, s, nil, "", evt); err != nil {
201201 t.Fatalf("handle: %v", err)
202202 }
203203···223223 ctx := context.Background()
224224225225 evt := commitEvent(42, "did:plc:owner", "app.bsky.feed.post", jsOpCreate, "rk", map[string]string{"text": "hi"})
226226- if err := handleJetstreamEvent(ctx, s, evt); err != nil {
226226+ if err := handleJetstreamEvent(ctx, s, nil, "", evt); err != nil {
227227 t.Fatalf("handle: %v", err)
228228 }
229229 requireCursor(t, s, 42)
···248248 Record: json.RawMessage(`{not valid json`),
249249 },
250250 }
251251- if err := handleJetstreamEvent(ctx, s, evt); err != nil {
251251+ if err := handleJetstreamEvent(ctx, s, nil, "", evt); err != nil {
252252 t.Fatalf("handle should swallow decode error, got: %v", err)
253253 }
254254 if n := countRows(t, s, "spindle_members"); n != 0 {
···256256 }
257257 requireCursor(t, s, 1000)
258258}
259259+260260+// TestRepoEventSubscribesKnotForOurSpindle confirms that observing a
261261+// sh.tangled.repo whose .spindle field equals our hostname results in a
262262+// dynamic AddKnot call. This is the hot path for picking up new repos
263263+// without a tack restart.
264264+func TestRepoEventSubscribesKnotForOurSpindle(t *testing.T) {
265265+ s := newTestStore(t)
266266+ ctx := context.Background()
267267+268268+ const ours = "tack.example"
269269+ spindle := ours
270270+ rec := tangled.Repo{
271271+ Knot: "knot.example",
272272+ Name: "myrepo",
273273+ Spindle: &spindle,
274274+ CreatedAt: "2026-01-01T00:00:00Z",
275275+ }
276276+ evt := commitEvent(1, "did:plc:owner", tangled.RepoNSID, jsOpCreate, "rk", rec)
277277+278278+ fake := &fakeKnotConsumer{}
279279+ if err := handleJetstreamEvent(ctx, s, fake, ours, evt); err != nil {
280280+ t.Fatalf("handle: %v", err)
281281+ }
282282+ added := fake.Added()
283283+ if len(added) != 1 || added[0] != "knot.example" {
284284+ t.Fatalf("AddKnot calls = %v, want [knot.example]", added)
285285+ }
286286+}
287287+288288+// TestRepoEventIgnoresKnotForOtherSpindle confirms repos pointing at a
289289+// *different* spindle do not pull us into watching their knot. Without
290290+// this guard, tack would dial every knot named in any sh.tangled.repo
291291+// it sees over the firehose, which is most of them.
292292+func TestRepoEventIgnoresKnotForOtherSpindle(t *testing.T) {
293293+ s := newTestStore(t)
294294+ ctx := context.Background()
295295+296296+ other := "other-spindle.example"
297297+ rec := tangled.Repo{
298298+ Knot: "knot.example",
299299+ Name: "myrepo",
300300+ Spindle: &other,
301301+ CreatedAt: "2026-01-01T00:00:00Z",
302302+ }
303303+ evt := commitEvent(1, "did:plc:owner", tangled.RepoNSID, jsOpCreate, "rk", rec)
304304+305305+ fake := &fakeKnotConsumer{}
306306+ if err := handleJetstreamEvent(ctx, s, fake, "tack.example", evt); err != nil {
307307+ t.Fatalf("handle: %v", err)
308308+ }
309309+ if added := fake.Added(); len(added) != 0 {
310310+ t.Fatalf("AddKnot calls = %v, want none", added)
311311+ }
312312+}
+187
knot.go
···11+package main
22+33+// Knot event-stream subscriber.
44+//
55+// Tangled knot servers expose a websocket at ws[s]://<knot>/events that
66+// streams JSON-wrapped record events for that knot, including the
77+// sh.tangled.pipeline trigger records that drive CI. Pipeline triggers do
88+// *not* come over the AT Proto firehose (jetstream); the knot publishes
99+// them itself, so as a spindle we have to dial each knot whose repos have
1010+// pointed at us.
1111+//
1212+// We use tangled-core's `eventconsumer` package, which already handles
1313+// per-source connection management, retries, ordered processing and
1414+// cursor tracking. We hand it:
1515+//
1616+// 1. The initial set of knots, derived from previously-observed
1717+// sh.tangled.repo records that named us as their .spindle field.
1818+// 2. A ProcessFunc that, for now, simply logs every received event.
1919+// Once the build pipeline is wired up this is where pipeline
2020+// triggers will be translated into Buildkite builds.
2121+//
2222+// The jetstream consumer also gets a back-reference (via the knotAdder
2323+// interface) so it can dynamically subscribe to a new knot the moment a
2424+// matching sh.tangled.repo record arrives, without waiting for a tack
2525+// restart.
2626+2727+import (
2828+ "context"
2929+ "encoding/json"
3030+ "fmt"
3131+ "log/slog"
3232+3333+ "tangled.org/core/api/tangled"
3434+ "tangled.org/core/eventconsumer"
3535+)
// KnotConsumer is the interface the rest of tack uses to talk to the
// knot event-stream subscriber. It exists primarily as a test seam: the
// fake in knot_fake.go implements it without any network I/O.
//
// Implementations must be safe for concurrent use — AddKnot is invoked
// from the jetstream goroutine while the consumer's worker goroutines
// are independently processing inbound knot events.
type KnotConsumer interface {
	// AddKnot subscribes to the given knot's /events websocket unless a
	// subscription already exists; repeated calls with the same knot are
	// no-ops, and an empty knot string is ignored. The supplied context
	// scopes the dial — cancelling it tears the subscription down.
	AddKnot(ctx context.Context, knot string)
}
5454+5555+// knotConsumer is the production KnotConsumer. It wraps
5656+// eventconsumer.Consumer with the small surface the rest of tack actually
5757+// wants: AddKnot for dynamic subscription, plus a Stop lifecycle hook
5858+// owned by main.
5959+//
6060+// Wrapping (instead of exposing *eventconsumer.Consumer directly) keeps
6161+// callers from importing eventconsumer just to construct a KnotSource,
6262+// and lets us swap or extend the underlying transport later.
6363+type knotConsumer struct {
6464+ c *eventconsumer.Consumer
6565+ log *slog.Logger
6666+}
6767+6868+// Compile-time interface conformance check.
6969+var _ KnotConsumer = (*knotConsumer)(nil)
7070+7171+// startKnotConsumer builds a knot event consumer pre-loaded with every
7272+// knot already known to the store, starts its connection loops in the
7373+// background, and returns the wrapper. The consumer keeps running until
7474+// ctx is cancelled.
7575+//
7676+// Cursor persistence is intentionally in-memory for now: we only log
7777+// events, so re-receiving a few seconds of pipeline triggers after a
7878+// restart is harmless. When we start translating triggers into real
7979+// Buildkite builds, this should switch to a SQLite-backed cursor store
8080+// to avoid duplicate builds.
8181+func startKnotConsumer(ctx context.Context, cfg config, st *store) (*knotConsumer, error) {
8282+ logger := loggerFrom(ctx).With("component", "knotconsumer")
8383+8484+ knots, err := st.KnotsForSpindle(ctx, cfg.Hostname)
8585+ if err != nil {
8686+ return nil, fmt.Errorf("load known knots: %w", err)
8787+ }
8888+8989+ kc := &knotConsumer{log: logger}
9090+9191+ ccfg := eventconsumer.NewConsumerConfig()
9292+ ccfg.Logger = logger
9393+ ccfg.Dev = cfg.Dev
9494+ ccfg.ProcessFunc = kc.process
9595+ for _, k := range knots {
9696+ ccfg.Sources[eventconsumer.NewKnotSource(k)] = struct{}{}
9797+ logger.Info("seeding knot source", "knot", k)
9898+ }
9999+ kc.c = eventconsumer.NewConsumer(*ccfg)
100100+101101+ // Start workers + per-source connection loops. Consumer.Start is
102102+ // non-blocking; the goroutines it spawns observe ctx for shutdown.
103103+ kc.c.Start(ctx)
104104+ logger.Info("knot consumer started", "initial_knots", len(knots))
105105+106106+ return kc, nil
107107+}
108108+109109+// AddKnot subscribes to a knot we hadn't been watching before. Safe to
110110+// call repeatedly: eventconsumer.Consumer.AddSource deduplicates by the
111111+// source's Key (the knot hostname), so passing the same knot twice is a
112112+// no-op.
113113+func (k *knotConsumer) AddKnot(ctx context.Context, knot string) {
114114+ if knot == "" {
115115+ return
116116+ }
117117+ k.log.Info("adding knot source", "knot", knot)
118118+ k.c.AddSource(ctx, eventconsumer.NewKnotSource(knot))
119119+}
120120+121121+// Stop tears down all knot websocket connections and waits for the
122122+// consumer's goroutines to exit. It must be called exactly once.
123123+func (k *knotConsumer) Stop() {
124124+ k.c.Stop()
125125+}
126126+127127+// process is the ProcessFunc handed to eventconsumer. It runs once per
128128+// inbound message, on a worker goroutine. For now we only care about
129129+// pipeline records — everything else is logged at debug and dropped.
130130+//
131131+// Returning an error only logs it (the consumer keeps reading); the
132132+// cursor is advanced before the ProcessFunc runs, so a returned error
133133+// does *not* cause a replay.
134134+func (k *knotConsumer) process(ctx context.Context, src eventconsumer.Source, msg eventconsumer.Message) error {
135135+ switch msg.Nsid {
136136+ case tangled.PipelineNSID:
137137+ var p tangled.Pipeline
138138+ if err := json.Unmarshal(msg.EventJson, &p); err != nil {
139139+ k.log.Error("decode pipeline",
140140+ "err", err,
141141+ "knot", src.Key(),
142142+ "rkey", msg.Rkey,
143143+ )
144144+ return err
145145+ }
146146+147147+ // Pull a couple of fields out of the trigger metadata for log
148148+ // context. They're all optional in the schema, so each one is
149149+ // guarded — we want a noisy log entry, not a nil-deref.
150150+ var (
151151+ triggerKind string
152152+ repoDid string
153153+ repoName string
154154+ )
155155+ if p.TriggerMetadata != nil {
156156+ triggerKind = p.TriggerMetadata.Kind
157157+ if p.TriggerMetadata.Repo != nil {
158158+ repoDid = p.TriggerMetadata.Repo.Did
159159+ if p.TriggerMetadata.Repo.Repo != nil {
160160+ repoName = *p.TriggerMetadata.Repo.Repo
161161+ }
162162+ }
163163+ }
164164+165165+ k.log.Info("pipeline event",
166166+ "knot", src.Key(),
167167+ "rkey", msg.Rkey,
168168+ "trigger", triggerKind,
169169+ "repo_did", repoDid,
170170+ "repo", repoName,
171171+ "workflows", len(p.Workflows),
172172+ )
173173+174174+ default:
175175+ // Knots may publish other record types over the same stream; we
176176+ // don't care about them yet. Debug-only so it's available when
177177+ // chasing "why isn't my pipeline firing" but doesn't drown out
178178+ // info-level logs.
179179+ k.log.Debug("ignored knot event",
180180+ "knot", src.Key(),
181181+ "nsid", msg.Nsid,
182182+ "rkey", msg.Rkey,
183183+ )
184184+ }
185185+186186+ return nil
187187+}
+44
knot_fake.go
···11+package main
22+33+// Test fake for KnotConsumer.
44+//
55+// Lives in a non-_test.go file so it can be referenced from tests across
66+// any future test files (and, if we ever split tack into subpackages, can
77+// be promoted to an exported helper without moving code around).
88+//
99+// It does no I/O: AddKnot just records the knot it was handed so tests
1010+// can assert on the side effect.
1111+1212+import (
1313+ "context"
1414+ "sync"
1515+)
1616+1717+// fakeKnotConsumer is an in-memory KnotConsumer suitable for tests. The
1818+// zero value is ready to use; concurrent calls to AddKnot are safe.
1919+type fakeKnotConsumer struct {
2020+ mu sync.Mutex
2121+ added []string
2222+}
2323+2424+// Compile-time interface conformance check — keeps the fake honest if
2525+// the KnotConsumer surface ever grows a new method.
2626+var _ KnotConsumer = (*fakeKnotConsumer)(nil)
2727+2828+// AddKnot records the knot for later inspection via Added().
2929+func (f *fakeKnotConsumer) AddKnot(_ context.Context, knot string) {
3030+ f.mu.Lock()
3131+ defer f.mu.Unlock()
3232+ f.added = append(f.added, knot)
3333+}
3434+3535+// Added returns a copy of the knots passed to AddKnot, in call order.
3636+// A copy is returned so callers can't accidentally mutate the fake's
3737+// internal slice while comparing.
3838+func (f *fakeKnotConsumer) Added() []string {
3939+ f.mu.Lock()
4040+ defer f.mu.Unlock()
4141+ out := make([]string, len(f.added))
4242+ copy(out, f.added)
4343+ return out
4444+}
+29-2
main.go
···2121// spindle without surprises.
type config struct {
	Addr         string
	Hostname     string
	OwnerDID     string
	JetstreamURL string
	DBPath       string
	// Dev flips the knot event-stream scheme from wss:// to ws://;
	// handy when running against a local knot during development.
	Dev bool
}
28322933func loadConfig() (config, error) {
3034 cfg := config{
3135 Addr: envOr("TACK_LISTEN_ADDR", ":8080"),
3636+ Hostname: os.Getenv("TACK_HOSTNAME"),
3237 OwnerDID: os.Getenv("TACK_OWNER_DID"),
3338 JetstreamURL: envOr("TACK_JETSTREAM_URL", "wss://jetstream1.us-west.bsky.network/subscribe"),
3439 DBPath: envOr("TACK_DB_PATH", "tack.db"),
4040+ Dev: os.Getenv("TACK_DEV") != "",
3541 }
3642 addrFlag := flag.String("addr", cfg.Addr, "HTTP listen address (overrides TACK_LISTEN_ADDR)")
3743 flag.Parse()
···4046 if cfg.OwnerDID == "" {
4147 return cfg, errors.New("TACK_OWNER_DID is required")
4248 }
4949+5050+ // Hostname identifies *us* in sh.tangled.repo records (the .spindle
5151+ // field). Without it we have no way to know which repos point at us
5252+ // and therefore which knots we should subscribe to for pipeline
5353+ // triggers — so we refuse to start rather than silently subscribe to
5454+ // nothing.
5555+ if cfg.Hostname == "" {
5656+ return cfg, errors.New("TACK_HOSTNAME is required")
5757+ }
5858+4359 return cfg, nil
4460}
4561···92108 }()
93109 logger.Info("store open", "path", cfg.DBPath)
941109595- // Start the JetStream listener in the background.
9696- if err := startJetstream(ctx, cfg, st); err != nil {
111111+ // Start the knot event-stream consumer first so the jetstream
112112+ // loop has somewhere to register newly-observed knots into.
113113+ knots, err := startKnotConsumer(ctx, cfg, st)
114114+ if err != nil {
115115+ logger.Error("failed to start knot consumer", "err", err)
116116+ os.Exit(1)
117117+ }
118118+ defer knots.Stop()
119119+120120+ // Start the JetStream listener in the background. It hands the knot
121121+ // consumer any new knot referenced by an incoming sh.tangled.repo
122122+ // record so we don't have to wait for a restart to pick it up.
123123+ if err := startJetstream(ctx, cfg, st, knots); err != nil {
97124 logger.Error("failed to start jetstream consumer", "err", err)
98125 os.Exit(1)
99126 }
+30
store.go
···194194 return nil
195195}
196196197197+// KnotsForSpindle returns the distinct knot hostnames of all repos that
198198+// have declared the given spindle hostname as their CI spindle. The knot
199199+// event-stream subscriber uses this to decide which knots to dial.
200200+//
201201+// Returns an empty slice (not nil) when nothing matches, so callers can
202202+// range over the result without a nil check.
203203+func (s *store) KnotsForSpindle(ctx context.Context, hostname string) ([]string, error) {
204204+ rows, err := s.db.QueryContext(ctx,
205205+ `SELECT DISTINCT knot FROM repos WHERE spindle = ? AND knot <> ''`,
206206+ hostname,
207207+ )
208208+ if err != nil {
209209+ return nil, fmt.Errorf("query knots: %w", err)
210210+ }
211211+ defer rows.Close()
212212+213213+ out := []string{}
214214+ for rows.Next() {
215215+ var k string
216216+ if err := rows.Scan(&k); err != nil {
217217+ return nil, fmt.Errorf("scan knot: %w", err)
218218+ }
219219+ out = append(out, k)
220220+ }
221221+ if err := rows.Err(); err != nil {
222222+ return nil, fmt.Errorf("iterate knots: %w", err)
223223+ }
224224+ return out, nil
225225+}
226226+197227// DeleteRepoCollaborator removes a collaborator record by its ATProto
198228// identity.
199229func (s *store) DeleteRepoCollaborator(ctx context.Context, did, rkey string) error {
+45
store_test.go
···239239 }
240240}
241241242242+// TestKnotsForSpindle verifies the query returns only knots from repos
243243+// whose .spindle field matches the given hostname, and that duplicate
244244+// knots collapse to a single entry.
245245+func TestKnotsForSpindle(t *testing.T) {
246246+ s := newTestStore(t)
247247+ ctx := context.Background()
248248+249249+ const ours = "tack.example"
250250+ const other = "other.example"
251251+252252+ // Two repos on the same knot pointing at us — should collapse to 1.
253253+ if err := s.UpsertRepo(ctx, "did:plc:a", "rk1", "knot1.example", "repo-a", ours, "", "t"); err != nil {
254254+ t.Fatal(err)
255255+ }
256256+ if err := s.UpsertRepo(ctx, "did:plc:b", "rk2", "knot1.example", "repo-b", ours, "", "t"); err != nil {
257257+ t.Fatal(err)
258258+ }
259259+ // A second knot pointing at us.
260260+ if err := s.UpsertRepo(ctx, "did:plc:c", "rk3", "knot2.example", "repo-c", ours, "", "t"); err != nil {
261261+ t.Fatal(err)
262262+ }
263263+ // A repo pointing at a different spindle — must be excluded.
264264+ if err := s.UpsertRepo(ctx, "did:plc:d", "rk4", "knot3.example", "repo-d", other, "", "t"); err != nil {
265265+ t.Fatal(err)
266266+ }
267267+ // A repo with no spindle declared — must be excluded.
268268+ if err := s.UpsertRepo(ctx, "did:plc:e", "rk5", "knot4.example", "repo-e", "", "", "t"); err != nil {
269269+ t.Fatal(err)
270270+ }
271271+272272+ got, err := s.KnotsForSpindle(ctx, ours)
273273+ if err != nil {
274274+ t.Fatalf("KnotsForSpindle: %v", err)
275275+ }
276276+ want := map[string]struct{}{"knot1.example": {}, "knot2.example": {}}
277277+ if len(got) != len(want) {
278278+ t.Fatalf("got %v, want %v", got, want)
279279+ }
280280+ for _, k := range got {
281281+ if _, ok := want[k]; !ok {
282282+ t.Fatalf("unexpected knot %q in %v", k, got)
283283+ }
284284+ }
285285+}
286286+242287// countRows is a small SELECT COUNT(*) helper used by lifecycle tests
243288// to verify deletes actually removed the row. Table name is interpolated
244289// directly because callers pass a constant from the schema, not user