Stitch any CI into Tangled
107
fork

Configure Feed

Select the types of activity you want to include in your feed.

store: order latest buildkite build by monotonic int #7

open opened by mitchellh.com targeting main from push-rpsnoyxktrsu

LookupBuildkiteBuildByTuple sorted on created_at, an RFC3339Nano text column. Lexical comparison of nanosecond timestamps is not reliable: time.Format trims trailing zeros, so an instant on the exact second renders as '...:00Z' while one nanosecond later renders as '...:00.000000001Z' and lex-sorts before it. The practical effect was that /logs could resolve the wrong run for a workflow that had been triggered more than once.

Add a created_unix_ns INTEGER column to buildkite_builds, populate it from time.Now().UnixNano() on insert, and switch the lookup to ORDER BY created_unix_ns DESC with created_at and build_number as deterministic tiebreakers for legacy rows that pre-date the column.

The migration path is covered: an additive ALTER widens existing databases, and a Go-side backfill — run on every startup but touching only rows still at the post-ALTER default of 0 — parses each row's created_at and writes the corresponding UnixNano. Rows whose text fails to parse are left at the default 0 so a single corrupt row cannot wedge startup. New tests in store_migrate_test.go open a hand-crafted pre-migration database through openStore and assert the upgrade is correct, idempotent, and tolerant of bad data.

Labels

None yet.

Assignees

None yet.

Participants 1
AT URI
at://did:plc:onu3oqfahfubgbetlr4giknc/sh.tangled.repo.pull/3mktuvvg2q322
+447 -21
Diff #0
+26 -12
store.go
··· 352 352 // Buildkite-side rebuild that re-fires us) just refreshes the row 353 353 // instead of failing. 354 354 func (s *store) InsertBuildkiteBuild(ctx context.Context, ref BuildkiteBuildRef) error { 355 + // Capture wall-clock and monotonic-friendly forms once so the two 356 + // columns agree on the same instant. created_at is the 357 + // human-readable RFC3339Nano string; created_unix_ns is the 358 + // integer the lookup orders on (text comparison of nanosecond 359 + // timestamps isn't reliable, so we sort on the int instead). 360 + now := time.Now().UTC() 355 361 _, err := s.db.ExecContext(ctx, 356 362 `INSERT INTO buildkite_builds ( 357 363 build_uuid, build_number, pipeline_slug, org, 358 364 knot, pipeline_rkey, workflow, 359 - pipeline_uri, created_at 360 - ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?) 365 + pipeline_uri, created_at, created_unix_ns 366 + ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 361 367 ON CONFLICT(build_uuid) DO UPDATE SET 362 - build_number = excluded.build_number, 363 - pipeline_slug = excluded.pipeline_slug, 364 - org = excluded.org, 365 - knot = excluded.knot, 366 - pipeline_rkey = excluded.pipeline_rkey, 367 - workflow = excluded.workflow, 368 - pipeline_uri = excluded.pipeline_uri, 369 - created_at = excluded.created_at`, 368 + build_number = excluded.build_number, 369 + pipeline_slug = excluded.pipeline_slug, 370 + org = excluded.org, 371 + knot = excluded.knot, 372 + pipeline_rkey = excluded.pipeline_rkey, 373 + workflow = excluded.workflow, 374 + pipeline_uri = excluded.pipeline_uri, 375 + created_at = excluded.created_at, 376 + created_unix_ns = excluded.created_unix_ns`, 370 377 ref.BuildUUID, ref.BuildNumber, ref.PipelineSlug, ref.Org, 371 378 ref.Knot, ref.PipelineRkey, ref.Workflow, 372 - ref.PipelineURI, time.Now().UTC().Format(time.RFC3339Nano), 379 + ref.PipelineURI, now.Format(time.RFC3339Nano), now.UnixNano(), 373 380 ) 374 381 if err != nil { 375 382 return fmt.Errorf("insert buildkite_build: %w", err) ··· 411 418 // 
over time (rebuilds, re-triggers). We always serve logs for the 412 419 // latest run; older runs are still queryable by build UUID directly 413 420 // if anyone ever wants that. 421 + // 422 + // Ordering is on created_unix_ns (a monotonic int) rather than 423 + // created_at. Text comparison of RFC3339Nano timestamps is not 424 + // reliable across nanosecond precision, which used to make this 425 + // query occasionally pick the wrong run. created_at and build_number 426 + // are kept as deterministic tiebreakers for legacy rows that pre-date 427 + // the new column and still scan as 0. 414 428 func (s *store) LookupBuildkiteBuildByTuple(ctx context.Context, knot, pipelineRkey, workflow string) (*BuildkiteBuildRef, error) { 415 429 var ref BuildkiteBuildRef 416 430 err := s.db.QueryRowContext(ctx, ··· 418 432 knot, pipeline_rkey, workflow, pipeline_uri 419 433 FROM buildkite_builds 420 434 WHERE knot = ? AND pipeline_rkey = ? AND workflow = ? 421 - ORDER BY created_at DESC 435 + ORDER BY created_unix_ns DESC, created_at DESC, build_number DESC 422 436 LIMIT 1`, 423 437 knot, pipelineRkey, workflow, 424 438 ).Scan(
+85 -9
store_migrate.go
··· 8 8 "context" 9 9 "fmt" 10 10 "strings" 11 + "time" 11 12 ) 12 13 13 14 // schema is the full set of CREATE statements applied at startup. It is ··· 100 101 -- string means "use the provider's defaultOrg" — that's both the 101 102 -- usual single-org case and what every row written before this 102 103 -- column existed will scan as. 104 + -- created_unix_ns is the monotonic recency key. created_at is kept 105 + -- (RFC3339Nano text) for human-readable inspection, but it must NOT 106 + -- be used for ordering: text comparison of nanosecond timestamps is 107 + -- not reliable, which used to make /logs occasionally resolve the 108 + -- wrong run. created_unix_ns is the integer the latest-build lookup 109 + -- sorts on instead. 103 110 CREATE TABLE IF NOT EXISTS buildkite_builds ( 104 - build_uuid TEXT PRIMARY KEY, 105 - build_number INTEGER NOT NULL, 106 - pipeline_slug TEXT NOT NULL, 107 - org TEXT NOT NULL DEFAULT '', 108 - knot TEXT NOT NULL, 109 - pipeline_rkey TEXT NOT NULL, 110 - workflow TEXT NOT NULL, 111 - pipeline_uri TEXT NOT NULL, 112 - created_at TEXT NOT NULL 111 + build_uuid TEXT PRIMARY KEY, 112 + build_number INTEGER NOT NULL, 113 + pipeline_slug TEXT NOT NULL, 114 + org TEXT NOT NULL DEFAULT '', 115 + knot TEXT NOT NULL, 116 + pipeline_rkey TEXT NOT NULL, 117 + workflow TEXT NOT NULL, 118 + pipeline_uri TEXT NOT NULL, 119 + created_at TEXT NOT NULL, 120 + created_unix_ns INTEGER NOT NULL DEFAULT 0 113 121 ); 114 122 CREATE INDEX IF NOT EXISTS buildkite_builds_lookup 115 123 ON buildkite_builds (knot, pipeline_rkey, workflow); ··· 133 141 // chose. Pre-existing rows scan as empty string, which 134 142 // the provider treats as "use defaultOrg". 135 143 `ALTER TABLE buildkite_builds ADD COLUMN org TEXT NOT NULL DEFAULT ''`, 144 + 145 + // Monotonic integer ordering key for buildkite_builds. 
146 + // Replaces ORDER BY created_at (RFC3339Nano text), whose 147 + // lexical order isn't reliable across nanosecond precision 148 + // and could make /logs resolve the wrong run. Default 0 149 + // covers pre-existing rows; the backfill below promotes 150 + // them to their parsed timestamp so ordering stays stable 151 + // across the upgrade. 152 + `ALTER TABLE buildkite_builds ADD COLUMN created_unix_ns INTEGER NOT NULL DEFAULT 0`, 136 153 } { 137 154 if _, err := s.db.ExecContext(ctx, alter); err != nil { 138 155 if strings.Contains(err.Error(), "duplicate column name") { ··· 141 158 return fmt.Errorf("apply alter %q: %w", alter, err) 142 159 } 143 160 } 161 + 162 + if err := s.backfillBuildkiteCreatedUnixNS(ctx); err != nil { 163 + return fmt.Errorf("backfill buildkite created_unix_ns: %w", err) 164 + } 165 + return nil 166 + } 167 + 168 + // backfillBuildkiteCreatedUnixNS walks every buildkite_builds row whose 169 + // created_unix_ns is still the post-ALTER default (0) and sets it from 170 + // the RFC3339Nano text in created_at. SQLite has no native nanosecond 171 + // parser, so the conversion has to happen in Go. 172 + // 173 + // Rows whose created_at can't be parsed are left at 0; that keeps a 174 + // single corrupt row from blocking startup, and ordering between two 175 + // 0-keyed rows still falls through to created_at as a deterministic 176 + // tiebreaker in the lookup query. 
177 + func (s *store) backfillBuildkiteCreatedUnixNS(ctx context.Context) error { 178 + rows, err := s.db.QueryContext(ctx, 179 + `SELECT build_uuid, created_at FROM buildkite_builds 180 + WHERE created_unix_ns = 0`, 181 + ) 182 + if err != nil { 183 + return fmt.Errorf("query rows to backfill: %w", err) 184 + } 185 + type pending struct { 186 + uuid string 187 + ns int64 188 + } 189 + var todo []pending 190 + for rows.Next() { 191 + var uuid, createdAt string 192 + if err := rows.Scan(&uuid, &createdAt); err != nil { 193 + rows.Close() 194 + return fmt.Errorf("scan row: %w", err) 195 + } 196 + t, perr := time.Parse(time.RFC3339Nano, createdAt) 197 + if perr != nil { 198 + // Skip unparseable rows rather than failing the whole 199 + // migration. The lookup query still has a fallback 200 + // ordering for rows that share the default 0 key. 201 + continue 202 + } 203 + todo = append(todo, pending{uuid: uuid, ns: t.UnixNano()}) 204 + } 205 + if err := rows.Err(); err != nil { 206 + rows.Close() 207 + return fmt.Errorf("iterate rows: %w", err) 208 + } 209 + rows.Close() 210 + 211 + for _, p := range todo { 212 + if _, err := s.db.ExecContext(ctx, 213 + `UPDATE buildkite_builds SET created_unix_ns = ? 214 + WHERE build_uuid = ?`, 215 + p.ns, p.uuid, 216 + ); err != nil { 217 + return fmt.Errorf("update row %q: %w", p.uuid, err) 218 + } 219 + } 144 220 return nil 145 221 }
+242
store_migrate_test.go
··· 1 + package main 2 + 3 + // Migration tests for the SQLite store. These specifically cover the 4 + // upgrade path from a pre-`created_unix_ns` database (the shape every 5 + // production tack.db that pre-dates this commit will have on first 6 + // boot) to the current schema. The fresh-database path is exercised 7 + // implicitly by every other test via newTestStore. These tests are 8 + // about the *transition*. 9 + 10 + import ( 11 + "context" 12 + "database/sql" 13 + "fmt" 14 + "path/filepath" 15 + "testing" 16 + "time" 17 + ) 18 + 19 + // legacyBuildkiteSchema is the buildkite_builds table as it looked 20 + // before created_unix_ns was added. We hand-craft a database in this 21 + // shape so the migration has something realistic to widen and backfill. 22 + // 23 + // Note that org is also absent: the migrate() path adds it via ALTER 24 + // too, so the test doubles as coverage for two stacked column adds 25 + // applying to the same table on the same upgrade. 26 + const legacyBuildkiteSchema = ` 27 + CREATE TABLE buildkite_builds ( 28 + build_uuid TEXT PRIMARY KEY, 29 + build_number INTEGER NOT NULL, 30 + pipeline_slug TEXT NOT NULL, 31 + knot TEXT NOT NULL, 32 + pipeline_rkey TEXT NOT NULL, 33 + workflow TEXT NOT NULL, 34 + pipeline_uri TEXT NOT NULL, 35 + created_at TEXT NOT NULL 36 + ); 37 + ` 38 + 39 + // openLegacyStore opens a brand-new sqlite file, hand-installs the 40 + // pre-migration schema, and seeds it with the supplied rows. It 41 + // returns the file path so the caller can re-open it through the 42 + // real openStore (which runs migrate()) and observe the upgrade. 43 + // 44 + // We deliberately don't go through openStore for the seeding step, 45 + // since that would apply the current schema and defeat the point. 
46 + func openLegacyStore(t *testing.T, rows []legacyRow) string { 47 + t.Helper() 48 + path := filepath.Join(t.TempDir(), "tack.db") 49 + dsn := fmt.Sprintf("file:%s?_journal_mode=WAL&_synchronous=NORMAL&_foreign_keys=on", path) 50 + db, err := sql.Open("sqlite3", dsn) 51 + if err != nil { 52 + t.Fatalf("open legacy db: %v", err) 53 + } 54 + defer db.Close() 55 + 56 + if _, err := db.Exec(legacyBuildkiteSchema); err != nil { 57 + t.Fatalf("install legacy schema: %v", err) 58 + } 59 + for _, r := range rows { 60 + if _, err := db.Exec( 61 + `INSERT INTO buildkite_builds ( 62 + build_uuid, build_number, pipeline_slug, 63 + knot, pipeline_rkey, workflow, 64 + pipeline_uri, created_at 65 + ) VALUES (?, ?, ?, ?, ?, ?, ?, ?)`, 66 + r.uuid, r.buildNumber, "p", 67 + r.knot, r.rkey, r.workflow, 68 + "at://x", r.createdAt, 69 + ); err != nil { 70 + t.Fatalf("seed legacy row %q: %v", r.uuid, err) 71 + } 72 + } 73 + return path 74 + } 75 + 76 + // legacyRow is the minimal set of fields we need to seed for the 77 + // migration tests. Anything unspecified in the legacy schema (org, 78 + // created_unix_ns) is filled in by the migration itself. 79 + type legacyRow struct { 80 + uuid string 81 + knot, rkey, workflow string 82 + buildNumber int64 83 + createdAt string 84 + } 85 + 86 + // TestMigrateAddsAndBackfillsCreatedUnixNS exercises the full upgrade: 87 + // a database in the old schema gets opened through openStore, which 88 + // runs migrate(), which should (a) add the created_unix_ns column and 89 + // (b) populate it from each row's existing created_at text. 
90 + func TestMigrateAddsAndBackfillsCreatedUnixNS(t *testing.T) { 91 + older := time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC) 92 + newer := older.Add(time.Nanosecond) // see store_test.go for why this matters 93 + 94 + path := openLegacyStore(t, []legacyRow{ 95 + { 96 + uuid: "older", knot: "k", rkey: "r", workflow: "w", 97 + buildNumber: 1, createdAt: older.Format(time.RFC3339Nano), 98 + }, 99 + { 100 + uuid: "newer", knot: "k", rkey: "r", workflow: "w", 101 + buildNumber: 2, createdAt: newer.Format(time.RFC3339Nano), 102 + }, 103 + }) 104 + 105 + s, err := openStore(path) 106 + if err != nil { 107 + t.Fatalf("openStore (migrate): %v", err) 108 + } 109 + defer s.Close() 110 + 111 + // Column must now exist and be populated for both seeded rows. 112 + got := map[string]int64{} 113 + rows, err := s.db.Query(`SELECT build_uuid, created_unix_ns FROM buildkite_builds`) 114 + if err != nil { 115 + t.Fatalf("select after migrate: %v", err) 116 + } 117 + for rows.Next() { 118 + var uuid string 119 + var ns int64 120 + if err := rows.Scan(&uuid, &ns); err != nil { 121 + t.Fatalf("scan: %v", err) 122 + } 123 + got[uuid] = ns 124 + } 125 + if err := rows.Err(); err != nil { 126 + t.Fatalf("iterate: %v", err) 127 + } 128 + 129 + if got["older"] != older.UnixNano() { 130 + t.Errorf("older: created_unix_ns = %d; want %d", got["older"], older.UnixNano()) 131 + } 132 + if got["newer"] != newer.UnixNano() { 133 + t.Errorf("newer: created_unix_ns = %d; want %d", got["newer"], newer.UnixNano()) 134 + } 135 + 136 + // And the lookup query (the actual reason the column exists) 137 + // must surface the newer row, which is the case the original bug 138 + // report says was getting it wrong before. 
139 + ref, err := s.LookupBuildkiteBuildByTuple(context.Background(), "k", "r", "w") 140 + if err != nil { 141 + t.Fatalf("lookup: %v", err) 142 + } 143 + if ref == nil || ref.BuildUUID != "newer" { 144 + t.Fatalf("lookup picked %+v; want uuid=newer", ref) 145 + } 146 + } 147 + 148 + // TestMigrateIdempotent makes sure running migrate() repeatedly is 149 + // safe: re-applying the ALTERs returns "duplicate column name" which 150 + // is swallowed, and the backfill sees no rows with created_unix_ns=0 151 + // the second time around so it must not clobber the values written 152 + // on the first pass. 153 + func TestMigrateIdempotent(t *testing.T) { 154 + when := time.Date(2026, 5, 1, 12, 34, 56, 789, time.UTC) 155 + path := openLegacyStore(t, []legacyRow{{ 156 + uuid: "u1", knot: "k", rkey: "r", workflow: "w", 157 + buildNumber: 1, createdAt: when.Format(time.RFC3339Nano), 158 + }}) 159 + 160 + // First open: migrate runs, column is added and backfilled. 161 + s, err := openStore(path) 162 + if err != nil { 163 + t.Fatalf("first openStore: %v", err) 164 + } 165 + 166 + // Run migrate() again on the same handle. Should be a no-op and 167 + // must not return an error from a redundant ALTER. 168 + if err := s.migrate(context.Background()); err != nil { 169 + t.Fatalf("second migrate: %v", err) 170 + } 171 + s.Close() 172 + 173 + // Re-open from scratch (which also re-runs migrate) and confirm 174 + // the backfilled value is still intact. 
175 + s2, err := openStore(path) 176 + if err != nil { 177 + t.Fatalf("re-open: %v", err) 178 + } 179 + defer s2.Close() 180 + 181 + var ns int64 182 + if err := s2.db.QueryRow( 183 + `SELECT created_unix_ns FROM buildkite_builds WHERE build_uuid = ?`, "u1", 184 + ).Scan(&ns); err != nil { 185 + t.Fatalf("read back: %v", err) 186 + } 187 + if ns != when.UnixNano() { 188 + t.Fatalf("created_unix_ns = %d after repeated migrate; want %d", ns, when.UnixNano()) 189 + } 190 + } 191 + 192 + // TestMigrateBackfillSkipsUnparseableCreatedAt confirms the backfill's 193 + // "skip and keep going" behavior for malformed timestamps. A single 194 + // corrupt row must not block startup; the row is left at 0 (the 195 + // post-ALTER default) and the lookup's tiebreaker columns then take 196 + // over for it. 197 + func TestMigrateBackfillSkipsUnparseableCreatedAt(t *testing.T) { 198 + good := time.Date(2026, 7, 8, 9, 10, 11, 12, time.UTC) 199 + path := openLegacyStore(t, []legacyRow{ 200 + { 201 + uuid: "good", knot: "k", rkey: "r", workflow: "w", 202 + buildNumber: 1, createdAt: good.Format(time.RFC3339Nano), 203 + }, 204 + { 205 + uuid: "garbage", knot: "k", rkey: "r", workflow: "w", 206 + buildNumber: 2, createdAt: "not a timestamp", 207 + }, 208 + }) 209 + 210 + s, err := openStore(path) 211 + if err != nil { 212 + // The whole point of swallowing parse errors is that 213 + // migrate() must succeed regardless. If it returns an 214 + // error here, the swallow logic regressed. 
215 + t.Fatalf("openStore must tolerate unparseable created_at: %v", err) 216 + } 217 + defer s.Close() 218 + 219 + values := map[string]int64{} 220 + rows, err := s.db.Query(`SELECT build_uuid, created_unix_ns FROM buildkite_builds`) 221 + if err != nil { 222 + t.Fatalf("select: %v", err) 223 + } 224 + for rows.Next() { 225 + var uuid string 226 + var ns int64 227 + if err := rows.Scan(&uuid, &ns); err != nil { 228 + t.Fatalf("scan: %v", err) 229 + } 230 + values[uuid] = ns 231 + } 232 + if err := rows.Err(); err != nil { 233 + t.Fatalf("iterate: %v", err) 234 + } 235 + 236 + if values["good"] != good.UnixNano() { 237 + t.Errorf("good row: created_unix_ns = %d; want %d", values["good"], good.UnixNano()) 238 + } 239 + if values["garbage"] != 0 { 240 + t.Errorf("garbage row: created_unix_ns = %d; want 0 (left at default)", values["garbage"]) 241 + } 242 + }
+94
store_test.go
··· 12 12 "context" 13 13 "path/filepath" 14 14 "testing" 15 + "time" 15 16 ) 16 17 17 18 // newTestStore opens a fresh store in a per-test temp dir and registers ··· 284 285 } 285 286 } 286 287 288 + // TestLookupBuildkiteBuildByTuplePicksMostRecent verifies that 289 + // LookupBuildkiteBuildByTuple returns the build with the largest 290 + // created_unix_ns even when text-ordering created_at would lie. 291 + // 292 + // The motivating bug: time.Format(RFC3339Nano) trims trailing zeros, 293 + // so an exact-second instant renders as "...:00Z" while one nanosecond 294 + // later renders as "...:00.000000001Z". Lex ordering puts ".000000001Z" 295 + // *before* "Z" because '.' (0x2E) < 'Z' (0x5A), so ORDER BY created_at 296 + // DESC would surface the *earlier* row as the latest build. Sorting 297 + // on created_unix_ns avoids that and is what /logs depends on to 298 + // resolve the right run. 299 + func TestLookupBuildkiteBuildByTuplePicksMostRecent(t *testing.T) { 300 + s := newTestStore(t) 301 + ctx := context.Background() 302 + 303 + // Two rows for the same (knot, rkey, workflow). The "newer" one 304 + // is one nanosecond later but its RFC3339Nano text sorts BEFORE 305 + // the older row's, which is exactly the failure mode we're 306 + // guarding against. 
307 + older := time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC) 308 + newer := older.Add(time.Nanosecond) 309 + 310 + insert := func(uuid string, ts time.Time) { 311 + t.Helper() 312 + _, err := s.db.ExecContext(ctx, 313 + `INSERT INTO buildkite_builds ( 314 + build_uuid, build_number, pipeline_slug, org, 315 + knot, pipeline_rkey, workflow, 316 + pipeline_uri, created_at, created_unix_ns 317 + ) VALUES (?, ?, ?, '', ?, ?, ?, ?, ?, ?)`, 318 + uuid, int64(1), "p", 319 + "k", "r", "w", "at://x", 320 + ts.Format(time.RFC3339Nano), ts.UnixNano(), 321 + ) 322 + if err != nil { 323 + t.Fatalf("insert %s: %v", uuid, err) 324 + } 325 + } 326 + insert("older", older) 327 + insert("newer", newer) 328 + 329 + ref, err := s.LookupBuildkiteBuildByTuple(ctx, "k", "r", "w") 330 + if err != nil { 331 + t.Fatalf("lookup: %v", err) 332 + } 333 + if ref == nil { 334 + t.Fatal("lookup returned nil; want a row") 335 + } 336 + if ref.BuildUUID != "newer" { 337 + t.Fatalf("lookup picked %q; want %q (text-ordering bug regression)", 338 + ref.BuildUUID, "newer") 339 + } 340 + } 341 + 342 + // TestBuildkiteCreatedUnixNSBackfill simulates the upgrade path: rows 343 + // inserted before the column existed (created_unix_ns = 0) get 344 + // promoted to the parsed UnixNano of their created_at by the migration. 345 + // Without the backfill the lookup would tie on 0 across all legacy 346 + // rows and have to fall back to the (still-imperfect) text ordering. 347 + func TestBuildkiteCreatedUnixNSBackfill(t *testing.T) { 348 + s := newTestStore(t) 349 + ctx := context.Background() 350 + 351 + // Pretend this row was written by an older binary: created_at 352 + // is set, created_unix_ns is the post-ALTER default 0. 
353 + want := time.Date(2026, 3, 4, 5, 6, 7, 8, time.UTC) 354 + if _, err := s.db.ExecContext(ctx, 355 + `INSERT INTO buildkite_builds ( 356 + build_uuid, build_number, pipeline_slug, org, 357 + knot, pipeline_rkey, workflow, 358 + pipeline_uri, created_at, created_unix_ns 359 + ) VALUES (?, 1, 'p', '', 'k', 'r', 'w', 'at://x', ?, 0)`, 360 + "legacy", want.Format(time.RFC3339Nano), 361 + ); err != nil { 362 + t.Fatalf("seed legacy row: %v", err) 363 + } 364 + 365 + if err := s.backfillBuildkiteCreatedUnixNS(ctx); err != nil { 366 + t.Fatalf("backfill: %v", err) 367 + } 368 + 369 + var got int64 370 + if err := s.db.QueryRowContext(ctx, 371 + `SELECT created_unix_ns FROM buildkite_builds WHERE build_uuid = ?`, 372 + "legacy", 373 + ).Scan(&got); err != nil { 374 + t.Fatalf("read back: %v", err) 375 + } 376 + if got != want.UnixNano() { 377 + t.Fatalf("created_unix_ns = %d; want %d", got, want.UnixNano()) 378 + } 379 + } 380 + 287 381 // countRows is a small SELECT COUNT(*) helper used by lifecycle tests 288 382 // to verify deletes actually removed the row. Table name is interpolated 289 383 // directly because callers pass a constant from the schema, not user

History

1 round 0 comments
sign up or login to add to the discussion
mitchellh.com submitted #0
1 commit
expand
store: order latest buildkite build by monotonic int
merge conflicts detected
expand
  • store.go:352
  • store_migrate.go:8
  • store_test.go:12
expand 0 comments