Monorepo for Tangled tangled.org
776
fork

Configure Feed

Select the types of activity you want to include in your feed.

appview/db,models: add repo_did columns and update model structs #272

open opened by oyster.cafe targeting master from lt/repo-rename-by-rkey
Labels

None yet.

Assignees

None yet.

Participants 1
AT URI
at://did:plc:3fwecdnvtcscjnrx2p4n7alz/sh.tangled.repo.pull/3mjm6w2jguo22
+642 -100
Diff #0
+419 -13
appview/db/db.go
··· 3 3 import ( 4 4 "context" 5 5 "database/sql" 6 + "fmt" 6 7 "log/slog" 7 8 "strings" 8 9 ··· 116 117 unique(repo_at, issue_id), 117 118 foreign key (repo_at) references repos(at_uri) on delete cascade 118 119 ); 119 - create table if not exists comments ( 120 - id integer primary key autoincrement, 121 - owner_did text not null, 122 - issue_id integer not null, 123 - repo_at text not null, 124 - comment_id integer not null, 125 - comment_at text not null, 126 - body text not null, 127 - created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')), 128 - unique(issue_id, comment_id), 129 - foreign key (repo_at, issue_id) references issues(repo_at, issue_id) on delete cascade 130 - ); 131 120 create table if not exists pulls ( 132 121 -- identifiers 133 122 id integer primary key autoincrement, ··· 676 665 create index if not exists idx_notifications_recipient_read on notifications(recipient_did, read); 677 666 create index if not exists idx_references_from_at on reference_links(from_at); 678 667 create index if not exists idx_references_to_at on reference_links(to_at); 679 - create index if not exists idx_webhooks_repo_at on webhooks(repo_at); 680 668 create index if not exists idx_webhook_deliveries_webhook_id on webhook_deliveries(webhook_id); 681 669 create index if not exists idx_site_deploys_repo_at on site_deploys(repo_at); 682 670 create index if not exists idx_newsletter_prefs_user_did on newsletter_preferences(user_did); ··· 1429 1417 return err 1430 1418 }) 1431 1419 1420 + orm.RunMigration(conn, logger, "add-repo-renames", func(tx *sql.Tx) error { 1421 + res, err := tx.Exec(` 1422 + update repos 1423 + set name = name || '-renamed-' || id || '-' || lower(hex(randomblob(4))) 1424 + where id in ( 1425 + select id from ( 1426 + select id, row_number() over ( 1427 + partition by did, knot, name 1428 + order by created desc, id desc 1429 + ) as rn 1430 + from repos 1431 + ) where rn > 1 1432 + ); 1433 + `) 1434 + if err != nil { 1435 + return err 
1436 + } 1437 + if n, _ := res.RowsAffected(); n > 0 { 1438 + logger.Warn("suffixed legacy duplicate repo names before adding unique index", "rows", n) 1439 + } 1440 + 1441 + var remaining int 1442 + if err := tx.QueryRow(` 1443 + select count(*) from ( 1444 + select 1 from repos group by did, knot, name having count(*) > 1 1445 + ) 1446 + `).Scan(&remaining); err != nil { 1447 + return fmt.Errorf("checking for residual duplicate (did, knot, name) groups: %w", err) 1448 + } 1449 + if remaining > 0 { 1450 + return fmt.Errorf("add-repo-renames: %d duplicate (did, knot, name) groups remain after suffix pass; manual cleanup required before unique index can be created", remaining) 1451 + } 1452 + 1453 + _, err = tx.Exec(` 1454 + create table if not exists repo_renames ( 1455 + owner_did text not null, 1456 + old_rkey text not null, 1457 + repo_did text not null, 1458 + renamed_at text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')), 1459 + primary key (owner_did, old_rkey) 1460 + ); 1461 + create unique index if not exists idx_repos_owner_knot_name 1462 + on repos(did, knot, name); 1463 + `) 1464 + return err 1465 + }) 1466 + 1467 + orm.RunMigration(conn, logger, "repos-canonical-rkey-uniqueness", func(tx *sql.Tx) error { 1468 + _, err := tx.Exec(` 1469 + drop index if exists idx_repos_owner_knot_name; 1470 + create unique index if not exists idx_repos_did_rkey 1471 + on repos(did, rkey); 1472 + `) 1473 + return err 1474 + }) 1475 + 1476 + orm.RunMigration(conn, logger, "repo-did-references", func(tx *sql.Tx) error { 1477 + tables := []struct{ table, oldCol, newCol string }{ 1478 + {"issues", "repo_at", "repo_did"}, 1479 + {"pulls", "repo_at", "repo_did"}, 1480 + {"pull_comments", "repo_at", "repo_did"}, 1481 + {"stars", "subject_at", "subject_did"}, 1482 + {"artifacts", "repo_at", "repo_did"}, 1483 + {"webhooks", "repo_at", "repo_did"}, 1484 + {"repo_sites", "repo_at", "repo_did"}, 1485 + {"site_deploys", "repo_at", "repo_did"}, 1486 + {"collaborators", 
"repo_at", "repo_did"}, 1487 + {"repo_issue_seqs", "repo_at", "repo_did"}, 1488 + {"repo_pull_seqs", "repo_at", "repo_did"}, 1489 + {"repo_languages", "repo_at", "repo_did"}, 1490 + {"repo_labels", "repo_at", "repo_did"}, 1491 + } 1492 + 1493 + stmts := "" 1494 + for _, t := range tables { 1495 + stmts += fmt.Sprintf( 1496 + `ALTER TABLE %s ADD COLUMN %s TEXT; 1497 + UPDATE %s SET %s = (SELECT repos.repo_did FROM repos WHERE repos.at_uri = %s.%s); 1498 + CREATE INDEX IF NOT EXISTS idx_%s_%s ON %s(%s); 1499 + `, t.table, t.newCol, t.table, t.newCol, t.table, t.oldCol, t.table, t.newCol, t.table, t.newCol) 1500 + } 1501 + 1502 + stmts += `ALTER TABLE pulls ADD COLUMN source_repo_did TEXT; 1503 + UPDATE pulls SET source_repo_did = (SELECT repos.repo_did FROM repos WHERE repos.at_uri = pulls.source_repo_at); 1504 + 1505 + UPDATE profile_pinned_repositories SET pin = ( 1506 + SELECT repos.repo_did FROM repos WHERE repos.at_uri = profile_pinned_repositories.pin 1507 + ) WHERE pin LIKE 'at://%' 1508 + AND EXISTS (SELECT 1 FROM repos WHERE repos.at_uri = profile_pinned_repositories.pin AND repos.repo_did IS NOT NULL AND repos.repo_did != ''); 1509 + ` 1510 + 1511 + _, err := tx.Exec(stmts) 1512 + return err 1513 + }) 1514 + 1515 + orm.RunMigration(conn, logger, "drop-old-repo-at-from-pds-rewrite-status", func(tx *sql.Tx) error { 1516 + _, err := tx.Exec(`ALTER TABLE pds_rewrite_status DROP COLUMN old_repo_at`) 1517 + return err 1518 + }) 1519 + 1520 + orm.RunMigration(conn, logger, "backfill-pds-rewrites-star-issue-pull-collab", func(tx *sql.Tx) error { 1521 + type source struct { 1522 + userDidCol string 1523 + table string 1524 + nsid string 1525 + fkCol string 1526 + } 1527 + sources := []source{ 1528 + {"did", "stars", "sh.tangled.feed.star", "subject_at"}, 1529 + {"did", "issues", "sh.tangled.repo.issue", "repo_at"}, 1530 + {"owner_did", "pulls", "sh.tangled.repo.pull", "repo_at"}, 1531 + {"did", "collaborators", "sh.tangled.repo.collaborator", "repo_at"}, 1532 + } 
1533 + 1534 + for _, src := range sources { 1535 + _, err := tx.Exec(fmt.Sprintf(` 1536 + INSERT INTO pds_rewrite_status (user_did, repo_did, record_nsid, record_rkey, status) 1537 + SELECT t.%s, r.repo_did, '%s', t.rkey, 'pending' 1538 + FROM %s t 1539 + JOIN repos r ON r.at_uri = t.%s 1540 + WHERE r.repo_did IS NOT NULL AND r.repo_did != '' 1541 + ON CONFLICT(user_did, record_nsid, record_rkey) DO NOTHING 1542 + `, src.userDidCol, src.nsid, src.table, src.fkCol)) 1543 + if err != nil { 1544 + return fmt.Errorf("backfill pds rewrites for %s: %w", src.table, err) 1545 + } 1546 + } 1547 + 1548 + return nil 1549 + }) 1550 + 1551 + orm.RunMigration(conn, logger, "backfill-pds-rewrites-profiles", func(tx *sql.Tx) error { 1552 + _, err := tx.Exec(` 1553 + INSERT INTO pds_rewrite_status (user_did, repo_did, record_nsid, record_rkey, status) 1554 + SELECT DISTINCT pp.did, r.repo_did, 'sh.tangled.actor.profile', 'self', 'pending' 1555 + FROM profile_pinned_repositories pp 1556 + JOIN repos r ON r.at_uri = pp.pin 1557 + WHERE pp.pin LIKE 'at://%' 1558 + AND r.repo_did IS NOT NULL AND r.repo_did != '' 1559 + ON CONFLICT(user_did, record_nsid, record_rkey) DO NOTHING 1560 + `) 1561 + if err != nil { 1562 + return fmt.Errorf("backfill pds rewrites for profiles: %w", err) 1563 + } 1564 + return nil 1565 + }) 1566 + 1567 + conn.ExecContext(ctx, "pragma foreign_keys = off;") 1568 + orm.RunMigration(conn, logger, "drop-old-at-uri-columns", func(tx *sql.Tx) error { 1569 + _, err := tx.Exec(` 1570 + CREATE TABLE repos_new ( 1571 + id INTEGER PRIMARY KEY AUTOINCREMENT, 1572 + did TEXT NOT NULL, 1573 + name TEXT NOT NULL, 1574 + knot TEXT NOT NULL, 1575 + rkey TEXT NOT NULL, 1576 + at_uri TEXT NOT NULL UNIQUE, 1577 + created TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')), 1578 + description TEXT CHECK (length(description) <= 200), 1579 + source TEXT, 1580 + spindle TEXT, 1581 + website TEXT, 1582 + topics TEXT, 1583 + repo_did TEXT, 1584 + UNIQUE(did, rkey) 1585 + ); 
1586 + INSERT INTO repos_new (id, did, name, knot, rkey, at_uri, created, description, source, spindle, website, topics, repo_did) 1587 + SELECT id, did, name, knot, rkey, at_uri, created, description, source, spindle, website, topics, repo_did 1588 + FROM repos; 1589 + DROP TABLE repos; 1590 + ALTER TABLE repos_new RENAME TO repos; 1591 + CREATE UNIQUE INDEX idx_repos_repo_did ON repos(repo_did); 1592 + CREATE UNIQUE INDEX idx_repos_did_rkey ON repos(did, rkey); 1593 + 1594 + CREATE TABLE issues_new ( 1595 + id INTEGER PRIMARY KEY AUTOINCREMENT, 1596 + did TEXT NOT NULL, 1597 + rkey TEXT NOT NULL, 1598 + at_uri TEXT GENERATED ALWAYS AS ('at://' || did || '/' || 'sh.tangled.repo.issue' || '/' || rkey) STORED, 1599 + repo_did TEXT NOT NULL, 1600 + issue_id INTEGER NOT NULL, 1601 + title TEXT NOT NULL, 1602 + body TEXT NOT NULL, 1603 + open INTEGER NOT NULL DEFAULT 1, 1604 + created TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')), 1605 + edited TEXT, 1606 + deleted TEXT, 1607 + UNIQUE(did, rkey), 1608 + UNIQUE(repo_did, issue_id), 1609 + UNIQUE(at_uri), 1610 + FOREIGN KEY (repo_did) REFERENCES repos(repo_did) ON DELETE CASCADE 1611 + ); 1612 + INSERT INTO issues_new (id, did, rkey, repo_did, issue_id, title, body, open, created, edited, deleted) 1613 + SELECT id, did, rkey, repo_did, issue_id, title, body, open, created, edited, deleted 1614 + FROM issues WHERE repo_did IS NOT NULL AND repo_did != ''; 1615 + DROP TABLE issues; 1616 + ALTER TABLE issues_new RENAME TO issues; 1617 + CREATE INDEX idx_issues_repo_did ON issues(repo_did); 1618 + 1619 + CREATE TABLE pulls_new ( 1620 + id INTEGER PRIMARY KEY AUTOINCREMENT, 1621 + pull_id INTEGER NOT NULL, 1622 + at_uri TEXT GENERATED ALWAYS AS ('at://' || owner_did || '/' || 'sh.tangled.repo.pull' || '/' || rkey) STORED, 1623 + repo_did TEXT NOT NULL, 1624 + owner_did TEXT NOT NULL, 1625 + rkey TEXT NOT NULL, 1626 + title TEXT NOT NULL, 1627 + body TEXT NOT NULL, 1628 + target_branch TEXT NOT NULL, 1629 + 
state INTEGER NOT NULL DEFAULT 0 CHECK (state IN (0, 1, 2, 3)), 1630 + source_branch TEXT, 1631 + source_repo_did TEXT, 1632 + change_id TEXT, 1633 + dependent_on TEXT, 1634 + created TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')), 1635 + UNIQUE(repo_did, pull_id), 1636 + UNIQUE(at_uri), 1637 + FOREIGN KEY (repo_did) REFERENCES repos(repo_did) ON DELETE CASCADE 1638 + ); 1639 + INSERT INTO pulls_new (id, pull_id, repo_did, owner_did, rkey, title, body, target_branch, state, source_branch, source_repo_did, change_id, dependent_on, created) 1640 + SELECT id, pull_id, repo_did, owner_did, rkey, title, body, target_branch, state, source_branch, source_repo_did, change_id, dependent_on, created 1641 + FROM pulls WHERE repo_did IS NOT NULL AND repo_did != ''; 1642 + DROP TABLE pulls; 1643 + ALTER TABLE pulls_new RENAME TO pulls; 1644 + CREATE INDEX idx_pulls_repo_did ON pulls(repo_did); 1645 + CREATE INDEX idx_pulls_source_repo_did ON pulls(source_repo_did); 1646 + 1647 + CREATE TABLE pull_comments_new ( 1648 + id INTEGER PRIMARY KEY AUTOINCREMENT, 1649 + pull_id INTEGER NOT NULL, 1650 + submission_id INTEGER NOT NULL, 1651 + repo_did TEXT NOT NULL, 1652 + owner_did TEXT NOT NULL, 1653 + comment_at TEXT NOT NULL, 1654 + body TEXT NOT NULL, 1655 + created TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')), 1656 + FOREIGN KEY (repo_did, pull_id) REFERENCES pulls(repo_did, pull_id) ON DELETE CASCADE, 1657 + FOREIGN KEY (submission_id) REFERENCES pull_submissions(id) ON DELETE CASCADE 1658 + ); 1659 + INSERT INTO pull_comments_new (id, pull_id, submission_id, repo_did, owner_did, comment_at, body, created) 1660 + SELECT id, pull_id, submission_id, repo_did, owner_did, comment_at, body, created 1661 + FROM pull_comments WHERE repo_did IS NOT NULL AND repo_did != ''; 1662 + DROP TABLE pull_comments; 1663 + ALTER TABLE pull_comments_new RENAME TO pull_comments; 1664 + CREATE INDEX idx_pull_comments_repo_did ON pull_comments(repo_did); 1665 + 1666 + 
CREATE TABLE stars_new ( 1667 + id INTEGER PRIMARY KEY AUTOINCREMENT, 1668 + did TEXT NOT NULL, 1669 + rkey TEXT NOT NULL, 1670 + subject_type TEXT NOT NULL CHECK (subject_type IN ('repo', 'string')), 1671 + subject TEXT NOT NULL, 1672 + created TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')), 1673 + UNIQUE(did, rkey), 1674 + UNIQUE(did, subject) 1675 + ); 1676 + INSERT INTO stars_new (id, did, rkey, subject_type, subject, created) 1677 + SELECT id, did, rkey, 'repo', subject_did, created 1678 + FROM stars 1679 + WHERE subject_did IS NOT NULL AND subject_did != ''; 1680 + INSERT OR IGNORE INTO stars_new (id, did, rkey, subject_type, subject, created) 1681 + SELECT id, did, rkey, 'string', subject_at, created 1682 + FROM stars 1683 + WHERE (subject_did IS NULL OR subject_did = '') 1684 + AND subject_at LIKE 'at://%/sh.tangled.string/%'; 1685 + DROP TABLE stars; 1686 + ALTER TABLE stars_new RENAME TO stars; 1687 + CREATE INDEX idx_stars_subject ON stars(subject); 1688 + CREATE INDEX idx_stars_subject_type ON stars(subject_type); 1689 + CREATE INDEX idx_stars_created ON stars(created); 1690 + 1691 + CREATE TABLE collaborators_new ( 1692 + id INTEGER PRIMARY KEY AUTOINCREMENT, 1693 + did TEXT NOT NULL, 1694 + rkey TEXT, 1695 + subject_did TEXT NOT NULL, 1696 + repo_did TEXT NOT NULL, 1697 + created TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')), 1698 + UNIQUE(did, rkey), 1699 + FOREIGN KEY (repo_did) REFERENCES repos(repo_did) ON DELETE CASCADE 1700 + ); 1701 + INSERT INTO collaborators_new (id, did, rkey, subject_did, repo_did, created) 1702 + SELECT id, did, NULLIF(rkey, ''), subject_did, repo_did, created 1703 + FROM collaborators WHERE repo_did IS NOT NULL AND repo_did != ''; 1704 + DROP TABLE collaborators; 1705 + ALTER TABLE collaborators_new RENAME TO collaborators; 1706 + CREATE INDEX idx_collaborators_repo_did ON collaborators(repo_did); 1707 + 1708 + CREATE TABLE artifacts_new ( 1709 + id INTEGER PRIMARY KEY AUTOINCREMENT, 1710 + 
did TEXT NOT NULL, 1711 + rkey TEXT NOT NULL, 1712 + repo_did TEXT NOT NULL, 1713 + tag BINARY(20) NOT NULL, 1714 + created TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')), 1715 + blob_cid TEXT NOT NULL, 1716 + name TEXT NOT NULL, 1717 + size INTEGER NOT NULL DEFAULT 0, 1718 + mimetype TEXT NOT NULL DEFAULT '*/*', 1719 + UNIQUE(did, rkey), 1720 + UNIQUE(repo_did, tag, name), 1721 + FOREIGN KEY (repo_did) REFERENCES repos(repo_did) ON DELETE CASCADE 1722 + ); 1723 + INSERT INTO artifacts_new (id, did, rkey, repo_did, tag, created, blob_cid, name, size, mimetype) 1724 + SELECT id, did, rkey, repo_did, tag, created, blob_cid, name, size, mimetype 1725 + FROM artifacts WHERE repo_did IS NOT NULL AND repo_did != ''; 1726 + DROP TABLE artifacts; 1727 + ALTER TABLE artifacts_new RENAME TO artifacts; 1728 + CREATE INDEX idx_artifacts_repo_did ON artifacts(repo_did); 1729 + 1730 + CREATE TABLE webhooks_new ( 1731 + id INTEGER PRIMARY KEY AUTOINCREMENT, 1732 + repo_did TEXT NOT NULL, 1733 + url TEXT NOT NULL, 1734 + secret TEXT, 1735 + active INTEGER NOT NULL DEFAULT 1, 1736 + events TEXT NOT NULL, 1737 + created_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')), 1738 + updated_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')), 1739 + FOREIGN KEY (repo_did) REFERENCES repos(repo_did) ON DELETE CASCADE 1740 + ); 1741 + INSERT INTO webhooks_new (id, repo_did, url, secret, active, events, created_at, updated_at) 1742 + SELECT id, repo_did, url, secret, active, events, created_at, updated_at 1743 + FROM webhooks WHERE repo_did IS NOT NULL AND repo_did != ''; 1744 + DROP TABLE webhooks; 1745 + ALTER TABLE webhooks_new RENAME TO webhooks; 1746 + CREATE INDEX idx_webhooks_repo_did ON webhooks(repo_did); 1747 + 1748 + CREATE TABLE repo_sites_new ( 1749 + id INTEGER PRIMARY KEY AUTOINCREMENT, 1750 + repo_did TEXT NOT NULL UNIQUE, 1751 + branch TEXT NOT NULL, 1752 + dir TEXT NOT NULL DEFAULT '/', 1753 + is_index INTEGER NOT NULL DEFAULT 0, 
1754 + created TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')), 1755 + updated TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')), 1756 + FOREIGN KEY (repo_did) REFERENCES repos(repo_did) ON DELETE CASCADE 1757 + ); 1758 + INSERT INTO repo_sites_new (id, repo_did, branch, dir, is_index, created, updated) 1759 + SELECT id, repo_did, branch, dir, is_index, created, updated 1760 + FROM repo_sites WHERE repo_did IS NOT NULL AND repo_did != ''; 1761 + DROP TABLE repo_sites; 1762 + ALTER TABLE repo_sites_new RENAME TO repo_sites; 1763 + 1764 + CREATE TABLE site_deploys_new ( 1765 + id INTEGER PRIMARY KEY AUTOINCREMENT, 1766 + repo_did TEXT NOT NULL, 1767 + branch TEXT NOT NULL, 1768 + dir TEXT NOT NULL DEFAULT '/', 1769 + commit_sha TEXT NOT NULL DEFAULT '', 1770 + status TEXT NOT NULL CHECK (status IN ('success', 'failure')), 1771 + trigger TEXT NOT NULL CHECK (trigger IN ('config_change', 'push')), 1772 + error TEXT NOT NULL DEFAULT '', 1773 + created_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')), 1774 + FOREIGN KEY (repo_did) REFERENCES repos(repo_did) ON DELETE CASCADE 1775 + ); 1776 + INSERT INTO site_deploys_new (id, repo_did, branch, dir, commit_sha, status, trigger, error, created_at) 1777 + SELECT id, repo_did, branch, dir, commit_sha, status, trigger, error, created_at 1778 + FROM site_deploys WHERE repo_did IS NOT NULL AND repo_did != ''; 1779 + DROP TABLE site_deploys; 1780 + ALTER TABLE site_deploys_new RENAME TO site_deploys; 1781 + CREATE INDEX idx_site_deploys_repo_did ON site_deploys(repo_did); 1782 + 1783 + CREATE TABLE repo_issue_seqs_new ( 1784 + repo_did TEXT PRIMARY KEY, 1785 + next_issue_id INTEGER NOT NULL DEFAULT 1, 1786 + FOREIGN KEY (repo_did) REFERENCES repos(repo_did) ON DELETE CASCADE 1787 + ); 1788 + INSERT INTO repo_issue_seqs_new (repo_did, next_issue_id) 1789 + SELECT repo_did, next_issue_id 1790 + FROM repo_issue_seqs WHERE repo_did IS NOT NULL AND repo_did != ''; 1791 + DROP TABLE 
repo_issue_seqs; 1792 + ALTER TABLE repo_issue_seqs_new RENAME TO repo_issue_seqs; 1793 + 1794 + CREATE TABLE repo_pull_seqs_new ( 1795 + repo_did TEXT PRIMARY KEY, 1796 + next_pull_id INTEGER NOT NULL DEFAULT 1, 1797 + FOREIGN KEY (repo_did) REFERENCES repos(repo_did) ON DELETE CASCADE 1798 + ); 1799 + INSERT INTO repo_pull_seqs_new (repo_did, next_pull_id) 1800 + SELECT repo_did, next_pull_id 1801 + FROM repo_pull_seqs WHERE repo_did IS NOT NULL AND repo_did != ''; 1802 + DROP TABLE repo_pull_seqs; 1803 + ALTER TABLE repo_pull_seqs_new RENAME TO repo_pull_seqs; 1804 + 1805 + CREATE TABLE repo_languages_new ( 1806 + id INTEGER PRIMARY KEY AUTOINCREMENT, 1807 + repo_did TEXT NOT NULL, 1808 + ref TEXT NOT NULL, 1809 + is_default_ref INTEGER NOT NULL DEFAULT 0, 1810 + language TEXT NOT NULL, 1811 + bytes INTEGER NOT NULL CHECK (bytes >= 0), 1812 + UNIQUE(repo_did, ref, language), 1813 + FOREIGN KEY (repo_did) REFERENCES repos(repo_did) ON DELETE CASCADE 1814 + ); 1815 + INSERT INTO repo_languages_new (id, repo_did, ref, is_default_ref, language, bytes) 1816 + SELECT id, repo_did, ref, is_default_ref, language, bytes 1817 + FROM repo_languages WHERE repo_did IS NOT NULL AND repo_did != ''; 1818 + DROP TABLE repo_languages; 1819 + ALTER TABLE repo_languages_new RENAME TO repo_languages; 1820 + 1821 + CREATE TABLE repo_labels_new ( 1822 + id INTEGER PRIMARY KEY AUTOINCREMENT, 1823 + repo_did TEXT NOT NULL, 1824 + label_at TEXT NOT NULL, 1825 + UNIQUE(repo_did, label_at), 1826 + FOREIGN KEY (repo_did) REFERENCES repos(repo_did) ON DELETE CASCADE 1827 + ); 1828 + INSERT INTO repo_labels_new (id, repo_did, label_at) 1829 + SELECT id, repo_did, label_at 1830 + FROM repo_labels WHERE repo_did IS NOT NULL AND repo_did != ''; 1831 + DROP TABLE repo_labels; 1832 + ALTER TABLE repo_labels_new RENAME TO repo_labels; 1833 + `) 1834 + return err 1835 + }) 1836 + conn.ExecContext(ctx, "pragma foreign_keys = on;") 1837 + 1432 1838 return &DB{ 1433 1839 db, 1434 1840 logger,
+1 -1
appview/models/artifact.go
··· 15 15 Did string 16 16 Rkey string 17 17 18 - RepoAt syntax.ATURI 18 + RepoDid syntax.DID 19 19 Tag plumbing.Hash 20 20 CreatedAt time.Time 21 21
+1 -1
appview/models/collaborator.go
··· 14 14 15 15 // content 16 16 SubjectDid syntax.DID 17 - RepoAt syntax.ATURI 17 + RepoDid syntax.DID 18 18 19 19 // meta 20 20 Created time.Time
+3 -12
appview/models/issue.go
··· 13 13 Id int64 14 14 Did string 15 15 Rkey string 16 - RepoAt syntax.ATURI 16 + RepoDid syntax.DID 17 17 IssueId int 18 18 Created time.Time 19 19 Edited *time.Time ··· 44 44 for i, uri := range i.References { 45 45 references[i] = string(uri) 46 46 } 47 - repoAtStr := i.RepoAt.String() 48 47 rec := tangled.RepoIssue{ 49 - Repo: &repoAtStr, 48 + Repo: string(i.RepoDid), 50 49 Title: i.Title, 51 50 Body: &i.Body, 52 51 Mentions: mentions, 53 52 References: references, 54 53 CreatedAt: i.Created.Format(time.RFC3339), 55 54 } 56 - if i.Repo != nil && i.Repo.RepoDid != "" { 57 - rec.RepoDid = &i.Repo.RepoDid 58 - } 59 55 return rec 60 56 } 61 57 ··· 166 162 body = *record.Body 167 163 } 168 164 169 - var repoAt syntax.ATURI 170 - if record.Repo != nil { 171 - repoAt = syntax.ATURI(*record.Repo) 172 - } 173 - 174 165 return Issue{ 175 - RepoAt: repoAt, 166 + RepoDid: syntax.DID(record.Repo), 176 167 Did: did, 177 168 Rkey: rkey, 178 169 Created: created,
+2 -4
appview/models/language.go
··· 1 1 package models 2 2 3 - import ( 4 - "github.com/bluesky-social/indigo/atproto/syntax" 5 - ) 3 + import "github.com/bluesky-social/indigo/atproto/syntax" 6 4 7 5 type RepoLanguage struct { 8 6 Id int64 9 - RepoAt syntax.ATURI 7 + RepoDid syntax.DID 10 8 Ref string 11 9 IsDefaultRef bool 12 10 Language string
+23 -48
appview/models/pull.go
··· 61 61 PullId int 62 62 63 63 // at ids 64 - RepoAt syntax.ATURI 64 + RepoDid syntax.DID 65 65 OwnerDid string 66 66 Rkey string 67 67 ··· 97 97 references[i] = string(uri) 98 98 } 99 99 100 - var targetRepoAt, targetRepoDid *string 101 - targetRepoAt = new(string) 102 - *targetRepoAt = p.RepoAt.String() 103 - if p.Repo != nil && p.Repo.RepoDid != "" { 104 - targetRepoDid = new(string) 105 - *targetRepoDid = p.Repo.RepoDid 106 - } 107 - 108 100 rounds := make([]*tangled.RepoPull_Round, len(p.Submissions)) 109 101 for i, submission := range p.Submissions { 110 102 rounds[i] = submission.AsRecord() ··· 123 115 References: references, 124 116 CreatedAt: p.Created.Format(time.RFC3339), 125 117 Target: &tangled.RepoPull_Target{ 126 - Repo: targetRepoAt, 127 - RepoDid: targetRepoDid, 128 - Branch: p.TargetBranch, 118 + Repo: string(p.RepoDid), 119 + Branch: p.TargetBranch, 129 120 }, 130 121 Rounds: rounds, 131 122 Source: p.PullSource.AsRecord(), ··· 151 142 } 152 143 } 153 144 154 - var targetRepoAt syntax.ATURI 145 + var targetRepoDid syntax.DID 155 146 var targetBranch string 156 147 if record.Target != nil { 157 - if record.Target.Repo != nil { 158 - uri, err := syntax.ParseATURI(*record.Target.Repo) 159 - if err != nil { 160 - return nil, fmt.Errorf("invalid target.repo aturi: %w", err) 161 - } 162 - targetRepoAt = uri 148 + did, err := syntax.ParseDID(record.Target.Repo) 149 + if err != nil { 150 + return nil, fmt.Errorf("invalid target.repo did: %w", err) 163 151 } 152 + targetRepoDid = did 164 153 targetBranch = record.Target.Branch 165 154 } 166 155 ··· 171 160 } 172 161 173 162 if record.Source.Repo != nil { 174 - uri, err := syntax.ParseATURI(*record.Source.Repo) 163 + did, err := syntax.ParseDID(*record.Source.Repo) 175 164 if err != nil { 176 - return nil, fmt.Errorf("invalid source.repo aturi: %w", err) 177 - } 178 - pullSource.RepoAt = &uri 179 - } 180 - if record.Source.RepoDid != nil { 181 - did, err := syntax.ParseDID(*record.Source.RepoDid) 182 - 
if err != nil { 183 - return nil, fmt.Errorf("invalid source.repoDid did: %w", err) 165 + return nil, fmt.Errorf("invalid source.repo did: %w", err) 184 166 } 185 167 pullSource.RepoDid = &did 186 168 } ··· 209 191 } 210 192 211 193 return &Pull{ 212 - RepoAt: targetRepoAt, 194 + RepoDid: targetRepoDid, 213 195 OwnerDid: did, 214 196 Rkey: rkey, 215 197 Title: record.Title, ··· 260 242 261 243 type PullSource struct { 262 244 Branch string 263 - RepoAt *syntax.ATURI 264 245 RepoDid *syntax.DID 265 246 266 247 // optionally populate this for reverse mappings ··· 271 252 if s == nil { 272 253 return nil 273 254 } 274 - var repoAt, repoDid *string 275 - if s.RepoAt != nil { 276 - repoAt = new(string) 277 - *repoAt = s.RepoAt.String() 278 - } 255 + var repo *string 279 256 if s.RepoDid != nil { 280 - repoDid = new(string) 281 - *repoDid = s.RepoDid.String() 257 + r := s.RepoDid.String() 258 + repo = &r 282 259 } 283 260 return &tangled.RepoPull_Source{ 284 - Branch: s.Branch, 285 - Repo: repoAt, 286 - RepoDid: repoDid, 261 + Branch: s.Branch, 262 + Repo: repo, 287 263 } 288 264 } 289 265 ··· 313 289 SubmissionId int 314 290 315 291 // at ids 316 - RepoAt string 292 + RepoDid string 317 293 OwnerDid string 318 294 CommentAt string 319 295 ··· 366 342 367 343 func (p *Pull) IsBranchBased() bool { 368 344 if p.PullSource != nil { 369 - if p.PullSource.RepoAt != nil { 370 - return p.PullSource.RepoAt == &p.RepoAt 371 - } else { 372 - // no repo specified 373 - return true 345 + if p.PullSource.RepoDid != nil { 346 + return *p.PullSource.RepoDid == p.RepoDid 374 347 } 348 + // no repo specified 349 + return true 375 350 } 376 351 return false 377 352 } 378 353 379 354 func (p *Pull) IsForkBased() bool { 380 355 if p.PullSource != nil { 381 - if p.PullSource.RepoAt != nil { 356 + if p.PullSource.RepoDid != nil { 382 357 // make sure repos are different 383 - return p.PullSource.RepoAt != &p.RepoAt 358 + return *p.PullSource.RepoDid != p.RepoDid 384 359 } 385 360 } 386 361 
return false
+54 -3
appview/models/repo.go
··· 57 57 58 58 return tangled.Repo{ 59 59 Knot: r.Knot, 60 - Name: r.Name, 60 + Name: r.cosmeticName(), 61 61 Description: description, 62 62 Website: website, 63 63 Topics: r.Topics, ··· 69 69 } 70 70 } 71 71 72 + func (r *Repo) cosmeticName() *string { 73 + if r.Name == "" || r.Name == r.Rkey { 74 + return nil 75 + } 76 + return &r.Name 77 + } 78 + 72 79 func (r Repo) RepoAt() syntax.ATURI { 73 80 return syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", r.Did, tangled.RepoNSID, r.Rkey)) 74 81 } ··· 77 84 if r.RepoDid != "" { 78 85 return r.RepoDid 79 86 } 80 - p, _ := securejoin.SecureJoin(r.Did, r.Name) 87 + p, _ := securejoin.SecureJoin(r.Did, r.Rkey) 81 88 return p 82 89 } 83 90 ··· 113 120 114 121 type RepoLabel struct { 115 122 Id int64 116 - RepoAt syntax.ATURI 123 + RepoDid syntax.DID 117 124 LabelAt syntax.ATURI 118 125 } 119 126 127 + var reservedRepoNames = map[string]struct{}{ 128 + "self": {}, 129 + } 130 + 131 + func ValidateRepoName(name string) error { 132 + if len(name) == 0 { 133 + return fmt.Errorf("Repository name cannot be empty") 134 + } 135 + if len(name) > 100 { 136 + return fmt.Errorf("Repository name must be 100 characters or fewer") 137 + } 138 + 139 + if strings.Contains(name, "/") || strings.Contains(name, "\\") { 140 + return fmt.Errorf("Repository name contains invalid path characters") 141 + } 142 + 143 + if strings.HasPrefix(name, ".") || strings.HasSuffix(name, ".") { 144 + return fmt.Errorf("Repository name contains invalid path sequence") 145 + } 146 + 147 + for _, char := range name { 148 + if !((char >= 'a' && char <= 'z') || 149 + (char >= 'A' && char <= 'Z') || 150 + (char >= '0' && char <= '9') || 151 + char == '-' || char == '_' || char == '.') { 152 + return fmt.Errorf("Repository name can only contain alphanumeric characters, periods, hyphens, and underscores") 153 + } 154 + } 155 + 156 + if strings.Contains(name, "..") { 157 + return fmt.Errorf("Repository name cannot contain sequential dots") 158 + } 159 + 160 + if _, 
reserved := reservedRepoNames[strings.ToLower(name)]; reserved { 161 + return fmt.Errorf("Repository name %q is reserved", name) 162 + } 163 + 164 + return nil 165 + } 166 + 167 + func StripGitExt(name string) string { 168 + return strings.TrimSuffix(name, ".git") 169 + } 170 + 120 171 type RepoGroup struct { 121 172 Repo *Repo 122 173 Issues []Issue
+86
appview/models/repo_test.go
··· 1 + package models 2 + 3 + import ( 4 + "strings" 5 + "testing" 6 + ) 7 + 8 + func TestValidateRepoName_ValidRkeys(t *testing.T) { 9 + valid := []string{ 10 + "myrepo", 11 + "MyRepo", 12 + "my-repo", 13 + "my_repo", 14 + "my.repo", 15 + "a", 16 + "repo123", 17 + strings.Repeat("a", 100), 18 + } 19 + for _, name := range valid { 20 + if err := ValidateRepoName(name); err != nil { 21 + t.Errorf("ValidateRepoName(%q) = %v, want nil", name, err) 22 + } 23 + } 24 + } 25 + 26 + func TestValidateRepoName_InvalidRkeys(t *testing.T) { 27 + cases := []struct { 28 + input string 29 + substr string 30 + }{ 31 + {"", "empty"}, 32 + {strings.Repeat("a", 101), "100 characters"}, 33 + {"has space", "alphanumeric"}, 34 + {"has/slash", "invalid path"}, 35 + {"has\\backslash", "invalid path"}, 36 + {".dotprefix", "invalid path"}, 37 + {"dotsuffix.", "invalid path"}, 38 + {"two..dots", "sequential dots"}, 39 + {"../traversal", "invalid path"}, 40 + {"self", "reserved"}, 41 + {"SELF", "reserved"}, 42 + } 43 + for _, tc := range cases { 44 + err := ValidateRepoName(tc.input) 45 + if err == nil { 46 + t.Errorf("ValidateRepoName(%q) = nil, want error containing %q", tc.input, tc.substr) 47 + continue 48 + } 49 + if !strings.Contains(strings.ToLower(err.Error()), strings.ToLower(tc.substr)) { 50 + t.Errorf("ValidateRepoName(%q) = %q, want substring %q", tc.input, err.Error(), tc.substr) 51 + } 52 + } 53 + } 54 + 55 + func TestStripGitExt(t *testing.T) { 56 + cases := []struct{ in, want string }{ 57 + {"repo.git", "repo"}, 58 + {"repo", "repo"}, 59 + {"repo.git.git", "repo.git"}, 60 + {".git", ""}, 61 + } 62 + for _, tc := range cases { 63 + if got := StripGitExt(tc.in); got != tc.want { 64 + t.Errorf("StripGitExt(%q) = %q, want %q", tc.in, got, tc.want) 65 + } 66 + } 67 + } 68 + 69 + func TestCosmeticName_NilWhenMatchesRkey(t *testing.T) { 70 + r := Repo{Name: "myrepo", Rkey: "myrepo"} 71 + rec := r.AsRecord() 72 + if rec.Name != nil { 73 + t.Errorf("cosmeticName should be nil when 
Name == Rkey, got %q", *rec.Name) 74 + } 75 + } 76 + 77 + func TestCosmeticName_PresentWhenDiffers(t *testing.T) { 78 + r := Repo{Name: "MyRepo", Rkey: "myrepo", Knot: "k"} 79 + rec := r.AsRecord() 80 + if rec.Name == nil { 81 + t.Fatal("cosmeticName should be non-nil when Name != Rkey") 82 + } 83 + if *rec.Name != "MyRepo" { 84 + t.Errorf("cosmeticName = %q, want %q", *rec.Name, "MyRepo") 85 + } 86 + }
+2 -2
appview/models/search.go
··· 5 5 type IssueSearchOptions struct { 6 6 Keywords []string 7 7 Phrases []string 8 - RepoAt string 8 + RepoDid string 9 9 IsOpen *bool 10 10 AuthorDid string 11 11 Labels []string ··· 31 31 type PullSearchOptions struct { 32 32 Keywords []string 33 33 Phrases []string 34 - RepoAt string 34 + RepoDid string 35 35 State *PullState 36 36 AuthorDid string 37 37 Labels []string
+4 -4
appview/models/search_test.go
··· 16 16 want: false, 17 17 }, 18 18 { 19 - name: "non-filter fields only (RepoAt, IsOpen, Page) return false", 20 - opts: IssueSearchOptions{RepoAt: "at://did:plc:abc/repo"}, 19 + name: "non-filter fields only (RepoDid, IsOpen, Page) return false", 20 + opts: IssueSearchOptions{RepoDid: "did:plc:abc"}, 21 21 want: false, 22 22 }, 23 23 { ··· 93 93 want: false, 94 94 }, 95 95 { 96 - name: "non-filter fields only (RepoAt, State, Page) return false", 97 - opts: PullSearchOptions{RepoAt: "at://did:plc:abc/repo"}, 96 + name: "non-filter fields only (RepoDid, State, Page) return false", 97 + opts: PullSearchOptions{RepoDid: "did:plc:abc"}, 98 98 want: false, 99 99 }, 100 100 {
+6 -2
appview/models/site_deploy.go
··· 1 1 package models 2 2 3 - import "time" 3 + import ( 4 + "time" 5 + 6 + "github.com/bluesky-social/indigo/atproto/syntax" 7 + ) 4 8 5 9 type SiteDeployStatus string 6 10 ··· 29 33 30 34 type SiteDeploy struct { 31 35 Id int64 32 - RepoAt string 36 + RepoDid syntax.DID 33 37 Branch string 34 38 Dir string 35 39 CommitSHA string
+7 -3
appview/models/sites.go
··· 1 1 package models 2 2 3 - import "time" 3 + import ( 4 + "time" 5 + 6 + "github.com/bluesky-social/indigo/atproto/syntax" 7 + ) 4 8 5 9 type DomainClaim struct { 6 10 ID int64 ··· 11 15 12 16 type RepoSite struct { 13 17 ID int64 14 - RepoAt string 15 - RepoName string // populated when joined with repos table 18 + RepoDid syntax.DID 19 + RepoRkey string // populated when joined with repos table 16 20 Branch string 17 21 Dir string 18 22 IsIndex bool
+11 -5
appview/models/star.go
··· 2 2 3 3 import ( 4 4 "time" 5 + ) 6 + 7 + type StarSubjectType string 5 8 6 - "github.com/bluesky-social/indigo/atproto/syntax" 9 + const ( 10 + StarSubjectRepo StarSubjectType = "repo" 11 + StarSubjectString StarSubjectType = "string" 7 12 ) 8 13 9 14 type Star struct { 10 - Did string 11 - RepoAt syntax.ATURI 12 - Created time.Time 13 - Rkey string 15 + Did string 16 + SubjectType StarSubjectType 17 + Subject string 18 + Created time.Time 19 + Rkey string 14 20 } 15 21 16 22 // RepoStar is used for reverse mapping to repos
+11 -2
appview/models/webhook.go
··· 10 10 type WebhookEvent string 11 11 12 12 const ( 13 - WebhookEventPush WebhookEvent = "push" 13 + WebhookEventPush WebhookEvent = "push" 14 + WebhookEventRepoRenamed WebhookEvent = "repository:renamed" 14 15 ) 15 16 16 17 type Webhook struct { 17 18 Id int64 18 - RepoAt syntax.ATURI 19 + RepoDid syntax.DID 19 20 Url string 20 21 Secret string 21 22 Active bool ··· 72 73 type WebhookUser struct { 73 74 Did string `json:"did"` 74 75 } 76 + 77 + // WebhookRenamePayload represents the payload for a repository:renamed event 78 + type WebhookRenamePayload struct { 79 + OldName string `json:"old_name"` 80 + NewName string `json:"new_name"` 81 + Repository WebhookRepository `json:"repository"` 82 + Sender WebhookUser `json:"sender"` 83 + }
+12
orm/orm.go
··· 3 3 import ( 4 4 "context" 5 5 "database/sql" 6 + "errors" 6 7 "fmt" 7 8 "log/slog" 8 9 "reflect" 9 10 "strings" 11 + 12 + "github.com/mattn/go-sqlite3" 10 13 ) 11 14 15 + func IsUniqueViolation(err error) bool { 16 + var sqlErr sqlite3.Error 17 + if !errors.As(err, &sqlErr) { 18 + return false 19 + } 20 + return sqlErr.ExtendedCode == sqlite3.ErrConstraintUnique || 21 + sqlErr.ExtendedCode == sqlite3.ErrConstraintPrimaryKey 22 + } 23 + 12 24 type migrationFn = func(*sql.Tx) error 13 25 14 26 func RunMigration(c *sql.Conn, logger *slog.Logger, name string, migrationFn migrationFn) error {

History

1 round 0 comments
sign up or login to add to the discussion
oyster.cafe submitted #0
1 commit
expand
appview/db,models: add repo_did columns and update model structs
no conflicts, ready to merge
expand 0 comments