about things
0
fork

Configure Feed

Select the types of activity you want to include in your feed.

# database

patterns for sqlite in zig - connection management, thread safety, transactions.

## zqlite vs raw c bindings

prefer zqlite over raw c bindings. the api is cleaner and handles common pitfalls:

const zqlite = @import("zqlite");

// open with flags
conn = zqlite.open(path, zqlite.OpenFlags.Create | zqlite.OpenFlags.ReadWrite) catch |err| {
    return err;
};

// exec with tuple binding (type-safe)
conn.exec("INSERT INTO users (name, age) VALUES (?, ?)", .{ name, age }) catch |err| {
    return err;
};

// exec without args
conn.execNoArgs("CREATE TABLE IF NOT EXISTS users (...)") catch |err| {
    return err;
};

with raw c bindings you manually manage statement lifecycle, bind parameters by index, and handle error codes. zqlite wraps all that.

see: build.zig.zon dependency

## connection patterns

### global connection + mutex (simple apps)

for single-writer workloads, a global connection protected by mutex works:

pub var conn: zqlite.Conn = undefined;
pub var mutex: std.Thread.Mutex = .{};

pub fn init(path: [*:0]const u8) !void {
    conn = zqlite.open(path, zqlite.OpenFlags.Create | zqlite.OpenFlags.ReadWrite) catch |err| {
        return err;
    };
    // enable WAL for better read concurrency
    conn.execNoArgs("PRAGMA journal_mode=WAL") catch {};
    conn.execNoArgs("PRAGMA busy_timeout=5000") catch {};
}

pub fn insertUser(name: []const u8) !void {
    mutex.lock();
    defer mutex.unlock();

    conn.exec("INSERT INTO users (name) VALUES (?)", .{name}) catch |err| {
        return err;
    };
}

every function that touches the db acquires the mutex first. the defer mutex.unlock() ensures unlock even on error.

see: pollz/db.zig, music-atmosphere-feed/db.zig

### connection pool (concurrent apps)

for multi-threaded servers with concurrent writes, use zqlite's pool:

const pool = try zqlite.Pool.init(.{
    .path = path,
    .size = 4,  // number of connections
    .on_first_connection = initSchema,  // runs once
    .on_connection = configurePragmas,   // runs per connection
});

// in request handler
const c = pool.acquire();
defer c.release();

try c.exec("INSERT ...", .{...});

the pool won't grow beyond the configured size. each connection gets its own cursor/statement state.

## transactions

wrap multi-statement operations in transactions for atomicity:

try conn.transaction();  // BEGIN
errdefer conn.rollback();

try conn.exec("UPDATE flow_run SET state = ?", .{state});
try conn.exec("INSERT INTO flow_run_state ...", .{...});

try conn.commit();  // COMMIT

the errdefer conn.rollback() ensures rollback if any statement fails. without this, partial writes corrupt state.

for write-heavy operations, use exclusiveTransaction() which acquires a write lock immediately (avoids busy-wait):

try conn.exclusiveTransaction();  // BEGIN IMMEDIATE
// ... writes ...
try conn.commit();

## pragmas for concurrency

always set these for concurrent access:

conn.execNoArgs("PRAGMA journal_mode=WAL") catch {};      // write-ahead logging
conn.execNoArgs("PRAGMA busy_timeout=5000") catch {};     // wait 5s on lock
conn.execNoArgs("PRAGMA foreign_keys=ON") catch {};       // enforce FK constraints

WAL allows concurrent readers during writes. busy_timeout prevents immediate SQLITE_BUSY errors.

## module organization

for larger apps, separate db concerns into a module:

src/db/
  mod.zig       -- re-exports, global state
  Client.zig    -- connection/pool management
  schema.zig    -- table definitions, migrations
  result.zig    -- result parsing helpers

the mod.zig pattern:

pub const Client = @import("Client.zig");
pub const Row = result.Row;

var client: ?Client = null;

pub fn init() !void {
    client = try Client.init();
    try schema.init(&client.?);
}

pub fn getClient() ?*Client {
    if (client) |*c| return c;
    return null;
}

see: leaflet-search/db/mod.zig

## comptime sql with zql

zql parses sql at compile time and provides type-safe bindings. it extracts column names, named parameters, and validates struct mapping:

const zql = @import("zql");

// parse sql at comptime
const Q = zql.Query("SELECT id, name FROM users WHERE age > :min_age");

// comptime validation - fails if struct missing required param
Q.validateArgs(struct { min_age: i64 });

// access parsed metadata
_ = Q.positional; // "SELECT id, name FROM users WHERE age > ?"
_ = Q.params;     // ["min_age"]
_ = Q.columns;    // ["id", "name"]

// bind named params to positional tuple (order independent)
const args = Q.bind(.{ .min_age = 25 });  // returns tuple for db.exec

// map result row to struct (validates fields match columns)
const User = struct { id: i64, name: []const u8 };
Q.validateStruct(User);
const user = Q.fromRow(User, row);  // row must have .text(idx), .int(idx)

the key pattern: Query(comptime sql) returns a type with methods for validation and binding. all validation happens at compile time - typos in param names or missing fields are compile errors.

combine with zqlite for fully type-safe database layer:

const Q = zql.Query("INSERT INTO users (name, age) VALUES (:name, :age)");
try conn.exec(Q.positional, Q.bind(.{ .name = "alice", .age = 25 }));

see: zql/Query.zig

## postgres with pg.zig

pg.zig is the postgresql client. key difference from sqlite: strict type checking on column types.

### strict integer types

pg.zig enforces exact type matching for integer columns:

// WRONG - panics at runtime
row.getInt(col)  // returns i32, but you assigned to i64 from BIGINT column

// RIGHT - match postgres types exactly
const count: i32 = row.getInt(col);    // for INTEGER / INT4 columns
const big_id: i64 = row.getBigInt(col); // for BIGINT / INT8 columns

common mistake: sqlite's row.int() returns i64 regardless of column type. pg.zig is strict.

### abstracting sqlite/postgres

when supporting both databases, create a wrapper that handles the differences:

pub const Row = union(enum) {
    sqlite: zqlite.Row,
    postgres: pg.Row,

    // for INTEGER columns
    pub fn int(self: Row, col: usize) i64 {
        return switch (self) {
            .sqlite => |r| r.int(col),
            .postgres => |r| r.getInt(col), // returns i32, implicitly widened to i64
        };
    }

    // for BIGINT columns - MUST use separate method
    pub fn bigint(self: Row, col: usize) i64 {
        return switch (self) {
            .sqlite => |r| r.int(col),
            .postgres => |r| r.getBigInt(col), // returns i64
        };
    }

    pub fn text(self: Row, col: usize) []const u8 {
        return switch (self) {
            .sqlite => |r| r.text(col),
            .postgres => |r| r.getText(col) orelse "",
        };
    }
};

see: prefect-server/db/backend.zig

### placeholder syntax

postgres uses $1, $2 instead of ?. convert at runtime:

fn rewritePlaceholders(alloc: Allocator, sql: []const u8) ![]const u8 {
    var result: std.ArrayList(u8) = .empty;
    errdefer result.deinit(alloc);
    var param_num: usize = 1;

    for (sql) |c| {
        if (c == '?') {
            try result.print(alloc, "${d}", .{param_num});
            param_num += 1;
        } else {
            try result.append(alloc, c);
        }
    }
    return result.toOwnedSlice(alloc);
}

### connection pooling

pg.zig has built-in pooling:

const pool = try pg.Pool.init(.{
    .connect = .{
        .host = "localhost",
        .port = 5432,
        .database = "mydb",
        .username = "user",
        .password = "pass",
    },
    .size = 10,
});

// acquire connection (returns to pool on deinit)
const conn = try pool.acquire();
defer conn.deinit();

var result = try conn.query("SELECT ...", .{});
defer result.deinit();