logfire client for zig
0
fork

Configure Feed

Select the types of activity you want to include in your feed.

add metrics, attributes, proper JSON serialization

- metrics: Counter, Gauge, UpDownCounter, Histogram, ExponentialHistogram
- attributes: typed key-value pairs for spans/logs/metrics
- exporter: rewrite with std.json.Stringify (no manual string building)
- root: add counter(), gaugeInt(), gaugeDouble() convenience functions
- CLAUDE.md: project patterns and zig 0.15 notes

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>

zzstoatzz 1789e611 7449e252

+1514 -145
+103
CLAUDE.md
··· 1 + # logfire-zig 2 + 3 + zig 0.15+ SDK for pydantic logfire - OTLP HTTP/JSON export for traces, logs, metrics. 4 + 5 + ## goal 6 + 7 + **full parity with logfire-rust client.** not simpler, not "good enough" - parity. 8 + 9 + ## reference projects 10 + 11 + look at these for patterns, don't reinvent: 12 + 13 + - `~/tangled.sh/@zzstoatzz.io/prefect-server/` - zig 0.15 http server, json handling 14 + - `~/tangled.sh/@zzstoatzz.io/zat/` - zig 0.15 AT Protocol client 15 + - `~/tangled.sh/@zzstoatzz.io/leaflet-search/` - zig backend patterns 16 + - `~/tangled.sh/@zzstoatzz.io/notes/languages/ziglang/0.15/` - zig 0.15 syntax notes 17 + 18 + ## zig 0.15 patterns 19 + 20 + ### JSON serialization - use std.json.Stringify 21 + 22 + **DO NOT manually concatenate JSON strings.** use the stdlib: 23 + 24 + ```zig 25 + const json = std.json; 26 + 27 + fn buildJson(alloc: std.mem.Allocator, data: MyData) ![]u8 { 28 + var output: std.Io.Writer.Allocating = .init(alloc); 29 + var jw: json.Stringify = .{ .writer = &output.writer }; 30 + 31 + try jw.beginObject(); 32 + try jw.objectField("name"); 33 + try jw.write(data.name); 34 + try jw.objectField("count"); 35 + try jw.write(data.count); 36 + try jw.endObject(); 37 + 38 + return output.toOwnedSlice(); 39 + } 40 + ``` 41 + 42 + for raw JSON passthrough: 43 + ```zig 44 + try jw.beginWriteRaw(); 45 + try jw.writer.writeAll(raw_json_string); 46 + jw.endWriteRaw(); 47 + ``` 48 + 49 + ### ArrayList (unmanaged in 0.15) 50 + 51 + pass allocator to each method: 52 + 53 + ```zig 54 + var buf: std.ArrayList(u8) = .empty; 55 + defer buf.deinit(alloc); 56 + 57 + try buf.appendSlice(alloc, data); 58 + try buf.print(alloc, "{d}", .{value}); 59 + 60 + // borrow: buf.items (don't hold after deinit) 61 + // transfer ownership: buf.toOwnedSlice(alloc) 62 + ``` 63 + 64 + ### HTTP client 65 + 66 + ```zig 67 + var client = std.http.Client{ .allocator = allocator }; 68 + defer client.deinit(); 69 + 70 + var aw: std.Io.Writer.Allocating = 
.init(allocator); 71 + defer aw.deinit(); 72 + 73 + const result = client.fetch(.{ 74 + .location = .{ .url = url }, 75 + .response_writer = &aw.writer, 76 + .method = .POST, 77 + .payload = body, 78 + .headers = .{ 79 + .content_type = .{ .override = "application/json" }, 80 + .accept_encoding = .{ .override = "identity" }, // disable gzip (bug in 0.15) 81 + }, 82 + }) catch return error.RequestFailed; 83 + ``` 84 + 85 + ### env vars 86 + 87 + ```zig 88 + const token = std.posix.getenv("LOGFIRE_WRITE_TOKEN") orelse 89 + std.posix.getenv("LOGFIRE_TOKEN"); 90 + ``` 91 + 92 + ## testing 93 + 94 + ```bash 95 + zig build test # run tests 96 + zig build run-example # run examples/basic.zig 97 + ``` 98 + 99 + test with real export requires LOGFIRE_WRITE_TOKEN in .env 100 + 101 + ## rust client reference 102 + 103 + `gh api repos/pydantic/logfire-rust/contents/src` - check metrics.rs for API parity
+7 -1
examples/basic.zig
··· 1 1 //! basic logfire example 2 2 //! 3 - //! demonstrates spans, logging, and export to logfire. 3 + //! demonstrates spans, logging, metrics, and export to logfire. 4 4 //! 5 5 //! run with: 6 6 //! LOGFIRE_TOKEN=your_token zig build example ··· 52 52 std.posix.nanosleep(0, 5 * std.time.ns_per_ms); 53 53 } 54 54 } 55 + 56 + // metrics 57 + logfire.counter("requests.total", 1); 58 + logfire.counter("requests.total", 1); 59 + logfire.gaugeInt("connections.active", 42); 60 + logfire.gaugeDouble("cpu.usage", 0.75); 55 61 56 62 // flush to ensure all data is sent 57 63 try lf.flush();
+148
src/attribute.zig
··· 1 + //! OTLP attribute types 2 + //! 3 + //! converts zig values to OTLP-compatible attribute format. 4 + 5 + const std = @import("std"); 6 + 7 + pub const Attribute = struct { 8 + key: []const u8, 9 + value: Value, 10 + 11 + pub const Value = union(enum) { 12 + string: []const u8, 13 + int: i64, 14 + float: f64, 15 + bool_val: bool, 16 + }; 17 + 18 + /// convert a comptime struct to attributes array 19 + /// returns number of attributes written 20 + pub fn fromStruct(attrs: anytype, out: []Attribute) usize { 21 + const T = @TypeOf(attrs); 22 + const info = @typeInfo(T); 23 + 24 + if (info != .@"struct") return 0; 25 + 26 + const fields = info.@"struct".fields; 27 + var count: usize = 0; 28 + 29 + inline for (fields) |field| { 30 + if (count >= out.len) break; 31 + 32 + const field_value = @field(attrs, field.name); 33 + if (toValue(field_value)) |value| { 34 + out[count] = .{ 35 + .key = field.name, 36 + .value = value, 37 + }; 38 + count += 1; 39 + } 40 + } 41 + 42 + return count; 43 + } 44 + 45 + /// convert a zig value to an attribute value 46 + fn toValue(value: anytype) ?Value { 47 + const T = @TypeOf(value); 48 + const info = @typeInfo(T); 49 + 50 + return switch (info) { 51 + .int, .comptime_int => .{ .int = @intCast(value) }, 52 + .float, .comptime_float => .{ .float = @floatCast(value) }, 53 + .bool => .{ .bool_val = value }, 54 + .pointer => |ptr| { 55 + if (ptr.size == .slice and ptr.child == u8) { 56 + return .{ .string = value }; 57 + } 58 + if (ptr.size == .one) { 59 + // pointer to array (e.g., *const [N]u8) 60 + const child_info = @typeInfo(ptr.child); 61 + if (child_info == .array and child_info.array.child == u8) { 62 + return .{ .string = value }; 63 + } 64 + } 65 + return null; 66 + }, 67 + .array => |arr| { 68 + if (arr.child == u8) { 69 + return .{ .string = &value }; 70 + } 71 + return null; 72 + }, 73 + .optional => { 74 + if (value) |v| { 75 + return toValue(v); 76 + } 77 + return null; 78 + }, 79 + else => null, 80 + }; 81 + } 82 + 83 
+ /// write attribute as OTLP JSON 84 + pub fn writeJson(self: Attribute, w: anytype) !void { 85 + try w.writeAll("{\"key\":"); 86 + try writeJsonString(w, self.key); 87 + try w.writeAll(",\"value\":{"); 88 + 89 + switch (self.value) { 90 + .string => |s| { 91 + try w.writeAll("\"stringValue\":"); 92 + try writeJsonString(w, s); 93 + }, 94 + .int => |i| { 95 + try w.print("\"intValue\":\"{d}\"", .{i}); 96 + }, 97 + .float => |f| { 98 + try w.print("\"doubleValue\":{d}", .{f}); 99 + }, 100 + .bool_val => |b| { 101 + try w.print("\"boolValue\":{}", .{b}); 102 + }, 103 + } 104 + 105 + try w.writeAll("}}"); 106 + } 107 + }; 108 + 109 + fn writeJsonString(w: anytype, s: []const u8) !void { 110 + try w.writeByte('"'); 111 + for (s) |c| { 112 + switch (c) { 113 + '"' => try w.writeAll("\\\""), 114 + '\\' => try w.writeAll("\\\\"), 115 + '\n' => try w.writeAll("\\n"), 116 + '\r' => try w.writeAll("\\r"), 117 + '\t' => try w.writeAll("\\t"), 118 + 0x00...0x08, 0x0b, 0x0c, 0x0e...0x1f => try w.print("\\u00{x:0>2}", .{c}), 119 + else => try w.writeByte(c), 120 + } 121 + } 122 + try w.writeByte('"'); 123 + } 124 + 125 + // tests 126 + 127 + test "fromStruct basic" { 128 + var attrs: [8]Attribute = undefined; 129 + const count = Attribute.fromStruct(.{ 130 + .name = "test", 131 + .count = @as(i64, 42), 132 + .enabled = true, 133 + }, &attrs); 134 + 135 + try std.testing.expectEqual(@as(usize, 3), count); 136 + try std.testing.expectEqualStrings("name", attrs[0].key); 137 + try std.testing.expectEqualStrings("test", attrs[0].value.string); 138 + try std.testing.expectEqual(@as(i64, 42), attrs[1].value.int); 139 + try std.testing.expectEqual(true, attrs[2].value.bool_val); 140 + } 141 + 142 + test "writeJson string" { 143 + var buf: [256]u8 = undefined; 144 + var fbs = std.io.fixedBufferStream(&buf); 145 + const attr = Attribute{ .key = "foo", .value = .{ .string = "bar" } }; 146 + try attr.writeJson(fbs.writer()); 147 + try 
std.testing.expectEqualStrings("{\"key\":\"foo\",\"value\":{\"stringValue\":\"bar\"}}", fbs.getWritten()); 148 + }
+465 -136
src/exporter.zig
··· 1 1 //! OTLP HTTP/JSON exporter 2 2 //! 3 - //! sends spans and logs to logfire (or any OTLP-compatible backend) 3 + //! sends spans, logs, and metrics to logfire (or any OTLP-compatible backend) 4 4 //! uses HTTP with JSON encoding for simplicity. 5 5 //! 6 6 //! endpoints: 7 7 //! - /v1/traces - span data 8 8 //! - /v1/logs - log records 9 + //! - /v1/metrics - metric data 9 10 //! 10 11 //! see: https://opentelemetry.io/docs/specs/otlp/ 11 12 12 13 const std = @import("std"); 14 + const json = std.json; 13 15 const Config = @import("config.zig").Config; 14 16 const Span = @import("span.zig").Span; 15 17 const LogRecord = @import("log.zig").LogRecord; 18 + const Attribute = @import("attribute.zig").Attribute; 19 + const metrics_mod = @import("metrics.zig"); 20 + const MetricData = metrics_mod.MetricData; 16 21 17 22 pub const Exporter = struct { 18 23 allocator: std.mem.Allocator, ··· 37 42 spans: []const Span.Data, 38 43 logs: []const LogRecord, 39 44 ) !void { 45 + try self.sendAll(spans, logs, &.{}); 46 + } 47 + 48 + /// send spans, logs, and metrics to OTLP endpoints 49 + pub fn sendAll( 50 + self: *Exporter, 51 + spans: []const Span.Data, 52 + logs: []const LogRecord, 53 + metrics: []const MetricData, 54 + ) !void { 40 55 if (!self.config.shouldSend()) { 41 - // just log to console if not sending 42 - self.printToConsole(spans, logs); 56 + self.printToConsole(spans, logs, metrics); 43 57 return; 44 58 } 45 59 46 60 const base_url = self.config.base_url orelse return error.NoBaseUrl; 47 61 48 - // send spans 49 62 if (spans.len > 0) { 50 63 const traces_json = try self.buildTracesJson(spans); 51 64 defer self.allocator.free(traces_json); 52 - 53 65 try self.sendToEndpoint(base_url, "/v1/traces", traces_json); 54 66 } 55 67 56 - // send logs 57 68 if (logs.len > 0) { 58 69 const logs_json = try self.buildLogsJson(logs); 59 70 defer self.allocator.free(logs_json); 71 + try self.sendToEndpoint(base_url, "/v1/logs", logs_json); 72 + } 60 73 61 - try 
self.sendToEndpoint(base_url, "/v1/logs", logs_json); 74 + if (metrics.len > 0) { 75 + const metrics_json = try self.buildMetricsJson(metrics); 76 + defer self.allocator.free(metrics_json); 77 + try self.sendToEndpoint(base_url, "/v1/metrics", metrics_json); 62 78 } 63 79 } 64 80 65 81 fn sendToEndpoint(self: *Exporter, base_url: []const u8, path: []const u8, body: []const u8) !void { 66 - // build full URL 67 82 var url_buf: [1024]u8 = undefined; 68 83 const url = std.fmt.bufPrint(&url_buf, "{s}{s}", .{ base_url, path }) catch return error.UrlTooLong; 69 84 70 - // response writer 71 85 var aw: std.Io.Writer.Allocating = .init(self.allocator); 72 86 defer aw.deinit(); 73 87 74 - // build auth header on stack 75 88 var auth_buf: [2048]u8 = undefined; 76 89 var auth_header: ?[]const u8 = null; 77 90 if (self.config.token) |token| { 78 91 auth_header = std.fmt.bufPrint(&auth_buf, "Bearer {s}", .{token}) catch null; 79 92 } 80 93 81 - // headers 82 94 var headers: std.http.Client.Request.Headers = .{ 83 95 .content_type = .{ .override = "application/json" }, 84 - // disable gzip: zig stdlib flate.Decompress can panic 85 96 .accept_encoding = .{ .override = "identity" }, 86 97 .user_agent = .{ .override = "logfire-zig/0.1.0" }, 87 98 }; ··· 103 114 } 104 115 } 105 116 106 - /// build OTLP JSON for traces 107 - /// see: https://opentelemetry.io/docs/specs/otlp/#otlphttp-request 108 117 fn buildTracesJson(self: *Exporter, spans: []const Span.Data) ![]u8 { 109 - var json: std.ArrayList(u8) = .{}; 110 - errdefer json.deinit(self.allocator); 118 + var output: std.Io.Writer.Allocating = .init(self.allocator); 119 + errdefer output.deinit(); 120 + var jw: json.Stringify = .{ .writer = &output.writer }; 111 121 112 - var w = json.writer(self.allocator); 122 + try jw.beginObject(); 123 + try jw.objectField("resourceSpans"); 124 + try jw.beginArray(); 125 + try jw.beginObject(); 113 126 114 - try w.writeAll("{\"resourceSpans\":[{"); 127 + // resource 128 + try 
jw.objectField("resource"); 129 + try jw.beginObject(); 130 + try jw.objectField("attributes"); 131 + try self.writeResourceAttributesArray(&jw); 132 + try jw.endObject(); 115 133 116 - // resource attributes 117 - try w.writeAll("\"resource\":{\"attributes\":["); 118 - try self.writeResourceAttributes(&w); 119 - try w.writeAll("]},"); 134 + // scopeSpans 135 + try jw.objectField("scopeSpans"); 136 + try jw.beginArray(); 137 + try jw.beginObject(); 138 + try jw.objectField("scope"); 139 + try jw.beginObject(); 140 + try jw.objectField("name"); 141 + try jw.write("logfire-zig"); 142 + try jw.endObject(); 120 143 121 - // scope spans 122 - try w.writeAll("\"scopeSpans\":[{"); 123 - try w.writeAll("\"scope\":{\"name\":\"logfire-zig\"},"); 124 - try w.writeAll("\"spans\":["); 144 + try jw.objectField("spans"); 145 + try jw.beginArray(); 146 + for (spans) |s| { 147 + try self.writeSpanObject(&jw, s); 148 + } 149 + try jw.endArray(); 125 150 126 - for (spans, 0..) |s, i| { 127 - if (i > 0) try w.writeByte(','); 128 - try self.writeSpanJson(&w, s); 129 - } 151 + try jw.endObject(); 152 + try jw.endArray(); 130 153 131 - try w.writeAll("]}]}]}"); 154 + try jw.endObject(); 155 + try jw.endArray(); 156 + try jw.endObject(); 132 157 133 - return json.toOwnedSlice(self.allocator); 158 + return output.toOwnedSlice(); 134 159 } 135 160 136 - /// build OTLP JSON for logs 137 161 fn buildLogsJson(self: *Exporter, logs: []const LogRecord) ![]u8 { 138 - var json: std.ArrayList(u8) = .{}; 139 - errdefer json.deinit(self.allocator); 162 + var output: std.Io.Writer.Allocating = .init(self.allocator); 163 + errdefer output.deinit(); 164 + var jw: json.Stringify = .{ .writer = &output.writer }; 165 + 166 + try jw.beginObject(); 167 + try jw.objectField("resourceLogs"); 168 + try jw.beginArray(); 169 + try jw.beginObject(); 170 + 171 + // resource 172 + try jw.objectField("resource"); 173 + try jw.beginObject(); 174 + try jw.objectField("attributes"); 175 + try 
self.writeResourceAttributesArray(&jw); 176 + try jw.endObject(); 177 + 178 + // scopeLogs 179 + try jw.objectField("scopeLogs"); 180 + try jw.beginArray(); 181 + try jw.beginObject(); 182 + try jw.objectField("scope"); 183 + try jw.beginObject(); 184 + try jw.objectField("name"); 185 + try jw.write("logfire-zig"); 186 + try jw.endObject(); 187 + 188 + try jw.objectField("logRecords"); 189 + try jw.beginArray(); 190 + for (logs) |log| { 191 + try self.writeLogObject(&jw, log); 192 + } 193 + try jw.endArray(); 194 + 195 + try jw.endObject(); 196 + try jw.endArray(); 197 + 198 + try jw.endObject(); 199 + try jw.endArray(); 200 + try jw.endObject(); 201 + 202 + return output.toOwnedSlice(); 203 + } 140 204 141 - var w = json.writer(self.allocator); 205 + fn buildMetricsJson(self: *Exporter, metrics: []const MetricData) ![]u8 { 206 + var output: std.Io.Writer.Allocating = .init(self.allocator); 207 + errdefer output.deinit(); 208 + var jw: json.Stringify = .{ .writer = &output.writer }; 142 209 143 - try w.writeAll("{\"resourceLogs\":[{"); 210 + try jw.beginObject(); 211 + try jw.objectField("resourceMetrics"); 212 + try jw.beginArray(); 213 + try jw.beginObject(); 144 214 145 - // resource attributes 146 - try w.writeAll("\"resource\":{\"attributes\":["); 147 - try self.writeResourceAttributes(&w); 148 - try w.writeAll("]},"); 215 + // resource 216 + try jw.objectField("resource"); 217 + try jw.beginObject(); 218 + try jw.objectField("attributes"); 219 + try self.writeResourceAttributesArray(&jw); 220 + try jw.endObject(); 149 221 150 - // scope logs 151 - try w.writeAll("\"scopeLogs\":[{"); 152 - try w.writeAll("\"scope\":{\"name\":\"logfire-zig\"},"); 153 - try w.writeAll("\"logRecords\":["); 222 + // scopeMetrics 223 + try jw.objectField("scopeMetrics"); 224 + try jw.beginArray(); 225 + try jw.beginObject(); 226 + try jw.objectField("scope"); 227 + try jw.beginObject(); 228 + try jw.objectField("name"); 229 + try jw.write("logfire-zig"); 230 + try jw.endObject(); 
154 231 155 - for (logs, 0..) |log, i| { 156 - if (i > 0) try w.writeByte(','); 157 - try self.writeLogJson(&w, log); 232 + try jw.objectField("metrics"); 233 + try jw.beginArray(); 234 + for (metrics) |metric| { 235 + try writeMetricObject(&jw, metric); 158 236 } 237 + try jw.endArray(); 159 238 160 - try w.writeAll("]}]}]}"); 239 + try jw.endObject(); 240 + try jw.endArray(); 241 + 242 + try jw.endObject(); 243 + try jw.endArray(); 244 + try jw.endObject(); 161 245 162 - return json.toOwnedSlice(self.allocator); 246 + return output.toOwnedSlice(); 163 247 } 164 248 165 - fn writeResourceAttributes(self: *Exporter, w: anytype) !void { 166 - var first = true; 167 - 249 + fn writeResourceAttributesArray(self: *Exporter, jw: *json.Stringify) !void { 250 + try jw.beginArray(); 168 251 if (self.config.service_name) |name| { 169 - try writeAttribute(w, "service.name", .{ .string = name }, &first); 252 + try writeAttributeObject(jw, "service.name", .{ .string = name }); 170 253 } 171 254 if (self.config.service_version) |version| { 172 - try writeAttribute(w, "service.version", .{ .string = version }, &first); 255 + try writeAttributeObject(jw, "service.version", .{ .string = version }); 173 256 } 174 257 if (self.config.environment) |env| { 175 - try writeAttribute(w, "deployment.environment.name", .{ .string = env }, &first); 258 + try writeAttributeObject(jw, "deployment.environment.name", .{ .string = env }); 176 259 } 260 + try jw.endArray(); 177 261 } 178 262 179 - fn writeSpanJson(self: *Exporter, w: anytype, s: Span.Data) !void { 263 + fn writeSpanObject(self: *Exporter, jw: *json.Stringify, s: Span.Data) !void { 180 264 _ = self; 181 - try w.writeByte('{'); 265 + try jw.beginObject(); 182 266 183 - // trace and span IDs (hex encoded) 184 - try w.writeAll("\"traceId\":\""); 185 - try writeHex(w, &s.trace_id); 186 - try w.writeAll("\",\"spanId\":\""); 187 - try writeHex(w, &s.span_id); 188 - try w.writeAll("\","); 267 + try jw.objectField("traceId"); 268 + try 
writeHexString(jw, &s.trace_id); 189 269 190 - // name 191 - try w.writeAll("\"name\":"); 192 - try writeJsonString(w, s.name); 193 - try w.writeByte(','); 270 + try jw.objectField("spanId"); 271 + try writeHexString(jw, &s.span_id); 194 272 195 - // kind (internal = 1) 196 - try w.writeAll("\"kind\":1,"); 273 + try jw.objectField("name"); 274 + try jw.write(s.name); 197 275 198 - // timestamps (nanoseconds) 199 - try w.print("\"startTimeUnixNano\":\"{d}\",", .{s.start_time_ns}); 200 - try w.print("\"endTimeUnixNano\":\"{d}\",", .{s.end_time_ns}); 276 + try jw.objectField("kind"); 277 + try jw.write(@as(i64, 1)); 201 278 202 - // attributes (empty for now, will add later) 203 - try w.writeAll("\"attributes\":[]"); 279 + try jw.objectField("startTimeUnixNano"); 280 + try writeNsString(jw, s.start_time_ns); 204 281 205 - // status (unset = 0) 206 - try w.writeAll(",\"status\":{\"code\":0}"); 282 + try jw.objectField("endTimeUnixNano"); 283 + try writeNsString(jw, s.end_time_ns); 284 + 285 + try jw.objectField("attributes"); 286 + try jw.beginArray(); 287 + for (s.attributes[0..s.attribute_count]) |attr| { 288 + try writeAttributeFromAttr(jw, attr); 289 + } 290 + try jw.endArray(); 291 + 292 + try jw.objectField("status"); 293 + try jw.beginObject(); 294 + try jw.objectField("code"); 295 + try jw.write(@as(i64, 0)); 296 + try jw.endObject(); 207 297 208 - try w.writeByte('}'); 298 + try jw.endObject(); 209 299 } 210 300 211 - fn writeLogJson(self: *Exporter, w: anytype, log: LogRecord) !void { 301 + fn writeLogObject(self: *Exporter, jw: *json.Stringify, log: LogRecord) !void { 212 302 _ = self; 213 - try w.writeByte('{'); 303 + try jw.beginObject(); 214 304 215 - // timestamp 216 - try w.print("\"timeUnixNano\":\"{d}\",", .{log.timestamp_ns}); 305 + try jw.objectField("timeUnixNano"); 306 + try writeNsString(jw, log.timestamp_ns); 307 + 308 + try jw.objectField("severityNumber"); 309 + try jw.write(@as(i64, log.level.severity())); 217 310 218 - // severity 219 - try 
w.print("\"severityNumber\":{d},", .{log.level.severity()}); 220 - try w.writeAll("\"severityText\":"); 221 - try writeJsonString(w, log.level.name()); 222 - try w.writeByte(','); 311 + try jw.objectField("severityText"); 312 + try jw.write(log.level.name()); 223 313 224 - // body 225 - try w.writeAll("\"body\":{\"stringValue\":"); 226 - try writeJsonString(w, log.message); 227 - try w.writeAll("},"); 314 + try jw.objectField("body"); 315 + try jw.beginObject(); 316 + try jw.objectField("stringValue"); 317 + try jw.write(log.message); 318 + try jw.endObject(); 228 319 229 - // trace context 230 320 if (log.trace_id) |tid| { 231 - try w.writeAll("\"traceId\":\""); 232 - try writeHex(w, &tid); 233 - try w.writeAll("\","); 321 + try jw.objectField("traceId"); 322 + try writeHexString(jw, &tid); 234 323 } 235 324 236 - // attributes (empty for now) 237 - try w.writeAll("\"attributes\":[]"); 325 + try jw.objectField("attributes"); 326 + try jw.beginArray(); 327 + for (log.attributes[0..log.attribute_count]) |attr| { 328 + try writeAttributeFromAttr(jw, attr); 329 + } 330 + try jw.endArray(); 238 331 239 - try w.writeByte('}'); 332 + try jw.endObject(); 240 333 } 241 334 242 - fn printToConsole(self: *Exporter, spans: []const Span.Data, logs: []const LogRecord) void { 335 + fn printToConsole(self: *Exporter, spans: []const Span.Data, logs: []const LogRecord, metrics: []const MetricData) void { 243 336 const console = self.config.console orelse return; 244 337 if (!console.enabled) return; 245 338 ··· 252 345 if (@intFromEnum(log.level) < @intFromEnum(console.min_level)) continue; 253 346 std.debug.print("[{s}] {s}\n", .{ log.level.name(), log.message }); 254 347 } 348 + 349 + for (metrics) |metric| { 350 + std.debug.print("[metric] {s}\n", .{metric.name}); 351 + } 255 352 } 256 353 }; 257 354 355 + // helper functions 356 + 357 + fn writeMetricObject(jw: *json.Stringify, metric: MetricData) !void { 358 + try jw.beginObject(); 359 + 360 + try jw.objectField("name"); 361 + 
try jw.write(metric.name); 362 + 363 + if (metric.description.len > 0) { 364 + try jw.objectField("description"); 365 + try jw.write(metric.description); 366 + } 367 + 368 + if (metric.unit.len > 0) { 369 + try jw.objectField("unit"); 370 + try jw.write(metric.unit); 371 + } 372 + 373 + switch (metric.data) { 374 + .sum => |sum| { 375 + try jw.objectField("sum"); 376 + try jw.beginObject(); 377 + try jw.objectField("aggregationTemporality"); 378 + try jw.write(@as(i64, @intFromEnum(sum.temporality))); 379 + try jw.objectField("isMonotonic"); 380 + try jw.write(sum.is_monotonic); 381 + try jw.objectField("dataPoints"); 382 + try jw.beginArray(); 383 + for (sum.data_points) |dp| { 384 + try writeNumberDataPointObject(jw, dp); 385 + } 386 + try jw.endArray(); 387 + try jw.endObject(); 388 + }, 389 + .gauge => |gauge| { 390 + try jw.objectField("gauge"); 391 + try jw.beginObject(); 392 + try jw.objectField("dataPoints"); 393 + try jw.beginArray(); 394 + for (gauge.data_points) |dp| { 395 + try writeNumberDataPointObject(jw, dp); 396 + } 397 + try jw.endArray(); 398 + try jw.endObject(); 399 + }, 400 + .histogram => |hist| { 401 + try jw.objectField("histogram"); 402 + try jw.beginObject(); 403 + try jw.objectField("aggregationTemporality"); 404 + try jw.write(@as(i64, @intFromEnum(hist.temporality))); 405 + try jw.objectField("dataPoints"); 406 + try jw.beginArray(); 407 + for (hist.data_points) |dp| { 408 + try writeHistogramDataPointObject(jw, dp); 409 + } 410 + try jw.endArray(); 411 + try jw.endObject(); 412 + }, 413 + .exponential_histogram => |exp_hist| { 414 + try jw.objectField("exponentialHistogram"); 415 + try jw.beginObject(); 416 + try jw.objectField("aggregationTemporality"); 417 + try jw.write(@as(i64, @intFromEnum(exp_hist.temporality))); 418 + try jw.objectField("dataPoints"); 419 + try jw.beginArray(); 420 + for (exp_hist.data_points) |dp| { 421 + try writeExponentialHistogramDataPointObject(jw, dp); 422 + } 423 + try jw.endArray(); 424 + try 
jw.endObject(); 425 + }, 426 + } 427 + 428 + try jw.endObject(); 429 + } 430 + 431 + fn writeNumberDataPointObject(jw: *json.Stringify, dp: metrics_mod.NumberDataPoint) !void { 432 + try jw.beginObject(); 433 + 434 + try jw.objectField("startTimeUnixNano"); 435 + try writeNsString(jw, dp.start_time_ns); 436 + 437 + try jw.objectField("timeUnixNano"); 438 + try writeNsString(jw, dp.time_ns); 439 + 440 + switch (dp.value) { 441 + .int => |v| { 442 + try jw.objectField("asInt"); 443 + try writeIntString(jw, v); 444 + }, 445 + .double => |v| { 446 + try jw.objectField("asDouble"); 447 + try jw.write(v); 448 + }, 449 + } 450 + 451 + if (dp.attributes.len > 0) { 452 + try jw.objectField("attributes"); 453 + try jw.beginArray(); 454 + for (dp.attributes) |attr| { 455 + try writeAttributeFromAttr(jw, attr); 456 + } 457 + try jw.endArray(); 458 + } 459 + 460 + try jw.endObject(); 461 + } 462 + 463 + fn writeHistogramDataPointObject(jw: *json.Stringify, dp: metrics_mod.HistogramDataPoint) !void { 464 + try jw.beginObject(); 465 + 466 + try jw.objectField("startTimeUnixNano"); 467 + try writeNsString(jw, dp.start_time_ns); 468 + 469 + try jw.objectField("timeUnixNano"); 470 + try writeNsString(jw, dp.time_ns); 471 + 472 + try jw.objectField("count"); 473 + try writeIntString(jw, @intCast(dp.count)); 474 + 475 + try jw.objectField("sum"); 476 + try jw.write(dp.sum); 477 + 478 + try jw.objectField("bucketCounts"); 479 + try jw.beginArray(); 480 + for (dp.bucket_counts) |c| { 481 + try writeIntString(jw, @intCast(c)); 482 + } 483 + try jw.endArray(); 484 + 485 + try jw.objectField("explicitBounds"); 486 + try jw.beginArray(); 487 + for (dp.explicit_bounds) |b| { 488 + try jw.write(b); 489 + } 490 + try jw.endArray(); 491 + 492 + try jw.objectField("min"); 493 + try jw.write(dp.min); 494 + 495 + try jw.objectField("max"); 496 + try jw.write(dp.max); 497 + 498 + if (dp.attributes.len > 0) { 499 + try jw.objectField("attributes"); 500 + try jw.beginArray(); 501 + for 
(dp.attributes) |attr| { 502 + try writeAttributeFromAttr(jw, attr); 503 + } 504 + try jw.endArray(); 505 + } 506 + 507 + try jw.endObject(); 508 + } 509 + 510 + fn writeExponentialHistogramDataPointObject(jw: *json.Stringify, dp: metrics_mod.ExponentialHistogramDataPoint) !void { 511 + try jw.beginObject(); 512 + 513 + try jw.objectField("startTimeUnixNano"); 514 + try writeNsString(jw, dp.start_time_ns); 515 + 516 + try jw.objectField("timeUnixNano"); 517 + try writeNsString(jw, dp.time_ns); 518 + 519 + try jw.objectField("count"); 520 + try writeIntString(jw, @intCast(dp.count)); 521 + 522 + try jw.objectField("sum"); 523 + try jw.write(dp.sum); 524 + 525 + try jw.objectField("scale"); 526 + try jw.write(@as(i64, dp.scale)); 527 + 528 + try jw.objectField("zeroCount"); 529 + try writeIntString(jw, @intCast(dp.zero_count)); 530 + 531 + try jw.objectField("positive"); 532 + try jw.beginObject(); 533 + try jw.objectField("offset"); 534 + try jw.write(@as(i64, dp.positive_offset)); 535 + try jw.objectField("bucketCounts"); 536 + try jw.beginArray(); 537 + for (dp.positive_bucket_counts) |c| { 538 + try writeIntString(jw, @intCast(c)); 539 + } 540 + try jw.endArray(); 541 + try jw.endObject(); 542 + 543 + try jw.objectField("min"); 544 + try jw.write(dp.min); 545 + 546 + try jw.objectField("max"); 547 + try jw.write(dp.max); 548 + 549 + if (dp.attributes.len > 0) { 550 + try jw.objectField("attributes"); 551 + try jw.beginArray(); 552 + for (dp.attributes) |attr| { 553 + try writeAttributeFromAttr(jw, attr); 554 + } 555 + try jw.endArray(); 556 + } 557 + 558 + try jw.endObject(); 559 + } 560 + 258 561 const AttributeValue = union(enum) { 259 562 string: []const u8, 260 563 int: i64, ··· 262 565 bool_val: bool, 263 566 }; 264 567 265 - fn writeAttribute(w: anytype, key: []const u8, value: AttributeValue, first: *bool) !void { 266 - if (!first.*) try w.writeByte(','); 267 - first.* = false; 268 - try writeAttributeKv(w, key, value); 269 - } 270 - 271 - fn 
writeAttributeKv(w: anytype, key: []const u8, value: AttributeValue) !void { 272 - try w.writeAll("{\"key\":"); 273 - try writeJsonString(w, key); 274 - try w.writeAll(",\"value\":{"); 275 - 568 + fn writeAttributeObject(jw: *json.Stringify, key: []const u8, value: AttributeValue) !void { 569 + try jw.beginObject(); 570 + try jw.objectField("key"); 571 + try jw.write(key); 572 + try jw.objectField("value"); 573 + try jw.beginObject(); 276 574 switch (value) { 277 575 .string => |s| { 278 - try w.writeAll("\"stringValue\":"); 279 - try writeJsonString(w, s); 576 + try jw.objectField("stringValue"); 577 + try jw.write(s); 280 578 }, 281 579 .int => |i| { 282 - try w.print("\"intValue\":\"{d}\"", .{i}); 580 + try jw.objectField("intValue"); 581 + try writeIntString(jw, i); 283 582 }, 284 583 .float => |f| { 285 - try w.print("\"doubleValue\":{d}", .{f}); 584 + try jw.objectField("doubleValue"); 585 + try jw.write(f); 286 586 }, 287 587 .bool_val => |b| { 288 - try w.print("\"boolValue\":{}", .{b}); 588 + try jw.objectField("boolValue"); 589 + try jw.write(b); 289 590 }, 290 591 } 592 + try jw.endObject(); 593 + try jw.endObject(); 594 + } 291 595 292 - try w.writeAll("}}"); 596 + fn writeAttributeFromAttr(jw: *json.Stringify, attr: Attribute) !void { 597 + try jw.beginObject(); 598 + try jw.objectField("key"); 599 + try jw.write(attr.key); 600 + try jw.objectField("value"); 601 + try jw.beginObject(); 602 + switch (attr.value) { 603 + .string => |s| { 604 + try jw.objectField("stringValue"); 605 + try jw.write(s); 606 + }, 607 + .int => |i| { 608 + try jw.objectField("intValue"); 609 + try writeIntString(jw, i); 610 + }, 611 + .float => |f| { 612 + try jw.objectField("doubleValue"); 613 + try jw.write(f); 614 + }, 615 + .bool_val => |b| { 616 + try jw.objectField("boolValue"); 617 + try jw.write(b); 618 + }, 619 + } 620 + try jw.endObject(); 621 + try jw.endObject(); 293 622 } 294 623 295 - fn writeHex(w: anytype, bytes: []const u8) !void { 624 + fn writeHexString(jw: 
*json.Stringify, bytes: []const u8) !void { 625 + var buf: [64]u8 = undefined; 626 + var i: usize = 0; 296 627 for (bytes) |b| { 297 - try w.print("{x:0>2}", .{b}); 628 + _ = std.fmt.bufPrint(buf[i .. i + 2], "{x:0>2}", .{b}) catch unreachable; 629 + i += 2; 298 630 } 631 + try jw.write(buf[0..i]); 299 632 } 300 633 301 - fn writeJsonString(w: anytype, s: []const u8) !void { 302 - try w.writeByte('"'); 303 - for (s) |c| { 304 - switch (c) { 305 - '"' => try w.writeAll("\\\""), 306 - '\\' => try w.writeAll("\\\\"), 307 - '\n' => try w.writeAll("\\n"), 308 - '\r' => try w.writeAll("\\r"), 309 - '\t' => try w.writeAll("\\t"), 310 - 0x00...0x08, 0x0b, 0x0c, 0x0e...0x1f => try w.print("\\u00{x:0>2}", .{c}), 311 - else => try w.writeByte(c), 312 - } 313 - } 314 - try w.writeByte('"'); 634 + fn writeNsString(jw: *json.Stringify, ns: i128) !void { 635 + var buf: [32]u8 = undefined; 636 + const s = std.fmt.bufPrint(&buf, "{d}", .{ns}) catch unreachable; 637 + try jw.write(s); 638 + } 639 + 640 + fn writeIntString(jw: *json.Stringify, val: i64) !void { 641 + var buf: [24]u8 = undefined; 642 + const s = std.fmt.bufPrint(&buf, "{d}", .{val}) catch unreachable; 643 + try jw.write(s); 315 644 } 316 645 317 646 // tests ··· 337 666 }, 338 667 }; 339 668 340 - const json = try exporter.buildTracesJson(&spans); 341 - defer allocator.free(json); 669 + const json_out = try exporter.buildTracesJson(&spans); 670 + defer allocator.free(json_out); 342 671 343 - try std.testing.expect(std.mem.indexOf(u8, json, "test.span") != null); 344 - try std.testing.expect(std.mem.indexOf(u8, json, "test-service") != null); 672 + try std.testing.expect(std.mem.indexOf(u8, json_out, "test.span") != null); 673 + try std.testing.expect(std.mem.indexOf(u8, json_out, "test-service") != null); 345 674 }
+10 -3
src/log.zig
const std = @import("std");
pub const Level = @import("config.zig").Config.Level;
const Attribute = @import("attribute.zig").Attribute;

/// a single structured log record.
///
/// attributes live inline in a fixed-size array so a record can be created
/// without allocation; only the first `attribute_count` entries of
/// `attributes` are initialized — the rest are `undefined` and must not be read.
pub const LogRecord = struct {
    timestamp_ns: i128,
    level: Level,
    message: []const u8,
    trace_id: ?[16]u8,
    // valid entries: attributes[0..attribute_count]
    attributes: [max_attributes]Attribute = undefined,
    attribute_count: usize = 0,

    pub const max_attributes = 32;

    /// capture a record stamped with the current time.
    /// `attrs` is an anonymous struct whose fields become typed attributes
    /// via Attribute.fromStruct.
    /// NOTE(review): assumes fromStruct caps its output at the buffer
    /// length — confirm in attribute.zig.
    pub fn init(
        trace_id: ?[16]u8,
        level: Level,
        message: []const u8,
        attrs: anytype,
    ) LogRecord {
        var record: LogRecord = .{
            .timestamp_ns = std.time.nanoTimestamp(),
            .level = level,
            .message = message,
            .trace_id = trace_id,
        };
        record.attribute_count = Attribute.fromStruct(attrs, &record.attributes);
        return record;
    }
};
+615
src/metrics.zig
//! metrics for observability
//!
//! provides counter, gauge, up-down counter, and histogram instruments
//! matching the OpenTelemetry metrics specification.
//!
//! ## usage
//!
//! ```zig
//! var counter = logfire.u64_counter("requests.total", .{
//!     .description = "total HTTP requests",
//!     .unit = "1",
//! });
//! counter.add(1, &.{.{ .key = "method", .value = .{ .string = "GET" } }});
//! ```

const std = @import("std");
const Attribute = @import("attribute.zig").Attribute;

// ============================================================================
// atomic f64 helpers
// ============================================================================
// f64 values are stored as their raw bit pattern inside a u64 atomic cell.
// all read-modify-write operations use CAS loops; the previous implementation
// did a plain load + store, which could lose concurrent updates.

/// atomically add `delta` to an f64 stored as bits in a u64 atomic.
fn atomicAddF64(cell: *std.atomic.Value(u64), delta: f64) void {
    while (true) {
        const old_bits = cell.load(.monotonic);
        const old_val: f64 = @bitCast(old_bits);
        const new_bits: u64 = @bitCast(old_val + delta);
        if (cell.cmpxchgWeak(old_bits, new_bits, .monotonic, .monotonic) == null) return;
    }
}

/// atomically lower an f64 min cell (bits-in-u64) to `val` if smaller.
fn atomicMinF64(cell: *std.atomic.Value(u64), val: f64) void {
    while (true) {
        const old_bits = cell.load(.monotonic);
        const old_val: f64 = @bitCast(old_bits);
        if (val >= old_val) return;
        if (cell.cmpxchgWeak(old_bits, @bitCast(val), .monotonic, .monotonic) == null) return;
    }
}

/// atomically raise an f64 max cell (bits-in-u64) to `val` if larger.
fn atomicMaxF64(cell: *std.atomic.Value(u64), val: f64) void {
    while (true) {
        const old_bits = cell.load(.monotonic);
        const old_val: f64 = @bitCast(old_bits);
        if (val <= old_val) return;
        if (cell.cmpxchgWeak(old_bits, @bitCast(val), .monotonic, .monotonic) == null) return;
    }
}

/// seed bit patterns so the first recorded value always wins min/max.
/// (previously min was seeded with maxInt(u64) — an f64 NaN bit pattern that
/// only compared correctly by accident — and max with 0, which silently
/// ignored negative samples.)
const min_seed_bits: u64 = @bitCast(std.math.inf(f64));
const max_seed_bits: u64 = @bitCast(-std.math.inf(f64));

// ============================================================================
// instrument options
// ============================================================================

pub const InstrumentOptions = struct {
    description: []const u8 = "",
    unit: []const u8 = "",
};

pub const HistogramOptions = struct {
    description: []const u8 = "",
    unit: []const u8 = "",
    /// explicit bucket boundaries (defaults to OpenTelemetry default boundaries)
    boundaries: ?[]const f64 = null,
};

pub const ExponentialHistogramOptions = struct {
    description: []const u8 = "",
    unit: []const u8 = "",
    /// scale factor for exponential buckets (higher = more precision;
    /// 20 is the maximum the OTel spec allows)
    scale: i8 = 20,
};

// ============================================================================
// counter - monotonically increasing value
// ============================================================================

pub fn Counter(comptime T: type) type {
    comptime {
        if (T != u64 and T != f64) @compileError("Counter supports u64 and f64");
    }
    return struct {
        const Self = @This();

        name: []const u8,
        description: []const u8,
        unit: []const u8,
        /// u64 counters store the value directly; f64 counters store the
        /// f64 bit pattern in the same u64 cell.
        value: std.atomic.Value(u64),

        pub fn init(name: []const u8, opts: InstrumentOptions) Self {
            return .{
                .name = name,
                .description = opts.description,
                .unit = opts.unit,
                .value = .init(0),
            };
        }

        /// add `delta` to the counter. thread-safe for both u64 and f64.
        pub fn add(self: *Self, delta: T, attributes: []const Attribute) void {
            _ = attributes; // TODO: per-attribute-set aggregation
            if (T == f64) {
                atomicAddF64(&self.value, delta);
            } else {
                _ = self.value.fetchAdd(delta, .monotonic);
            }
        }

        pub fn get(self: *const Self) T {
            const raw = self.value.load(.monotonic);
            if (T == f64) return @bitCast(raw);
            return raw;
        }
    };
}

// ============================================================================
// gauge - instantaneous value
// ============================================================================

pub fn Gauge(comptime T: type) type {
    comptime {
        if (T != i64 and T != u64 and T != f64) @compileError("Gauge supports i64, u64, and f64");
    }
    return struct {
        const Self = @This();

        name: []const u8,
        description: []const u8,
        unit: []const u8,
        /// i64 gauges use a signed cell; u64 stores directly and f64 stores
        /// its bit pattern in a u64 cell.
        value: if (T == i64) std.atomic.Value(i64) else std.atomic.Value(u64),

        pub fn init(name: []const u8, opts: InstrumentOptions) Self {
            return .{
                .name = name,
                .description = opts.description,
                .unit = opts.unit,
                .value = .init(0),
            };
        }

        /// overwrite the gauge with the latest observed value.
        pub fn record(self: *Self, value: T, attributes: []const Attribute) void {
            _ = attributes; // TODO: per-attribute-set aggregation
            if (T == f64) {
                self.value.store(@bitCast(value), .monotonic);
            } else {
                self.value.store(value, .monotonic);
            }
        }

        pub fn get(self: *const Self) T {
            const raw = self.value.load(.monotonic);
            if (T == f64) return @bitCast(raw);
            return raw;
        }
    };
}

// ============================================================================
// up-down counter - bidirectional counter
// ============================================================================

pub fn UpDownCounter(comptime T: type) type {
    comptime {
        if (T != i64 and T != f64) @compileError("UpDownCounter supports i64 and f64");
    }
    return struct {
        const Self = @This();

        name: []const u8,
        description: []const u8,
        unit: []const u8,
        value: if (T == i64) std.atomic.Value(i64) else std.atomic.Value(u64),

        pub fn init(name: []const u8, opts: InstrumentOptions) Self {
            return .{
                .name = name,
                .description = opts.description,
                .unit = opts.unit,
                .value = .init(0),
            };
        }

        /// add `delta` (may be negative). thread-safe for both i64 and f64.
        pub fn add(self: *Self, delta: T, attributes: []const Attribute) void {
            _ = attributes; // TODO: per-attribute-set aggregation
            if (T == f64) {
                atomicAddF64(&self.value, delta);
            } else {
                _ = self.value.fetchAdd(delta, .monotonic);
            }
        }

        pub fn get(self: *const Self) T {
            const raw = self.value.load(.monotonic);
            if (T == f64) return @bitCast(raw);
            return raw;
        }
    };
}

// ============================================================================
// histogram - value distribution with explicit buckets
// ============================================================================

pub fn Histogram(comptime T: type) type {
    return struct {
        const Self = @This();

        name: []const u8,
        description: []const u8,
        unit: []const u8,
        boundaries: []const f64,
        /// counts.len == boundaries.len + 1; the last slot is the overflow bucket.
        counts: []std.atomic.Value(u64),
        /// running sum stored as f64 bits (the previous i64 storage truncated
        /// fractional values on every record()).
        sum: std.atomic.Value(u64),
        count: std.atomic.Value(u64),
        /// min/max stored as f64 bits, seeded with +inf/-inf.
        min: std.atomic.Value(u64),
        max: std.atomic.Value(u64),
        allocator: std.mem.Allocator,

        /// OpenTelemetry default bucket boundaries
        pub const default_boundaries = [_]f64{ 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000 };

        pub fn init(allocator: std.mem.Allocator, name: []const u8, opts: HistogramOptions) !Self {
            const boundaries = opts.boundaries orelse &default_boundaries;
            const counts = try allocator.alloc(std.atomic.Value(u64), boundaries.len + 1);
            for (counts) |*c| {
                c.* = std.atomic.Value(u64).init(0);
            }

            return .{
                .name = name,
                .description = opts.description,
                .unit = opts.unit,
                .boundaries = boundaries,
                .counts = counts,
                .sum = .init(0), // f64 0.0 has an all-zero bit pattern
                .count = .init(0),
                .min = .init(min_seed_bits),
                .max = .init(max_seed_bits),
                .allocator = allocator,
            };
        }

        pub fn deinit(self: *Self) void {
            self.allocator.free(self.counts);
        }

        /// record one sample: bump its bucket, the total count/sum, and min/max.
        pub fn record(self: *Self, value: T, attributes: []const Attribute) void {
            _ = attributes; // TODO: per-attribute-set aggregation
            const val_f64: f64 = if (T == f64) value else @floatFromInt(value);

            // OTel explicit buckets are upper-inclusive: bucket i covers
            // (bounds[i-1], bounds[i]]; values above the last bound go to the
            // overflow bucket. (previous code used `<`, off by one at bounds.)
            var bucket: usize = self.boundaries.len;
            for (self.boundaries, 0..) |bound, i| {
                if (val_f64 <= bound) {
                    bucket = i;
                    break;
                }
            }

            _ = self.counts[bucket].fetchAdd(1, .monotonic);
            atomicAddF64(&self.sum, val_f64);
            _ = self.count.fetchAdd(1, .monotonic);
            atomicMinF64(&self.min, val_f64);
            atomicMaxF64(&self.max, val_f64);
        }

        pub fn getCount(self: *const Self) u64 {
            return self.count.load(.monotonic);
        }

        pub fn getSum(self: *const Self) f64 {
            return @bitCast(self.sum.load(.monotonic));
        }
    };
}

// ============================================================================
// exponential histogram - Base2 exponential bucket histogram
// ============================================================================

pub fn ExponentialHistogram(comptime T: type) type {
    return struct {
        const Self = @This();

        name: []const u8,
        description: []const u8,
        unit: []const u8,
        scale: i8,
        /// running sum stored as f64 bits (see Histogram.sum).
        sum: std.atomic.Value(u64),
        count: std.atomic.Value(u64),
        zero_count: std.atomic.Value(u64),
        /// min/max stored as f64 bits, seeded with +inf/-inf.
        min: std.atomic.Value(u64),
        max: std.atomic.Value(u64),
        // positive bucket counts (simplified - fixed size; no negative buckets
        // yet, so negative samples update sum/count/min/max only)
        positive_counts: [256]std.atomic.Value(u64),
        positive_offset: i32,

        pub fn init(name: []const u8, opts: ExponentialHistogramOptions) Self {
            var self = Self{
                .name = name,
                .description = opts.description,
                .unit = opts.unit,
                .scale = opts.scale,
                .sum = .init(0),
                .count = .init(0),
                .zero_count = .init(0),
                .min = .init(min_seed_bits),
                .max = .init(max_seed_bits),
                .positive_counts = undefined,
                .positive_offset = 0,
            };
            for (&self.positive_counts) |*c| {
                c.* = std.atomic.Value(u64).init(0);
            }
            return self;
        }

        /// record one sample into the zero bucket or a positive exponential bucket.
        pub fn record(self: *Self, value: T, attributes: []const Attribute) void {
            _ = attributes; // TODO: per-attribute-set aggregation
            const val_f64: f64 = if (T == f64) value else @floatFromInt(value);

            if (val_f64 == 0) {
                _ = self.zero_count.fetchAdd(1, .monotonic);
            } else if (val_f64 > 0) {
                // OTel base-2 exponential mapping:
                //   index = ceil(log2(v) * 2^scale) - 1
                // so bucket i covers the half-open interval (base^i, base^(i+1)]
                // with base = 2^(2^-scale). out-of-range indices clamp into the
                // fixed 256-slot array.
                const scale_factor = std.math.pow(f64, 2, @as(f64, @floatFromInt(self.scale)));
                const bucket_idx: i32 = @as(i32, @intFromFloat(@ceil(@log2(val_f64) * scale_factor))) - 1;
                const adjusted_idx: usize = @intCast(std.math.clamp(bucket_idx - self.positive_offset, 0, 255));
                _ = self.positive_counts[adjusted_idx].fetchAdd(1, .monotonic);
            }

            atomicAddF64(&self.sum, val_f64);
            _ = self.count.fetchAdd(1, .monotonic);
            atomicMinF64(&self.min, val_f64);
            atomicMaxF64(&self.max, val_f64);
        }

        pub fn getCount(self: *const Self) u64 {
            return self.count.load(.monotonic);
        }

        pub fn getSum(self: *const Self) f64 {
            return @bitCast(self.sum.load(.monotonic));
        }
    };
}

// ============================================================================
// observable instruments (callback-based)
// ============================================================================

pub fn ObservableCallback(comptime T: type) type {
    return *const fn (observer: *Observer(T)) void;
}

pub fn Observer(comptime T: type) type {
    return struct {
        const Self = @This();

        pub const max_attributes = 32;

        value: T = 0,
        attributes: [max_attributes]Attribute = undefined,
        attribute_count: usize = 0,

        /// store the observed value; attributes past max_attributes are dropped.
        pub fn observe(self: *Self, value: T, attributes: []const Attribute) void {
            self.value = value;
            self.attribute_count = @min(attributes.len, max_attributes);
            @memcpy(self.attributes[0..self.attribute_count], attributes[0..self.attribute_count]);
        }
    };
}

// the three observable instruments share the same shape but are kept as
// distinct generic types on purpose, matching the OTel instrument taxonomy.

pub fn ObservableCounter(comptime T: type) type {
    return struct {
        const Self = @This();

        name: []const u8,
        description: []const u8,
        unit: []const u8,
        callback: ObservableCallback(T),

        pub fn init(name: []const u8, opts: InstrumentOptions, callback: ObservableCallback(T)) Self {
            return .{
                .name = name,
                .description = opts.description,
                .unit = opts.unit,
                .callback = callback,
            };
        }

        /// invoke the callback and return the observation it produced.
        pub fn observe(self: *const Self) Observer(T) {
            var observer = Observer(T){};
            self.callback(&observer);
            return observer;
        }
    };
}

pub fn ObservableGauge(comptime T: type) type {
    return struct {
        const Self = @This();

        name: []const u8,
        description: []const u8,
        unit: []const u8,
        callback: ObservableCallback(T),

        pub fn init(name: []const u8, opts: InstrumentOptions, callback: ObservableCallback(T)) Self {
            return .{
                .name = name,
                .description = opts.description,
                .unit = opts.unit,
                .callback = callback,
            };
        }

        /// invoke the callback and return the observation it produced.
        pub fn observe(self: *const Self) Observer(T) {
            var observer = Observer(T){};
            self.callback(&observer);
            return observer;
        }
    };
}

pub fn ObservableUpDownCounter(comptime T: type) type {
    return struct {
        const Self = @This();

        name: []const u8,
        description: []const u8,
        unit: []const u8,
        callback: ObservableCallback(T),

        pub fn init(name: []const u8, opts: InstrumentOptions, callback: ObservableCallback(T)) Self {
            return .{
                .name = name,
                .description = opts.description,
                .unit = opts.unit,
                .callback = callback,
            };
        }

        /// invoke the callback and return the observation it produced.
        pub fn observe(self: *const Self) Observer(T) {
            var observer = Observer(T){};
            self.callback(&observer);
            return observer;
        }
    };
}

// ============================================================================
// convenience type aliases matching Rust API
// ============================================================================

pub const U64Counter = Counter(u64);
pub const F64Counter = Counter(f64);

pub const I64Gauge = Gauge(i64);
pub const U64Gauge = Gauge(u64);
pub const F64Gauge = Gauge(f64);

pub const I64UpDownCounter = UpDownCounter(i64);
pub const F64UpDownCounter = UpDownCounter(f64);

pub const U64Histogram = Histogram(u64);
pub const F64Histogram = Histogram(f64);

pub const U64ExponentialHistogram = ExponentialHistogram(u64);
pub const F64ExponentialHistogram = ExponentialHistogram(f64);

pub const U64ObservableCounter = ObservableCounter(u64);
pub const F64ObservableCounter = ObservableCounter(f64);

pub const I64ObservableGauge = ObservableGauge(i64);
pub const U64ObservableGauge = ObservableGauge(u64);
pub const F64ObservableGauge = ObservableGauge(f64);

pub const I64ObservableUpDownCounter = ObservableUpDownCounter(i64);
pub const F64ObservableUpDownCounter = ObservableUpDownCounter(f64);

// ============================================================================
// metric data for export
// ============================================================================

/// aggregation temporality for metric export
pub const AggregationTemporality = enum(u8) {
    unspecified = 0,
    delta = 1,
    cumulative = 2,
};

/// data point for sum/gauge metrics
pub const NumberDataPoint = struct {
    start_time_ns: i128,
    time_ns: i128,
    value: union(enum) {
        int: i64,
        double: f64,
    },
    attributes: []const Attribute = &.{},
};

/// data point for histogram metrics
pub const HistogramDataPoint = struct {
    start_time_ns: i128,
    time_ns: i128,
    count: u64,
    sum: f64,
    bucket_counts: []const u64,
    explicit_bounds: []const f64,
    min: f64,
    max: f64,
    attributes: []const Attribute = &.{},
};

/// data point for exponential histogram metrics
pub const ExponentialHistogramDataPoint = struct {
    start_time_ns: i128,
    time_ns: i128,
    count: u64,
    sum: f64,
    scale: i8,
    zero_count: u64,
    positive_offset: i32,
    positive_bucket_counts: []const u64,
    min: f64,
    max: f64,
    attributes: []const Attribute = &.{},
};

/// metric data for export
pub const MetricData = struct {
    name: []const u8,
    description: []const u8 = "",
    unit: []const u8 = "",
    data: union(enum) {
        sum: struct {
            data_points: []const NumberDataPoint,
            temporality: AggregationTemporality = .cumulative,
            is_monotonic: bool = true,
        },
        gauge: struct {
            data_points: []const NumberDataPoint,
        },
        histogram: struct {
            data_points: []const HistogramDataPoint,
            temporality: AggregationTemporality = .cumulative,
        },
        exponential_histogram: struct {
            data_points: []const ExponentialHistogramDataPoint,
            temporality: AggregationTemporality = .cumulative,
        },
    },
};

// ============================================================================
// tests
// ============================================================================

test "u64 counter" {
    var counter = U64Counter.init("test.counter", .{ .description = "test counter" });
    counter.add(5, &.{});
    counter.add(3, &.{});
    try std.testing.expectEqual(@as(u64, 8), counter.get());
}

test "f64 counter keeps fractional values" {
    var counter = F64Counter.init("test.f64counter", .{});
    counter.add(1.5, &.{});
    counter.add(2.25, &.{});
    try std.testing.expectEqual(@as(f64, 3.75), counter.get());
}

test "i64 gauge" {
    var gauge = I64Gauge.init("test.gauge", .{});
    gauge.record(42, &.{});
    try std.testing.expectEqual(@as(i64, 42), gauge.get());
    gauge.record(-10, &.{});
    try std.testing.expectEqual(@as(i64, -10), gauge.get());
}

test "i64 up down counter" {
    var counter = I64UpDownCounter.init("test.updown", .{});
    counter.add(5, &.{});
    counter.add(-3, &.{});
    try std.testing.expectEqual(@as(i64, 2), counter.get());
}

test "f64 histogram" {
    var histogram = try F64Histogram.init(std.testing.allocator, "test.histogram", .{
        .boundaries = &[_]f64{ 10, 50, 100 },
    });
    defer histogram.deinit();

    histogram.record(5.0, &.{}); // bucket 0
    histogram.record(25.0, &.{}); // bucket 1
    histogram.record(75.0, &.{}); // bucket 2
    histogram.record(200.0, &.{}); // bucket 3 (overflow)

    try std.testing.expectEqual(@as(u64, 4), histogram.getCount());
}

test "histogram sum is exact for fractional values" {
    var histogram = try F64Histogram.init(std.testing.allocator, "test.fractional", .{});
    defer histogram.deinit();

    histogram.record(0.5, &.{});
    histogram.record(0.25, &.{});

    try std.testing.expectEqual(@as(f64, 0.75), histogram.getSum());
}

test "exponential histogram" {
    var histogram = U64ExponentialHistogram.init("test.exp_histogram", .{ .scale = 10 });
    histogram.record(1, &.{});
    histogram.record(10, &.{});
    histogram.record(100, &.{});

    try std.testing.expectEqual(@as(u64, 3), histogram.getCount());
}

test "observable gauge" {
    const callback = struct {
        fn cb(observer: *Observer(i64)) void {
            observer.observe(42, &.{});
        }
    }.cb;

    const gauge = I64ObservableGauge.init("test.observable", .{}, callback);
    const result = gauge.observe();
    try std.testing.expectEqual(@as(i64, 42), result.value);
}
+158 -3
src/root.zig
··· 27 27 pub const Config = @import("config.zig").Config; 28 28 pub const Span = @import("span.zig").Span; 29 29 pub const Exporter = @import("exporter.zig").Exporter; 30 + pub const Attribute = @import("attribute.zig").Attribute; 31 + 32 + const metrics_mod = @import("metrics.zig"); 33 + pub const Counter = metrics_mod.Counter; 34 + pub const Gauge = metrics_mod.Gauge; 35 + pub const UpDownCounter = metrics_mod.UpDownCounter; 36 + pub const Histogram = metrics_mod.Histogram; 37 + pub const ExponentialHistogram = metrics_mod.ExponentialHistogram; 38 + pub const MetricData = metrics_mod.MetricData; 39 + pub const NumberDataPoint = metrics_mod.NumberDataPoint; 40 + pub const HistogramDataPoint = metrics_mod.HistogramDataPoint; 41 + pub const ExponentialHistogramDataPoint = metrics_mod.ExponentialHistogramDataPoint; 42 + pub const InstrumentOptions = metrics_mod.InstrumentOptions; 43 + pub const HistogramOptions = metrics_mod.HistogramOptions; 44 + pub const AggregationTemporality = metrics_mod.AggregationTemporality; 30 45 31 46 const log_mod = @import("log.zig"); 32 47 pub const Level = log_mod.Level; ··· 41 56 config: Config, 42 57 exporter: Exporter, 43 58 44 - /// pending spans/logs waiting to be exported 59 + /// pending spans/logs/metrics waiting to be exported 45 60 pending_spans: std.ArrayList(Span.Data), 46 61 pending_logs: std.ArrayList(LogRecord), 62 + pending_metrics: std.ArrayList(MetricData), 47 63 pending_mutex: std.Thread.Mutex, 48 64 49 65 /// trace context ··· 60 76 .exporter = Exporter.init(allocator, resolved), 61 77 .pending_spans = .{}, 62 78 .pending_logs = .{}, 79 + .pending_metrics = .{}, 63 80 .pending_mutex = .{}, 64 81 .current_trace_id = null, 65 82 .span_id_counter = std.atomic.Value(u64).init(1), ··· 74 91 pub fn deinit(self: *Logfire) void { 75 92 self.pending_spans.deinit(self.allocator); 76 93 self.pending_logs.deinit(self.allocator); 94 + self.pending_metrics.deinit(self.allocator); 77 95 self.exporter.deinit(); 78 96 
self.allocator.destroy(self); 79 97 } ··· 96 114 self.pending_mutex.lock(); 97 115 defer self.pending_mutex.unlock(); 98 116 99 - if (self.pending_spans.items.len > 0 or self.pending_logs.items.len > 0) { 100 - try self.exporter.send( 117 + if (self.pending_spans.items.len > 0 or self.pending_logs.items.len > 0 or self.pending_metrics.items.len > 0) { 118 + try self.exporter.sendAll( 101 119 self.pending_spans.items, 102 120 self.pending_logs.items, 121 + self.pending_metrics.items, 103 122 ); 104 123 self.pending_spans.clearRetainingCapacity(); 105 124 self.pending_logs.clearRetainingCapacity(); 125 + self.pending_metrics.clearRetainingCapacity(); 106 126 } 107 127 } 108 128 ··· 134 154 }; 135 155 } 136 156 157 + pub fn recordMetric(self: *Logfire, data: MetricData) void { 158 + self.pending_mutex.lock(); 159 + defer self.pending_mutex.unlock(); 160 + self.pending_metrics.append(self.allocator, data) catch { 161 + std.log.warn("logfire: failed to record metric", .{}); 162 + }; 163 + } 164 + 165 + /// record a counter value (monotonic sum) 166 + pub fn recordCounter(self: *Logfire, name: []const u8, value: i64, opts: InstrumentOptions) void { 167 + const now = std.time.nanoTimestamp(); 168 + const dp = self.allocator.create(NumberDataPoint) catch return; 169 + dp.* = .{ 170 + .start_time_ns = now, 171 + .time_ns = now, 172 + .value = .{ .int = value }, 173 + }; 174 + 175 + self.pending_mutex.lock(); 176 + defer self.pending_mutex.unlock(); 177 + self.pending_metrics.append(self.allocator, .{ 178 + .name = name, 179 + .description = opts.description, 180 + .unit = opts.unit, 181 + .data = .{ 182 + .sum = .{ 183 + .data_points = @as(*const [1]NumberDataPoint, dp), 184 + .temporality = .cumulative, 185 + .is_monotonic = true, 186 + }, 187 + }, 188 + }) catch { 189 + self.allocator.destroy(dp); 190 + }; 191 + } 192 + 193 + /// record a gauge value (instantaneous) 194 + pub fn recordGaugeInt(self: *Logfire, name: []const u8, value: i64, opts: InstrumentOptions) void { 
195 + const now = std.time.nanoTimestamp(); 196 + const dp = self.allocator.create(NumberDataPoint) catch return; 197 + dp.* = .{ 198 + .start_time_ns = now, 199 + .time_ns = now, 200 + .value = .{ .int = value }, 201 + }; 202 + 203 + self.pending_mutex.lock(); 204 + defer self.pending_mutex.unlock(); 205 + self.pending_metrics.append(self.allocator, .{ 206 + .name = name, 207 + .description = opts.description, 208 + .unit = opts.unit, 209 + .data = .{ 210 + .gauge = .{ 211 + .data_points = @as(*const [1]NumberDataPoint, dp), 212 + }, 213 + }, 214 + }) catch { 215 + self.allocator.destroy(dp); 216 + }; 217 + } 218 + 219 + /// record a gauge value (instantaneous, f64) 220 + pub fn recordGaugeDouble(self: *Logfire, name: []const u8, value: f64, opts: InstrumentOptions) void { 221 + const now = std.time.nanoTimestamp(); 222 + const dp = self.allocator.create(NumberDataPoint) catch return; 223 + dp.* = .{ 224 + .start_time_ns = now, 225 + .time_ns = now, 226 + .value = .{ .double = value }, 227 + }; 228 + 229 + self.pending_mutex.lock(); 230 + defer self.pending_mutex.unlock(); 231 + self.pending_metrics.append(self.allocator, .{ 232 + .name = name, 233 + .description = opts.description, 234 + .unit = opts.unit, 235 + .data = .{ 236 + .gauge = .{ 237 + .data_points = @as(*const [1]NumberDataPoint, dp), 238 + }, 239 + }, 240 + }) catch { 241 + self.allocator.destroy(dp); 242 + }; 243 + } 244 + 137 245 fn generateTraceId() [16]u8 { 138 246 var id: [16]u8 = undefined; 139 247 std.crypto.random.bytes(&id); ··· 199 307 } 200 308 } 201 309 310 + // metric convenience functions 311 + 312 + pub fn counter(name: []const u8, value: i64) void { 313 + if (getInstance()) |lf| { 314 + lf.recordCounter(name, value, .{}); 315 + } 316 + } 317 + 318 + pub fn counterWithOpts(name: []const u8, value: i64, opts: InstrumentOptions) void { 319 + if (getInstance()) |lf| { 320 + lf.recordCounter(name, value, opts); 321 + } 322 + } 323 + 324 + pub fn gaugeInt(name: []const u8, value: i64) void 
{ 325 + if (getInstance()) |lf| { 326 + lf.recordGaugeInt(name, value, .{}); 327 + } 328 + } 329 + 330 + pub fn gaugeDouble(name: []const u8, value: f64) void { 331 + if (getInstance()) |lf| { 332 + lf.recordGaugeDouble(name, value, .{}); 333 + } 334 + } 335 + 336 + pub fn metric(data: MetricData) void { 337 + if (getInstance()) |lf| { 338 + lf.recordMetric(data); 339 + } 340 + } 341 + 202 342 // tests 203 343 204 344 test "basic configuration" { ··· 236 376 try std.testing.expectEqual(@as(usize, 1), lf.pending_logs.items.len); 237 377 } 238 378 379 + test "metrics recording" { 380 + const lf = try configure(.{ 381 + .service_name = "test-service", 382 + .send_to_logfire = .no, 383 + }); 384 + defer lf.shutdown(); 385 + 386 + counter("requests.total", 1); 387 + gaugeInt("connections.active", 42); 388 + 389 + try std.testing.expectEqual(@as(usize, 2), lf.pending_metrics.items.len); 390 + } 391 + 239 392 // re-export tests from submodules 240 393 test { 241 394 _ = @import("config.zig"); 242 395 _ = @import("exporter.zig"); 243 396 _ = @import("span.zig"); 244 397 _ = @import("log.zig"); 398 + _ = @import("attribute.zig"); 399 + _ = @import("metrics.zig"); 245 400 }
+8 -2
src/span.zig
··· 13 13 14 14 const std = @import("std"); 15 15 const root = @import("root.zig"); 16 + const Attribute = @import("attribute.zig").Attribute; 16 17 17 18 pub const Span = struct { 18 19 logfire: ?*root.Logfire, 19 20 data: Data, 20 21 active: bool, 22 + 23 + pub const max_attributes = 32; 21 24 22 25 pub const Data = struct { 23 26 name: []const u8, ··· 25 28 span_id: [8]u8, 26 29 start_time_ns: i128, 27 30 end_time_ns: i128, 31 + attributes: [max_attributes]Attribute = undefined, 32 + attribute_count: usize = 0, 28 33 }; 29 34 30 35 /// create a span (called by Logfire.createSpan) 31 36 pub fn init(logfire: *root.Logfire, name: []const u8, span_id_num: u64, attrs: anytype) Span { 32 - _ = attrs; // TODO: implement attribute storage 33 - 34 37 var s = Span{ 35 38 .logfire = logfire, 36 39 .data = .{ ··· 45 48 46 49 // encode span ID from counter 47 50 std.mem.writeInt(u64, &s.data.span_id, span_id_num, .big); 51 + 52 + // store attributes 53 + s.data.attribute_count = Attribute.fromStruct(attrs, &s.data.attributes); 48 54 49 55 return s; 50 56 }