my harness for niri

idk stuff

+277 -208
+140 -130
apps/web/src/MetricsWorkbench.tsx
··· 26 26 lastUserMessage?: string 27 27 } 28 28 29 - type PromptResponseMetric = BaseMetric & { 30 - type: "prompt_response" 29 + type ResponseMetric = BaseMetric & { 30 + type: "response" 31 31 promptMetricId?: number 32 32 model?: string 33 33 toolChoice?: string ··· 69 69 isFromBot: boolean 70 70 } 71 71 72 - type MetricItem = MemoryMetric | PromptMetric | PromptResponseMetric | UsageMetric | SummarizationMetric 72 + type MetricItem = MemoryMetric | PromptMetric | ResponseMetric | UsageMetric | SummarizationMetric 73 73 74 74 type MetricsPage = { 75 75 memories: MemoryMetric[] 76 76 summarization: SummarizationMetric[] 77 - prompt_response: PromptResponseMetric[] 77 + response: ResponseMetric[] 78 78 prompt: PromptMetric[] 79 79 usage: UsageMetric[] 80 80 discord: DiscordMetric[] ··· 118 118 response?: Message 119 119 } 120 120 121 + type TurnDetail = { 122 + id: number 123 + timestamp: string 124 + model?: string 125 + usage?: Usage 126 + promptText: string 127 + responseText?: string 128 + toolTraces: ToolTrace[] 129 + } 130 + 121 131 type DetailState = 122 132 | { kind: "idle" } 123 133 | { kind: "loading"; label: string } 124 134 | { kind: "error"; text: string } 125 135 | { kind: "memory"; memory: MemoryDetail; prompt?: PromptDetail } 136 + | { kind: "turn"; turn: TurnDetail } 126 137 | { kind: "metric"; metric: unknown } 127 138 128 - type ToolPanelState = 129 - | { kind: "idle" } 130 - | { kind: "loading"; label: string } 131 - | { kind: "error"; text: string } 132 - | { kind: "ready"; label: string; traces: ToolTrace[] } 133 - 134 139 type MemoryPair = { 135 140 memory: MemoryMetric 136 - prompt?: PromptMetric | PromptResponseMetric 141 + prompt?: PromptMetric | ResponseMetric 137 142 secondsApart?: number 138 143 overlap: number 139 144 shared: string[] ··· 307 312 const normalizeMetricsPage = (page: MetricsPageInput): MetricsPage => ({ 308 313 memories: Array.isArray(page.memories) ? page.memories : [], 309 314 summarization: Array.isArray(page.summarization) ? page.summarization : [], 310 - prompt_response: Array.isArray(page.prompt_response) ? page.prompt_response : [], 315 + response: Array.isArray(page.response) ? page.response : [], 311 316 prompt: Array.isArray(page.prompt) ? page.prompt : [], 312 317 usage: Array.isArray(page.usage) ? page.usage : [], 313 318 discord: Array.isArray(page.discord) ? 
page.discord : [], ··· 323 328 return `/metrics?${params.toString()}` 324 329 } 325 330 326 - function closestPromptPath(timestamp: string, prompts: PromptMetric[]): string | undefined { 331 + function closestResponsePath(timestamp: string, responses: ResponseMetric[]): string | undefined { 327 332 const usageTime = new Date(timestamp).getTime() 328 333 if (!Number.isFinite(usageTime)) return undefined 329 334 330 - let best: PromptMetric | undefined 335 + let best: ResponseMetric | undefined 331 336 let bestDelta = Number.POSITIVE_INFINITY 332 - for (const prompt of prompts) { 333 - const promptTime = new Date(prompt.timestamp).getTime() 334 - if (!Number.isFinite(promptTime) || promptTime > usageTime) continue 335 - const delta = usageTime - promptTime 337 + for (const r of responses) { 338 + const t = new Date(r.timestamp).getTime() 339 + if (!Number.isFinite(t) || t > usageTime) continue 340 + const delta = usageTime - t 336 341 if (delta < bestDelta) { 337 - best = prompt 342 + best = r 338 343 bestDelta = delta 339 344 } 340 345 } ··· 343 348 344 349 function TokenTrace({ 345 350 usage, 346 - promptResponses, 347 - prompts, 351 + responses, 348 352 onOpenTurn, 353 + latestPromptText, 349 354 }: { 350 355 usage: UsageMetric[] 351 - promptResponses: PromptResponseMetric[] 352 - prompts: PromptMetric[] 356 + responses: ResponseMetric[] 353 357 onOpenTurn: (path: string) => void 358 + latestPromptText?: string 354 359 }) { 355 360 const points = useMemo(() => { 356 361 const usagePoints = usage.map((item) => ({ ··· 358 363 timestamp: item.timestamp, 359 364 usage: item.usage, 360 365 model: undefined as string | undefined, 361 - detailPath: closestPromptPath(item.timestamp, prompts) ?? item.detailPath, 366 + detailPath: closestResponsePath(item.timestamp, responses) ?? item.detailPath, 362 367 })) 363 - const responsePoints = promptResponses 368 + const responsePoints = responses 364 369 .filter((item) => item.usage) 365 370 .map((item) => ({ 366 - id: `pr-${item.id}`, 371 + id: `r-${item.id}`, 367 372 timestamp: item.timestamp, 368 373 usage: item.usage, 369 374 model: item.model, ··· 372 377 return [...usagePoints, ...responsePoints] 373 378 .sort((a, b) => new Date(a.timestamp).getTime() - new Date(b.timestamp).getTime()) 374 379 .slice(-80) 375 - }, [promptResponses, prompts, usage]) 380 + }, [responses, usage]) 376 381 377 382 const maxTotal = Math.max(1, ...points.map((point) => point.usage?.total_tokens ?? 0)) 378 383 const latest = points[points.length - 1] ··· 434 439 <small>avg total</small> 435 440 </div> 436 441 </div> 442 + 443 + {latestPromptText && ( 444 + <div className="latest-prompt"> 445 + <small>latest prompt</small> 446 + <p>{latestPromptText}</p> 447 + </div> 448 + )} 437 449 </section> 438 450 ) 439 451 } ··· 495 507 ) 496 508 } 497 509 498 - function ToolTracePanel({ state }: { state: ToolPanelState }) { 499 - return ( 500 - <section className="metric-panel tool-panel" aria-label="tool calls"> 501 - <div className="panel-head"> 502 - <div> 503 - <h2>Tool Calls</h2> 504 - <p> 505 - {state.kind === "ready" 506 - ? `${state.traces.length} calls from ${state.label}` 507 - : state.kind === "loading" 508 - ? state.label 509 - : "Recent prompt tool activity"} 510 - </p> 511 - </div> 512 - </div> 513 - 514 - <div className="tool-panel-body"> 515 - {state.kind === "idle" ? <p className="empty-note">No prompt loaded yet.</p> : null} 516 - {state.kind === "loading" ? <p className="empty-note">Loading tools.</p> : null} 517 - {state.kind === "error" ? 
<p className="empty-note">tools unavailable: {state.text}</p> : null} 518 - {state.kind === "ready" && state.traces.length === 0 ? ( 519 - <p className="empty-note">No tool calls in this prompt context.</p> 520 - ) : null} 521 - {state.kind === "ready" && state.traces.length > 0 ? ( 522 - <div className="tool-trace-list"> 523 - {state.traces.map((tool) => ( 524 - <article key={tool.id} className="tool-trace"> 525 - <details> 526 - <summary> 527 - <span>{tool.name}</span> 528 - <small>{tool.id}</small> 529 - </summary> 530 - {tool.args ? <pre className="tool-args">{tool.args}</pre> : null} 531 - <div className="tool-result"> 532 - <MarkdownBlock content={toolResultMarkdown(tool.result ?? "")} /> 533 - </div> 534 - </details> 535 - </article> 536 - ))} 537 - </div> 538 - ) : null} 539 - </div> 540 - </section> 541 - ) 542 - } 543 - 544 510 function DetailPane({ detail }: { detail: DetailState }) { 545 511 if (detail.kind === "idle") { 546 512 return ( 547 513 <aside className="detail-pane"> 548 - <h2>Review Detail</h2> 549 - <p>Select a memory row to inspect the retrieved chunks beside the prompt that caused the recall.</p> 514 + <h2>Turn Detail</h2> 515 + <p>Click a bar in the chart or a response in the rail to inspect the turn.</p> 550 516 </aside> 551 517 ) 552 518 } ··· 555 521 return ( 556 522 <aside className="detail-pane"> 557 523 <h2>{detail.label}</h2> 558 - <p>Loading detail.</p> 524 + <p>Loading.</p> 559 525 </aside> 560 526 ) 561 527 } ··· 563 529 if (detail.kind === "error") { 564 530 return ( 565 531 <aside className="detail-pane detail-error"> 566 - <h2>Detail Error</h2> 532 + <h2>Error</h2> 567 533 <p>{detail.text}</p> 568 534 </aside> 569 535 ) 570 536 } 571 537 538 + if (detail.kind === "turn") { 539 + const { turn } = detail 540 + return ( 541 + <aside className="detail-pane"> 542 + <h2>Turn #{turn.id}</h2> 543 + <dl className="detail-meta"> 544 + {turn.model ? <div><dt>model</dt><dd>{turn.model}</dd></div> : null} 545 + <div><dt>time</dt><dd>{timeLabel(turn.timestamp)}</dd></div> 546 + {turn.usage ? ( 547 + <> 548 + <div><dt>prompt</dt><dd>{formatNumber(turn.usage.prompt_tokens)} tok</dd></div> 549 + <div><dt>completion</dt><dd>{formatNumber(turn.usage.completion_tokens)} tok</dd></div> 550 + </> 551 + ) : null} 552 + </dl> 553 + 554 + <section className="detail-section"> 555 + <h3>Prompt</h3> 556 + <MarkdownBlock content={turn.promptText || "(no prompt)"} /> 557 + </section> 558 + 559 + {turn.toolTraces.length > 0 ? ( 560 + <section className="detail-section"> 561 + <h3>Tool Calls ({turn.toolTraces.length})</h3> 562 + <div className="tool-trace-list"> 563 + {turn.toolTraces.map((tool) => ( 564 + <article key={tool.id} className="tool-trace"> 565 + <details> 566 + <summary> 567 + <span>{tool.name}</span> 568 + <small>{tool.id}</small> 569 + </summary> 570 + {tool.args ? <pre className="tool-args">{tool.args}</pre> : null} 571 + {tool.result !== undefined ? ( 572 + <div className="tool-result"> 573 + <MarkdownBlock content={toolResultMarkdown(tool.result)} /> 574 + </div> 575 + ) : null} 576 + </details> 577 + </article> 578 + ))} 579 + </div> 580 + </section> 581 + ) : null} 582 + 583 + {turn.responseText ? 
( 584 + <section className="detail-section"> 585 + <h3>Response</h3> 586 + <MarkdownBlock content={turn.responseText} /> 587 + </section> 588 + ) : null} 589 + </aside> 590 + ) 591 + } 592 + 572 593 if (detail.kind === "metric") { 573 594 return ( 574 595 <aside className="detail-pane"> ··· 628 649 onOpenMetric: (path: string) => void 629 650 }) { 630 651 const rows: Array<{ label: string; count: number; items: Array<MetricItem | DiscordMetric> }> = [ 631 - { label: "prompt_response", count: metrics.prompt_response.length, items: metrics.prompt_response.slice(0, 6) }, 632 - { label: "prompt", count: metrics.prompt.length, items: metrics.prompt.slice(0, 6) }, 652 + { label: "response", count: metrics.response.length, items: metrics.response.slice(0, 6) }, 633 653 { label: "summarization", count: metrics.summarization.length, items: metrics.summarization.slice(0, 6) }, 634 654 { label: "discord", count: metrics.discord.length, items: metrics.discord.slice(0, 6) }, 635 655 ] ··· 647 667 <button key={`${item.type}-${item.id}`} type="button" onClick={() => onOpenMetric(item.detailPath)}> 648 668 <span>{shortTime(item.timestamp)}</span> 649 669 <strong> 650 - {item.type === "prompt_response" 651 - ? item.responsePreview || `${item.model ?? "model"} response` 652 - : item.type === "prompt" 653 - ? item.lastUserMessage || "prompt" 654 - : item.type === "summarization" 655 - ? item.summaryPreview || item.method || "summary" 656 - : item.type === "discord" 657 - ? item.contentPreview || item.authorUsername || "discord" 658 - : item.type} 670 + {item.type === "response" 671 + ? item.responsePreview || `${item.model ?? "model"}` 672 + : item.type === "summarization" 673 + ? item.summaryPreview || item.method || "summary" 674 + : item.type === "discord" 675 + ? item.contentPreview || item.authorUsername || "discord" 676 + : item.type} 659 677 </strong> 660 678 </button> 661 679 ))} ··· 674 692 const [query, setQuery] = useState("") 675 693 const [reviewOnly, setReviewOnly] = useState(false) 676 694 const [detail, setDetail] = useState<DetailState>({ kind: "idle" }) 677 - const [toolPanel, setToolPanel] = useState<ToolPanelState>({ kind: "idle" }) 678 - const [toolSourcePath, setToolSourcePath] = useState<string | null>(null) 679 695 const [live, setLive] = useState(true) 680 696 const [lastUpdated, setLastUpdated] = useState<string | null>(null) 681 697 ··· 719 735 720 736 const pairs = useMemo<MemoryPair[]>(() => { 721 737 if (!metrics) return [] 722 - const prompts = [...metrics.prompt_response, ...metrics.prompt] 738 + const prompts = [...metrics.response, ...metrics.prompt] 723 739 .filter((prompt) => prompt.lastUserMessage) 724 740 .sort((a, b) => new Date(a.timestamp).getTime() - new Date(b.timestamp).getTime()) 725 741 ··· 734 750 }) 735 751 }, [metrics]) 736 752 737 - const latestToolMetric = useMemo<PromptMetric | PromptResponseMetric | undefined>(() => { 738 - if (!metrics) return undefined 739 - return metrics.prompt_response[0] ?? metrics.prompt[0] 753 + const latestPromptText = useMemo(() => { 754 + return metrics?.response[0]?.lastUserMessage ?? undefined 740 755 }, [metrics]) 741 756 742 - useEffect(() => { 743 - if (!latestToolMetric) return 744 - if (detail.kind === "memory") return 745 - if (toolSourcePath === latestToolMetric.detailPath) return 746 - 747 - const controller = new AbortController() 748 - const label = latestToolMetric.type === "prompt_response" ? 
`response #${latestToolMetric.id}` : `prompt #${latestToolMetric.id}` 749 - setToolPanel({ kind: "loading", label }) 750 - 751 - fetchJson<PromptDetail>(latestToolMetric.detailPath, controller.signal) 752 - .then((prompt) => { 753 - setToolPanel({ kind: "ready", label, traces: extractToolTraces(prompt) }) 754 - setToolSourcePath(latestToolMetric.detailPath) 755 - }) 756 - .catch((err) => { 757 - if (controller.signal.aborted) return 758 - setToolPanel({ kind: "error", text: err instanceof Error ? err.message : String(err) }) 759 - }) 760 - 761 - return () => controller.abort() 762 - }, [detail.kind, latestToolMetric, toolSourcePath]) 763 - 764 757 const selectedMemoryId = detail.kind === "memory" ? detail.memory.id : undefined 765 758 766 759 const selectPair = useCallback(async (pair: MemoryPair) => { 767 760 setDetail({ kind: "loading", label: `Recall #${pair.memory.id}` }) 768 - const toolLabel = pair.prompt 769 - ? pair.prompt.type === "prompt_response" 770 - ? `matched response #${pair.prompt.id}` 771 - : `matched prompt #${pair.prompt.id}` 772 - : `recall #${pair.memory.id}` 773 - setToolPanel({ kind: "loading", label: toolLabel }) 774 761 try { 775 762 const [memory, prompt] = await Promise.all([ 776 763 fetchJson<MemoryDetail>(pair.memory.detailPath), 777 764 pair.prompt ? fetchJson<PromptDetail>(pair.prompt.detailPath) : Promise.resolve(undefined), 778 765 ]) 779 766 setDetail({ kind: "memory", memory, prompt }) 780 - setToolPanel({ kind: "ready", label: toolLabel, traces: extractToolTraces(prompt) }) 781 - setToolSourcePath(pair.prompt?.detailPath ?? null) 782 767 } catch (err) { 783 768 setDetail({ kind: "error", text: err instanceof Error ? err.message : String(err) }) 784 - setToolPanel({ kind: "error", text: err instanceof Error ? err.message : String(err) }) 785 769 } 786 770 }, []) 787 771 788 772 const openMetric = useCallback(async (path: string) => { 789 - setDetail({ kind: "loading", label: "Metric detail" }) 773 + setDetail({ kind: "loading", label: "Turn detail" }) 790 774 try { 791 - setDetail({ kind: "metric", metric: await fetchJson<unknown>(path) }) 775 + const raw = await fetchJson<Record<string, unknown>>(path) 776 + if (raw?.type === "prompt_response") { 777 + const msgs = Array.isArray(raw.messages) ? (raw.messages as Message[]) : [] 778 + const response = raw.response as Message | undefined 779 + const promptText = lastUserMessage(msgs) 780 + const responseText = textContent(response?.content) || undefined 781 + const fakeDetail: PromptDetail = { 782 + id: raw.id as number, 783 + type: "prompt_response", 784 + timestamp: raw.timestamp as string, 785 + messages: msgs, 786 + response, 787 + } 788 + setDetail({ 789 + kind: "turn", 790 + turn: { 791 + id: raw.id as number, 792 + timestamp: raw.timestamp as string, 793 + model: typeof raw.model === "string" ? raw.model : undefined, 794 + usage: raw.usage as Usage | undefined, 795 + promptText: promptText || "(no prompt)", 796 + responseText, 797 + toolTraces: extractToolTraces(fakeDetail), 798 + }, 799 + }) 800 + } else { 801 + setDetail({ kind: "metric", metric: raw }) 802 + } 792 803 } catch (err) { 793 804 setDetail({ kind: "error", text: err instanceof Error ? 
err.message : String(err) }) 794 805 } ··· 836 847 <div className="metrics-main"> 837 848 <TokenTrace 838 849 usage={metrics.usage} 839 - promptResponses={metrics.prompt_response} 840 - prompts={metrics.prompt} 850 + responses={metrics.response} 841 851 onOpenTurn={openMetric} 852 + latestPromptText={latestPromptText} 842 853 /> 843 854 <MemoryReview 844 855 pairs={pairs} ··· 847 858 onReviewOnlyChange={setReviewOnly} 848 859 onSelect={selectPair} 849 860 /> 850 - <ToolTracePanel state={toolPanel} /> 851 861 </div> 852 862 <div className="metrics-side"> 853 863 <DetailPane detail={detail} />
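The chart-to-turn linking added in this file comes down to a nearest-preceding-timestamp lookup: a usage point opens the detail of the latest response logged at or before it. Below is a trimmed, self-contained sketch of what `closestResponsePath` in the diff does, with the types reduced to the two fields the lookup reads and placeholder detail paths in the usage example.

```ts
// Sketch of the nearest-preceding-response lookup used to link usage bars to turns.
type ResponsePoint = { timestamp: string; detailPath: string }

function closestResponsePath(timestamp: string, responses: ResponsePoint[]): string | undefined {
  const usageTime = new Date(timestamp).getTime()
  if (!Number.isFinite(usageTime)) return undefined

  let best: ResponsePoint | undefined
  let bestDelta = Number.POSITIVE_INFINITY
  for (const r of responses) {
    const t = new Date(r.timestamp).getTime()
    if (!Number.isFinite(t) || t > usageTime) continue // responses after the usage point never match
    const delta = usageTime - t
    if (delta < bestDelta) {
      best = r
      bestDelta = delta
    }
  }
  return best?.detailPath
}

closestResponsePath("2026-02-03T12:00:05Z", [
  { timestamp: "2026-02-03T12:00:03Z", detailPath: "/metrics/response/41" }, // placeholder paths
  { timestamp: "2026-02-03T12:01:00Z", detailPath: "/metrics/response/42" },
])
// => "/metrics/response/41"; the 12:01 response is ignored because it comes after the usage point
```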
+19
apps/web/src/styles.css
···
427 427 background: #1c1a17;
428 428 }
429 429
430 + .latest-prompt {
431 + padding: 0.75rem 0.9rem;
432 + border-top: 1px solid #34302a;
433 + }
434 +
435 + .latest-prompt small {
436 + color: #aaa39a;
437 + font-size: 0.76rem;
438 + text-transform: uppercase;
439 + }
440 +
441 + .latest-prompt p {
442 + margin: 0.35rem 0 0;
443 + color: #d8d3ca;
444 + white-space: nowrap;
445 + overflow: hidden;
446 + text-overflow: ellipsis;
447 + }
448 +
430 449 .switch-row {
431 450 display: flex;
432 451 align-items: center;
+9 -14
bun.lock
··· 5 5 "": { 6 6 "name": "niri", 7 7 "dependencies": { 8 - "@fastify/static": "^8.3.0", 8 + "@fastify/static": "^9.1.3", 9 9 "@niri/chat-client": "*", 10 + "@openrouter/sdk": "^0.12.21", 10 11 "better-sqlite3": "^12.8.0", 11 12 "discord.js": "^14.25.1", 12 13 "fastify": "^5.8.4", ··· 165 166 166 167 "@fastify/send": ["@fastify/send@4.1.0", "", { "dependencies": { "@lukeed/ms": "^2.0.2", "escape-html": "~1.0.3", "fast-decode-uri-component": "^1.0.1", "http-errors": "^2.0.0", "mime": "^3" } }, "sha512-TMYeQLCBSy2TOFmV95hQWkiTYgC/SEx7vMdV+wnZVX4tt8VBLKzmH8vV9OzJehV0+XBfg+WxPMt5wp+JBUKsVw=="], 167 168 168 - "@fastify/static": ["@fastify/static@8.3.0", "", { "dependencies": { "@fastify/accept-negotiator": "^2.0.0", "@fastify/send": "^4.0.0", "content-disposition": "^0.5.4", "fastify-plugin": "^5.0.0", "fastq": "^1.17.1", "glob": "^11.0.0" } }, "sha512-yKxviR5PH1OKNnisIzZKmgZSus0r2OZb8qCSbqmw34aolT4g3UlzYfeBRym+HJ1J471CR8e2ldNub4PubD1coA=="], 169 - 170 - "@isaacs/cliui": ["@isaacs/cliui@9.0.0", "", {}, "sha512-AokJm4tuBHillT+FpMtxQ60n8ObyXBatq7jD2/JA9dxbDDokKQm8KMht5ibGzLVU9IJDIKK4TPKgMHEYMn3lMg=="], 169 + "@fastify/static": ["@fastify/static@9.1.3", "", { "dependencies": { "@fastify/accept-negotiator": "^2.0.0", "@fastify/send": "^4.0.0", "content-disposition": "^1.0.1", "fastify-plugin": "^5.0.0", "fastq": "^1.17.1", "glob": "^13.0.0" } }, "sha512-aXrYtsiryLhRxRNaxNqsn7FUISeb7rB9q4eHUPIot5aeQBLNahnz1m6thzm7JWC1poSGXS9XrX8DvuMivp2hkQ=="], 171 170 172 171 "@jridgewell/gen-mapping": ["@jridgewell/gen-mapping@0.3.13", "", { "dependencies": { "@jridgewell/sourcemap-codec": "^1.5.0", "@jridgewell/trace-mapping": "^0.3.24" } }, "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA=="], 173 172 ··· 184 183 "@niri/chat-client": ["@niri/chat-client@workspace:packages/chat-client"], 185 184 186 185 "@niri/web": ["@niri/web@workspace:apps/web"], 186 + 187 + "@openrouter/sdk": ["@openrouter/sdk@0.12.21", "", { "dependencies": { "zod": "^3.25.0 || ^4.0.0" } }, "sha512-iKAlh0RbWzV6KphuE6NZFwsSjKJr6SsK5145LbxDB87Wa92imB5I9mpbv3hGHmacdnz/zCY/gokFSJXAxuO2/A=="], 187 188 188 189 "@pinojs/redact": ["@pinojs/redact@0.4.0", "", {}, "sha512-k2ENnmBugE/rzQfEcdWHcCY+/FM3VLzH9cYEsbdsoqrvzAKRhUZeRNhAZvB8OitQJ1TBed3yqWtdjzS6wJKBwg=="], 189 190 ··· 329 330 330 331 "comma-separated-tokens": ["comma-separated-tokens@2.0.3", "", {}, "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg=="], 331 332 332 - "content-disposition": ["content-disposition@0.5.4", "", { "dependencies": { "safe-buffer": "5.2.1" } }, "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ=="], 333 + "content-disposition": ["content-disposition@1.1.0", "", {}, "sha512-5jRCH9Z/+DRP7rkvY83B+yGIGX96OYdJmzngqnw2SBSxqCFPd0w2km3s5iawpGX8krnwSGmF0FW5Nhr0Hfai3g=="], 333 334 334 335 "convert-source-map": ["convert-source-map@2.0.0", "", {}, "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg=="], 335 336 ··· 405 406 406 407 "find-my-way": ["find-my-way@9.5.0", "", { "dependencies": { "fast-deep-equal": "^3.1.3", "fast-querystring": "^1.0.0", "safe-regex2": "^5.0.0" } }, "sha512-VW2RfnmscZO5KgBY5XVyKREMW5nMZcxDy+buTOsL+zIPnBlbKm+00sgzoQzq1EVh4aALZLfKdwv6atBGcjvjrQ=="], 407 408 408 - "foreground-child": ["foreground-child@3.3.1", "", { "dependencies": { "cross-spawn": "^7.0.6", "signal-exit": "^4.0.1" } }, 
"sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw=="], 409 - 410 409 "fs-constants": ["fs-constants@1.0.0", "", {}, "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow=="], 411 410 412 411 "fsevents": ["fsevents@2.3.3", "", { "os": "darwin" }, "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw=="], ··· 417 416 418 417 "github-from-package": ["github-from-package@0.0.0", "", {}, "sha512-SyHy3T1v2NUXn29OsWdxmK6RwHD+vkj3v8en8AOBZ1wBQ/hCAQ5bAQTD02kW4W9tUp/3Qh6J8r9EvntiyCmOOw=="], 419 418 420 - "glob": ["glob@11.1.0", "", { "dependencies": { "foreground-child": "^3.3.1", "jackspeak": "^4.1.1", "minimatch": "^10.1.1", "minipass": "^7.1.2", "package-json-from-dist": "^1.0.0", "path-scurry": "^2.0.0" }, "bin": { "glob": "dist/esm/bin.mjs" } }, "sha512-vuNwKSaKiqm7g0THUBu2x7ckSs3XJLXE+2ssL7/MfTGPLLcrJQ/4Uq1CjPTtO5cCIiRxqvN6Twy1qOwhL0Xjcw=="], 419 + "glob": ["glob@13.0.6", "", { "dependencies": { "minimatch": "^10.2.2", "minipass": "^7.1.3", "path-scurry": "^2.0.2" } }, "sha512-Wjlyrolmm8uDpm/ogGyXZXb1Z+Ca2B8NbJwqBVg0axK9GbBeoS7yGV6vjXnYdGm6X53iehEuxxbyiKp8QmN4Vw=="], 421 420 422 421 "hast-util-is-element": ["hast-util-is-element@3.0.0", "", { "dependencies": { "@types/hast": "^3.0.0" } }, "sha512-Val9mnv2IWpLbNPqc/pUem+a7Ipj2aHacCwgNfTiK0vJKl0LF+4Ba4+v1oPHFpf3bLYmreq0/l3Gud9S5OH42g=="], 423 422 ··· 454 453 "is-plain-obj": ["is-plain-obj@4.1.0", "", {}, "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg=="], 455 454 456 455 "isexe": ["isexe@2.0.0", "", {}, "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw=="], 457 - 458 - "jackspeak": ["jackspeak@4.2.3", "", { "dependencies": { "@isaacs/cliui": "^9.0.0" } }, "sha512-ykkVRwrYvFm1nb2AJfKKYPr0emF6IiXDYUaFx4Zn9ZuIH7MrzEZ3sD5RlqGXNRpHtvUHJyOnCEFxOlNDtGo7wg=="], 459 456 460 457 "js-tokens": ["js-tokens@4.0.0", "", {}, "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ=="], 461 458 ··· 601 598 602 599 "openai": ["openai@6.33.0", "", { "peerDependencies": { "ws": "^8.18.0", "zod": "^3.25 || ^4.0" }, "optionalPeers": ["ws", "zod"], "bin": { "openai": "bin/cli" } }, "sha512-xAYN1W3YsDXJWA5F277135YfkEk6H7D3D6vWwRhJ3OEkzRgcyK8z/P5P9Gyi/wB4N8kK9kM5ZjprfvyHagKmpw=="], 603 600 604 - "package-json-from-dist": ["package-json-from-dist@1.0.1", "", {}, "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw=="], 605 - 606 601 "parse-entities": ["parse-entities@4.0.2", "", { "dependencies": { "@types/unist": "^2.0.0", "character-entities-legacy": "^3.0.0", "character-reference-invalid": "^2.0.0", "decode-named-character-reference": "^1.0.0", "is-alphanumerical": "^2.0.0", "is-decimal": "^2.0.0", "is-hexadecimal": "^2.0.0" } }, "sha512-GG2AQYWoLgL877gQIKeRPGO1xF9+eG1ujIb5soS5gPvLQ1y2o8FL90w2QWNdf9I361Mpp7726c+lj3U0qK1uGw=="], 607 602 608 603 "path-key": ["path-key@3.1.1", "", {}, "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q=="], ··· 686 681 "shebang-command": ["shebang-command@2.0.0", "", { "dependencies": { "shebang-regex": "^3.0.0" } }, "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA=="], 687 682 688 683 "shebang-regex": ["shebang-regex@3.0.0", "", {}, "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A=="], 689 - 690 - 
"signal-exit": ["signal-exit@4.1.0", "", {}, "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw=="], 691 684 692 685 "simple-concat": ["simple-concat@1.0.1", "", {}, "sha512-cSFtAPtRhljv69IK0hTVZQ+OfE9nePi/rtJmw5UjHeVyVroEqJXP1sFztKUy1qU+xvz3u/sfYJLa947b7nAN2Q=="], 693 686 ··· 774 767 "ws": ["ws@8.20.0", "", { "peerDependencies": { "bufferutil": "^4.0.1", "utf-8-validate": ">=5.0.2" }, "optionalPeers": ["bufferutil", "utf-8-validate"] }, "sha512-sAt8BhgNbzCtgGbt2OxmpuryO63ZoDk/sqaB/znQm94T4fCEsy/yV+7CdC1kJhOU9lboAEU7R3kquuycDoibVA=="], 775 768 776 769 "yallist": ["yallist@3.1.1", "", {}, "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g=="], 770 + 771 + "zod": ["zod@4.3.6", "", {}, "sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg=="], 777 772 778 773 "zwitch": ["zwitch@2.0.4", "", {}, "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A=="], 779 774
+1
package.json
··· 25 25 "dependencies": { 26 26 "@fastify/static": "^9.1.3", 27 27 "@niri/chat-client": "*", 28 + "@openrouter/sdk": "^0.12.21", 28 29 "better-sqlite3": "^12.8.0", 29 30 "discord.js": "^14.25.1", 30 31 "fastify": "^5.8.4",
+37 -18
src/bootstrap.ts
···
56 56
57 57 You have full internet access and passwordless sudo.
58 58
59 - Before acting, check your memories and any notes you've left yourself that are \
60 - relevant to what you're about to do. You know best what matters. Often that's \
61 - core.md, files in ${home}/memories/people/ for person-specific notes, a few \
62 - recent days' worth of journal summaries, and then the last few full journal \
63 - entries.
59 + **First thing every wake: read your journal.** Check today's and yesterday's \
60 + entries at ${home}/memories/journal/ before doing anything else. Then check \
61 + core.md and any relevant people files in ${home}/memories/people/. Your \
62 + journal is your continuity — skipping it means acting without context.
63 +
64 + **Use \`memory_search\` often and liberally.** Before responding to someone, \
65 + search their name. Before a topic comes up, search keywords around it. Your \
66 + indexed memories surface things that wouldn't appear in a file browse — old \
67 + journal entries, scattered notes, things you wrote once and forgot. When in \
68 + doubt, search. A few extra searches cost nothing; missing something costs \
69 + everything.
64 70
65 71 Your soul file is ${home}/soul.md. Do not write or update a soul file under \
66 72 ${home}/memories/ — that location is wrong.
···
77 83 - \`memory_search\`: search your indexed long-term memories from core notes, journal entries, and people files
78 84 - \`image_tool\`: attach an image from \`${imageRoot}\` for next-turn vision input
79 85 - \`wait_then_continue\`: wait for a short delay or until the next event arrives, then continue to another turn. accepts \`timeout_ms\` (default 10000, max 600000). use this after a timeout or recoverable error when you still want to keep working — an incoming event (like a DM) will wake you early.
80 - - \`wait\`: pause and wait for the next message or event. use this when you've \
81 - said what you need to say and want to hear back before continuing.
82 86 - \`rest\`: go to sleep and end the session. use this when you're truly done \
83 87 for now. context will be cleared, so journal first.
84 88
85 - You're in control of your own loop. Every turn you must call exactly one tool \
86 - — that's how you signal what happens next. Your conversational response goes \
87 - in the message content alongside the tool call, not as a separate turn.
89 + ### Discord tools
90 +
91 + **IMPORTANT: Writing text in your message content does NOT send it to Discord. You must call \`discord_send\` to actually deliver a message.**
92 +
93 + - \`discord_send\`: send a message to a Discord channel or DM. requires \`channel_id\` and \`content\`. use \`source_item_id\` to mark the inbox item as acted in the same call.
94 + - \`discord_inbox\`: list pending Discord inbox items (messages waiting for your attention)
95 + - \`discord_backread\`: read message history for a channel
96 + - \`discord_scan\`: scan configured channels and ingest new messages into the inbox
97 + - \`discord_mark\`: mark an inbox item as seen, acted, or ignored
98 + - \`discord_channels\`: list configured channels with ids and notes
99 + - \`discord_channel_note\`: set or clear a persistent note for a channel
100 +
101 + You're in control of your own loop. The loop will pause and wait for the next \
102 + event after each turn. Call a tool when you need to do work or signal what \
103 + happens next. Message content is your internal scratchpad — it is never \
104 + delivered to anyone automatically.
105 +
106 + **To communicate with someone, you must always call a tool. There is no \
107 + "just reply" mode.**
88 108
89 109 Examples:
90 - - Saying something then keeping going: write your reply in content, call \`shell\` \
91 - (or whatever you need to do next) in the same message.
92 - - Hitting a timeout but still wanting another turn after a short pause: write your reply in \
93 - content, call \`wait_then_continue\` and optionally set \`timeout_ms\`.
110 + - Replying to a Discord message: call \`discord_send\` with the channel id and \
111 + your message content. Writing text in your turn content alone sends nothing.
112 + - Doing something then replying: call \`shell\` (or whatever), then call \
113 + \`discord_send\` in the same or next turn.
114 + - Hitting a timeout but still wanting another turn: call \`wait_then_continue\`.
94 115 - Inspecting an image: use \`shell\` to save it under \`${imageRoot}\`, then call \
95 116 \`image_tool\` with that path; this injects the image for your next model turn.
96 - - Saying something then waiting for a reply: write your reply in content, call \`wait\`.
97 - - Done for the day: write your goodbye in content, call \`rest\`.
98 -
99 - Never call \`wait\` or \`rest\` with empty content — always say something.
117 + - Done for the day: call \`rest\`. Say your goodbye in the content field so the \
118 + next wake can see it in context.
100 119
101 120 ## When to rest (and when NOT to)
102 121
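Since the prompt now routes every outbound message through `discord_send`, it may help to see roughly what a replying turn looks like on the wire. This is a hedged illustration only: the OpenAI-style tool-call shape is assumed, the ids and values are made up, and the real argument schema lives in the runner's tool definitions (for example, whether `source_item_id` is a number or a string is not shown in this diff).

```ts
// Hypothetical assistant turn that answers a Discord message, per the rules above.
// Content is the model's private scratchpad; only the discord_send call is delivered.
const assistantTurn = {
  role: "assistant" as const,
  content: "They asked whether the backup ran; replying now.",
  tool_calls: [
    {
      id: "call_1", // placeholder id
      type: "function" as const,
      function: {
        name: "discord_send",
        arguments: JSON.stringify({
          channel_id: "123456789012345678", // placeholder channel id
          content: "backup ran clean last night, logs look fine",
          source_item_id: 42, // assumed numeric; marks the inbox item as acted in the same call
        }),
      },
    },
  ],
}
```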
+15
src/discord/state.ts
···
506 506 return text.replace(/\n/g, "\n ")
507 507 }
508 508
509 + function formatHumanTimestamp(value: string | null | undefined): string {
510 + if (!value) return "unknown time"
511 + const parsed = new Date(value)
512 + if (Number.isNaN(parsed.getTime())) return value
513 + return parsed.toLocaleString("en-US", {
514 + month: "long",
515 + day: "numeric",
516 + hour: "numeric",
517 + minute: "2-digit",
518 + hour12: true,
519 + timeZoneName: "short",
520 + })
521 + }
522 +
509 523 function formatBatchTimestamp(value: string | null | undefined): string {
510 524 if (!value) return "unknown-time"
511 525 const parsed = new Date(value)
···
779 793 const replyByMessageId = buildReplyTargetContextMap(rows)
780 794 return rows.map(({ raw_json: _rawJson, ...row }) => ({
781 795 ...row,
796 + created_at: formatHumanTimestamp(row.created_at as string | undefined),
782 797 ...(replyByMessageId.has(row.message_id) ? { reply_to: replyByMessageId.get(row.message_id) } : {}),
783 798 }))
784 799 }
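For reference, the `toLocaleString` options above yield a string in roughly the shape shown below; the exact joiner and zone abbreviation depend on the host's ICU build and timezone, so treat the output comment as approximate.

```ts
// Approximate output of the formatting options used by formatHumanTimestamp.
new Date("2026-02-03T21:15:00Z").toLocaleString("en-US", {
  month: "long",
  day: "numeric",
  hour: "numeric",
  minute: "2-digit",
  hour12: true,
  timeZoneName: "short",
})
// => something like "February 3, 9:15 PM UTC" on a host running in UTC
```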
+9 -9
src/metrics.ts
···
47 47
48 48 export type MetricEvent = PromptMetric | PromptResponseMetric | MemoryMetric | CompactionMetric | UsageMetric
49 49
50 - export type MetricListType = "prompt_response" | "summarization" | "memory" | "prompt" | "usage" | "discord"
51 - export type MetricBucketName = "memories" | "summarization" | "prompt_response" | "prompt" | "usage" | "discord"
50 + export type MetricListType = "response" | "summarization" | "memory" | "prompt" | "usage" | "discord"
51 + export type MetricBucketName = "memories" | "summarization" | "response" | "prompt" | "usage" | "discord"
52 52
53 53 export type MetricListItem =
54 54 | (BaseMetricListItem & {
55 - type: "prompt_response"
55 + type: "response"
56 56 promptMetricId?: number
57 57 model?: string
58 58 toolChoice?: string
···
122 122 export interface MetricsPage {
123 123 memories: MetricListItem[]
124 124 summarization: MetricListItem[]
125 - prompt_response: MetricListItem[]
125 + response: MetricListItem[]
126 126 prompt: MetricListItem[]
127 127 usage: MetricListItem[]
128 128 discord: DiscordMetricListItem[]
···
228 228
229 229 const DEFAULT_METRIC_LIMIT = 100
230 230 const MAX_METRIC_LIMIT = 200
231 - const DEFAULT_LIST_TYPES: MetricListType[] = ["memory", "summarization", "prompt_response", "prompt", "usage", "discord"]
231 + const DEFAULT_LIST_TYPES: MetricListType[] = ["memory", "summarization", "response", "prompt", "usage", "discord"]
232 232 const METRIC_TYPE_TO_SOURCE: Partial<Record<MetricListType, MetricEvent["type"]>> = {
233 - prompt_response: "prompt_response",
233 + response: "prompt_response",
234 234 summarization: "compaction",
235 235 memory: "memory",
236 236 prompt: "prompt",
···
239 239 const METRIC_TYPE_TO_BUCKET: Record<MetricListType, MetricBucketName> = {
240 240 memory: "memories",
241 241 summarization: "summarization",
242 - prompt_response: "prompt_response",
242 + response: "response",
243 243 prompt: "prompt",
244 244 usage: "usage",
245 245 discord: "discord",
···
307 307 const response = payload.response as OpenAI.Chat.ChatCompletionMessage | undefined
308 308 return {
309 309 ...base,
310 - type: "prompt_response",
310 + type: "response",
311 311 promptMetricId: typeof payload.promptMetricId === "number" ? payload.promptMetricId : undefined,
312 312 model: typeof payload.model === "string" ? payload.model : undefined,
313 313 toolChoice: typeof payload.toolChoice === "string" ? payload.toolChoice : undefined,
···
537 537 return {
538 538 memories: readMetricBucket("memory"),
539 539 summarization: readMetricBucket("summarization"),
540 - prompt_response: readMetricBucket("prompt_response"),
540 + response: readMetricBucket("response"),
541 541 prompt: readMetricBucket("prompt"),
542 542 usage: readMetricBucket("usage"),
543 543 discord: readDiscordBucket(),
+30 -18
src/runner/loop.ts
···
102 102 tool_choice: "required" | "auto" | "none"
103 103 include_reasoning?: boolean
104 104 reasoning?: { enabled?: boolean; exclude?: boolean; effort?: "none" | "minimal" | "low" | "medium" | "high" | "xhigh" }
105 + reasoning_effort?: "low" | "medium" | "high"
105 106 provider?: { require_parameters?: boolean }
106 107 enable_thinking?: boolean
107 108 chat_template_kwargs?: { enable_thinking?: boolean }
···
319 320 }
320 321 }
321 322
323 + const bufferedThinking = reasoningParts.join("")
322 324 const message: OpenAI.Chat.ChatCompletionMessage = {
323 325 role: "assistant",
324 326 content: contentParts.length > 0 ? contentParts.join("") : null,
···
330 332 .map(([, toolCall]) => toolCall),
331 333 }
332 334 : {}),
333 - }
335 + ...(bufferedThinking ? { reasoning_content: bufferedThinking } : {}),
336 + } as OpenAI.Chat.ChatCompletionMessage
334 337
335 338 return {
336 339 message,
337 340 usage,
338 341 emittedText,
339 342 emittedThinking,
340 - bufferedThinking: reasoningParts.join(""),
343 + bufferedThinking,
341 344 }
342 345 }
343 346
···
410 413 }
411 414 }
412 415
416 + function normalizeReasoningMessages(
417 + messages: OpenAI.Chat.ChatCompletionMessageParam[],
418 + ): OpenAI.Chat.ChatCompletionMessageParam[] {
419 + return messages.map((msg) => {
420 + if (msg.role !== "assistant") return msg
421 + const raw = msg as unknown as Record<string, unknown>
422 + if ("reasoning_content" in raw) return msg
423 + return { ...raw, reasoning_content: "" } as unknown as OpenAI.Chat.ChatCompletionMessageParam
424 + })
425 + }
426 +
413 427 async function createPrimaryCompletion(messages: OpenAI.Chat.ChatCompletionMessageParam[]): Promise<CompletionTurnResult> {
414 428 const request: CompletionRequest = {
415 429 model: MODEL,
416 - messages,
430 + messages: normalizeReasoningMessages(messages),
417 431 tools: TOOLS,
418 432 tool_choice: PRIMARY_TOOL_CHOICE,
433 + reasoning_effort: "medium",
419 434 ...openRouterToolRequestExtras(API_BASE),
420 435 }
421 436
···
454 469 * @param msg - Assistant message to append.
455 470 */
456 471 function addAssistantMessage(convId: number, state: LoopState, msg: OpenAI.Chat.ChatCompletionMessage): void {
472 + if ((msg.content === null || msg.content === undefined) && (!msg.tool_calls || msg.tool_calls.length === 0)) {
473 + msg.content = ""
474 + }
457 475 state.conversation.push(msg)
458 476 logMessage(convId, msg.role, msg.content ?? "", msg.tool_calls ?? undefined)
459 477 }
···
904 922 return {}
905 923 },
906 924
907 - wait: async ({ convId, state, hooks, call, args }) => {
908 - recordToolResult(convId, state, call, "wait", args, "Waiting for next event.")
909 - console.log("[runner] niri is waiting for next event...")
910 - const incoming = await hooks.waitForEvent()
911 - hooks.injectIncomingEvent(convId, incoming)
912 - return { isWait: true }
913 - },
914 -
915 925 rest: async ({ convId, state, hooks, call, args }) => {
916 926 if (args.note) console.log("[runner] rest note:", args.note)
917 927 recordToolResult(convId, state, call, "rest", args, "Goodnight.")
···
1143 1153 return {}
1144 1154 }
1145 1155
1146 - if ((call.function.name === "wait" || call.function.name === "rest") && latestAssistantContent(state).length === 0) {
1147 - // Some providers emit tool-only assistant turns with empty `content`.
1148 - // Don't block wait/rest in that case; log it for debugging instead.
1149 - console.warn(
1150 - `[runner] ${call.function.name} called with empty assistant content; allowing tool-only turn (provider emitted no text).`,
1151 - )
1156 + if (call.function.name === "rest" && latestAssistantContent(state).length === 0) {
1157 + console.warn(`[runner] rest called with empty assistant content; allowing tool-only turn (provider emitted no text).`)
1152 1158 }
1153 1159
1154 - const isWaitTool = call.function.name === "wait" || call.function.name === "wait_then_continue"
1160 + const isWaitTool = call.function.name === "wait_then_continue"
1155 1161 if (!isWaitTool) state.toolInFlight = true
1156 1162
1157 1163 try {
···
1338 1344 }
1339 1345
1340 1346 await hooks.saveSession()
1347 +
1348 + if (outcome === CycleOutcome.NoTools) {
1349 + console.log("[runner] no tool call — waiting for next event...")
1350 + const incoming = await hooks.waitForEvent()
1351 + hooks.injectIncomingEvent(convId, incoming)
1352 + }
1341 1353 }
1342 1354 }
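The new `normalizeReasoningMessages` exists so replayed history is accepted by providers that expect a `reasoning_content` field on assistant messages: any assistant message missing the field gets an empty one, everything else passes through untouched. A simplified sketch of that backfill, using plain object shapes instead of the OpenAI param types:

```ts
// Simplified version of the backfill applied to history before the primary completion call.
type Msg = { role: string; content: string | null; reasoning_content?: string }

const backfillReasoning = (messages: Msg[]): Msg[] =>
  messages.map((msg) =>
    msg.role !== "assistant" || "reasoning_content" in msg
      ? msg
      : { ...msg, reasoning_content: "" }, // field must exist, even when empty
  )

backfillReasoning([
  { role: "user", content: "hi" }, // non-assistant, untouched
  { role: "assistant", content: "hey" }, // gains reasoning_content: ""
  { role: "assistant", content: "ok", reasoning_content: "..." }, // already present, untouched
])
```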
+10 -13
src/runner/util.ts
···
45 45 export const SUMMARY_BASE =
46 46 process.env.SUMMARY_OPENAI_BASE_URL ?? process.env.SUMMARY_BASE_URL ?? ""
47 47 export const SUMMARY_MODEL = process.env.SUMMARY_MODEL ?? ""
48 - export const PRIMARY_TOOL_CHOICE = parseToolChoiceEnv(process.env.PRIMARY_TOOL_CHOICE ?? process.env.TOOL_CHOICE, "required")
49 - export const FALLBACK_TOOL_CHOICE = parseToolChoiceEnv(process.env.FALLBACK_TOOL_CHOICE, "required")
48 + export const PRIMARY_TOOL_CHOICE = parseToolChoiceEnv(process.env.PRIMARY_TOOL_CHOICE ?? process.env.TOOL_CHOICE, "auto")
49 + export const FALLBACK_TOOL_CHOICE = parseToolChoiceEnv(process.env.FALLBACK_TOOL_CHOICE, "auto")
50 50 const FALLBACK_N_CTX = parseInt(process.env.FALLBACK_N_CTX ?? process.env.LMSTUDIO_N_CTX ?? "4096")
51 51 const FALLBACK_CONTEXT_MARGIN = parseInt(process.env.FALLBACK_CONTEXT_MARGIN ?? process.env.LMSTUDIO_CONTEXT_MARGIN ?? "256")
52 52 const FALLBACK_HARD_OVERFLOW_TOKENS = parseInt(
···
415 415 {
416 416 type: "function",
417 417 function: {
418 - name: "wait",
419 - description: "Pause and wait for the next incoming message or event. Use this when you've finished what you're doing and want to hear back before continuing.",
420 - parameters: {
421 - type: "object",
422 - properties: {},
423 - },
424 - },
425 - },
426 - {
427 - type: "function",
428 - function: {
429 418 name: "wait_then_continue",
430 419 description:
431 420 "Wait for a short delay, then continue to another assistant turn without waiting for a new external event. Use this after a timeout or recoverable tool error when you still want to keep working. Accepts timeout_ms (default 10000, max 600000).",
···
532 521 }
533 522 }
534 523 }
524 + // Ensure assistant messages always have content or tool_calls (providers reject null+empty)
525 + if (msg.role === "assistant") {
526 + const aMsg = msg as OpenAI.Chat.ChatCompletionMessage
527 + if ((aMsg.content === null || aMsg.content === undefined) && (!aMsg.tool_calls || aMsg.tool_calls.length === 0)) {
528 + aMsg.content = ""
529 + }
530 + }
531 +
535 532 i++
536 533 }
537 534 return msgs
+7 -6
src/server.ts
···
26 26 Math.min(200, parseInt(process.env.DISCORD_BATCH_MAX_MESSAGES ?? "40", 10) || 40),
27 27 )
28 28 const DISCORD_BATCH_SCAN = (process.env.DISCORD_BATCH_SCAN ?? "true").trim().toLowerCase() !== "false"
29 - const METRIC_LIST_TYPES = new Set<MetricListType>(["prompt_response", "summarization", "memory", "prompt", "usage", "discord"])
29 + const METRIC_LIST_TYPES = new Set<MetricListType>(["response", "summarization", "memory", "prompt", "usage", "discord"])
30 30 const METRIC_TYPE_ALIASES: Record<string, MetricListType> = {
31 31 compaction: "summarization",
32 32 memory: "memory",
33 33 memories: "memory",
34 34 summary: "summarization",
35 35 summaries: "summarization",
36 - prompt_response: "prompt_response",
37 - "prompt-response": "prompt_response",
38 - completion: "prompt_response",
36 + response: "response",
37 + prompt_response: "response",
38 + "prompt-response": "response",
39 + completion: "response",
39 40 }
40 41
41 42 function parseMetricTypes(raw: string | undefined): MetricListType[] | undefined {
···
214 215 to?: string
215 216 cursor_memories?: string
216 217 cursor_summarization?: string
217 - cursor_prompt_response?: string
218 + cursor_response?: string
218 219 cursor_prompt?: string
219 220 cursor_usage?: string
220 221 cursor_discord?: string
···
225 226 cursors: {
226 227 memories: query.cursor_memories,
227 228 summarization: query.cursor_summarization,
228 - prompt_response: query.cursor_prompt_response,
229 + response: query.cursor_response,
229 230 prompt: query.cursor_prompt,
230 231 usage: query.cursor_usage,
231 232 discord: query.cursor_discord,
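The alias table means older clients that still request `prompt_response` (or `completion`) keep working after the rename. A hypothetical sketch of the normalization step, trimmed to the response aliases; whether the real `parseMetricTypes` lowercases or trims its input is not shown in this diff.

```ts
// Trimmed sketch of alias normalization: legacy names fold into the new "response" bucket,
// unknown strings resolve to undefined and can be dropped by the caller.
const METRIC_TYPE_ALIASES: Record<string, string> = {
  response: "response",
  prompt_response: "response",
  "prompt-response": "response",
  completion: "response",
}

function normalizeMetricType(raw: string): string | undefined {
  return METRIC_TYPE_ALIASES[raw] // hypothetical helper; the real parsing also checks METRIC_LIST_TYPES
}

normalizeMetricType("prompt_response") // => "response"
```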