this repo has no description
0
fork

Configure Feed

Select the types of activity you want to include in your feed.

feat(detect): add Haiku 4.5 based overwhelm and brain dump detection (#3)

* feat(detect): add Haiku 4.5 based overwhelm and brain dump detection

Replace crude regex-only detection with Claude Haiku 4.5 classification:

- Quick regex pre-filter triggers Haiku only when patterns detected
- Haiku classifies: overwhelm, brain_dump, self_bullying, urgency
- Brain dumps are parsed and saved to DB in parallel
- Detection context prepended to message for Opus to respond appropriately

Flow: regex trigger -> Haiku classify -> (if brain_dump) Haiku parse + save

* docs: update README and CLAUDE.md for Haiku detection

- Add Haiku 4.5 detection to architecture diagram
- Add HAIKU_MODEL to environment variables
- Add detect.ts and wins.ts to project structure
- Mark M3 (detection) as complete (M4, tiny wins, was already marked complete)
- Document dual-model setup (Opus for agent, Haiku for detection)

* fix(detect): harden Haiku parsing

* fix(detect): preserve flags and avoid userId=0

* fix(detect): make parsed item saves atomic

---------

Co-authored-by: Claude <noreply@anthropic.com>

authored by Alice and Claude, and committed by GitHub
103ac23a b5256fcc

+646 -9
+18 -3
CLAUDE.md
··· 108 108 109 109 --- 110 110 111 - ## Claude Model 111 + ## Claude Models 112 112 113 - **Always use Claude Opus 4.5** for this project: 113 + This project uses **two Claude models**: 114 + 115 + ### Opus 4.5 (Main Agent) 114 116 - Model ID: `claude-opus-4-5-20251101` 115 117 - Letta handle: `openai/claude-opus-4-5-20251101` (via LiteLLM proxy) 118 + - Used for: Main conversational agent, tool execution, user interactions 116 119 117 - Do NOT use other Claude models (sonnet, haiku, etc.) unless explicitly requested. 120 + ### Haiku 4.5 (Detection) 121 + - Model ID: `claude-haiku-4-5-20251001` 122 + - Config: `HAIKU_MODEL` environment variable 123 + - Used for: Fast classification of user messages (overwhelm, brain dump, self-bullying) 124 + - Called via LiteLLM at `LITELLM_URL` 125 + 126 + The detection flow in `src/detect.ts`: 127 + 1. Regex pre-filter triggers Haiku only when patterns detected 128 + 2. Haiku classifies: `overwhelm`, `brainDump`, `selfBullying`, `urgency` 129 + 3. If brain dump detected, Haiku parses and saves tasks/ideas to DB 130 + 4. Detection context prepended to message for Opus to respond appropriately 131 + 132 + Do NOT use Sonnet unless explicitly requested. 118 133 119 134 ### Letta Agent Creation Workaround 120 135
+10 -3
README.md
··· 6 6 7 7 ``` 8 8 Telegram Bot (Bun) 9 - 9 + 10 + ├──► Haiku 4.5 Detection ──► LiteLLM ──► Anthropic API 11 + │ (overwhelm, brain dump, self-bullying) 12 + 13 + 10 14 Letta (port 8283) - AI agent framework 11 15 ↓ OpenAI-compatible API 12 16 LiteLLM (port 4000) - API translation layer ··· 17 21 ``` 18 22 19 23 - **Bun**: Runtime and HTTP server for Telegram bot 20 - - **Letta**: AI agent framework with persistent memory 24 + - **Haiku 4.5 Detection**: Fast classification of user messages for overwhelm, brain dumps, and self-bullying 25 + - **Letta**: AI agent framework with persistent memory (uses Opus 4.5) 21 26 - **LiteLLM**: Translates OpenAI-compatible requests to Anthropic format 22 27 - **anthropic-proxy**: OAuth proxy for Anthropic API access 23 28 - **SQLite**: Local storage for items, wins, and context ··· 118 123 | `TELEGRAM_WEBHOOK_SECRET_TOKEN` | (empty) | Webhook verification secret | 119 124 | `ANTHROPIC_PROXY_SESSION_ID` | (empty) | Filled after OAuth flow | 120 125 | `DB_PATH` | `./data/assistant.db` | SQLite database path | 126 + | `HAIKU_MODEL` | `claude-haiku-4-5-20251001` | Model for fast detection/classification | 121 127 122 128 ## Telegram Bot Setup 123 129 ··· 196 202 │ ├── index.ts # Main server entry point 197 203 │ ├── bot.ts # Telegram bot handlers 198 204 │ ├── config.ts # Environment configuration 205 + │ ├── detect.ts # Haiku-based overwhelm/brain dump detection 199 206 │ ├── health.ts # Health check endpoints 200 207 │ ├── letta.ts # Letta client bootstrap 201 208 │ ├── db/ ··· 250 257 - [x] **M0**: Infrastructure (Docker, config, health, Letta client) 251 258 - [x] **M1**: E2E Chat (Telegram bot, basic message flow) 252 259 - [x] **M2**: Tools + Items (database, capture, breakdown) 253 - - [ ] **M3**: Tone + Detection (overwhelm, self-bullying) 260 + - [x] **M3**: Tone + Detection (Haiku 4.5 for overwhelm, brain dump, self-bullying) 254 261 - [x] **M4**: Tiny Wins (win tracking, daily breakdown, delete) 255 262 - [ ] **M5**: 
Threading (focus, deviations) 256 263 - [ ] **M6**: Hardening (idempotency, retries, tests)
+12 -1
litellm-config.yaml
··· 1 1 model_list: 2 - # Claude Opus 4.5 - the only model for this project 2 + # Claude Opus 4.5 - main conversational agent 3 3 # Connects directly to anthropic-proxy with custom headers 4 4 # Note: os.environ/ works in extra_headers per LiteLLM docs ("ANY value") 5 5 # and source code (_check_for_os_environ_vars recursively processes dicts) 6 6 - model_name: claude-opus-4-5-20251101 7 7 litellm_params: 8 8 model: anthropic/claude-opus-4-5-20251101 9 + api_base: http://anthropic-proxy:4001 10 + api_key: "unused" 11 + extra_headers: 12 + x-api-key: os.environ/ANTHROPIC_PROXY_SESSION_ID 13 + Accept-Encoding: identity 14 + 15 + # Claude Haiku 4.5 - fast detection/classification 16 + # Used for overwhelm, brain dump, and self-bullying detection 17 + - model_name: claude-haiku-4-5-20251001 18 + litellm_params: 19 + model: anthropic/claude-haiku-4-5-20251001 9 20 api_base: http://anthropic-proxy:4001 10 21 api_key: "unused" 11 22 extra_headers:
+13 -2
src/bot.ts
··· 11 11 import { Telegraf, type Context } from 'telegraf'; 12 12 import type { Update } from 'telegraf/types'; 13 13 import { config } from './config'; 14 + import { detectAndParse, formatDetectionContext } from './detect'; 14 15 import { getLettaClient, getRegisteredToolIds } from './letta'; 15 16 16 17 /** ··· 366 367 return; 367 368 } 368 369 370 + // Get user ID for detection (needed to save parsed items) 371 + const userId = ctx.from?.id ?? null; 372 + 369 373 try { 370 374 // Show typing indicator while processing 371 375 await ctx.sendChatAction('typing'); 372 376 377 + // Run Haiku-based detection for overwhelm, brain dumps, self-bullying 378 + const detection = await detectAndParse(messageText, userId); 379 + 380 + // Format detection context to prepend to message for Opus 381 + const detectionContext = formatDetectionContext(detection); 382 + 373 383 // Get or create the single agent 374 384 const currentAgentId = await getOrCreateAgent(); 375 385 376 - // Send message to agent and get response 377 - const response = await sendMessageToAgent(currentAgentId, messageText); 386 + // Send message to agent with detection context 387 + const messageForAgent = detectionContext + messageText; 388 + const response = await sendMessageToAgent(currentAgentId, messageForAgent); 378 389 379 390 // Reply to user with Markdown formatting (fallback to plain text if parsing fails) 380 391 try {
+3
src/config.ts
··· 65 65 // === LiteLLM === 66 66 LITELLM_URL: optionalEnv('LITELLM_URL', 'http://localhost:4000'), 67 67 68 + // === Models === 69 + HAIKU_MODEL: optionalEnv('HAIKU_MODEL', 'claude-haiku-4-5-20251001'), 70 + 68 71 // === OpenAI (embeddings only) === 69 72 OPENAI_API_KEY: requireEnv('OPENAI_API_KEY'), 70 73
+49
src/detect.test.ts
··· 1 + import { expect, test } from 'bun:test'; 2 + 3 + import { formatDetectionContext, type DetectionResult } from './detect'; 4 + 5 + test('formatDetectionContext returns empty string when not triggered', () => { 6 + const result: DetectionResult = { 7 + triggered: false, 8 + overwhelm: false, 9 + brainDump: false, 10 + selfBullying: false, 11 + urgency: 'low', 12 + }; 13 + 14 + expect(formatDetectionContext(result)).toBe(''); 15 + }); 16 + 17 + test('formatDetectionContext returns empty string when triggered but no flags or parsed data', () => { 18 + const result: DetectionResult = { 19 + triggered: true, 20 + overwhelm: false, 21 + brainDump: false, 22 + selfBullying: false, 23 + urgency: 'low', 24 + }; 25 + 26 + expect(formatDetectionContext(result)).toBe(''); 27 + }); 28 + 29 + test('formatDetectionContext includes flags and parsed tasks', () => { 30 + const result: DetectionResult = { 31 + triggered: true, 32 + overwhelm: false, 33 + brainDump: true, 34 + selfBullying: false, 35 + urgency: 'medium', 36 + parsed: { 37 + tasks: [{ content: 'Call the dentist', priority: 2 }], 38 + ideas: [{ content: 'Try a standing desk' }], 39 + saved: true, 40 + savedTaskIds: ['task-1'], 41 + savedIdeaIds: ['idea-1'], 42 + }, 43 + }; 44 + 45 + const context = formatDetectionContext(result); 46 + expect(context).toContain('[DETECTED: brain_dump=true, urgency=medium]'); 47 + expect(context).toContain('[PARSED & SAVED: 1 tasks, 1 ideas]'); 48 + expect(context).toContain('- Call the dentist'); 49 + });
+541
src/detect.ts
··· 1 + /** 2 + * Haiku-based message detection for overwhelm, brain dumps, and self-bullying 3 + * 4 + * Flow: 5 + * 1. Quick regex pre-filter to decide if Haiku classification is needed 6 + * 2. If triggered, call Haiku 4.5 for comprehensive classification 7 + * 3. If brain dump detected, parse in parallel and save to DB 8 + * 4. Return results for Opus to respond with full context 9 + */ 10 + 11 + import { config } from './config'; 12 + import { db, schema } from './db'; 13 + 14 + function isUnknownArray(value: unknown): value is unknown[] { 15 + return Array.isArray(value); 16 + } 17 + 18 + function extractFirstChoiceContent(data: unknown): string | null { 19 + if (typeof data !== 'object' || data === null) { 20 + return null; 21 + } 22 + 23 + const maybeChoices = (data as { choices?: unknown }).choices; 24 + if (!isUnknownArray(maybeChoices) || maybeChoices.length === 0) { 25 + return null; 26 + } 27 + 28 + const firstChoice = maybeChoices[0]; 29 + if (typeof firstChoice !== 'object' || firstChoice === null) { 30 + return null; 31 + } 32 + 33 + const maybeMessage = (firstChoice as { message?: unknown }).message; 34 + if (typeof maybeMessage !== 'object' || maybeMessage === null) { 35 + return null; 36 + } 37 + 38 + const maybeContent = (maybeMessage as { content?: unknown }).content; 39 + return typeof maybeContent === 'string' ? maybeContent : null; 40 + } 41 + 42 + function normalizeContent(value: unknown): string | null { 43 + if (typeof value !== 'string') { 44 + return null; 45 + } 46 + 47 + const trimmed = value.trim(); 48 + return trimmed === '' ? 
null : trimmed; 49 + } 50 + 51 + function normalizePriority(value: unknown): number { 52 + if (typeof value !== 'number' || !Number.isFinite(value)) { 53 + return 2; 54 + } 55 + 56 + // Haiku might return floats; normalize to an int in [0, 4] 57 + const rounded = Math.round(value); 58 + return Math.min(4, Math.max(0, rounded)); 59 + } 60 + 61 + /** 62 + * Classification result from Haiku 63 + */ 64 + export interface DetectionResult { 65 + /** Whether detection was triggered and run */ 66 + triggered: boolean; 67 + /** User is feeling overwhelmed/stuck */ 68 + overwhelm: boolean; 69 + /** Stream of consciousness needing structure */ 70 + brainDump: boolean; 71 + /** User being hard on themselves */ 72 + selfBullying: boolean; 73 + /** Urgency level */ 74 + urgency: 'low' | 'medium' | 'high'; 75 + /** If brain dump was parsed, the extracted items */ 76 + parsed?: { 77 + tasks: { content: string; priority: number }[]; 78 + ideas: { content: string }[]; 79 + saved: boolean; 80 + savedTaskIds: string[]; 81 + savedIdeaIds: string[]; 82 + }; 83 + /** Raw classification reasoning (for debugging) */ 84 + reasoning?: string; 85 + } 86 + 87 + /** 88 + * Regex patterns that trigger Haiku classification 89 + * 90 + * These are intentionally broad - Haiku does the nuanced analysis 91 + */ 92 + const TRIGGER_PATTERNS = [ 93 + // Overwhelm signals 94 + /overwhelm/i, 95 + /too much/i, 96 + /can'?t (cope|handle|deal|do this)/i, 97 + /stressed/i, 98 + /stuck/i, 99 + /everything is/i, 100 + /drowning/i, 101 + /falling behind/i, 102 + 103 + // Brain dump signals 104 + /brain\s*dump/i, 105 + /dump/i, 106 + /everything.*(head|mind)/i, 107 + /get.*(out|down).*(head|mind)/i, 108 + /list of/i, 109 + /bunch of/i, 110 + /need to.*need to.*need to/i, // Multiple "need to" in one message 111 + 112 + // Self-bullying signals 113 + /i('m| am) (so )?(lazy|useless|stupid|pathetic|worthless|terrible)/i, 114 + /what('s| is) wrong with me/i, 115 + /why (can'?t|don'?t) i/i, 116 + /i (always|never) /i, 
117 + /i('m| am) (a |the )?(worst|failure|mess|disaster)/i, 118 + /hate myself/i, 119 + /i suck/i, 120 + /should be able to/i, 121 + /can'?t do anything right/i, 122 + /i('m| am) broken/i, 123 + 124 + // Long messages often indicate brain dumps 125 + // (checked separately by length) 126 + ]; 127 + 128 + /** 129 + * Quick pre-filter to decide if Haiku should be called 130 + * 131 + * @param text - User message 132 + * @returns true if Haiku classification should run 133 + */ 134 + export function shouldTriggerDetection(text: string): boolean { 135 + // Long messages (>300 chars without line breaks) suggest brain dump 136 + if (text.length > 300 && !text.includes('\n')) { 137 + return true; 138 + } 139 + 140 + // Multiple sentences with task-like content 141 + if (text.length > 150 && (text.match(/\./g)?.length ?? 0) >= 3) { 142 + return true; 143 + } 144 + 145 + // Check regex patterns 146 + return TRIGGER_PATTERNS.some((pattern) => pattern.test(text)); 147 + } 148 + 149 + /** 150 + * Call Haiku 4.5 via LiteLLM to classify the message 151 + */ 152 + async function classifyWithHaiku(text: string): Promise<{ 153 + overwhelm: boolean; 154 + brainDump: boolean; 155 + selfBullying: boolean; 156 + urgency: 'low' | 'medium' | 'high'; 157 + reasoning: string; 158 + }> { 159 + const systemPrompt = `You are a message classifier for an ADHD support assistant. Analyze the user's message and classify it. 
160 + 161 + Return ONLY valid JSON (no markdown, no explanation outside JSON): 162 + { 163 + "overwhelm": boolean, // User feels stuck, can't cope, too much on plate 164 + "brainDump": boolean, // Stream of consciousness, list of thoughts/tasks needing structure 165 + "selfBullying": boolean, // Negative self-talk, being hard on themselves, inner critic 166 + "urgency": "low|medium|high", // How urgent does this feel for the user 167 + "reasoning": "brief explanation" 168 + } 169 + 170 + Guidelines: 171 + - overwhelm: Look for words like "can't", "too much", "stuck", "drowning", emotional exhaustion 172 + - brainDump: Multiple tasks/thoughts listed, stream of consciousness, "need to X, need to Y, also Z" 173 + - selfBullying: "I'm so lazy", "what's wrong with me", "I always fail", harsh self-judgment 174 + - urgency: high = crisis/distress, medium = needs help soon, low = casual/informational 175 + 176 + Be generous with detection - it's better to offer support than miss someone struggling.`; 177 + 178 + const response = await fetch(`${config.LITELLM_URL}/chat/completions`, { 179 + method: 'POST', 180 + headers: { 181 + 'Content-Type': 'application/json', 182 + }, 183 + body: JSON.stringify({ 184 + model: config.HAIKU_MODEL, 185 + messages: [ 186 + { role: 'system', content: systemPrompt }, 187 + { role: 'user', content: text }, 188 + ], 189 + temperature: 0.1, // Low temperature for consistent classification 190 + max_tokens: 300, 191 + }), 192 + }); 193 + 194 + if (!response.ok) { 195 + const errorText = await response.text(); 196 + throw new Error(`Haiku classification failed: ${String(response.status)} ${errorText}`); 197 + } 198 + 199 + const data = await response.json(); 200 + const content = extractFirstChoiceContent(data) ?? 
'{}'; 201 + 202 + try { 203 + // Parse the JSON response, handling potential markdown code blocks 204 + let jsonStr = content.trim(); 205 + if (jsonStr.startsWith('```')) { 206 + jsonStr = jsonStr.replace(/^```(?:json)?\n?/, '').replace(/\n?```$/, ''); 207 + } 208 + 209 + const parsed = JSON.parse(jsonStr) as { 210 + overwhelm?: boolean; 211 + brainDump?: boolean; 212 + selfBullying?: boolean; 213 + urgency?: string; 214 + reasoning?: string; 215 + }; 216 + 217 + return { 218 + overwhelm: parsed.overwhelm === true, 219 + brainDump: parsed.brainDump === true, 220 + selfBullying: parsed.selfBullying === true, 221 + urgency: (['low', 'medium', 'high'].includes(parsed.urgency ?? '') ? parsed.urgency : 'low') as 222 + | 'low' 223 + | 'medium' 224 + | 'high', 225 + reasoning: parsed.reasoning ?? '', 226 + }; 227 + } catch { 228 + console.warn('Failed to parse Haiku classification response:', content); 229 + // Default to safe values if parsing fails 230 + return { 231 + overwhelm: false, 232 + brainDump: false, 233 + selfBullying: false, 234 + urgency: 'low', 235 + reasoning: 'Parse error', 236 + }; 237 + } 238 + } 239 + 240 + /** 241 + * Parse a brain dump using Haiku to extract tasks and ideas 242 + */ 243 + async function parseBrainDumpWithHaiku(text: string): Promise<{ 244 + tasks: { content: string; priority: number }[]; 245 + ideas: { content: string }[]; 246 + }> { 247 + const systemPrompt = `You are a brain dump parser for an ADHD support assistant. Extract actionable tasks and ideas from the user's stream of consciousness. 248 + 249 + Return ONLY valid JSON (no markdown, no explanation outside JSON): 250 + { 251 + "tasks": [ 252 + {"content": "task description", "priority": 2} 253 + ], 254 + "ideas": [ 255 + {"content": "idea or thought to save"} 256 + ] 257 + } 258 + 259 + Guidelines: 260 + - tasks: Actionable items with clear verbs (call, email, buy, fix, finish, etc.) 
261 + - ideas: Thoughts, notes, things to remember that aren't actionable yet 262 + - priority: 0=critical, 1=high, 2=medium (default), 3=low, 4=backlog 263 + - Keep task descriptions concise but complete 264 + - Extract the essence, don't just copy text verbatim 265 + - If something could be both, prefer task over idea 266 + - Infer priority from context (urgent language = higher priority)`; 267 + 268 + const response = await fetch(`${config.LITELLM_URL}/chat/completions`, { 269 + method: 'POST', 270 + headers: { 271 + 'Content-Type': 'application/json', 272 + }, 273 + body: JSON.stringify({ 274 + model: config.HAIKU_MODEL, 275 + messages: [ 276 + { role: 'system', content: systemPrompt }, 277 + { role: 'user', content: text }, 278 + ], 279 + temperature: 0.3, 280 + max_tokens: 1000, 281 + }), 282 + }); 283 + 284 + if (!response.ok) { 285 + const errorText = await response.text(); 286 + throw new Error(`Haiku brain dump parsing failed: ${String(response.status)} ${errorText}`); 287 + } 288 + 289 + const data = await response.json(); 290 + const content = extractFirstChoiceContent(data) ?? '{}'; 291 + 292 + try { 293 + let jsonStr = content.trim(); 294 + if (jsonStr.startsWith('```')) { 295 + jsonStr = jsonStr.replace(/^```(?:json)?\n?/, '').replace(/\n?```$/, ''); 296 + } 297 + 298 + const parsed = JSON.parse(jsonStr) as { tasks?: unknown; ideas?: unknown }; 299 + 300 + const rawTasks = isUnknownArray(parsed.tasks) ? parsed.tasks : []; 301 + const rawIdeas = isUnknownArray(parsed.ideas) ? 
parsed.ideas : []; 302 + 303 + const tasks = rawTasks 304 + .map((task) => { 305 + const content = normalizeContent((task as { content?: unknown }).content); 306 + if (content === null) { 307 + return null; 308 + } 309 + 310 + const priority = normalizePriority((task as { priority?: unknown }).priority); 311 + return { content, priority }; 312 + }) 313 + .filter((task): task is { content: string; priority: number } => task !== null); 314 + 315 + const ideas = rawIdeas 316 + .map((idea) => { 317 + const content = normalizeContent((idea as { content?: unknown }).content); 318 + return content === null ? null : { content }; 319 + }) 320 + .filter((idea): idea is { content: string } => idea !== null); 321 + 322 + return { tasks, ideas }; 323 + } catch { 324 + console.warn('Failed to parse brain dump response:', content); 325 + return { tasks: [], ideas: [] }; 326 + } 327 + } 328 + 329 + /** 330 + * Save parsed brain dump items to database 331 + */ 332 + function saveParsedItems( 333 + userId: number, 334 + tasks: { content: string; priority: number }[], 335 + ideas: { content: string }[] 336 + ): { taskIds: string[]; ideaIds: string[] } { 337 + const taskIds: string[] = []; 338 + const ideaIds: string[] = []; 339 + 340 + db.transaction((tx) => { 341 + // Save tasks 342 + for (const task of tasks) { 343 + if (task.content.trim() === '') { 344 + continue; 345 + } 346 + 347 + const id = crypto.randomUUID(); 348 + tx.insert(schema.items) 349 + .values({ 350 + id, 351 + userId, 352 + type: 'task', 353 + content: task.content, 354 + status: 'open', 355 + priority: task.priority, 356 + parentId: null, 357 + }) 358 + .run(); 359 + taskIds.push(id); 360 + } 361 + 362 + // Save ideas as brain_dump items 363 + for (const idea of ideas) { 364 + if (idea.content.trim() === '') { 365 + continue; 366 + } 367 + 368 + const id = crypto.randomUUID(); 369 + tx.insert(schema.items) 370 + .values({ 371 + id, 372 + userId, 373 + type: 'brain_dump', 374 + content: idea.content, 375 + status: 
'open', 376 + priority: 3, // Low priority for ideas 377 + parentId: null, 378 + }) 379 + .run(); 380 + ideaIds.push(id); 381 + } 382 + }); 383 + 384 + return { taskIds, ideaIds }; 385 + } 386 + 387 + /** 388 + * Main detection function - classifies message and optionally parses brain dumps 389 + * 390 + * @param text - User message 391 + * @param userId - Telegram user ID for saving parsed items (optional) 392 + * @returns Detection result with classification and any parsed items 393 + */ 394 + export async function detectAndParse(text: string, userId: number | null): Promise<DetectionResult> { 395 + // Quick pre-filter 396 + if (!shouldTriggerDetection(text)) { 397 + return { 398 + triggered: false, 399 + overwhelm: false, 400 + brainDump: false, 401 + selfBullying: false, 402 + urgency: 'low', 403 + }; 404 + } 405 + 406 + console.log('Detection triggered, calling Haiku for classification...'); 407 + 408 + // Step 1: Classify with Haiku 409 + let classification: Awaited<ReturnType<typeof classifyWithHaiku>>; 410 + try { 411 + classification = await classifyWithHaiku(text); 412 + } catch (error) { 413 + console.error('Detection error:', error); 414 + // On error, return safe defaults but mark as triggered so we know something happened 415 + return { 416 + triggered: true, 417 + overwhelm: false, 418 + brainDump: false, 419 + selfBullying: false, 420 + urgency: 'low', 421 + reasoning: `Detection error: ${error instanceof Error ? error.message : 'Unknown'}`, 422 + }; 423 + } 424 + 425 + console.log('Haiku classification:', classification); 426 + 427 + const result: DetectionResult = { 428 + triggered: true, 429 + ...classification, 430 + }; 431 + 432 + const appendReasoning = (extra: string): void => { 433 + if (extra === '') { 434 + return; 435 + } 436 + result.reasoning = 437 + result.reasoning !== undefined && result.reasoning !== '' ? 
`${result.reasoning}\n${extra}` : extra; 438 + }; 439 + 440 + // Step 2: If brain dump detected, parse and save (if possible) 441 + if (classification.brainDump) { 442 + console.log('Brain dump detected, parsing with Haiku...'); 443 + 444 + try { 445 + const { tasks, ideas } = await parseBrainDumpWithHaiku(text); 446 + 447 + if (tasks.length > 0 || ideas.length > 0) { 448 + const saveUserId = typeof userId === 'number' && userId > 0 ? userId : null; 449 + 450 + if (saveUserId !== null) { 451 + try { 452 + const { taskIds, ideaIds } = saveParsedItems(saveUserId, tasks, ideas); 453 + 454 + result.parsed = { 455 + tasks, 456 + ideas, 457 + saved: true, 458 + savedTaskIds: taskIds, 459 + savedIdeaIds: ideaIds, 460 + }; 461 + 462 + console.log(`Parsed brain dump: ${String(tasks.length)} tasks, ${String(ideas.length)} ideas saved`); 463 + } catch (saveError) { 464 + console.error('Failed to save parsed items:', saveError); 465 + appendReasoning(`save_error=${saveError instanceof Error ? saveError.message : 'Unknown'}`); 466 + 467 + result.parsed = { 468 + tasks, 469 + ideas, 470 + saved: false, 471 + savedTaskIds: [], 472 + savedIdeaIds: [], 473 + }; 474 + } 475 + } else { 476 + appendReasoning('save_skipped=no_user_id'); 477 + 478 + result.parsed = { 479 + tasks, 480 + ideas, 481 + saved: false, 482 + savedTaskIds: [], 483 + savedIdeaIds: [], 484 + }; 485 + } 486 + } 487 + } catch (parseError) { 488 + console.error('Failed to parse brain dump:', parseError); 489 + appendReasoning(`parse_error=${parseError instanceof Error ? 
parseError.message : 'Unknown'}`); 490 + } 491 + } 492 + 493 + return result; 494 + } 495 + 496 + /** 497 + * Format detection result as context prefix for Opus 498 + * 499 + * @param result - Detection result 500 + * @returns Formatted string to prepend to user message 501 + */ 502 + export function formatDetectionContext(result: DetectionResult): string { 503 + if (!result.triggered) { 504 + return ''; 505 + } 506 + 507 + const flags: string[] = []; 508 + 509 + if (result.overwhelm) { 510 + flags.push('overwhelm=true'); 511 + } 512 + if (result.brainDump) { 513 + flags.push('brain_dump=true'); 514 + } 515 + if (result.selfBullying) { 516 + flags.push('self_bullying=true'); 517 + } 518 + if (result.urgency !== 'low') { 519 + flags.push(`urgency=${result.urgency}`); 520 + } 521 + 522 + if (flags.length === 0 && result.parsed === undefined) { 523 + return ''; 524 + } 525 + 526 + let context = `[DETECTED: ${flags.join(', ')}]`; 527 + 528 + if (result.parsed) { 529 + const { tasks, ideas, saved } = result.parsed; 530 + const label = saved ? 'PARSED & SAVED' : 'PARSED (NOT SAVED)'; 531 + context += `\n[${label}: ${String(tasks.length)} tasks, ${String(ideas.length)} ideas]`; 532 + 533 + // Include task summaries for Opus to reference 534 + if (tasks.length > 0) { 535 + const taskList = tasks.map((t) => `- ${t.content}`).join('\n'); 536 + context += `\n[TASKS:\n${taskList}]`; 537 + } 538 + } 539 + 540 + return context + '\n\n'; 541 + }