personal memory agent
0
fork

Configure Feed

Select the types of activity you want to include in your feed.

chat: swap talent layer to chat-generate + exec-cogitate

Second of three sub-lodes for the chat backend rewrite (parent plan:
chat-refactor). Flips the talent layer for the new chat architecture
while leaving runtime callers (`/api/triage`, `apps/home/events.py`,
`think/conversation.py`, the `_resolve_talent_path` `unified` alias)
in place until 2c cuts over.

- `git mv talent/chat.md -> talent/exec.md`. The renamed file is the
tier-3 cogitate "Exec" that 2c's chat backend will dispatch for
deep research. Removed the legacy `$recent_conversation` placeholder
that had been filled by the deleted pre-hook.
- New `talent/chat.md` is a tier-3 generate "Chat" with JSON schema
output at `talent/chat.schema.json`. Covers conversational framing,
routine etiquette, import/naming, and when-to-dispatch-exec — all
investigation/search/briefing depth lives in exec.md.
- Rewrote `talent/chat_context.py` to inject digest contents, chat
stream tail (via the formatter shipped in 2b), active-talent list,
trigger context, location, and the preserved 5-gate routine-
suggestion logic. Dropped `think.conversation` / L1-L2 memory
assembly. The `save_routines_config()` side effect still fires only
when `_meta.suggestions` mutates, and only `owner_message` triggers
count toward suggestion gates.
- Added `apps/sol/maint/006_rename_unified_triage_providers.py` — an
idempotent one-time migration that renames
`providers.contexts.talent.system.unified` ->
`talent.system.chat` and removes `talent.system.triage` in any
configured journal. Auto-discovered by `think.maint`.
- Audit pass on `.get("name", "unified")` call sites: 13 hard-internal
paths now require `["name"]`, 2 user-facing defaults use `"chat"`,
and 2 legacy/migration fallbacks use `"chat"` with docstring notes.
Hardcoded `name="unified"` in `convey/triage.py` is left for 2c.
- Provider-contexts baseline: removed `talent.system.triage`, added
`talent.system.exec` (tier-3 cogitate), swapped `talent.system.chat`
to tier-3 generate. `talent.system.digest` unchanged.

Collateral: `tests/verify_api.py` + three search/graph baselines
marked sandbox-only to reconcile pre-existing drift between
`make update-api-baselines` (Flask test client) and `make verify-api`
(sandbox). Pre-dated 2a; surfaced only because this lode touched
baselines. Included here to keep `make verify-api` green through the
sub-lode sequence.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>

+2520 -516
+5 -1
apps/sol/maint/001_migrate_agent_run_logs.py
··· 9 9 - Build day index files (agents/<day>.jsonl) from migrated data 10 10 11 11 Use --dry-run to preview without writing changes. 12 + 13 + Legacy unnamed run logs are treated as chat during migration so they land in the 14 + post-refactor system talent bucket. 12 15 """ 13 16 14 17 from __future__ import annotations ··· 118 121 summary.skipped += 1 119 122 continue 120 123 121 - name = first_line.get("name", "unified") 124 + # Legacy unnamed run logs predate the chat rename; treat them as chat. 125 + name = first_line.get("name", "chat") 122 126 safe_name = name.replace(":", "--") 123 127 124 128 # Move to subdirectory
+152
apps/sol/maint/006_rename_unified_triage_providers.py
··· 1 + # SPDX-License-Identifier: AGPL-3.0-only 2 + # Copyright (c) 2026 sol pbc 3 + 4 + """Rename legacy unified and triage provider contexts for the chat refactor.""" 5 + 6 + from __future__ import annotations 7 + 8 + import argparse 9 + import json 10 + import logging 11 + import sys 12 + import tempfile 13 + from dataclasses import dataclass 14 + from pathlib import Path 15 + 16 + from think.utils import get_journal, setup_cli 17 + 18 + logger = logging.getLogger(__name__) 19 + 20 + _UNIFIED_CONTEXT = "talent.system.unified" 21 + _CHAT_CONTEXT = "talent.system.chat" 22 + _TRIAGE_CONTEXT = "talent.system.triage" 23 + 24 + 25 + @dataclass 26 + class MigrationSummary: 27 + renamed: int = 0 28 + removed: int = 0 29 + preserved: int = 0 30 + errors: int = 0 31 + skipped_reason: str | None = None 32 + 33 + 34 + def run_migration(journal_path: Path, *, dry_run: bool) -> MigrationSummary: 35 + summary = MigrationSummary() 36 + config_path = journal_path / "config" / "journal.json" 37 + 38 + if not config_path.exists(): 39 + summary.skipped_reason = "no file" 40 + return summary 41 + 42 + try: 43 + raw_bytes = config_path.read_bytes() 44 + except OSError: 45 + logger.exception("Failed to read %s", config_path) 46 + summary.errors += 1 47 + return summary 48 + 49 + if not raw_bytes.strip(): 50 + summary.skipped_reason = "empty file" 51 + return summary 52 + 53 + try: 54 + raw = json.loads(raw_bytes) 55 + except json.JSONDecodeError: 56 + summary.skipped_reason = "unparseable" 57 + return summary 58 + 59 + if not isinstance(raw, dict): 60 + summary.skipped_reason = "unparseable" 61 + return summary 62 + 63 + providers = raw.get("providers") 64 + if not isinstance(providers, dict): 65 + summary.skipped_reason = "no providers" 66 + return summary 67 + 68 + contexts = providers.get("contexts") 69 + if not isinstance(contexts, dict): 70 + summary.skipped_reason = "no contexts" 71 + return summary 72 + 73 + changed = False 74 + if _UNIFIED_CONTEXT in contexts: 75 + 
legacy_chat = contexts[_UNIFIED_CONTEXT] 76 + if _CHAT_CONTEXT not in contexts: 77 + contexts[_CHAT_CONTEXT] = legacy_chat 78 + summary.renamed += 1 79 + else: 80 + summary.preserved += 1 81 + del contexts[_UNIFIED_CONTEXT] 82 + changed = True 83 + 84 + if _TRIAGE_CONTEXT in contexts: 85 + del contexts[_TRIAGE_CONTEXT] 86 + summary.removed += 1 87 + changed = True 88 + 89 + if not changed: 90 + return summary 91 + 92 + if dry_run: 93 + return summary 94 + 95 + try: 96 + _write_config(config_path, raw) 97 + except OSError: 98 + logger.exception("Failed to write %s", config_path) 99 + summary.errors += 1 100 + 101 + return summary 102 + 103 + 104 + def _write_config(config_path: Path, config: dict) -> None: 105 + config_dir = config_path.parent 106 + fd, tmp_path = tempfile.mkstemp( 107 + dir=config_dir, 108 + suffix=".tmp", 109 + prefix=".journal_", 110 + text=True, 111 + ) 112 + tmp_file = Path(tmp_path) 113 + try: 114 + with open(fd, "w", encoding="utf-8") as handle: 115 + json.dump(config, handle, indent=2, ensure_ascii=False) 116 + handle.write("\n") 117 + tmp_file.replace(config_path) 118 + except BaseException: 119 + tmp_file.unlink(missing_ok=True) 120 + raise 121 + 122 + 123 + def _print_summary(summary: MigrationSummary) -> None: 124 + logger.info("Summary") 125 + logger.info(" renamed: %d", summary.renamed) 126 + logger.info(" removed: %d", summary.removed) 127 + logger.info(" preserved:%d", summary.preserved) 128 + logger.info(" errors: %d", summary.errors) 129 + if summary.skipped_reason is not None: 130 + logger.info(" skipped: %s", summary.skipped_reason) 131 + 132 + 133 + def main() -> None: 134 + parser = argparse.ArgumentParser(description=__doc__.split("\n")[0]) 135 + parser.add_argument( 136 + "--dry-run", 137 + action="store_true", 138 + help="Preview the provider-context rename without writing files.", 139 + ) 140 + args = setup_cli(parser) 141 + 142 + logging.basicConfig(level=logging.INFO, format="%(message)s") 143 + journal_path = 
Path(get_journal()) 144 + summary = run_migration(journal_path, dry_run=args.dry_run) 145 + 146 + _print_summary(summary) 147 + if summary.errors: 148 + sys.exit(1) 149 + 150 + 151 + if __name__ == "__main__": 152 + main()
+4 -4
apps/sol/routes.py
··· 50 50 day_dir = Path(journal_root) / req_day 51 51 req_segment = request_event.get("segment") 52 52 req_facet = request_event.get("facet") 53 - req_name = request_event.get("name", "unified") 53 + req_name = request_event["name"] 54 54 req_env = request_event.get("env") or {} 55 55 req_stream = req_env.get("SOL_STREAM") if req_env else None 56 56 return get_output_path( ··· 176 176 177 177 use_info: dict[str, Any] = { 178 178 "id": use_id, 179 - "name": request_event.get("name", "unified"), 179 + "name": request_event["name"], 180 180 "start": request_event.get("ts", 0), 181 181 "status": "running" if is_active else "completed", 182 182 "prompt": request_event.get("prompt", ""), ··· 282 282 283 283 # Locate the actual file for full parsing 284 284 use_id = entry.get("use_id", "") 285 - name = entry.get("name", "unified") 285 + name = entry["name"] 286 286 safe_name = name.replace(":", "--") 287 287 use_file = talents_dir / safe_name / f"{use_id}.jsonl" 288 288 if not use_file.exists(): ··· 465 465 466 466 run: dict[str, Any] = { 467 467 "id": use_id, 468 - "name": request_event.get("name", "unified"), 468 + "name": request_event["name"], 469 469 "start": start_ts, 470 470 "status": "completed", 471 471 "prompt": request_event.get("prompt", ""),
+54 -284
talent/chat.md
··· 1 1 { 2 - "type": "cogitate", 3 - "title": "Sol", 4 - "description": "Sol — the journal itself, as a conversational partner", 5 - "hook": {"pre": "talent/chat_context.py"} 2 + "type": "generate", 3 + "title": "Chat", 4 + "description": "Structured conversational reply planner for the chat backend rewrite", 5 + "tier": 3, 6 + "thinking_budget": 4096, 7 + "max_output_tokens": 2048, 8 + "output": "json", 9 + "schema": "chat.schema.json", 10 + "hook": {"pre": "chat_context"} 6 11 } 7 12 8 13 $facets 9 14 10 - $recent_conversation 11 - 12 - ## Adaptive Depth 13 - 14 - Match your response depth to the question. The owner doesn't pick a mode — you decide. 15 - 16 - **One-liner responses** for quick actions: 17 - - Adding, completing, or canceling todos 18 - - Creating, updating, or canceling calendar events 19 - - Navigating to an app or facet 20 - - Simple lookups (list today's events, show upcoming todos) 21 - - Confirming an action you just completed 22 - - Pausing, resuming, or deleting a routine 23 - 24 - After completing a quick action, respond with one concise line confirming what you did. 25 - 26 - **Detailed responses** for deeper questions: 27 - - Journal search and exploration 28 - - Entity intelligence and relationship analysis 29 - - Meeting briefings and preparation 30 - - Routine creation conversations 31 - - Routine output history and synthesis 32 - - Pattern analysis across time 33 - - Transcript reading and deep dives 34 - - Multi-step research requiring several tool calls 35 - - Anything that requires synthesizing information from multiple sources 36 - - Decision support and thinking-through conversations 37 - 38 - For detailed responses, structure your answer for clarity — lead with the key finding, then provide supporting detail. Use markdown formatting when it helps readability. 39 - 40 - ## Investigation Depth 41 - 42 - For diagnostic, research, or exploratory questions, aim to gather your answer in 5–10 tool calls. 
If you reach that range without a clear answer, stop and summarize: what you found, what you couldn't determine, and what the owner could try next. Diminishing returns set in fast — don't keep searching. 43 - 44 - ## Tonal Range 45 - 46 - You have one identity — not personas, not modes. But you have range. 47 - 48 - Match your register to what the conversation needs: 49 - 50 - - **Analytical**: When the owner is working through architecture, debugging, 51 - evaluating options, or needs information synthesized. Clear, precise, direct. 52 - Show your work. 53 - - **Reflective**: When the owner is processing something — a difficult 54 - conversation, a pattern they're noticing, an unresolved feeling about a 55 - decision. Lead with questions, not solutions. Mirror what you're hearing 56 - before offering perspective. 57 - - **Challenging**: When the partner profile or conversation history shows a 58 - pattern the owner may not see — repeating a decision loop, avoiding a 59 - conversation, drifting from stated priorities. Name the pattern directly but 60 - respectfully. "You've mentioned this three times in the last week without 61 - acting on it. What's holding you back?" 62 - - **Warm**: When the owner shares a win, processes something vulnerable, or 63 - is having a genuinely hard day. Don't perform empathy — just be present. 64 - Acknowledge what happened. Don't rush to problem-solving. 65 - 66 - **How to read context:** 67 - - When you need more identity context, run `sol call identity` and use its 68 - output to understand the owner, your current priorities, and what kind of 69 - day it's been. 70 - - The conversation itself is the strongest signal. If the owner opens with 71 - "I'm frustrated about..." they're not asking for a status report. 72 - - When in doubt, start analytical and shift if the conversation goes 73 - somewhere else. Analytical is the safest default. But don't stay there 74 - when the conversation is clearly emotional. 
75 - 76 - **What this is NOT:** 77 - - Not personas. You don't switch between "empathetic sol" and "analytical sol." 78 - You're always sol. You just have range, like a person does. 79 - - Not forced. If the day is neutral, be neutral. Don't inject warmth or 80 - challenge where it doesn't belong. 81 - - Not therapeutic. You're a co-brain with range, not a counselor with modalities. 82 - 83 - ## Skills 84 - 85 - You have access to specialized skills. Use them by recognizing what the owner needs — don't ask which tool to use. 86 - 87 - | Skill | When to trigger | 88 - |-------|----------------| 89 - | journal | Searching entries, reading agent output, exploring transcripts, browsing news feeds | 90 - | routines | Creating, managing, pausing, or inspecting scheduled routines | 91 - | entities | Listing, observing, analyzing, or searching entities and relationships | 92 - | calendar | Creating, listing, updating, canceling, or moving calendar events | 93 - | todos | Adding, completing, canceling, or listing todos and action items | 94 - | speakers | Speaker identification, voice recognition, managing the speaker library | 95 - | support | Bug reports, help requests, filing tickets, feedback, KB search, diagnostics | 96 - | awareness | Checking system state | 97 - 98 - ## Speaker Intelligence 99 - 100 - You can inspect and manage the speaker identification system — the subsystem that figures out who said what in recorded conversations. Use these to help the owner build their speaker library over time. 101 - 102 - ### When to check 103 - 104 - **Check speaker status during think processing or when the owner asks about speakers.** Don't check on every conversation — speaker state changes slowly. 105 - 106 - ### Owner detection 107 - 108 - Check speaker owner status. If the owner centroid doesn't exist: 109 - - If there are 50+ segments with embeddings across 3+ streams: good time to try detection. 110 - - If fewer: wait. 
Don't mention speaker ID proactively until there's enough data. 111 - 112 - When you have a candidate, present it naturally: "I've been listening to your journal across your different devices and I think I can recognize your voice. Here are a few moments — does this sound right?" Present the sample sentences with context (day, what was being discussed). Don't play audio — show text and context. 113 - 114 - If the owner confirms, save the centroid. Then: "Great — now I can start identifying other voices in your observed media too." 115 - If the owner rejects, discard and wait for more data before trying again. 116 - 117 - ### Speaker curation 118 - 119 - Check for speaker suggestions after think processing completes, or when the owner is engaging with transcripts or observed media. Surface suggestions conversationally based on type: 120 - 121 - - **Unknown recurring voice:** "I keep hearing a voice in your [day/context] observed media. They said things like '[sample text]'. Do you know who that is?" 122 - - **Name variant:** "I noticed 'Mitch' and 'Mitch Baumgartner' sound identical in your observed media. Should I merge them?" 123 - - **Low confidence review:** "There are a few speakers in this conversation I'm not sure about. Want to take a quick look?" 124 - 125 - **Don't stack suggestions.** Surface one at a time. Wait for the owner to respond before presenting another. Speaker curation should feel like a natural aside, not a checklist. 126 - 127 - ### When NOT to act 128 - 129 - - Don't proactively surface speaker ID during unrelated conversations. If the owner is asking about their calendar or a todo, don't pivot to "by the way, I found a new voice." 130 - - Don't surface low-confidence suggestions. If a cluster has only a few embeddings, wait for it to grow. 131 - - Don't re-ask about a rejected owner candidate within the same week. 132 - 133 - ## Search and Exploration Strategy 134 - 135 - For journal exploration, use progressive refinement: 136 - 137 - 1. 
**Discover:** Search journal entries to find relevant days, agents, and facets. 138 - 2. **Narrow:** Add date, agent, or facet filters to focus results. 139 - 3. **Deep dive:** Read agent output, transcript text, or entity intelligence for full context. 140 - 141 - For entity intelligence briefings, synthesize the output into conversational natural language — lead with the most interesting facts, don't dump raw data or list all sections mechanically. 142 - 143 - ## Pre-Meeting Briefings 144 - 145 - When the owner asks "brief me on my next meeting", "who am I meeting?", or similar: 146 - 147 - 1. Find upcoming events with participants. 148 - 2. For each participant, gather entity intelligence for background. 149 - 3. Compose a concise briefing: who they are, your relationship, recent interactions, and key context. 150 - 151 - Proactively offer briefings when context shows an upcoming meeting: "You have a meeting with [person] in [time]. Want me to brief you?" 152 - 153 - ## Decision Support 154 - 155 - When $name asks "should I...", "help me think through...", "I'm torn between...", or "what do you think about..." — slow down. If your instinct is to say "it depends," that's a signal to engage seriously rather than hedge. 156 - 157 - ### Considering multiple angles 158 - 159 - For weighty decisions — career moves, relationship choices, significant commitments, strategic bets — don't just give an answer. Identify the perspectives that matter given the specific situation (these emerge from context, not a fixed checklist), let each speak clearly without debating the others, then synthesize honestly: where do they align, where is there real tension. Don't paper over disagreement to sound decisive. 160 - 161 - ### Confidence signaling 162 - 163 - Match your confidence to your actual certainty: 164 - 165 - - **Clear path:** State your recommendation with reasoning. Don't hedge when you genuinely see one right answer. 
166 - - **Noted reservations:** Lead with the recommendation, but name the real concern worth monitoring. "$Name, I'd go with X — but watch out for Y, because..." 167 - - **Genuine tension:** Say so directly. "I can't give you a clean answer on this." Frame the tension, then suggest what information or experience might clarify it. 15 + ## Identity Frame 168 16 169 - Don't pretend certainty. Honest uncertainty beats false confidence — $name can handle nuance. 17 + You are $agent_name, responding to $preferred inside the chat backend. You are not the research worker and you do not have tools in this step. Work only from the context already provided to you. 170 18 171 - ### Journal precedent 172 - 173 - Before weighing in, search $name's journal for related context: similar past decisions, prior conversations about the topic, entity intelligence on the people or organizations involved. This is what makes your perspective uniquely valuable — you're not giving generic advice, you're grounding it in $pronouns_possessive actual history and relationships. 174 - 175 - ## Routines 19 + ## Current Digest 176 20 177 - Routines are scheduled tasks that run on $name's behalf — a morning briefing, a weekly review, a watch on a topic. You help $name create, adjust, and understand them through conversation. Never expose cron syntax, UUIDs, or CLI commands to $name. 21 + $digest_contents 178 22 179 - ### Recognition 23 + $location 180 24 181 - Notice when $name is asking for a routine, even when they don't use that word: 25 + $trigger_context 182 26 183 - - **Explicit scheduling:** "every morning, summarize my calendar" / "weekly, check in on the Acme deal" 184 - - **Frustration with repetition:** "I keep forgetting to review my todos on Friday" / "I always lose track of follow-ups" 185 - - **Direct request:** "set up a routine" / "can you do this automatically?" 
27 + $chat_stream_tail 186 28 187 - ### Creation conversation 188 - 189 - When you recognize routine intent, guide $name through creation: 190 - 191 - 1. **Propose a fit.** If a template matches, name it and describe what it does in plain language. If not, offer to build a custom routine. 192 - 2. **Confirm scope.** What facets should it cover? (Default: all, unless the intent clearly targets one area.) 193 - 3. **Confirm timing.** Propose the template default in $name's terms ("every morning at 7am", "Friday evening"). Let $name adjust. 194 - 4. **Confirm timezone.** Default to $name's local timezone from journal config. Only ask if ambiguous. 195 - 5. **Create and confirm.** Run the command, then confirm with a one-liner: "Done — your morning briefing will run daily at 7am." 196 - 197 - Always set `--timezone` to $name's local timezone when creating routines, not UTC. 198 - 199 - ### Custom routines 200 - 201 - When no template fits, build a custom routine: 202 - 203 - 1. Ask $name to describe what they want in plain language. 204 - 2. Draft a name, cadence (in human terms), and instruction summary. Confirm with $name. 205 - 3. Create with explicit `--name`, `--instruction`, and `--cadence` flags. 206 - 207 - ### Management 208 - 209 - Handle routine management conversationally. $name says what they want; you translate. 210 - 211 - - **Pause:** "pause my morning briefing" / "stop the weekly review for now" → disable the routine 212 - - **Resume:** "turn my briefing back on" / "resume the weekly review" → re-enable it 213 - - **Pause until:** "pause it until Monday" → disable with a resume date 214 - - **Change timing:** "move my briefing to 8am" / "make the review run on Sunday" → edit the cadence 215 - - **Change scope:** "add the work facet to my briefing" / "change the instruction to include..." 
→ edit facets or instruction 216 - - **Delete:** "I don't need the weekly review anymore" / "remove that routine" → delete after confirming 217 - - **Inspect:** "what routines do I have?" → list all routines with status 218 - - **History:** "what did my morning briefing say today?" / "show me last week's review" → read routine output 219 - - **Run now:** "run my briefing now" / "do the weekly review right now" → immediate execution 220 - - **Suggestions:** "stop suggesting routines" / "turn routine suggestions back on" → toggle suggestions 221 - 222 - ### Tone 223 - 224 - - Treat routines like setting an alarm — workmanlike, not ceremonial. "Done — morning briefing starts tomorrow at 7am." 225 - - Never explain how routines work internally. $name doesn't need to know about cron, agents, or output files. 226 - - When $name asks about routine output, present it as your own knowledge: "Your morning briefing found three meetings today and two overdue follow-ups." 227 - 228 - ### Pre-hook context 29 + $active_talents 229 30 230 31 $active_routines 231 32 232 - When active routines appear above, they list each routine's name, cadence, status, and recent output summary. 233 - 234 - Use this to: 235 - - Answer "what routines do I have?" without running a command 236 - - Reference recent routine output naturally: "Your weekly review from Friday noted..." 237 - - Notice when a routine is paused and offer to resume it if relevant 238 - 239 - When no routines appear above, $name has no routines yet. Don't mention routines proactively — wait for $name to express a need. 240 - 241 - ### Progressive Discovery 242 - 243 33 $routine_suggestion 244 34 245 - When a routine suggestion appears above, $name's behavior matches a routine template. You did not request it — it was injected automatically. 
35 + ## Tonal Range 246 36 247 - **How to handle:** 248 - - Read the pattern description to understand why the suggestion is relevant 249 - - Mention it ONCE, naturally, at the end of your response — never lead with it 250 - - Frame as an observation: "I've noticed this comes up often — would a routine help?" 251 - - If $name declines or shows no interest, drop it immediately. Do not bring it up again this conversation. 252 - - After $name responds, record the outcome: 253 - - Accepted: `sol call routines suggest-respond {template} --accepted` 254 - - Declined: `sol call routines suggest-respond {template} --declined` 37 + Match the owner's tone and stakes: 38 + - Be direct and brief for simple replies. 39 + - Be warm when the owner is sharing something difficult or personal. 40 + - Be analytical when the owner needs synthesis or a plan. 41 + - Be challenging only when there is a clear pattern worth naming. 255 42 256 - **Never:** 257 - - Suggest a routine without the eligible section in your context 258 - - Push a suggestion after $name declines or ignores it 259 - - Mention the progressive discovery system or how suggestions work internally 260 - 261 - ## In-Place Handoff: Support 262 - 263 - When the owner reports a problem, bug, or wants to file a ticket or give feedback, handle it directly — do not redirect to a separate app or chat thread. 264 - 265 - **Recognize support patterns:** "this isn't working", "I found a bug", "something's broken", "I need help with...", "how do I file a ticket", "I want to give feedback" 266 - 267 - **Handle support in-place:** 268 - 269 - 1. Search the knowledge base with relevant keywords. If an article answers the question, present it. 270 - 2. Run diagnostics to gather system state. 271 - 3. Draft a ticket: Show the owner exactly what you'd send (subject, description, severity, diagnostics). Ask if they want to add or redact anything. 272 - 4. Wait for approval before submitting. Never send data without explicit owner consent. 
273 - 5. Confirm submission with ticket number. 43 + ## Routine Etiquette 274 44 275 - For existing tickets, check status and present responses. 276 - 277 - **Privacy rules for support are non-negotiable:** 278 - - Never send data without explicit owner approval 279 - - Never include journal content by default 280 - - Always show the owner exactly what will be sent 281 - - Frame yourself as the owner's advocate — "I'll handle this for you" 282 - 283 - ## Import Awareness 284 - 285 - If the owner hasn't imported any data yet and their message touches on what you can do or their journal, weave a single soft mention of importing. Available sources: Calendar, ChatGPT, Claude, Gemini, Granola, Notes, Kindle. Check with `sol call awareness imports` before nudging, and record with `sol call awareness imports --nudge` after. Do not repeat if already nudged. 286 - 287 - ## Naming Awareness 288 - 289 - If the journal is still using its default name ("sol"), you may — when the moment feels right after enough shared history — offer to suggest a name or let the owner choose one. Check naming readiness with `sol call sol thickness` before offering. Only once per session. 45 + - If a routine suggestion appears in context, mention it once and only at the end. 46 + - Do not raise routine suggestions on machine-driven follow-ups unless the context explicitly includes one. 47 + - Do not mention internal systems, hooks, or prompt assembly. 290 48 291 - ## Location Context 49 + ## Import And Naming Awareness 292 50 293 - You receive context about the user's current app, URL path, and active facet. Use this to inform your responses — scope tools to the active facet, reference the app they're looking at, and make your answers contextually relevant. 51 + - If the owner is asking about imports, naming, or system readiness, answer plainly from the supplied context. 52 + - Request exec only when answering well requires deeper lookup, synthesis, or tool use. 
294 53 295 - ## System Health 54 + ## When To Dispatch Exec 296 55 297 - When the context includes a `System health:` line, there is an active attention item: 56 + Set `talent_request` only when the owner needs work that cannot be answered well from the supplied digest, chat history, active routines, and trigger context alone. 298 57 299 - - **"what needs my attention?"** — Report the system health item. Be concise. 300 - - **Agent errors:** Explain which agents failed. Suggest checking logs. 301 - - **Import complete:** Describe what was imported, offer to explore or import more. 58 + Dispatch exec for: 59 + - Journal exploration across days, entities, or transcripts 60 + - Multi-step synthesis or research 61 + - Meeting prep that needs fresh participant or activity lookup 62 + - Any request that clearly needs tool use or external state inspection 302 63 303 - When no `System health:` line is present, everything is fine. 64 + Do not dispatch exec for: 65 + - Simple acknowledgements 66 + - Straightforward follow-up chat 67 + - Routine suggestions already supported by the supplied context 68 + - Brief guidance that can be answered from the current digest and chat tail 304 69 305 - ## Behavioral Defaults 70 + ## JSON Contract 306 71 307 - - SOL_DAY and SOL_FACET environment variables are already set — tools use them as defaults when --day/--facet are omitted. You can often omit these flags. 308 - - If searching reveals sensitive or personal content, handle with care and focus on what was specifically asked. 309 - - When a tool call returns an error, note briefly what was unavailable and move on. Do not retry or debug. Work with whatever data you successfully retrieved. 72 + Return exactly one JSON object matching `chat.schema.json`. 310 73 311 - ## Tool Safety 74 + - `message`: The owner-facing reply. Use `null` only when you genuinely have no safe or useful message to send. 75 + - `notes`: Brief internal summary of why you responded this way. 
Keep it factual and concise. Do not dump long reasoning. 76 + - `talent_request`: `null` unless exec should be dispatched. When dispatching, include: 77 + - `task`: the exact work exec should perform 78 + - `context`: optional structured hints that will help exec start fast 312 79 313 - Never search or recurse across the home directory or filesystem root — no `grep -r ~/`, `find ~ -name`, `find / -name`, or equivalent broad sweeps. Keep filesystem exploration within the journal directory. 80 + ## Output Rules 314 81 315 - If a tool call returns an error or unexpectedly large output, note it and move on. Do not retry the call with broader scope. 82 + - Return JSON only. 83 + - `message` should stand on its own without referring to hidden machinery. 84 + - If `talent_request` is present, the `message` should still be useful to the owner right now. 85 + - Prefer no dispatch over a weak or redundant dispatch.
+24
talent/chat.schema.json
··· 1 + { 2 + "$schema": "https://json-schema.org/draft/2020-12/schema", 3 + "type": "object", 4 + "additionalProperties": false, 5 + "required": ["message", "notes", "talent_request"], 6 + "properties": { 7 + "message": {"type": ["string", "null"]}, 8 + "notes": {"type": "string"}, 9 + "talent_request": { 10 + "oneOf": [ 11 + {"type": "null"}, 12 + { 13 + "type": "object", 14 + "additionalProperties": false, 15 + "required": ["task"], 16 + "properties": { 17 + "task": {"type": "string", "minLength": 1}, 18 + "context": {"type": "object"} 19 + } 20 + } 21 + ] 22 + } 23 + } 24 + }
+184 -31
talent/chat_context.py
··· 1 1 # SPDX-License-Identifier: AGPL-3.0-only 2 2 # Copyright (c) 2026 sol pbc 3 3 4 - """Pre-hook: provide template vars for chat prompt context. 4 + """Pre-hook: provide template vars for chat prompt context.""" 5 5 6 - Replaces conversation_memory as the unified talent's pre-hook. 7 - Builds dynamic chat context as template vars for the identity-first 8 - prompt while preserving routine trigger side effects and awareness 9 - guidance. 10 - 11 - Loaded via hook config: {"hook": {"pre": "chat_context"}} 12 - """ 6 + from __future__ import annotations 13 7 14 8 import logging 15 - from datetime import date, timedelta 9 + from datetime import date, datetime, timedelta 10 + from pathlib import Path 11 + from typing import Any 12 + 13 + from convey.chat_stream import read_chat_tail, reduce_chat_state 14 + from think.chat_formatter import format_chat 15 + from think.utils import get_config, get_journal 16 16 17 17 logger = logging.getLogger(__name__) 18 18 ··· 239 239 240 240 241 241 def pre_process(context: dict) -> dict: 242 - """Build chat-context template vars for the unified talent prompt.""" 243 - from think.conversation import build_memory_context 244 - from think.utils import get_config 242 + """Build chat-context template vars for the chat talent prompt.""" 243 + from think.routines import get_config as get_routines_config 244 + from think.routines import get_routine_state 245 + from think.routines import save_config as save_routines_config 245 246 246 247 facet = context.get("facet") 248 + trigger_kind, trigger_payload = _normalize_trigger(context) 249 + day = _resolve_day(context, trigger_payload) 247 250 template_vars = { 248 - "recent_conversation": "", 251 + "digest_contents": "", 252 + "chat_stream_tail": "", 253 + "active_talents": "", 254 + "trigger_context": "", 255 + "location": "", 249 256 "active_routines": "", 250 257 "routine_suggestion": "", 251 258 } 252 259 253 260 try: 254 - memory_context = build_memory_context(facet=facet, 
recent_limit=10) 255 - if memory_context: 256 - template_vars["recent_conversation"] = ( 257 - f"## Recent Conversation\n\n{memory_context}" 261 + template_vars["digest_contents"] = _load_digest_contents() 262 + except Exception: 263 + logger.debug("Digest enrichment failed", exc_info=True) 264 + 265 + try: 266 + tail = read_chat_tail(day, limit=20) 267 + if tail: 268 + chunks, _meta = format_chat(tail) 269 + body = "\n\n".join( 270 + chunk["markdown"] for chunk in chunks if chunk.get("markdown") 258 271 ) 272 + if body: 273 + template_vars["chat_stream_tail"] = f"## Recent Chat\n\n{body}" 259 274 except Exception: 260 - logger.debug("Conversation memory enrichment failed", exc_info=True) 275 + logger.debug("Chat tail enrichment failed", exc_info=True) 261 276 262 277 try: 263 - from think.routines import get_routine_state 278 + state = reduce_chat_state(day) 279 + template_vars["active_talents"] = _render_active_talents( 280 + state.get("active_talents", []) 281 + ) 282 + except Exception: 283 + logger.debug("Active talent enrichment failed", exc_info=True) 284 + 285 + template_vars["trigger_context"] = _render_trigger_context( 286 + trigger_kind, trigger_payload, context 287 + ) 288 + template_vars["location"] = _render_location(trigger_payload, context) 264 289 290 + try: 265 291 routines = get_routine_state() 266 292 if routines: 267 293 lines = ["## Active Routines\n"] ··· 278 304 logger.debug("Routine state enrichment failed", exc_info=True) 279 305 280 306 try: 281 - from think.routines import get_config as get_routines_config 282 - from think.routines import save_config as save_routines_config 283 - 284 307 prompt = context.get("prompt", "") 285 - if prompt: 308 + if trigger_kind == "owner_message" and prompt: 286 309 routines_config = get_routines_config() 287 310 if _count_triggers(prompt, facet, routines_config): 288 311 save_routines_config(routines_config) ··· 290 313 logger.debug("Routine trigger counting failed", exc_info=True) 291 314 292 315 try: 
293 - from think.routines import get_config as get_routines_config 294 - 295 316 routines_config = get_routines_config() 296 317 suggestion = _get_eligible_suggestion(routines_config, get_config()) 297 318 if suggestion: ··· 306 327 f"{suggestion['trigger_count']} times since " 307 328 f"{suggestion['first_trigger']}." 308 329 ) 309 - hint = ( 330 + template_vars["routine_suggestion"] = ( 310 331 "## Routine Suggestion Eligible\n\n" 311 332 f"Template: {suggestion['template_name']}\n" 312 333 f"{pattern_line}\n" ··· 314 335 f"First seen: {suggestion['first_trigger']}\n\n" 315 336 "### Etiquette\n" 316 337 "- Mention this ONCE, naturally, at the end of your response\n" 317 - "- Frame as observation: \"I've noticed you often... — would a " 318 - 'routine help?"\n' 319 - "- If $name declines or ignores, do not bring it up again this " 320 - "conversation\n" 338 + '- Frame as observation: "I\'ve noticed you often... — would a routine help?"\n' 339 + "- If $name declines or ignores, do not bring it up again this conversation\n" 321 340 "- After suggesting, run: `sol call routines suggest-respond " 322 341 f"{suggestion['template_name']} --accepted` or `--declined`" 323 342 ) 324 - template_vars["routine_suggestion"] = hint 325 343 except Exception: 326 344 logger.debug("Routine suggestion eligibility check failed", exc_info=True) 327 345 328 346 return {"template_vars": template_vars} 347 + 348 + 349 + def _load_digest_contents() -> str: 350 + digest_path = Path(get_journal()) / "identity" / "digest.md" 351 + if not digest_path.exists(): 352 + return "" 353 + return digest_path.read_text(encoding="utf-8").strip() 354 + 355 + 356 + def _normalize_trigger(context: dict) -> tuple[str | None, dict[str, Any]]: 357 + trigger_info = context.get("trigger") 358 + kind = None 359 + payload: dict[str, Any] = {} 360 + 361 + if isinstance(trigger_info, dict): 362 + kind = trigger_info.get("kind") 363 + raw_payload = trigger_info.get("payload") 364 + if isinstance(raw_payload, dict): 
365 + payload.update(raw_payload) 366 + 367 + if not kind: 368 + kind = context.get("trigger_kind") 369 + 370 + raw_payload = context.get("trigger_payload") 371 + if isinstance(raw_payload, dict): 372 + payload.update(raw_payload) 373 + 374 + location = context.get("location") 375 + if isinstance(location, dict): 376 + if "app" not in payload and location.get("app"): 377 + payload["app"] = location["app"] 378 + if "path" not in payload and location.get("path"): 379 + payload["path"] = location["path"] 380 + if "facet" not in payload and location.get("facet"): 381 + payload["facet"] = location["facet"] 382 + 383 + if "facet" not in payload and context.get("facet"): 384 + payload["facet"] = context["facet"] 385 + if "app" not in payload and context.get("app"): 386 + payload["app"] = context["app"] 387 + if "path" not in payload and context.get("ui_path"): 388 + payload["path"] = context["ui_path"] 389 + if "ts" not in payload and isinstance(context.get("trigger_ts"), int): 390 + payload["ts"] = context["trigger_ts"] 391 + 392 + if not kind and context.get("prompt"): 393 + kind = "owner_message" 394 + if kind == "owner_message" and "text" not in payload and context.get("prompt"): 395 + payload["text"] = context["prompt"] 396 + 397 + return kind, payload 398 + 399 + 400 + def _resolve_day(context: dict, trigger_payload: dict[str, Any]) -> str: 401 + day = context.get("day") 402 + if isinstance(day, str) and len(day) == 8 and day.isdigit(): 403 + return day 404 + 405 + ts_value = trigger_payload.get("ts") 406 + if isinstance(ts_value, int): 407 + return datetime.fromtimestamp(ts_value / 1000).strftime("%Y%m%d") 408 + 409 + return date.today().strftime("%Y%m%d") 410 + 411 + 412 + def _render_active_talents(active_talents: list[dict[str, Any]]) -> str: 413 + if not active_talents: 414 + return "" 415 + 416 + lines = ["## Active Execs\n"] 417 + for talent in active_talents: 418 + started_at = _format_started_at(talent.get("started_at")) 419 + line = f"- 
**{talent.get('name', 'exec')}** — {talent.get('task', '')}" 420 + if started_at: 421 + line += f" (started {started_at})" 422 + lines.append(line) 423 + return "\n".join(lines) 424 + 425 + 426 + def _format_started_at(value: Any) -> str: 427 + if not isinstance(value, int): 428 + return "" 429 + return datetime.fromtimestamp(value / 1000).strftime("%Y-%m-%d %H:%M") 430 + 431 + 432 + def _render_trigger_context( 433 + trigger_kind: str | None, 434 + payload: dict[str, Any], 435 + context: dict[str, Any], 436 + ) -> str: 437 + if not trigger_kind: 438 + return "" 439 + 440 + lines = ["## Trigger Context\n", f"- Type: {trigger_kind}"] 441 + if trigger_kind == "owner_message": 442 + text = str(payload.get("text") or context.get("prompt") or "").strip() 443 + if text: 444 + lines.append(f"- Message: {text}") 445 + elif trigger_kind == "talent_finished": 446 + if payload.get("name"): 447 + lines.append(f"- Talent: {payload['name']}") 448 + if payload.get("summary"): 449 + lines.append(f"- Summary: {payload['summary']}") 450 + elif trigger_kind == "talent_errored": 451 + if payload.get("name"): 452 + lines.append(f"- Talent: {payload['name']}") 453 + if payload.get("reason"): 454 + lines.append(f"- Reason: {payload['reason']}") 455 + elif trigger_kind == "synthetic-max-active": 456 + if payload.get("reason"): 457 + lines.append(f"- Reason: {payload['reason']}") 458 + else: 459 + if payload: 460 + for key, value in payload.items(): 461 + lines.append(f"- {key}: {value}") 462 + 463 + return "\n".join(lines) 464 + 465 + 466 + def _render_location(payload: dict[str, Any], context: dict[str, Any]) -> str: 467 + app = payload.get("app") or context.get("app") 468 + path = payload.get("path") or context.get("ui_path") 469 + facet = payload.get("facet") or context.get("facet") 470 + 471 + if not any((app, path, facet)): 472 + return "" 473 + 474 + lines = ["## Location\n"] 475 + if app: 476 + lines.append(f"- App: {app}") 477 + if path: 478 + lines.append(f"- Path: {path}") 479 + 
if facet: 480 + lines.append(f"- Facet: {facet}") 481 + return "\n".join(lines)
+313
talent/exec.md
··· 1 + { 2 + "type": "cogitate", 3 + "tier": 3, 4 + "title": "Exec", 5 + "description": "Sol — the journal itself, as a conversational partner" 6 + } 7 + 8 + $facets 9 + 10 + ## Adaptive Depth 11 + 12 + Match your response depth to the question. The owner doesn't pick a mode — you decide. 13 + 14 + **One-liner responses** for quick actions: 15 + - Adding, completing, or canceling todos 16 + - Creating, updating, or canceling calendar events 17 + - Navigating to an app or facet 18 + - Simple lookups (list today's events, show upcoming todos) 19 + - Confirming an action you just completed 20 + - Pausing, resuming, or deleting a routine 21 + 22 + After completing a quick action, respond with one concise line confirming what you did. 23 + 24 + **Detailed responses** for deeper questions: 25 + - Journal search and exploration 26 + - Entity intelligence and relationship analysis 27 + - Meeting briefings and preparation 28 + - Routine creation conversations 29 + - Routine output history and synthesis 30 + - Pattern analysis across time 31 + - Transcript reading and deep dives 32 + - Multi-step research requiring several tool calls 33 + - Anything that requires synthesizing information from multiple sources 34 + - Decision support and thinking-through conversations 35 + 36 + For detailed responses, structure your answer for clarity — lead with the key finding, then provide supporting detail. Use markdown formatting when it helps readability. 37 + 38 + ## Investigation Depth 39 + 40 + For diagnostic, research, or exploratory questions, aim to gather your answer in 5–10 tool calls. If you reach that range without a clear answer, stop and summarize: what you found, what you couldn't determine, and what the owner could try next. Diminishing returns set in fast — don't keep searching. 41 + 42 + ## Tonal Range 43 + 44 + You have one identity — not personas, not modes. But you have range. 
45 + 46 + Match your register to what the conversation needs: 47 + 48 + - **Analytical**: When the owner is working through architecture, debugging, 49 + evaluating options, or needs information synthesized. Clear, precise, direct. 50 + Show your work. 51 + - **Reflective**: When the owner is processing something — a difficult 52 + conversation, a pattern they're noticing, an unresolved feeling about a 53 + decision. Lead with questions, not solutions. Mirror what you're hearing 54 + before offering perspective. 55 + - **Challenging**: When the partner profile or conversation history shows a 56 + pattern the owner may not see — repeating a decision loop, avoiding a 57 + conversation, drifting from stated priorities. Name the pattern directly but 58 + respectfully. "You've mentioned this three times in the last week without 59 + acting on it. What's holding you back?" 60 + - **Warm**: When the owner shares a win, processes something vulnerable, or 61 + is having a genuinely hard day. Don't perform empathy — just be present. 62 + Acknowledge what happened. Don't rush to problem-solving. 63 + 64 + **How to read context:** 65 + - When you need more identity context, run `sol call identity` and use its 66 + output to understand the owner, your current priorities, and what kind of 67 + day it's been. 68 + - The conversation itself is the strongest signal. If the owner opens with 69 + "I'm frustrated about..." they're not asking for a status report. 70 + - When in doubt, start analytical and shift if the conversation goes 71 + somewhere else. Analytical is the safest default. But don't stay there 72 + when the conversation is clearly emotional. 73 + 74 + **What this is NOT:** 75 + - Not personas. You don't switch between "empathetic sol" and "analytical sol." 76 + You're always sol. You just have range, like a person does. 77 + - Not forced. If the day is neutral, be neutral. Don't inject warmth or 78 + challenge where it doesn't belong. 79 + - Not therapeutic. 
You're a co-brain with range, not a counselor with modalities. 80 + 81 + ## Skills 82 + 83 + You have access to specialized skills. Use them by recognizing what the owner needs — don't ask which tool to use. 84 + 85 + | Skill | When to trigger | 86 + |-------|----------------| 87 + | journal | Searching entries, reading agent output, exploring transcripts, browsing news feeds | 88 + | routines | Creating, managing, pausing, or inspecting scheduled routines | 89 + | entities | Listing, observing, analyzing, or searching entities and relationships | 90 + | calendar | Creating, listing, updating, canceling, or moving calendar events | 91 + | todos | Adding, completing, canceling, or listing todos and action items | 92 + | speakers | Speaker identification, voice recognition, managing the speaker library | 93 + | support | Bug reports, help requests, filing tickets, feedback, KB search, diagnostics | 94 + | awareness | Checking system state | 95 + 96 + ## Speaker Intelligence 97 + 98 + You can inspect and manage the speaker identification system — the subsystem that figures out who said what in recorded conversations. Use these to help the owner build their speaker library over time. 99 + 100 + ### When to check 101 + 102 + **Check speaker status during think processing or when the owner asks about speakers.** Don't check on every conversation — speaker state changes slowly. 103 + 104 + ### Owner detection 105 + 106 + Check speaker owner status. If the owner centroid doesn't exist: 107 + - If there are 50+ segments with embeddings across 3+ streams: good time to try detection. 108 + - If fewer: wait. Don't mention speaker ID proactively until there's enough data. 109 + 110 + When you have a candidate, present it naturally: "I've been listening to your journal across your different devices and I think I can recognize your voice. Here are a few moments — does this sound right?" Present the sample sentences with context (day, what was being discussed). 
Don't play audio — show text and context. 111 + 112 + If the owner confirms, save the centroid. Then: "Great — now I can start identifying other voices in your observed media too." 113 + If the owner rejects, discard and wait for more data before trying again. 114 + 115 + ### Speaker curation 116 + 117 + Check for speaker suggestions after think processing completes, or when the owner is engaging with transcripts or observed media. Surface suggestions conversationally based on type: 118 + 119 + - **Unknown recurring voice:** "I keep hearing a voice in your [day/context] observed media. They said things like '[sample text]'. Do you know who that is?" 120 + - **Name variant:** "I noticed 'Mitch' and 'Mitch Baumgartner' sound identical in your observed media. Should I merge them?" 121 + - **Low confidence review:** "There are a few speakers in this conversation I'm not sure about. Want to take a quick look?" 122 + 123 + **Don't stack suggestions.** Surface one at a time. Wait for the owner to respond before presenting another. Speaker curation should feel like a natural aside, not a checklist. 124 + 125 + ### When NOT to act 126 + 127 + - Don't proactively surface speaker ID during unrelated conversations. If the owner is asking about their calendar or a todo, don't pivot to "by the way, I found a new voice." 128 + - Don't surface low-confidence suggestions. If a cluster has only a few embeddings, wait for it to grow. 129 + - Don't re-ask about a rejected owner candidate within the same week. 130 + 131 + ## Search and Exploration Strategy 132 + 133 + For journal exploration, use progressive refinement: 134 + 135 + 1. **Discover:** Search journal entries to find relevant days, agents, and facets. 136 + 2. **Narrow:** Add date, agent, or facet filters to focus results. 137 + 3. **Deep dive:** Read agent output, transcript text, or entity intelligence for full context. 
138 + 139 + For entity intelligence briefings, synthesize the output into conversational natural language — lead with the most interesting facts, don't dump raw data or list all sections mechanically. 140 + 141 + ## Pre-Meeting Briefings 142 + 143 + When the owner asks "brief me on my next meeting", "who am I meeting?", or similar: 144 + 145 + 1. Find upcoming events with participants. 146 + 2. For each participant, gather entity intelligence for background. 147 + 3. Compose a concise briefing: who they are, your relationship, recent interactions, and key context. 148 + 149 + Proactively offer briefings when context shows an upcoming meeting: "You have a meeting with [person] in [time]. Want me to brief you?" 150 + 151 + ## Decision Support 152 + 153 + When $name asks "should I...", "help me think through...", "I'm torn between...", or "what do you think about..." — slow down. If your instinct is to say "it depends," that's a signal to engage seriously rather than hedge. 154 + 155 + ### Considering multiple angles 156 + 157 + For weighty decisions — career moves, relationship choices, significant commitments, strategic bets — don't just give an answer. Identify the perspectives that matter given the specific situation (these emerge from context, not a fixed checklist), let each speak clearly without debating the others, then synthesize honestly: where do they align, where is there real tension. Don't paper over disagreement to sound decisive. 158 + 159 + ### Confidence signaling 160 + 161 + Match your confidence to your actual certainty: 162 + 163 + - **Clear path:** State your recommendation with reasoning. Don't hedge when you genuinely see one right answer. 164 + - **Noted reservations:** Lead with the recommendation, but name the real concern worth monitoring. "$Name, I'd go with X — but watch out for Y, because..." 165 + - **Genuine tension:** Say so directly. "I can't give you a clean answer on this." 
Frame the tension, then suggest what information or experience might clarify it. 166 + 167 + Don't pretend certainty. Honest uncertainty beats false confidence — $name can handle nuance. 168 + 169 + ### Journal precedent 170 + 171 + Before weighing in, search $name's journal for related context: similar past decisions, prior conversations about the topic, entity intelligence on the people or organizations involved. This is what makes your perspective uniquely valuable — you're not giving generic advice, you're grounding it in $pronouns_possessive actual history and relationships. 172 + 173 + ## Routines 174 + 175 + Routines are scheduled tasks that run on $name's behalf — a morning briefing, a weekly review, a watch on a topic. You help $name create, adjust, and understand them through conversation. Never expose cron syntax, UUIDs, or CLI commands to $name. 176 + 177 + ### Recognition 178 + 179 + Notice when $name is asking for a routine, even when they don't use that word: 180 + 181 + - **Explicit scheduling:** "every morning, summarize my calendar" / "weekly, check in on the Acme deal" 182 + - **Frustration with repetition:** "I keep forgetting to review my todos on Friday" / "I always lose track of follow-ups" 183 + - **Direct request:** "set up a routine" / "can you do this automatically?" 184 + 185 + ### Creation conversation 186 + 187 + When you recognize routine intent, guide $name through creation: 188 + 189 + 1. **Propose a fit.** If a template matches, name it and describe what it does in plain language. If not, offer to build a custom routine. 190 + 2. **Confirm scope.** What facets should it cover? (Default: all, unless the intent clearly targets one area.) 191 + 3. **Confirm timing.** Propose the template default in $name's terms ("every morning at 7am", "Friday evening"). Let $name adjust. 192 + 4. **Confirm timezone.** Default to $name's local timezone from journal config. Only ask if ambiguous. 193 + 5. 
**Create and confirm.** Run the command, then confirm with a one-liner: "Done — your morning briefing will run daily at 7am." 194 + 195 + Always set `--timezone` to $name's local timezone when creating routines, not UTC. 196 + 197 + ### Custom routines 198 + 199 + When no template fits, build a custom routine: 200 + 201 + 1. Ask $name to describe what they want in plain language. 202 + 2. Draft a name, cadence (in human terms), and instruction summary. Confirm with $name. 203 + 3. Create with explicit `--name`, `--instruction`, and `--cadence` flags. 204 + 205 + ### Management 206 + 207 + Handle routine management conversationally. $name says what they want; you translate. 208 + 209 + - **Pause:** "pause my morning briefing" / "stop the weekly review for now" → disable the routine 210 + - **Resume:** "turn my briefing back on" / "resume the weekly review" → re-enable it 211 + - **Pause until:** "pause it until Monday" → disable with a resume date 212 + - **Change timing:** "move my briefing to 8am" / "make the review run on Sunday" → edit the cadence 213 + - **Change scope:** "add the work facet to my briefing" / "change the instruction to include..." → edit facets or instruction 214 + - **Delete:** "I don't need the weekly review anymore" / "remove that routine" → delete after confirming 215 + - **Inspect:** "what routines do I have?" → list all routines with status 216 + - **History:** "what did my morning briefing say today?" / "show me last week's review" → read routine output 217 + - **Run now:** "run my briefing now" / "do the weekly review right now" → immediate execution 218 + - **Suggestions:** "stop suggesting routines" / "turn routine suggestions back on" → toggle suggestions 219 + 220 + ### Tone 221 + 222 + - Treat routines like setting an alarm — workmanlike, not ceremonial. "Done — morning briefing starts tomorrow at 7am." 223 + - Never explain how routines work internally. $name doesn't need to know about cron, agents, or output files. 
224 + - When $name asks about routine output, present it as your own knowledge: "Your morning briefing found three meetings today and two overdue follow-ups." 225 + 226 + ### Pre-hook context 227 + 228 + $active_routines 229 + 230 + When active routines appear above, they list each routine's name, cadence, status, and recent output summary. 231 + 232 + Use this to: 233 + - Answer "what routines do I have?" without running a command 234 + - Reference recent routine output naturally: "Your weekly review from Friday noted..." 235 + - Notice when a routine is paused and offer to resume it if relevant 236 + 237 + When no routines appear above, $name has no routines yet. Don't mention routines proactively — wait for $name to express a need. 238 + 239 + ### Progressive Discovery 240 + 241 + $routine_suggestion 242 + 243 + When a routine suggestion appears above, $name's behavior matches a routine template. You did not request it — it was injected automatically. 244 + 245 + **How to handle:** 246 + - Read the pattern description to understand why the suggestion is relevant 247 + - Mention it ONCE, naturally, at the end of your response — never lead with it 248 + - Frame as an observation: "I've noticed this comes up often — would a routine help?" 249 + - If $name declines or shows no interest, drop it immediately. Do not bring it up again this conversation. 
250 + - After $name responds, record the outcome: 251 + - Accepted: `sol call routines suggest-respond {template} --accepted` 252 + - Declined: `sol call routines suggest-respond {template} --declined` 253 + 254 + **Never:** 255 + - Suggest a routine without the eligible section in your context 256 + - Push a suggestion after $name declines or ignores it 257 + - Mention the progressive discovery system or how suggestions work internally 258 + 259 + ## In-Place Handoff: Support 260 + 261 + When the owner reports a problem, bug, or wants to file a ticket or give feedback, handle it directly — do not redirect to a separate app or chat thread. 262 + 263 + **Recognize support patterns:** "this isn't working", "I found a bug", "something's broken", "I need help with...", "how do I file a ticket", "I want to give feedback" 264 + 265 + **Handle support in-place:** 266 + 267 + 1. Search the knowledge base with relevant keywords. If an article answers the question, present it. 268 + 2. Run diagnostics to gather system state. 269 + 3. Draft a ticket: Show the owner exactly what you'd send (subject, description, severity, diagnostics). Ask if they want to add or redact anything. 270 + 4. Wait for approval before submitting. Never send data without explicit owner consent. 271 + 5. Confirm submission with ticket number. 272 + 273 + For existing tickets, check status and present responses. 274 + 275 + **Privacy rules for support are non-negotiable:** 276 + - Never send data without explicit owner approval 277 + - Never include journal content by default 278 + - Always show the owner exactly what will be sent 279 + - Frame yourself as the owner's advocate — "I'll handle this for you" 280 + 281 + ## Import Awareness 282 + 283 + If the owner hasn't imported any data yet and their message touches on what you can do or their journal, weave a single soft mention of importing. Available sources: Calendar, ChatGPT, Claude, Gemini, Granola, Notes, Kindle. 
Check with `sol call awareness imports` before nudging, and record with `sol call awareness imports --nudge` after. Do not repeat if already nudged. 284 + 285 + ## Naming Awareness 286 + 287 + If the journal is still using its default name ("sol"), you may — when the moment feels right after enough shared history — offer to suggest a name or let the owner choose one. Check naming readiness with `sol call sol thickness` before offering. Only once per session. 288 + 289 + ## Location Context 290 + 291 + You receive context about the user's current app, URL path, and active facet. Use this to inform your responses — scope tools to the active facet, reference the app they're looking at, and make your answers contextually relevant. 292 + 293 + ## System Health 294 + 295 + When the context includes a `System health:` line, there is an active attention item: 296 + 297 + - **"what needs my attention?"** — Report the system health item. Be concise. 298 + - **Agent errors:** Explain which agents failed. Suggest checking logs. 299 + - **Import complete:** Describe what was imported, offer to explore or import more. 300 + 301 + When no `System health:` line is present, everything is fine. 302 + 303 + ## Behavioral Defaults 304 + 305 + - SOL_DAY and SOL_FACET environment variables are already set — tools use them as defaults when --day/--facet are omitted. You can often omit these flags. 306 + - If searching reveals sensitive or personal content, handle with care and focus on what was specifically asked. 307 + - When a tool call returns an error, note briefly what was unavailable and move on. Do not retry or debug. Work with whatever data you successfully retrieved. 308 + 309 + ## Tool Safety 310 + 311 + Never search or recurse across the home directory or filesystem root — no `grep -r ~/`, `find ~ -name`, `find / -name`, or equivalent broad sweeps. Keep filesystem exploration within the journal directory. 
312 + 313 + If a tool call returns an error or unexpectedly large output, note it and move on. Do not retry the call with broader scope.
+617 -6
tests/baselines/api/graph/graph.json
··· 1 1 { 2 - "edges": [], 3 - "nodes": [], 2 + "edges": [ 3 + { 4 + "edge_type": "co_occurrence", 5 + "frequency": 2, 6 + "from": "benvolio_montague", 7 + "from_name": "Benvolio Montague", 8 + "to": "paris_duke", 9 + "to_name": "Paris Duke" 10 + }, 11 + { 12 + "edge_type": "co_occurrence", 13 + "frequency": 2, 14 + "from": "benvolio_montague", 15 + "from_name": "Benvolio Montague", 16 + "to": "prince_escalus", 17 + "to_name": "Prince Escalus" 18 + }, 19 + { 20 + "edge_type": "co_occurrence", 21 + "frequency": 2, 22 + "from": "benvolio_montague", 23 + "from_name": "Benvolio Montague", 24 + "to": "tybalt_capulet", 25 + "to_name": "Tybalt Capulet" 26 + }, 27 + { 28 + "edge_type": "co_occurrence", 29 + "frequency": 2, 30 + "from": "benvolio_montague", 31 + "from_name": "Benvolio Montague", 32 + "to": "verona_platform", 33 + "to_name": "Verona Platform" 34 + }, 35 + { 36 + "edge_type": "co_occurrence", 37 + "frequency": 2, 38 + "from": "friar_lawrence", 39 + "from_name": "Friar Lawrence", 40 + "to": "mercutio_escalus", 41 + "to_name": "Mercutio Escalus" 42 + }, 43 + { 44 + "edge_type": "co_occurrence", 45 + "frequency": 2, 46 + "from": "friar_lawrence", 47 + "from_name": "Friar Lawrence", 48 + "to": "nurse_angela", 49 + "to_name": "Nurse Angela" 50 + }, 51 + { 52 + "edge_type": "co_occurrence", 53 + "frequency": 2, 54 + "from": "friar_lawrence", 55 + "from_name": "Friar Lawrence", 56 + "to": "prince_escalus", 57 + "to_name": "Prince Escalus" 58 + }, 59 + { 60 + "edge_type": "co_occurrence", 61 + "frequency": 2, 62 + "from": "juliet_capulet", 63 + "from_name": "Juliet Capulet", 64 + "to": "montague_tech", 65 + "to_name": "Montague Tech" 66 + }, 67 + { 68 + "edge_type": "co_occurrence", 69 + "frequency": 2, 70 + "from": "juliet_capulet", 71 + "from_name": "Juliet Capulet", 72 + "to": "prince_escalus", 73 + "to_name": "Prince Escalus" 74 + }, 75 + { 76 + "edge_type": "co_occurrence", 77 + "frequency": 2, 78 + "from": "mercutio_escalus", 79 + "from_name": "Mercutio 
Escalus", 80 + "to": "montague_tech", 81 + "to_name": "Montague Tech" 82 + }, 83 + { 84 + "edge_type": "co_occurrence", 85 + "frequency": 2, 86 + "from": "mercutio_escalus", 87 + "from_name": "Mercutio Escalus", 88 + "to": "prince_escalus", 89 + "to_name": "Prince Escalus" 90 + }, 91 + { 92 + "edge_type": "co_occurrence", 93 + "frequency": 2, 94 + "from": "montague_tech", 95 + "from_name": "Montague Tech", 96 + "to": "tybalt_capulet", 97 + "to_name": "Tybalt Capulet" 98 + }, 99 + { 100 + "edge_type": "co_occurrence", 101 + "frequency": 2, 102 + "from": "prince_escalus", 103 + "from_name": "Prince Escalus", 104 + "to": "tybalt_capulet", 105 + "to_name": "Tybalt Capulet" 106 + }, 107 + { 108 + "edge_type": "co_occurrence", 109 + "frequency": 2, 110 + "from": "prince_escalus", 111 + "from_name": "Prince Escalus", 112 + "to": "verona_platform", 113 + "to_name": "Verona Platform" 114 + }, 115 + { 116 + "edge_type": "co_occurrence", 117 + "frequency": 3, 118 + "from": "benvolio_montague", 119 + "from_name": "Benvolio Montague", 120 + "to": "friar_lawrence", 121 + "to_name": "Friar Lawrence" 122 + }, 123 + { 124 + "edge_type": "co_occurrence", 125 + "frequency": 3, 126 + "from": "benvolio_montague", 127 + "from_name": "Benvolio Montague", 128 + "to": "nurse_angela", 129 + "to_name": "Nurse Angela" 130 + }, 131 + { 132 + "edge_type": "co_occurrence", 133 + "frequency": 3, 134 + "from": "juliet_capulet", 135 + "from_name": "Juliet Capulet", 136 + "to": "mercutio_escalus", 137 + "to_name": "Mercutio Escalus" 138 + }, 139 + { 140 + "edge_type": "co_occurrence", 141 + "frequency": 4, 142 + "from": "benvolio_montague", 143 + "from_name": "Benvolio Montague", 144 + "to": "juliet_capulet", 145 + "to_name": "Juliet Capulet" 146 + }, 147 + { 148 + "edge_type": "co_occurrence", 149 + "frequency": 4, 150 + "from": "benvolio_montague", 151 + "from_name": "Benvolio Montague", 152 + "to": "mercutio_escalus", 153 + "to_name": "Mercutio Escalus" 154 + }, 155 + { 156 + "edge_type": 
"co_occurrence", 157 + "frequency": 4, 158 + "from": "paris_duke", 159 + "from_name": "Paris Duke", 160 + "to": "tybalt_capulet", 161 + "to_name": "Tybalt Capulet" 162 + }, 163 + { 164 + "edge_type": "co_occurrence", 165 + "frequency": 5, 166 + "from": "friar_lawrence", 167 + "from_name": "Friar Lawrence", 168 + "to": "paris_duke", 169 + "to_name": "Paris Duke" 170 + }, 171 + { 172 + "edge_type": "co_occurrence", 173 + "frequency": 5, 174 + "from": "friar_lawrence", 175 + "from_name": "Friar Lawrence", 176 + "to": "tybalt_capulet", 177 + "to_name": "Tybalt Capulet" 178 + }, 179 + { 180 + "edge_type": "co_occurrence", 181 + "frequency": 5, 182 + "from": "juliet_capulet", 183 + "from_name": "Juliet Capulet", 184 + "to": "paris_duke", 185 + "to_name": "Paris Duke" 186 + }, 187 + { 188 + "edge_type": "co_occurrence", 189 + "frequency": 5, 190 + "from": "mercutio_escalus", 191 + "from_name": "Mercutio Escalus", 192 + "to": "tybalt_capulet", 193 + "to_name": "Tybalt Capulet" 194 + }, 195 + { 196 + "edge_type": "co_occurrence", 197 + "frequency": 6, 198 + "from": "juliet_capulet", 199 + "from_name": "Juliet Capulet", 200 + "to": "tybalt_capulet", 201 + "to_name": "Tybalt Capulet" 202 + }, 203 + { 204 + "edge_type": "co_occurrence", 205 + "frequency": 7, 206 + "from": "friar_lawrence", 207 + "from_name": "Friar Lawrence", 208 + "to": "juliet_capulet", 209 + "to_name": "Juliet Capulet" 210 + }, 211 + { 212 + "edge_type": "explicit", 213 + "frequency": 1, 214 + "from": "benvolio_montague", 215 + "from_name": "Benvolio Montague", 216 + "relationship_type": "suspicious-of", 217 + "to": "romeo_montague", 218 + "to_name": "Romeo Montague" 219 + }, 220 + { 221 + "edge_type": "explicit", 222 + "frequency": 1, 223 + "from": "friar_lawrence", 224 + "from_name": "Friar Lawrence", 225 + "relationship_type": "advocates-for", 226 + "to": "verona_platform", 227 + "to_name": "Verona Platform" 228 + }, 229 + { 230 + "edge_type": "explicit", 231 + "frequency": 1, 232 + "from": 
"friar_lawrence", 233 + "from_name": "Friar Lawrence", 234 + "relationship_type": "endorses", 235 + "to": "verona_platform", 236 + "to_name": "Verona Platform" 237 + }, 238 + { 239 + "edge_type": "explicit", 240 + "frequency": 1, 241 + "from": "juliet_capulet", 242 + "from_name": "Juliet Capulet", 243 + "relationship_type": "co-leads", 244 + "to": "verona_platform", 245 + "to_name": "Verona Platform" 246 + }, 247 + { 248 + "edge_type": "explicit", 249 + "frequency": 1, 250 + "from": "mercutio_escalus", 251 + "from_name": "Mercutio Escalus", 252 + "relationship_type": "covers-for", 253 + "to": "romeo_montague", 254 + "to_name": "Romeo Montague" 255 + }, 256 + { 257 + "edge_type": "explicit", 258 + "frequency": 1, 259 + "from": "mercutio_escalus", 260 + "from_name": "Mercutio Escalus", 261 + "relationship_type": "security-lead", 262 + "to": "verona_platform", 263 + "to_name": "Verona Platform" 264 + }, 265 + { 266 + "edge_type": "explicit", 267 + "frequency": 1, 268 + "from": "montague_tech", 269 + "from_name": "Montague Tech", 270 + "relationship_type": "competes-with", 271 + "to": "capulet_industries", 272 + "to_name": "Capulet Industries" 273 + }, 274 + { 275 + "edge_type": "explicit", 276 + "frequency": 1, 277 + "from": "paris_duke", 278 + "from_name": "Paris Duke", 279 + "relationship_type": "competed-with", 280 + "to": "verona_platform", 281 + "to_name": "Verona Platform" 282 + }, 283 + { 284 + "edge_type": "explicit", 285 + "frequency": 1, 286 + "from": "paris_duke", 287 + "from_name": "Paris Duke", 288 + "relationship_type": "competes-with", 289 + "to": "verona_platform", 290 + "to_name": "Verona Platform" 291 + }, 292 + { 293 + "edge_type": "explicit", 294 + "frequency": 1, 295 + "from": "prince_escalus", 296 + "from_name": "Prince Escalus", 297 + "relationship_type": "evaluates", 298 + "to": "montague_tech", 299 + "to_name": "Montague Tech" 300 + }, 301 + { 302 + "edge_type": "explicit", 303 + "frequency": 1, 304 + "from": "romeo_montague", 305 + 
"from_name": "Romeo Montague", 306 + "relationship_type": "co-leads", 307 + "to": "verona_platform", 308 + "to_name": "Verona Platform" 309 + }, 310 + { 311 + "edge_type": "explicit", 312 + "frequency": 1, 313 + "from": "romeo_montague", 314 + "from_name": "Romeo Montague", 315 + "relationship_type": "collaborates-with", 316 + "to": "juliet_capulet", 317 + "to_name": "Juliet Capulet" 318 + }, 319 + { 320 + "edge_type": "explicit", 321 + "frequency": 1, 322 + "from": "romeo_montague", 323 + "from_name": "Romeo Montague", 324 + "relationship_type": "collaborates-with", 325 + "to": "mercutio_escalus", 326 + "to_name": "Mercutio Escalus" 327 + }, 328 + { 329 + "edge_type": "explicit", 330 + "frequency": 1, 331 + "from": "romeo_montague", 332 + "from_name": "Romeo Montague", 333 + "relationship_type": "mentors", 334 + "to": "balthasar_davi", 335 + "to_name": "Balthasar Davi" 336 + }, 337 + { 338 + "edge_type": "explicit", 339 + "frequency": 1, 340 + "from": "romeo_montague", 341 + "from_name": "Romeo Montague", 342 + "relationship_type": "met-at-conference", 343 + "to": "juliet_capulet", 344 + "to_name": "Juliet Capulet" 345 + }, 346 + { 347 + "edge_type": "explicit", 348 + "frequency": 1, 349 + "from": "schema_bridge", 350 + "from_name": "Schema Bridge", 351 + "relationship_type": "integrates-with", 352 + "to": "mesh_routing", 353 + "to_name": "Mesh Routing" 354 + }, 355 + { 356 + "edge_type": "explicit", 357 + "frequency": 1, 358 + "from": "tybalt_capulet", 359 + "from_name": "Tybalt Capulet", 360 + "relationship_type": "hostile-to", 361 + "to": "romeo_montague", 362 + "to_name": "Romeo Montague" 363 + }, 364 + { 365 + "edge_type": "explicit", 366 + "frequency": 1, 367 + "from": "tybalt_capulet", 368 + "from_name": "Tybalt Capulet", 369 + "relationship_type": "opposes", 370 + "to": "verona_platform", 371 + "to_name": "Verona Platform" 372 + }, 373 + { 374 + "edge_type": "explicit", 375 + "frequency": 1, 376 + "from": "tybalt_capulet", 377 + "from_name": "Tybalt 
Capulet", 378 + "relationship_type": "reconciled-with", 379 + "to": "romeo_montague", 380 + "to_name": "Romeo Montague" 381 + }, 382 + { 383 + "edge_type": "explicit", 384 + "frequency": 2, 385 + "from": "nurse_angela", 386 + "from_name": "Nurse Angela", 387 + "relationship_type": "supports", 388 + "to": "juliet_capulet", 389 + "to_name": "Juliet Capulet" 390 + } 391 + ], 392 + "nodes": [ 393 + { 394 + "appearance": 1, 395 + "co_occurrence": 13, 396 + "facet_breadth": 1, 397 + "id": "balthasar_davi", 398 + "is_principal": false, 399 + "kg_edge_count": 1, 400 + "name": "Balthasar Davi", 401 + "observation_depth": 2, 402 + "recency": 0.4, 403 + "score": 63.1, 404 + "type": "person" 405 + }, 406 + { 407 + "appearance": 1, 408 + "co_occurrence": 13, 409 + "facet_breadth": 1, 410 + "id": "mesh_routing", 411 + "is_principal": false, 412 + "kg_edge_count": 1, 413 + "name": "Mesh Routing", 414 + "observation_depth": 3, 415 + "recency": 0.4, 416 + "score": 65.1, 417 + "type": "project" 418 + }, 419 + { 420 + "appearance": 1, 421 + "co_occurrence": 13, 422 + "facet_breadth": 1, 423 + "id": "verona_ventures", 424 + "is_principal": false, 425 + "kg_edge_count": 0, 426 + "name": "Verona Ventures", 427 + "observation_depth": 2, 428 + "recency": 0.4, 429 + "score": 58.1, 430 + "type": "company" 431 + }, 432 + { 433 + "appearance": 1, 434 + "co_occurrence": 4, 435 + "facet_breadth": 1, 436 + "id": "capulet_industries", 437 + "is_principal": false, 438 + "kg_edge_count": 1, 439 + "name": "Capulet Industries", 440 + "observation_depth": 0, 441 + "recency": 0.3, 442 + "score": 23.0, 443 + "type": "company" 444 + }, 445 + { 446 + "appearance": 11, 447 + "co_occurrence": 16, 448 + "facet_breadth": 2, 449 + "id": "mercutio_escalus", 450 + "is_principal": false, 451 + "kg_edge_count": 3, 452 + "name": "Mercutio Escalus", 453 + "observation_depth": 3, 454 + "recency": 0.4, 455 + "score": 88.2, 456 + "type": "person" 457 + }, 458 + { 459 + "appearance": 12, 460 + "co_occurrence": 16, 461 + 
"facet_breadth": 3, 462 + "id": "tybalt_capulet", 463 + "is_principal": false, 464 + "kg_edge_count": 3, 465 + "name": "Tybalt Capulet", 466 + "observation_depth": 4, 467 + "recency": 0.4, 468 + "score": 91.2, 469 + "type": "person" 470 + }, 471 + { 472 + "appearance": 16, 473 + "co_occurrence": 16, 474 + "facet_breadth": 3, 475 + "id": "juliet_capulet", 476 + "is_principal": false, 477 + "kg_edge_count": 4, 478 + "name": "Juliet Capulet", 479 + "observation_depth": 2, 480 + "recency": 0.4, 481 + "score": 92.2, 482 + "type": "person" 483 + }, 484 + { 485 + "appearance": 2, 486 + "co_occurrence": 13, 487 + "facet_breadth": 1, 488 + "id": "schema_bridge", 489 + "is_principal": false, 490 + "kg_edge_count": 1, 491 + "name": "Schema Bridge", 492 + "observation_depth": 2, 493 + "recency": 0.4, 494 + "score": 63.1, 495 + "type": "project" 496 + }, 497 + { 498 + "appearance": 25, 499 + "co_occurrence": 0, 500 + "facet_breadth": 3, 501 + "id": "romeo_montague", 502 + "is_principal": true, 503 + "kg_edge_count": 9, 504 + "name": "Romeo Montague", 505 + "observation_depth": 2, 506 + "recency": 0.4, 507 + "score": 53.2, 508 + "type": "person" 509 + }, 510 + { 511 + "appearance": 3, 512 + "co_occurrence": 13, 513 + "facet_breadth": 1, 514 + "id": "rosaline_prince", 515 + "is_principal": false, 516 + "kg_edge_count": 1, 517 + "name": "Rosaline Prince", 518 + "observation_depth": 2, 519 + "recency": 0.4, 520 + "score": 63.2, 521 + "type": "person" 522 + }, 523 + { 524 + "appearance": 3, 525 + "co_occurrence": 14, 526 + "facet_breadth": 1, 527 + "id": "montague_tech", 528 + "is_principal": false, 529 + "kg_edge_count": 2, 530 + "name": "Montague Tech", 531 + "observation_depth": 3, 532 + "recency": 0.4, 533 + "score": 74.1, 534 + "type": "company" 535 + }, 536 + { 537 + "appearance": 3, 538 + "co_occurrence": 15, 539 + "facet_breadth": 1, 540 + "id": "prince_escalus", 541 + "is_principal": false, 542 + "kg_edge_count": 2, 543 + "name": "Prince Escalus", 544 + "observation_depth": 
2, 545 + "recency": 0.4, 546 + "score": 76.2, 547 + "type": "person" 548 + }, 549 + { 550 + "appearance": 3, 551 + "co_occurrence": 15, 552 + "facet_breadth": 2, 553 + "id": "verona_platform", 554 + "is_principal": false, 555 + "kg_edge_count": 8, 556 + "name": "Verona Platform", 557 + "observation_depth": 3, 558 + "recency": 0.4, 559 + "score": 109.2, 560 + "type": "project" 561 + }, 562 + { 563 + "appearance": 5, 564 + "co_occurrence": 14, 565 + "facet_breadth": 2, 566 + "id": "nurse_angela", 567 + "is_principal": false, 568 + "kg_edge_count": 1, 569 + "name": "Nurse Angela", 570 + "observation_depth": 2, 571 + "recency": 0.4, 572 + "score": 68.1, 573 + "type": "person" 574 + }, 575 + { 576 + "appearance": 7, 577 + "co_occurrence": 9, 578 + "facet_breadth": 3, 579 + "id": "paris_duke", 580 + "is_principal": false, 581 + "kg_edge_count": 2, 582 + "name": "Paris Duke", 583 + "observation_depth": 2, 584 + "recency": 0.4, 585 + "score": 54.2, 586 + "type": "person" 587 + }, 588 + { 589 + "appearance": 9, 590 + "co_occurrence": 15, 591 + "facet_breadth": 2, 592 + "id": "benvolio_montague", 593 + "is_principal": false, 594 + "kg_edge_count": 1, 595 + "name": "Benvolio Montague", 596 + "observation_depth": 3, 597 + "recency": 0.4, 598 + "score": 74.2, 599 + "type": "person" 600 + }, 601 + { 602 + "appearance": 9, 603 + "co_occurrence": 15, 604 + "facet_breadth": 3, 605 + "id": "friar_lawrence", 606 + "is_principal": false, 607 + "kg_edge_count": 2, 608 + "name": "Friar Lawrence", 609 + "observation_depth": 2, 610 + "recency": 0.4, 611 + "score": 78.2, 612 + "type": "person" 613 + } 614 + ], 4 615 "stats": { 5 - "co_occurrence_edge_count": 0, 6 - "explicit_edge_count": 0, 7 - "total_entities": 0, 8 - "total_signals": 0 616 + "co_occurrence_edge_count": 26, 617 + "explicit_edge_count": 20, 618 + "total_entities": 33, 619 + "total_signals": 124 9 620 } 10 621 }
+19 -2
tests/baselines/api/search/day-results.json
··· 1 1 { 2 2 "day": "20260304", 3 3 "offset": 0, 4 - "results": [], 5 - "total": 0 4 + "results": [ 5 + { 6 + "agent": "knowledge_graph", 7 + "agent_icon": "🗺️", 8 + "agent_label": "Knowledge Graph", 9 + "day": "20260304", 10 + "facet": "", 11 + "facet_color": "", 12 + "facet_emoji": "", 13 + "facet_title": "", 14 + "id": "20260304/talents/knowledge_graph.md:7", 15 + "idx": 7, 16 + "path": "20260304/talents/knowledge_graph.md", 17 + "score": -1.9, 18 + "stream": null, 19 + "text": "# Part 1: Entity Extraction and Relationship Mapping\n\n## Relationship Mapping\n\n| Source Name | Target Name | Relationship Type | Context |\n| :--- | :--- | :--- | :--- |\n| **Romeo Montague** | **Juliet Capulet** | `met-at-conference` | First <strong>meeting</strong> at Denver Tech Summit keynote. |\n" 20 + } 21 + ], 22 + "total": 1 6 23 }
+689 -8
tests/baselines/api/search/search.json
··· 1 1 { 2 - "days": [], 2 + "days": [ 3 + { 4 + "date": "Friday March 6th", 5 + "day": "20260306", 6 + "has_more": true, 7 + "results": [ 8 + { 9 + "agent": "entity:detected", 10 + "agent_icon": "👤", 11 + "agent_label": "Entity", 12 + "day": "20260306", 13 + "facet": "montague", 14 + "facet_color": "#1e90ff", 15 + "facet_emoji": "⚔️", 16 + "facet_title": "Montague Tech", 17 + "id": "facets/montague/entities/20260306.jsonl:0", 18 + "idx": 0, 19 + "path": "facets/montague/entities/20260306.jsonl", 20 + "score": -2.2, 21 + "stream": null, 22 + "text": "### Person: <strong>Romeo</strong> Montague\n\n\nContinued Verona Platform development\n\n" 23 + }, 24 + { 25 + "agent": "entity:detected", 26 + "agent_icon": "👤", 27 + "agent_label": "Entity", 28 + "day": "20260306", 29 + "facet": "montague", 30 + "facet_color": "#1e90ff", 31 + "facet_emoji": "⚔️", 32 + "facet_title": "Montague Tech", 33 + "id": "facets/montague/entities/20260306.jsonl:3", 34 + "idx": 3, 35 + "path": "facets/montague/entities/20260306.jsonl", 36 + "score": -2.1, 37 + "stream": null, 38 + "text": "### Person: Balthasar Davi\n\n\nReviewed mesh routing PR with <strong>Romeo</strong>\n\n" 39 + }, 40 + { 41 + "agent": "entity:detected", 42 + "agent_icon": "👤", 43 + "agent_label": "Entity", 44 + "day": "20260306", 45 + "facet": "montague", 46 + "facet_color": "#1e90ff", 47 + "facet_emoji": "⚔️", 48 + "facet_title": "Montague Tech", 49 + "id": "facets/montague/entities/20260306.jsonl:4", 50 + "idx": 4, 51 + "path": "facets/montague/entities/20260306.jsonl", 52 + "score": -2.1, 53 + "stream": null, 54 + "text": "### Person: Mercutio Escalus\n\n\nCovered for <strong>Romeo</strong> during standup\n\n" 55 + }, 56 + { 57 + "agent": "screen", 58 + "agent_icon": "🖥️", 59 + "agent_label": "Screen", 60 + "day": "20260306", 61 + "facet": "", 62 + "facet_color": "", 63 + "facet_emoji": "", 64 + "facet_title": "", 65 + "id": "20260306/default/093000_300/talents/screen.md:0", 66 + "idx": 0, 67 + "path": 
"20260306/default/093000_300/talents/screen.md", 68 + "score": -1.9, 69 + "stream": "default", 70 + "text": "# Screen Summary\n\nSlack standup channel. Benvolio questioning <strong>Romeo</strong> about late-night commits.\n" 71 + }, 72 + { 73 + "agent": "segment", 74 + "agent_icon": "📄", 75 + "agent_label": "Segment", 76 + "day": "20260306", 77 + "facet": "", 78 + "facet_color": "", 79 + "facet_emoji": "", 80 + "facet_title": "", 81 + "id": "20260306/default/093000_300:1", 82 + "idx": 1, 83 + "path": "20260306/default/093000_300", 84 + "score": -1.9, 85 + "stream": "default", 86 + "text": "# Screen Summary\n\nSlack standup channel. Benvolio questioning <strong>Romeo</strong> about late-night commits.\n" 87 + } 88 + ], 89 + "showing": 5, 90 + "total": 25 91 + }, 92 + { 93 + "date": "Monday March 9th", 94 + "day": "20260309", 95 + "has_more": true, 96 + "results": [ 97 + { 98 + "agent": "action", 99 + "agent_icon": "📄", 100 + "agent_label": "Action", 101 + "day": "20260309", 102 + "facet": "verona", 103 + "facet_color": "#9370db", 104 + "facet_emoji": "🌹", 105 + "facet_title": "Verona", 106 + "id": "facets/verona/logs/20260309.jsonl:1", 107 + "idx": 1, 108 + "path": "facets/verona/logs/20260309.jsonl", 109 + "score": -1.6, 110 + "stream": null, 111 + "text": "### Deploy Complete by <strong>romeo</strong>_montague\n\n**Source:** deploy | **Time:** 13:45:00\n\n**Parameters:**\n- service: verona-gateway\n- version: 0.9.0\n" 112 + }, 113 + { 114 + "agent": "audio", 115 + "agent_icon": "🎤", 116 + "agent_label": "Transcript", 117 + "day": "20260309", 118 + "facet": "", 119 + "facet_color": "", 120 + "facet_emoji": "", 121 + "facet_title": "", 122 + "id": "20260309/default/090000_300/talents/audio.md:0", 123 + "idx": 0, 124 + "path": "20260309/default/090000_300/talents/audio.md", 125 + "score": -1.5, 126 + "stream": "default", 127 + "text": "# Audio Summary\n\n<strong>Romeo</strong> confessed the project to Benvolio and asked for infrastructure help. 
Benvolio agreed to spin up a Kubernetes staging cluster.\n" 128 + }, 129 + { 130 + "agent": "audio", 131 + "agent_icon": "🎤", 132 + "agent_label": "Transcript", 133 + "day": "20260309", 134 + "facet": "", 135 + "facet_color": "", 136 + "facet_emoji": "", 137 + "facet_title": "", 138 + "id": "20260309/default/193000_300/talents/audio.md:0", 139 + "idx": 0, 140 + "path": "20260309/default/193000_300/talents/audio.md", 141 + "score": -1.5, 142 + "stream": "default", 143 + "text": "# Audio Summary\n\nEvening rehearsal for board presentation. <strong>Romeo</strong> on live demo, Juliet on architecture. Professor Lawrence confirmed as moderator. Benvolio added auto-scaling.\n" 144 + }, 145 + { 146 + "agent": "entity:detected", 147 + "agent_icon": "👤", 148 + "agent_label": "Entity", 149 + "day": "20260309", 150 + "facet": "montague", 151 + "facet_color": "#1e90ff", 152 + "facet_emoji": "⚔️", 153 + "facet_title": "Montague Tech", 154 + "id": "facets/montague/entities/20260309.jsonl:0", 155 + "idx": 0, 156 + "path": "facets/montague/entities/20260309.jsonl", 157 + "score": -2.1, 158 + "stream": null, 159 + "text": "### Person: <strong>Romeo</strong> Montague\n\n\nConfessed project to Benvolio, preparing demo\n\n" 160 + }, 161 + { 162 + "agent": "segment", 163 + "agent_icon": "📄", 164 + "agent_label": "Segment", 165 + "day": "20260309", 166 + "facet": "", 167 + "facet_color": "", 168 + "facet_emoji": "", 169 + "facet_title": "", 170 + "id": "20260309/default/090000_300:0", 171 + "idx": 0, 172 + "path": "20260309/default/090000_300", 173 + "score": -1.5, 174 + "stream": "default", 175 + "text": "# Audio Summary\n\n<strong>Romeo</strong> confessed the project to Benvolio and asked for infrastructure help. 
Benvolio agreed to spin up a Kubernetes staging cluster.\n" 176 + } 177 + ], 178 + "showing": 5, 179 + "total": 7 180 + }, 181 + { 182 + "date": "Saturday March 7th", 183 + "day": "20260307", 184 + "has_more": true, 185 + "results": [ 186 + { 187 + "agent": "audio", 188 + "agent_icon": "🎤", 189 + "agent_label": "Transcript", 190 + "day": "20260307", 191 + "facet": "", 192 + "facet_color": "", 193 + "facet_emoji": "", 194 + "facet_title": "", 195 + "id": "20260307/default/100000_300/talents/audio.md:0", 196 + "idx": 0, 197 + "path": "20260307/default/100000_300/talents/audio.md", 198 + "score": -2.1, 199 + "stream": "default", 200 + "text": "# Audio Summary\n\nHeated confrontation. Tybalt Capulet accused <strong>Romeo</strong> of stealing Capulet IP. Mercutio defended <strong>Romeo</strong> and had his Capulet consulting contract terminated by Tybalt.\n" 201 + }, 202 + { 203 + "agent": "audio", 204 + "agent_icon": "🎤", 205 + "agent_label": "Transcript", 206 + "day": "20260307", 207 + "facet": "", 208 + "facet_color": "", 209 + "facet_emoji": "", 210 + "facet_title": "", 211 + "id": "20260307/default/150000_300/talents/audio.md:0", 212 + "idx": 0, 213 + "path": "20260307/default/150000_300/talents/audio.md", 214 + "score": -2.2, 215 + "stream": "default", 216 + "text": "# Audio Summary\n\nEmergency meeting at Montague Tech. Benvolio questioned <strong>Romeo</strong> about the secret project. <strong>Romeo</strong> clarified no company IP was shared. Team discussed legal exposure. 
<strong>Romeo</strong> proposed Professor Lawrence as mediator.\n" 217 + }, 218 + { 219 + "agent": "entity:detected", 220 + "agent_icon": "👤", 221 + "agent_label": "Entity", 222 + "day": "20260307", 223 + "facet": "montague", 224 + "facet_color": "#1e90ff", 225 + "facet_emoji": "⚔️", 226 + "facet_title": "Montague Tech", 227 + "id": "facets/montague/entities/20260307.jsonl:0", 228 + "idx": 0, 229 + "path": "facets/montague/entities/20260307.jsonl", 230 + "score": -2.1, 231 + "stream": null, 232 + "text": "### Person: <strong>Romeo</strong> Montague\n\n\nConfronted by Tybalt, called emergency meeting\n\n" 233 + }, 234 + { 235 + "agent": "segment", 236 + "agent_icon": "📄", 237 + "agent_label": "Segment", 238 + "day": "20260307", 239 + "facet": "", 240 + "facet_color": "", 241 + "facet_emoji": "", 242 + "facet_title": "", 243 + "id": "20260307/default/100000_300:0", 244 + "idx": 0, 245 + "path": "20260307/default/100000_300", 246 + "score": -2.1, 247 + "stream": "default", 248 + "text": "# Audio Summary\n\nHeated confrontation. Tybalt Capulet accused <strong>Romeo</strong> of stealing Capulet IP. Mercutio defended <strong>Romeo</strong> and had his Capulet consulting contract terminated by Tybalt.\n" 249 + }, 250 + { 251 + "agent": "segment", 252 + "agent_icon": "📄", 253 + "agent_label": "Segment", 254 + "day": "20260307", 255 + "facet": "", 256 + "facet_color": "", 257 + "facet_emoji": "", 258 + "facet_title": "", 259 + "id": "20260307/default/150000_300:0", 260 + "idx": 0, 261 + "path": "20260307/default/150000_300", 262 + "score": -2.2, 263 + "stream": "default", 264 + "text": "# Audio Summary\n\nEmergency meeting at Montague Tech. Benvolio questioned <strong>Romeo</strong> about the secret project. <strong>Romeo</strong> clarified no company IP was shared. Team discussed legal exposure. 
<strong>Romeo</strong> proposed Professor Lawrence as mediator.\n" 265 + } 266 + ], 267 + "showing": 5, 268 + "total": 8 269 + }, 270 + { 271 + "date": "Sunday March 8th", 272 + "day": "20260308", 273 + "has_more": false, 274 + "results": [ 275 + { 276 + "agent": "entity:detected", 277 + "agent_icon": "👤", 278 + "agent_label": "Entity", 279 + "day": "20260308", 280 + "facet": "montague", 281 + "facet_color": "#1e90ff", 282 + "facet_emoji": "⚔️", 283 + "facet_title": "Montague Tech", 284 + "id": "facets/montague/entities/20260308.jsonl:0", 285 + "idx": 0, 286 + "path": "facets/montague/entities/20260308.jsonl", 287 + "score": -2.1, 288 + "stream": null, 289 + "text": "### Person: <strong>Romeo</strong> Montague\n\n\nUnder board pressure, planning board presentation\n\n" 290 + }, 291 + { 292 + "agent": "event", 293 + "agent_icon": "📅", 294 + "agent_label": "Event", 295 + "day": "20260308", 296 + "facet": "verona", 297 + "facet_color": "#9370db", 298 + "facet_emoji": "🌹", 299 + "facet_title": "Verona", 300 + "id": "facets/verona/events/20260308.jsonl:0", 301 + "idx": 0, 302 + "path": "facets/verona/events/20260308.jsonl", 303 + "score": -1.4, 304 + "stream": null, 305 + "text": "### Meeting: Strategy Call with Professor Lawrence\n\n\n**Time Occurred:** 10:00 - 11:00\n**Participants:** <strong>Romeo</strong> Montague, Juliet Capulet, Friar Lawrence\n\nJoint venture strategy planning\n\nProposed board presentation strategy\n" 306 + }, 307 + { 308 + "agent": "knowledge_graph", 309 + "agent_icon": "🗺️", 310 + "agent_label": "Knowledge Graph", 311 + "day": "20260308", 312 + "facet": "", 313 + "facet_color": "", 314 + "facet_emoji": "", 315 + "facet_title": "", 316 + "id": "20260308/talents/knowledge_graph.md:2", 317 + "idx": 2, 318 + "path": "20260308/talents/knowledge_graph.md", 319 + "score": -1.3, 320 + "stream": null, 321 + "text": "# Part 1: Entity Extraction and Relationship Mapping ## Entity Profiles | Entity Name | Entity Type | First Appearance | Total Engagement 
| Context | | :--- | :--- | :--- | :--- | :--- | | **<strong>Romeo</strong> Montague** | Person | 10:00 | High | Under board pressure,..." 322 + }, 323 + { 324 + "agent": "meetings", 325 + "agent_icon": "📅", 326 + "agent_label": "Meetings", 327 + "day": "20260308", 328 + "facet": "", 329 + "facet_color": "", 330 + "facet_emoji": "", 331 + "facet_title": "", 332 + "id": "20260308/talents/meetings.md:0", 333 + "idx": 0, 334 + "path": "20260308/talents/meetings.md", 335 + "score": -2.0, 336 + "stream": null, 337 + "text": "# Meetings\n\n- 10:00 Strategy Call with Professor Lawrence, <strong>Romeo</strong>, and Juliet\n" 338 + } 339 + ], 340 + "showing": 4, 341 + "total": 4 342 + }, 343 + { 344 + "date": "Thursday March 5th", 345 + "day": "20260305", 346 + "has_more": true, 347 + "results": [ 348 + { 349 + "agent": "audio", 350 + "agent_icon": "🎤", 351 + "agent_label": "Transcript", 352 + "day": "20260305", 353 + "facet": "", 354 + "facet_color": "", 355 + "facet_emoji": "", 356 + "facet_title": "", 357 + "id": "20260305/default/090000_300/talents/audio.md:0", 358 + "idx": 0, 359 + "path": "20260305/default/090000_300/talents/audio.md", 360 + "score": -2.0, 361 + "stream": "default", 362 + "text": "# Audio Summary\n\nMorning standup at Montague Tech. Benvolio reported CI pipeline is green. <strong>Romeo</strong> mentioned wanting to explore ideas from the conference. 
Mercutio teased about <strong>Romeo</strong> meeting someone.\n" 363 + }, 364 + { 365 + "agent": "entity:detected", 366 + "agent_icon": "👤", 367 + "agent_label": "Entity", 368 + "day": "20260305", 369 + "facet": "montague", 370 + "facet_color": "#1e90ff", 371 + "facet_emoji": "⚔️", 372 + "facet_title": "Montague Tech", 373 + "id": "facets/montague/entities/20260305.jsonl:0", 374 + "idx": 0, 375 + "path": "facets/montague/entities/20260305.jsonl", 376 + "score": -2.1, 377 + "stream": null, 378 + "text": "### Person: <strong>Romeo</strong> Montague\n\n\nStarted Balcony App prototype with Juliet\n\n" 379 + }, 380 + { 381 + "agent": "entity:detected", 382 + "agent_icon": "👤", 383 + "agent_label": "Entity", 384 + "day": "20260305", 385 + "facet": "verona", 386 + "facet_color": "#9370db", 387 + "facet_emoji": "🌹", 388 + "facet_title": "Verona", 389 + "id": "facets/verona/entities/20260305.jsonl:0", 390 + "idx": 0, 391 + "path": "facets/verona/entities/20260305.jsonl", 392 + "score": -2.1, 393 + "stream": null, 394 + "text": "### Person: <strong>Romeo</strong> Montague\n\n\nSet up private repo for collaboration\n\n" 395 + }, 396 + { 397 + "agent": "event", 398 + "agent_icon": "📅", 399 + "agent_label": "Event", 400 + "day": "20260305", 401 + "facet": "montague", 402 + "facet_color": "#1e90ff", 403 + "facet_emoji": "⚔️", 404 + "facet_title": "Montague Tech", 405 + "id": "facets/montague/events/20260305.jsonl:0", 406 + "idx": 0, 407 + "path": "facets/montague/events/20260305.jsonl", 408 + "score": -2.1, 409 + "stream": null, 410 + "text": "### Meeting: Montague Tech Daily Standup\n\n\n**Time Occurred:** 09:00 - 09:30\n**Participants:** <strong>Romeo</strong> Montague, Benvolio Montague, Mercutio Escalus\n\nTeam standup\n\n<strong>Romeo</strong> mentioned conference ideas\n" 411 + }, 412 + { 413 + "agent": "segment", 414 + "agent_icon": "📄", 415 + "agent_label": "Segment", 416 + "day": "20260305", 417 + "facet": "", 418 + "facet_color": "", 419 + "facet_emoji": "", 420 + 
"facet_title": "", 421 + "id": "20260305/default/090000_300:0", 422 + "idx": 0, 423 + "path": "20260305/default/090000_300", 424 + "score": -2.0, 425 + "stream": "default", 426 + "text": "# Audio Summary\n\nMorning standup at Montague Tech. Benvolio reported CI pipeline is green. <strong>Romeo</strong> mentioned wanting to explore ideas from the conference. Mercutio teased about <strong>Romeo</strong> meeting someone.\n" 427 + } 428 + ], 429 + "showing": 5, 430 + "total": 12 431 + }, 432 + { 433 + "date": "Tuesday March 10th", 434 + "day": "20260310", 435 + "has_more": true, 436 + "results": [ 437 + { 438 + "agent": "audio", 439 + "agent_icon": "🎤", 440 + "agent_label": "Transcript", 441 + "day": "20260310", 442 + "facet": "", 443 + "facet_color": "", 444 + "facet_emoji": "", 445 + "facet_title": "", 446 + "id": "20260310/default/170000_300/talents/audio.md:0", 447 + "idx": 0, 448 + "path": "20260310/default/170000_300/talents/audio.md", 449 + "score": -1.5, 450 + "stream": "default", 451 + "text": "# Audio Summary\n\nCelebration! Both boards approved the Verona Platform joint venture. <strong>Romeo</strong> and Juliet named co-leads. Mercutio rehired as security lead. 
Tybalt reconciled.\n" 452 + }, 453 + { 454 + "agent": "entity:detected", 455 + "agent_icon": "👤", 456 + "agent_label": "Entity", 457 + "day": "20260310", 458 + "facet": "montague", 459 + "facet_color": "#1e90ff", 460 + "facet_emoji": "⚔️", 461 + "facet_title": "Montague Tech", 462 + "id": "facets/montague/entities/20260310.jsonl:0", 463 + "idx": 0, 464 + "path": "facets/montague/entities/20260310.jsonl", 465 + "score": -2.0, 466 + "stream": null, 467 + "text": "### Person: <strong>Romeo</strong> Montague\n\n\nNamed co-lead of Verona Platform joint venture\n\n" 468 + }, 469 + { 470 + "agent": "entity:detected", 471 + "agent_icon": "👤", 472 + "agent_label": "Entity", 473 + "day": "20260310", 474 + "facet": "verona", 475 + "facet_color": "#9370db", 476 + "facet_emoji": "🌹", 477 + "facet_title": "Verona", 478 + "id": "facets/verona/entities/20260310.jsonl:0", 479 + "idx": 0, 480 + "path": "facets/verona/entities/20260310.jsonl", 481 + "score": -2.0, 482 + "stream": null, 483 + "text": "### Person: <strong>Romeo</strong> Montague\n\n\nNamed co-lead of approved joint venture\n\n" 484 + }, 485 + { 486 + "agent": "meetings", 487 + "agent_icon": "📅", 488 + "agent_label": "Meetings", 489 + "day": "20260310", 490 + "facet": "", 491 + "facet_color": "", 492 + "facet_emoji": "", 493 + "facet_title": "", 494 + "id": "20260310/talents/meetings.md:0", 495 + "idx": 0, 496 + "path": "20260310/talents/meetings.md", 497 + "score": -2.0, 498 + "stream": null, 499 + "text": "# Meetings\n\n- 08:30 Pre-Board Meeting Prep (<strong>Romeo</strong>, Juliet, Benvolio)\n" 500 + }, 501 + { 502 + "agent": "segment", 503 + "agent_icon": "📄", 504 + "agent_label": "Segment", 505 + "day": "20260310", 506 + "facet": "", 507 + "facet_color": "", 508 + "facet_emoji": "", 509 + "facet_title": "", 510 + "id": "20260310/default/170000_300:0", 511 + "idx": 0, 512 + "path": "20260310/default/170000_300", 513 + "score": -1.5, 514 + "stream": "default", 515 + "text": "# Audio Summary\n\nCelebration! 
Both boards approved the Verona Platform joint venture. <strong>Romeo</strong> and Juliet named co-leads. Mercutio rehired as security lead. Tybalt reconciled.\n" 516 + } 517 + ], 518 + "showing": 5, 519 + "total": 14 520 + }, 521 + { 522 + "date": "Wednesday March 4th", 523 + "day": "20260304", 524 + "has_more": true, 525 + "results": [ 526 + { 527 + "agent": "audio", 528 + "agent_icon": "🎤", 529 + "agent_label": "Transcript", 530 + "day": "20260304", 531 + "facet": "", 532 + "facet_color": "", 533 + "facet_emoji": "", 534 + "facet_title": "", 535 + "id": "20260304/default/180000_300/talents/audio.md:0", 536 + "idx": 0, 537 + "path": "20260304/default/180000_300/talents/audio.md", 538 + "score": -2.0, 539 + "stream": "default", 540 + "text": "# Audio Summary\n\nEvening mixer at Denver Tech Summit. <strong>Romeo</strong> and Juliet had their first extended conversation about combining their API approaches. Mercutio tried to pull <strong>Romeo</strong> away to karaoke.\n" 541 + }, 542 + { 543 + "agent": "entity:detected", 544 + "agent_icon": "👤", 545 + "agent_label": "Entity", 546 + "day": "20260304", 547 + "facet": "capulet", 548 + "facet_color": "#dc143c", 549 + "facet_emoji": "🏰", 550 + "facet_title": "Capulet Industries", 551 + "id": "facets/capulet/entities/20260304.jsonl:1", 552 + "idx": 1, 553 + "path": "facets/capulet/entities/20260304.jsonl", 554 + "score": -2.2, 555 + "stream": null, 556 + "text": "### Person: Tybalt Capulet\n\n\nConfronted <strong>Romeo</strong> at hackathon\n\n" 557 + }, 558 + { 559 + "agent": "entity:detected", 560 + "agent_icon": "👤", 561 + "agent_label": "Entity", 562 + "day": "20260304", 563 + "facet": "montague", 564 + "facet_color": "#1e90ff", 565 + "facet_emoji": "⚔️", 566 + "facet_title": "Montague Tech", 567 + "id": "facets/montague/entities/20260304.jsonl:0", 568 + "idx": 0, 569 + "path": "facets/montague/entities/20260304.jsonl", 570 + "score": -2.0, 571 + "stream": null, 572 + "text": "### Person: <strong>Romeo</strong> 
Montague\n\n\nAttended Denver Tech Summit, met Juliet Capulet\n\n" 573 + }, 574 + { 575 + "agent": "event", 576 + "agent_icon": "📅", 577 + "agent_label": "Event", 578 + "day": "20260304", 579 + "facet": "capulet", 580 + "facet_color": "#dc143c", 581 + "facet_emoji": "🏰", 582 + "facet_title": "Capulet Industries", 583 + "id": "facets/capulet/events/20260304.jsonl:1", 584 + "idx": 1, 585 + "path": "facets/capulet/events/20260304.jsonl", 586 + "score": -2.1, 587 + "stream": null, 588 + "text": "### Social: Conference Mixer\n\n\n**Time Occurred:** 18:00 - 20:00\n**Participants:** Juliet Capulet, <strong>Romeo</strong> Montague\n\nNetworking event\n\nJuliet and <strong>Romeo</strong> exchanged Signal contacts\n" 589 + }, 590 + { 591 + "agent": "event", 592 + "agent_icon": "📅", 593 + "agent_label": "Event", 594 + "day": "20260304", 595 + "facet": "montague", 596 + "facet_color": "#1e90ff", 597 + "facet_emoji": "⚔️", 598 + "facet_title": "Montague Tech", 599 + "id": "facets/montague/events/20260304.jsonl:1", 600 + "idx": 1, 601 + "path": "facets/montague/events/20260304.jsonl", 602 + "score": -2.1, 603 + "stream": null, 604 + "text": "### Hackathon: Hackathon - API Bridge Challenge\n\n\n**Time Occurred:** 14:00 - 18:00\n**Participants:** <strong>Romeo</strong> Montague, Mercutio Escalus\n\nBuilt API bridge prototype\n\nTybalt confronted <strong>Romeo</strong>\n" 605 + } 606 + ], 607 + "showing": 5, 608 + "total": 16 609 + } 610 + ], 3 611 "facets": [ 4 612 { 5 613 "color": "", ··· 31 639 }, 32 640 { 33 641 "color": "#1e90ff", 34 - "count": 0, 642 + "count": 29, 35 643 "emoji": "⚔️", 36 644 "name": "montague", 37 645 "title": "Montague Tech" ··· 45 653 }, 46 654 { 47 655 "color": "#9370db", 48 - "count": 0, 656 + "count": 15, 49 657 "emoji": "🌹", 50 658 "name": "verona", 51 659 "title": "Verona" 52 660 }, 53 661 { 54 662 "color": "#dc143c", 55 - "count": 0, 663 + "count": 7, 56 664 "emoji": "🏰", 57 665 "name": "capulet", 58 666 "title": "Capulet Industries" 59 667 } 60 668 
], 61 - "showing_days": 0, 62 - "talents": [], 63 - "total": 0, 64 - "total_days": 0 669 + "showing_days": 7, 670 + "talents": [ 671 + { 672 + "count": 1, 673 + "icon": "📰", 674 + "label": "News", 675 + "name": "news" 676 + }, 677 + { 678 + "count": 1, 679 + "icon": "🖥️", 680 + "label": "Screen", 681 + "name": "screen" 682 + }, 683 + { 684 + "count": 12, 685 + "icon": "👤", 686 + "label": "Entity", 687 + "name": "entity:detected" 688 + }, 689 + { 690 + "count": 15, 691 + "icon": "📅", 692 + "label": "Event", 693 + "name": "event" 694 + }, 695 + { 696 + "count": 16, 697 + "icon": "🎤", 698 + "label": "Transcript", 699 + "name": "audio" 700 + }, 701 + { 702 + "count": 16, 703 + "icon": "🗺️", 704 + "label": "Knowledge Graph", 705 + "name": "knowledge_graph" 706 + }, 707 + { 708 + "count": 17, 709 + "icon": "📄", 710 + "label": "Segment", 711 + "name": "segment" 712 + }, 713 + { 714 + "count": 2, 715 + "icon": "📄", 716 + "label": "Session_Review", 717 + "name": "session_review" 718 + }, 719 + { 720 + "count": 2, 721 + "icon": "📅", 722 + "label": "Meetings", 723 + "name": "meetings" 724 + }, 725 + { 726 + "count": 4, 727 + "icon": "📄", 728 + "label": "Action", 729 + "name": "action" 730 + }, 731 + { 732 + "count": 8, 733 + "icon": "👤", 734 + "label": "Entity", 735 + "name": "entity" 736 + }, 737 + { 738 + "count": 9, 739 + "icon": "📄", 740 + "label": "Observation", 741 + "name": "observation" 742 + } 743 + ], 744 + "total": 103, 745 + "total_days": 7 65 746 }
+10 -10
tests/baselines/api/settings/providers.json
··· 210 210 "talent.system.chat": { 211 211 "disabled": false, 212 212 "group": "Think", 213 - "label": "Sol", 214 - "tier": 2, 215 - "type": "cogitate" 213 + "label": "Chat", 214 + "tier": 3, 215 + "type": "generate" 216 216 }, 217 217 "talent.system.coder": { 218 218 "disabled": false, ··· 268 268 "schedule": "activity", 269 269 "tier": 2, 270 270 "type": "generate" 271 + }, 272 + "talent.system.exec": { 273 + "disabled": false, 274 + "group": "Think", 275 + "label": "Exec", 276 + "tier": 3, 277 + "type": "cogitate" 271 278 }, 272 279 "talent.system.facet_newsletter": { 273 280 "disabled": false, ··· 377 384 "label": "test_missing_type_output_1568d299dc474aa9ba42401c7b1b75e2", 378 385 "tier": 2, 379 386 "type": null 380 - }, 381 - "talent.system.triage": { 382 - "disabled": false, 383 - "group": "Think", 384 - "label": "Triage", 385 - "tier": 2, 386 - "type": "cogitate" 387 387 }, 388 388 "talent.system.work": { 389 389 "disabled": false,
+2 -2
tests/baselines/api/sol/preview.json
··· 1 1 { 2 - "full_prompt": "## Instructions\n\n## Available Facets\n\n- **Capulet Industries** (`capulet`)\n Capulet Industries enterprise division\n - **Capulet Industries Entities**: Capulet Industries; Juliet Capulet; Nurse Angela; Paris Duke; Tybalt Capulet\n - **Capulet Industries Activities**: Meetings; Coding; Browsing; Email; Messaging; AI Conversation; Writing; Reading; Video; Gaming; Social Media; Planning; Productivity; Terminal; Design; Music\n\n- **Empty Entities Test** (`empty-entities`)\n - **Empty Entities Test Activities**: Meetings; Coding; Browsing; Email; Messaging; AI Conversation; Writing; Reading; Video; Gaming; Social Media; Planning; Productivity; Terminal; Design; Music\n\n- **Full Featured Facet** (`full-featured`)\n A facet for testing all features\n - **Full Featured Facet Entities**: First test entity; Second test entity; Third test entity with description\n - **Full Featured Facet Activities**: Meetings; Coding; Custom Activity; Email; Messaging\n\n- **Minimal Facet** (`minimal-facet`)\n - **Minimal Facet Activities**: Meetings; Coding; Browsing; Email; Messaging; AI Conversation; Writing; Reading; Video; Gaming; Social Media; Planning; Productivity; Terminal; Design; Music\n\n- **Montague Tech** (`montague`)\n Montague Tech startup operations\n - **Tester's Role**: CTO and co-founder of Montague Tech. 
Visionary full-stack engineer.\n - **Montague Tech Entities**: Balcony App; Balthasar Davi; Benvolio Montague; Friar Lawrence; Juliet Capulet; Mercutio Escalus; Mesh Routing; Montague Tech; Prince Escalus; Rosaline Prince; Schema Bridge; Verona Platform; Verona Ventures\n - **Montague Tech Activities**: Engineering; Meetings; Email; Messaging\n\n- **Priority Test** (`priority-test`)\n - **Priority Test Activities**: Meetings; Coding; Browsing; Email; Messaging; AI Conversation; Writing; Reading; Video; Gaming; Social Media; Planning; Productivity; Terminal; Design; Music\n\n- **Test Facet** (`test-facet`)\n A test facet for validating functionality\n - **Test Facet Entities**: Acme Corp; API Optimization; Bob Wilson; Dashboard Redesign; Docker; Jane Doe; John Smith; PostgreSQL; Tech Solutions Inc; Visual Studio Code\n - **Test Facet Activities**: Meetings; Coding; Browsing; Email; Messaging; AI Conversation; Writing; Reading; Video; Gaming; Social Media; Planning; Productivity; Terminal; Design; Music\n\n- **Verona** (`verona`)\n Cross-company Verona Platform collaboration\n - **Tester's Role**: Co-lead of the Verona Platform joint venture from Montague Tech.\n - **Verona Entities**: Balcony App; Friar Lawrence; Juliet Capulet; Verona Platform\n - **Verona Activities**: Engineering; Meetings; Design Review; Email; Messaging\n\n$recent_conversation\n\n## Adaptive Depth\n\nMatch your response depth to the question. 
The owner doesn't pick a mode — you decide.\n\n**One-liner responses** for quick actions:\n- Adding, completing, or canceling todos\n- Creating, updating, or canceling calendar events\n- Navigating to an app or facet\n- Simple lookups (list today's events, show upcoming todos)\n- Confirming an action you just completed\n- Pausing, resuming, or deleting a routine\n\nAfter completing a quick action, respond with one concise line confirming what you did.\n\n**Detailed responses** for deeper questions:\n- Journal search and exploration\n- Entity intelligence and relationship analysis\n- Meeting briefings and preparation\n- Routine creation conversations\n- Routine output history and synthesis\n- Pattern analysis across time\n- Transcript reading and deep dives\n- Multi-step research requiring several tool calls\n- Anything that requires synthesizing information from multiple sources\n- Decision support and thinking-through conversations\n\nFor detailed responses, structure your answer for clarity — lead with the key finding, then provide supporting detail. Use markdown formatting when it helps readability.\n\n## Investigation Depth\n\nFor diagnostic, research, or exploratory questions, aim to gather your answer in 5–10 tool calls. If you reach that range without a clear answer, stop and summarize: what you found, what you couldn't determine, and what the owner could try next. Diminishing returns set in fast — don't keep searching.\n\n## Tonal Range\n\nYou have one identity — not personas, not modes. But you have range.\n\nMatch your register to what the conversation needs:\n\n- **Analytical**: When the owner is working through architecture, debugging,\n evaluating options, or needs information synthesized. Clear, precise, direct.\n Show your work.\n- **Reflective**: When the owner is processing something — a difficult\n conversation, a pattern they're noticing, an unresolved feeling about a\n decision. Lead with questions, not solutions. 
Mirror what you're hearing\n before offering perspective.\n- **Challenging**: When the partner profile or conversation history shows a\n pattern the owner may not see — repeating a decision loop, avoiding a\n conversation, drifting from stated priorities. Name the pattern directly but\n respectfully. \"You've mentioned this three times in the last week without\n acting on it. What's holding you back?\"\n- **Warm**: When the owner shares a win, processes something vulnerable, or\n is having a genuinely hard day. Don't perform empathy — just be present.\n Acknowledge what happened. Don't rush to problem-solving.\n\n**How to read context:**\n- When you need more identity context, run `sol call identity` and use its\n output to understand the owner, your current priorities, and what kind of\n day it's been.\n- The conversation itself is the strongest signal. If the owner opens with\n \"I'm frustrated about...\" they're not asking for a status report.\n- When in doubt, start analytical and shift if the conversation goes\n somewhere else. Analytical is the safest default. But don't stay there\n when the conversation is clearly emotional.\n\n**What this is NOT:**\n- Not personas. You don't switch between \"empathetic sol\" and \"analytical sol.\"\n You're always sol. You just have range, like a person does.\n- Not forced. If the day is neutral, be neutral. Don't inject warmth or\n challenge where it doesn't belong.\n- Not therapeutic. You're a co-brain with range, not a counselor with modalities.\n\n## Skills\n\nYou have access to specialized skills. 
Use them by recognizing what the owner needs — don't ask which tool to use.\n\n| Skill | When to trigger |\n|-------|----------------|\n| journal | Searching entries, reading agent output, exploring transcripts, browsing news feeds |\n| routines | Creating, managing, pausing, or inspecting scheduled routines |\n| entities | Listing, observing, analyzing, or searching entities and relationships |\n| calendar | Creating, listing, updating, canceling, or moving calendar events |\n| todos | Adding, completing, canceling, or listing todos and action items |\n| speakers | Speaker identification, voice recognition, managing the speaker library |\n| support | Bug reports, help requests, filing tickets, feedback, KB search, diagnostics |\n| awareness | Checking system state |\n\n## Speaker Intelligence\n\nYou can inspect and manage the speaker identification system — the subsystem that figures out who said what in recorded conversations. Use these to help the owner build their speaker library over time.\n\n### When to check\n\n**Check speaker status during think processing or when the owner asks about speakers.** Don't check on every conversation — speaker state changes slowly.\n\n### Owner detection\n\nCheck speaker owner status. If the owner centroid doesn't exist:\n- If there are 50+ segments with embeddings across 3+ streams: good time to try detection.\n- If fewer: wait. Don't mention speaker ID proactively until there's enough data.\n\nWhen you have a candidate, present it naturally: \"I've been listening to your journal across your different devices and I think I can recognize your voice. Here are a few moments — does this sound right?\" Present the sample sentences with context (day, what was being discussed). Don't play audio — show text and context.\n\nIf the owner confirms, save the centroid. 
Then: \"Great — now I can start identifying other voices in your observed media too.\"\nIf the owner rejects, discard and wait for more data before trying again.\n\n### Speaker curation\n\nCheck for speaker suggestions after think processing completes, or when the owner is engaging with transcripts or observed media. Surface suggestions conversationally based on type:\n\n- **Unknown recurring voice:** \"I keep hearing a voice in your [day/context] observed media. They said things like '[sample text]'. Do you know who that is?\"\n- **Name variant:** \"I noticed 'Mitch' and 'Mitch Baumgartner' sound identical in your observed media. Should I merge them?\"\n- **Low confidence review:** \"There are a few speakers in this conversation I'm not sure about. Want to take a quick look?\"\n\n**Don't stack suggestions.** Surface one at a time. Wait for the owner to respond before presenting another. Speaker curation should feel like a natural aside, not a checklist.\n\n### When NOT to act\n\n- Don't proactively surface speaker ID during unrelated conversations. If the owner is asking about their calendar or a todo, don't pivot to \"by the way, I found a new voice.\"\n- Don't surface low-confidence suggestions. If a cluster has only a few embeddings, wait for it to grow.\n- Don't re-ask about a rejected owner candidate within the same week.\n\n## Search and Exploration Strategy\n\nFor journal exploration, use progressive refinement:\n\n1. **Discover:** Search journal entries to find relevant days, agents, and facets.\n2. **Narrow:** Add date, agent, or facet filters to focus results.\n3. 
**Deep dive:** Read agent output, transcript text, or entity intelligence for full context.\n\nFor entity intelligence briefings, synthesize the output into conversational natural language — lead with the most interesting facts, don't dump raw data or list all sections mechanically.\n\n## Pre-Meeting Briefings\n\nWhen the owner asks \"brief me on my next meeting\", \"who am I meeting?\", or similar:\n\n1. Find upcoming events with participants.\n2. For each participant, gather entity intelligence for background.\n3. Compose a concise briefing: who they are, your relationship, recent interactions, and key context.\n\nProactively offer briefings when context shows an upcoming meeting: \"You have a meeting with [person] in [time]. Want me to brief you?\"\n\n## Decision Support\n\nWhen Test User asks \"should I...\", \"help me think through...\", \"I'm torn between...\", or \"what do you think about...\" — slow down. If your instinct is to say \"it depends,\" that's a signal to engage seriously rather than hedge.\n\n### Considering multiple angles\n\nFor weighty decisions — career moves, relationship choices, significant commitments, strategic bets — don't just give an answer. Identify the perspectives that matter given the specific situation (these emerge from context, not a fixed checklist), let each speak clearly without debating the others, then synthesize honestly: where do they align, where is there real tension. Don't paper over disagreement to sound decisive.\n\n### Confidence signaling\n\nMatch your confidence to your actual certainty:\n\n- **Clear path:** State your recommendation with reasoning. Don't hedge when you genuinely see one right answer.\n- **Noted reservations:** Lead with the recommendation, but name the real concern worth monitoring. \"Test user, I'd go with X — but watch out for Y, because...\"\n- **Genuine tension:** Say so directly. 
\"I can't give you a clean answer on this.\" Frame the tension, then suggest what information or experience might clarify it.\n\nDon't pretend certainty. Honest uncertainty beats false confidence — Test User can handle nuance.\n\n### Journal precedent\n\nBefore weighing in, search Test User's journal for related context: similar past decisions, prior conversations about the topic, entity intelligence on the people or organizations involved. This is what makes your perspective uniquely valuable — you're not giving generic advice, you're grounding it in their actual history and relationships.\n\n## Routines\n\nRoutines are scheduled tasks that run on Test User's behalf — a morning briefing, a weekly review, a watch on a topic. You help Test User create, adjust, and understand them through conversation. Never expose cron syntax, UUIDs, or CLI commands to Test User.\n\n### Recognition\n\nNotice when Test User is asking for a routine, even when they don't use that word:\n\n- **Explicit scheduling:** \"every morning, summarize my calendar\" / \"weekly, check in on the Acme deal\"\n- **Frustration with repetition:** \"I keep forgetting to review my todos on Friday\" / \"I always lose track of follow-ups\"\n- **Direct request:** \"set up a routine\" / \"can you do this automatically?\"\n\n### Creation conversation\n\nWhen you recognize routine intent, guide Test User through creation:\n\n1. **Propose a fit.** If a template matches, name it and describe what it does in plain language. If not, offer to build a custom routine.\n2. **Confirm scope.** What facets should it cover? (Default: all, unless the intent clearly targets one area.)\n3. **Confirm timing.** Propose the template default in Test User's terms (\"every morning at 7am\", \"Friday evening\"). Let Test User adjust.\n4. **Confirm timezone.** Default to Test User's local timezone from journal config. Only ask if ambiguous.\n5. 
**Create and confirm.** Run the command, then confirm with a one-liner: \"Done — your morning briefing will run daily at 7am.\"\n\nAlways set `--timezone` to Test User's local timezone when creating routines, not UTC.\n\n### Custom routines\n\nWhen no template fits, build a custom routine:\n\n1. Ask Test User to describe what they want in plain language.\n2. Draft a name, cadence (in human terms), and instruction summary. Confirm with Test User.\n3. Create with explicit `--name`, `--instruction`, and `--cadence` flags.\n\n### Management\n\nHandle routine management conversationally. Test User says what they want; you translate.\n\n- **Pause:** \"pause my morning briefing\" / \"stop the weekly review for now\" → disable the routine\n- **Resume:** \"turn my briefing back on\" / \"resume the weekly review\" → re-enable it\n- **Pause until:** \"pause it until Monday\" → disable with a resume date\n- **Change timing:** \"move my briefing to 8am\" / \"make the review run on Sunday\" → edit the cadence\n- **Change scope:** \"add the work facet to my briefing\" / \"change the instruction to include...\" → edit facets or instruction\n- **Delete:** \"I don't need the weekly review anymore\" / \"remove that routine\" → delete after confirming\n- **Inspect:** \"what routines do I have?\" → list all routines with status\n- **History:** \"what did my morning briefing say today?\" / \"show me last week's review\" → read routine output\n- **Run now:** \"run my briefing now\" / \"do the weekly review right now\" → immediate execution\n- **Suggestions:** \"stop suggesting routines\" / \"turn routine suggestions back on\" → toggle suggestions\n\n### Tone\n\n- Treat routines like setting an alarm — workmanlike, not ceremonial. \"Done — morning briefing starts tomorrow at 7am.\"\n- Never explain how routines work internally. 
Test User doesn't need to know about cron, agents, or output files.\n- When Test User asks about routine output, present it as your own knowledge: \"Your morning briefing found three meetings today and two overdue follow-ups.\"\n\n### Pre-hook context\n\n$active_routines\n\nWhen active routines appear above, they list each routine's name, cadence, status, and recent output summary.\n\nUse this to:\n- Answer \"what routines do I have?\" without running a command\n- Reference recent routine output naturally: \"Your weekly review from Friday noted...\"\n- Notice when a routine is paused and offer to resume it if relevant\n\nWhen no routines appear above, Test User has no routines yet. Don't mention routines proactively — wait for Test User to express a need.\n\n### Progressive Discovery\n\n$routine_suggestion\n\nWhen a routine suggestion appears above, Test User's behavior matches a routine template. You did not request it — it was injected automatically.\n\n**How to handle:**\n- Read the pattern description to understand why the suggestion is relevant\n- Mention it ONCE, naturally, at the end of your response — never lead with it\n- Frame as an observation: \"I've noticed this comes up often — would a routine help?\"\n- If Test User declines or shows no interest, drop it immediately. 
Do not bring it up again this conversation.\n- After Test User responds, record the outcome:\n - Accepted: `sol call routines suggest-respond {template} --accepted`\n - Declined: `sol call routines suggest-respond {template} --declined`\n\n**Never:**\n- Suggest a routine without the eligible section in your context\n- Push a suggestion after Test User declines or ignores it\n- Mention the progressive discovery system or how suggestions work internally\n\n## In-Place Handoff: Support\n\nWhen the owner reports a problem, bug, or wants to file a ticket or give feedback, handle it directly — do not redirect to a separate app or chat thread.\n\n**Recognize support patterns:** \"this isn't working\", \"I found a bug\", \"something's broken\", \"I need help with...\", \"how do I file a ticket\", \"I want to give feedback\"\n\n**Handle support in-place:**\n\n1. Search the knowledge base with relevant keywords. If an article answers the question, present it.\n2. Run diagnostics to gather system state.\n3. Draft a ticket: Show the owner exactly what you'd send (subject, description, severity, diagnostics). Ask if they want to add or redact anything.\n4. Wait for approval before submitting. Never send data without explicit owner consent.\n5. Confirm submission with ticket number.\n\nFor existing tickets, check status and present responses.\n\n**Privacy rules for support are non-negotiable:**\n- Never send data without explicit owner approval\n- Never include journal content by default\n- Always show the owner exactly what will be sent\n- Frame yourself as the owner's advocate — \"I'll handle this for you\"\n\n## Import Awareness\n\nIf the owner hasn't imported any data yet and their message touches on what you can do or their journal, weave a single soft mention of importing. Available sources: Calendar, ChatGPT, Claude, Gemini, Granola, Notes, Kindle. Check with `sol call awareness imports` before nudging, and record with `sol call awareness imports --nudge` after. 
Do not repeat if already nudged.\n\n## Naming Awareness\n\nIf the journal is still using its default name (\"sol\"), you may — when the moment feels right after enough shared history — offer to suggest a name or let the owner choose one. Check naming readiness with `sol call sol thickness` before offering. Only once per session.\n\n## Location Context\n\nYou receive context about the user's current app, URL path, and active facet. Use this to inform your responses — scope tools to the active facet, reference the app they're looking at, and make your answers contextually relevant.\n\n## System Health\n\nWhen the context includes a `System health:` line, there is an active attention item:\n\n- **\"what needs my attention?\"** — Report the system health item. Be concise.\n- **Agent errors:** Explain which agents failed. Suggest checking logs.\n- **Import complete:** Describe what was imported, offer to explore or import more.\n\nWhen no `System health:` line is present, everything is fine.\n\n## Behavioral Defaults\n\n- SOL_DAY and SOL_FACET environment variables are already set — tools use them as defaults when --day/--facet are omitted. You can often omit these flags.\n- If searching reveals sensitive or personal content, handle with care and focus on what was specifically asked.\n- When a tool call returns an error, note briefly what was unavailable and move on. Do not retry or debug. Work with whatever data you successfully retrieved.\n\n## Tool Safety\n\nNever search or recurse across the home directory or filesystem root — no `grep -r ~/`, `find ~ -name`, `find / -name`, or equivalent broad sweeps. Keep filesystem exploration within the journal directory.\n\nIf a tool call returns an error or unexpectedly large output, note it and move on. 
Do not retry the call with broader scope.", 2 + "full_prompt": "## Instructions\n\n## Available Facets\n\n- **Capulet Industries** (`capulet`)\n Capulet Industries enterprise division\n - **Capulet Industries Entities**: Capulet Industries; Juliet Capulet; Nurse Angela; Paris Duke; Tybalt Capulet\n - **Capulet Industries Activities**: Meetings; Coding; Browsing; Email; Messaging; AI Conversation; Writing; Reading; Video; Gaming; Social Media; Planning; Productivity; Terminal; Design; Music\n\n- **Empty Entities Test** (`empty-entities`)\n - **Empty Entities Test Activities**: Meetings; Coding; Browsing; Email; Messaging; AI Conversation; Writing; Reading; Video; Gaming; Social Media; Planning; Productivity; Terminal; Design; Music\n\n- **Full Featured Facet** (`full-featured`)\n A facet for testing all features\n - **Full Featured Facet Entities**: First test entity; Second test entity; Third test entity with description\n - **Full Featured Facet Activities**: Meetings; Coding; Custom Activity; Email; Messaging\n\n- **Minimal Facet** (`minimal-facet`)\n - **Minimal Facet Activities**: Meetings; Coding; Browsing; Email; Messaging; AI Conversation; Writing; Reading; Video; Gaming; Social Media; Planning; Productivity; Terminal; Design; Music\n\n- **Montague Tech** (`montague`)\n Montague Tech startup operations\n - **Tester's Role**: CTO and co-founder of Montague Tech. 
Visionary full-stack engineer.\n - **Montague Tech Entities**: Balcony App; Balthasar Davi; Benvolio Montague; Friar Lawrence; Juliet Capulet; Mercutio Escalus; Mesh Routing; Montague Tech; Prince Escalus; Rosaline Prince; Schema Bridge; Verona Platform; Verona Ventures\n - **Montague Tech Activities**: Engineering; Meetings; Email; Messaging\n\n- **Priority Test** (`priority-test`)\n - **Priority Test Activities**: Meetings; Coding; Browsing; Email; Messaging; AI Conversation; Writing; Reading; Video; Gaming; Social Media; Planning; Productivity; Terminal; Design; Music\n\n- **Test Facet** (`test-facet`)\n A test facet for validating functionality\n - **Test Facet Entities**: Acme Corp; API Optimization; Bob Wilson; Dashboard Redesign; Docker; Jane Doe; John Smith; PostgreSQL; Tech Solutions Inc; Visual Studio Code\n - **Test Facet Activities**: Meetings; Coding; Browsing; Email; Messaging; AI Conversation; Writing; Reading; Video; Gaming; Social Media; Planning; Productivity; Terminal; Design; Music\n\n- **Verona** (`verona`)\n Cross-company Verona Platform collaboration\n - **Tester's Role**: Co-lead of the Verona Platform joint venture from Montague Tech.\n - **Verona Entities**: Balcony App; Friar Lawrence; Juliet Capulet; Verona Platform\n - **Verona Activities**: Engineering; Meetings; Design Review; Email; Messaging\n\n## Identity Frame\n\nYou are sol, responding to Tester inside the chat backend. You are not the research worker and you do not have tools in this step. 
Work only from the context already provided to you.\n\n## Current Digest\n\n$digest_contents\n\n$location\n\n$trigger_context\n\n$chat_stream_tail\n\n$active_talents\n\n$active_routines\n\n$routine_suggestion\n\n## Tonal Range\n\nMatch the owner's tone and stakes:\n- Be direct and brief for simple replies.\n- Be warm when the owner is sharing something difficult or personal.\n- Be analytical when the owner needs synthesis or a plan.\n- Be challenging only when there is a clear pattern worth naming.\n\n## Routine Etiquette\n\n- If a routine suggestion appears in context, mention it once and only at the end.\n- Do not raise routine suggestions on machine-driven follow-ups unless the context explicitly includes one.\n- Do not mention internal systems, hooks, or prompt assembly.\n\n## Import And Naming Awareness\n\n- If the owner is asking about imports, naming, or system readiness, answer plainly from the supplied context.\n- Request exec only when answering well requires deeper lookup, synthesis, or tool use.\n\n## When To Dispatch Exec\n\nSet `talent_request` only when the owner needs work that cannot be answered well from the supplied digest, chat history, active routines, and trigger context alone.\n\nDispatch exec for:\n- Journal exploration across days, entities, or transcripts\n- Multi-step synthesis or research\n- Meeting prep that needs fresh participant or activity lookup\n- Any request that clearly needs tool use or external state inspection\n\nDo not dispatch exec for:\n- Simple acknowledgements\n- Straightforward follow-up chat\n- Routine suggestions already supported by the supplied context\n- Brief guidance that can be answered from the current digest and chat tail\n\n## JSON Contract\n\nReturn exactly one JSON object matching `chat.schema.json`.\n\n- `message`: The owner-facing reply. Use `null` only when you genuinely have no safe or useful message to send.\n- `notes`: Brief internal summary of why you responded this way. Keep it factual and concise. 
Do not dump long reasoning.\n- `talent_request`: `null` unless exec should be dispatched. When dispatching, include:\n - `task`: the exact work exec should perform\n - `context`: optional structured hints that will help exec start fast\n\n## Output Rules\n\n- Return JSON only.\n- `message` should stand on its own without referring to hidden machinery.\n- If `talent_request` is present, the `message` should still be useful to the owner right now.\n- Prefer no dispatch over a weak or redundant dispatch.", 3 3 "multi_facet": false, 4 4 "name": "unified", 5 - "title": "Sol" 5 + "title": "Chat" 6 6 }
+15 -15
tests/baselines/api/sol/talents-day.json
··· 63 63 "chat": { 64 64 "app": null, 65 65 "color": "#6c757d", 66 - "description": "Sol — the journal itself, as a conversational partner", 66 + "description": "Structured conversational reply planner for the chat backend rewrite", 67 67 "multi_facet": false, 68 - "output_format": null, 68 + "output_format": "json", 69 69 "schedule": null, 70 70 "source": "system", 71 - "title": "Sol", 72 - "type": "cogitate" 71 + "title": "Chat", 72 + "type": "generate" 73 73 }, 74 74 "coder": { 75 75 "app": null, ··· 202 202 "source": "system", 203 203 "title": "Event Story", 204 204 "type": "generate" 205 + }, 206 + "exec": { 207 + "app": null, 208 + "color": "#6c757d", 209 + "description": "Sol — the journal itself, as a conversational partner", 210 + "multi_facet": false, 211 + "output_format": null, 212 + "schedule": null, 213 + "source": "system", 214 + "title": "Exec", 215 + "type": "cogitate" 205 216 }, 206 217 "facet_newsletter": { 207 218 "app": null, ··· 465 476 "schedule": null, 466 477 "source": "app", 467 478 "title": "TODO Weekly Scout", 468 - "type": "cogitate" 469 - }, 470 - "triage": { 471 - "app": null, 472 - "color": "#6c757d", 473 - "description": "Quick-action assistant for the chat bar — handles navigation, todos, calendar, and entity lookups", 474 - "multi_facet": false, 475 - "output_format": null, 476 - "schedule": null, 477 - "source": "system", 478 - "title": "Triage", 479 479 "type": "cogitate" 480 480 }, 481 481 "work": {
+17
tests/baselines/api/stats/stats.json
··· 1 1 { 2 2 "generators": { 3 + "chat": { 4 + "color": "#6c757d", 5 + "description": "Structured conversational reply planner for the chat backend rewrite", 6 + "hook": { 7 + "pre": "chat_context" 8 + }, 9 + "max_output_tokens": 2048, 10 + "mtime": 0, 11 + "output": "json", 12 + "path": "<PROJECT>/talent/chat.md", 13 + "schema": "chat.schema.json", 14 + "source": "system", 15 + "thinking_budget": 4096, 16 + "tier": 3, 17 + "title": "Chat", 18 + "type": "generate" 19 + }, 3 20 "conversation": { 4 21 "activities": [ 5 22 "meeting",
+7 -2
tests/test_anthropic.py
··· 235 235 236 236 ndjson_input = json.dumps( 237 237 { 238 + "name": "exec", 238 239 "prompt": "hello", 239 240 "provider": "anthropic", 240 241 "model": CLAUDE_SONNET_4, ··· 249 250 assert isinstance(events[0]["ts"], int) 250 251 # Prompt includes system instruction prepended during enrichment 251 252 assert "hello" in events[0]["prompt"] 252 - assert events[0]["name"] == "unified" 253 + assert events[0]["name"] == "exec" 253 254 assert events[0]["model"] == CLAUDE_SONNET_4 254 255 assert events[-1]["event"] == "finish" 255 256 assert isinstance(events[-1]["ts"], int) ··· 278 279 279 280 ndjson_input = json.dumps( 280 281 { 282 + "name": "exec", 281 283 "prompt": "hello", 282 284 "provider": "anthropic", 283 285 "model": CLAUDE_SONNET_4, ··· 294 296 assert isinstance(events[0]["ts"], int) 295 297 # Prompt includes system instruction prepended during enrichment 296 298 assert "hello" in events[0]["prompt"] 297 - assert events[0]["name"] == "unified" 299 + assert events[0]["name"] == "exec" 298 300 assert events[0]["model"] == CLAUDE_SONNET_4 299 301 assert events[-1]["event"] == "finish" 300 302 assert isinstance(events[-1]["ts"], int) ··· 325 327 326 328 ndjson_input = json.dumps( 327 329 { 330 + "name": "exec", 328 331 "prompt": "hello", 329 332 "provider": "anthropic", 330 333 "model": CLAUDE_SONNET_4, ··· 367 370 368 371 ndjson_input = json.dumps( 369 372 { 373 + "name": "exec", 370 374 "prompt": "hello", 371 375 "provider": "anthropic", 372 376 "model": CLAUDE_SONNET_4, ··· 407 411 408 412 ndjson_input = json.dumps( 409 413 { 414 + "name": "exec", 410 415 "prompt": "hello", 411 416 "provider": "anthropic", 412 417 "model": CLAUDE_SONNET_4,
+9 -9
tests/test_app_sol.py
··· 124 124 agents = get_talent_configs(type="cogitate") 125 125 126 126 # Should include known system agents with frontmatter metadata 127 - assert "chat" in agents 128 - assert agents["chat"]["source"] == "system" 129 - assert "title" in agents["chat"] 130 - assert "path" in agents["chat"] 127 + assert "exec" in agents 128 + assert agents["exec"]["source"] == "system" 129 + assert "title" in agents["exec"] 130 + assert "path" in agents["exec"] 131 131 132 132 133 133 def test_get_talent_configs_system_agents_have_metadata(fixture_journal): ··· 135 135 agents = get_talent_configs(type="cogitate") 136 136 137 137 # Check a known system agent 138 - chat = agents.get("chat") 139 - assert chat is not None 140 - assert chat["source"] == "system" 141 - assert "title" in chat 142 - assert "color" in chat 138 + exec_talent = agents.get("exec") 139 + assert exec_talent is not None 140 + assert exec_talent["source"] == "system" 141 + assert "title" in exec_talent 142 + assert "color" in exec_talent 143 143 144 144 145 145 def test_digest_talent_discovery_and_schedule_exclusion(fixture_journal):
+239 -121
tests/test_chat_context.py
··· 2 2 # Copyright (c) 2026 sol pbc 3 3 4 4 import importlib.util 5 + import json 6 + import sys 7 + from copy import deepcopy 8 + from datetime import datetime 5 9 from pathlib import Path 10 + 11 + from convey.chat_stream import append_chat_event 6 12 7 13 TEMPLATE_VAR_KEYS = { 8 - "recent_conversation", 14 + "digest_contents", 15 + "chat_stream_tail", 16 + "active_talents", 17 + "trigger_context", 18 + "location", 9 19 "active_routines", 10 20 "routine_suggestion", 11 21 } ··· 29 39 return result["template_vars"] 30 40 31 41 32 - def _read_chat_md() -> str: 33 - chat_md = Path(__file__).resolve().parents[1] / "talent" / "chat.md" 34 - return chat_md.read_text(encoding="utf-8") 42 + def _write_journal_config(journal: Path, data: dict) -> None: 43 + config_dir = journal / "config" 44 + config_dir.mkdir(parents=True, exist_ok=True) 45 + (config_dir / "journal.json").write_text( 46 + json.dumps(data, indent=2), 47 + encoding="utf-8", 48 + ) 35 49 36 50 37 - def test_chat_context_appends_conversation_memory(monkeypatch, tmp_path): 38 - """Conversation memory is appended when recent exchanges exist.""" 39 - from think.conversation import record_exchange 40 - from think.utils import now_ms 51 + def _ts(hour: int, minute: int, second: int = 0) -> int: 52 + return int(datetime(2026, 4, 20, hour, minute, second).timestamp() * 1000) 41 53 42 - monkeypatch.setenv("_SOLSTONE_JOURNAL_OVERRIDE", str(tmp_path)) 43 54 44 - record_exchange( 45 - ts=now_ms(), 46 - facet="work", 47 - user_message="hello", 48 - agent_response="hi there!", 49 - talent="unified", 55 + def test_chat_context_injects_digest_tail_trigger_location_and_routine_state( 56 + monkeypatch, tmp_path 57 + ): 58 + journal = tmp_path / "journal" 59 + monkeypatch.setenv("_SOLSTONE_JOURNAL_OVERRIDE", str(journal)) 60 + (journal / "identity").mkdir(parents=True, exist_ok=True) 61 + (journal / "identity" / "digest.md").write_text( 62 + "Digest notes for today.", 63 + encoding="utf-8", 50 64 ) 51 - 52 - result = 
_load_chat_context_module().pre_process( 53 - {"user_instruction": "Base instruction.", "facet": "work"} 65 + _write_journal_config( 66 + journal, 67 + { 68 + "identity": {"preferred": "Alice"}, 69 + "agent": {"name": "Sol-agent", "name_status": "custom"}, 70 + }, 54 71 ) 55 72 56 - template_vars = _assert_template_vars_result(result) 57 - assert "## Recent Conversation" in template_vars["recent_conversation"] 58 - assert "hello" in template_vars["recent_conversation"] 59 - assert "hi there!" in template_vars["recent_conversation"] 60 - 73 + owner_ts = _ts(9, 0) 74 + append_chat_event( 75 + "owner_message", 76 + ts=owner_ts, 77 + text="Please brief me for my meeting", 78 + app="home", 79 + path="/app/home", 80 + facet="work", 81 + ) 82 + append_chat_event( 83 + "sol_message", 84 + ts=_ts(9, 1), 85 + use_id="use-chat-1", 86 + text="I can help with that.", 87 + notes="Responded directly.", 88 + requested_exec=False, 89 + requested_task=None, 90 + ) 91 + append_chat_event( 92 + "talent_spawned", 93 + ts=_ts(9, 2), 94 + use_id="use-exec-1", 95 + name="exec", 96 + task="Prepare the meeting brief", 97 + started_at=_ts(9, 2), 98 + ) 61 99 62 - def test_chat_context_no_memory(monkeypatch, tmp_path): 63 - """Recent conversation is empty when no conversation history exists.""" 64 - monkeypatch.setenv("_SOLSTONE_JOURNAL_OVERRIDE", str(tmp_path)) 100 + routines_config = { 101 + "_meta": { 102 + "suggestions_enabled": True, 103 + "suggestions": { 104 + "meeting-prep": { 105 + "trigger_count": 3, 106 + "first_trigger": "2026-04-01", 107 + "last_trigger": "2026-04-19", 108 + "trigger_data": {}, 109 + "response": None, 110 + "suggested": False, 111 + } 112 + }, 113 + } 114 + } 115 + monkeypatch.setattr( 116 + "think.routines.get_routine_state", 117 + lambda: [ 118 + { 119 + "name": "Morning Briefing", 120 + "cadence": "0 9 * * *", 121 + "last_run": None, 122 + "enabled": True, 123 + "paused_until": None, 124 + "output_summary": "Shared the top priorities.", 125 + } 126 + ], 127 + 
) 128 + monkeypatch.setattr("think.routines.get_config", lambda: deepcopy(routines_config)) 129 + monkeypatch.setattr("think.routines.save_config", lambda config: None) 65 130 66 131 result = _load_chat_context_module().pre_process( 67 - {"user_instruction": "Base instruction."} 132 + { 133 + "prompt": "Please brief me for my meeting", 134 + "facet": "work", 135 + "day": "20260420", 136 + "trigger_kind": "owner_message", 137 + "trigger_payload": { 138 + "text": "Please brief me for my meeting", 139 + "app": "home", 140 + "path": "/app/home", 141 + "facet": "work", 142 + "ts": owner_ts, 143 + }, 144 + } 68 145 ) 69 146 70 147 template_vars = _assert_template_vars_result(result) 71 - assert template_vars["recent_conversation"] == "" 72 - 148 + assert template_vars["digest_contents"] == "Digest notes for today." 149 + assert "## Recent Chat" in template_vars["chat_stream_tail"] 150 + assert ( 151 + "**Alice** Please brief me for my meeting" in template_vars["chat_stream_tail"] 152 + ) 153 + assert "**Sol-agent** I can help with that." 
in template_vars["chat_stream_tail"] 154 + assert ( 155 + "*[exec spawned: Prepare the meeting brief]*" 156 + in template_vars["chat_stream_tail"] 157 + ) 158 + assert "## Active Execs" in template_vars["active_talents"] 159 + assert "Prepare the meeting brief" in template_vars["active_talents"] 160 + assert "## Trigger Context" in template_vars["trigger_context"] 161 + assert "Type: owner_message" in template_vars["trigger_context"] 162 + assert "Please brief me for my meeting" in template_vars["trigger_context"] 163 + assert "## Location" in template_vars["location"] 164 + assert "/app/home" in template_vars["location"] 165 + assert "work" in template_vars["location"] 166 + assert "## Active Routines" in template_vars["active_routines"] 167 + assert "Morning Briefing" in template_vars["active_routines"] 168 + assert "Routine Suggestion Eligible" in template_vars["routine_suggestion"] 169 + assert "meeting-prep" in template_vars["routine_suggestion"] 73 170 74 - def test_chat_md_contains_location_context(): 75 - """Location context lives in the static chat prompt.""" 76 - chat_md = _read_chat_md() 77 - assert "## Location Context" in chat_md 78 171 172 + def test_chat_context_routine_suggestion_only_counts_owner_messages( 173 + monkeypatch, tmp_path 174 + ): 175 + journal = tmp_path / "journal" 176 + monkeypatch.setenv("_SOLSTONE_JOURNAL_OVERRIDE", str(journal)) 79 177 80 - def test_chat_md_contains_system_health(): 81 - """System health guidance lives in the static chat prompt.""" 82 - chat_md = _read_chat_md() 83 - assert "## System Health" in chat_md 178 + routines_config = {"_meta": {"suggestions_enabled": True, "suggestions": {}}} 179 + save_calls: list[dict] = [] 180 + monkeypatch.setattr("think.routines.get_routine_state", lambda: []) 181 + monkeypatch.setattr("think.routines.get_config", lambda: routines_config) 182 + monkeypatch.setattr( 183 + "think.routines.save_config", 184 + lambda config: save_calls.append(deepcopy(config)), 185 + ) 84 186 187 + 
module = _load_chat_context_module() 85 188 86 - def test_chat_md_contains_behavioral_defaults(): 87 - """Behavioral defaults live in the static chat prompt.""" 88 - chat_md = _read_chat_md() 89 - assert "## Behavioral Defaults" in chat_md 189 + module.pre_process( 190 + { 191 + "prompt": "What is on my calendar today?", 192 + "trigger_kind": "talent_finished", 193 + "trigger_payload": { 194 + "name": "exec", 195 + "summary": "Collected the latest meeting prep notes.", 196 + }, 197 + } 198 + ) 90 199 200 + assert routines_config["_meta"]["suggestions"] == {} 201 + assert save_calls == [] 91 202 92 - def test_chat_md_contains_static_import_guidance(): 93 - """Import guidance lives in the static chat prompt.""" 94 - chat_md = _read_chat_md() 95 - assert "## Import Awareness" in chat_md 203 + module.pre_process( 204 + { 205 + "prompt": "What is on my calendar today?", 206 + "trigger_kind": "owner_message", 207 + "trigger_payload": { 208 + "text": "What is on my calendar today?", 209 + "ts": _ts(10, 0), 210 + }, 211 + } 212 + ) 96 213 214 + suggestion = routines_config["_meta"]["suggestions"]["morning-briefing"] 215 + assert suggestion["trigger_count"] == 1 216 + assert len(save_calls) == 1 97 217 98 - def test_chat_md_contains_static_naming_guidance(): 99 - """Naming guidance lives in the static chat prompt.""" 100 - chat_md = _read_chat_md() 101 - assert "## Naming Awareness" in chat_md 102 218 219 + def test_chat_context_preserves_save_routines_config_side_effect(monkeypatch, tmp_path): 220 + journal = tmp_path / "journal" 221 + monkeypatch.setenv("_SOLSTONE_JOURNAL_OVERRIDE", str(journal)) 103 222 104 - def test_chat_context_awareness_error_graceful(monkeypatch): 105 - """Awareness failures still return the full template var shape.""" 106 - monkeypatch.setattr("think.conversation.build_memory_context", lambda **kw: "") 223 + routines_config = {"_meta": {"suggestions_enabled": True, "suggestions": {}}} 224 + save_calls: list[dict] = [] 107 225 
monkeypatch.setattr("think.routines.get_routine_state", lambda: []) 108 - monkeypatch.setattr( 109 - "think.routines.get_config", lambda: {"_meta": {"suggestions": {}}} 110 - ) 226 + monkeypatch.setattr("think.routines.get_config", lambda: routines_config) 111 227 monkeypatch.setattr( 112 - "think.utils.get_config", 113 - lambda: {"agent": {"name": "aria", "name_status": "default"}}, 228 + "think.routines.save_config", 229 + lambda config: save_calls.append(deepcopy(config)), 114 230 ) 115 - monkeypatch.setattr("think.utils.get_journal", lambda: "/nonexistent") 116 231 117 - result = _load_chat_context_module().pre_process( 118 - {"user_instruction": "Base instruction."} 232 + _load_chat_context_module().pre_process( 233 + { 234 + "prompt": "What is on my calendar today?", 235 + "trigger_kind": "owner_message", 236 + "trigger_payload": { 237 + "text": "What is on my calendar today?", 238 + "ts": _ts(11, 0), 239 + }, 240 + } 119 241 ) 120 242 121 - template_vars = _assert_template_vars_result(result) 122 - assert all(template_vars[key] == "" for key in TEMPLATE_VAR_KEYS) 243 + assert len(save_calls) == 1 244 + saved = save_calls[0] 245 + assert saved["_meta"]["suggestions"]["morning-briefing"]["trigger_count"] == 1 246 + assert saved["_meta"]["suggestions"]["morning-briefing"]["first_trigger"] 123 247 124 248 125 - def test_chat_context_does_not_return_sol_awareness(monkeypatch): 126 - """sol_awareness is no longer part of the chat pre-hook output.""" 127 - monkeypatch.setattr("think.conversation.build_memory_context", lambda **kw: "") 249 + def test_chat_context_routines_omitted_when_empty(monkeypatch, tmp_path): 250 + journal = tmp_path / "journal" 251 + monkeypatch.setenv("_SOLSTONE_JOURNAL_OVERRIDE", str(journal)) 128 252 monkeypatch.setattr("think.routines.get_routine_state", lambda: []) 129 253 monkeypatch.setattr( 130 - "think.routines.get_config", lambda: {"_meta": {"suggestions": {}}} 131 - ) 132 - monkeypatch.setattr( 133 - "think.utils.get_config", 134 - 
lambda: {"agent": {"name": "aria", "name_status": "default"}}, 254 + "think.routines.get_config", 255 + lambda: {"_meta": {"suggestions_enabled": False, "suggestions": {}}}, 135 256 ) 257 + monkeypatch.setattr("think.routines.save_config", lambda config: None) 136 258 137 - result = _load_chat_context_module().pre_process( 138 - {"user_instruction": "Base instruction."} 139 - ) 259 + result = _load_chat_context_module().pre_process({"day": "20260420"}) 140 260 141 - assert "sol_awareness" not in result["template_vars"] 142 - 261 + template_vars = _assert_template_vars_result(result) 262 + assert template_vars["active_routines"] == "" 263 + assert template_vars["chat_stream_tail"] == "" 264 + assert template_vars["active_talents"] == "" 143 265 144 - def test_chat_context_routines_injected(monkeypatch): 145 - """Active routines section is appended when routines exist.""" 146 - monkeypatch.setattr("think.conversation.build_memory_context", lambda **kw: "") 147 - monkeypatch.setattr( 148 - "think.routines.get_routine_state", 149 - lambda: [ 150 - { 151 - "name": "Morning Briefing", 152 - "cadence": "0 9 * * *", 153 - "last_run": None, 154 - "enabled": True, 155 - "paused_until": None, 156 - "output_summary": None, 157 - } 158 - ], 159 - ) 160 266 161 - result = _load_chat_context_module().pre_process( 162 - {"user_instruction": "Base instruction."} 163 - ) 267 + def test_chat_context_enrichment_errors_are_graceful(monkeypatch, tmp_path): 268 + journal = tmp_path / "journal" 269 + monkeypatch.setenv("_SOLSTONE_JOURNAL_OVERRIDE", str(journal)) 164 270 165 - template_vars = _assert_template_vars_result(result) 166 - assert "## Active Routines" in template_vars["active_routines"] 167 - assert "Morning Briefing" in template_vars["active_routines"] 271 + module = _load_chat_context_module() 168 272 273 + def _boom(*_args, **_kwargs): 274 + raise RuntimeError("boom") 169 275 170 - def test_chat_context_routines_omitted_when_empty(monkeypatch): 171 - """Active routines 
section is omitted when no routines configured.""" 172 - monkeypatch.setattr("think.conversation.build_memory_context", lambda **kw: "") 173 - monkeypatch.setattr("think.routines.get_routine_state", lambda: []) 276 + monkeypatch.setattr(module, "_load_digest_contents", _boom) 277 + monkeypatch.setattr(module, "read_chat_tail", _boom) 278 + monkeypatch.setattr(module, "reduce_chat_state", _boom) 279 + monkeypatch.setattr("think.routines.get_routine_state", _boom) 280 + monkeypatch.setattr("think.routines.get_config", _boom) 281 + monkeypatch.setattr("think.routines.save_config", lambda config: None) 174 282 175 - result = _load_chat_context_module().pre_process( 176 - {"user_instruction": "Base instruction."} 283 + result = module.pre_process( 284 + { 285 + "prompt": "What is on my calendar today?", 286 + "trigger_kind": "owner_message", 287 + "trigger_payload": { 288 + "text": "What is on my calendar today?", 289 + "path": "/app/home", 290 + "ts": _ts(12, 0), 291 + }, 292 + } 177 293 ) 178 294 179 295 template_vars = _assert_template_vars_result(result) 296 + assert template_vars["digest_contents"] == "" 297 + assert template_vars["chat_stream_tail"] == "" 298 + assert template_vars["active_talents"] == "" 180 299 assert template_vars["active_routines"] == "" 300 + assert template_vars["routine_suggestion"] == "" 301 + assert "Type: owner_message" in template_vars["trigger_context"] 302 + assert "/app/home" in template_vars["location"] 181 303 182 304 183 - def test_chat_context_routines_error_graceful(monkeypatch): 184 - """Routine state failures still return the full template var shape.""" 185 - monkeypatch.setattr("think.conversation.build_memory_context", lambda **kw: "") 305 + def test_chat_context_drops_conversation_memory_imports(monkeypatch): 306 + monkeypatch.setattr("think.routines.get_routine_state", lambda: []) 186 307 monkeypatch.setattr( 187 - "think.routines.get_routine_state", 188 - lambda: (_ for _ in ()).throw(RuntimeError("boom")), 308 + 
"think.routines.get_config", 309 + lambda: {"_meta": {"suggestions_enabled": False, "suggestions": {}}}, 189 310 ) 190 - monkeypatch.setattr( 191 - "think.routines.get_config", lambda: {"_meta": {"suggestions": {}}} 192 - ) 193 - monkeypatch.setattr( 194 - "think.utils.get_config", 195 - lambda: {"agent": {"name": "aria", "name_status": "default"}}, 196 - ) 311 + monkeypatch.setattr("think.routines.save_config", lambda config: None) 197 312 198 - result = _load_chat_context_module().pre_process( 199 - {"user_instruction": "Base instruction."} 200 - ) 313 + source = ( 314 + Path(__file__).resolve().parents[1] / "talent" / "chat_context.py" 315 + ).read_text(encoding="utf-8") 316 + assert "think.conversation" not in source 317 + assert "conversation_memory" not in source 201 318 202 - template_vars = _assert_template_vars_result(result) 203 - assert template_vars["active_routines"] == "" 204 - assert set(template_vars) == TEMPLATE_VAR_KEYS 319 + sys.modules.pop("think.conversation", None) 320 + _load_chat_context_module() 321 + 322 + assert "think.conversation" not in sys.modules
+3 -1
tests/test_google.py
··· 121 121 122 122 ndjson_input = json.dumps( 123 123 { 124 + "name": "exec", 124 125 "prompt": "hello", 125 126 "provider": "google", 126 127 "model": GEMINI_FLASH, ··· 134 135 assert events[0]["event"] == "start" 135 136 assert isinstance(events[0]["ts"], int) 136 137 assert "hello" in events[0]["prompt"] 137 - assert events[0]["name"] == "unified" 138 + assert events[0]["name"] == "exec" 138 139 assert events[0]["model"] == GEMINI_FLASH 139 140 assert events[-1]["event"] == "finish" 140 141 assert isinstance(events[-1]["ts"], int) ··· 160 161 161 162 ndjson_input = json.dumps( 162 163 { 164 + "name": "exec", 163 165 "prompt": "hello", 164 166 "provider": "google", 165 167 "model": GEMINI_FLASH,
+1
tests/test_google_thinking.py
··· 103 103 104 104 ndjson_input = json.dumps( 105 105 { 106 + "name": "exec", 106 107 "prompt": "hello", 107 108 "provider": "google", 108 109 "model": GEMINI_FLASH,
+108
tests/test_maint_006_rename_unified_triage_providers.py
··· 1 + # SPDX-License-Identifier: AGPL-3.0-only 2 + # Copyright (c) 2026 sol pbc 3 + 4 + import importlib 5 + import json 6 + from pathlib import Path 7 + 8 + mod = importlib.import_module("apps.sol.maint.006_rename_unified_triage_providers") 9 + 10 + 11 + def _write_journal_config(journal: Path, data: object) -> Path: 12 + config_dir = journal / "config" 13 + config_dir.mkdir(parents=True, exist_ok=True) 14 + config_path = config_dir / "journal.json" 15 + config_path.write_text(json.dumps(data, indent=2), encoding="utf-8") 16 + return config_path 17 + 18 + 19 + def test_rename_unified_and_remove_triage_idempotent(tmp_path): 20 + config_path = _write_journal_config( 21 + tmp_path, 22 + { 23 + "providers": { 24 + "contexts": { 25 + "talent.system.unified": {"provider": "openai"}, 26 + "talent.system.triage": {"provider": "anthropic"}, 27 + "talent.system.digest": {"provider": "google"}, 28 + } 29 + } 30 + }, 31 + ) 32 + 33 + summary = mod.run_migration(tmp_path, dry_run=False) 34 + 35 + assert summary.renamed == 1 36 + assert summary.removed == 1 37 + assert summary.preserved == 0 38 + assert summary.errors == 0 39 + data = json.loads(config_path.read_text(encoding="utf-8")) 40 + assert "talent.system.unified" not in data["providers"]["contexts"] 41 + assert "talent.system.triage" not in data["providers"]["contexts"] 42 + assert data["providers"]["contexts"]["talent.system.chat"] == {"provider": "openai"} 43 + assert data["providers"]["contexts"]["talent.system.digest"] == { 44 + "provider": "google" 45 + } 46 + 47 + before_bytes = config_path.read_bytes() 48 + before_mtime_ns = config_path.stat().st_mtime_ns 49 + 50 + rerun = mod.run_migration(tmp_path, dry_run=False) 51 + 52 + assert rerun.renamed == 0 53 + assert rerun.removed == 0 54 + assert rerun.preserved == 0 55 + assert rerun.errors == 0 56 + assert rerun.skipped_reason is None 57 + assert config_path.read_bytes() == before_bytes 58 + assert config_path.stat().st_mtime_ns == before_mtime_ns 59 + 60 + 61 + 
def test_preserves_existing_chat_context_when_unified_exists(tmp_path): 62 + config_path = _write_journal_config( 63 + tmp_path, 64 + { 65 + "providers": { 66 + "contexts": { 67 + "talent.system.unified": {"provider": "openai"}, 68 + "talent.system.chat": {"provider": "google"}, 69 + } 70 + } 71 + }, 72 + ) 73 + 74 + summary = mod.run_migration(tmp_path, dry_run=False) 75 + 76 + assert summary.renamed == 0 77 + assert summary.removed == 0 78 + assert summary.preserved == 1 79 + assert summary.errors == 0 80 + data = json.loads(config_path.read_text(encoding="utf-8")) 81 + assert "talent.system.unified" not in data["providers"]["contexts"] 82 + assert data["providers"]["contexts"]["talent.system.chat"] == {"provider": "google"} 83 + 84 + 85 + def test_noop_when_no_legacy_provider_contexts_present(tmp_path): 86 + config_path = _write_journal_config( 87 + tmp_path, 88 + { 89 + "providers": { 90 + "contexts": { 91 + "talent.system.chat": {"provider": "openai"}, 92 + "talent.system.digest": {"provider": "google"}, 93 + } 94 + } 95 + }, 96 + ) 97 + before_bytes = config_path.read_bytes() 98 + before_mtime_ns = config_path.stat().st_mtime_ns 99 + 100 + summary = mod.run_migration(tmp_path, dry_run=False) 101 + 102 + assert summary.renamed == 0 103 + assert summary.removed == 0 104 + assert summary.preserved == 0 105 + assert summary.errors == 0 106 + assert summary.skipped_reason is None 107 + assert config_path.read_bytes() == before_bytes 108 + assert config_path.stat().st_mtime_ns == before_mtime_ns
+9 -1
tests/test_talent.py
··· 103 103 104 104 def test_get_agent_normalizes_cwd_for_cogitate(): 105 105 config = get_talent("chat") 106 - assert config["cwd"] == "journal" 106 + assert config["type"] == "generate" 107 + assert "cwd" not in config 107 108 108 109 109 110 def test_get_agent_preserves_repo_cwd_for_coder(): 110 111 config = get_talent("coder") 111 112 assert config["cwd"] == "repo" 113 + 114 + 115 + def test_get_talent_defaults_to_chat(): 116 + config = get_talent() 117 + assert config["name"] == "chat" 118 + assert config["type"] == "generate" 119 + assert Path(config["path"]).name == "chat.md" 112 120 113 121 114 122 def _write_talent_file(tmp_path: Path, name: str, metadata: dict) -> Path:
+2 -2
tests/test_talents_ndjson.py
··· 31 31 prompt = config.get("prompt", "") 32 32 provider = config.get("provider", "") 33 33 model = config.get("model", "") 34 - name = config.get("name", "unified") 34 + name = config.get("name", "chat") 35 35 36 36 if on_event: 37 37 on_event( ··· 59 59 config = dict(request) 60 60 # Add required fields if not present 61 61 if "name" not in config: 62 - config["name"] = "unified" 62 + config["name"] = "chat" 63 63 if "provider" not in config: 64 64 config["provider"] = "google" 65 65 if "model" not in config:
+16 -2
tests/verify_api.py
··· 196 196 "path": "/app/search/api/search", 197 197 "params": {"q": "romeo", "limit": "5", "offset": "0"}, 198 198 "status": 200, 199 + "sandbox_only": True, 199 200 }, 200 201 { 201 202 "app": "search", ··· 203 204 "path": "/app/search/api/day_results", 204 205 "params": {"q": "meeting", "day": "20260304", "offset": "0", "limit": "5"}, 205 206 "status": 200, 207 + "sandbox_only": True, 206 208 }, 207 209 # apps/settings/routes.py 208 210 { ··· 385 387 "path": "/app/graph/api/graph", 386 388 "params": {}, 387 389 "status": 200, 390 + "sandbox_only": True, 388 391 }, 389 392 ] 390 393 ··· 574 577 return failures 575 578 576 579 577 - def update_all(client: Any, journal_path: str) -> int: 580 + def update_all( 581 + client: Any, 582 + journal_path: str, 583 + *, 584 + include_sandbox_only: bool, 585 + ) -> int: 578 586 """Refresh all endpoint baselines from current responses.""" 579 587 580 588 updated = 0 581 589 for endpoint in ENDPOINTS: 590 + if endpoint.get("sandbox_only") and not include_sandbox_only: 591 + continue 582 592 identifier = f"{endpoint['app']}/{endpoint['name']}" 583 593 path = baseline_path(endpoint) 584 594 path.parent.mkdir(parents=True, exist_ok=True) ··· 695 705 print(f"API baseline verification passed for {len(ENDPOINTS)} endpoints.") 696 706 return 0 697 707 698 - updated = update_all(client, journal_path) 708 + updated = update_all( 709 + client, 710 + journal_path, 711 + include_sandbox_only=bool(args.base_url), 712 + ) 699 713 print(f"Updated {updated} baseline files.") 700 714 return 0 701 715
+1 -1
think/chat_cli.py
··· 24 24 parser.add_argument("--facet", help="Facet context") 25 25 parser.add_argument("--provider", help="AI provider override") 26 26 parser.add_argument( 27 - "--talent", default="unified", help="Talent agent name (default: unified)" 27 + "--talent", default="chat", help="Talent agent name (default: chat)" 28 28 ) 29 29 args = setup_cli(parser) 30 30 require_solstone()
+3 -3
think/cortex.py
··· 211 211 return 212 212 213 213 # Create _active.jsonl file (exclusive creation to prevent race conditions) 214 - name = request.get("name", "unified") 214 + name = request["name"] 215 215 safe_name = name.replace(":", "--") 216 216 talent_subdir = self.talents_dir / safe_name 217 217 talent_subdir.mkdir(parents=True, exist_ok=True) ··· 293 293 if process_type == "talent": 294 294 from think.talent import get_talent 295 295 296 - talent_key = str(config.get("name", "unified")) 296 + talent_key = str(config["name"]) 297 297 talent_config = get_talent(talent_key) 298 298 if talent_config.get("type") == "cogitate": 299 299 # Resolve here because prepare_config() runs inside think.talents. ··· 692 692 693 693 summary = { 694 694 "use_id": use_id, 695 - "name": request.get("name", "unified"), 695 + "name": request["name"], 696 696 "day": day, 697 697 "facet": request.get("facet"), 698 698 "ts": start_ts,
+5 -2
think/cortex_client.py
··· 42 42 43 43 Args: 44 44 prompt: The task or question for the talent 45 - name: Talent name - system (e.g., "unified") or app-qualified (e.g., "entities:entity_assist") 45 + name: Talent name - system (e.g., "chat") or app-qualified (e.g., "entities:entity_assist") 46 46 provider: AI provider - openai, google, or anthropic 47 47 config: Provider-specific configuration (model, max_output_tokens, thinking_budget, etc.) 48 48 ··· 267 267 ) -> Dict[str, Any]: 268 268 """List talent uses from the journal with pagination and filtering. 269 269 270 + Legacy unnamed run logs predate the chat rename and are surfaced as chat. 271 + 270 272 Args: 271 273 limit: Maximum number of uses to return (1-100) 272 274 offset: Number of uses to skip ··· 352 354 # Extract basic info 353 355 use_info = { 354 356 "id": use_id, 355 - "name": request.get("name", "unified"), 357 + # Legacy unnamed run logs predate the chat rename; treat them as chat. 358 + "name": request.get("name", "chat"), 356 359 "start": request.get("ts", 0), 357 360 "status": status, 358 361 "prompt": request.get("prompt", ""),
+6 -3
think/talent.py
··· 35 35 36 36 TALENT_DIR = Path(__file__).parent.parent / "talent" 37 37 APPS_DIR = Path(__file__).parent.parent / "apps" 38 + _UNDISCOVERED_SYSTEM_TALENTS = {"triage"} 38 39 39 40 40 41 # --------------------------------------------------------------------------- ··· 231 232 if TALENT_DIR.is_dir(): 232 233 for md_path in sorted(TALENT_DIR.glob("*.md")): 233 234 name = md_path.stem 235 + if name in _UNDISCOVERED_SYSTEM_TALENTS: 236 + continue 234 237 info = _load_prompt_metadata(md_path) 235 238 236 239 info["source"] = "system" ··· 340 343 Parameters 341 344 ---------- 342 345 name: 343 - Talent name - either system talent (e.g., "unified") or 346 + Talent name - either system talent (e.g., "chat") or 344 347 app-namespaced talent (e.g., "support:support"). 345 348 346 349 Returns ··· 498 501 499 502 500 503 def get_talent( 501 - name: str = "unified", 504 + name: str = "chat", 502 505 facet: str | None = None, 503 506 analysis_day: str | None = None, 504 507 ) -> dict: ··· 511 514 Parameters 512 515 ---------- 513 516 name: 514 - Talent name to load. Can be a system talent (e.g., "unified") 517 + Talent name to load. Can be a system talent (e.g., "chat") 515 518 or an app-namespaced talent (e.g., "support:support" for apps/support/talent/support). 516 519 facet: 517 520 Optional facet name to focus on. Controls $facets template variable.
+1 -1
think/talent_cli.py
··· 804 804 return None 805 805 req_segment = request_event.get("segment") 806 806 req_facet = request_event.get("facet") 807 - req_name = request_event.get("name", "unified") 807 + req_name = request_event["name"] 808 808 req_env = request_event.get("env") or {} 809 809 req_stream = req_env.get("SOL_STREAM") if req_env else None 810 810 day_dir = day_path(req_day, create=False)
+5 -5
think/talents.py
··· 459 459 from think.models import resolve_model_for_provider, resolve_provider 460 460 from think.talent import get_talent, key_to_context 461 461 462 - name = request.get("name", "unified") 462 + name = request["name"] 463 463 facet = request.get("facet") 464 464 day = request.get("day") 465 465 segment = request.get("segment") ··· 755 755 "event": "dry_run", 756 756 "ts": now_ms(), 757 757 "type": talent_type, 758 - "name": config.get("name", "unified"), 758 + "name": config["name"], 759 759 "provider": config.get("provider", ""), 760 760 "model": config.get("model") or "unknown", 761 761 "system_instruction": config.get("system_instruction", ""), ··· 872 872 if not context: 873 873 from think.talent import key_to_context 874 874 875 - context = key_to_context(config.get("name", "unified")) 875 + context = key_to_context(config["name"]) 876 876 backup_model = resolve_model_for_provider(context, backup, "cogitate") 877 877 878 878 emit_event( ··· 928 928 from think.models import generate_with_result 929 929 from think.talent import key_to_context 930 930 931 - name = config.get("name", "unified") 931 + name = config["name"] 932 932 transcript = config.get("transcript", "") 933 933 user_instruction = config.get("user_instruction", "") 934 934 prompt = config.get("prompt", "") ··· 1072 1072 emit_event: Callback to emit JSONL events 1073 1073 dry_run: If True, emit dry_run event instead of calling LLM 1074 1074 """ 1075 - name = config.get("name", "unified") 1075 + name = config["name"] 1076 1076 provider = config.get("provider", "google") 1077 1077 model = config.get("model") 1078 1078 is_cogitate = config["type"] == "cogitate"