personal memory agent
0
fork

Configure Feed

Select the types of activity you want to include in your feed.

sol/agents: read day list from fattened day index

Move per-use field derivation out of the listing path and into the
writer that already opens the same file once at use-completion. The
day-index entry is now a complete summary; api_talents_day reads it
directly with no per-completed-use file opens.

Same on-the-wire JSON shape. The same code also fixes legacy day indexes
(agent_id schema) which previously returned empty.

- think/cortex.py: _append_day_index now writes thinking_count,
tool_count, cost, error_message, output_file, prompt alongside the
existing 10 fields. Single forward pass over the completed file.
- apps/sol/routes.py: _get_uses_for_day reads the day index directly,
with a single dual-key fallback for legacy agent_id entries.
_parse_use_file deleted; get_use_end_state no longer called from
the listing path. New _parse_active_use_file handles the bounded
active glob.
- tests/test_app_sol.py: covers index-only sufficiency, legacy
agent_id schema, and tolerated blanks for pre-existing entries.

Pre-existing entries that lack the new fields render as null.
No retroactive bulk migration; no lazy upgrade path. Founder-pulled
older days will gain rich data only when those uses run again.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>

+312 -86
+90 -81
apps/sol/routes.py
··· 143 143 return result 144 144 145 145 146 - def _parse_use_file(use_file: Path) -> dict[str, Any] | None: 147 - """Parse a use JSONL file and extract metadata. 146 + def _get_use_day(use_file: Path) -> str: 147 + """Get the logical day for a use from its request event. 148 148 149 - Returns dict with: id, name, start, status, prompt, facet, failed, 150 - runtime_seconds, thinking_count, tool_count, cost, model, provider, 151 - error_message. 152 - Returns None if file cannot be parsed. 149 + Prefers the ``day`` field from the request event (the day being processed) 150 + over the use_id timestamp (when the agent actually ran). This ensures 151 + overnight think uses appear under the day they processed. 153 152 """ 154 - from think.cortex_client import get_use_end_state 153 + use_id = use_file.stem.replace("_active", "") 154 + try: 155 + with open(use_file, "r") as f: 156 + first_line = f.readline().strip() 157 + if first_line: 158 + request_event = json.loads(first_line) 159 + req_day = request_event.get("day") 160 + if req_day: 161 + return req_day 162 + except (json.JSONDecodeError, IOError): 163 + pass 164 + return _use_id_to_day(use_id) 165 + 155 166 167 + def _parse_active_use_file(use_file: Path) -> dict[str, Any] | None: 168 + """Parse an active use JSONL file for the day listing.""" 156 169 try: 157 170 with open(use_file, "r") as f: 158 171 lines = f.readlines() ··· 168 181 if request_event.get("event") != "request": 169 182 return None 170 183 171 - is_active = "_active.jsonl" in use_file.name 172 - use_id = use_file.stem.replace("_active", "") 184 + thinking_count = 0 185 + tool_count = 0 186 + model = None 187 + provider = request_event.get("provider") 173 188 174 - # Parse events using shared helper 175 - event_data = _parse_use_events(lines[1:]) 189 + for line in lines[1:]: 190 + line = line.strip() 191 + if not line: 192 + continue 193 + try: 194 + event = json.loads(line) 195 + except json.JSONDecodeError: 196 + continue 176 197 177 - use_info: 
dict[str, Any] = { 198 + event_type = event.get("event") 199 + if event_type == "thinking": 200 + thinking_count += 1 201 + elif event_type == "tool_start": 202 + tool_count += 1 203 + elif event_type == "start": 204 + model = event.get("model") 205 + provider = provider or event.get("provider") 206 + 207 + output_file = None 208 + if request_event.get("output"): 209 + out_path = _resolve_output_path(request_event, state.journal_root) 210 + if out_path and out_path.exists(): 211 + req_day = request_event.get("day") 212 + day_dir = Path(state.journal_root) / req_day if req_day else None 213 + try: 214 + if day_dir and out_path.is_relative_to(day_dir): 215 + output_file = str(out_path.relative_to(day_dir)) 216 + else: 217 + output_file = str(out_path.relative_to(state.journal_root)) 218 + except ValueError: 219 + output_file = None 220 + 221 + use_id = use_file.stem.replace("_active", "") 222 + return { 178 223 "id": use_id, 179 224 "name": request_event["name"], 180 225 "start": request_event.get("ts", 0), 181 - "status": "running" if is_active else "completed", 226 + "status": "running", 182 227 "prompt": request_event.get("prompt", ""), 183 228 "facet": request_event.get("facet"), 184 229 "failed": False, 185 230 "runtime_seconds": None, 186 - "thinking_count": event_data["thinking_count"], 187 - "tool_count": event_data["tool_count"], 231 + "thinking_count": thinking_count, 232 + "tool_count": tool_count, 188 233 "cost": None, 189 - "model": event_data["model"], 190 - "provider": request_event.get("provider") or event_data.get("provider"), 191 - "error_message": event_data["error_message"], 234 + "model": model, 235 + "provider": provider, 236 + "error_message": None, 237 + "output_file": output_file, 192 238 } 193 - 194 - # Check for output file (generators only) 195 - output_file = None 196 - req_output = request_event.get("output") 197 - if req_output: 198 - out_path = _resolve_output_path(request_event, state.journal_root) 199 - if out_path and 
out_path.exists(): 200 - req_day = request_event.get("day") 201 - day_dir = Path(state.journal_root) / req_day if req_day else None 202 - if day_dir and out_path.is_relative_to(day_dir): 203 - output_file = str(out_path.relative_to(day_dir)) 204 - else: 205 - output_file = str(out_path.relative_to(state.journal_root)) 206 - use_info["output_file"] = output_file 207 - 208 - # For completed uses, determine end state and calculate cost 209 - if not is_active: 210 - end_state = get_use_end_state(use_id) 211 - use_info["failed"] = end_state in ("error", "unknown") 212 - 213 - # Calculate runtime from finish or error timestamp 214 - end_ts = event_data["finish_ts"] or event_data["error_ts"] 215 - if end_ts and use_info["start"]: 216 - use_info["runtime_seconds"] = (end_ts - use_info["start"]) / 1000.0 217 - 218 - # Calculate cost 219 - use_info["cost"] = calc_agent_cost(event_data["model"], event_data["usage"]) 220 - 221 - return use_info 222 - except (json.JSONDecodeError, IOError): 239 + except (json.JSONDecodeError, OSError): 223 240 return None 224 241 225 242 226 - def _get_use_day(use_file: Path) -> str: 227 - """Get the logical day for a use from its request event. 228 - 229 - Prefers the ``day`` field from the request event (the day being processed) 230 - over the use_id timestamp (when the agent actually ran). This ensures 231 - overnight think uses appear under the day they processed. 232 - """ 233 - use_id = use_file.stem.replace("_active", "") 234 - try: 235 - with open(use_file, "r") as f: 236 - first_line = f.readline().strip() 237 - if first_line: 238 - request_event = json.loads(first_line) 239 - req_day = request_event.get("day") 240 - if req_day: 241 - return req_day 242 - except (json.JSONDecodeError, IOError): 243 - pass 244 - return _use_id_to_day(use_id) 245 - 246 - 247 243 def _get_uses_for_day(day: str, facet_filter: str | None = None) -> list[dict]: 248 244 """Get all talent uses for a specific day. 
249 245 ··· 260 256 if not talents_dir.exists(): 261 257 return [] 262 258 263 - uses = [] 259 + uses: list[dict[str, Any]] = [] 264 260 265 261 # Read day index for completed uses 266 262 day_index_path = talents_dir / f"{day}.jsonl" ··· 280 276 if facet_filter is not None and entry.get("facet") != facet_filter: 281 277 continue 282 278 283 - # Locate the actual file for full parsing 284 - use_id = entry.get("use_id", "") 285 - name = entry["name"] 286 - safe_name = name.replace(":", "--") 287 - use_file = talents_dir / safe_name / f"{use_id}.jsonl" 288 - if not use_file.exists(): 279 + use_id = entry.get("use_id") or entry.get("agent_id") 280 + if not use_id: 289 281 continue 290 282 291 - use_info = _parse_use_file(use_file) 292 - if use_info: 293 - uses.append(use_info) 283 + status = entry.get("status") 284 + uses.append( 285 + { 286 + "id": use_id, 287 + "name": entry.get("name"), 288 + "start": entry.get("ts"), 289 + "status": status, 290 + "prompt": entry.get("prompt"), 291 + "facet": entry.get("facet"), 292 + "failed": status in ("error", "unknown"), 293 + "runtime_seconds": entry.get("runtime_seconds"), 294 + "thinking_count": entry.get("thinking_count"), 295 + "tool_count": entry.get("tool_count"), 296 + "cost": entry.get("cost"), 297 + "model": entry.get("model"), 298 + "provider": entry.get("provider"), 299 + "error_message": entry.get("error_message"), 300 + "output_file": entry.get("output_file"), 301 + } 302 + ) 294 303 except OSError as exc: 295 304 logging.warning("Failed to read use day index %s: %s", day_index_path, exc) 296 305 ··· 301 310 if _get_use_day(use_file) != day: 302 311 continue 303 312 304 - use_info = _parse_use_file(use_file) 313 + use_info = _parse_active_use_file(use_file) 305 314 if not use_info: 306 315 continue 307 316 ··· 311 320 uses.append(use_info) 312 321 313 322 # Sort by start time (newest first) 314 - uses.sort(key=lambda x: x["start"], reverse=True) 323 + uses.sort(key=lambda x: x.get("start") or 0, reverse=True) 315 
324 return uses 316 325 317 326
+162
tests/test_app_sol.py
··· 323 323 assert resp.status_code == 404 324 324 325 325 326 + @pytest.fixture 327 + def sol_listing_client(tmp_path, monkeypatch): 328 + """Create a sol app client backed by a temporary journal.""" 329 + from flask import Flask 330 + 331 + from apps.sol.routes import sol_bp 332 + from convey import state 333 + 334 + app = Flask(__name__) 335 + app.register_blueprint(sol_bp) 336 + 337 + talents_dir = tmp_path / "talents" 338 + talents_dir.mkdir() 339 + 340 + monkeypatch.setattr(state, "journal_root", str(tmp_path)) 341 + monkeypatch.setattr("apps.sol.routes.get_facets", lambda: {}) 342 + monkeypatch.setattr("apps.sol.routes._build_talents_meta", lambda: {}) 343 + 344 + return app.test_client(), talents_dir 345 + 346 + 347 + def _write_day_index(talents_dir: Path, day: str, entries: list[dict]) -> Path: 348 + path = talents_dir / f"{day}.jsonl" 349 + lines = [json.dumps(entry) + "\n" for entry in entries] 350 + path.write_text("".join(lines), encoding="utf-8") 351 + return path 352 + 353 + 354 + class TestApiTalentsDayListing: 355 + """Tests for day-index-backed talent listing.""" 356 + 357 + def test_index_only_entry_returns_full_summary(self, sol_listing_client): 358 + """A complete day-index entry is enough without a per-use file.""" 359 + client, talents_dir = sol_listing_client 360 + day = "20990101" 361 + entry = { 362 + "use_id": "4070908800001", 363 + "name": "flow", 364 + "day": day, 365 + "facet": "work", 366 + "ts": 4070908800000, 367 + "status": "error", 368 + "runtime_seconds": 12.3, 369 + "provider": "google", 370 + "model": "gemini-2.5-flash", 371 + "schedule": "daily", 372 + "thinking_count": 4, 373 + "tool_count": 2, 374 + "cost": 0.0123, 375 + "error_message": "rate limited", 376 + "output_file": "talents/flow.md", 377 + "prompt": "Summarize the day", 378 + } 379 + _write_day_index(talents_dir, day, [entry]) 380 + 381 + resp = client.get(f"/app/sol/api/talents/{day}") 382 + 383 + assert resp.status_code == 200 384 + uses = resp.get_json()["uses"] 
385 + assert len(uses) == 1 386 + assert uses[0] == { 387 + "id": "4070908800001", 388 + "name": "flow", 389 + "start": 4070908800000, 390 + "status": "error", 391 + "prompt": "Summarize the day", 392 + "facet": "work", 393 + "failed": True, 394 + "runtime_seconds": 12.3, 395 + "thinking_count": 4, 396 + "tool_count": 2, 397 + "cost": 0.0123, 398 + "model": "gemini-2.5-flash", 399 + "provider": "google", 400 + "error_message": "rate limited", 401 + "output_file": "talents/flow.md", 402 + } 403 + 404 + def test_legacy_agent_id_entry_returns_with_blank_new_fields( 405 + self, sol_listing_client 406 + ): 407 + """Legacy agent_id day-index entries are visible with missing fields blank.""" 408 + client, talents_dir = sol_listing_client 409 + day = "20990102" 410 + agent_id = "4070995200001" 411 + _write_day_index( 412 + talents_dir, 413 + day, 414 + [ 415 + { 416 + "agent_id": agent_id, 417 + "name": "entities", 418 + "day": day, 419 + "facet": "personal", 420 + "ts": 4070995200000, 421 + "status": "completed", 422 + "runtime_seconds": 8.4, 423 + "provider": "google", 424 + "model": "gemini-2.5-flash-lite", 425 + } 426 + ], 427 + ) 428 + 429 + resp = client.get(f"/app/sol/api/talents/{day}") 430 + 431 + assert resp.status_code == 200 432 + use = resp.get_json()["uses"][0] 433 + assert use["id"] == agent_id 434 + assert use["failed"] is False 435 + for field in ( 436 + "thinking_count", 437 + "tool_count", 438 + "cost", 439 + "error_message", 440 + "output_file", 441 + "prompt", 442 + ): 443 + assert use[field] is None 444 + 445 + def test_current_thin_entry_returns_without_rewriting_index( 446 + self, sol_listing_client 447 + ): 448 + """Current thin use_id entries return with missing fields blank.""" 449 + client, talents_dir = sol_listing_client 450 + day = "20990103" 451 + index_path = _write_day_index( 452 + talents_dir, 453 + day, 454 + [ 455 + { 456 + "use_id": "4071081600001", 457 + "name": "knowledge_graph", 458 + "day": day, 459 + "facet": None, 460 + "ts": 
4071081600000, 461 + "status": "completed", 462 + "runtime_seconds": 9.1, 463 + "provider": "anthropic", 464 + "model": "claude-sonnet-4-5", 465 + "schedule": "daily", 466 + } 467 + ], 468 + ) 469 + before = index_path.read_bytes() 470 + 471 + resp = client.get(f"/app/sol/api/talents/{day}") 472 + 473 + assert resp.status_code == 200 474 + use = resp.get_json()["uses"][0] 475 + assert use["id"] == "4071081600001" 476 + for field in ( 477 + "thinking_count", 478 + "tool_count", 479 + "cost", 480 + "error_message", 481 + "output_file", 482 + "prompt", 483 + ): 484 + assert use[field] is None 485 + assert index_path.read_bytes() == before 486 + 487 + 326 488 class TestApiUpdatedDays: 327 489 """Tests for api_updated_days endpoint.""" 328 490
+60 -5
think/cortex.py
··· 29 29 from typing import Any, Dict, Optional 30 30 31 31 from think.callosum import CallosumConnection 32 + from think.models import calc_agent_cost 32 33 from think.runner import _atomic_symlink 34 + from think.talent import get_output_path 33 35 from think.talents import TALENT_EXECUTION_MODULE 34 36 from think.utils import get_journal, get_project_root, get_rev, now_ms 35 37 ··· 662 664 except Exception as e: 663 665 self.logger.error(f"Failed to complete talent file {use_id}: {e}") 664 666 667 + def _summarize_output_file(self, request: Dict[str, Any]) -> str | None: 668 + """Return the API-facing output path if it exists at completion time.""" 669 + if not request.get("output"): 670 + return None 671 + 672 + try: 673 + if request.get("output_path"): 674 + out_path = Path(request["output_path"]) 675 + else: 676 + req_day = request.get("day") 677 + if not req_day: 678 + return None 679 + day_dir = self.talents_dir.parent / req_day 680 + req_env = request.get("env") or {} 681 + out_path = get_output_path( 682 + day_dir, 683 + request["name"], 684 + segment=request.get("segment"), 685 + output_format=request.get("output"), 686 + facet=request.get("facet"), 687 + stream=req_env.get("SOL_STREAM"), 688 + ) 689 + 690 + if not out_path.exists(): 691 + return None 692 + 693 + req_day = request.get("day") 694 + day_dir = self.talents_dir.parent / req_day if req_day else None 695 + if day_dir and out_path.is_relative_to(day_dir): 696 + return str(out_path.relative_to(day_dir)) 697 + return str(out_path.relative_to(self.talents_dir.parent)) 698 + except (OSError, ValueError, KeyError): 699 + return None 700 + 665 701 def _append_day_index( 666 702 self, use_id: str, request: Dict[str, Any], completed_path: Path 667 703 ) -> None: ··· 677 713 678 714 start_ts = request.get("ts", 0) 679 715 680 - # Read last few lines to find finish/error event for runtime 716 + thinking_count = 0 717 + tool_count = 0 718 + finish_usage = None 719 + error_message = None 720 + model = 
None 681 721 runtime_seconds = None 682 722 status = "completed" 683 723 try: 684 724 with open(completed_path, "r") as f: 685 725 lines = f.readlines() 686 - for line in reversed(lines[-10:]): 726 + for line in lines: 687 727 line = line.strip() 688 728 if not line: 689 729 continue 690 730 try: 691 731 event = json.loads(line) 692 732 event_type = event.get("event") 733 + if event_type == "thinking": 734 + thinking_count += 1 735 + elif event_type == "tool_start": 736 + tool_count += 1 737 + elif event_type == "start": 738 + model = event.get("model") 739 + 693 740 if event_type == "finish": 741 + status = "completed" 742 + finish_usage = event.get("usage") 694 743 end_ts = event.get("ts", 0) 695 744 if end_ts and start_ts: 696 745 runtime_seconds = round((end_ts - start_ts) / 1000.0, 1) 697 - break 698 746 if event_type == "error": 699 747 status = "error" 748 + msg = event.get("error", "") 749 + error_message = msg[:200] if msg else None 700 750 end_ts = event.get("ts", 0) 701 751 if end_ts and start_ts: 702 752 runtime_seconds = round((end_ts - start_ts) / 1000.0, 1) 703 - break 704 753 except json.JSONDecodeError: 705 754 continue 706 755 except Exception: ··· 715 764 "status": status, 716 765 "runtime_seconds": runtime_seconds, 717 766 "provider": request.get("provider"), 718 - "model": request.get("model"), 767 + "model": model, 719 768 "schedule": request.get("schedule"), 769 + "thinking_count": thinking_count, 770 + "tool_count": tool_count, 771 + "cost": calc_agent_cost(model, finish_usage), 772 + "error_message": error_message if status == "error" else None, 773 + "output_file": self._summarize_output_file(request), 774 + "prompt": request.get("prompt", ""), 720 775 } 721 776 722 777 day_index_path = self.talents_dir / f"{day}.jsonl"