personal memory agent
0
fork

Configure Feed

Select the types of activity you want to include in your feed.

chore: gate `make test` on format check and sweep accumulated drift

Adds a `format-check` target (ruff format --check, no modifications)
and makes `test` depend on it — catches unformatted changes before
the test suite runs, so agent sessions committing without `make format`
fail loudly instead of accumulating silently.

One-time sweep over 54 files (1134 insertions / 437 deletions) brings
the tree back to a clean baseline. All changes are pure ruff-format
output — no lint fixes, no semantic changes. Drift had built up
because there was no gate: no pre-commit hook and no pre-test check,
only `make ci` (which isn't in the routine loop).

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>

+1134 -437
+6 -2
Makefile
··· 1 1 # solstone Makefile 2 2 # Python-based AI-driven desktop journaling toolkit 3 3 4 - .PHONY: install uninstall test test-apps test-app test-only test-integration test-integration-only test-all format ci clean clean-install coverage watch versions update update-prices pre-commit skills dev all sail sandbox sandbox-stop install-pinchtab verify-browser update-browser-baselines review verify-api update-api-baselines install-service uninstall-service 4 + .PHONY: install uninstall test test-apps test-app test-only test-integration test-integration-only test-all format format-check ci clean clean-install coverage watch versions update update-prices pre-commit skills dev all sail sandbox sandbox-stop install-pinchtab verify-browser update-browser-baselines review verify-api update-api-baselines install-service uninstall-service 5 5 6 6 # Default target - install package in editable mode 7 7 all: install ··· 291 291 RUFF := $(VENV_BIN)/ruff 292 292 MYPY := $(VENV_BIN)/mypy 293 293 294 + # Check formatting without modifying files — gates `make test` 295 + format-check: .installed 296 + @$(RUFF) format --check . || { echo "Run 'make format' to fix formatting"; exit 1; } 297 + 294 298 # Run core tests (excluding integration and app tests) 295 - test: .installed 299 + test: .installed format-check 296 300 @echo "Running core tests..." 297 301 $(TEST_ENV) $(PYTEST) tests/ -q --cov=. --ignore=tests/integration 298 302
+4 -2
apps/entities/call.py
··· 154 154 155 155 src_obs = load_observations(from_facet, entity_name) 156 156 dst_obs = load_observations(to_facet, entity_name) 157 - 157 + 158 158 existing_keys = {(o["content"], o.get("observed_at")) for o in dst_obs} 159 159 merged = list(dst_obs) + [ 160 160 o ··· 386 386 return 387 387 388 388 # Validate uniqueness across all entities in facet 389 - entities = load_entities(facet, day=None, include_detached=True, include_blocked=True) 389 + entities = load_entities( 390 + facet, day=None, include_detached=True, include_blocked=True 391 + ) 390 392 391 393 conflict = validate_aka_uniqueness( 392 394 aka_value, entities, exclude_entity_name=resolved_name
+6 -2
apps/entities/talent/entity_observer.py
··· 51 51 if not isinstance(observations, dict) or not observations: 52 52 return None 53 53 54 - valid_entity_ids = {entity.get("id") for entity in load_entities(facet) if entity.get("id")} 54 + valid_entity_ids = { 55 + entity.get("id") for entity in load_entities(facet) if entity.get("id") 56 + } 55 57 56 58 for entity_id, items in observations.items(): 57 59 if entity_id not in valid_entity_ids: ··· 72 74 if not content: 73 75 continue 74 76 if content.lower() in existing: 75 - logger.debug("Skipping duplicate observation for %s: %s", entity_id, content[:60]) 77 + logger.debug( 78 + "Skipping duplicate observation for %s: %s", entity_id, content[:60] 79 + ) 76 80 continue 77 81 add_observation(facet, entity_id, content, day) 78 82 existing.add(content.lower())
+1 -3
apps/graph/routes.py
··· 112 112 113 113 # Map edge entity names to node IDs 114 114 node_ids = {n["id"] for n in nodes} 115 - name_to_id = _build_name_to_node_id( 116 - conn, node_ids, since=since, facet=facet 117 - ) 115 + name_to_id = _build_name_to_node_id(conn, node_ids, since=since, facet=facet) 118 116 edges = [] 119 117 for e in explicit_edges + co_occurrence_edges: 120 118 from_id = name_to_id.get(e["from_name"], e["from_name"])
+3 -1
apps/home/routes.py
··· 556 556 summary = meta.get("activity_type", "") 557 557 typical_time = meta.get("typical_time", "") 558 558 if typical_time: 559 - summary = f"{summary} · {typical_time}" if summary else typical_time 559 + summary = ( 560 + f"{summary} · {typical_time}" if summary else typical_time 561 + ) 560 562 561 563 observations = meta.get("observations", 0) 562 564 last_seen_str = meta.get("last_seen", "")
+34 -11
apps/import/call.py
··· 92 92 os.chmod(config_path, 0o600) 93 93 94 94 95 - def merge_entity_fields(target: EntityDict, source: EntityDict) -> tuple[EntityDict, list[str]]: 95 + def merge_entity_fields( 96 + target: EntityDict, source: EntityDict 97 + ) -> tuple[EntityDict, list[str]]: 96 98 merged: EntityDict = dict(target) 97 99 pre_merge_snapshot = dict(merged) 98 100 ··· 204 206 except json.JSONDecodeError as exc: 205 207 raise ValueError(f"Invalid JSONL at line {line_number}: {exc.msg}") from exc 206 208 if not isinstance(item, dict): 207 - raise ValueError(f"Invalid JSONL at line {line_number}: item must be an object") 209 + raise ValueError( 210 + f"Invalid JSONL at line {line_number}: item must be an object" 211 + ) 208 212 items.append(item) 209 213 return items 210 214 ··· 283 287 @app.command("list-staged") 284 288 def list_staged( 285 289 source: str = typer.Option(..., "--source", help="Import source name."), 286 - area: str | None = typer.Option(None, "--area", help="Area: entities, facets, or config."), 290 + area: str | None = typer.Option( 291 + None, "--area", help="Area: entities, facets, or config." 292 + ), 287 293 ) -> None: 288 294 _, _, state_dir = _resolve_source(source) 289 295 ··· 337 343 source_id: str = typer.Argument(help="Source entity ID."), 338 344 action: str = typer.Argument(help="Action: merge, create, or skip."), 339 345 source: str = typer.Option(..., "--source", help="Import source name."), 340 - target: str | None = typer.Option(None, "--target", help="Target entity ID for merge."), 346 + target: str | None = typer.Option( 347 + None, "--target", help="Target entity ID for merge." 348 + ), 341 349 ) -> None: 342 350 _, _, state_dir = _resolve_source(source) 343 351 ··· 408 416 if reason == "id_collision" or journal_entity_path(final_id).exists(): 409 417 allocated = _allocate_slug(str(created_entity.get("name", ""))) 410 418 if allocated is None: 411 - _fail(f"Unable to allocate a slug for '{created_entity.get('name', '')}'.") 419 + _fail( 420 + f"Unable to allocate a slug for '{created_entity.get('name', '')}'." 421 + ) 412 422 final_id = allocated 413 423 created_entity["id"] = final_id 414 424 ··· 450 460 451 461 @app.command("resolve-facet") 452 462 def resolve_facet( 453 - staged_file: str = typer.Argument(help="Staged file path relative to facets/staged/."), 463 + staged_file: str = typer.Argument( 464 + help="Staged file path relative to facets/staged/." 465 + ), 454 466 action: str = typer.Argument(help="Action: apply or skip."), 455 467 source: str = typer.Option(..., "--source", help="Import source name."), 456 468 ) -> None: ··· 505 517 id_map = entities_state.get("id_map", {}) 506 518 source_entity_id = str(payload.get("source_entity_id", "")) 507 519 if source_entity_id not in id_map: 508 - _fail(f"Entity {source_entity_id} has no mapping yet. Run entity review first.") 520 + _fail( 521 + f"Entity {source_entity_id} has no mapping yet. Run entity review first." 522 + ) 509 523 510 524 source_path = str(payload.get("source_path", "")) 511 525 source_data = str(payload.get("source_data", "")) ··· 544 558 merged_observations.append(item) 545 559 save_observations(facet_name, entity_id, merged_observations) 546 560 elif file_type in {"detected_entities", "activity_records"}: 547 - existing_items = _parse_jsonl_text(target_path.read_text(encoding="utf-8")) if target_path.exists() else [] 561 + existing_items = ( 562 + _parse_jsonl_text(target_path.read_text(encoding="utf-8")) 563 + if target_path.exists() 564 + else [] 565 + ) 548 566 existing_ids = {item.get("id") for item in existing_items} 549 - new_items = [item for item in remapped_data if item.get("id") not in existing_ids] 567 + new_items = [ 568 + item for item in remapped_data if item.get("id") not in existing_ids 569 + ] 550 570 _append_jsonl_items(target_path, new_items) 551 571 else: 552 572 _fail(f"Unsupported staged facet file type '{file_type}'.") ··· 569 589 target_path = Path(get_journal()) / "facets" / facet_name / "facet.json" 570 590 target_path.parent.mkdir(parents=True, exist_ok=True) 571 591 target_path.write_text( 572 - json.dumps(payload.get("source_content"), indent=2, ensure_ascii=False) + "\n", 592 + json.dumps(payload.get("source_content"), indent=2, ensure_ascii=False) 593 + + "\n", 573 594 encoding="utf-8", 574 595 ) 575 596 staged_path.unlink() ··· 603 624 @app.command("resolve-config-all") 604 625 def resolve_config_all( 605 626 source: str = typer.Option(..., "--source", help="Import source name."), 606 - category: str = typer.Option(..., "--category", help="Category: transferable or preference."), 627 + category: str = typer.Option( 628 + ..., "--category", help="Category: transferable or preference." 629 + ), 607 630 ) -> None: 608 631 _, _, state_dir = _resolve_source(source) 609 632
+93 -29
apps/import/facet_ingest.py
··· 74 74 75 75 if file_type == "entity_relationship": 76 76 if len(parts) != 3 or parts[0] != "entities" or parts[2] != "entity.json": 77 - raise ValueError("entity_relationship path must be entities/<id>/entity.json") 77 + raise ValueError( 78 + "entity_relationship path must be entities/<id>/entity.json" 79 + ) 78 80 return path, {"entity_id": parts[1]} 79 81 80 82 if file_type == "entity_observations": ··· 89 91 return path, {"entity_id": parts[1]} 90 92 91 93 if file_type == "detected_entities": 92 - if len(parts) != 2 or parts[0] != "entities" or not _DAY_JSONL_RE.match(parts[1]): 94 + if ( 95 + len(parts) != 2 96 + or parts[0] != "entities" 97 + or not _DAY_JSONL_RE.match(parts[1]) 98 + ): 93 99 raise ValueError("detected_entities path must be entities/YYYYMMDD.jsonl") 94 100 return path, {"day_file": parts[1]} 95 101 ··· 99 105 return path, {} 100 106 101 107 if file_type == "activity_records": 102 - if len(parts) != 2 or parts[0] != "activities" or not _DAY_JSONL_RE.match(parts[1]): 108 + if ( 109 + len(parts) != 2 110 + or parts[0] != "activities" 111 + or not _DAY_JSONL_RE.match(parts[1]) 112 + ): 103 113 raise ValueError("activity_records path must be activities/YYYYMMDD.jsonl") 104 114 return path, {"day_file": parts[1]} 105 115 106 116 if file_type == "activity_output": 107 - if len(parts) < 4 or parts[0] != "activities" or not re.match(r"^\d{8}$", parts[1]): 117 + if ( 118 + len(parts) < 4 119 + or parts[0] != "activities" 120 + or not re.match(r"^\d{8}$", parts[1]) 121 + ): 108 122 raise ValueError( 109 123 "activity_output path must be activities/YYYYMMDD/<activity_id>/..." 110 124 ) ··· 116 130 return path, {"day_file": parts[1]} 117 131 118 132 if file_type == "calendar": 119 - if len(parts) != 2 or parts[0] != "calendar" or not _DAY_JSONL_RE.match(parts[1]): 133 + if ( 134 + len(parts) != 2 135 + or parts[0] != "calendar" 136 + or not _DAY_JSONL_RE.match(parts[1]) 137 + ): 120 138 raise ValueError("calendar path must be calendar/YYYYMMDD.jsonl") 121 139 return path, {"day_file": parts[1]} 122 140 ··· 152 170 except json.JSONDecodeError as exc: 153 171 raise ValueError(f"Invalid JSONL at line {line_number}: {exc.msg}") from exc 154 172 if not isinstance(value, dict): 155 - raise ValueError(f"Invalid JSONL at line {line_number}: item must be an object") 173 + raise ValueError( 174 + f"Invalid JSONL at line {line_number}: item must be an object" 175 + ) 156 176 items.append(value) 157 177 return items 158 178 ··· 166 186 unmapped: list[str] = [] 167 187 168 188 def add(entity_id: str) -> None: 169 - if entity_id and _remap_entity_id(entity_id, id_map) is None and entity_id not in unmapped: 189 + if ( 190 + entity_id 191 + and _remap_entity_id(entity_id, id_map) is None 192 + and entity_id not in unmapped 193 + ): 170 194 unmapped.append(entity_id) 171 195 172 196 if file_type in {"entity_relationship", "entity_observations"}: ··· 200 224 entity_id: str, 201 225 source_data: str, 202 226 ) -> Path: 203 - target_path = staged_dir / facet_name / file_type / _sanitize_stage_name(relative_path) 227 + target_path = ( 228 + staged_dir / facet_name / file_type / _sanitize_stage_name(relative_path) 229 + ) 204 230 target_path.parent.mkdir(parents=True, exist_ok=True) 205 231 payload = { 206 232 "reason": "unmapped_entity", ··· 226 252 source_content: Any, 227 253 target_content: Any, 228 254 ) -> Path: 229 - target_path = staged_dir / facet_name / "facet_json" / _sanitize_stage_name(relative_path) 255 + target_path = ( 256 + staged_dir / facet_name / "facet_json" / _sanitize_stage_name(relative_path) 257 + ) 230 258 target_path.parent.mkdir(parents=True, exist_ok=True) 231 259 payload = { 232 260 "reason": "facet_json_conflict", ··· 258 286 source_content = _parse_json_bytes(raw_bytes) 259 287 if not target_path.exists() or new_facet: 260 288 _write_bytes(target_path, raw_bytes) 261 - return {"status": "written", "reason": "new_facet" if new_facet else "overlap_merged"} 289 + return { 290 + "status": "written", 291 + "reason": "new_facet" if new_facet else "overlap_merged", 292 + } 262 293 263 294 target_content = json.loads(target_path.read_text(encoding="utf-8")) 264 295 if target_content == source_content: ··· 294 325 295 326 merged_relationship = {**source_relationship, **target_relationship} 296 327 save_facet_relationship(facet_name, entity_id, merged_relationship) 297 - return {"status": "written", "reason": "new_facet" if new_facet else "overlap_merged"} 328 + return { 329 + "status": "written", 330 + "reason": "new_facet" if new_facet else "overlap_merged", 331 + } 298 332 299 333 300 334 def _merge_observations( ··· 307 341 source_observations = _parse_jsonl_bytes(raw_bytes) 308 342 target_observations = [] if new_facet else load_observations(facet_name, entity_id) 309 343 seen = { 310 - (item.get("content", ""), item.get("observed_at")) for item in target_observations 344 + (item.get("content", ""), item.get("observed_at")) 345 + for item in target_observations 311 346 } 312 347 merged_observations = list(target_observations) 313 348 for item in source_observations: ··· 318 353 merged_observations.append(item) 319 354 320 355 save_observations(facet_name, entity_id, merged_observations) 321 - return {"status": "written", "reason": "new_facet" if new_facet else "overlap_merged"} 356 + return { 357 + "status": "written", 358 + "reason": "new_facet" if new_facet else "overlap_merged", 359 + } 322 360 323 361 324 362 def _merge_detected_entities( ··· 337 375 continue 338 376 new_items.append(item) 339 377 _append_jsonl(target_path, new_items) 340 - return {"status": "written", "reason": "new_facet" if new_facet else "overlap_merged"} 378 + return { 379 + "status": "written", 380 + "reason": "new_facet" if new_facet else "overlap_merged", 381 + } 341 382 342 383 343 384 def _merge_activity_config( ··· 351 392 existing_ids = {item.get("id") for item in target_items} 352 393 new_items = [item for item in source_items if item.get("id") not in existing_ids] 353 394 _append_jsonl(target_path, new_items) 354 - return {"status": "written", "reason": "new_facet" if new_facet else "overlap_merged"} 395 + return { 396 + "status": "written", 397 + "reason": "new_facet" if new_facet else "overlap_merged", 398 + } 355 399 356 400 357 401 def _merge_activity_records( ··· 365 409 existing_ids = {item.get("id") for item in target_items} 366 410 new_items = [item for item in source_items if item.get("id") not in existing_ids] 367 411 _append_jsonl(target_path, new_items) 368 - return {"status": "written", "reason": "new_facet" if new_facet else "overlap_merged"} 412 + return { 413 + "status": "written", 414 + "reason": "new_facet" if new_facet else "overlap_merged", 415 + } 369 416 370 417 371 418 def _merge_activity_output( ··· 378 425 if output_dir.exists(): 379 426 return {"status": "skipped", "reason": "output_dir_exists"} 380 427 _write_bytes(target_path, raw_bytes) 381 - return {"status": "written", "reason": "new_facet" if new_facet else "overlap_merged"} 428 + return { 429 + "status": "written", 430 + "reason": "new_facet" if new_facet else "overlap_merged", 431 + } 382 432 383 433 384 434 def _merge_todos( ··· 396 446 if (item["text"], item.get("created_at")) not in seen 397 447 ] 398 448 _append_jsonl(target_path, new_items) 399 - return {"status": "written", "reason": "new_facet" if new_facet else "overlap_merged"} 449 + return { 450 + "status": "written", 451 + "reason": "new_facet" if new_facet else "overlap_merged", 452 + } 400 453 401 454 402 455 def _merge_calendar( ··· 409 462 target_items = [] if new_facet else _read_jsonl(target_path) 410 463 seen = {(item["title"], item.get("start")) for item in target_items} 411 464 new_items = [ 412 - item 413 - for item in source_items 414 - if (item["title"], item.get("start")) not in seen 465 + item for item in source_items if (item["title"], item.get("start")) not in seen 415 466 ] 416 467 _append_jsonl(target_path, new_items) 417 - return {"status": "written", "reason": "new_facet" if new_facet else "overlap_merged"} 468 + return { 469 + "status": "written", 470 + "reason": "new_facet" if new_facet else "overlap_merged", 471 + } 418 472 419 473 420 474 def _merge_news( ··· 426 480 if target_path.exists(): 427 481 return {"status": "skipped", "reason": "news_exists"} 428 482 _write_bytes(target_path, raw_bytes) 429 - return {"status": "written", "reason": "new_facet" if new_facet else "overlap_merged"} 483 + return { 484 + "status": "written", 485 + "reason": "new_facet" if new_facet else "overlap_merged", 486 + } 430 487 431 488 432 489 def _merge_logs( ··· 437 494 ) -> dict[str, Any]: 438 495 source_items = _parse_jsonl_bytes(raw_bytes) 439 496 _append_jsonl(target_path, source_items) 440 - return {"status": "written", "reason": "new_facet" if new_facet else "overlap_merged"} 497 + return { 498 + "status": "written", 499 + "reason": "new_facet" if new_facet else "overlap_merged", 500 + } 441 501 442 502 443 503 def _remap_entity_ids( ··· 503 563 def _serialize_jsonl(items: list[dict[str, Any]]) -> bytes: 504 564 if not items: 505 565 return b"" 506 - return "".join(json.dumps(item, ensure_ascii=False) + "\n" for item in items).encode( 507 - "utf-8" 508 - ) 566 + return "".join( 567 + json.dumps(item, ensure_ascii=False) + "\n" for item in items 568 + ).encode("utf-8") 509 569 510 570 511 571 def process_facet( ··· 575 635 parsed_data = _parse_json_bytes(raw_bytes) 576 636 577 637 if file_type in _ENTITY_FILE_TYPES: 578 - unmapped = _check_unmapped_entities(parsed_data, id_map, file_type, path_info) 638 + unmapped = _check_unmapped_entities( 639 + parsed_data, id_map, file_type, path_info 640 + ) 579 641 if unmapped: 580 642 staged_path = _stage_unmapped_entity( 581 643 staged_dir, ··· 659 721 elif file_type == "todos": 660 722 merge_result = _merge_todos(target_path, raw_bytes, new_facet=new_facet) 661 723 elif file_type == "calendar": 662 - merge_result = _merge_calendar(target_path, raw_bytes, new_facet=new_facet) 724 + merge_result = _merge_calendar( 725 + target_path, raw_bytes, new_facet=new_facet 726 + ) 663 727 elif file_type == "news": 664 728 merge_result = _merge_news(target_path, raw_bytes, new_facet=new_facet) 665 729 elif file_type == "logs":
+31 -14
apps/import/ingest.py
··· 31 31 save_journal_entity, 32 32 ) 33 33 from think.entities.matching import find_matching_entity 34 - from think.utils import DEFAULT_STREAM 34 + from think.utils import DEFAULT_STREAM, day_path 35 35 36 36 from .journal_sources import ( 37 37 get_state_directory, ··· 204 204 205 205 original_segment_key = segment_key 206 206 arc_key = f"{stream}/{segment_key}" 207 - day_dir = journal_root / day 207 + day_dir = day_path(day) 208 208 stream_dir = day_dir / stream 209 209 segment_dir = stream_dir / segment_key 210 210 action = "copied" ··· 396 396 ) 397 397 continue 398 398 399 - match = find_matching_entity(entity_data["name"], list(target_entities.values())) 399 + match = find_matching_entity( 400 + entity_data["name"], list(target_entities.values()) 401 + ) 400 402 401 403 if match is not None and match.is_high_confidence: 402 404 target_id = str(match["id"]) ··· 404 406 pre_merge_snapshot = dict(target_entity) 405 407 406 408 aka_by_lower: dict[str, str] = {} 407 - for values in (target_entity.get("aka", []), entity_data.get("aka", [])): 409 + for values in ( 410 + target_entity.get("aka", []), 411 + entity_data.get("aka", []), 412 + ): 408 413 if not isinstance(values, list): 409 414 continue 410 415 for value in values: ··· 414 419 if key not in aka_by_lower: 415 420 aka_by_lower[key] = str(value) 416 421 if aka_by_lower: 417 - target_entity["aka"] = sorted(aka_by_lower.values(), key=str.lower) 422 + target_entity["aka"] = sorted( 423 + aka_by_lower.values(), key=str.lower 424 + ) 418 425 419 426 merged_emails: list[str] = [] 420 427 seen_emails: set[str] = set() ··· 439 446 source_created = entity_data.get("created_at") 440 447 target_created = target_entity.get("created_at") 441 448 if source_created is not None and target_created is not None: 442 - target_entity["created_at"] = min(source_created, target_created) 449 + target_entity["created_at"] = min( 450 + source_created, target_created 451 + ) 443 452 elif source_created is not None: 444 453 target_entity["created_at"] = source_created 445 454 ··· 516 525 "staged_at": datetime.now(timezone.utc).isoformat(), 517 526 } 518 527 (staged_dir / f"{source_id}.json").write_text( 519 - json.dumps(staged_payload, indent=2, ensure_ascii=False) + "\n", 528 + json.dumps(staged_payload, indent=2, ensure_ascii=False) 529 + + "\n", 520 530 encoding="utf-8", 521 531 ) 522 532 staged += 1 ··· 543 553 "staged_at": datetime.now(timezone.utc).isoformat(), 544 554 } 545 555 (staged_dir / f"{source_id}.json").write_text( 546 - json.dumps(staged_payload, indent=2, ensure_ascii=False) + "\n", 556 + json.dumps(staged_payload, indent=2, ensure_ascii=False) 557 + + "\n", 547 558 encoding="utf-8", 548 559 ) 549 560 staged += 1 ··· 585 596 586 597 entity_state["received"][source_id] = content_hash 587 598 except Exception as exc: 588 - entity_id = entity_data.get("id", "") if isinstance(entity_data, dict) else "" 599 + entity_id = ( 600 + entity_data.get("id", "") if isinstance(entity_data, dict) else "" 601 + ) 589 602 errors.append({"entity_id": entity_id, "error": str(exc)}) 590 603 591 604 _write_state_atomic(state_path, entity_state) ··· 682 695 normalized_files: list[dict[str, str]] = [] 683 696 for file_idx, file_meta in enumerate(files): 684 697 if not isinstance(file_meta, dict): 685 - return jsonify({"error": "Facet file metadata must be an object"}), 400 698 + return jsonify( 699 + {"error": "Facet file metadata must be an object"} 700 + ), 400 686 701 687 702 path_value = file_meta.get("path") 688 703 type_value = file_meta.get("type") 689 704 if not isinstance(path_value, str) or not isinstance(type_value, str): 690 705 return ( 691 - jsonify({"error": "Facet file metadata must include path and type"}), 706 + jsonify( 707 + {"error": "Facet file metadata must include path and type"} 708 + ), 692 709 400, 693 710 ) 694 711 ··· 731 748 if written_facets: 732 749 source = g.journal_source 733 750 source.setdefault("stats", {}) 734 - source["stats"]["facets_received"] = ( 735 - source["stats"].get("facets_received", 0) + len(written_facets) 736 - ) 751 + source["stats"]["facets_received"] = source["stats"].get( 752 + "facets_received", 0 753 + ) + len(written_facets) 737 754 save_journal_source(source) 738 755 739 756 return jsonify(
+3 -1
apps/observer/tests/test_observer_client.py
··· 108 108 109 109 assert result.success is True 110 110 assert client._key == "registered-key" 111 - assert mock_session.post.call_args_list[0][0][0].endswith("/app/observer/api/create") 111 + assert mock_session.post.call_args_list[0][0][0].endswith( 112 + "/app/observer/api/create" 113 + ) 112 114 config = json.loads((mock_journal / "config" / "journal.json").read_text()) 113 115 assert config["observe"]["observer"]["key"] == "registered-key" 114 116
+3 -1
apps/photos/call.py
··· 73 73 if not matched: 74 74 return 75 75 76 - conn.execute("DELETE FROM entity_signals WHERE signal_type='photo_cooccurrence'") 76 + conn.execute( 77 + "DELETE FROM entity_signals WHERE signal_type='photo_cooccurrence'" 78 + ) 77 79 78 80 signal_count = 0 79 81 for cluster in clusters:
+1
apps/photos/reader.py
··· 7 7 def read_face_clusters(db_path: str) -> list[dict]: 8 8 conn = sqlite3.connect(f"file:{db_path}?mode=ro", uri=True) 9 9 try: 10 + 10 11 def resolve_table(preferred: str, fallback: str) -> str: 11 12 for table_name in (preferred, fallback): 12 13 row = conn.execute(
+9 -3
apps/photos/tests/test_call.py
··· 15 15 16 16 17 17 def _create_photos_db( 18 - db_path: Path, people: list[tuple[int, str | None]], faces: list[tuple[int, int, int]] 18 + db_path: Path, 19 + people: list[tuple[int, str | None]], 20 + faces: list[tuple[int, int, int]], 19 21 ) -> None: 20 22 conn = sqlite3.connect(db_path) 21 23 try: ··· 217 219 from think.indexer.journal import get_entity_strength 218 220 219 221 results = get_entity_strength() 220 - alice = next((r for r in results if r.get("entity_id") == "alice_johnson"), None) 222 + alice = next( 223 + (r for r in results if r.get("entity_id") == "alice_johnson"), None 224 + ) 221 225 assert alice is not None 222 226 assert "photo_count" in alice 223 227 assert alice["photo_count"] == 2 ··· 258 262 monkeypatch.setenv("_SOLSTONE_JOURNAL_OVERRIDE", str(journal_dir)) 259 263 monkeypatch.setattr(sys, "platform", "darwin") 260 264 261 - result = runner.invoke(call_app, ["photos", "sync", "--library", str(photos_db)]) 265 + result = runner.invoke( 266 + call_app, ["photos", "sync", "--library", str(photos_db)] 267 + ) 262 268 assert result.exit_code == 0 263 269 assert "Found 1 named face clusters." in result.output
+11 -6
apps/settings/call.py
··· 242 242 key_validation[provider] = result 243 243 244 244 providers_config = config.get("providers", {}) 245 - if ( 246 - providers_config.get("google_backend") == "vertex" 247 - and providers_config.get("vertex_credentials") 245 + if providers_config.get("google_backend") == "vertex" and providers_config.get( 246 + "vertex_credentials" 248 247 ): 249 248 result = validate_vertex_credentials(providers_config["vertex_credentials"]) 250 249 result["timestamp"] = datetime.now(timezone.utc).isoformat() ··· 286 285 for provider in providers_list 287 286 } 288 287 vertex_creds_path = providers_config.get("vertex_credentials") 289 - vertex_creds_configured = bool(vertex_creds_path and Path(vertex_creds_path).exists()) 288 + vertex_creds_configured = bool( 289 + vertex_creds_path and Path(vertex_creds_path).exists() 290 + ) 290 291 provider_status = build_provider_status(providers_list, vertex_creds_configured) 291 292 result = { 292 293 "providers": providers_list, ··· 307 308 backup: str | None = typer.Option(None, "--backup", help="Backup provider."), 308 309 ) -> None: 309 310 """Set generate provider defaults.""" 310 - typer.echo(json.dumps(_set_provider_type("generate", provider, tier, backup), indent=2)) 311 + typer.echo( 312 + json.dumps(_set_provider_type("generate", provider, tier, backup), indent=2) 313 + ) 311 314 312 315 313 316 @providers_app.command("set-cogitate") ··· 317 320 backup: str | None = typer.Option(None, "--backup", help="Backup provider."), 318 321 ) -> None: 319 322 """Set cogitate provider defaults.""" 320 - typer.echo(json.dumps(_set_provider_type("cogitate", provider, tier, backup), indent=2)) 323 + typer.echo( 324 + json.dumps(_set_provider_type("cogitate", provider, tier, backup), indent=2) 325 + ) 321 326 322 327 323 328 @providers_app.command("set-auth")
+1 -3
apps/settings/routes.py
··· 472 472 except Exception: 473 473 pass 474 474 475 - provider_status = build_provider_status( 476 - providers_list, vertex_creds_configured 477 - ) 475 + provider_status = build_provider_status(providers_list, vertex_creds_configured) 478 476 479 477 return jsonify( 480 478 {
+6 -2
apps/settings/tests/test_call.py
··· 77 77 def test_keys_clear(self, settings_env): 78 78 tmp_path, _config = settings_env() 79 79 80 - result = runner.invoke(call_app, ["settings", "keys", "clear", "GOOGLE_API_KEY"]) 80 + result = runner.invoke( 81 + call_app, ["settings", "keys", "clear", "GOOGLE_API_KEY"] 82 + ) 81 83 82 84 assert result.exit_code == 0 83 85 saved = json.loads((tmp_path / "config" / "journal.json").read_text()) ··· 281 283 encoding="utf-8", 282 284 ) 283 285 284 - with patch("think.providers.google.validate_vertex_credentials") as mock_validate: 286 + with patch( 287 + "think.providers.google.validate_vertex_credentials" 288 + ) as mock_validate: 285 289 result = runner.invoke( 286 290 call_app, 287 291 [
+14 -10
apps/speakers/bootstrap.py
··· 113 113 existing_meta_strings = data["metadata"] 114 114 existing_meta_dicts = [json.loads(m) for m in existing_meta_strings] 115 115 except (FileNotFoundError, ValueError, np.lib.npyio.NpzFile) as e: 116 - logger.warning(f"Failed to load existing voiceprints for {entity_id} from {npz_path}: {e}. Starting fresh.") 116 + logger.warning( 117 + f"Failed to load existing voiceprints for {entity_id} from {npz_path}: {e}. Starting fresh." 118 + ) 117 119 existing_emb = np.empty((0, 256), dtype=np.float32) 118 120 existing_meta_dicts = [] 119 - except Exception as e: # Catch other potential errors during loading 120 - logger.error(f"Unexpected error loading existing voiceprints for {entity_id} from {npz_path}: {e}") 121 + except Exception as e: # Catch other potential errors during loading 122 + logger.error( 123 + f"Unexpected error loading existing voiceprints for {entity_id} from {npz_path}: {e}" 124 + ) 121 125 raise 122 126 else: 123 127 existing_emb = np.empty((0, 256), dtype=np.float32) ··· 134 138 if new_emb_list: 135 139 new_emb_np = np.vstack(new_emb_list) 136 140 combined_emb = ( 137 - np.vstack([existing_emb, new_emb_np]) if len(existing_emb) > 0 else new_emb_np 141 + np.vstack([existing_emb, new_emb_np]) 142 + if len(existing_emb) > 0 143 + else new_emb_np 138 144 ) 139 145 # Combine the metadata dictionaries 140 146 combined_meta_dicts = existing_meta_dicts + new_meta_dicts 141 - else: # Should not happen if new_items is not empty, but for safety 147 + else: # Should not happen if new_items is not empty, but for safety 142 148 combined_emb = existing_emb 143 149 combined_meta_dicts = existing_meta_dicts 144 150 ··· 146 152 try: 147 153 # Import the utility function 148 154 from apps.speakers.voiceprint_io import save_voiceprints_safely 149 - 155 + 150 156 save_voiceprints_safely( 151 157 npz_path=npz_path, 152 158 embeddings=combined_emb, 153 - metadata=combined_meta_dicts # Pass metadata as a list of dicts 159 + metadata=combined_meta_dicts, # Pass metadata as a list of dicts 154 160 ) 155 161 return len(new_items) 156 162 except Exception as e: ··· 882 888 others = [e for eid, e in all_entities.items() if eid != entity_id] 883 889 conflict = find_matching_entity(name, others) 884 890 if conflict: 885 - return { 886 - "error": f"Name '{name}' conflicts with entity '{conflict['id']}'" 887 - } 891 + return {"error": f"Name '{name}' conflicts with entity '{conflict['id']}'"} 888 892 889 893 existing_aka = set(entity.get("aka", [])) 890 894 already_present = name in existing_aka
+16 -6
apps/speakers/voiceprint_io.py
··· 20 20 logger = logging.getLogger(__name__) 21 21 22 22 23 - def save_voiceprints_safely(npz_path: Path, embeddings: np.ndarray, metadata: dict) -> None: 23 + def save_voiceprints_safely( 24 + npz_path: Path, embeddings: np.ndarray, metadata: dict 25 + ) -> None: 24 26 """ 25 27 Safely saves voiceprint data to an NPZ file with file locking and integrity check. 26 28 ··· 60 62 tmp_path.rename(npz_path) 61 63 else: 62 64 # This should ideally not happen if np.savez_compressed succeeded 63 - raise FileNotFoundError(f"Temporary voiceprint file not found: {tmp_path}") 65 + raise FileNotFoundError( 66 + f"Temporary voiceprint file not found: {tmp_path}" 67 + ) 64 68 65 69 # --- Integrity Check --- 66 70 try: ··· 70 74 # For now, assume standard numpy savz_compressed data. 71 75 with np.load(npz_path, allow_pickle=False) as data: 72 76 # Basic check: ensure expected keys exist 73 - if 'embeddings' not in data or 'metadata' not in data: 74 - raise ValueError("Missing 'embeddings' or 'metadata' keys in loaded NPZ.") 75 - logger.info(f"Successfully wrote and verified voiceprint file: {npz_path}") 77 + if "embeddings" not in data or "metadata" not in data: 78 + raise ValueError( 79 + "Missing 'embeddings' or 'metadata' keys in loaded NPZ." 80 + ) 81 + logger.info( 82 + f"Successfully wrote and verified voiceprint file: {npz_path}" 83 + ) 76 84 77 85 except (FileNotFoundError, ValueError, np.lib.npyio.NpzFile) as e: 78 86 logger.error( ··· 95 103 try: 96 104 tmp_path.unlink() 97 105 except OSError as rm_err: 98 - logger.error(f"Failed to clean up temporary file {tmp_path}: {rm_err}") 106 + logger.error( 107 + f"Failed to clean up temporary file {tmp_path}: {rm_err}" 108 + ) 99 109 raise e # Re-raise the original exception 100 110 101 111 finally:
+3 -1
apps/transcripts/routes.py
··· 99 99 return error_response("Day not found", 404) 100 100 101 101 audio_ranges, screen_ranges, segments = scan_day(day) 102 - return jsonify({"audio": audio_ranges, "screen": screen_ranges, "segments": segments}) 102 + return jsonify( 103 + {"audio": audio_ranges, "screen": screen_ranges, "segments": segments} 104 + ) 103 105 104 106 105 107 @transcripts_bp.route("/api/serve_file/<day>/<path:encoded_path>")
+2 -6
convey/apps.py
··· 226 226 return attention.placeholder_text 227 227 imports = awareness_current.get("imports", {}) 228 228 if not imports.get("has_imported") and day_count < 3: 229 - return ( 230 - "Bring in past conversations, calendar, or notes to give me context..." 231 - ) 229 + return "Bring in past conversations, calendar, or notes to give me context..." 232 230 if awareness_current.get("journal", {}).get("first_daily_ready"): 233 231 if day_count < 2: 234 232 return "Your first daily analysis is ready — ask me what I found..." 235 233 if day_count >= 7: 236 - return ( 237 - "Ask me about your day, search your journal, or explore insights..." 238 - ) 234 + return "Ask me about your day, search your journal, or explore insights..." 239 235 return "Your daily analysis is ready — ask about today or anything in your journal..." 240 236 return "Capture is running — your first daily analysis will be ready soon..." 241 237
+1 -3
convey/system.py
··· 102 102 observers = list_observers() 103 103 # Filter to active (non-revoked, enabled) observers 104 104 active = [ 105 - o 106 - for o in observers 107 - if not o.get("revoked", False) and o.get("enabled", True) 105 + o for o in observers if not o.get("revoked", False) and o.get("enabled", True) 108 106 ] 109 107 110 108 if not active:
+2 -2
observe/describe.py
··· 310 310 except av.error.InvalidDataError as e: 311 311 logger.error( 312 312 f"Invalid video data error for {self.video_path}: {e}. Skipping video.", 313 - exc_info=True 313 + exc_info=True, 314 314 ) 315 315 return [] 316 316 except Exception as e: 317 317 logger.error( 318 318 f"Unexpected error processing video {self.video_path}: {e}", 319 - exc_info=True 319 + exc_info=True, 320 320 ) 321 321 raise 322 322 return self.qualified_frames
+3 -1
observe/observer_cli.py
··· 438 438 sub.add_parser("list", help="List all registered observers") 439 439 440 440 # rename 441 - p_rename = sub.add_parser("rename", help="Rename an observer (affects future streams)") 441 + p_rename = sub.add_parser( 442 + "rename", help="Rename an observer (affects future streams)" 443 + ) 442 444 p_rename.add_argument("identifier", help="Observer name or key prefix") 443 445 p_rename.add_argument("new_name", help="New name for the observer") 444 446
+4 -2
observe/observer_client.py
··· 206 206 data["platform"] = self._platform 207 207 if meta: 208 208 data["meta"] = json.dumps(meta) 209 - 209 + 210 210 headers = {} 211 211 if self._key: 212 212 headers["Authorization"] = f"Bearer {self._key}" 213 - logger.debug(f"Sending Authorization header: Bearer {self._key[:8]}...") 213 + logger.debug( 214 + f"Sending Authorization header: Bearer {self._key[:8]}..." 215 + ) 214 216 215 217 response = self._session.post( 216 218 url,
+12 -3
tests/test_activity_state_machine.py
··· 483 483 sm.update(_sense(content_type="meeting"), "090500_300", "20260304") 484 484 485 485 rec = sm.get_completed_activities()[0] 486 - required = {"id", "activity", "segments", "level_avg", "description", 487 - "active_entities", "created_at"} 486 + required = { 487 + "id", 488 + "activity", 489 + "segments", 490 + "level_avg", 491 + "description", 492 + "active_entities", 493 + "created_at", 494 + } 488 495 assert required.issubset(rec.keys()) 489 496 # No internal _fields should leak 490 497 assert not any(k.startswith("_") for k in rec.keys()) ··· 497 504 {"type": "Person", "name": "Alice", "context": "dev"}, 498 505 {"type": "Tool", "name": "VSCode", "context": "editor"}, 499 506 ] 500 - sm.update(_sense(content_type="coding", entities=entities), "090000_300", "20260304") 507 + sm.update( 508 + _sense(content_type="coding", entities=entities), "090000_300", "20260304" 509 + ) 501 510 sm.update(_sense(content_type="meeting"), "090500_300", "20260304") 502 511 503 512 rec = sm.get_completed_activities()[0]
+9 -3
tests/test_agents_check.py
··· 418 418 monkeypatch.setattr("think.providers.PROVIDER_REGISTRY", fake_registry) 419 419 monkeypatch.setattr("think.models.PROVIDER_DEFAULTS", fake_defaults) 420 420 monkeypatch.setattr(agents, "get_journal", lambda: str(tmp_path)) 421 - monkeypatch.setattr(agents, "_check_generate", lambda *_args: ("skip", "not configured")) 421 + monkeypatch.setattr( 422 + agents, "_check_generate", lambda *_args: ("skip", "not configured") 423 + ) 422 424 423 425 async def mock_check_cogitate(*_args): 424 426 return "skip", "not configured" ··· 458 460 monkeypatch.setattr("think.providers.PROVIDER_REGISTRY", fake_registry) 459 461 monkeypatch.setattr("think.models.PROVIDER_DEFAULTS", fake_defaults) 460 462 monkeypatch.setattr(agents, "get_journal", lambda: str(tmp_path)) 461 - monkeypatch.setattr(agents, "_check_generate", lambda *_args: ("skip", "not configured")) 463 + monkeypatch.setattr( 464 + agents, "_check_generate", lambda *_args: ("skip", "not configured") 465 + ) 462 466 463 467 async def mock_check_cogitate(*_args): 464 468 return "fail", "FAIL: broken" ··· 527 531 assert exc_info.value.code == 0 528 532 payload = json.loads((tmp_path / "health" / "agents.json").read_text()) 529 533 summary = payload["summary"] 530 - assert summary["total"] == summary["passed"] + summary["skipped"] + summary["failed"] 534 + assert ( 535 + summary["total"] == summary["passed"] + summary["skipped"] + summary["failed"] 536 + ) 531 537 assert summary["passed"] == 6 532 538 assert summary["skipped"] == 6 533 539 assert summary["failed"] == 0
+1 -3
tests/test_app_sol.py
··· 303 303 304 304 def test_missing_file_returns_404(self, agents_client): 305 305 """Non-existent file returns 404.""" 306 - resp = agents_client.get( 307 - "/app/sol/api/output/20260214/agents/nonexistent.md" 308 - ) 306 + resp = agents_client.get("/app/sol/api/output/20260214/agents/nonexistent.md") 309 307 assert resp.status_code == 404
+1
tests/test_awareness.py
··· 185 185 assert state["journal"]["first_daily_ready"] is True 186 186 assert state["journal"]["first_daily_ready_at"] == "20260308T14:00:00" 187 187 188 + 188 189 class TestComputeThickness: 189 190 """Tests for compute_thickness().""" 190 191
+6 -2
tests/test_chat_context.py
··· 94 94 """Awareness failures still return the full template var shape.""" 95 95 monkeypatch.setattr("think.conversation.build_memory_context", lambda **kw: "") 96 96 monkeypatch.setattr("think.routines.get_routine_state", lambda: []) 97 - monkeypatch.setattr("think.routines.get_config", lambda: {"_meta": {"suggestions": {}}}) 97 + monkeypatch.setattr( 98 + "think.routines.get_config", lambda: {"_meta": {"suggestions": {}}} 99 + ) 98 100 monkeypatch.setattr( 99 101 "think.utils.get_config", 100 102 lambda: {"agent": {"name": "aria", "name_status": "default"}}, ··· 182 184 "think.routines.get_routine_state", 183 185 lambda: (_ for _ in ()).throw(RuntimeError("boom")), 184 186 ) 185 - monkeypatch.setattr("think.routines.get_config", lambda: {"_meta": {"suggestions": {}}}) 187 + monkeypatch.setattr( 188 + "think.routines.get_config", lambda: {"_meta": {"suggestions": {}}} 189 + ) 186 190 monkeypatch.setattr( 187 191 "think.utils.get_config", 188 192 lambda: {"agent": {"name": "aria", "name_status": "default"}},
+1
tests/test_convey_apps.py
··· 34 34 assert response.status_code == 200 35 35 return mock_spawn 36 36 37 + 37 38 # --- Placeholder resolution --- 38 39 39 40
+94 -38
tests/test_dream_activity.py
··· 458 458 class TestActivityPersistenceRoundTrip: 459 459 """Full round-trip: state machine → append → load → field verification.""" 460 460 461 - def _sense(self, content_type="coding", density="active", facets=None, 462 - summary="Working.", entities=None): 461 + def _sense( 462 + self, 463 + content_type="coding", 464 + density="active", 465 + facets=None, 466 + summary="Working.", 467 + entities=None, 468 + ): 463 469 if facets is None: 464 470 facets = [{"facet": "work", "activity": content_type, "level": "high"}] 465 471 return { ··· 486 492 sm.update(self._sense(content_type="coding"), "090500_300", "20260304") 487 493 sm.update(self._sense(content_type="coding"), "091000_300", "20260304") 488 494 # End via type change 489 - changes = sm.update(self._sense(content_type="meeting"), "091500_300", "20260304") 495 + changes = sm.update( 496 + self._sense(content_type="meeting"), "091500_300", "20260304" 497 + ) 490 498 491 499 ended = [c for c in changes if c.get("state") == "ended"] 492 500 assert len(ended) == 1 ··· 546 554 547 555 sm = ActivityStateMachine() 548 556 sm.update(self._sense(content_type="coding"), "090000_300", "20260304") 549 - changes = sm.update(self._sense(content_type="meeting"), "090500_300", "20260304") 557 + changes = sm.update( 558 + self._sense(content_type="meeting"), "090500_300", "20260304" 559 + ) 550 560 551 561 ended = [c for c in changes if c.get("state") == "ended"] 552 562 rec = sm.get_completed_activities()[0] ··· 572 582 sm = ActivityStateMachine() 573 583 # Activity 1 ends 574 584 sm.update(self._sense(content_type="coding"), "090000_300", "20260304") 575 - changes1 = sm.update(self._sense(content_type="meeting"), "090500_300", "20260304") 585 + changes1 = sm.update( 586 + self._sense(content_type="meeting"), "090500_300", "20260304" 587 + ) 576 588 facet_by_id = { 577 589 c["id"]: c.get("_facet", "__") 578 590 for c in changes1 ··· 583 595 append_activity_record(facet_by_id[rec["id"]], "20260304", rec) 584 596 585 597 # Activity 2 continues (no ending) 586 - changes2 = sm.update(self._sense(content_type="meeting"), "091000_300", "20260304") 598 + changes2 = sm.update( 599 + self._sense(content_type="meeting"), "091000_300", "20260304" 600 + ) 587 601 # No ended changes in this update 588 602 facet_by_id2 = { 589 603 c["id"]: c.get("_facet", "__") ··· 613 627 ] 614 628 sm = ActivityStateMachine() 615 629 sm.update( 616 - self._sense(content_type="coding", entities=entities, 617 - summary="Pair programming with Alice"), 618 - "090000_300", "20260304", 630 + self._sense( 631 + content_type="coding", 632 + entities=entities, 633 + summary="Pair programming with Alice", 634 + ), 635 + "090000_300", 636 + "20260304", 619 637 ) 620 638 sm.update(self._sense(content_type="meeting"), "090500_300", "20260304") 621 639 ··· 641 659 monkeypatch.setenv("_SOLSTONE_JOURNAL_OVERRIDE", tmpdir) 642 660 643 661 sm = ActivityStateMachine() 644 - sm.update(self._sense(content_type="coding", facets=[]), 645 - "090000_300", "20260304") 662 + sm.update( 663 + self._sense(content_type="coding", facets=[]), "090000_300", "20260304" 664 + ) 646 665 changes = sm.update(self._sense(density="idle"), "090500_300", "20260304") 647 666 648 667 facet_by_id = { ··· 725 744 sm = ActivityStateMachine() 726 745 sm.update( 727 746 { 728 - "density": "active", "content_type": "coding", 729 - "activity_summary": "test", "entities": [], 730 - "facets": [{"facet": "work", "activity": "coding", "level": "high"}], 731 - "meeting_detected": False, "speakers": [], "recommend": {}, 747 + "density": "active", 748 + "content_type": "coding", 749 + "activity_summary": "test", 750 + "entities": [], 751 + "facets": [ 752 + {"facet": "work", "activity": "coding", "level": "high"} 753 + ], 754 + "meeting_detected": False, 755 + "speakers": [], 756 + "recommend": {}, 732 757 }, 733 - "090000_300", "20260304", 758 + "090000_300", 759 + "20260304", 734 760 ) 735 761 sm.update( 736 762 { 737 - "density": "active", "content_type": "meeting", 738 - "activity_summary": "standup", "entities": [], 739 - "facets": [{"facet": "work", "activity": "meeting", "level": "medium"}], 740 - "meeting_detected": True, "speakers": [], "recommend": {}, 763 + "density": "active", 764 + "content_type": "meeting", 765 + "activity_summary": "standup", 766 + "entities": [], 767 + "facets": [ 768 + {"facet": "work", "activity": "meeting", "level": "medium"} 769 + ], 770 + "meeting_detected": True, 771 + "speakers": [], 772 + "recommend": {}, 741 773 }, 742 - "090500_300", "20260304", 774 + "090500_300", 775 + "20260304", 743 776 ) 744 777 rec = sm.get_completed_activities()[0] 745 778 append_activity_record("work", "20260304", rec) ··· 774 807 sm = ActivityStateMachine() 775 808 sm.update( 776 809 { 777 - "density": "active", "content_type": "coding", 778 - "activity_summary": "first", "entities": [], 779 - "facets": [{"facet": "work", "activity": "coding", "level": "high"}], 780 - "meeting_detected": False, "speakers": [], "recommend": {}, 810 + "density": "active", 811 + "content_type": "coding", 812 + "activity_summary": "first", 813 + "entities": [], 814 + "facets": [ 815 + {"facet": "work", "activity": "coding", "level": "high"} 816 + ], 817 + "meeting_detected": False, 818 + "speakers": [], 819 + "recommend": {}, 781 820 }, 782 - "090000_300", "20260304", 821 + "090000_300", 822 + "20260304", 783 823 ) 784 824 changes1 = sm.update( 785 825 { 786 - "density": "active", "content_type": "meeting", 787 - "activity_summary": "second", "entities": [], 788 - "facets": [{"facet": "work", "activity": "meeting", "level": "medium"}], 789 - "meeting_detected": True, "speakers": [], "recommend": {}, 826 + "density": "active", 827 + "content_type": "meeting", 828 + "activity_summary": "second", 829 + "entities": [], 830 + "facets": [ 831 + {"facet": "work", "activity": "meeting", "level": "medium"} 832 + ], 833 + "meeting_detected": True, 834 + "speakers": [], 835 + "recommend": {}, 790 836 }, 791 - "090500_300", "20260304", 837 + "090500_300", 838 + "20260304", 792 839 ) 793 840 # Persist first completed 794 841 facet_by_id = { 795 842 c["id"]: c.get("_facet", "__") 796 - for c in changes1 if c.get("state") == "ended" 843 + for c in changes1 844 + if c.get("state") == "ended" 797 845 } 798 846 for rec in sm.get_completed_activities(): 799 847 if rec["id"] in facet_by_id: ··· 804 852 805 853 changes2 = sm.update( 806 854 { 807 - "density": "active", "content_type": "coding", 808 - "activity_summary": "third", "entities": [], 809 - "facets": [{"facet": "work", "activity": "coding", "level": "high"}], 810 - "meeting_detected": False, "speakers": [], "recommend": {}, 855 + "density": "active", 856 + "content_type": "coding", 857 + "activity_summary": "third", 858 + "entities": [], 859 + "facets": [ 860 + {"facet": "work", "activity": "coding", "level": "high"} 861 + ], 862 + "meeting_detected": False, 863 + "speakers": [], 864 + "recommend": {}, 811 865 }, 812 - "091000_300", "20260304", 866 + "091000_300", 867 + "20260304", 813 868 ) 814 869 facet_by_id2 = { 815 870 c["id"]: c.get("_facet", "__") 816 - for c in changes2 if c.get("state") == "ended" 871 + for c in changes2 872 + if c.get("state") == "ended" 817 873 } 818 874 for rec in sm.get_completed_activities(): 819 875 if rec["id"] in facet_by_id2:
+3 -1
tests/test_entity_ingest.py
··· 442 442 443 443 def test_stats_update(ingest_env): 444 444 env = ingest_env 445 - save_journal_entity({"id": "alice_johnson", "name": "Alice Johnson", "type": "Person"}) 445 + save_journal_entity( 446 + {"id": "alice_johnson", "name": "Alice Johnson", "type": "Person"} 447 + ) 446 448 447 449 response = _post_entities( 448 450 env["client"],
+15 -3
tests/test_entity_observer_context.py
··· 72 72 assert result 73 73 assert "Juliet Capulet" in result 74 74 assert "Knowledge Graph" in result 75 - assert "Prepared revenue projections for Verona Platform board presentation" in result 75 + assert ( 76 + "Prepared revenue projections for Verona Platform board presentation" in result 77 + ) 76 78 77 79 78 80 def test_assemble_observer_context_no_kg(tmp_path): ··· 125 127 _attach_entity(tmp_path, facet, entity_id, "Alice Johnson") 126 128 _write_jsonl( 127 129 tmp_path / "facets" / facet / "entities" / f"{day}.jsonl", 128 - [{"id": entity_id, "type": "Person", "name": "Alice Johnson", "description": ""}], 130 + [ 131 + { 132 + "id": entity_id, 133 + "type": "Person", 134 + "name": "Alice Johnson", 135 + "description": "", 136 + } 137 + ], 129 138 ) 130 139 _write_jsonl( 131 140 tmp_path / _obs_path(facet, entity_id), ··· 283 292 "observations": { 284 293 "alice_johnson": [ 285 294 {"content": "Prefers morning meetings", "reasoning": "dupe"}, 286 - {"content": "Expert in distributed systems", "reasoning": "new"}, 295 + { 296 + "content": "Expert in distributed systems", 297 + "reasoning": "new", 298 + }, 287 299 ] 288 300 }, 289 301 "skipped": [],
+174 -59
tests/test_facet_ingest.py
··· 72 72 }, 73 73 "received": {}, 74 74 } 75 - ( 76 - get_state_directory(key_prefix) / "entities" / "state.json" 77 - ).write_text(json.dumps(entity_state, indent=2), encoding="utf-8") 75 + (get_state_directory(key_prefix) / "entities" / "state.json").write_text( 76 + json.dumps(entity_state, indent=2), encoding="utf-8" 77 + ) 78 78 79 79 app = Flask(__name__) 80 80 app.config["TESTING"] = True ··· 136 136 ] 137 137 138 138 139 - def _read_staged(key_prefix: str, facet: str, file_type: str, relative_path: str) -> dict: 139 + def _read_staged( 140 + key_prefix: str, facet: str, file_type: str, relative_path: str 141 + ) -> dict: 140 142 staged_name = relative_path.replace("/", "__") + ".staged.json" 141 143 staged_path = ( 142 144 get_state_directory(key_prefix) ··· 154 156 155 157 156 158 def _jsonl_bytes(items: list[dict]) -> bytes: 157 - return "".join(json.dumps(item, ensure_ascii=False) + "\n" for item in items).encode( 158 - "utf-8" 159 - ) 159 + return "".join( 160 + json.dumps(item, ensure_ascii=False) + "\n" for item in items 161 + ).encode("utf-8") 160 162 161 163 162 164 def _read_json(path: Path) -> dict: ··· 289 291 { 290 292 "name": "personal", 291 293 "files": [ 292 - {"path": "facet.json", "type": "facet_json", "content": _json_bytes({"title": "Personal"})}, 294 + { 295 + "path": "facet.json", 296 + "type": "facet_json", 297 + "content": _json_bytes({"title": "Personal"}), 298 + }, 293 299 { 294 300 "path": "entities/same_entity/entity.json", 295 301 "type": "entity_relationship", 296 - "content": _json_bytes({"description": "Close contact", "attached_at": 100}), 302 + "content": _json_bytes( 303 + {"description": "Close contact", "attached_at": 100} 304 + ), 297 305 }, 298 306 { 299 307 "path": "entities/same_entity/observations.jsonl", ··· 359 367 ] 360 368 metadata, file_map = _build_request(facets) 361 369 362 - response = _post_facets(env["client"], env["key"], env["key_prefix"], metadata, file_map) 370 + response = _post_facets( 371 + env["client"], env["key"], env["key_prefix"], metadata, file_map 372 + ) 363 373 364 374 assert response.status_code == 200 365 375 assert response.get_json() == { ··· 380 390 assert _read_jsonl_file( 381 391 facet_root / "entities" / "same_entity" / "observations.jsonl" 382 392 ) == [{"content": "Likes tea", "observed_at": 1}] 383 - assert _read_jsonl_file(facet_root / "entities" / "20260305.jsonl")[0]["id"] == "same_entity" 384 - assert _read_jsonl_file(facet_root / "activities" / "activities.jsonl")[0]["id"] == "coding" 385 - assert _read_jsonl_file(facet_root / "activities" / "20260305.jsonl")[0]["id"] == "coding_093000_300" 393 + assert ( 394 + _read_jsonl_file(facet_root / "entities" / "20260305.jsonl")[0]["id"] 395 + == "same_entity" 396 + ) 397 + assert ( 398 + _read_jsonl_file(facet_root / "activities" / "activities.jsonl")[0]["id"] 399 + == "coding" 400 + ) 401 + assert ( 402 + _read_jsonl_file(facet_root / "activities" / "20260305.jsonl")[0]["id"] 403 + == "coding_093000_300" 404 + ) 386 405 assert ( 387 - facet_root / "activities" / "20260305" / "coding_093000_300" / "session_review.md" 406 + facet_root 407 + / "activities" 408 + / "20260305" 409 + / "coding_093000_300" 410 + / "session_review.md" 388 411 ).read_text(encoding="utf-8") == "# Session\n" 389 - assert _read_jsonl_file(facet_root / "todos" / "20260305.jsonl")[0]["text"] == "Ship it" 390 - assert _read_jsonl_file(facet_root / "calendar" / "20260305.jsonl")[0]["title"] == "Standup" 391 - assert (facet_root / "news" / "20260305.md").read_text(encoding="utf-8") == "# News\n" 392 - assert _read_jsonl_file(facet_root / "logs" / "20260305.jsonl")[0]["event"] == "ingested" 412 + assert ( 413 + _read_jsonl_file(facet_root / "todos" / "20260305.jsonl")[0]["text"] 414 + == "Ship it" 415 + ) 416 + assert ( 417 + _read_jsonl_file(facet_root / "calendar" / "20260305.jsonl")[0]["title"] 418 + == "Standup" 419 + ) 420 + assert (facet_root / "news" / "20260305.md").read_text( 421 + encoding="utf-8" 422 + ) == "# News\n" 423 + assert ( 424 + _read_jsonl_file(facet_root / "logs" / "20260305.jsonl")[0]["event"] 425 + == "ingested" 426 + ) 393 427 394 428 source = load_journal_source(env["key"]) 395 429 assert source["stats"]["facets_received"] == 1 ··· 397 431 398 432 def test_existing_facet_merge_entity_relationship(ingest_env): 399 433 env = ingest_env 400 - target_path = env["root"] / "facets" / "work" / "entities" / "same_entity" / "entity.json" 434 + target_path = ( 435 + env["root"] / "facets" / "work" / "entities" / "same_entity" / "entity.json" 436 + ) 401 437 _write_json( 402 438 target_path, 403 439 {"entity_id": "same_entity", "description": "Keep target", "attached_at": 200}, ··· 410 446 { 411 447 "path": "entities/same_entity/entity.json", 412 448 "type": "entity_relationship", 413 - "content": _json_bytes({"description": "Source desc", "last_seen": 999}), 449 + "content": _json_bytes( 450 + {"description": "Source desc", "last_seen": 999} 451 + ), 414 452 } 415 453 ], 416 454 } 417 455 ] 418 456 metadata, file_map = _build_request(facets) 419 - response = _post_facets(env["client"], env["key"], env["key_prefix"], metadata, file_map) 457 + response = _post_facets( 458 + env["client"], env["key"], env["key_prefix"], metadata, file_map 459 + ) 420 460 421 461 assert response.status_code == 200 422 462 assert response.get_json()["merged"] == 1 ··· 431 471 def test_existing_facet_merge_observations(ingest_env): 432 472 env = ingest_env 433 473 target_path = ( 434 - env["root"] / "facets" / "work" / "entities" / "same_entity" / "observations.jsonl" 474 + env["root"] 475 + / "facets" 476 + / "work" 477 + / "entities" 478 + / "same_entity" 479 + / "observations.jsonl" 435 480 ) 436 481 _write_jsonl( 437 482 target_path, ··· 459 504 } 460 505 ] 461 506 metadata, file_map = _build_request(facets) 462 - response = _post_facets(env["client"], env["key"], env["key_prefix"], metadata, file_map) 507 + response = _post_facets( 508 + env["client"], env["key"], env["key_prefix"], metadata, file_map 509 + ) 463 510 464 511 assert response.status_code == 200 465 512 assert response.get_json()["merged"] == 1 ··· 493 540 } 494 541 ] 495 542 metadata, file_map = _build_request(facets) 496 - response = _post_facets(env["client"], env["key"], env["key_prefix"], metadata, file_map) 543 + response = _post_facets( 544 + env["client"], env["key"], env["key_prefix"], metadata, file_map 545 + ) 497 546 498 547 assert response.status_code == 200 499 548 assert response.get_json()["merged"] == 1 ··· 524 573 } 525 574 ] 526 575 metadata, file_map = _build_request(facets) 527 - response = _post_facets(env["client"], env["key"], env["key_prefix"], metadata, file_map) 576 + response = _post_facets( 577 + env["client"], env["key"], env["key_prefix"], metadata, file_map 578 + ) 528 579 529 580 assert response.status_code == 200 530 581 assert response.get_json()["merged"] == 1 531 - assert [item["id"] for item in _read_jsonl_file(target_path)] == ["coding", "meeting"] 582 + assert [item["id"] for item in _read_jsonl_file(target_path)] == [ 583 + "coding", 584 + "meeting", 585 + ] 532 586 533 587 534 588 def test_existing_facet_merge_activity_records(ingest_env): ··· 548 602 "type": "activity_records", 549 603 "content": _jsonl_bytes( 550 604 [ 551 - {"id": "coding_1", "activity": "coding", "active_entities": ["same_entity"]}, 552 - {"id": "coding_2", "activity": "coding", "active_entities": ["source_entity"]}, 605 + { 606 + "id": "coding_1", 607 + "activity": "coding", 608 + "active_entities": ["same_entity"], 609 + }, 610 + { 611 + "id": "coding_2", 612 + "activity": "coding", 613 + "active_entities": ["source_entity"], 614 + }, 553 615 ] 554 616 ), 555 617 } ··· 557 619 } 558 620 ] 559 621 metadata, file_map = _build_request(facets) 560 - response = _post_facets(env["client"], env["key"], env["key_prefix"], metadata, file_map) 622 + response = _post_facets( 623 + env["client"], env["key"], env["key_prefix"], metadata, file_map 624 + ) 561 625 562 626 assert response.status_code == 200 563 627 assert response.get_json()["merged"] == 1 ··· 593 657 } 594 658 ] 595 659 metadata, file_map = _build_request(facets) 596 - response = _post_facets(env["client"], env["key"], env["key_prefix"], metadata, file_map) 660 + response = _post_facets( 661 + env["client"], env["key"], env["key_prefix"], metadata, file_map 662 + ) 597 663 598 664 assert response.status_code == 200 599 665 assert response.get_json()["skipped"] == 1 ··· 617 683 } 618 684 ] 619 685 metadata, file_map = _build_request(facets) 620 - response = _post_facets(env["client"], env["key"], env["key_prefix"], metadata, file_map) 686 + response = _post_facets( 687 + env["client"], env["key"], env["key_prefix"], metadata, file_map 688 + ) 621 689 622 690 target_file = ( 623 691 env["root"] ··· 656 724 } 657 725 ] 658 726 metadata, file_map = _build_request(facets) 659 - response = _post_facets(env["client"], env["key"], env["key_prefix"], metadata, file_map) 727 + response = _post_facets( 728 + env["client"], env["key"], env["key_prefix"], metadata, file_map 729 + ) 660 730 661 731 assert response.status_code == 200 662 732 assert response.get_json()["merged"] == 1 ··· 689 759 } 690 760 ] 691 761 metadata, file_map = _build_request(facets) 692 - response = _post_facets(env["client"], env["key"], env["key_prefix"], metadata, file_map) 762 + response = _post_facets( 763 + env["client"], env["key"], env["key_prefix"], metadata, file_map 764 + ) 693 765 694 766 assert response.status_code == 200 695 767 assert response.get_json()["merged"] == 1 ··· 718 790 } 719 791 ] 720 792 metadata, file_map = _build_request(facets) 721 - response = _post_facets(env["client"], env["key"], env["key_prefix"], metadata, file_map) 793 + response = _post_facets( 794 + env["client"], env["key"], env["key_prefix"], metadata, file_map 795 + ) 722 796 723 797 assert response.status_code == 200 724 798 assert response.get_json()["skipped"] == 1 ··· 742 816 } 743 817 ] 744 818 metadata, file_map = _build_request(facets) 745 - response = _post_facets(env["client"], env["key"], env["key_prefix"], metadata, file_map) 819 + response = _post_facets( 820 + env["client"], env["key"], env["key_prefix"], metadata, file_map 821 + ) 746 822 747 823 target_path = env["root"] / "facets" / "work" / "news" / "20260305.md" 748 824 assert response.status_code == 200 ··· 768 844 } 769 845 ] 770 846 metadata, file_map = _build_request(facets) 771 - response = _post_facets(env["client"], env["key"], env["key_prefix"], metadata, file_map) 847 + response = _post_facets( 848 + env["client"], env["key"], env["key_prefix"], metadata, file_map 849 + ) 772 850 773 851 assert response.status_code == 200 774 852 assert response.get_json()["merged"] == 1 ··· 789 867 { 790 868 "path": "entities/source_entity/observations.jsonl", 791 869 "type": "entity_observations", 792 - "content": _jsonl_bytes([{"content": "Knows Rust", "observed_at": 1}]), 870 + "content": _jsonl_bytes( 871 + [{"content": "Knows Rust", "observed_at": 1}] 872 + ), 793 873 }, 794 874 { 795 875 "path": "entities/20260305.jsonl", 796 876 "type": "detected_entities", 797 877 "content": _jsonl_bytes( 798 - [{"id": "source_entity", "name": "Source Entity", "type": "Person"}] 878 + [ 879 + { 880 + "id": "source_entity", 881 + "name": "Source Entity", 882 + "type": "Person", 883 + } 884 + ] 799 885 ), 800 886 }, 801 887 { ··· 815 901 } 816 902 ] 817 903 metadata, file_map = _build_request(facets) 818 - response = _post_facets(env["client"], env["key"], env["key_prefix"], metadata, file_map) 904 + response = _post_facets( 905 + env["client"], env["key"], env["key_prefix"], metadata, file_map 906 + ) 819 907 820 908 facet_root = env["root"] / "facets" / "work" 821 909 assert response.status_code == 200 ··· 823 911 assert (facet_root / "entities" / "target_entity" / "entity.json").exists() 824 912 assert (facet_root / "entities" / "target_entity" / "observations.jsonl").exists() 825 913 assert not (facet_root / "entities" / "source_entity").exists() 826 - assert _read_jsonl_file(facet_root / "entities" / "20260305.jsonl")[0]["id"] == "target_entity" 827 - assert _read_jsonl_file(facet_root / "activities" / "20260305.jsonl")[0]["active_entities"] == [ 828 - "target_entity" 829 - ] 914 + assert ( 915 + _read_jsonl_file(facet_root / "entities" / "20260305.jsonl")[0]["id"] 916 + == "target_entity" 917 + ) 918 + assert _read_jsonl_file(facet_root / "activities" / "20260305.jsonl")[0][ 919 + "active_entities" 920 + ] == ["target_entity"] 830 921 831 922 832 923 def test_unmapped_entity_staging(ingest_env): ··· 845 936 } 846 937 ] 847 938 metadata, file_map = _build_request(facets) 848 - response = _post_facets(env["client"], env["key"], env["key_prefix"], metadata, file_map) 939 + response = _post_facets( 940 + env["client"], env["key"], env["key_prefix"], metadata, file_map 941 + ) 849 942 850 943 assert response.status_code == 200 851 944 assert response.get_json() == { ··· 855 948 "staged": 1, 856 949 "errors": [], 857 950 } 858 - staged = _read_staged(env["key_prefix"], "work", "entity_relationship", "entities/unknown/entity.json") 951 + staged = _read_staged( 952 + env["key_prefix"], "work", "entity_relationship", "entities/unknown/entity.json" 953 + ) 859 954 assert staged["reason"] == "unmapped_entity" 860 955 assert staged["source_entity_id"] == "unknown" 861 956 assert staged["source_path"] == "entities/unknown/entity.json" ··· 877 972 } 878 973 ] 879 974 metadata, file_map = _build_request(facets) 880 - first = _post_facets(env["client"], env["key"], env["key_prefix"], metadata, file_map) 975 + first = _post_facets( 976 + env["client"], env["key"], env["key_prefix"], metadata, file_map 977 + ) 881 978 882 979 assert first.status_code == 200 883 980 assert first.get_json()["staged"] == 1 ··· 888 985 state_path.write_text(json.dumps(entity_state), encoding="utf-8") 889 986 890 987 metadata, file_map = _build_request(facets) 891 - second = _post_facets(env["client"], env["key"], env["key_prefix"], metadata, file_map) 988 + second = _post_facets( 989 + env["client"], env["key"], env["key_prefix"], metadata, file_map 990 + ) 892 991 893 992 assert second.status_code == 200 894 993 body = second.get_json() ··· 916 1015 } 917 1016 ] 918 1017 metadata, file_map = _build_request(facets) 919 - response = _post_facets(env["client"], env["key"], env["key_prefix"], metadata, file_map) 1018 + response = _post_facets( 1019 + env["client"], env["key"], env["key_prefix"], metadata, file_map 1020 + ) 920 1021 921 1022 assert response.status_code == 200 922 1023 assert response.get_json()["staged"] == 1 ··· 937 1038 } 938 1039 ] 939 1040 metadata, file_map = _build_request(facets) 940 - first = _post_facets(env["client"], env["key"], env["key_prefix"], metadata, file_map) 1041 + first = _post_facets( 1042 + env["client"], env["key"], env["key_prefix"], metadata, file_map 1043 + ) 941 1044 942 1045 metadata, file_map = _build_request(facets) 943 - second = _post_facets(env["client"], env["key"], env["key_prefix"], metadata, file_map) 1046 + second = _post_facets( 1047 + env["client"], env["key"], env["key_prefix"], metadata, file_map 1048 + ) 944 1049 945 1050 assert first.status_code == 200 946 1051 assert second.status_code == 200 ··· 971 1076 } 972 1077 ] 973 1078 metadata, file_map = _build_request(facets) 974 - response = _post_facets(env["client"], env["key"], env["key_prefix"], metadata, file_map) 1079 + response = _post_facets( 1080 + env["client"], env["key"], env["key_prefix"], metadata, file_map 1081 + ) 975 1082 976 1083 assert response.status_code == 200 977 1084 body = response.get_json() ··· 995 1102 }, 996 1103 { 997 1104 "name": "good", 998 - "files": [ 999 - {"path": "news/20260305.md", "type": "news", "content": b"ok\n"} 1000 - ], 1105 + "files": [{"path": "news/20260305.md", "type": "news", "content": b"ok\n"}], 1001 1106 }, 1002 1107 ] 1003 1108 metadata, file_map = _build_request(facets) 1004 - response = _post_facets(env["client"], env["key"], env["key_prefix"], metadata, file_map) 1109 + response = _post_facets( 1110 + env["client"], env["key"], env["key_prefix"], metadata, file_map 1111 + ) 1005 1112 1006 1113 assert response.status_code == 200 1007 1114 body = response.get_json() ··· 1042 1149 }, 1043 1150 ] 1044 1151 metadata, file_map = _build_request(facets) 1045 - response = _post_facets(env["client"], env["key"], env["key_prefix"], metadata, file_map) 1152 + response = _post_facets( 1153 + env["client"], env["key"], env["key_prefix"], metadata, file_map 1154 + ) 1046 1155 source = load_journal_source(env["key"]) 1047 1156 1048 1157 assert response.status_code == 200 ··· 1063 1172 } 1064 1173 ] 1065 1174 metadata, file_map = _build_request(facets) 1066 - response = _post_facets(env["client"], env["key"], env["key_prefix"], metadata, file_map) 1175 + response = _post_facets( 1176 + env["client"], env["key"], env["key_prefix"], metadata, file_map 1177 + ) 1067 1178 1068 1179 assert response.status_code == 200 1069 1180 assert _read_state(env["key_prefix"]) == { ··· 1085 1196 } 1086 1197 ] 1087 1198 metadata, file_map = _build_request(facets) 1088 - response = _post_facets(env["client"], env["key"], env["key_prefix"], metadata, file_map) 1199 + response = _post_facets( 1200 + env["client"], env["key"], env["key_prefix"], metadata, file_map 1201 + ) 1089 1202 1090 1203 assert response.status_code == 200 1091 1204 entries = _read_log(env["key_prefix"]) ··· 1114 1227 } 1115 1228 ] 1116 1229 metadata, file_map = _build_request(facets) 1117 - response = _post_facets(env["client"], env["key"], env["key_prefix"], metadata, file_map) 1230 + response = _post_facets( 1231 + env["client"], env["key"], env["key_prefix"], metadata, file_map 1232 + ) 1118 1233 1119 1234 assert response.status_code == 200 1120 1235 assert len(response.get_json()["errors"]) == 1
+162 -39
tests/test_import_call.py
··· 60 60 monkeypatch.setenv("_SOLSTONE_JOURNAL_OVERRIDE", str(tmp_path)) 61 61 think.utils._journal_path_cache = None 62 62 clear_journal_entity_cache() 63 - (tmp_path / "apps" / "import" / "journal_sources").mkdir(parents=True, exist_ok=True) 63 + (tmp_path / "apps" / "import" / "journal_sources").mkdir( 64 + parents=True, exist_ok=True 65 + ) 64 66 (tmp_path / "config").mkdir(parents=True, exist_ok=True) 65 67 66 68 key = generate_key() ··· 79 81 80 82 def _write_json(path: Path, data: dict) -> None: 81 83 path.parent.mkdir(parents=True, exist_ok=True) 82 - path.write_text(json.dumps(data, indent=2, ensure_ascii=False) + "\n", encoding="utf-8") 84 + path.write_text( 85 + json.dumps(data, indent=2, ensure_ascii=False) + "\n", encoding="utf-8" 86 + ) 83 87 84 88 85 89 def _read_json(path: Path) -> dict: ··· 102 106 103 107 104 108 def test_list_staged_empty_state(import_env): 105 - result = runner.invoke(call_app, ["import", "list-staged", "--source", "test-source"]) 109 + result = runner.invoke( 110 + call_app, ["import", "list-staged", "--source", "test-source"] 111 + ) 106 112 107 113 assert result.exit_code == 0 108 114 assert result.stdout.strip() == "" ··· 118 124 _write_json( 119 125 staged_path, 120 126 { 121 - "source_entity": {"id": "test-entity", "name": "Test Entity", "type": "Tool"}, 122 - "match_candidates": [{"id": "target-id", "name": "Target Entity", "tier": 8}], 127 + "source_entity": { 128 + "id": "test-entity", 129 + "name": "Test Entity", 130 + "type": "Tool", 131 + }, 132 + "match_candidates": [ 133 + {"id": "target-id", "name": "Target Entity", "tier": 8} 134 + ], 123 135 "reason": "low_confidence_match", 124 136 "staged_at": "2026-04-14T00:00:00+00:00", 125 137 }, ··· 137 149 "area": "entities", 138 150 "source_id": "test-entity", 139 151 "reason": "low_confidence_match", 140 - "source_entity": {"id": "test-entity", "name": "Test Entity", "type": "Tool"}, 141 - "match_candidates": [{"id": "target-id", "name": "Target Entity", "tier": 8}], 152 + "source_entity": { 153 + "id": "test-entity", 154 + "name": "Test Entity", 155 + "type": "Tool", 156 + }, 157 + "match_candidates": [ 158 + {"id": "target-id", "name": "Target Entity", "tier": 8} 159 + ], 142 160 "staged_at": "2026-04-14T00:00:00+00:00", 143 161 } 144 162 ] ··· 179 197 180 198 181 199 def test_list_staged_with_staged_facets(import_env): 182 - staged_file = "personal/entity_relationship/entities__source_entity__entity.json.staged.json" 183 - staged_path = get_state_directory(import_env["key_prefix"]) / "facets" / "staged" / staged_file 200 + staged_file = ( 201 + "personal/entity_relationship/entities__source_entity__entity.json.staged.json" 202 + ) 203 + staged_path = ( 204 + get_state_directory(import_env["key_prefix"]) 205 + / "facets" 206 + / "staged" 207 + / staged_file 208 + ) 184 209 _write_json( 185 210 staged_path, 186 211 { ··· 188 213 "source_entity_id": "source_entity", 189 214 "explanation": "Entity 'source_entity' has no mapping in entities/state.json id_map", 190 215 "source_path": "entities/source_entity/entity.json", 191 - "source_data": json.dumps({"entity_id": "source_entity"}, ensure_ascii=False, indent=2) 216 + "source_data": json.dumps( 217 + {"entity_id": "source_entity"}, ensure_ascii=False, indent=2 218 + ) 192 219 + "\n", 193 220 "staged_at": "2026-04-14T00:00:00+00:00", 194 221 }, ··· 211 238 "source_entity_id": "source_entity", 212 239 "explanation": "Entity 'source_entity' has no mapping in entities/state.json id_map", 213 240 "source_path": "entities/source_entity/entity.json", 214 - "source_data": json.dumps({"entity_id": "source_entity"}, ensure_ascii=False, indent=2) 241 + "source_data": json.dumps( 242 + {"entity_id": "source_entity"}, ensure_ascii=False, indent=2 243 + ) 215 244 + "\n", 216 245 "staged_at": "2026-04-14T00:00:00+00:00", 217 246 } ··· 246 275 "emails": ["alice@new.com"], 247 276 "created_at": 1000, 248 277 }, 249 - "match_candidates": [{"id": "target-id", "name": "Alice Johnson", "tier": 8}], 278 + "match_candidates": [ 279 + {"id": "target-id", "name": "Alice Johnson", "tier": 8} 280 + ], 250 281 "reason": "low_confidence_match", 251 282 "staged_at": "2026-04-14T00:00:00+00:00", 252 283 }, ··· 274 305 assert merged["emails"] == ["alice@old.com", "alice@new.com"] 275 306 assert merged["created_at"] == 1000 276 307 277 - state = _read_json(get_state_directory(import_env["key_prefix"]) / "entities" / "state.json") 308 + state = _read_json( 309 + get_state_directory(import_env["key_prefix"]) / "entities" / "state.json" 310 + ) 278 311 assert state["id_map"]["test-entity"] == "target-id" 279 312 280 - log_entries = _read_log(get_state_directory(import_env["key_prefix"]) / "entities" / "log.jsonl") 313 + log_entries = _read_log( 314 + get_state_directory(import_env["key_prefix"]) / "entities" / "log.jsonl" 315 + ) 281 316 assert log_entries[-1]["action"] == "resolved_merge" 282 317 assert log_entries[-1]["resolved_by"] == "talent" 283 318 284 319 285 320 def test_resolve_entity_create(import_env): 286 - save_journal_entity({"id": "test-entity", "name": "Occupied Entity", "type": "Tool"}) 321 + save_journal_entity( 322 + {"id": "test-entity", "name": "Occupied Entity", "type": "Tool"} 323 + ) 287 324 staged_path = ( 288 325 get_state_directory(import_env["key_prefix"]) 289 326 / "entities" ··· 293 330 _write_json( 294 331 staged_path, 295 332 { 296 - "source_entity": {"id": "test-entity", "name": "Fresh Entity", "type": "Tool"}, 297 - "match_candidates": [{"id": "test-entity", "name": "Occupied Entity", "tier": None}], 333 + "source_entity": { 334 + "id": "test-entity", 335 + "name": "Fresh Entity", 336 + "type": "Tool", 337 + }, 338 + "match_candidates": [ 339 + {"id": "test-entity", "name": "Occupied Entity", "tier": None} 340 + ], 298 341 "reason": "id_collision", 299 342 "staged_at": "2026-04-14T00:00:00+00:00", 300 343 }, ··· 302 345 303 346 result = runner.invoke( 304 347 call_app, 305 - ["import", "resolve-entity", "test-entity", "create", "--source", "test-source"], 348 + [ 349 + "import", 350 + "resolve-entity", 351 + "test-entity", 352 + "create", 353 + "--source", 354 + "test-source", 355 + ], 306 356 ) 307 357 308 358 assert result.exit_code == 0 ··· 311 361 assert created is not None 312 362 assert created["name"] == "Fresh Entity" 313 363 314 - state = _read_json(get_state_directory(import_env["key_prefix"]) / "entities" / "state.json") 364 + state = _read_json( 365 + get_state_directory(import_env["key_prefix"]) / "entities" / "state.json" 366 + ) 315 367 assert state["id_map"]["test-entity"] == "fresh_entity" 316 368 317 369 ··· 347 399 348 400 result = runner.invoke( 349 401 call_app, 350 - ["import", "resolve-entity", "new-principal", "create", "--source", "test-source"], 402 + [ 403 + "import", 404 + "resolve-entity", 405 + "new-principal", 406 + "create", 407 + "--source", 408 + "test-source", 409 + ], 351 410 ) 352 411 353 412 assert result.exit_code == 0 ··· 366 425 _write_json( 367 426 staged_path, 368 427 { 369 - "source_entity": {"id": "test-entity", "name": "Skip Entity", "type": "Tool"}, 428 + "source_entity": { 429 + "id": "test-entity", 430 + "name": "Skip Entity", 431 + "type": "Tool", 432 + }, 370 433 "match_candidates": [], 371 434 "reason": "principal_conflict", 372 435 "staged_at": "2026-04-14T00:00:00+00:00", ··· 382 445 assert not staged_path.exists() 383 446 assert load_journal_entity("test-entity") is None 384 447 385 - log_entries = _read_log(get_state_directory(import_env["key_prefix"]) / "entities" / "log.jsonl") 448 + log_entries = _read_log( 449 + get_state_directory(import_env["key_prefix"]) / "entities" / "log.jsonl" 450 + ) 386 451 assert log_entries[-1]["action"] == "resolved_skip" 387 452 assert log_entries[-1]["resolved_by"] == "talent" 388 453 ··· 403 468 get_state_directory(import_env["key_prefix"]) / "config" / "source_config.json", 404 469 {"identity": {"name": "Remote User"}}, 405 470 ) 406 - _write_json(import_env["root"] / "config" / "journal.json", {"identity": {"name": "Local User"}}) 471 + _write_json( 472 + import_env["root"] / "config" / "journal.json", 473 + {"identity": {"name": "Local User"}}, 474 + ) 407 475 408 476 result = runner.invoke( 409 477 call_app, 410 - ["import", "resolve-config", "identity.name", "apply", "--source", "test-source"], 478 + [ 479 + "import", 480 + "resolve-config", 481 + "identity.name", 482 + "apply", 483 + "--source", 484 + "test-source", 485 + ], 411 486 ) 412 487 413 488 assert result.exit_code == 0 ··· 415 490 assert journal_config["identity"]["name"] == "Remote User" 416 491 assert not diff_path.exists() 417 492 418 - log_entries = _read_log(get_state_directory(import_env["key_prefix"]) / "config" / "log.jsonl") 493 + log_entries = _read_log( 494 + get_state_directory(import_env["key_prefix"]) / "config" / "log.jsonl" 495 + ) 419 496 assert log_entries[-1]["action"] == "config_field_applied" 420 497 assert log_entries[-1]["resolved_by"] == "talent" 421 498 ··· 436 513 get_state_directory(import_env["key_prefix"]) / "config" / "source_config.json", 437 514 {"retention": {"days": 30}}, 438 515 ) 439 - _write_json(import_env["root"] / "config" / "journal.json", {"retention": {"days": 90}}) 516 + _write_json( 517 + import_env["root"] / "config" / "journal.json", {"retention": {"days": 90}} 518 + ) 440 519 441 520 result = runner.invoke( 442 521 call_app, 443 - ["import", "resolve-config", "retention.days", "keep", "--source", "test-source"], 522 + [ 523 + "import", 524 + "resolve-config", 525 + "retention.days", 526 + "keep", 527 + "--source", 528 + "test-source", 529 + ], 444 530 ) 445 531 446 532 assert result.exit_code == 0 ··· 504 590 import_env["key_prefix"], 505 591 {"id_map": {"source_entity": "target_entity"}, "received": {}}, 506 592 ) 507 - staged_file = "personal/entity_relationship/entities__source_entity__entity.json.staged.json" 508 - staged_path = get_state_directory(import_env["key_prefix"]) / "facets" / "staged" / staged_file 593 + staged_file = ( 594 + "personal/entity_relationship/entities__source_entity__entity.json.staged.json" 595 + ) 596 + staged_path = ( 597 + get_state_directory(import_env["key_prefix"]) 598 + / "facets" 599 + / "staged" 600 + / staged_file 601 + ) 509 602 _write_json( 510 603 staged_path, 511 604 { ··· 535 628 assert relationship["entity_id"] == "target_entity" 536 629 assert relationship["description"] == "imported relationship" 537 630 538 - log_entries = _read_log(get_state_directory(import_env["key_prefix"]) / "facets" / "log.jsonl") 631 + log_entries = _read_log( 632 + get_state_directory(import_env["key_prefix"]) / "facets" / "log.jsonl" 633 + ) 539 634 assert log_entries[-1]["action"] == "resolved_apply" 540 635 assert log_entries[-1]["resolved_by"] == "talent" 541 636 ··· 544 639 target_path = import_env["root"] / "facets" / "personal" / "facet.json" 545 640 _write_json(target_path, {"title": "Local"}) 546 641 staged_file = "personal/facet_json/facet.json.staged.json" 547 - staged_path = get_state_directory(import_env["key_prefix"]) / "facets" / "staged" / staged_file 642 + staged_path = ( 643 + get_state_directory(import_env["key_prefix"]) 644 + / "facets" 645 + / "staged" 646 + / staged_file 647 + ) 548 648 _write_json( 549 649 staged_path, 550 650 { ··· 564 664 assert not staged_path.exists() 565 665 assert _read_json(target_path) == {"title": "Remote"} 566 666 567 - log_entries = _read_log(get_state_directory(import_env["key_prefix"]) / "facets" / "log.jsonl") 667 + log_entries = _read_log( 668 + get_state_directory(import_env["key_prefix"]) / "facets" / "log.jsonl" 669 + ) 568 670 assert log_entries[-1]["action"] == "resolved_apply" 569 671 assert log_entries[-1]["item_id"] == "personal/facet.json" 570 672 assert log_entries[-1]["resolved_by"] == "talent" 571 673 572 674 573 675 def test_resolve_facet_unmapped_entity_fails_without_mapping(import_env): 574 - staged_file = "personal/entity_relationship/entities__source_entity__entity.json.staged.json" 575 - staged_path = get_state_directory(import_env["key_prefix"]) / "facets" / "staged" / staged_file 676 + staged_file = ( 677 + "personal/entity_relationship/entities__source_entity__entity.json.staged.json" 678 + ) 679 + staged_path = ( 680 + get_state_directory(import_env["key_prefix"]) 681 + / "facets" 682 + / "staged" 683 + / staged_file 684 + ) 576 685 _write_json( 577 686 staged_path, 578 687 { ··· 580 689 "source_entity_id": "source_entity", 581 690 "explanation": "Entity 'source_entity' has no mapping in entities/state.json id_map", 582 691 "source_path": "entities/source_entity/entity.json", 583 - "source_data": json.dumps({"entity_id": "source_entity"}, ensure_ascii=False, indent=2) 692 + "source_data": json.dumps( 693 + {"entity_id": "source_entity"}, ensure_ascii=False, indent=2 694 + ) 584 695 + "\n", 585 696 "staged_at": "2026-04-14T00:00:00+00:00", 586 697 }, ··· 592 703 ) 593 704 594 705 assert result.exit_code == 1 595 - assert "Entity source_entity has no mapping yet. Run entity review first." in result.stderr 706 + assert ( 707 + "Entity source_entity has no mapping yet. Run entity review first." 708 + in result.stderr 709 + ) 596 710 assert staged_path.exists() 597 711 598 712 599 713 def test_resolve_facet_skip(import_env): 600 - staged_file = "personal/entity_relationship/entities__source_entity__entity.json.staged.json" 601 - staged_path = get_state_directory(import_env["key_prefix"]) / "facets" / "staged" / staged_file 714 + staged_file = ( 715 + "personal/entity_relationship/entities__source_entity__entity.json.staged.json" 716 + ) 717 + staged_path = ( 718 + get_state_directory(import_env["key_prefix"]) 719 + / "facets" 720 + / "staged" 721 + / staged_file 722 + ) 602 723 _write_json( 603 724 staged_path, 604 725 { ··· 606 727 "source_entity_id": "source_entity", 607 728 "explanation": "Entity 'source_entity' has no mapping in entities/state.json id_map", 608 729 "source_path": "entities/source_entity/entity.json", 609 - "source_data": json.dumps({"entity_id": "source_entity"}, ensure_ascii=False, indent=2) 730 + "source_data": json.dumps( 731 + {"entity_id": "source_entity"}, ensure_ascii=False, indent=2 732 + ) 610 733 + "\n", 611 734 "staged_at": "2026-04-14T00:00:00+00:00", 612 735 },
+4 -4
tests/test_journal_merge.py
··· 240 240 paths["target"] / "entities" / "alice_johnson_2" / "entity.json" 241 241 ).exists() 242 242 artifact_root = _find_merge_artifact_root(paths["target"]) 243 - staged = _read_json( 244 - artifact_root / "staging" / "alice_johnson" / "entity.json" 245 - ) 243 + staged = _read_json(artifact_root / "staging" / "alice_johnson" / "entity.json") 246 244 assert staged["id"] == "alice_johnson" 247 245 assert staged["name"] == "Alice Cooper" 248 246 assert "staged" in result.output ··· 664 662 assert result.exit_code == 0 665 663 artifact_root = _find_merge_artifact_root(paths["target"]) 666 664 entries = _read_jsonl(artifact_root / "decisions.jsonl") 667 - entity_merged = next(entry for entry in entries if entry["action"] == "entity_merged") 665 + entity_merged = next( 666 + entry for entry in entries if entry["action"] == "entity_merged" 667 + ) 668 668 assert "source" in entity_merged 669 669 assert "target" in entity_merged 670 670 assert "fields_changed" in entity_merged
+2 -4
tests/test_journal_stats.py
··· 177 177 assert "total_transcript_duration" in data["totals"] 178 178 assert "total_percept_duration" in data["totals"] 179 179 assert ( 180 - data["tokens"]["by_day"]["20240101"]["gemini-2.5-flash"]["total_tokens"] 181 - == 495 180 + data["tokens"]["by_day"]["20240101"]["gemini-2.5-flash"]["total_tokens"] == 495 182 181 ) 183 182 184 183 ··· 235 234 ts_dir = day / "default" / "123456_300" 236 235 ts_dir.mkdir(parents=True) 237 236 (ts_dir / "audio.jsonl").write_text( 238 - '{"raw": "raw.flac"}\n' 239 - '{"start": "10:00:00", "text": "hello"}\n' 237 + '{"raw": "raw.flac"}\n{"start": "10:00:00", "text": "hello"}\n' 240 238 ) 241 239 242 240 # Create facet event file
+20 -10
tests/test_ollama.py
··· 680 680 self.run = AsyncMock(return_value="test result") 681 681 MockCLIRunner.last_instance = self 682 682 683 - with patch("shutil.which", return_value="/usr/bin/opencode"), \ 684 - patch("think.providers.ollama.CLIRunner", MockCLIRunner): 683 + with ( 684 + patch("shutil.which", return_value="/usr/bin/opencode"), 685 + patch("think.providers.ollama.CLIRunner", MockCLIRunner), 686 + ): 685 687 events = [] 686 688 asyncio.run( 687 689 provider.run_cogitate( ··· 717 719 self.run = AsyncMock(return_value="ok") 718 720 MockCLIRunner.last_instance = self 719 721 720 - with patch("shutil.which", return_value="/usr/bin/opencode"), \ 721 - patch("think.providers.ollama.CLIRunner", MockCLIRunner): 722 + with ( 723 + patch("shutil.which", return_value="/usr/bin/opencode"), 724 + patch("think.providers.ollama.CLIRunner", MockCLIRunner), 725 + ): 722 726 asyncio.run( 723 727 provider.run_cogitate( 724 728 {"prompt": "test", "model": "ollama-local/qwen3.5:35b-a3b-bf16"}, ··· 743 747 self.run = AsyncMock(return_value="ok") 744 748 MockCLIRunner.last_instance = self 745 749 746 - with patch("shutil.which", return_value="/usr/bin/opencode"), \ 747 - patch("think.providers.ollama.CLIRunner", MockCLIRunner): 750 + with ( 751 + patch("shutil.which", return_value="/usr/bin/opencode"), 752 + patch("think.providers.ollama.CLIRunner", MockCLIRunner), 753 + ): 748 754 asyncio.run( 749 755 provider.run_cogitate( 750 756 { ··· 774 780 self.run = AsyncMock(return_value="ok") 775 781 MockCLIRunner.last_instance = self 776 782 777 - with patch("shutil.which", return_value="/usr/bin/opencode"), \ 778 - patch("think.providers.ollama.CLIRunner", MockCLIRunner): 783 + with ( 784 + patch("shutil.which", return_value="/usr/bin/opencode"), 785 + patch("think.providers.ollama.CLIRunner", MockCLIRunner), 786 + ): 779 787 asyncio.run( 780 788 provider.run_cogitate( 781 789 { ··· 802 810 self.run = AsyncMock(side_effect=RuntimeError("CLI not found")) 803 811 804 812 events = [] 805 - with patch("shutil.which", return_value="/usr/bin/opencode"), \ 806 - patch("think.providers.ollama.CLIRunner", MockCLIRunner): 813 + with ( 814 + patch("shutil.which", return_value="/usr/bin/opencode"), 815 + patch("think.providers.ollama.CLIRunner", MockCLIRunner), 816 + ): 807 817 with pytest.raises(RuntimeError, match="CLI not found"): 808 818 asyncio.run( 809 819 provider.run_cogitate(
+3 -1
tests/test_password_cli.py
··· 20 20 def _mock_getpass(monkeypatch, *responses): 21 21 """Mock getpass.getpass to return successive responses.""" 22 22 it = iter(responses) 23 - monkeypatch.setattr("think.password_cli.getpass.getpass", lambda prompt="": next(it)) 23 + monkeypatch.setattr( 24 + "think.password_cli.getpass.getpass", lambda prompt="": next(it) 25 + ) 24 26 25 27 26 28 class TestSetPassword:
+10 -1
tests/test_retention_config_cli.py
··· 133 133 def test_clear_with_mode_rejected(journal_env): 134 134 result = runner.invoke( 135 135 call_app, 136 - ["journal", "retention", "config", "--stream", "plaud", "--clear", "--mode", "keep"], 136 + [ 137 + "journal", 138 + "retention", 139 + "config", 140 + "--stream", 141 + "plaud", 142 + "--clear", 143 + "--mode", 144 + "keep", 145 + ], 137 146 ) 138 147 139 148 assert result.exit_code == 1
+151 -26
tests/test_segment.py
··· 88 88 "20240101", 89 89 "default", 90 90 "090000_300", 91 - stream_json={"stream": "default", "prev_day": None, "prev_segment": None, "seq": 1}, 91 + stream_json={ 92 + "stream": "default", 93 + "prev_day": None, 94 + "prev_segment": None, 95 + "seq": 1, 96 + }, 92 97 ) 93 98 _make_segment( 94 99 tmp_path, 95 100 "20240101", 96 101 "custom", 97 102 "100000_300", 98 - stream_json={"stream": "custom", "prev_day": None, "prev_segment": None, "seq": 1}, 103 + stream_json={ 104 + "stream": "custom", 105 + "prev_day": None, 106 + "prev_segment": None, 107 + "seq": 1, 108 + }, 99 109 ) 100 110 101 111 args = argparse.Namespace( ··· 115 125 "20240101", 116 126 "default", 117 127 "090000_300", 118 - stream_json={"stream": "default", "prev_day": None, "prev_segment": None, "seq": 1}, 128 + stream_json={ 129 + "stream": "default", 130 + "prev_day": None, 131 + "prev_segment": None, 132 + "seq": 1, 133 + }, 119 134 agents=["audio.md"], 120 135 ) 121 136 ··· 150 165 "20240101", 151 166 "default", 152 167 "090000_300", 153 - stream_json={"stream": "default", "prev_day": None, "prev_segment": None, "seq": 1}, 168 + stream_json={ 169 + "stream": "default", 170 + "prev_day": None, 171 + "prev_segment": None, 172 + "seq": 1, 173 + }, 154 174 agents=["audio.md"], 155 175 ) 156 176 ··· 200 220 "20240101", 201 221 "default", 202 222 "090000_300", 203 - stream_json={"stream": "default", "prev_day": None, "prev_segment": None, "seq": 1}, 223 + stream_json={ 224 + "stream": "default", 225 + "prev_day": None, 226 + "prev_segment": None, 227 + "seq": 1, 228 + }, 204 229 agents=["audio.md"], 205 230 ) 206 231 ··· 223 248 "20240101", 224 249 "default", 225 250 "090000_300", 226 - stream_json={"stream": "default", "prev_day": None, "prev_segment": None, "seq": 1}, 251 + stream_json={ 252 + "stream": "default", 253 + "prev_day": None, 254 + "prev_segment": None, 255 + "seq": 1, 256 + }, 227 257 ) 228 258 _make_segment( 229 259 tmp_path, ··· 267 297 "20240101", 268 298 "default", 269 299 "090000_300", 270 - stream_json={"stream": "default", "prev_day": None, "prev_segment": None, "seq": 1}, 300 + stream_json={ 301 + "stream": "default", 302 + "prev_day": None, 303 + "prev_segment": None, 304 + "seq": 1, 305 + }, 271 306 screen=False, 272 307 ) 273 308 streams_dir = tmp_path / "streams" ··· 322 357 "20240101", 323 358 "default", 324 359 "090000_300", 325 - stream_json={"stream": "default", "prev_day": None, "prev_segment": None, "seq": 1}, 360 + stream_json={ 361 + "stream": "default", 362 + "prev_day": None, 363 + "prev_segment": None, 364 + "seq": 1, 365 + }, 326 366 audio=False, 327 367 screen=False, 328 368 ) ··· 377 417 378 418 out = capsys.readouterr().out 379 419 assert excinfo.value.code == 1 380 - assert "FAIL backward chain: missing previous segment 20240101/default/090000_300" in out 420 + assert ( 421 + "FAIL backward chain: missing previous segment 20240101/default/090000_300" 422 + in out 423 + ) 381 424 382 425 383 426 def test_verify_day_mode(tmp_path, monkeypatch, capsys): ··· 387 430 "20240101", 388 431 "default", 389 432 "090000_300", 390 - stream_json={"stream": "default", "prev_day": None, "prev_segment": None, "seq": 1}, 433 + stream_json={ 434 + "stream": "default", 435 + "prev_day": None, 436 + "prev_segment": None, 437 + "seq": 1, 438 + }, 391 439 ) 392 440 _make_segment( 393 441 tmp_path, ··· 427 475 "20240101", 428 476 "default", 429 477 "090000_300", 430 - stream_json={"stream": "default", "prev_day": None, "prev_segment": None, "seq": 1}, 478 + stream_json={ 479 + "stream": "default", 480 + "prev_day": None, 481 + "prev_segment": None, 482 + "seq": 1, 483 + }, 431 484 ) 432 485 streams_dir = tmp_path / "streams" 433 486 streams_dir.mkdir() ··· 485 538 "20240101", 486 539 "default", 487 540 "090000_300", 488 - stream_json={"stream": "default", "prev_day": None, "prev_segment": None, "seq": 1}, 541 + stream_json={ 542 + "stream": "default", 543 + "prev_day": None, 544 + "prev_segment": None, 545 + "seq": 1, 546 + }, 489 547 ) 490 548 streams_dir = tmp_path / "streams" 491 549 streams_dir.mkdir() ··· 521 579 "20240101", 522 580 "default", 523 581 "090000_300", 524 - stream_json={"stream": "default", "prev_day": None, "prev_segment": None, "seq": 1}, 582 + stream_json={ 583 + "stream": "default", 584 + "prev_day": None, 585 + "prev_segment": None, 586 + "seq": 1, 587 + }, 525 588 ) 526 589 streams_dir = tmp_path / "streams" 527 590 streams_dir.mkdir() ··· 550 613 "20240101", 551 614 "default", 552 615 "090000_300", 553 - stream_json={"stream": "default", "prev_day": None, "prev_segment": None, "seq": 1}, 616 + stream_json={ 617 + "stream": "default", 618 + "prev_day": None, 619 + "prev_segment": None, 620 + "seq": 1, 621 + }, 554 622 ) 555 623 streams_dir = tmp_path / "streams" 556 624 streams_dir.mkdir() ··· 581 649 "20240101", 582 650 "default", 583 651 "090000_300", 584 - stream_json={"stream": "default", "prev_day": None, "prev_segment": None, "seq": 1}, 652 + stream_json={ 653 + "stream": "default", 654 + "prev_day": None, 655 + "prev_segment": None, 656 + "seq": 1, 657 + }, 585 658 ) 586 659 _make_segment( 587 660 tmp_path, 588 661 "20240115", 589 662 "default", 590 663 "090000_300", 591 - stream_json={"stream": "default", "prev_day": None, "prev_segment": None, "seq": 1}, 664 + stream_json={ 665 + "stream": "default", 666 + "prev_day": None, 667 + "prev_segment": None, 668 + "seq": 1, 669 + }, 592 670 ) 593 671 streams_dir = tmp_path / "streams" 594 672 streams_dir.mkdir() ··· 619 697 "20240101", 620 698 "default", 621 699 "090000_300", 622 - stream_json={"stream": "default", "prev_day": None, "prev_segment": None, "seq": 1}, 700 + stream_json={ 701 + "stream": "default", 702 + "prev_day": None, 703 + "prev_segment": None, 704 + "seq": 1, 705 + }, 623 706 audio=False, 624 707 screen=False, 625 708 ) ··· 651 734 "20240101", 652 735 "default", 653 736 "090000_300", 654 - stream_json={"stream": "default", "prev_day": None, "prev_segment": None, "seq": 1}, 737 + stream_json={ 738 + "stream": "default", 739 + "prev_day": None, 740 + "prev_segment": None, 741 + "seq": 1, 742 + }, 655 743 ) 656 744 _make_segment( 657 745 tmp_path, ··· 697 785 "20240101", 698 786 "default", 699 787 "090000_300", 700 - stream_json={"stream": "default", "prev_day": None, "prev_segment": None, "seq": 1}, 788 + stream_json={ 789 + "stream": "default", 790 + "prev_day": None, 791 + "prev_segment": None, 792 + "seq": 1, 793 + }, 701 794 ) 702 795 _make_segment( 703 796 tmp_path, ··· 739 832 "20240101", 740 833 "default", 741 834 "090000_300", 742 - stream_json={"stream": "default", "prev_day": None, "prev_segment": None, "seq": 1}, 835 + stream_json={ 836 + "stream": "default", 837 + "prev_day": None, 838 + "prev_segment": None, 839 + "seq": 1, 840 + }, 743 841 audio=False, 744 842 screen=False, 745 843 ) 746 844 events = [ 747 - {"tract": "observe", "event": "start", "day": "20240101", "segment": "090000_300"}, 845 + { 846 + "tract": "observe", 847 + "event": "start", 848 + "day": "20240101", 849 + "segment": "090000_300", 850 + }, 748 851 {"tract": "dream", "event": "done", "day": "20240101", "segment": "090000_300"}, 749 852 ] 750 - (seg_dir / "events.jsonl").write_text("\n".join(json.dumps(e) for e in events) + "\n") 853 + (seg_dir / "events.jsonl").write_text( 854 + "\n".join(json.dumps(e) for e in events) + "\n" 855 + ) 751 856 streams_dir = tmp_path / "streams" 752 857 streams_dir.mkdir() 753 858 (streams_dir / "default.json").write_text( ··· 782 887 "20240101", 783 888 "default", 784 889 "090000_300", 785 - stream_json={"stream": "default", "prev_day": None, "prev_segment": None, "seq": 1}, 890 + stream_json={ 891 + "stream": "default", 892 + "prev_day": None, 893 + "prev_segment": None, 894 + "seq": 1, 895 + }, 786 896 ) 787 897 streams_dir = tmp_path / "streams" 788 898 streams_dir.mkdir() ··· 811 921 "20240101", 812 922 "default", 813 923 "090000_300", 814 - stream_json={"stream": "default", "prev_day": None, "prev_segment": None, "seq": 1}, 924 + stream_json={ 925 + "stream": "default", 926 + "prev_day": None, 927 + "prev_segment": None, 928 + "seq": 1, 929 + }, 815 930 ) 816 931 817 932 args = argparse.Namespace( ··· 835 950 "20240101", 836 951 "default", 837 952 "090000_300", 838 - stream_json={"stream": "default", "prev_day": None, "prev_segment": None, "seq": 1}, 953 + stream_json={ 954 + "stream": "default", 955 + "prev_day": None, 956 + "prev_segment": None, 957 + "seq": 1, 958 + }, 839 959 ) 840 960 841 961 args = argparse.Namespace( ··· 859 979 "20240101", 860 980 "default", 861 981 "090000_300", 862 - stream_json={"stream": "default", "prev_day": None, "prev_segment": None, "seq": 1}, 982 + stream_json={ 983 + "stream": "default", 984 + "prev_day": None, 985 + "prev_segment": None, 986 + "seq": 1, 987 + }, 863 988 ) 864 989 865 990 args = argparse.Namespace(
+4 -6
tests/test_segment_ingest.py
··· 461 461 assert (segment_dir / "extra.txt").read_bytes() == b"keep me" 462 462 state_data = _read_state(env["key_prefix"]) 463 463 assert "laptop/143022_300" in state_data["20260413"] 464 - assert state_data["20260413"]["laptop/143022_300"]["files"][0]["name"] == "audio.flac" 464 + assert ( 465 + state_data["20260413"]["laptop/143022_300"]["files"][0]["name"] == "audio.flac" 466 + ) 465 467 466 468 467 469 def test_ingest_stats_update(ingest_env): ··· 548 550 state_data = _read_state(env["key_prefix"]) 549 551 assert "_default/143022_300" in state_data["20260413"] 550 552 assert ( 551 - env["root"] 552 - / "20260413" 553 - / "_default" 554 - / "143022_300" 555 - / "transcript.jsonl" 553 + env["root"] / "20260413" / "_default" / "143022_300" / "transcript.jsonl" 556 554 ).read_bytes() == b'{"text":"default"}\n' 557 555 558 556
+1
tests/test_sol.py
··· 258 258 assert "--day" in captured_argv 259 259 assert "20250101" in captured_argv 260 260 261 + 261 262 class TestCommandRegistry: 262 263 """Tests for command registry completeness.""" 263 264
+6 -4
tests/test_stats_contract.py
··· 150 150 output = _scan_output(journal, stats_mod) 151 151 152 152 for python_path, _ in CONTRACT_FIELDS: 153 - assert _resolve_path(output, python_path), f"{python_path} missing from stats output" 153 + assert _resolve_path(output, python_path), ( 154 + f"{python_path} missing from stats output" 155 + ) 154 156 155 157 156 158 def test_contract_fields_referenced_in_js(): 157 159 js_source = JS_PATH.read_text() 158 160 159 161 for _, js_ref in CONTRACT_FIELDS: 160 - assert ( 161 - js_ref in js_source 162 - ), f"{js_ref} not found in dashboard.js — contract field may be stale" 162 + assert js_ref in js_source, ( 163 + f"{js_ref} not found in dashboard.js — contract field may be stale" 164 + ) 163 165 164 166 165 167 def test_all_day_fields_have_nonzero_values(tmp_path, monkeypatch):
+6 -7
tests/test_stats_schema.py
··· 18 18 ts_dir = day / "default" / "123456_300" 19 19 ts_dir.mkdir(parents=True) 20 20 (ts_dir / "audio.jsonl").write_text( 21 - '{"raw": "raw.flac"}\n' 22 - '{"start": "10:00:00", "text": "hello"}\n' 21 + '{"raw": "raw.flac"}\n{"start": "10:00:00", "text": "hello"}\n' 23 22 ) 24 23 25 24 monkeypatch.setenv("_SOLSTONE_JOURNAL_OVERRIDE", str(journal)) ··· 87 86 ts_dir = day / "default" / "123456_300" 88 87 ts_dir.mkdir(parents=True) 89 88 (ts_dir / "audio.jsonl").write_text( 90 - '{"raw": "raw.flac"}\n' 91 - '{"start": "10:00:00", "text": "hello"}\n' 89 + '{"raw": "raw.flac"}\n{"start": "10:00:00", "text": "hello"}\n' 92 90 ) 93 91 (ts_dir / "screen.jsonl").write_text( 94 - '{"header": true}\n' 95 - '{"frame_id": 1, "timestamp": "10:00:00"}\n' 92 + '{"header": true}\n{"frame_id": 1, "timestamp": "10:00:00"}\n' 96 93 ) 97 94 98 95 monkeypatch.setenv("_SOLSTONE_JOURNAL_OVERRIDE", str(journal)) ··· 101 98 102 99 stats = day_data["stats"] 103 100 for field in schema_mod.DAY_FIELDS: 104 - assert field in stats, f"DAY_FIELDS field '{field}' missing from scan_day output" 101 + assert field in stats, ( 102 + f"DAY_FIELDS field '{field}' missing from scan_day output" 103 + )
+12 -3
tests/test_system_status.py
··· 88 88 def test_revoked_observers_excluded(self, client): 89 89 now = int(time.time() * 1000) 90 90 observers = [ 91 - {"name": "phone", "last_seen": now - 5000, "enabled": True, "revoked": True}, 91 + { 92 + "name": "phone", 93 + "last_seen": now - 5000, 94 + "enabled": True, 95 + "revoked": True, 96 + }, 92 97 ] 93 98 with patch.object(system_mod, "list_observers", return_value=observers): 94 99 data = client.get("/api/system/status").get_json() ··· 121 126 def test_version_with_update_available(self, client): 122 127 with ( 123 128 patch.object(system_mod, "list_observers", return_value=[]), 124 - patch.object(system_mod, "_check_latest_version", return_value={"latest": "99.0.0"}), 129 + patch.object( 130 + system_mod, "_check_latest_version", return_value={"latest": "99.0.0"} 131 + ), 125 132 patch.object(system_mod, "collect_version", return_value="0.1.0"), 126 133 ): 127 134 data = client.get("/api/system/status").get_json() ··· 134 141 """Unit tests for _get_capture_health logic.""" 135 142 136 143 def test_no_last_seen_is_offline(self): 137 - with patch.object(system_mod, "list_observers", return_value=[{"name": "x", "enabled": True}]): 144 + with patch.object( 145 + system_mod, "list_observers", return_value=[{"name": "x", "enabled": True}] 146 + ): 138 147 result = system_mod._get_capture_health() 139 148 assert result["observers"][0]["status"] == "offline" 140 149
+31 -13
tests/test_transfer.py
··· 572 572 ) 573 573 574 574 with patch("observe.transfer.requests.Session", return_value=mock_session): 575 - send_segments("https://example.com", "test-key", ["20250103"], dry_run=False) 575 + send_segments( 576 + "https://example.com", "test-key", ["20250103"], dry_run=False 577 + ) 576 578 577 579 assert mock_session.post.call_count == 0 578 580 output = capsys.readouterr().out ··· 591 593 ) 592 594 593 595 with patch("observe.transfer.requests.Session", return_value=mock_session): 594 - send_segments("https://example.com", "test-key", ["20250103"], dry_run=False) 596 + send_segments( 597 + "https://example.com", "test-key", ["20250103"], dry_run=False 598 + ) 595 599 596 600 assert mock_session.post.call_count == 1 597 601 post_kwargs = mock_session.post.call_args.kwargs ··· 600 604 assert json.loads(post_kwargs["data"]["meta"]) == {"stream": "default"} 601 605 # Auth is set on the session, not per-request 602 606 assert mock_session.headers["Authorization"] == "Bearer test-key" 603 - assert "Transfer complete: 1 sent, 0 skipped, 0 failed, 100 bytes transferred" in ( 604 - capsys.readouterr().out 607 + assert ( 608 + "Transfer complete: 1 sent, 0 skipped, 0 failed, 100 bytes transferred" 609 + in (capsys.readouterr().out) 605 610 ) 606 611 607 612 def test_send_retry_on_5xx(self, tmp_path, monkeypatch, capsys): ··· 621 626 patch("observe.transfer.requests.Session", return_value=mock_session), 622 627 patch("observe.transfer.time.sleep"), 623 628 ): 624 - send_segments("https://example.com", "test-key", ["20250103"], dry_run=False) 629 + send_segments( 630 + "https://example.com", "test-key", ["20250103"], dry_run=False 631 + ) 625 632 626 633 assert mock_session.post.call_count == 3 627 - assert "Transfer complete: 1 sent, 0 skipped, 0 failed, 100 bytes transferred" in ( 628 - capsys.readouterr().out 634 + assert ( 635 + "Transfer complete: 1 sent, 0 skipped, 0 failed, 100 bytes transferred" 636 + in (capsys.readouterr().out) 629 637 ) 630 638 631 639 def test_send_auth_error(self): ··· 671 679 mock_session.get.side_effect = [first_get, second_get] 672 680 673 681 with patch("observe.transfer.requests.Session", return_value=mock_session): 674 - send_segments("https://example.com", "test-key", ["20250103"], dry_run=False) 675 - send_segments("https://example.com", "test-key", ["20250103"], dry_run=False) 682 + send_segments( 683 + "https://example.com", "test-key", ["20250103"], dry_run=False 684 + ) 685 + send_segments( 686 + "https://example.com", "test-key", ["20250103"], dry_run=False 687 + ) 676 688 677 689 assert mock_session.post.call_count == 1 678 690 output = capsys.readouterr().out 679 - assert "Transfer complete: 1 sent, 0 skipped, 0 failed, 100 bytes transferred" in ( 680 - output 691 + assert ( 692 + "Transfer complete: 1 sent, 0 skipped, 0 failed, 100 bytes transferred" 693 + in (output) 681 694 ) 682 - assert "Transfer complete: 0 sent, 1 skipped, 0 failed, 0 bytes transferred" in output 695 + assert ( 696 + "Transfer complete: 0 sent, 1 skipped, 0 failed, 0 bytes transferred" 697 + in output 698 + ) 683 699 684 700 def test_send_excludes_stream_json(self, tmp_path, monkeypatch): 685 701 from observe.transfer import send_segments ··· 690 706 mock_session = self._make_session(get_json=[]) 691 707 692 708 with patch("observe.transfer.requests.Session", return_value=mock_session): 693 - send_segments("https://example.com", "test-key", ["20250103"], dry_run=False) 709 + send_segments( 710 + "https://example.com", "test-key", ["20250103"], dry_run=False 711 + ) 694 712 695 713 files_arg = mock_session.post.call_args.kwargs["files"] 696 714 uploaded_names = [entry[1][0] for entry in files_arg]
+21 -17
tests/test_validate_key.py
··· 140 140 import json 141 141 142 142 sa_file = tmp_path / "sa.json" 143 - sa_file.write_text(json.dumps({ 144 - "type": "service_account", 145 - "project_id": "test-project", 146 - "client_email": "test@project.iam.gserviceaccount.com", 147 - "private_key": "fake", 148 - })) 143 + sa_file.write_text( 144 + json.dumps( 145 + { 146 + "type": "service_account", 147 + "project_id": "test-project", 148 + "client_email": "test@project.iam.gserviceaccount.com", 149 + "private_key": "fake", 150 + } 151 + ) 152 + ) 149 153 150 154 client = Mock() 151 155 client.models.list.return_value = [Mock()] ··· 154 158 mock_creds.service_account_email = "test@project.iam.gserviceaccount.com" 155 159 156 160 with ( 157 - patch( 158 - "think.providers.google.genai.Client", return_value=client 159 - ) as mock_cls, 161 + patch("think.providers.google.genai.Client", return_value=client) as mock_cls, 160 162 patch( 161 163 "google.oauth2.service_account.Credentials.from_service_account_file", 162 164 return_value=mock_creds, ··· 378 380 """PUT/GET vertex_credentials saves file and returns email.""" 379 381 client, journal = settings_client 380 382 381 - sa_json = json.dumps({ 382 - "type": "service_account", 383 - "project_id": "test-project", 384 - "client_email": "test@test-project.iam.gserviceaccount.com", 385 - "private_key": "-----BEGIN RSA PRIVATE KEY-----\nfake\n-----END RSA PRIVATE KEY-----\n", 386 - "client_id": "123", 387 - "token_uri": "https://oauth2.googleapis.com/token", 388 - }) 383 + sa_json = json.dumps( 384 + { 385 + "type": "service_account", 386 + "project_id": "test-project", 387 + "client_email": "test@test-project.iam.gserviceaccount.com", 388 + "private_key": "-----BEGIN RSA PRIVATE KEY-----\nfake\n-----END RSA PRIVATE KEY-----\n", 389 + "client_id": "123", 390 + "token_uri": "https://oauth2.googleapis.com/token", 391 + } 392 + ) 389 393 390 394 # Mock validation (don't actually call Google API) 391 395 with patch(
+1 -2
tests/verify_api.py
··· 421 421 and isinstance(item_value, (int, float)) 422 422 else ( 423 423 "<TIMESTAMP>" 424 - if item_key == "generated_at" 425 - and isinstance(item_value, str) 424 + if item_key == "generated_at" and isinstance(item_value, str) 426 425 else ( 427 426 round(item_value, 1) 428 427 if item_key in {"score", "recency"}
+9 -2
think/agents.py
··· 55 55 # Minimum content length for transcript-based generation 56 56 MIN_INPUT_CHARS = 50 57 57 58 + 58 59 def setup_logging(verbose: bool = False) -> logging.Logger: 59 60 """Configure logging for agent CLI.""" 60 61 level = logging.DEBUG if verbose else logging.INFO ··· 1183 1184 1184 1185 result = validate_key(provider_name, "") 1185 1186 if not result.get("valid"): 1186 - return "skip", f"Ollama not reachable ({result.get('error', 'unreachable')})" 1187 + return ( 1188 + "skip", 1189 + f"Ollama not reachable ({result.get('error', 'unreachable')})", 1190 + ) 1187 1191 1188 1192 try: 1189 1193 module = get_provider_module(provider_name) ··· 1235 1239 1236 1240 result = validate_key(provider_name, "") 1237 1241 if not result.get("valid"): 1238 - return "skip", f"Ollama not reachable ({result.get('error', 'unreachable')})" 1242 + return ( 1243 + "skip", 1244 + f"Ollama not reachable ({result.get('error', 'unreachable')})", 1245 + ) 1239 1246 1240 1247 # Pre-flight: check cogitate CLI binary is installed 1241 1248 binary = PROVIDER_METADATA[provider_name].get("cogitate_cli", "")
-1
think/conversation.py
··· 390 390 # Optionally, remove the old 'muse' key if it's no longer needed in the normalized dict 391 391 # del ex["muse"] 392 392 return ex 393 -
+3 -3
think/cortex.py
··· 424 424 ) 425 425 provider = event.get("provider") 426 426 if provider: 427 - self.agent_requests[agent.agent_id]["provider"] = ( 428 - provider 429 - ) 427 + self.agent_requests[agent.agent_id][ 428 + "provider" 429 + ] = provider 430 430 431 431 # Handle finish or error event 432 432 if event.get("event") in ["finish", "error"]:
+96 -62
think/merge.py
··· 52 52 53 53 _merge_segments(source, target, summary, dry_run, log_path=log_path) 54 54 _merge_entities( 55 - source, summary, dry_run, target_entities, 56 - log_path=log_path, staging_path=staging_path, 55 + source, 56 + summary, 57 + dry_run, 58 + target_entities, 59 + log_path=log_path, 60 + staging_path=staging_path, 57 61 ) 58 62 _merge_facets(source, target, summary, dry_run, log_path=log_path) 59 63 _merge_imports(source, target, summary, dry_run, log_path=log_path) ··· 457 461 item_id = item.get("id", "") 458 462 log_id = f"{facet_name}/entities/{source_det_file.name}/{item_id}" 459 463 if item_id in seen_ids: 460 - _log_decision(log_path, { 461 - "action": "facet_detected_entity_merged", 462 - "item_type": "facet_detected_entity", 463 - "item_id": log_id, 464 - "reason": "duplicate_skip", 465 - }) 464 + _log_decision( 465 + log_path, 466 + { 467 + "action": "facet_detected_entity_merged", 468 + "item_type": "facet_detected_entity", 469 + "item_id": log_id, 470 + "reason": "duplicate_skip", 471 + }, 472 + ) 466 473 else: 467 474 new_items.append(item) 468 - _log_decision(log_path, { 469 - "action": "facet_detected_entity_merged", 470 - "item_type": "facet_detected_entity", 471 - "item_id": log_id, 472 - "reason": "appended", 473 - }) 475 + _log_decision( 476 + log_path, 477 + { 478 + "action": "facet_detected_entity_merged", 479 + "item_type": "facet_detected_entity", 480 + "item_id": log_id, 481 + "reason": "appended", 482 + }, 483 + ) 474 484 if new_items and not dry_run: 475 485 _append_jsonl(target_det_file, new_items) 476 486 except Exception as exc: ··· 489 499 for item in _read_jsonl(source_todo_file): 490 500 log_id = f"{facet_name}/todos/{source_todo_file.name}/{item.get('text', '')}" 491 501 if (item["text"], item.get("created_at")) in seen: 492 - _log_decision(log_path, { 493 - "action": "facet_todo_merged", 494 - "item_type": "todo", 495 - "item_id": log_id, 496 - "reason": "duplicate_skip", 497 - }) 502 + _log_decision( 503 + log_path, 504 + { 505 + "action": "facet_todo_merged", 506 + "item_type": "todo", 507 + "item_id": log_id, 508 + "reason": "duplicate_skip", 509 + }, 510 + ) 498 511 else: 499 512 new_items.append(item) 500 - _log_decision(log_path, { 501 - "action": "facet_todo_merged", 502 - "item_type": "todo", 503 - "item_id": log_id, 504 - "reason": "appended", 505 - }) 513 + _log_decision( 514 + log_path, 515 + { 516 + "action": "facet_todo_merged", 517 + "item_type": "todo", 518 + "item_id": log_id, 519 + "reason": "appended", 520 + }, 521 + ) 506 522 if new_items and not dry_run: 507 523 _append_jsonl(target_todo_file, new_items) 508 524 except Exception as exc: ··· 523 539 for item in _read_jsonl(source_calendar_file): 524 540 log_id = f"{facet_name}/calendar/{source_calendar_file.name}/{item.get('title', '')}" 525 541 if (item["title"], item.get("start")) in seen: 526 - _log_decision(log_path, { 527 - "action": "facet_calendar_merged", 528 - "item_type": "calendar", 529 - "item_id": log_id, 530 - "reason": "duplicate_skip", 531 - }) 542 + _log_decision( 543 + log_path, 544 + { 545 + "action": "facet_calendar_merged", 546 + "item_type": "calendar", 547 + "item_id": log_id, 548 + "reason": "duplicate_skip", 549 + }, 550 + ) 532 551 else: 533 552 new_items.append(item) 534 - _log_decision(log_path, { 535 - "action": "facet_calendar_merged", 536 - "item_type": "calendar", 537 - "item_id": log_id, 538 - "reason": "appended", 539 - }) 553 + _log_decision( 554 + log_path, 555 + { 556 + "action": "facet_calendar_merged", 557 + "item_type": "calendar", 558 + "item_id": log_id, 559 + "reason": "appended", 560 + }, 561 + ) 540 562 if new_items and not dry_run: 541 563 _append_jsonl(target_calendar_file, new_items) 542 564 except Exception as exc: ··· 557 579 for item in source_config: 558 580 log_id = f"{facet_name}/activities/{item.get('id', '')}" 559 581 if item.get("id") in existing_ids: 560 - _log_decision(log_path, { 561 - "action": "facet_activities_config_merged", 562 - "item_type": "activity_config", 563 - "item_id": log_id, 564 - "reason": "duplicate_skip", 565 - }) 582 + _log_decision( 583 + log_path, 584 + { 585 + "action": "facet_activities_config_merged", 586 + "item_type": "activity_config", 587 + "item_id": log_id, 588 + "reason": "duplicate_skip", 589 + }, 590 + ) 566 591 else: 567 592 new_config.append(item) 568 - _log_decision(log_path, { 569 - "action": "facet_activities_config_merged", 570 - "item_type": "activity_config", 571 - "item_id": log_id, 572 - "reason": "appended", 573 - }) 593 + _log_decision( 594 + log_path, 595 + { 596 + "action": "facet_activities_config_merged", 597 + "item_type": "activity_config", 598 + "item_id": log_id, 599 + "reason": "appended", 600 + }, 601 + ) 574 602 if new_config and not dry_run: 575 603 _append_jsonl(target_config_file, new_config) 576 604 except Exception as exc: ··· 588 616 for item in source_records: 589 617 log_id = f"{facet_name}/activities/{source_day_file.name}/{item.get('id', '')}" 590 618 if item.get("id") in existing_ids: 591 - _log_decision(log_path, { 592 - "action": "facet_activities_record_merged", 593 - "item_type": "activity_record", 594 - "item_id": log_id, 595 - "reason": "duplicate_skip", 596 - }) 619 + _log_decision( 620 + log_path, 621 + { 622 + "action": "facet_activities_record_merged", 623 + "item_type": "activity_record", 624 + "item_id": log_id, 625 + "reason": "duplicate_skip", 626 + }, 627 + ) 597 628 else: 598 629 new_records.append(item) 599 - _log_decision(log_path, { 600 - "action": "facet_activities_record_merged", 601 - "item_type": "activity_record", 602 - "item_id": log_id, 603 - "reason": "appended", 604 - }) 630 + _log_decision( 631 + log_path, 632 + { 633 + "action": "facet_activities_record_merged", 634 + "item_type": "activity_record", 635 + "item_id": log_id, 636 + "reason": "appended", 637 + }, 638 + ) 605 639 if new_records and not dry_run: 606 640 _append_jsonl(target_day_file, new_records) 607 641 except Exception as exc:
+1 -3
think/providers/google.py
··· 128 128 config = get_config() 129 129 providers_config = config.get("providers", {}) 130 130 131 - http_options = types.HttpOptions( 132 - retry_options=types.HttpRetryOptions(attempts=8) 133 - ) 131 + http_options = types.HttpOptions(retry_options=types.HttpRetryOptions(attempts=8)) 134 132 135 133 api_key = os.getenv("GOOGLE_API_KEY") 136 134
+15 -5
think/segment.py
··· 521 521 ) 522 522 raise SystemExit(1) 523 523 524 - succ_day, succ_seg, succ_path = _find_successor_segment(src_day, stream, src_segment) 524 + succ_day, succ_seg, succ_path = _find_successor_segment( 525 + src_day, stream, src_segment 526 + ) 525 527 526 528 events_path = src_dir / "events.jsonl" 527 529 events_count = 0 ··· 558 560 559 561 rewritten = _rewrite_events_jsonl(dst_dir, to_day, new_segment) 560 562 if rewritten: 561 - print(f" rewrote {rewritten} events.jsonl lines (day: {src_day}->{to_day}, segment: {src_segment}->{new_segment})") 563 + print( 564 + f" rewrote {rewritten} events.jsonl lines (day: {src_day}->{to_day}, segment: {src_segment}->{new_segment})" 565 + ) 562 566 elif verbose: 563 567 print(" no events.jsonl to rewrite") 564 568 ··· 576 580 print(f" patched successor {succ_day}/{stream}/{succ_seg}") 577 581 if verbose: 578 582 print(f" prev_day: {succ_marker.get('prev_day')} -> {to_day}") 579 - print(f" prev_segment: {succ_marker.get('prev_segment')} -> {new_segment}") 583 + print( 584 + f" prev_segment: {succ_marker.get('prev_segment')} -> {new_segment}" 585 + ) 580 586 elif verbose: 581 587 print(" no successor to patch (stream tail)") 582 588 583 589 summary = rebuild_stream_state(stream) 584 590 print(f" rebuilt stream state: {stream}") 585 591 if verbose: 586 - print(f" scanned {summary['segments_scanned']} segments, rebuilt {len(summary['rebuilt'])} stream(s)") 592 + print( 593 + f" scanned {summary['segments_scanned']} segments, rebuilt {len(summary['rebuilt'])} stream(s)" 594 + ) 587 595 588 596 if index_info["available"]: 589 597 deleted = _delete_index_rows(journal, old_rel) 590 598 if any(deleted.values()) or verbose: 591 - print(f" deleted index rows: chunks={deleted['chunks']}, files={deleted['files']}, entities={deleted['entities']}, signals={deleted['entity_signals']}") 599 + print( 600 + f" deleted index rows: chunks={deleted['chunks']}, files={deleted['files']}, entities={deleted['entities']}, signals={deleted['entity_signals']}" 601 + ) 592 602 new_rel = f"{to_day}/{stream}/{new_segment}" 593 603 indexed = _reindex_segment(journal, dst_dir) 594 604 print(f" re-indexed: {indexed} files at {new_rel}")
+3 -1
think/stats_schema.py
··· 52 52 if "schema_version" not in data: 53 53 errors.append("missing 'schema_version'") 54 54 elif data["schema_version"] != SCHEMA_VERSION: 55 - errors.append(f"schema_version is {data['schema_version']}, expected {SCHEMA_VERSION}") 55 + errors.append( 56 + f"schema_version is {data['schema_version']}, expected {SCHEMA_VERSION}" 57 + ) 56 58 57 59 # Check generated_at 58 60 if "generated_at" not in data: