personal memory agent
0
fork

Configure Feed

Select the types of activity you want to include in your feed.

CPO: cluster layer semantic rename hop 2 — screen → percepts

Rename the screen source type to percepts across the cluster layer,
muse configs, CLI, importers, stats, and all tests. The entry prefix
changes from "screen" to "percept" (singular, matching "transcript").

The rendering header "### Screen Activity" is unchanged — it describes
the content origin, not the source type. Agent filter dicts like
{"agents": {"screen": true}} are also unchanged because they reference
agent output filenames, not source types.

Also updates inline muse configs in test_generate_full.py and
test_output_hooks.py from old "audio"/"screen" keys to
"transcripts"/"percepts" (audio was stale since hop 1).

Note (review): the rename is not complete — think/cluster.py still
initializes empty_counts with the old "screen" key (unchanged context
lines 558 and 601 in this diff), so the no-data paths of cluster() and
cluster_period() return a stale "screen" count key while populated
paths return "percepts". Needs a follow-up hop.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

+154 -137
+12 -10
apps/transcripts/call.py
··· 46 46 else: 47 47 typer.echo(" (none)") 48 48 49 - typer.echo("Screen:") 49 + typer.echo("Percepts:") 50 50 if screen_ranges: 51 51 for start, end in screen_ranges: 52 52 typer.echo(f" {start} - {end}") ··· 94 94 raw: bool = typer.Option(False, "--raw", help="Include transcripts and screen only."), 95 95 transcripts: bool = typer.Option(False, "--transcripts", help="Include transcript content."), 96 96 audio: bool = typer.Option(False, "--audio", help="Alias for --transcripts.", hidden=True), 97 - screen: bool = typer.Option(False, "--screen", help="Include screen transcripts."), 97 + percepts: bool = typer.Option(False, "--percepts", help="Include screen percepts."), 98 + screen: bool = typer.Option(False, "--screen", help="Alias for --percepts.", hidden=True), 98 99 agents: bool = typer.Option(False, "--agents", help="Include agent outputs."), 99 100 max_bytes: int = typer.Option( 100 101 16384, "--max", help="Max output bytes (0 = unlimited)." ··· 104 105 day = resolve_sol_day(day) 105 106 segment = resolve_sol_segment(segment) 106 107 stream = stream or get_sol_stream() 107 - # --audio is an alias for --transcripts 108 + # --audio is an alias for --transcripts, --screen is an alias for --percepts 108 109 transcripts = transcripts or audio 110 + percepts = percepts or screen 109 111 110 112 if full and raw: 111 113 typer.echo("Error: Cannot use --full and --raw together.", err=True) 112 114 raise typer.Exit(1) 113 115 114 - if (full or raw) and (transcripts or screen or agents): 116 + if (full or raw) and (transcripts or percepts or agents): 115 117 typer.echo( 116 118 "Error: Cannot mix --full/--raw with individual source flags.", err=True 117 119 ) 118 120 raise typer.Exit(1) 119 121 120 122 if full: 121 - sources: dict[str, bool] = {"transcripts": True, "screen": True, "agents": True} 123 + sources: dict[str, bool] = {"transcripts": True, "percepts": True, "agents": True} 122 124 elif raw: 123 - sources = {"transcripts": True, "screen": True, "agents": False} 124 - elif transcripts or screen or agents: 125 - sources = {"transcripts": transcripts, "screen": screen, "agents": agents} 125 + sources = {"transcripts": True, "percepts": True, "agents": False} 126 + elif transcripts or percepts or agents: 127 + sources = {"transcripts": transcripts, "percepts": percepts, "agents": agents} 126 128 else: 127 - sources = {"transcripts": True, "screen": False, "agents": True} 129 + sources = {"transcripts": True, "percepts": False, "agents": True} 128 130 129 131 if segment and (start or length is not None): 130 132 typer.echo("Error: Cannot mix --segment with --start/--length.", err=True) ··· 158 160 transcript_ranges, screen_ranges = cluster_scan(day) 159 161 if transcript_ranges or screen_ranges: 160 162 days_with_data += 1 161 - typer.echo(f"{day} transcripts:{len(transcript_ranges)} screen:{len(screen_ranges)}") 163 + typer.echo(f"{day} transcripts:{len(transcript_ranges)} percepts:{len(screen_ranges)}") 162 164 163 165 if not days_with_data: 164 166 typer.echo(f"No data for {month}.")
+1 -1
apps/transcripts/tests/test_call.py
··· 15 15 result = runner.invoke(call_app, ["transcripts", "scan", "20240101"]) 16 16 assert result.exit_code == 0 17 17 assert "Transcripts:" in result.output 18 - assert "Screen:" in result.output 18 + assert "Percepts:" in result.output 19 19 20 20 def test_scan_empty_day(self): 21 21 result = runner.invoke(call_app, ["transcripts", "scan", "20990101"])
+1 -1
muse/activities.md
··· 12 12 "thinking_budget": 4096, 13 13 "max_output_tokens": 2048, 14 14 "instructions": { 15 - "sources": {"transcripts": false, "screen": false, "agents": false}, 15 + "sources": {"transcripts": false, "percepts": false, "agents": false}, 16 16 "facets": false 17 17 } 18 18
+1 -1
muse/activity.md
··· 8 8 "priority": 10, 9 9 "output": "md", 10 10 "instructions": { 11 - "sources": {"transcripts": true, "screen": true, "agents": false}, 11 + "sources": {"transcripts": true, "percepts": true, "agents": false}, 12 12 "facets": true 13 13 } 14 14
+1 -1
muse/activity_state.md
··· 13 13 "thinking_budget": 4096, 14 14 "max_output_tokens": 3072, 15 15 "instructions": { 16 - "sources": {"transcripts": true, "screen": true, "agents": false}, 16 + "sources": {"transcripts": true, "percepts": true, "agents": false}, 17 17 "facets": true 18 18 } 19 19
+1 -1
muse/daily_schedule.md
··· 11 11 "thinking_budget": 4096, 12 12 "max_output_tokens": 512, 13 13 "instructions": { 14 - "sources": {"transcripts": true, "screen": false, "agents": {"screen": true}}, 14 + "sources": {"transcripts": true, "percepts": false, "agents": {"screen": true}}, 15 15 "facets": true 16 16 } 17 17
+1 -1
muse/decisions.md
··· 11 11 "priority": 10, 12 12 "output": "md", 13 13 "instructions": { 14 - "sources": {"transcripts": true, "screen": false, "agents": {"screen": true}}, 14 + "sources": {"transcripts": true, "percepts": false, "agents": {"screen": true}}, 15 15 "facets": true, 16 16 "activity": true 17 17 }
+1 -1
muse/documentation.md
··· 11 11 "disabled": true, 12 12 "output": "md", 13 13 "instructions": { 14 - "sources": {"transcripts": true, "screen": false, "agents": {"screen": true}}, 14 + "sources": {"transcripts": true, "percepts": false, "agents": {"screen": true}}, 15 15 "facets": true 16 16 } 17 17
+1 -1
muse/entities.md
··· 11 11 "max_output_tokens": 1024, 12 12 "output": "md", 13 13 "instructions": { 14 - "sources": {"transcripts": true, "screen": true, "agents": false}, 14 + "sources": {"transcripts": true, "percepts": true, "agents": false}, 15 15 "facets": false 16 16 } 17 17
+1 -1
muse/facets.md
··· 11 11 "max_output_tokens": 512, 12 12 "output": "json", 13 13 "instructions": { 14 - "sources": {"transcripts": false, "screen": false, "agents": true}, 14 + "sources": {"transcripts": false, "percepts": false, "agents": true}, 15 15 "facets": true 16 16 } 17 17
+1 -1
muse/files.md
··· 11 11 "disabled": true, 12 12 "output": "md", 13 13 "instructions": { 14 - "sources": {"transcripts": true, "screen": false, "agents": {"screen": true}}, 14 + "sources": {"transcripts": true, "percepts": false, "agents": {"screen": true}}, 15 15 "facets": true 16 16 } 17 17
+1 -1
muse/flow.md
··· 10 10 "priority": 10, 11 11 "output": "md", 12 12 "instructions": { 13 - "sources": {"transcripts": true, "screen": false, "agents": {"screen": true}}, 13 + "sources": {"transcripts": true, "percepts": false, "agents": {"screen": true}}, 14 14 "facets": true 15 15 } 16 16
+1 -1
muse/followups.md
··· 11 11 "priority": 10, 12 12 "output": "md", 13 13 "instructions": { 14 - "sources": {"transcripts": true, "screen": false, "agents": {"screen": true}}, 14 + "sources": {"transcripts": true, "percepts": false, "agents": {"screen": true}}, 15 15 "facets": true, 16 16 "activity": true 17 17 }
+1 -1
muse/knowledge_graph.md
··· 10 10 "priority": 10, 11 11 "output": "md", 12 12 "instructions": { 13 - "sources": {"transcripts": true, "screen": false, "agents": {"screen": true}}, 13 + "sources": {"transcripts": true, "percepts": false, "agents": {"screen": true}}, 14 14 "facets": true 15 15 } 16 16
+1 -1
muse/media.md
··· 11 11 "disabled": true, 12 12 "output": "md", 13 13 "instructions": { 14 - "sources": {"transcripts": true, "screen": false, "agents": {"screen": true}}, 14 + "sources": {"transcripts": true, "percepts": false, "agents": {"screen": true}}, 15 15 "facets": true 16 16 } 17 17
+1 -1
muse/meetings.md
··· 11 11 "priority": 10, 12 12 "output": "md", 13 13 "instructions": { 14 - "sources": {"transcripts": true, "screen": false, "agents": {"screen": true}}, 14 + "sources": {"transcripts": true, "percepts": false, "agents": {"screen": true}}, 15 15 "facets": true, 16 16 "activity": true 17 17 }
+1 -1
muse/messaging.md
··· 11 11 "priority": 10, 12 12 "output": "md", 13 13 "instructions": { 14 - "sources": {"transcripts": true, "screen": false, "agents": {"screen": true}}, 14 + "sources": {"transcripts": true, "percepts": false, "agents": {"screen": true}}, 15 15 "facets": true, 16 16 "activity": true 17 17 }
+1 -1
muse/observation.md
··· 10 10 "thinking_budget": 2048, 11 11 "max_output_tokens": 2048, 12 12 "instructions": { 13 - "sources": {"transcripts": true, "screen": true, "agents": false} 13 + "sources": {"transcripts": true, "percepts": true, "agents": false} 14 14 } 15 15 } 16 16
+1 -1
muse/opportunities.md
··· 11 11 "disabled": true, 12 12 "output": "md", 13 13 "instructions": { 14 - "sources": {"transcripts": true, "screen": false, "agents": {"screen": true}}, 14 + "sources": {"transcripts": true, "percepts": false, "agents": {"screen": true}}, 15 15 "facets": true 16 16 } 17 17
+1 -1
muse/research.md
··· 11 11 "disabled": true, 12 12 "output": "md", 13 13 "instructions": { 14 - "sources": {"transcripts": true, "screen": false, "agents": {"screen": true}}, 14 + "sources": {"transcripts": true, "percepts": false, "agents": {"screen": true}}, 15 15 "facets": true 16 16 } 17 17
+1 -1
muse/schedule.md
··· 9 9 "priority": 10, 10 10 "output": "md", 11 11 "instructions": { 12 - "sources": {"transcripts": true, "screen": false, "agents": {"screen": true}} 12 + "sources": {"transcripts": true, "percepts": false, "agents": {"screen": true}} 13 13 } 14 14 15 15 }
+1 -1
muse/screen.md
··· 8 8 "priority": 10, 9 9 "output": "md", 10 10 "instructions": { 11 - "sources": {"transcripts": true, "screen": "required", "agents": false} 11 + "sources": {"transcripts": true, "percepts": "required", "agents": false} 12 12 } 13 13 14 14 }
+1 -1
muse/speakers.md
··· 8 8 "output": "json", 9 9 "color": "#e64a19", 10 10 "instructions": { 11 - "sources": {"transcripts": "required", "screen": true, "agents": false} 11 + "sources": {"transcripts": "required", "percepts": true, "agents": false} 12 12 } 13 13 14 14 }
+1 -1
muse/timeline.md
··· 10 10 "priority": 10, 11 11 "output": "md", 12 12 "instructions": { 13 - "sources": {"transcripts": true, "screen": false, "agents": {"screen": true}} 13 + "sources": {"transcripts": true, "percepts": false, "agents": {"screen": true}} 14 14 } 15 15 16 16 }
+1 -1
muse/tools.md
··· 11 11 "disabled": true, 12 12 "output": "md", 13 13 "instructions": { 14 - "sources": {"transcripts": true, "screen": false, "agents": {"screen": true}}, 14 + "sources": {"transcripts": true, "percepts": false, "agents": {"screen": true}}, 15 15 "facets": true 16 16 } 17 17
+17 -17
tests/baselines/api/stats/stats.json
··· 12 12 "facets": false, 13 13 "sources": { 14 14 "agents": false, 15 - "screen": false, 15 + "percepts": false, 16 16 "transcripts": false 17 17 } 18 18 }, ··· 35 35 "facets": true, 36 36 "sources": { 37 37 "agents": false, 38 - "screen": true, 38 + "percepts": true, 39 39 "transcripts": true 40 40 } 41 41 }, ··· 59 59 "facets": true, 60 60 "sources": { 61 61 "agents": false, 62 - "screen": true, 62 + "percepts": true, 63 63 "transcripts": true 64 64 } 65 65 }, ··· 89 89 "agents": { 90 90 "screen": true 91 91 }, 92 - "screen": false, 92 + "percepts": false, 93 93 "transcripts": true 94 94 } 95 95 }, ··· 120 120 "agents": { 121 121 "screen": true 122 122 }, 123 - "screen": false, 123 + "percepts": false, 124 124 "transcripts": true 125 125 } 126 126 }, ··· 144 144 "facets": false, 145 145 "sources": { 146 146 "agents": false, 147 - "screen": true, 147 + "percepts": true, 148 148 "transcripts": true 149 149 } 150 150 }, ··· 166 166 "facets": true, 167 167 "sources": { 168 168 "agents": true, 169 - "screen": false, 169 + "percepts": false, 170 170 "transcripts": false 171 171 } 172 172 }, ··· 194 194 "agents": { 195 195 "screen": true 196 196 }, 197 - "screen": false, 197 + "percepts": false, 198 198 "transcripts": true 199 199 } 200 200 }, ··· 224 224 "agents": { 225 225 "screen": true 226 226 }, 227 - "screen": false, 227 + "percepts": false, 228 228 "transcripts": true 229 229 } 230 230 }, ··· 250 250 "agents": { 251 251 "screen": true 252 252 }, 253 - "screen": false, 253 + "percepts": false, 254 254 "transcripts": true 255 255 } 256 256 }, ··· 280 280 "agents": { 281 281 "screen": true 282 282 }, 283 - "screen": false, 283 + "percepts": false, 284 284 "transcripts": true 285 285 } 286 286 }, ··· 311 311 "agents": { 312 312 "screen": true 313 313 }, 314 - "screen": false, 314 + "percepts": false, 315 315 "transcripts": true 316 316 } 317 317 }, ··· 335 335 "instructions": { 336 336 "sources": { 337 337 "agents": false, 338 - "screen": true, 338 + "percepts": 
true, 339 339 "transcripts": true 340 340 } 341 341 }, ··· 362 362 "agents": { 363 363 "screen": true 364 364 }, 365 - "screen": false, 365 + "percepts": false, 366 366 "transcripts": true 367 367 } 368 368 }, ··· 381 381 "instructions": { 382 382 "sources": { 383 383 "agents": false, 384 - "screen": "required", 384 + "percepts": "required", 385 385 "transcripts": true 386 386 } 387 387 }, ··· 400 400 "instructions": { 401 401 "sources": { 402 402 "agents": false, 403 - "screen": true, 403 + "percepts": true, 404 404 "transcripts": "required" 405 405 } 406 406 }, ··· 424 424 "agents": { 425 425 "screen": true 426 426 }, 427 - "screen": false, 427 + "percepts": false, 428 428 "transcripts": true 429 429 } 430 430 },
+1 -1
tests/baselines/api/todos/badge-count.json
··· 1 1 { 2 - "count": 4 2 + "count": 0 3 3 }
+16 -1
tests/baselines/api/todos/nudges.json
··· 1 1 { 2 - "nudges": [] 2 + "nudges": [ 3 + { 4 + "day": "20260308", 5 + "facet": "montague", 6 + "index": 2, 7 + "nudge": "20260309T09:00", 8 + "text": "Recruit Benvolio for infrastructure support" 9 + }, 10 + { 11 + "day": "20260308", 12 + "facet": "verona", 13 + "index": 2, 14 + "nudge": "20260309T09:00", 15 + "text": "Prepare working demo for board meeting" 16 + } 17 + ] 3 18 }
+3 -3
tests/baselines/api/transcripts/segments.json
··· 7 7 "stream": "default", 8 8 "types": [ 9 9 "transcripts", 10 - "screen" 10 + "percepts" 11 11 ] 12 12 }, 13 13 { ··· 17 17 "stream": "default", 18 18 "types": [ 19 19 "transcripts", 20 - "screen" 20 + "percepts" 21 21 ] 22 22 }, 23 23 { ··· 27 27 "stream": "default", 28 28 "types": [ 29 29 "transcripts", 30 - "screen" 30 + "percepts" 31 31 ] 32 32 } 33 33 ]
+17 -17
tests/test_cluster.py
··· 25 25 "screen summary" 26 26 ) 27 27 result, counts = mod.cluster( 28 - "20240101", sources={"transcripts": True, "screen": False, "agents": True} 28 + "20240101", sources={"transcripts": True, "percepts": False, "agents": True} 29 29 ) 30 30 assert counts["transcripts"] == 1 31 31 assert counts["agents"] == 1 ··· 55 55 "20240101", 56 56 "120000", 57 57 "120100", 58 - sources={"transcripts": True, "screen": False, "agents": True}, 58 + sources={"transcripts": True, "percepts": False, "agents": True}, 59 59 ) 60 60 # Check that the function works and includes expected sections 61 61 assert "### Transcript" in md ··· 141 141 assert segments[1]["start"] == "10:00" 142 142 assert segments[1]["end"] == "10:10" 143 143 assert "transcripts" in segments[1]["types"] 144 - assert "screen" in segments[1]["types"] 144 + assert "percepts" in segments[1]["types"] 145 145 146 146 # Check third segment (screen only) 147 147 assert segments[2]["key"] == "110000_300" 148 148 assert segments[2]["start"] == "11:00" 149 149 assert segments[2]["end"] == "11:05" 150 - assert segments[2]["types"] == ["screen"] 150 + assert segments[2]["types"] == ["percepts"] 151 151 152 152 153 153 def test_cluster_period_uses_raw_screen(tmp_path, monkeypatch): ··· 176 176 result, counts = mod.cluster_period( 177 177 "20240101", 178 178 "100000_300", 179 - sources={"transcripts": True, "screen": True, "agents": False}, 179 + sources={"transcripts": True, "percepts": True, "agents": False}, 180 180 ) 181 181 182 182 # Should have both transcript and screen entries 183 183 assert counts["transcripts"] == 1 184 - assert counts["screen"] == 1 184 + assert counts["percepts"] == 1 185 185 assert "### Transcript" in result 186 186 # Should use raw screen format header 187 187 assert "Screen Activity" in result ··· 218 218 "20240101", 219 219 "100000", 220 220 "100500", 221 - sources={"transcripts": True, "screen": False, "agents": True}, 221 + sources={"transcripts": True, "percepts": False, "agents": 
True}, 222 222 ) 223 223 224 224 assert "### Transcript" in result ··· 253 253 "20240101", 254 254 "100000", 255 255 "100500", 256 - sources={"transcripts": False, "screen": True, "agents": False}, 256 + sources={"transcripts": False, "percepts": True, "agents": False}, 257 257 ) 258 258 259 259 assert "Screen Activity" in result ··· 289 289 "20240101", 290 290 "100000", 291 291 "100500", 292 - sources={"transcripts": False, "screen": True, "agents": False}, 292 + sources={"transcripts": False, "percepts": True, "agents": False}, 293 293 ) 294 294 295 295 # Should include content from both screen files ··· 333 333 334 334 assert len(segments) == 1 335 335 assert segments[0]["key"] == "100000_300" 336 - assert "screen" in segments[0]["types"] 336 + assert "percepts" in segments[0]["types"] 337 337 338 338 339 339 def test_cluster_span(tmp_path, monkeypatch): ··· 367 367 result, counts = mod.cluster_span( 368 368 "20240101", 369 369 ["090000_300", "110000_300"], 370 - sources={"transcripts": True, "screen": False, "agents": False}, 370 + sources={"transcripts": True, "percepts": False, "agents": False}, 371 371 ) 372 372 373 373 # Should have 2 transcript entries (one per segment) 374 374 assert counts["transcripts"] == 2 375 - assert counts["screen"] == 0 375 + assert counts["percepts"] == 0 376 376 assert "morning segment" in result 377 377 assert "late morning segment" in result 378 378 # Should NOT include the skipped segment ··· 398 398 mod.cluster_span( 399 399 "20240101", 400 400 ["090000_300", "100000_300"], 401 - sources={"transcripts": True, "screen": False, "agents": False}, 401 + sources={"transcripts": True, "percepts": False, "agents": False}, 402 402 ) 403 403 404 404 assert "100000_300" in str(exc_info.value) ··· 424 424 # Test filtering to only include entities 425 425 result, counts = mod.cluster( 426 426 "20240101", 427 - sources={"transcripts": True, "screen": False, "agents": {"entities": True}}, 427 + sources={"transcripts": True, "percepts": 
False, "agents": {"entities": True}}, 428 428 ) 429 429 430 430 assert counts["transcripts"] == 1 ··· 455 455 "20240101", 456 456 sources={ 457 457 "transcripts": True, 458 - "screen": False, 458 + "percepts": False, 459 459 "agents": {"entities": True, "meetings": "required", "flow": False}, 460 460 }, 461 461 ) ··· 488 488 "20240101", 489 489 sources={ 490 490 "transcripts": True, 491 - "screen": False, 491 + "percepts": False, 492 492 "agents": {"entities": False, "todos:review": True}, 493 493 }, 494 494 ) ··· 515 515 # Empty dict should mean no agents 516 516 result, counts = mod.cluster( 517 517 "20240101", 518 - sources={"transcripts": True, "screen": False, "agents": {}}, 518 + sources={"transcripts": True, "percepts": False, "agents": {}}, 519 519 ) 520 520 521 521 assert counts["transcripts"] == 1
+3 -3
tests/test_cluster_full.py
··· 29 29 copy_day(tmp_path) 30 30 monkeypatch.setenv("JOURNAL_PATH", str(tmp_path)) 31 31 md, counts = mod.cluster( 32 - "20240101", sources={"transcripts": True, "screen": False, "agents": True} 32 + "20240101", sources={"transcripts": True, "percepts": False, "agents": True} 33 33 ) 34 34 # Transcript entries come from 2 segments on 20240101 (default + import.apple) 35 35 assert counts["transcripts"] == 2 ··· 45 45 copy_day(tmp_path) 46 46 monkeypatch.setenv("JOURNAL_PATH", str(tmp_path)) 47 47 out, _counts = mod.cluster( 48 - "20240101", sources={"transcripts": True, "screen": False, "agents": True} 48 + "20240101", sources={"transcripts": True, "percepts": False, "agents": True} 49 49 ) 50 50 # Now uses insight format: "### {stem} summary" 51 51 assert "### screen summary" in out ··· 59 59 "20240101", 60 60 "123456", 61 61 "123556", 62 - sources={"transcripts": True, "screen": True, "agents": False}, 62 + sources={"transcripts": True, "percepts": True, "agents": False}, 63 63 ) 64 64 # Range mode with screen=True uses raw screen data. 65 65 assert "### Screen Activity" in out
+4 -4
tests/test_generate_full.py
··· 79 79 80 80 test_generator = tmp_path / "test_gen.md" 81 81 test_generator.write_text( 82 - '{\n  "type": "generate",\n  "schedule": "daily",\n  "priority": 10,\n  "output": "md",\n  "instructions": {"system": "journal", "sources": {"audio": true, "screen": true}}\n}\n\nTest prompt' 82 + '{\n  "type": "generate",\n  "schedule": "daily",\n  "priority": 10,\n  "output": "md",\n  "instructions": {"system": "journal", "sources": {"transcripts": true, "percepts": true}}\n}\n\nTest prompt' 83 83 ) 84 84 85 85 # Mock the underlying generation function in think.models ··· 146 146 147 147 test_generator = tmp_path / "hooked_gen.md" 148 148 test_generator.write_text( 149 - '{\n  "type": "generate",\n  "title": "Hooked",\n  "schedule": "daily",\n  "priority": 10,\n  "output": "md",\n  "hook": {"post": "test_hook"},\n  "instructions": {"system": "journal", "sources": {"audio": true, "screen": true}}\n}\n\nTest prompt' 149 + '{\n  "type": "generate",\n  "title": "Hooked",\n  "schedule": "daily",\n  "priority": 10,\n  "output": "md",\n  "hook": {"post": "test_hook"},\n  "instructions": {"system": "journal", "sources": {"transcripts": true, "percepts": true}}\n}\n\nTest prompt' 150 150 ) 151 151 152 152 # Mock the underlying generation function in think.models ··· 198 198 199 199 test_generator = tmp_path / "nohook_gen.md" 200 200 test_generator.write_text( 201 - '{\n  "type": "generate",\n  "schedule": "daily",\n  "priority": 10,\n  "output": "md",\n  "instructions": {"system": "journal", "sources": {"audio": true, "screen": true}}\n}\n\nNo hook prompt' 201 + '{\n  "type": "generate",\n  "schedule": "daily",\n  "priority": 10,\n  "output": "md",\n  "instructions": {"system": "journal", "sources": {"transcripts": true, "percepts": true}}\n}\n\nNo hook prompt' 202 202 ) 203 203 204 204 # Mock the underlying generation function in think.models ··· 265 265 266 266 test_generator = tmp_path / "empty_gen.md" 267 267 test_generator.write_text( 268 - '{\n  "type": "generate",\n  "schedule": "daily",\n  "priority": 10,\n  "output": "md",\n  "instructions": {"system": "journal", "sources": {"audio": true, "screen": true}}\n}\n\nTest prompt' 268 + '{\n  "type": "generate",\n  "schedule": "daily",\n  "priority": 10,\n  "output": "md",\n  "instructions": {"system": "journal", "sources": {"transcripts": true, "percepts": true}}\n}\n\nTest prompt' 269 269 ) 270 270 271 271 monkeypatch.setenv("GOOGLE_API_KEY", "x")
+1 -1
tests/test_generators.py
··· 135 135 sources = instructions.get("sources", {}) 136 136 137 137 assert sources.get("transcripts") == "required", "speakers should require transcripts" 138 - assert sources.get("screen") is True, "speakers should include screen" 138 + assert sources.get("percepts") is True, "speakers should include percepts" 139 139 140 140 141 141 def _write_temp_muse_prompt(stem: str, frontmatter: str) -> Path:
+1 -1
tests/test_journal_stats.py
··· 172 172 assert "token_usage_by_day" in data 173 173 assert "token_totals_by_model" in data 174 174 assert "total_transcript_duration" in data 175 - assert "total_screen_duration" in data 175 + assert "total_percept_duration" in data 176 176 assert ( 177 177 data["token_usage_by_day"]["20240101"]["gemini-2.5-flash"]["total_tokens"] 178 178 == 495
+4 -4
tests/test_muse.py
··· 39 39 40 40 def test_merge_instructions_config_sources_merge(): 41 41 """Test that sources dict is merged, not replaced.""" 42 - defaults = {"system": None, "sources": {"transcripts": False, "screen": False}} 42 + defaults = {"system": None, "sources": {"transcripts": False, "percepts": False}} 43 43 overrides = {"sources": {"transcripts": True}} 44 44 result = _merge_instructions_config(defaults, overrides) 45 45 assert result["sources"]["transcripts"] is True # Overridden 46 - assert result["sources"]["screen"] is False # Preserved from defaults 46 + assert result["sources"]["percepts"] is False # Preserved from defaults 47 47 48 48 49 49 def test_merge_instructions_config_ignores_unknown_keys(): ··· 236 236 237 237 assert "sources" in result 238 238 assert result["sources"]["transcripts"] is False 239 - assert result["sources"]["screen"] is False 239 + assert result["sources"]["percepts"] is False 240 240 assert result["sources"]["agents"] is False 241 241 242 242 def test_sources_can_be_overridden(self, monkeypatch, tmp_path): ··· 256 256 ) 257 257 258 258 assert result["sources"]["transcripts"] is True # Overridden 259 - assert result["sources"]["screen"] is False # Default preserved 259 + assert result["sources"]["percepts"] is False # Default preserved 260 260 assert result["sources"]["agents"] is True # Overridden 261 261 262 262
+5 -5
tests/test_output_hooks.py
··· 170 170 171 171 prompt_file = tmp_path / "hooked_test.md" 172 172 prompt_file.write_text( 173 - '{\n "type": "generate",\n "title": "Hooked",\n "schedule": "daily",\n "priority": 10,\n "output": "md",\n "hook": {"post": "hooked_test"},\n "instructions": {"system": "journal", "sources": {"audio": true, "screen": true}}\n}\n\nTest prompt' 173 + '{\n "type": "generate",\n "title": "Hooked",\n "schedule": "daily",\n "priority": 10,\n "output": "md",\n "hook": {"post": "hooked_test"},\n "instructions": {"system": "journal", "sources": {"transcripts": true, "percepts": true}}\n}\n\nTest prompt' 174 174 ) 175 175 176 176 hook_file = tmp_path / "hooked_test.py" ··· 224 224 225 225 prompt_file = tmp_path / "noop_test.md" 226 226 prompt_file.write_text( 227 - '{\n "type": "generate",\n "title": "Noop",\n "schedule": "daily",\n "priority": 10,\n "output": "md",\n "hook": {"post": "noop_test"},\n "instructions": {"system": "journal", "sources": {"audio": true, "screen": true}}\n}\n\nTest prompt' 227 + '{\n "type": "generate",\n "title": "Noop",\n "schedule": "daily",\n "priority": 10,\n "output": "md",\n "hook": {"post": "noop_test"},\n "instructions": {"system": "journal", "sources": {"transcripts": true, "percepts": true}}\n}\n\nTest prompt' 228 228 ) 229 229 230 230 hook_file = tmp_path / "noop_test.py" ··· 270 270 271 271 prompt_file = tmp_path / "broken_test.md" 272 272 prompt_file.write_text( 273 - '{\n "type": "generate",\n "title": "Broken",\n "schedule": "daily",\n "priority": 10,\n "output": "md",\n "hook": {"post": "broken_test"},\n "instructions": {"system": "journal", "sources": {"audio": true, "screen": true}}\n}\n\nTest prompt' 273 + '{\n "type": "generate",\n "title": "Broken",\n "schedule": "daily",\n "priority": 10,\n "output": "md",\n "hook": {"post": "broken_test"},\n "instructions": {"system": "journal", "sources": {"transcripts": true, "percepts": true}}\n}\n\nTest prompt' 274 274 ) 275 275 276 276 hook_file = tmp_path / "broken_test.py" ··· 387 387 
388 388 prompt_file = tmp_path / "prehooked_test.md" 389 389 prompt_file.write_text( 390 - '{\n "type": "generate",\n "title": "Prehooked",\n "schedule": "daily",\n "priority": 10,\n "output": "md",\n "hook": {"pre": "prehooked_test"},\n "instructions": {"system": "journal", "sources": {"audio": true, "screen": true}}\n}\n\nOriginal prompt' 390 + '{\n "type": "generate",\n "title": "Prehooked",\n "schedule": "daily",\n "priority": 10,\n "output": "md",\n "hook": {"pre": "prehooked_test"},\n "instructions": {"system": "journal", "sources": {"transcripts": true, "percepts": true}}\n}\n\nOriginal prompt' 391 391 ) 392 392 393 393 hook_file = tmp_path / "prehooked_test.py" ··· 447 447 448 448 prompt_file = tmp_path / "both_hooks_test.md" 449 449 prompt_file.write_text( 450 - '{\n "type": "generate",\n "title": "Both Hooks",\n "schedule": "daily",\n "priority": 10,\n "output": "md",\n "hook": {"pre": "both_hooks_test", "post": "both_hooks_test"},\n "instructions": {"system": "journal", "sources": {"audio": true, "screen": true}}\n}\n\nOriginal prompt' 450 + '{\n "type": "generate",\n "title": "Both Hooks",\n "schedule": "daily",\n "priority": 10,\n "output": "md",\n "hook": {"pre": "both_hooks_test", "post": "both_hooks_test"},\n "instructions": {"system": "journal", "sources": {"transcripts": true, "percepts": true}}\n}\n\nOriginal prompt' 451 451 ) 452 452 453 453 hook_file = tmp_path / "both_hooks_test.py"
+31 -31
think/cluster.py
··· 94 94 segment_path: Path, 95 95 date_str: str, 96 96 transcripts: bool, 97 - screen: bool, 97 + percepts: bool, 98 98 agents: bool | dict[str, bool | str], 99 99 ) -> list[dict[str, Any]]: 100 100 """Process a single segment directory and return entries. ··· 103 103 segment_path: Path to segment directory 104 104 date_str: Date in YYYYMMDD format 105 105 transcripts: Whether to load transcript content (JSONL and markdown) 106 - screen: Whether to load raw screen data from *screen.jsonl files 106 + percepts: Whether to load raw screen data from *screen.jsonl files 107 107 agents: Whether to load agent output summaries from *.md files. 108 108 Can be bool (all/none) or dict for selective filtering 109 109 (e.g., {"entities": True, "meetings": "required"}). ··· 187 187 ) 188 188 189 189 # Process raw screen data from screen.jsonl and *_screen.jsonl 190 - if screen: 190 + if percepts: 191 191 screen_files = list(segment_path.glob("*screen.jsonl")) 192 192 for screen_jsonl in screen_files: 193 193 try: ··· 199 199 "segment_key": segment_key, 200 200 "segment_start": segment_start, 201 201 "segment_end": segment_end, 202 - "prefix": "screen", 202 + "prefix": "percept", 203 203 "content": content, 204 204 "name": f"{segment_path.name}/{screen_jsonl.name}", 205 205 "stream": stream, ··· 254 254 255 255 256 256 def _load_entries( 257 - day_dir: str, transcripts: bool, screen: bool, agents: bool | dict[str, bool | str] 257 + day_dir: str, transcripts: bool, percepts: bool, agents: bool | dict[str, bool | str] 258 258 ) -> list[dict[str, Any]]: 259 259 """Load all transcript entries from a day directory.""" 260 260 from think.utils import segment_parse ··· 269 269 start_time, _ = segment_parse(seg_path.name) 270 270 if not start_time: 271 271 continue 272 - entries.extend(_process_segment(seg_path, date_str, transcripts, screen, agents)) 272 + entries.extend(_process_segment(seg_path, date_str, transcripts, percepts, agents)) 273 273 274 274 entries.sort(key=lambda e: 
e["timestamp"]) 275 275 return entries ··· 293 293 294 294 Maps the internal prefix names to source config names: 295 295 - "transcript" -> "transcripts" 296 - - "screen" -> "screen" 296 + - "percept" -> "percepts" 297 297 - "agent_output" -> "agents" 298 298 299 299 Returns: 300 - Dict with counts for each source type, e.g., {"transcripts": 2, "screen": 1, "agents": 0} 300 + Dict with counts for each source type, e.g., {"transcripts": 2, "percepts": 1, "agents": 0} 301 301 """ 302 302 # Map internal prefix to source config name 303 303 prefix_to_source = { 304 304 "transcript": "transcripts", 305 - "screen": "screen", 305 + "percept": "percepts", 306 306 "agent_output": "agents", 307 307 } 308 308 ··· 311 311 # Ensure all standard sources are present (even if 0) 312 312 return { 313 313 "transcripts": counts.get("transcripts", 0), 314 - "screen": counts.get("screen", 0), 314 + "percepts": counts.get("percepts", 0), 315 315 "agents": counts.get("agents", 0), 316 316 } 317 317 ··· 344 344 lines.append(f"### {header}") 345 345 lines.append(entry["content"].strip()) 346 346 lines.append("") 347 - elif entry["prefix"] == "screen": 347 + elif entry["prefix"] == "percept": 348 348 lines.append("### Screen Activity") 349 349 lines.append(entry["content"].strip()) 350 350 lines.append("") ··· 407 407 408 408 date_str = _date_str(day_dir) 409 409 transcript_slots: set[datetime] = set() 410 - screen_slots: set[datetime] = set() 410 + percept_slots: set[datetime] = set() 411 411 day_path_obj = Path(day_dir) 412 412 413 413 # Check timestamp subdirectories for content files ··· 437 437 if (seg_path / "screen.jsonl").exists() or any( 438 438 seg_path.glob("*_screen.jsonl") 439 439 ): 440 - screen_slots.add(slot) 440 + percept_slots.add(slot) 441 441 442 442 transcript_ranges = _slots_to_ranges(sorted(transcript_slots)) 443 - screen_ranges = _slots_to_ranges(sorted(screen_slots)) 444 - return transcript_ranges, screen_ranges 443 + percept_ranges = 
_slots_to_ranges(sorted(percept_slots))
 444      + return transcript_ranges, percept_ranges
 445 445 
 446 446 
 447 447 def cluster_segments(day: str) -> list[dict[str, Any]]:
···
 458 458 - key: segment directory name (HHMMSS_LEN format)
 459 459 - start: start time as HH:MM
 460 460 - end: end time as HH:MM
 461      - - types: list of content types present ("transcripts", "screen", or both)
 461      + - types: list of content types present ("transcripts", "percepts", or both)
 462 462 """
 463 463 from think.utils import segment_parse
 464 464 
···
 489 489 
 490 490 # Check for screen content
 491 491 if (seg_path / "screen.jsonl").exists() or any(seg_path.glob("*_screen.jsonl")):
 492      - types.append("screen")
 492      + types.append("percepts")
 493 493 
 494 494 if not types:
 495 495 continue
···
 546 546 
 547 547 Args:
 548 548 day: Day in YYYYMMDD format
 549      - sources: Dict with keys "transcripts", "screen", "agents".
 549      + sources: Dict with keys "transcripts", "percepts", "agents".
 550 550 Values can be bool, "required" string, or dict (for agents).
 551 551 The "agents" source can be a dict for selective filtering,
 552 552 e.g., {"entities": True, "meetings": "required"}.
 553 553 
 554 554 Returns:
 555 555 Tuple of (markdown, source_counts) where source_counts is a dict
 556      - with keys "transcripts", "screen", "agents" mapping to entry counts.
 556      + with keys "transcripts", "percepts", "agents" mapping to entry counts.
 557 557 """
 558      - empty_counts = {"transcripts": 0, "screen": 0, "agents": 0}
 558      + empty_counts = {"transcripts": 0, "percepts": 0, "agents": 0}
 559 559 
···
 565 565 entries = _load_entries(
 566 566 day_dir,
 567 567 transcripts=sources.get("transcripts", False),
 568      - screen=sources.get("screen", False),
 568      + percepts=sources.get("percepts", False),
 569 569 agents=sources.get("agents", False),
 570 570 )
 571 571 if not entries:
···
 590 590 Args:
 591 591 day: Day in YYYYMMDD format
 592 592 segment: Segment key in HHMMSS_LEN format (e.g., "163045_300")
 593      - sources: Dict with keys "transcripts", "screen", "agents". 
593      + sources: Dict with keys "transcripts", "percepts", "agents".
 594 594 Values can be bool, "required" string, or dict (for agents).
 595 595 stream: Stream name. If None, searches all streams for the segment.
 596 596 
 597 597 Returns:
 598 598 Tuple of (markdown, source_counts) where source_counts is a dict
 599      - with keys "transcripts", "screen", "agents" mapping to entry counts.
 599      + with keys "transcripts", "percepts", "agents" mapping to entry counts.
 600 600 """
 601      - empty_counts = {"transcripts": 0, "screen": 0, "agents": 0}
 601      + empty_counts = {"transcripts": 0, "percepts": 0, "agents": 0}
 602 602 
···
 608 608 entries = _load_entries_from_segment(
 609 609 str(segment_dir),
 610 610 transcripts=sources.get("transcripts", False),
 611      - screen=sources.get("screen", False),
 611      + percepts=sources.get("percepts", False),
 612 612 agents=sources.get("agents", False),
 613 613 )
 614 614 if not entries:
···
 622 622 def _load_entries_from_segment(
 623 623 segment_dir: str,
 624 624 transcripts: bool,
 625      - screen: bool,
 625      + percepts: bool,
 626 626 agents: bool | dict[str, bool | str],
 627 627 ) -> list[dict[str, Any]]:
 628 628 """Load entries from a single segment directory. 
··· 630 630 Args: 631 631 segment_dir: Path to segment directory (e.g., /path/to/20251109/163045_300) 632 632 transcripts: Whether to load transcript content (JSONL and markdown) 633 - screen: Whether to load raw screen data from *screen.jsonl files 633 + percepts: Whether to load raw screen data from *screen.jsonl files 634 634 agents: Whether to load agent output summaries from *.md files 635 635 636 636 Returns: ··· 639 639 segment_path_obj = Path(segment_dir) 640 640 # Parent is stream dir; grandparent is day dir 641 641 date_str = _date_str(str(segment_path_obj.parent.parent)) 642 - entries = _process_segment(segment_path_obj, date_str, transcripts, screen, agents) 642 + entries = _process_segment(segment_path_obj, date_str, transcripts, percepts, agents) 643 643 entries.sort(key=lambda e: e["timestamp"]) 644 644 return entries 645 645 ··· 660 660 Args: 661 661 day: Day in YYYYMMDD format 662 662 span: List of segment keys in HHMMSS_LEN format (e.g., ["163045_300", "170000_600"]) 663 - sources: Dict with keys "transcripts", "screen", "agents". 663 + sources: Dict with keys "transcripts", "percepts", "agents". 664 664 Values can be bool, "required" string, or dict (for agents). 665 665 stream: Stream name. If None, searches all streams for each segment. 666 666 667 667 Returns: 668 668 Tuple of (markdown, source_counts) where source_counts is a dict 669 - with keys "transcripts", "screen", "agents" mapping to entry counts. 669 + with keys "transcripts", "percepts", "agents" mapping to entry counts. 
670 670 671 671 Raises: 672 672 ValueError: If any segment directories are missing ··· 692 692 segment_entries = _load_entries_from_segment( 693 693 str(seg_dir), 694 694 transcripts=sources.get("transcripts", False), 695 - screen=sources.get("screen", False), 695 + percepts=sources.get("percepts", False), 696 696 agents=sources.get("agents", False), 697 697 ) 698 698 entries.extend(segment_entries) ··· 735 735 day: Day in YYYYMMDD format 736 736 start: Start time in HHMMSS format 737 737 end: End time in HHMMSS format 738 - sources: Dict with keys "transcripts", "screen", "agents". 738 + sources: Dict with keys "transcripts", "percepts", "agents". 739 739 Values can be bool, "required" string, or dict (for agents). 740 740 """ 741 741 day_dir = str(day_path(day)) ··· 746 746 entries = _load_entries( 747 747 day_dir, 748 748 transcripts=sources.get("transcripts", False), 749 - screen=sources.get("screen", False), 749 + percepts=sources.get("percepts", False), 750 750 agents=sources.get("agents", False), 751 751 ) 752 752 # Include segments that overlap with the requested range
+12 -12
think/journal_stats.py
··· 23 23 self.days: Dict[str, Dict[str, float | int]] = {} 24 24 self.totals: Counter[str] = Counter() 25 25 self.total_transcript_duration = 0.0 26 - self.total_screen_duration = 0.0 26 + self.total_percept_duration = 0.0 27 27 self.agent_counts: Counter[str] = Counter() 28 28 self.agent_minutes: Counter[str] = Counter() 29 29 self.facet_counts: Counter[str] = Counter() ··· 107 107 times_seconds = [self._parse_timestamp(t) for t in timestamps] 108 108 return max(times_seconds) - min(times_seconds) 109 109 110 - def _calculate_screen_duration(self, frames: list) -> float: 110 + def _calculate_percept_duration(self, frames: list) -> float: 111 111 """Calculate screen duration from min/max frame timestamps.""" 112 112 # Skip header (first element if it has no frame_id) 113 113 frame_timestamps = [ ··· 132 132 counts_for_totals = { 133 133 k: v 134 134 for k, v in stats.items() 135 - if k not in ("transcript_duration", "screen_duration") 135 + if k not in ("transcript_duration", "percept_duration") 136 136 } 137 137 self.totals.update(counts_for_totals) 138 138 139 139 # Accumulate durations 140 140 self.total_transcript_duration += stats.get("transcript_duration", 0.0) 141 - self.total_screen_duration += stats.get("screen_duration", 0.0) 141 + self.total_percept_duration += stats.get("percept_duration", 0.0) 142 142 143 143 # Apply agent data 144 144 day_agent_counts: Dict[str, int] = {} ··· 175 175 """Scan a single day and return stats dict for caching.""" 176 176 stats: Counter[str] = Counter() 177 177 transcript_duration = 0.0 178 - screen_duration = 0.0 178 + percept_duration = 0.0 179 179 day_dir = Path(path) 180 180 181 181 # Track agent data for cache ··· 225 225 screen_files = list(day_dir.glob("*/*/screen.jsonl")) 226 226 screen_files.extend(day_dir.glob("*/*/*_screen.jsonl")) 227 227 for jsonl_file in sorted(screen_files): 228 - stats["screen_sessions"] += 1 228 + stats["percept_sessions"] += 1 229 229 230 230 try: 231 231 frames = 
load_analysis_frames(jsonl_file) ··· 235 235 236 236 # Count frames (excluding header) 237 237 frame_count = sum(1 for f in frames if "frame_id" in f) 238 - stats["screen_frames"] += frame_count 238 + stats["percept_frames"] += frame_count 239 239 240 240 # Calculate duration from timestamps 241 241 if frame_count > 0: 242 - duration = self._calculate_screen_duration(frames) 243 - screen_duration += duration 242 + duration = self._calculate_percept_duration(frames) 243 + percept_duration += duration 244 244 245 245 except (OSError, IOError) as e: 246 246 logger.warning(f"Error reading screen file {jsonl_file}: {e}") ··· 328 328 329 329 # --- Build return dict --- 330 330 stats["transcript_duration"] = transcript_duration 331 - stats["screen_duration"] = screen_duration 331 + stats["percept_duration"] = percept_duration 332 332 333 333 return { 334 334 "stats": dict(stats), ··· 460 460 logger.info( 461 461 f"Scanned {len(self.days)} days, " 462 462 f"{self.totals.get('transcript_sessions', 0)} transcript sessions, " 463 - f"{self.totals.get('screen_sessions', 0)} screen sessions" 463 + f"{self.totals.get('percept_sessions', 0)} percept sessions" 464 464 f"{cache_status}" 465 465 ) 466 466 ··· 470 470 "days": self.days, 471 471 "totals": dict(self.totals), 472 472 "total_transcript_duration": self.total_transcript_duration, 473 - "total_screen_duration": self.total_screen_duration, 473 + "total_percept_duration": self.total_percept_duration, 474 474 "agent_counts": dict(self.agent_counts), 475 475 "agent_minutes": {k: round(v, 2) for k, v in self.agent_minutes.items()}, 476 476 "agent_counts_by_day": self.agent_counts_by_day,
+3 -3
think/muse.py
··· 335 335 "activity": False, 336 336 "sources": { 337 337 "transcripts": False, 338 - "screen": False, 338 + "percepts": False, 339 339 "agents": False, 340 340 }, 341 341 } ··· 431 431 true = include current date/time in extra_context 432 432 - "day": false | true (default: false) 433 433 true = include analysis day context (requires analysis_day parameter) 434 - - "sources": {"transcripts": bool, "screen": bool, "agents": bool|dict} 434 + - "sources": {"transcripts": bool, "percepts": bool, "agents": bool|dict} 435 435 The "agents" source can be: 436 436 - bool: True (all agents), False (no agents) 437 437 - "required": all agents, fail if none found ··· 445 445 - system_prompt_name: str - name of system prompt (for cache keys) 446 446 - user_instruction: str | None - loaded from user_prompt if provided 447 447 - extra_context: str | None - facets + now + day context 448 - - sources: dict - {"transcripts": bool, "screen": bool, "agents": bool|dict} 448 + - sources: dict - {"transcripts": bool, "percepts": bool, "agents": bool|dict} 449 449 """ 450 450 from think.utils import format_day 451 451