personal memory agent
0
fork

Configure Feed

Select the types of activity you want to include in your feed.

cogitate: per-task read-scope policy, tool budget, scope hint

Gemini's policy engine does not provide call-count limits, so cogitate now combines per-task argsPattern read scopes with a CLIRunner budget for native read tools. The generated policy files keep non-write runs constrained to each talent's observed read patterns, while the 200-call budget catches runaway tool loops of the 2026-04-26 incident class.

Talent frontmatter carries the explicit read-scope overrides so the prompt hint, generated policy, and provider invocation share one source of truth.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>

+465 -24
+2 -1
talent/awareness_tender.md
··· 5 5 "description": "Maintains identity/awareness.md — a compact situational awareness snapshot", 6 6 "schedule": "segment", 7 7 "priority": 98, 8 - "max_output_tokens": 600 8 + "max_output_tokens": 600, 9 + "read_scope": ["chronicle/<day>", "identity", "facets", "entities", "imports", "health", "stats.json"] 9 10 } 10 11 11 12 # Awareness Tender
+2 -1
talent/digest.md
··· 5 5 "description": "Synthesize a plain-English digest of who sol is and what's happening now.", 6 6 "schedule": "none", 7 7 "priority": 10, 8 - "max_output_tokens": 1000 8 + "max_output_tokens": 1000, 9 + "read_scope": ["chronicle/<day>"] 9 10 } 10 11 11 12 # Digest
+2 -1
talent/heartbeat.md
··· 4 4 "title": "Heartbeat", 5 5 "description": "Sol's periodic self-awareness — journal health, agency tending, curation scan", 6 6 "schedule": "none", 7 - "priority": 10 7 + "priority": 10, 8 + "read_scope": ["chronicle/<day>", "health", "talents", "identity", "entities", "facets", "imports"] 8 9 } 9 10 10 11 $facets
+2 -1
talent/morning_briefing.md
··· 6 6 "color": "#1565c0", 7 7 "schedule": "daily", 8 8 "priority": 50, 9 - "output": "md" 9 + "output": "md", 10 + "read_scope": ["chronicle/<day>", "facets", "entities", "imports", "health", "identity"] 10 11 } 11 12 12 13 $facets
+2 -1
talent/weekly_reflection.md
··· 4 4 "description": "Sunday-start weekly reflection synthesized from the journal", 5 5 "schedule": "weekly", 6 6 "priority": 90, 7 - "output": "md" 7 + "output": "md", 8 + "read_scope_span": 7 8 9 } 9 10 10 11 $facets
+70
tests/test_cli_provider.py
··· 98 98 assert body == "hello" 99 99 assert system == "Base system" 100 100 101 + def test_assemble_prompt_appends_read_scope_hint(self): 102 + body, system = assemble_prompt( 103 + { 104 + "prompt": "hello", 105 + "system_instruction": "Base system", 106 + "read_scope": ["chronicle/<day>"], 107 + }, 108 + sol_tool_name="run_shell_command", 109 + ) 110 + 111 + assert body == "hello" 112 + assert system is not None 113 + assert "through the `run_shell_command` tool" in system 114 + assert "Limit filesystem reads to today's segment dir" in system 115 + 101 116 102 117 # --------------------------------------------------------------------------- 103 118 # ThinkingAggregator ··· 497 512 asyncio.run(runner.run()) 498 513 499 514 assert captured_cwd == "/tmp" 515 + 516 + def test_read_tool_budget_overflow_terminates_process_group(self): 517 + events = [] 518 + callback = JSONEventCallback(events.append) 519 + aggregator = ThinkingAggregator(callback, model="test-model") 520 + stdout_lines = [ 521 + (json.dumps({"type": "tool_use", "tool_name": "read_file"}) + "\n").encode( 522 + "utf-8" 523 + ) 524 + for _ in range(201) 525 + ] 526 + process = _make_process(stdout_lines, [], 0) 527 + translated = [] 528 + 529 + def translate(event, _agg, _cb): 530 + translated.append(event) 531 + return None 532 + 533 + runner = CLIRunner( 534 + cmd=["fakecli", "--json"], 535 + prompt_text="test", 536 + translate=translate, 537 + callback=callback, 538 + aggregator=aggregator, 539 + read_call_budget=200, 540 + ) 541 + runner._terminate_process_group = AsyncMock() 542 + 543 + with ( 544 + patch( 545 + "think.providers.cli.asyncio.create_subprocess_exec", 546 + AsyncMock(return_value=process), 547 + ), 548 + patch("think.providers.cli.shutil.which", return_value="/usr/bin/fakecli"), 549 + pytest.raises(RuntimeError) as exc_info, 550 + ): 551 + asyncio.run(runner.run()) 552 + 553 + assert "tool_budget_exhausted" in str(exc_info.value) 554 + assert "(201/200)" in str(exc_info.value) 555 + 
assert len(translated) == 200 556 + exhausted = [ 557 + event for event in events if event["event"] == "tool_budget_exhausted" 558 + ] 559 + assert exhausted == [ 560 + { 561 + "event": "tool_budget_exhausted", 562 + "tool": "read_file", 563 + "budget": 200, 564 + "count": 201, 565 + "read_tools": ["read_file", "glob", "list_directory", "grep_search"], 566 + "ts": exhausted[0]["ts"], 567 + } 568 + ] 569 + runner._terminate_process_group.assert_called_once_with(process) 500 570 501 571 502 572 class TestCLIRunnerFirstEventTimeout:
+16 -8
tests/test_cogitate_coder.py
··· 147 147 return importlib.import_module("think.providers.google") 148 148 149 149 @patch("think.providers.google.CLIRunner") 150 - def test_no_write_uses_yolo_with_policy(self, mock_runner_cls): 150 + def test_no_write_uses_yolo_with_policy(self, mock_runner_cls, tmp_path): 151 151 """Without write flag, approval-mode is yolo with scoped policy.""" 152 152 provider = self._provider() 153 153 mock_instance = AsyncMock() ··· 155 155 mock_instance.cli_session_id = None 156 156 mock_runner_cls.return_value = mock_instance 157 157 158 + policy_path = tmp_path / "policy.toml" 159 + policy_path.write_text("# generated\n", encoding="utf-8") 158 160 config = {"prompt": "test", "model": "gemini-2.5-flash"} 159 - asyncio.run(provider.run_cogitate(config)) 161 + with patch( 162 + "think.providers.google.build_per_task_policy", 163 + return_value=policy_path, 164 + ) as build_policy: 165 + asyncio.run(provider.run_cogitate(config)) 160 166 161 167 cmd = mock_runner_cls.call_args.kwargs["cmd"] 162 168 idx = cmd.index("--approval-mode") 163 169 assert cmd[idx + 1] == "yolo" 164 170 policy_idx = cmd.index("--policy") 165 - assert cmd[policy_idx + 1].endswith("policies/cogitate.toml") 171 + assert cmd[policy_idx + 1] == str(policy_path) 172 + build_policy.assert_called_once() 173 + assert not policy_path.exists() 166 174 167 175 @patch("think.providers.google.CLIRunner") 168 176 def test_write_true_uses_yolo_mode(self, mock_runner_cls): ··· 181 189 assert cmd[idx + 1] == "yolo" 182 190 assert "--policy" not in cmd 183 191 184 - def test_cogitate_policy_file_exists_on_disk(self): 185 - """The policy path wired into argv must resolve to a real file.""" 186 - from think.providers.google import _COGITATE_POLICY_PATH 192 + def test_cogitate_base_policy_file_exists_on_disk(self): 193 + """The per-task policy generator's base policy must exist.""" 194 + from think.cogitate_policy import _BASE_POLICY_PATH 187 195 188 - assert _COGITATE_POLICY_PATH.is_file(), ( 189 - f"Expected policy 
file at {_COGITATE_POLICY_PATH}" 196 + assert _BASE_POLICY_PATH.is_file(), ( 197 + f"Expected policy file at {_BASE_POLICY_PATH}" 190 198 ) 191 199 192 200
+112
tests/test_cogitate_policy.py
··· 1 + # SPDX-License-Identifier: AGPL-3.0-only 2 + # Copyright (c) 2026 sol pbc 3 + 4 + from __future__ import annotations 5 + 6 + import re 7 + import stat 8 + 9 + import tomllib 10 + 11 + from think import cogitate_policy 12 + 13 + 14 + def test_resolve_read_scope_defaults_to_current_day_chronicle(): 15 + assert cogitate_policy.resolve_read_scope({}, "20260427") == ["chronicle/20260427"] 16 + 17 + 18 + def test_resolve_read_scope_expands_override_placeholders(): 19 + assert cogitate_policy.resolve_read_scope( 20 + {"read_scope": ["chronicle/<day>", "chronicle/<day-2>", "facets"]}, 21 + "20260427", 22 + ) == ["chronicle/20260427", "chronicle/20260425", "facets"] 23 + 24 + 25 + def test_resolve_read_scope_span_is_inclusive(): 26 + assert cogitate_policy.resolve_read_scope( 27 + {"read_scope_span": 2}, 28 + "20260427", 29 + ) == ["chronicle/20260425", "chronicle/20260426", "chronicle/20260427"] 30 + 31 + 32 + def test_build_per_task_policy_appends_read_tool_rules(tmp_path, monkeypatch): 33 + policy_dir = tmp_path / "policies" 34 + journal = tmp_path / "journal" 35 + journal.mkdir() 36 + base_policy = tmp_path / "base.toml" 37 + base_policy.write_text("# base\n", encoding="utf-8") 38 + monkeypatch.setattr(cogitate_policy, "_POLICY_DIR", policy_dir) 39 + monkeypatch.setenv("SOLSTONE_JOURNAL", str(journal)) 40 + 41 + path = cogitate_policy.build_per_task_policy( 42 + "morning_briefing", 43 + { 44 + "read_scope": [ 45 + "chronicle/<day>", 46 + "facets", 47 + "entities", 48 + "imports", 49 + "health", 50 + "identity", 51 + ] 52 + }, 53 + "20260427", 54 + 0, 55 + base_policy, 56 + ) 57 + 58 + assert path.parent == policy_dir 59 + assert path.name.startswith("morning_briefing-20260427-") 60 + assert stat.S_IMODE(path.stat().st_mode) == 0o600 61 + parsed = tomllib.loads(path.read_text(encoding="utf-8")) 62 + rules = parsed["rule"] 63 + assert len(rules) == 8 64 + 65 + escaped_root = re.escape(str(journal).rstrip("/") + "/") 66 + prefix = f"(?:{escaped_root}|\\./)?" 
67 + scope = "chronicle/20260427|facets|entities|imports|health|identity" 68 + expected_args = [ 69 + f'"file_path":"{prefix}(?:{scope})(?:/|")', 70 + f'"dir_path":"{prefix}(?:{scope})(?:/|")', 71 + f'"(?:pattern|path)":"{prefix}(?:{scope})(?:/|[^"]*")', 72 + f'"(?:path|dir_path|include|pattern)":"{prefix}(?:{scope})(?:/|[^"]*")', 73 + ] 74 + 75 + tools = ["read_file", "list_directory", "glob", "grep_search"] 76 + for index, tool_name in enumerate(tools): 77 + allow = rules[index * 2] 78 + deny = rules[index * 2 + 1] 79 + assert allow == { 80 + "toolName": tool_name, 81 + "argsPattern": expected_args[index], 82 + "decision": "allow", 83 + "priority": 260, 84 + } 85 + assert deny == { 86 + "toolName": tool_name, 87 + "decision": "deny", 88 + "priority": 160, 89 + } 90 + 91 + 92 + def test_build_per_task_policy_file_scope_has_exact_boundary(tmp_path, monkeypatch): 93 + policy_dir = tmp_path / "policies" 94 + journal = tmp_path / "journal" 95 + journal.mkdir() 96 + base_policy = tmp_path / "base.toml" 97 + base_policy.write_text("# base\n", encoding="utf-8") 98 + monkeypatch.setattr(cogitate_policy, "_POLICY_DIR", policy_dir) 99 + monkeypatch.setenv("SOLSTONE_JOURNAL", str(journal)) 100 + 101 + path = cogitate_policy.build_per_task_policy( 102 + "awareness_tender", 103 + {"read_scope": ["stats.json"]}, 104 + "20260427", 105 + 0, 106 + base_policy, 107 + ) 108 + 109 + rules = tomllib.loads(path.read_text(encoding="utf-8"))["rule"] 110 + pattern = rules[0]["argsPattern"] 111 + assert re.search(pattern, '{"file_path":"stats.json"}') 112 + assert not re.search(pattern, '{"file_path":"stats.jsonfoo"}')
+37 -4
tests/test_google_cli.py
··· 325 325 def __init__(self, **kwargs): 326 326 self.cmd = kwargs["cmd"] 327 327 self.prompt_text = kwargs["prompt_text"] 328 + self.kwargs = kwargs 328 329 self.cli_session_id = "test-session" 329 330 self.run = AsyncMock(return_value="result") 330 331 MockCLIRunner.last_instance = self 331 332 332 333 return MockCLIRunner 333 334 334 - def test_no_write_uses_yolo_with_policy(self): 335 + def test_no_write_uses_yolo_with_policy(self, tmp_path): 335 336 provider = _google_provider() 336 337 MockCLIRunner = self._mock_runner() 337 - with patch("think.providers.google.CLIRunner", MockCLIRunner): 338 + policy_path = tmp_path / "policy.toml" 339 + policy_path.write_text("# generated\n", encoding="utf-8") 340 + with ( 341 + patch("think.providers.google.CLIRunner", MockCLIRunner), 342 + patch( 343 + "think.providers.google.build_per_task_policy", 344 + return_value=policy_path, 345 + ) as build_policy, 346 + ): 338 347 asyncio.run( 339 348 provider.run_cogitate( 340 - {"prompt": "hello", "model": "gemini-2.5-flash"}, lambda e: None 349 + { 350 + "prompt": "hello", 351 + "model": "gemini-2.5-flash", 352 + "name": "morning_briefing", 353 + }, 354 + lambda e: None, 341 355 ) 342 356 ) 343 357 cmd = MockCLIRunner.last_instance.cmd 344 358 idx = cmd.index("--approval-mode") 345 359 assert cmd[idx + 1] == "yolo" 346 360 policy_idx = cmd.index("--policy") 347 - assert cmd[policy_idx + 1].endswith("policies/cogitate.toml") 361 + assert cmd[policy_idx + 1] == str(policy_path) 362 + build_policy.assert_called_once() 363 + assert not policy_path.exists() 348 364 prompt_text = MockCLIRunner.last_instance.prompt_text 349 365 assert "through the `run_shell_command` tool" in prompt_text 350 366 assert "Do not invent or call a tool literally named `sol`." 
in prompt_text 367 + assert MockCLIRunner.last_instance.kwargs["read_call_budget"] == 200 351 368 352 369 def test_write_mode_uses_yolo_approval(self): 353 370 runner = _assert_write_mode_uses_yolo_approval(self._mock_runner) 354 371 prompt_text = runner.prompt_text 355 372 assert "Do not invent or call a tool literally named `sol`." not in prompt_text 373 + 374 + def test_write_mode_does_not_build_policy(self, tmp_path): 375 + provider = _google_provider() 376 + MockCLIRunner = self._mock_runner() 377 + with ( 378 + patch("think.providers.google.CLIRunner", MockCLIRunner), 379 + patch("think.providers.google.build_per_task_policy") as build_policy, 380 + ): 381 + asyncio.run( 382 + provider.run_cogitate( 383 + {"prompt": "hello", "model": "gemini-2.5-flash", "write": True}, 384 + lambda e: None, 385 + ) 386 + ) 387 + assert "--policy" not in MockCLIRunner.last_instance.cmd 388 + build_policy.assert_not_called() 356 389 357 390 def test_sandbox_none(self): 358 391 provider = _google_provider()
+155
think/cogitate_policy.py
··· 1 + # SPDX-License-Identifier: AGPL-3.0-only 2 + # Copyright (c) 2026 sol pbc 3 + 4 + from __future__ import annotations 5 + 6 + import os 7 + import re 8 + from datetime import date, datetime, timedelta 9 + from pathlib import Path 10 + from typing import Any 11 + 12 + import tomllib 13 + 14 + from think.utils import get_journal 15 + 16 + _BASE_POLICY_PATH = Path(__file__).parent / "policies" / "cogitate.toml" 17 + _POLICY_DIR = Path("/tmp/sol-cogitate-policies") 18 + _READ_TOOL_RULES = ( 19 + ("read_file", '"file_path":"', "strict"), 20 + ("list_directory", '"dir_path":"', "strict"), 21 + ("glob", '"(?:pattern|path)":"', "pattern"), 22 + ("grep_search", '"(?:path|dir_path|include|pattern)":"', "pattern"), 23 + ) 24 + 25 + 26 + def _normalize_day(day: date | str) -> str: 27 + if isinstance(day, date): 28 + return day.strftime("%Y%m%d") 29 + if day: 30 + return str(day) 31 + return datetime.now().strftime("%Y%m%d") 32 + 33 + 34 + def _day_value(day: str) -> date: 35 + return datetime.strptime(day, "%Y%m%d").date() 36 + 37 + 38 + def _expand_day_placeholders(value: str, day: str) -> str: 39 + base_day = _day_value(day) 40 + 41 + def replace(match: re.Match[str]) -> str: 42 + offset = int(match.group("offset") or 0) 43 + return (base_day - timedelta(days=offset)).strftime("%Y%m%d") 44 + 45 + return re.sub(r"<day(?:-(?P<offset>\d+))?>", replace, value) 46 + 47 + 48 + def resolve_read_scope( 49 + talent_config: dict[str, Any], 50 + day: date | str, 51 + span: int = 0, 52 + ) -> list[str]: 53 + day_str = _normalize_day(day) 54 + configured_scope = talent_config.get("read_scope") 55 + if configured_scope: 56 + return [ 57 + _expand_day_placeholders(str(scope), day_str) for scope in configured_scope 58 + ] 59 + 60 + effective_span = int(talent_config.get("read_scope_span", span or 0) or 0) 61 + if effective_span <= 0: 62 + return [f"chronicle/{day_str}"] 63 + 64 + base_day = _day_value(day_str) 65 + return [ 66 + f"chronicle/{(base_day - 
timedelta(days=offset)).strftime('%Y%m%d')}" 67 + for offset in range(effective_span, -1, -1) 68 + ] 69 + 70 + 71 + def _is_file_scope(scope: str) -> bool: 72 + clean = scope.rstrip("/") 73 + # Dotted basenames are files; ambiguous entries are treated as directories. 74 + return not scope.endswith("/") and "." in Path(clean).name 75 + 76 + 77 + def _scope_body(scope: str, suffix: str) -> str: 78 + clean = scope.strip("/") 79 + escaped = re.escape(clean) 80 + return f'{escaped}"' if _is_file_scope(scope) else f"{escaped}{suffix}" 81 + 82 + 83 + def _allowed_path_regex(scope: list[str], *, mode: str) -> str: 84 + journal_prefix = re.escape(str(get_journal()).rstrip("/") + "/") 85 + prefix = f"(?:{journal_prefix}|\\./)?" 86 + suffix = '(?:/|")' if mode == "strict" else '(?:/|[^"]*")' 87 + 88 + if all(not _is_file_scope(entry) for entry in scope): 89 + joined = "|".join(re.escape(entry.strip("/")) for entry in scope) 90 + return f"{prefix}(?:{joined}){suffix}" 91 + 92 + joined = "|".join(_scope_body(entry, suffix) for entry in scope) 93 + return f"{prefix}(?:{joined})" 94 + 95 + 96 + def _append_read_rules(base_text: str, scope: list[str]) -> str: 97 + chunks = [base_text.rstrip(), ""] 98 + for tool_name, key_pattern, mode in _READ_TOOL_RULES: 99 + path_pattern = _allowed_path_regex(scope, mode=mode) 100 + chunks.extend( 101 + [ 102 + "[[rule]]", 103 + f'toolName = "{tool_name}"', 104 + f"argsPattern = '{key_pattern}{path_pattern}'", 105 + 'decision = "allow"', 106 + "priority = 260", 107 + "", 108 + "[[rule]]", 109 + f'toolName = "{tool_name}"', 110 + 'decision = "deny"', 111 + "priority = 160", 112 + "", 113 + ] 114 + ) 115 + return "\n".join(chunks) 116 + 117 + 118 + def _policy_filename(talent_name: str, day: str) -> str: 119 + talent_slug = re.sub(r"[^A-Za-z0-9_.-]+", "-", talent_name).strip("-") 120 + if not talent_slug: 121 + talent_slug = "cogitate" 122 + return f"{talent_slug}-{day}-{os.getpid()}.toml" 123 + 124 + 125 + def _write_policy(content: str, path: 
Path) -> Path: 126 + flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY 127 + fd = os.open(path, flags, 0o600) 128 + with os.fdopen(fd, "w", encoding="utf-8") as handle: 129 + handle.write(content) 130 + return path 131 + 132 + 133 + def build_per_task_policy( 134 + talent_name: str, 135 + talent_config: dict[str, Any], 136 + day: date | str, 137 + span: int, 138 + base_policy_path: Path = _BASE_POLICY_PATH, 139 + ) -> Path: 140 + day_str = _normalize_day(day) 141 + scope = resolve_read_scope(talent_config, day_str, span=span) 142 + base_text = base_policy_path.read_text(encoding="utf-8") 143 + tomllib.loads(base_text) 144 + content = _append_read_rules(base_text, scope) 145 + 146 + _POLICY_DIR.mkdir(parents=True, exist_ok=True) 147 + filename = _policy_filename(talent_name, day_str) 148 + target = _POLICY_DIR / filename 149 + for attempt in range(100): 150 + path = target if attempt == 0 else target.with_stem(f"{target.stem}-{attempt}") 151 + try: 152 + return _write_policy(content, path) 153 + except FileExistsError: 154 + continue 155 + raise FileExistsError(f"Could not create unique cogitate policy path for {target}")
+50 -3
think/providers/cli.py
··· 33 33 34 34 _QUOTA_TOKENS = ("QUOTA_EXHAUSTED", "TerminalQuotaError") 35 35 _RETRY_DELAY_RE = re.compile(r'"?retryDelayMs"?\s*[:=]\s*"?([0-9]+(?:\.[0-9]+)?)') 36 + _READ_BUDGET_TOOL_NAMES: tuple[str, ...] = ( 37 + "read_file", 38 + "glob", 39 + "list_directory", 40 + "grep_search", 41 + ) 42 + _READ_BUDGET_TOOLS: frozenset[str] = frozenset(_READ_BUDGET_TOOL_NAMES) 36 43 37 44 38 45 class QuotaExhaustedError(Exception): ··· 114 121 system_instruction = f"{system_instruction}\n\n{hint}" 115 122 else: 116 123 system_instruction = hint 124 + if config.get("read_scope"): 125 + scope_hint = ( 126 + "Limit filesystem reads to today's segment dir unless the task explicitly requires broader history. " 127 + "If you need broader scope, state what and why in your reasoning." 128 + ) 129 + if system_instruction: 130 + system_instruction = f"{system_instruction}\n\n{scope_hint}" 131 + else: 132 + system_instruction = scope_hint 117 133 return prompt_body, system_instruction 118 134 119 135 ··· 208 224 env: Optional complete environment for the subprocess (used as-is, not merged). When None, inherits os.environ. 209 225 timeout: Subprocess timeout in seconds. Default 600. 210 226 first_event_timeout: Timeout for first stdout line in seconds. Default 90. 227 + read_call_budget: Optional combined budget for native read-tool calls. 
211 228 """ 212 229 213 230 def __init__( ··· 224 241 env: dict[str, str] | None = None, 225 242 timeout: int = 600, 226 243 first_event_timeout: int = 90, 244 + read_call_budget: int | None = None, 227 245 ) -> None: 228 246 self.cmd = cmd 229 247 self.prompt_text = prompt_text ··· 237 255 self._timed_out_waiting_for_first_event = False 238 256 self._already_retried_first_event: bool = False 239 257 self._quota_error: QuotaExhaustedError | None = None 258 + self._read_call_budget = read_call_budget 259 + self._read_call_count = 0 260 + self._read_budget_tools = _READ_BUDGET_TOOLS 240 261 self.cli_session_id: str | None = None 241 262 242 263 async def run(self) -> str: ··· 490 511 if not process.stdout: 491 512 return 492 513 493 - def _process_line(raw_line: bytes) -> None: 514 + async def _process_line(raw_line: bytes) -> None: 494 515 line = raw_line.decode("utf-8", errors="replace").strip() 495 516 if not line: 496 517 return ··· 506 527 LOG.warning("Non-JSON stdout line: %s", line[:200]) 507 528 return 508 529 530 + tool_name = event_data.get("tool_name") 531 + if ( 532 + event_data.get("type") == "tool_use" 533 + and tool_name in self._read_budget_tools 534 + ): 535 + self._read_call_count += 1 536 + if ( 537 + self._read_call_budget is not None 538 + and self._read_call_count > self._read_call_budget 539 + ): 540 + self.callback.emit( 541 + { 542 + "event": "tool_budget_exhausted", 543 + "tool": tool_name, 544 + "budget": self._read_call_budget, 545 + "count": self._read_call_count, 546 + "read_tools": list(_READ_BUDGET_TOOL_NAMES), 547 + "ts": now_ms(), 548 + } 549 + ) 550 + await self._terminate_process_group(process) 551 + raise RuntimeError( 552 + "tool_budget_exhausted: read tool call budget exceeded " 553 + f"({self._read_call_count}/{self._read_call_budget})" 554 + ) 555 + 509 556 try: 510 557 session_id = self.translate(event_data, self.aggregator, self.callback) 511 558 if session_id: ··· 525 572 raise 526 573 if not first_line: 527 574 return 528 
- _process_line(first_line) 575 + await _process_line(first_line) 529 576 530 577 while True: 531 578 try: ··· 548 595 continue 549 596 if not raw_line: 550 597 break 551 - _process_line(raw_line) 598 + await _process_line(raw_line) 552 599 553 600 def _write_timeout_log( 554 601 self,
+15 -4
think/providers/google.py
··· 40 40 from google import genai 41 41 from google.genai import types 42 42 43 + from think.cogitate_policy import build_per_task_policy 43 44 from think.models import GEMINI_FLASH 44 45 from think.utils import now_ms 45 46 ··· 65 66 66 67 # Backend detection cache 67 68 _detected_backend: str | None = None 68 - 69 - _COGITATE_POLICY_PATH = Path(__file__).parent.parent / "policies" / "cogitate.toml" 70 69 71 70 72 71 def _structured_to_google_contents( ··· 778 777 model, 779 778 "--sandbox=none", 780 779 ] 780 + policy_path = None 781 781 if not config.get("write"): 782 - cmd.extend(["--policy", str(_COGITATE_POLICY_PATH)]) 782 + policy_path = build_per_task_policy( 783 + config.get("name") or "cogitate", 784 + config, 785 + config.get("day") or "", 786 + int(config.get("read_scope_span", 0) or 0), 787 + ) 788 + cmd.extend(["--policy", str(policy_path)]) 783 789 784 790 # Resume from previous session if continuing 785 791 if session_id: ··· 804 810 aggregator=aggregator, 805 811 cwd=Path(cwd_value) if cwd_value else None, 806 812 env=build_cogitate_env("google"), 813 + read_call_budget=int(config.get("read_call_budget", 200)), 807 814 ) 808 815 809 - result = await runner.run() 816 + try: 817 + result = await runner.run() 818 + finally: 819 + if policy_path is not None: 820 + policy_path.unlink(missing_ok=True) 810 821 811 822 # Emit finish event (CLIRunner does not emit one) 812 823 finish_event: dict[str, Any] = {