I've harnessed the harness
0
fork

Configure Feed

Select the types of activity you want to include in your feed.

add auto model support

dawn da2b58ac e540f820

+211 -37
+1
.gitignore
··· 1 1 /target 2 2 /.direnv 3 3 /agent.db 4 + /.codex
+66 -21
klbr-core/src/agent.rs
··· 59 59 Interrupt::UserMessage(ref text) => { 60 60 let source = interrupt.source_tag().to_string(); 61 61 62 - let memories: Vec<String> = llm 63 - .embed(text) 64 - .await 65 - .ok() 66 - .and_then(|emb| { 67 - memory 68 - .recall(Some(&emb), &[], false, config.memory_top_k) 69 - .ok() 70 - }) 71 - .unwrap_or_default() 72 - .into_iter() 73 - .filter(|e| e.distance.unwrap_or(f32::MAX) < config.memory_sim_threshold) 74 - .map(|e| e.content) 75 - .collect(); 62 + let memories: Vec<String> = match llm.embed(text).await { 63 + Ok(emb) => match memory.recall(Some(&emb), &[], false, config.memory_top_k) { 64 + Ok(results) => results 65 + .into_iter() 66 + .filter(|e| { 67 + e.distance.unwrap_or(f32::MAX) < config.memory_sim_threshold 68 + }) 69 + .map(|e| e.content) 70 + .collect(), 71 + Err(e) => { 72 + let msg = format!("memory recall failed: {e}"); 73 + tracing::error!(%msg); 74 + let _ = output.send(AgentEvent::Error(msg)); 75 + vec![] 76 + } 77 + }, 78 + Err(e) => { 79 + let msg = format!("query embedding failed: {e}"); 80 + tracing::error!(%msg); 81 + let _ = output.send(AgentEvent::Error(msg)); 82 + vec![] 83 + } 84 + }; 76 85 77 86 if !memories.is_empty() { 78 87 let _ = output.send(AgentEvent::Status(format!( ··· 101 110 let llm2 = llm.clone(); 102 111 let msgs = ctx.as_messages(); 103 112 let defs = registry.definitions(); 104 - tokio::spawn(async move { 105 - let _ = llm2.stream(&msgs, &defs, tok_tx).await; 106 - }); 113 + let stream_task = tokio::spawn(async move { llm2.stream(&msgs, &defs, tok_tx).await }); 107 114 108 115 let mut response = String::new(); 109 116 let mut thinking = String::new(); 110 117 let mut tool_calls = vec![]; 118 + let mut stream_error = None::<String>; 111 119 112 120 while let Some(ev) = tok_rx.recv().await { 113 121 match ev { ··· 128 136 } 129 137 } 130 138 } 139 + match stream_task.await { 140 + Ok(Ok(())) => {} 141 + Ok(Err(e)) => { 142 + let msg = format!("llm stream failed: {e}"); 143 + tracing::error!(%msg); 144 + let _ = 
output.send(AgentEvent::Error(msg.clone())); 145 + let _ = output.send(AgentEvent::Status("llm stream failed".into())); 146 + stream_error = Some(msg); 147 + } 148 + Err(e) => { 149 + let msg = format!("llm stream task panicked/cancelled: {e}"); 150 + tracing::error!(%msg); 151 + let _ = output.send(AgentEvent::Error(msg.clone())); 152 + let _ = output.send(AgentEvent::Status("llm stream task failed".into())); 153 + stream_error = Some(msg); 154 + } 155 + } 131 156 132 157 if !tool_calls.is_empty() && tool_iterations < MAX_TOOL_ITERATIONS { 133 158 tool_iterations += 1; ··· 156 181 ctx.push_tool_result(&call.id, &result); 157 182 } 158 183 continue; 184 + } 185 + 186 + if stream_error.is_some() && response.is_empty() && tool_calls.is_empty() { 187 + let _ = output.send(AgentEvent::Done); 188 + break; 159 189 } 160 190 161 191 // plain text response (or tool limit hit) — wrap up the turn ··· 314 344 let llm2 = tool_ctx.llm.clone(); 315 345 let msgs_snap = msgs.clone(); 316 346 let defs_snap = reflect_registry.definitions(); 317 - tokio::spawn(async move { 318 - let _ = llm2.stream(&msgs_snap, &defs_snap, tok_tx).await; 319 - }); 347 + let stream_task = 348 + tokio::spawn(async move { llm2.stream(&msgs_snap, &defs_snap, tok_tx).await }); 320 349 321 350 let mut tool_calls = vec![]; 351 + let mut stream_failed = false; 322 352 while let Some(ev) = tok_rx.recv().await { 323 353 match ev { 324 354 LlmEvent::ToolCalls(calls) => tool_calls = calls, ··· 331 361 _ => {} 332 362 } 333 363 } 364 + match stream_task.await { 365 + Ok(Ok(())) => {} 366 + Ok(Err(e)) => { 367 + let msg = format!("reflection llm stream failed: {e}"); 368 + tracing::error!(%msg); 369 + let _ = output.send(AgentEvent::Error(msg)); 370 + stream_failed = true; 371 + } 372 + Err(e) => { 373 + let msg = format!("reflection llm stream task panicked/cancelled: {e}"); 374 + tracing::error!(%msg); 375 + let _ = output.send(AgentEvent::Error(msg)); 376 + stream_failed = true; 377 + } 378 + } 334 379 335 - if 
tool_calls.is_empty() { 380 + if tool_calls.is_empty() || stream_failed { 336 381 break; 337 382 } 338 383
+9 -7
klbr-core/src/config.rs
··· 2 2 pub struct Config { 3 3 /// llama-server base url 4 4 pub llm_url: String, 5 - /// embedding server base url 5 + /// embedding server base url (typically a separate process from chat model serving) 6 6 pub embed_url: String, 7 + /// chat/completions model id. set to "auto" to pick from `GET /v1/models`. 7 8 pub llm_model: String, 9 + /// embeddings model id. keep this explicit to avoid accidental model/dimension drift. 8 10 pub embed_model: String, 9 11 pub watermark_tokens: usize, 10 12 /// how many recent turns to preserve during compaction ··· 12 14 /// memories to inject per turn 13 15 pub memory_top_k: usize, 14 16 /// cosine distance cutoff — only inject memories below this (0=identical, 2=opposite). 15 - /// 0.3 ≈ cosine similarity ≥ 0.7, a reasonable bar for nomic-embed. 17 + /// 0.3 ≈ cosine similarity ≥ 0.7, a reasonable default for bge-m3. 16 18 pub memory_sim_threshold: f32, 17 19 pub db_path: String, 18 20 pub anchor: String, ··· 51 53 impl Default for Config { 52 54 fn default() -> Self { 53 55 Self { 54 - llm_url: "http://localhost:1234".into(), 55 - embed_url: "http://localhost:1234".into(), 56 - llm_model: "google/gemma-4-26b-a4b".into(), 57 - embed_model: "nomic-embed-text-v1.5".into(), 56 + llm_url: "http://localhost:8001".into(), 57 + embed_url: "http://localhost:8002".into(), 58 + llm_model: "auto".into(), 59 + embed_model: "auto".into(), 58 60 watermark_tokens: 32_000, 59 61 compaction_keep: 10, 60 62 memory_top_k: 3, 61 63 memory_sim_threshold: 0.3, 62 64 db_path: "agent.db".into(), 63 65 anchor: ANCHOR.into(), 64 - embed_dim: 768, 66 + embed_dim: 1024, 65 67 } 66 68 } 67 69 }
+1
klbr-core/src/lib.rs
··· 28 28 ReflectStarted, 29 29 /// reflection loop finished 30 30 ReflectDone, 31 + Error(String), 31 32 Status(String), 32 33 Metrics(AgentMetrics), 33 34 ToolCall {
+118 -8
klbr-core/src/llm.rs
··· 157 157 } 158 158 } 159 159 160 + fn is_auto_model(name: &str) -> bool { 161 + let normalized = name.trim(); 162 + normalized.is_empty() || normalized.eq_ignore_ascii_case("auto") 163 + } 164 + 165 + fn endpoint(base_url: &str, path: &str) -> String { 166 + format!("{}/{}", base_url.trim_end_matches('/'), path) 167 + } 168 + 169 + async fn fetch_models(&self, base_url: &str) -> Result<Vec<String>> { 170 + let url = Self::endpoint(base_url, "v1/models"); 171 + let v = self 172 + .client 173 + .get(url) 174 + .send() 175 + .await? 176 + .error_for_status()? 177 + .json::<Value>() 178 + .await?; 179 + 180 + let ids = v["data"] 181 + .as_array() 182 + .map(|arr| { 183 + arr.iter() 184 + .filter_map(|m| m["id"].as_str().map(ToString::to_string)) 185 + .collect::<Vec<_>>() 186 + }) 187 + .unwrap_or_default(); 188 + Ok(ids) 189 + } 190 + 191 + fn looks_like_embedding_model(id: &str) -> bool { 192 + let n = id.to_ascii_lowercase(); 193 + [ 194 + "embed", 195 + "embedding", 196 + "text-embedding", 197 + "nomic", 198 + "bge", 199 + "e5", 200 + "gte", 201 + "mxbai", 202 + "minilm", 203 + "jina-emb", 204 + ] 205 + .iter() 206 + .any(|needle| n.contains(needle)) 207 + } 208 + 209 + fn choose_auto_model(candidates: &[String], for_embedding: bool) -> Option<String> { 210 + if candidates.is_empty() { 211 + return None; 212 + } 213 + 214 + if for_embedding { 215 + if let Some(id) = candidates 216 + .iter() 217 + .find(|id| Self::looks_like_embedding_model(id)) 218 + { 219 + return Some(id.clone()); 220 + } 221 + } else if let Some(id) = candidates 222 + .iter() 223 + .find(|id| !Self::looks_like_embedding_model(id)) 224 + { 225 + return Some(id.clone()); 226 + } 227 + 228 + Some(candidates[0].clone()) 229 + } 230 + 231 + async fn resolve_model( 232 + &self, 233 + base_url: &str, 234 + configured: &str, 235 + for_embedding: bool, 236 + ) -> Result<String> { 237 + if !Self::is_auto_model(configured) { 238 + return Ok(configured.to_string()); 239 + } 240 + 241 + let models = 
self.fetch_models(base_url).await?; 242 + let selected = Self::choose_auto_model(&models, for_embedding) 243 + .ok_or_else(|| anyhow::anyhow!("no models returned from {base_url}/v1/models"))?; 244 + Ok(selected) 245 + } 246 + 160 247 pub async fn stream( 161 248 &self, 162 249 messages: &[Message], 163 250 tools: &[ToolDef], 164 251 tok_tx: mpsc::Sender<LlmEvent>, 165 252 ) -> Result<()> { 253 + let chat_model = self 254 + .resolve_model(&self.config.llm_url, &self.config.llm_model, false) 255 + .await?; 256 + 166 257 let mut body = json!({ 167 - "model": self.config.llm_model, 258 + "model": chat_model, 168 259 "messages": messages, 169 260 "stream": true, 170 261 "stream_options": { "include_usage": true } ··· 176 267 177 268 let mut res = self 178 269 .client 179 - .post(format!("{}/v1/chat/completions", self.config.llm_url)) 270 + .post(Self::endpoint(&self.config.llm_url, "v1/chat/completions")) 180 271 .json(&body) 181 272 .send() 182 273 .await? ··· 280 371 281 372 /// non-streaming completion, used for compaction summaries (no tools) 282 373 pub async fn complete(&self, messages: &[Message]) -> Result<(String, Usage)> { 374 + let chat_model = self 375 + .resolve_model(&self.config.llm_url, &self.config.llm_model, false) 376 + .await?; 377 + 283 378 let body = json!({ 284 - "model": self.config.llm_model, 379 + "model": chat_model, 285 380 "messages": messages, 286 381 "stream": false, 287 382 }); 288 383 let v = self 289 384 .client 290 - .post(format!("{}/v1/chat/completions", self.config.llm_url)) 385 + .post(Self::endpoint(&self.config.llm_url, "v1/chat/completions")) 291 386 .json(&body) 292 387 .send() 293 388 .await? 
··· 302 397 } 303 398 304 399 pub async fn embed(&self, text: &str) -> Result<Vec<f32>> { 305 - let body = json!({ "model": self.config.embed_model, "input": text }); 400 + let embed_model = self 401 + .resolve_model(&self.config.embed_url, &self.config.embed_model, true) 402 + .await?; 403 + 404 + let body = json!({ "model": embed_model, "input": text }); 306 405 let v = self 307 406 .client 308 - .post(format!("{}/v1/embeddings", self.config.embed_url)) 407 + .post(Self::endpoint(&self.config.embed_url, "v1/embeddings")) 309 408 .json(&body) 310 409 .send() 311 410 .await? 312 411 .json::<Value>() 313 412 .await?; 314 - v["data"][0]["embedding"] 413 + let embedding: Vec<f32> = v["data"][0]["embedding"] 315 414 .as_array() 316 415 .ok_or_else(|| anyhow::anyhow!("no embedding in response"))? 317 416 .iter() 318 417 .map(|x| Ok(x.as_f64().unwrap_or(0.0) as f32)) 319 - .collect() 418 + .collect::<Result<Vec<f32>>>()?; 419 + 420 + if embedding.len() != self.config.embed_dim { 421 + return Err(anyhow::anyhow!( 422 + "embedding dimension mismatch: got {}, expected {} (model: {})", 423 + embedding.len(), 424 + self.config.embed_dim, 425 + embed_model 426 + )); 427 + } 428 + 429 + Ok(embedding) 320 430 } 321 431 }
+4
klbr-daemon/src/daemon.rs
··· 155 155 AgentEvent::Done => ServerMsg::Done, 156 156 AgentEvent::ReflectStarted => ServerMsg::ReflectStarted, 157 157 AgentEvent::ReflectDone => ServerMsg::ReflectDone, 158 + AgentEvent::Error(content) => { 159 + tracing::error!(%content, "agent error"); 160 + ServerMsg::Error { content } 161 + } 158 162 AgentEvent::Status(content) => ServerMsg::Status { content }, 159 163 AgentEvent::Metrics(m) => ServerMsg::Metrics { 160 164 turn_count: m.turn_count,
+3
klbr-ipc/src/lib.rs
··· 36 36 Done, 37 37 ReflectStarted, 38 38 ReflectDone, 39 + Error { 40 + content: String, 41 + }, 39 42 Status { 40 43 content: String, 41 44 },
+9 -1
klbr-tui/src/main.rs
··· 826 826 *step = AssistantStep::Done; 827 827 } 828 828 } 829 - app.status.clear(); 829 + if !app.status.starts_with("error:") { 830 + app.status.clear(); 831 + } 830 832 app.stream_start = None; 833 + } 834 + ServerMsg::Error { content } => { 835 + app.status = format!("error: {content}"); 836 + app.history 837 + .push(ChatMsg::system(format!("error: {content}"))); 838 + app.snap_to_bottom(); 831 839 } 832 840 ServerMsg::Status { content } => { 833 841 app.status = content;