A local-first, private AI assistant for everyday use. It runs on-device models with encrypted P2P sync and supports sharing chats publicly on ATProto.
10
fork

Configure Feed

Select the types of activity you want to include in your feed.

feat: add sync support for sessions

+181 -92
+23 -20
pkg/build.sh
··· 1 1 #!/usr/bin/env bash 2 - 3 2 set -euo pipefail 4 3 5 - VERSION=$(grep '^version' tiles/Cargo.toml | head -1 | awk -F'"' '{print $2}') 6 - 7 - TARGET="release" 4 + BINARY_NAME="tiles" 5 + DIST_DIR="dist" 8 6 MODELFILE_DIR="modelfiles" 9 7 SERVER_DIR="server" 10 - BINARY_NAME="tiles" 11 - MODELS_DIR="models" 8 + TARGET="release" 9 + 12 10 VERSION=$(grep '^version' tiles/Cargo.toml | head -1 | awk -F'"' '{print $2}') 13 11 OS=$(uname -s | tr '[:upper:]' '[:lower:]') 14 12 ARCH=$(uname -m) 13 + 15 14 OUT_NAME="${BINARY_NAME}-v${VERSION}-${ARCH}-${OS}" 16 15 17 16 echo "🚀 Building ${BINARY_NAME} (${TARGET} mode)..." 18 17 19 18 cargo build -p tiles --${TARGET} 20 19 21 - CLI_BIN_PATH="pkgroot/usr/local/bin" 22 - LIBS_PATH="pkgroot/usr/local/share/tiles" 20 + CLI_BIN_PATH="target/${TARGET}/${BINARY_NAME}" 23 21 24 - # CLI binary install path 22 + PKG_CLI_BIN_PATH="pkgroot/usr/local/bin" 23 + 24 + PKG_LIBS_PATH="pkgroot/usr/local/share/tiles" 25 25 26 + # CLI binary pkg install path 26 27 mkdir -p "${CLI_BIN_PATH}" 27 28 28 - mkdir -p "${LIBS_PATH}" 29 + # Other libs pkg install path 30 + mkdir -p "${PKG_LIBS_PATH}" 29 31 30 32 # move cli to bin path 31 33 32 - cp "target/${TARGET}/${BINARY_NAME}" "${CLI_BIN_PATH}" 33 - chmod +x "${CLI_BIN_PATH}/tiles" 34 + cp "${CLI_BIN_PATH}" "${PKG_CLI_BIN_PATH}" 35 + 36 + chmod +x "${PKG_CLI_BIN_PATH}/tiles" 34 37 35 38 # Signing the tiles binary 36 39 codesign --force \ ··· 38 41 --options runtime \ 39 42 --timestamp \ 40 43 --strict \ 41 - "${CLI_BIN_PATH}/tiles" 44 + "${PKG_CLI_BIN_PATH}/tiles" 42 45 43 46 # Build venvstack and move to /usr/local/share/tiles 44 47 # ··· 58 61 59 62 venvstacks publish --tag-outputs --output-dir ../stack_export_prod server/stack/venvstacks.toml 60 63 61 - cp -r "${SERVER_DIR}" "${LIBS_PATH}" 64 + cp -r "${SERVER_DIR}" "${PKG_LIBS_PATH}" 62 65 63 - rm -rf "${LIBS_PATH}/server/__pycache__" 64 - rm -rf "${LIBS_PATH}/server/mem_agent/__pycache__" 65 - rm -rf 
"${LIBS_PATH}/server/backend/__pycache__" 66 - rm -rf "${LIBS_PATH}/server/.venv" 67 - rm -rf "${LIBS_PATH}/server/stack" 66 + rm -rf "${PKG_LIBS_PATH}/server/__pycache__" 67 + rm -rf "${PKG_LIBS_PATH}/server/mem_agent/__pycache__" 68 + rm -rf "${PKG_LIBS_PATH}/server/backend/__pycache__" 69 + rm -rf "${PKG_LIBS_PATH}/server/.venv" 70 + rm -rf "${PKG_LIBS_PATH}/server/stack" 68 71 69 - cp -r "${MODELFILE_DIR}" "${LIBS_PATH}" 72 + cp -r "${MODELFILE_DIR}" "${PKG_LIBS_PATH}" 70 73 71 74 72 75 # Creating .pkg
+57 -20
scripts/bundler.sh
··· 2 2 set -euo pipefail 3 3 4 4 BINARY_NAME="tiles" 5 + # Folder where final tar.gz will be created 5 6 DIST_DIR="dist" 7 + # Folder where we store the modelfiles, which will be copied to installer 6 8 MODELFILE_DIR="modelfiles" 9 + # Py server folder, which will be copied to installer 7 10 SERVER_DIR="server" 11 + # cargo build mode for production 8 12 TARGET="release" 9 13 14 + # Fetching the tiles binary version from its cargo.toml version 10 15 VERSION=$(grep '^version' tiles/Cargo.toml | head -1 | awk -F'"' '{print $2}') 11 16 OS=$(uname -s | tr '[:upper:]' '[:lower:]') 12 17 ARCH=$(uname -m) 18 + 19 + # Final tar.gz name 13 20 OUT_NAME="${BINARY_NAME}-v${VERSION}-${ARCH}-${OS}" 14 21 15 22 echo "🚀 Building ${BINARY_NAME} (${TARGET} mode)..." 16 23 17 24 cargo build -p tiles --${TARGET} 18 25 26 + 27 + # Destination where the release build is generated 19 28 CLI_BIN_PATH="target/${TARGET}/${BINARY_NAME}" 20 29 21 30 chmod +x "${CLI_BIN_PATH}" 22 31 23 - # Signing the tiles binary 24 - codesign --force \ 25 - --sign "$DEVELOPER_ID_APPLICATION"\ 26 - --options runtime \ 27 - --timestamp \ 28 - --strict \ 29 - "${CLI_BIN_PATH}" 32 + # echo "Signing the Tiles binary..." 33 + 34 + # # Signing the tiles binary 35 + # codesign --force \ 36 + # --sign "$DEVELOPER_ID_APPLICATION"\ 37 + # --options runtime \ 38 + # --timestamp \ 39 + # --strict \ 40 + # "${CLI_BIN_PATH}" 41 + 42 + # # echo "Notarizing Tiles binary..." 
30 43 31 - # notarizing 32 - xcrun notarytool submit --force "${CLI_BIN_PATH}" \ 33 - --keychain-profile "tiles-notary-profile" \ 34 - --wait 44 + # # notarizing the tiles binary 45 + # xcrun notarytool submit --force "${CLI_BIN_PATH}" \ 46 + # --keychain-profile "tiles-notary-profile" \ 47 + # --wait 35 48 36 - # rm -rf "${DIST_DIR}" 37 49 38 50 mkdir -p "${DIST_DIR}/tmp" 39 51 40 52 cp "${CLI_BIN_PATH}" "${DIST_DIR}/tmp/" 41 53 42 - # copying pi binary 54 + echo "Embedding Pi" 55 + # Copying pi artifacts into extracted pi folder 43 56 cp pi-darwin-arm64.tar.gz "${DIST_DIR}/tmp/" 44 57 58 + tar -xvf "${DIST_DIR}/tmp/pi-darwin-arm64.tar.gz" -C "${DIST_DIR}/tmp" 59 + 60 + rm "${DIST_DIR}/tmp/pi-darwin-arm64.tar.gz" 61 + 62 + # removing unnecessary files 63 + rm -rf "${DIST_DIR}/tmp/pi/examples" 64 + 65 + # Signing the pi binary 66 + # codesign --force \ 67 + # --sign "$DEVELOPER_ID_APPLICATION"\ 68 + # --options runtime \ 69 + # --timestamp \ 70 + # --strict \ 71 + # "${DIST_DIR}/tmp/pi/pi" 72 + 73 + # echo "Notarizing Pibinary..." 74 + 75 + # # notarizing the pi binary 76 + # xcrun notarytool submit --force "${DIST_DIR}/tmp/pi/pi" \ 77 + # --keychain-profile "tiles-notary-profile" \ 78 + # --wait 79 + 45 80 # flushing this folder, else the final zip will have previous app-server zips too (#84) 46 - rm -rf "${SERVER_DIR}/stack_export_prod" 81 + # rm -rf "${SERVER_DIR}/stack_export_prod" 47 82 48 - echo "🔒 Locking the venvstack...." 83 + # echo "🔒 Locking the venvstack...." 49 84 50 - venvstacks lock server/stack/venvstacks.toml 85 + # venvstacks lock server/stack/venvstacks.toml 51 86 52 - echo "🛠️ Building the venvstack...." 87 + # echo "🛠️ Building the venvstack...." 53 88 54 - venvstacks build server/stack/venvstacks.toml 89 + # venvstacks build server/stack/venvstacks.toml 55 90 56 - echo "📦 Publishing the venvstack...." 91 + # echo "📦 Publishing the venvstack...." 
57 92 58 - venvstacks publish --tag-outputs --output-dir ../stack_export_prod server/stack/venvstacks.toml 93 + # venvstacks publish --tag-outputs --output-dir ../stack_export_prod server/stack/venvstacks.toml 59 94 60 95 cp -r "${SERVER_DIR}" "${DIST_DIR}/tmp/" 61 96 62 97 rm -rf "${DIST_DIR}/tmp/server/__pycache__" 98 + rm -rf "${DIST_DIR}/tmp/server/mem_agent/__pycache__" 99 + rm -rf "${DIST_DIR}/tmp/server/backend/__pycache__" 63 100 rm -rf "${DIST_DIR}/tmp/server/.venv" 64 101 rm -rf "${DIST_DIR}/tmp/server/stack" 65 102
+2 -2
scripts/install.sh
··· 3 3 4 4 ENV="prod" # prod is another env, try taking it from github env 5 5 REPO="tilesprivacy/tiles" 6 - # VERSION=$(grep '^version' tiles/Cargo.toml | head -1 | awk -F'"' '{print $2}') 7 6 8 - VERSION="0.4.7" 7 + VERSION="0.4.8" 8 + 9 9 INSTALL_DIR="/usr/local/bin" # CLI install location 10 10 11 11 SERVER_DIR="/usr/local/share/tiles/server" # Python server folder
+98 -34
tiles/src/core/chats.rs
··· 3 3 //! Stuff related to chats with the models 4 4 //! 5 5 6 + use std::collections::HashMap; 6 7 use std::str::FromStr; 7 8 8 9 use crate::core::accounts::User; ··· 37 38 pub content: String, 38 39 } 39 40 40 - #[derive(Debug, serde::Serialize, serde::Deserialize)] 41 + #[derive(Debug, serde::Serialize, serde::Deserialize, Clone)] 41 42 pub struct Chats { 42 43 pub id: String, 43 44 content: String, ··· 54 55 session_id: String, 55 56 } 56 57 57 - #[derive(Debug, serde::Serialize, serde::Deserialize)] 58 + #[derive(Debug, serde::Serialize, serde::Deserialize, Clone)] 58 59 pub struct Session { 59 60 pub id: String, 60 61 name: String, ··· 79 80 }, 80 81 } 81 82 83 + #[derive(serde::Serialize, serde::Deserialize, Clone)] 84 + pub struct DeltaChat { 85 + pub chats: Vec<Chats>, 86 + pub sessions: Vec<Session>, 87 + } 88 + 82 89 pub fn save_chat(conn: &Connection, user: &User, chat_resp: ChatResponse) -> Result<Chats> { 83 90 let row_counter = get_last_row_counter(conn, &user.user_id)?; 84 91 let chat = Chats { ··· 128 135 Err(err) => Err(<rusqlite::Error as Into<anyhow::Error>>::into(err)), 129 136 } 130 137 } 131 - /// Return list of rows for the given `user_id` since `last_row_counter` 132 - pub fn get_delta(conn: &Connection, user_id: &str, last_row_couter: i64) -> Result<Vec<Chats>> { 138 + /// Return a Delta of chats and sessions for the given `user_id` since `last_row_counter` 139 + pub fn get_delta(conn: &Connection, user_id: &str, last_row_couter: i64) -> Result<DeltaChat> { 140 + let mut session_map: HashMap<String, Session> = HashMap::new(); 141 + 133 142 let mut stmt = conn.prepare("select id, user_id, content, resp_id, role, context_id, created_at, updated_at , row_counter, session_id from chats where user_id = ?1 and row_counter > ?2 order by id")?; 134 143 135 144 let chat_rows = stmt.query_map(params![user_id, last_row_couter], |row| { ··· 139 148 let updated_at: f64 = row.get(7)?; 140 149 let resp_id: Option<String> = row.get(3)?; 141 150 let 
ctx_id = row.get(5)?; 151 + let session_id: String = row.get(9)?; 152 + 153 + if session_id.len() > 0 && !session_map.contains_key(&session_id) { 154 + // lets fetch the session details 155 + match fetch_session(conn, &session_id) { 156 + Ok(session) => { 157 + // lets add to the map 158 + session_map.insert(session_id.clone(), session); 159 + } 160 + Err(err) => { 161 + warn!("Fetching session {} failed due to {:?}", &session_id, err); 162 + } 163 + } 164 + } 142 165 Ok(Chats { 143 166 id, 144 167 content: row.get(2)?, ··· 149 172 created_at: created_at as u64, 150 173 updated_at: updated_at as u64, 151 174 row_counter: row.get(8)?, 152 - session_id: row.get(9)?, 175 + session_id, 153 176 }) 154 177 })?; 155 178 ··· 159 182 chats.push(chat?); 160 183 } 161 184 162 - Ok(chats) 185 + let sessions: Vec<Session> = session_map.into_values().collect(); 186 + 187 + Ok(DeltaChat { 188 + chats: chats, 189 + sessions: sessions, 190 + }) 163 191 } 164 192 165 - pub fn apply_delta(chat_conn: &mut Connection, delta_chats: &Vec<Chats>) -> Result<()> { 193 + pub fn apply_delta(chat_conn: &mut Connection, delta_chats: DeltaChat) -> Result<()> { 166 194 // TODO: Handle primary key conflict, for now reject it (in a way its impossible to have this scenario, and if its occuring then that means 167 195 // some issue in syncing, so ignore it, by rejecting it), later 168 196 // do LWW based on issuer of UCAN 169 197 // 170 198 199 + let chats = delta_chats.chats; 200 + let sessions = delta_chats.sessions; 171 201 let txn = chat_conn.transaction()?; 172 202 { 173 203 let mut stmt = txn.prepare("insert into chats(id, user_id, content, resp_id, role, context_id, created_at, updated_at, row_counter, session_id) values (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10)")?; 174 204 175 - for chat in delta_chats { 205 + for chat in chats { 176 206 match stmt.execute(params![ 177 207 &chat.id.to_string(), 178 208 &chat.user_id, ··· 206 236 Ok(_) => (), 207 237 } 208 238 } 239 + 240 + // session metadata 
sync 241 + let mut session_stmt = txn.prepare( 242 + "insert into sessions(id, name, created_at, creator_id) values (?1, ?2, ?3, ?4)", 243 + )?; 244 + 245 + for session in sessions { 246 + match session_stmt.execute(params![ 247 + &session.id.to_string(), 248 + &session.name, 249 + &session.created_at.to_string(), 250 + &session.creator_id 251 + ]) { 252 + Err(rusqlite::Error::SqliteFailure(_, Some(reason))) 253 + if reason == "UNIQUE constraint failed: sessions.id" => 254 + { 255 + log::error!( 256 + "err in writing row {:?}, already exists, skipping", 257 + &session.id 258 + ); 259 + } 260 + // NOTE: If any other error occurs and write failed we abort the sync, so the the row_counter doesn't get skipped. 261 + // use RUST_LOG=error tiles to debug the issue 262 + Err(err) => { 263 + log::error!( 264 + "err in writing row due to {:?}, Aborting the sync during session sync....", 265 + err 266 + ); 267 + break; 268 + } 269 + 270 + Ok(_) => (), 271 + } 272 + } 209 273 } 210 274 txn.commit()?; 211 275 ··· 245 309 } 246 310 SyncOp::ApplyDelta { delta, resp } => { 247 311 let chat_rows = decode_delta_from_bytes(&delta)?; 248 - let apply_res = apply_delta(&mut chat_db_conn, &chat_rows); 312 + let apply_res = apply_delta(&mut chat_db_conn, chat_rows); 249 313 resp.send(apply_res) 250 314 .map_err(|_| anyhow!("Error sending apply delta response"))?; 251 315 } ··· 299 363 )?; 300 364 Ok(sesh) 301 365 } 302 - fn encode_delta_to_bytes(delta_chats: &Vec<Chats>) -> Vec<u8> { 366 + fn encode_delta_to_bytes(delta_chats: &DeltaChat) -> Vec<u8> { 303 367 postcard::to_stdvec(delta_chats).expect("Failed to convert to bytes with postcard") 304 368 } 305 369 306 - fn decode_delta_from_bytes(bytes: &[u8]) -> Result<Vec<Chats>> { 370 + fn decode_delta_from_bytes(bytes: &[u8]) -> Result<DeltaChat> { 307 371 postcard::from_bytes(bytes).map_err(Into::into) 308 372 } 309 373 ··· 495 559 let _ = save_chat(&conn, &user, chat_response.clone()).expect("chat should be saved"); 496 560 let _ = 
save_chat(&conn, &user, chat_response.clone()).expect("chat should be saved"); 497 561 498 - let rows = get_delta(&conn, &user.user_id, chat_1.row_counter).unwrap(); 499 - assert_eq!(rows.len(), 3); 562 + let delta = get_delta(&conn, &user.user_id, chat_1.row_counter).unwrap(); 563 + assert_eq!(delta.chats.len(), 3); 500 564 } 501 565 502 566 #[test] ··· 519 583 let _ = save_chat(&conn, &user, chat_response.clone()).expect("chat should be saved"); 520 584 521 585 let rows = get_delta(&conn, &user.user_id, 0).unwrap(); 522 - assert_eq!(rows.len(), 4); 586 + assert_eq!(rows.chats.len(), 4); 523 587 } 524 588 525 589 #[test] ··· 542 606 let _ = save_chat(&conn, &user, chat_response.clone()).expect("chat should be saved"); 543 607 544 608 let rows = get_delta(&conn, "", 0).unwrap(); 545 - assert_eq!(rows.len(), 0); 609 + assert_eq!(rows.chats.len(), 0); 546 610 } 547 611 548 612 #[test] ··· 566 630 let _ = save_chat(&conn, &user, chat_response.clone()).expect("chat should be saved"); 567 631 568 632 let rows = get_delta(&conn, &user.user_id, 0).unwrap(); 569 - assert_eq!(rows.len(), 4); 570 - assert!(apply_delta(&mut conn_2, &rows).is_ok()); 633 + assert_eq!(rows.chats.len(), 4); 634 + assert!(apply_delta(&mut conn_2, rows).is_ok()); 571 635 let rows = get_delta(&conn_2, &user.user_id, 0).unwrap(); 572 - assert_eq!(rows.len(), 4); 636 + assert_eq!(rows.chats.len(), 4); 573 637 } 574 638 575 639 #[test] ··· 593 657 let _ = save_chat(&conn, &user, chat_response.clone()).expect("chat should be saved"); 594 658 595 659 let rows = get_delta(&conn, &user.user_id, 0).unwrap(); 596 - assert_eq!(rows.len(), 4); 660 + assert_eq!(rows.chats.len(), 4); 597 661 let chat_bytes = encode_delta_to_bytes(&rows); 598 662 let decoded_chat = decode_delta_from_bytes(&chat_bytes).unwrap(); 599 - assert!(apply_delta(&mut conn_2, &decoded_chat).is_ok()); 663 + assert!(apply_delta(&mut conn_2, decoded_chat).is_ok()); 600 664 let rows = get_delta(&conn_2, &user.user_id, 0).unwrap(); 601 - 
assert_eq!(rows.len(), 4); 665 + assert_eq!(rows.chats.len(), 4); 602 666 } 603 667 604 668 #[test] ··· 622 686 let _ = save_chat(&conn, &user, chat_response.clone()).expect("chat should be saved"); 623 687 624 688 let rows = get_delta(&conn, &user.user_id, 4).unwrap(); 625 - assert_eq!(rows.len(), 0); 689 + assert_eq!(rows.chats.len(), 0); 626 690 let chat_bytes = encode_delta_to_bytes(&rows); 627 691 let decoded_chat = decode_delta_from_bytes(&chat_bytes).unwrap(); 628 - assert!(apply_delta(&mut conn_2, &decoded_chat).is_ok()); 692 + assert!(apply_delta(&mut conn_2, decoded_chat).is_ok()); 629 693 let rows = get_delta(&conn_2, &user.user_id, 0).unwrap(); 630 - assert_eq!(rows.len(), 0); 694 + assert_eq!(rows.chats.len(), 0); 631 695 } 632 696 633 697 #[test] ··· 650 714 let _ = save_chat(&conn, &user, chat_response.clone()).expect("chat should be saved"); 651 715 let _ = save_chat(&conn, &user, chat_response.clone()).expect("chat should be saved"); 652 716 let rows = get_delta(&conn, &user.user_id, chat_1.row_counter).unwrap(); 653 - assert_eq!(rows.len(), 3); 717 + assert_eq!(rows.chats.len(), 3); 654 718 } 655 719 656 720 #[test] ··· 675 739 let _ = save_chat(&conn, &user, chat_response.clone()).expect("chat should be saved"); 676 740 677 741 let rows = get_delta(&conn, &user.user_id, 0).unwrap(); 678 - assert_eq!(rows.len(), 4); 742 + assert_eq!(rows.chats.len(), 4); 679 743 let chat_bytes = encode_delta_to_bytes(&rows); 680 744 let decoded_chat = decode_delta_from_bytes(&chat_bytes).unwrap(); 681 - assert!(apply_delta(&mut conn_2, &decoded_chat).is_ok()); 745 + assert!(apply_delta(&mut conn_2, decoded_chat.clone()).is_ok()); 682 746 let rows = get_delta(&conn_2, &user.user_id, 0).unwrap(); 683 - assert_eq!(rows.len(), 4); 684 - assert!(apply_delta(&mut conn_2, &decoded_chat).is_ok()); 747 + assert_eq!(rows.chats.len(), 4); 748 + assert!(apply_delta(&mut conn_2, decoded_chat).is_ok()); 685 749 let rows = get_delta(&conn_2, &user.user_id, 0).unwrap(); 686 - 
assert_eq!(rows.len(), 4); 750 + assert_eq!(rows.chats.len(), 4); 687 751 } 688 752 689 753 #[test] ··· 739 803 let user_bs_diff_rows = 740 804 get_delta(&conn_2, &user_b.user_id, user_b_last_entry_of_user_a).unwrap(); 741 805 742 - assert_eq!(user_bs_diff_rows.len(), 4); 806 + assert_eq!(user_bs_diff_rows.chats.len(), 4); 743 807 744 808 // user_bs diff is encoded 745 809 let user_b_chat_bytes = encode_delta_to_bytes(&user_bs_diff_rows); ··· 748 812 let user_b_decoded_chat = decode_delta_from_bytes(&user_b_chat_bytes).unwrap(); 749 813 750 814 // Now user_a is gonna apply the user_b diff 751 - assert!(apply_delta(&mut conn, &user_b_decoded_chat).is_ok()); 815 + assert!(apply_delta(&mut conn, user_b_decoded_chat).is_ok()); 752 816 753 817 // Just checking if we user_a has all 8 rows 754 818 ··· 768 832 let user_as_diff_rows = 769 833 get_delta(&conn, &user_a.user_id, user_a_last_entry_of_user_b).unwrap(); 770 834 771 - assert_eq!(user_as_diff_rows.len(), 4); 835 + assert_eq!(user_as_diff_rows.chats.len(), 4); 772 836 773 837 // user_as diff is encoded 774 838 let user_a_chat_bytes = encode_delta_to_bytes(&user_as_diff_rows); ··· 777 841 let user_a_decoded_chat = decode_delta_from_bytes(&user_a_chat_bytes).unwrap(); 778 842 779 843 // Now user_b is gonna apply the user_b diff 780 - assert!(apply_delta(&mut conn_2, &user_a_decoded_chat).is_ok()); 844 + assert!(apply_delta(&mut conn_2, user_a_decoded_chat).is_ok()); 781 845 782 846 // Just checking eventual consistency 783 847
-10
tiles/src/daemon.rs
··· 229 229 230 230 #[tokio::test] 231 231 #[serial] 232 - async fn test_sever_process_started_not_server() -> Result<()> { 233 - tokio::spawn(async move { 234 - let _ = start_server(None).await; 235 - }); 236 - assert!(ping(None).await.is_err()); 237 - stop_server(None).await 238 - } 239 - 240 - #[tokio::test] 241 - #[serial] 242 232 async fn test_sever_process_and_server_started() -> Result<()> { 243 233 tokio::spawn(async move { 244 234 let _ = start_server(None).await;
+1 -5
tiles/src/runtime/mlx.rs
··· 450 450 let response: PiResponse = serde_json::from_str(&line)?; 451 451 452 452 match response { 453 - PiResponse::AgentStart => { 454 - info!("\nAgent start\n"); 455 - } 453 + PiResponse::AgentStart => {} 456 454 PiResponse::MessageUpdate(msg_update) => { 457 455 if msg_update.assistant_message_event.r#type == "text_delta" 458 456 && msg_update.assistant_message_event.delta.is_some() ··· 465 463 } 466 464 } 467 465 PiResponse::AgentEnd => { 468 - info!("\nAgent End\n"); 469 466 break; 470 467 } 471 468 PiResponse::TurnEnd(turn_event) => { 472 - info!("\nTurn end\n"); 473 469 session_turn_count += 1; 474 470 475 471 // on agent end create a new session entry, only for the
-1
tiles/src/utils/config.rs
··· 463 463 } 464 464 } 465 465 466 - //TODO: Add more tests for config.toml 467 466 #[cfg(test)] 468 467 mod tests { 469 468