diff --git a/examples/localcowork/.env.example b/examples/localcowork/.env.example
index 16e64bd..b18bd1e 100644
--- a/examples/localcowork/.env.example
+++ b/examples/localcowork/.env.example
@@ -11,6 +11,7 @@
# Text model API endpoint (OpenAI-compatible). Set by start-model.sh.
# Default when using LFM2 via llama-server: http://localhost:8080/v1
# Default when using Ollama: http://localhost:11434/v1
+# Default when using LM Studio: http://localhost:1234/v1
# LOCALCOWORK_MODEL_ENDPOINT=http://localhost:8080/v1
# Vision model endpoint (for AI-powered OCR — optional, falls back to Tesseract)
diff --git a/examples/localcowork/.git-hooks/pre-commit b/examples/localcowork/.git-hooks/pre-commit
index d97f222..02cbe62 100755
--- a/examples/localcowork/.git-hooks/pre-commit
+++ b/examples/localcowork/.git-hooks/pre-commit
@@ -28,6 +28,20 @@ fi
WARNINGS=()
+# ── Check 0: Shell scripts pass shellcheck ───────────────────────────────────
+
+for file in $STAGED_FILES; do
+ case "$file" in
+ *.sh)
+ if command -v shellcheck >/dev/null 2>&1; then
+ if ! shellcheck -s bash "$file" >/dev/null 2>&1; then
+ WARNINGS+=("shellcheck failed for $file (run: shellcheck -s bash $file)")
+ fi
+ fi
+ ;;
+ esac
+done
+
# ── Check 1: Source files changed but PROGRESS.yaml not staged ──────────────
HAS_SOURCE_CHANGES=false
diff --git a/examples/localcowork/.github/workflows/shellcheck.yml b/examples/localcowork/.github/workflows/shellcheck.yml
new file mode 100644
index 0000000..1738149
--- /dev/null
+++ b/examples/localcowork/.github/workflows/shellcheck.yml
@@ -0,0 +1,20 @@
+name: Shellcheck
+
+on:
+ push:
+ paths:
+ - "**.sh"
+ pull_request:
+ paths:
+ - "**.sh"
+
+jobs:
+ shellcheck:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Run shellcheck
+      uses: ludeeus/action-shellcheck@2.0.0
+ env:
+ SHELLCHECK_OPTS: "-s bash -S error"
diff --git a/src-tauri/Cargo.toml b/src-tauri/Cargo.toml
new file mode 100644
index 0000000..34a3fa8
--- /dev/null
+++ b/src-tauri/Cargo.toml
@@ -0,0 +1,45 @@
+[package]
+name = "localcowork"
+version = "0.1.0"
+description = "LocalCowork — on-device AI agent desktop app"
+authors = ["LocalCowork Contributors"]
+license = "MIT"
+edition = "2021"
+rust-version = "1.77"
+
+[build-dependencies]
+tauri-build = { version = "2", features = [] }
+
+[dependencies]
+tauri = { version = "2", features = ["devtools"] }
+tauri-plugin-shell = "2"
+serde = { version = "1", features = ["derive"] }
+serde_json = "1"
+serde_yaml = "0.9"
+tokio = { version = "1", features = ["full"] }
+anyhow = "1"
+thiserror = "2"
+tracing = "0.1"
+tracing-subscriber = { version = "0.3", features = ["json", "env-filter"] }
+uuid = { version = "1", features = ["v4"] }
+chrono = { version = "0.4", features = ["serde"] }
+reqwest = { version = "0.12", features = ["json", "stream"] }
+rusqlite = { version = "0.32", features = ["bundled"] }
+futures = "0.3"
+dirs = "6"
+sysinfo = "0.33"
+sha2 = "0.10"
+tauri-plugin-dialog = "2"
+
+[dev-dependencies]
+tempfile = "3"
+
+[features]
+default = ["custom-protocol"]
+custom-protocol = ["tauri/custom-protocol"]
+
+[profile.release]
+strip = true
+lto = true
+codegen-units = 1
+panic = "abort"
diff --git a/src-tauri/build.rs b/src-tauri/build.rs
new file mode 100644
index 0000000..261851f
--- /dev/null
+++ b/src-tauri/build.rs
@@ -0,0 +1,3 @@
+fn main() {
+ tauri_build::build();
+}
diff --git a/src-tauri/capabilities/default.json b/src-tauri/capabilities/default.json
new file mode 100644
index 0000000..9102361
--- /dev/null
+++ b/src-tauri/capabilities/default.json
@@ -0,0 +1,15 @@
+{
+ "identifier": "default",
+ "description": "Default capabilities for LocalCowork",
+ "windows": ["main"],
+ "permissions": [
+ "core:default",
+ "shell:allow-open",
+ "shell:allow-execute",
+ "shell:allow-spawn",
+ "shell:allow-stdin-write",
+ "dialog:default",
+ "dialog:allow-open",
+ "dialog:allow-save"
+ ]
+}
diff --git a/src-tauri/entitlements.plist b/src-tauri/entitlements.plist
new file mode 100644
index 0000000..8ce248b
--- /dev/null
+++ b/src-tauri/entitlements.plist
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+    <!-- Allow JIT / unsigned executable memory (local model runtimes) -->
+    <key>com.apple.security.cs.allow-unsigned-executable-memory</key>
+    <true/>
+    <!-- Allow loading libraries not signed by the app's team ID -->
+    <key>com.apple.security.cs.disable-library-validation</key>
+    <true/>
+    <!-- Outbound network access (localhost model endpoints) -->
+    <key>com.apple.security.network.client</key>
+    <true/>
+    <!-- Read/write files the user selects via open/save dialogs -->
+    <key>com.apple.security.files.user-selected.read-write</key>
+    <true/>
+</dict>
+</plist>
diff --git a/src-tauri/icons/128x128.png b/src-tauri/icons/128x128.png
new file mode 100644
index 0000000..6a66724
Binary files /dev/null and b/src-tauri/icons/128x128.png differ
diff --git a/src-tauri/icons/128x128@2x.png b/src-tauri/icons/128x128@2x.png
new file mode 100644
index 0000000..323b36e
Binary files /dev/null and b/src-tauri/icons/128x128@2x.png differ
diff --git a/src-tauri/icons/32x32.png b/src-tauri/icons/32x32.png
new file mode 100644
index 0000000..5568400
Binary files /dev/null and b/src-tauri/icons/32x32.png differ
diff --git a/src-tauri/icons/icon.icns b/src-tauri/icons/icon.icns
new file mode 100644
index 0000000..caa531a
Binary files /dev/null and b/src-tauri/icons/icon.icns differ
diff --git a/src-tauri/icons/icon.ico b/src-tauri/icons/icon.ico
new file mode 100644
index 0000000..4107408
Binary files /dev/null and b/src-tauri/icons/icon.ico differ
diff --git a/src-tauri/mcp-servers.json b/src-tauri/mcp-servers.json
new file mode 100644
index 0000000..6df9c17
--- /dev/null
+++ b/src-tauri/mcp-servers.json
@@ -0,0 +1,3 @@
+{
+ "servers": {}
+}
diff --git a/src-tauri/src/agent_core/conversation.rs b/src-tauri/src/agent_core/conversation.rs
new file mode 100644
index 0000000..e21f2a0
--- /dev/null
+++ b/src-tauri/src/agent_core/conversation.rs
@@ -0,0 +1,841 @@
+//! ConversationManager — persistent conversation history with context window management.
+//!
+//! Responsibilities:
+//! - Store and retrieve conversation messages (SQLite)
+//! - Track token usage per message
+//! - Enforce context window budget (32k default) via eviction
+//! - Maintain session summaries for evicted turns
+//! - Build `Vec<ChatMessage>` for the inference client
+
+use crate::inference::types::{
+ ChatMessage, FunctionCallResponse, Role, ToolCallResponse,
+};
+
+use super::database::AgentDatabase;
+use super::errors::AgentError;
+use super::tokens;
+use super::types::{
+ ConversationMessage, ContextBudget, NewMessage, NewUndoEntry, SessionSummary, UndoEntry,
+};
+
+// ─── Constants ──────────────────────────────────────────────────────────────
+
+/// Default total context window size (tokens).
+const DEFAULT_CONTEXT_WINDOW: u32 = 32_768;
+
+/// Tokens reserved for the system prompt (rules + few-shot examples).
+const SYSTEM_PROMPT_BUDGET: u32 = 900;
+
+/// Default tokens reserved for tool definitions.
+///
+/// Used when the actual tool definition tokens haven't been measured yet.
+/// This is a conservative fallback — the real value should be computed from
+/// the serialized tool definitions and set via `set_tool_definitions_budget()`.
+const DEFAULT_TOOL_DEFINITIONS_BUDGET: u32 = 2_000;
+
+/// Tokens reserved for the model's output response.
+///
+/// Every production agent reserves space for the model to generate its
+/// response. Without this, the context window could be 100% filled with
+/// input, leaving no room for output.
+///
+/// Note: The PRD's "Active file/document content" budget (~9,500 tokens)
+/// was a static reservation for a ProactiveContextor feature that hasn't
+/// been built yet. When that feature is implemented, it will dynamically
+/// claim tokens from the conversation budget — not from a phantom static
+/// reservation that wastes 29% of the context window.
+const OUTPUT_RESERVATION: u32 = 2_000;
+
+/// Safety buffer — never fill these tokens.
+const SAFETY_BUFFER: u32 = 768;
+
+/// When remaining tokens drop below this, trigger eviction.
+///
+/// Set to 5,000 so eviction fires well before the agent loop's
+/// `MIN_ROUND_TOKEN_BUDGET` (1,500) gate kills the loop. With the
+/// old value of 1,000, eviction never triggered because the budget
+/// gate always fired first, making eviction effectively dead.
+const EVICTION_THRESHOLD: u32 = 5_000;
+
+/// Number of most recent turns to keep in full detail during eviction.
+const FULL_DETAIL_TURNS: usize = 10;
+
+/// Maximum tokens allowed for the session summary.
+///
+/// Without a cap, each eviction cycle appends to the summary, which can
+/// grow to 2,000+ tokens after 3 cycles — eating into the space eviction
+/// was supposed to free. The cap keeps the most recent portion.
+const MAX_SUMMARY_TOKENS: u32 = 500;
+
+// ─── ConversationManager ────────────────────────────────────────────────────
+
+/// Manages conversation history, token budgets, and context window eviction.
+pub struct ConversationManager {
+ /// SQLite database handle.
+ db: AgentDatabase,
+ /// Total context window size (configurable per model).
+ context_window: u32,
+ /// Actual tokens consumed by tool definitions (measured, not estimated).
+ ///
+ /// Set by `set_tool_definitions_budget()` after tool definitions are built.
+ /// Falls back to `DEFAULT_TOOL_DEFINITIONS_BUDGET` if not set.
+ tool_definitions_budget: u32,
+ /// Actual tokens consumed by the system prompt (measured, not estimated).
+ ///
+ /// Set by `set_system_prompt_budget()` after the dynamic system prompt is built.
+ /// Falls back to `SYSTEM_PROMPT_BUDGET` if not set.
+ system_prompt_budget: u32,
+}
+
+impl ConversationManager {
+ /// Create a new ConversationManager backed by the given database.
+ pub fn new(db: AgentDatabase) -> Self {
+ Self {
+ db,
+ context_window: DEFAULT_CONTEXT_WINDOW,
+ tool_definitions_budget: DEFAULT_TOOL_DEFINITIONS_BUDGET,
+ system_prompt_budget: SYSTEM_PROMPT_BUDGET,
+ }
+ }
+
+ /// Override the context window size (e.g., from model config).
+ pub fn set_context_window(&mut self, size: u32) {
+ self.context_window = size;
+ }
+
+ /// Set the actual tool definitions token budget based on measured serialization.
+ ///
+ /// This should be called after tool definitions are built (in `send_message`)
+ /// so the budget calculation uses the real cost instead of the default estimate.
+ pub fn set_tool_definitions_budget(&mut self, tokens: u32) {
+ self.tool_definitions_budget = tokens;
+ }
+
+ /// Set the actual system prompt token budget based on the dynamic prompt.
+ ///
+ /// Called in `start_session` after building the prompt from the MCP registry.
+ /// Ensures the context budget display reflects the real prompt size.
+ pub fn set_system_prompt_budget(&mut self, tokens: u32) {
+ self.system_prompt_budget = tokens;
+ }
+
+ /// Access the underlying database (for ToolRouter/audit operations).
+ pub fn db(&self) -> &AgentDatabase {
+ &self.db
+ }
+
+ // ─── Session Management ─────────────────────────────────────────────
+
+ /// Start a new conversation session.
+ ///
+ /// Creates the session record and inserts the system prompt as the first
+ /// message. Returns the session ID.
+ pub fn new_session(
+ &self,
+ session_id: &str,
+ system_prompt: &str,
+ ) -> Result<(), AgentError> {
+ self.db.create_session(session_id)?;
+
+ let token_count = tokens::estimate_system_prompt_tokens(system_prompt);
+ let msg = NewMessage {
+ role: Role::System,
+ content: Some(system_prompt.to_string()),
+ tool_calls: None,
+ tool_call_id: None,
+ tool_result: None,
+ };
+ self.db.insert_message(session_id, &msg, token_count)?;
+ Ok(())
+ }
+
+ // ─── Message Operations ─────────────────────────────────────────────
+
+ /// Add a user message to the conversation.
+ pub fn add_user_message(
+ &self,
+ session_id: &str,
+ content: &str,
+    ) -> Result<i64, AgentError> {
+ let token_count = tokens::estimate_tokens(content) + 4; // overhead
+ let msg = NewMessage {
+ role: Role::User,
+ content: Some(content.to_string()),
+ tool_calls: None,
+ tool_call_id: None,
+ tool_result: None,
+ };
+ self.db.insert_message(session_id, &msg, token_count)
+ }
+
+ /// Add an assistant text message to the conversation.
+ pub fn add_assistant_message(
+ &self,
+ session_id: &str,
+ content: &str,
+    ) -> Result<i64, AgentError> {
+ let token_count = tokens::estimate_tokens(content) + 4;
+ let msg = NewMessage {
+ role: Role::Assistant,
+ content: Some(content.to_string()),
+ tool_calls: None,
+ tool_call_id: None,
+ tool_result: None,
+ };
+ self.db.insert_message(session_id, &msg, token_count)
+ }
+
+ /// Add an assistant message that contains tool calls.
+ pub fn add_tool_call_message(
+ &self,
+ session_id: &str,
+ tool_calls: &[crate::inference::types::ToolCall],
+    ) -> Result<i64, AgentError> {
+ // Estimate tokens for tool calls
+ let mut token_count: u32 = 4; // overhead
+ for tc in tool_calls {
+ token_count += 10; // per-call overhead
+ token_count += tokens::estimate_tokens(&tc.name);
+ token_count += tokens::estimate_tokens(
+ &serde_json::to_string(&tc.arguments).unwrap_or_default(),
+ );
+ }
+
+ let msg = NewMessage {
+ role: Role::Assistant,
+ content: None,
+ tool_calls: Some(tool_calls.to_vec()),
+ tool_call_id: None,
+ tool_result: None,
+ };
+ self.db.insert_message(session_id, &msg, token_count)
+ }
+
+ /// Add a tool result message to the conversation.
+ pub fn add_tool_result_message(
+ &self,
+ session_id: &str,
+ tool_call_id: &str,
+ result: &serde_json::Value,
+    ) -> Result<i64, AgentError> {
+ // Use the plain string if the value is a String, otherwise JSON-encode it.
+ // This avoids double-serialization (wrapping "text" as "\"text\"") which
+ // confuses local LLMs into thinking the tool result is empty/malformed.
+ let result_str = match result.as_str() {
+ Some(s) => s.to_string(),
+ None => serde_json::to_string(result).unwrap_or_default(),
+ };
+ let token_count = tokens::estimate_tokens(&result_str) + 4;
+
+ let msg = NewMessage {
+ role: Role::Tool,
+ content: Some(result_str),
+ tool_calls: None,
+ tool_call_id: Some(tool_call_id.to_string()),
+ tool_result: Some(result.clone()),
+ };
+ self.db.insert_message(session_id, &msg, token_count)
+ }
+
+ /// Get the full conversation history for a session.
+ pub fn get_history(
+ &self,
+ session_id: &str,
+    ) -> Result<Vec<ConversationMessage>, AgentError> {
+ self.db.get_messages(session_id)
+ }
+
+ /// Get the N most recent messages.
+ pub fn get_recent(
+ &self,
+ session_id: &str,
+ n: usize,
+    ) -> Result<Vec<ConversationMessage>, AgentError> {
+ self.db.get_recent_messages(session_id, n)
+ }
+
+ // ─── Context Window Management ──────────────────────────────────────
+
+ /// Get the current context budget snapshot.
+    pub fn get_budget(&self, session_id: &str) -> Result<ContextBudget, AgentError> {
+ let conversation_tokens = self.db.total_message_tokens(session_id)?;
+ let total = self.context_window;
+ let overhead = self.system_prompt_budget + self.tool_definitions_budget
+ + OUTPUT_RESERVATION + SAFETY_BUFFER;
+ let remaining = total.saturating_sub(overhead).saturating_sub(conversation_tokens);
+
+ Ok(ContextBudget {
+ total,
+ system_prompt: self.system_prompt_budget,
+ tool_definitions: self.tool_definitions_budget,
+ conversation_history: conversation_tokens,
+ output_reservation: OUTPUT_RESERVATION,
+ remaining,
+ })
+ }
+
+ /// Check if eviction is needed and perform it.
+ ///
+ /// Evicts the oldest non-system messages until remaining tokens are
+ /// above the threshold. Evicted messages are summarized into the
+ /// session summary.
+    pub fn evict_if_needed(&self, session_id: &str) -> Result<u32, AgentError> {
+ let budget = self.get_budget(session_id)?;
+
+ if budget.remaining >= EVICTION_THRESHOLD {
+ return Ok(0); // No eviction needed
+ }
+
+ let message_count = self.db.message_count(session_id)?;
+ if message_count <= FULL_DETAIL_TURNS + 1 {
+ // +1 for system prompt
+ return Ok(0); // Not enough messages to evict
+ }
+
+ // Evict messages beyond the full-detail window
+ let evict_count = message_count - FULL_DETAIL_TURNS - 1;
+ let evicted = self.db.delete_oldest_messages(session_id, evict_count)?;
+
+ // Build a summary from evicted messages
+ let mut summary_parts = Vec::new();
+        let mut files: Vec<String> = Vec::new();
+
+ for msg in &evicted {
+ let line = tokens::summarize_turn(&msg.role, msg.content.as_deref());
+ summary_parts.push(line);
+
+ // Track file paths mentioned in tool calls
+ if let Some(ref tc) = msg.tool_calls {
+ for call in tc {
+ if let Some(path) = call.arguments.get("path").and_then(|v| v.as_str()) {
+ if !files.contains(&path.to_string()) {
+ files.push(path.to_string());
+ }
+ }
+ }
+ }
+ }
+
+ let summary_text = summary_parts.join("\n");
+ let evicted_tokens: u32 = evicted.iter().map(|m| m.token_count).sum();
+
+ // Update session summary (append to existing, then cap)
+ let existing = self.db.get_session_summary(session_id)?;
+ let full_summary = match existing {
+ Some(s) => format!("{}\n{}", s.summary_text, summary_text),
+ None => summary_text,
+ };
+
+ // Cap summary to prevent it from consuming the space eviction freed
+ let summary_tokens = tokens::estimate_tokens(&full_summary);
+ let capped_summary = if summary_tokens > MAX_SUMMARY_TOKENS {
+ let target_chars = (MAX_SUMMARY_TOKENS as f64 * 3.2) as usize;
+ let start = full_summary.len().saturating_sub(target_chars);
+ format!("[earlier context omitted]\n{}", &full_summary[start..])
+ } else {
+ full_summary
+ };
+
+ self.db.update_session_summary(
+ session_id,
+ &capped_summary,
+ &files,
+ &[], // decisions are tracked separately
+ )?;
+
+ Ok(evicted_tokens)
+ }
+
+    /// Build the `Vec<ChatMessage>` to send to the inference client.
+ ///
+ /// Includes: session summary (if any) + system prompt + recent messages.
+ pub fn build_chat_messages(
+ &self,
+ session_id: &str,
+    ) -> Result<Vec<ChatMessage>, AgentError> {
+ let messages = self.db.get_messages(session_id)?;
+ let summary = self.db.get_session_summary(session_id)?;
+
+ let mut chat_messages = Vec::new();
+
+ for msg in &messages {
+ match msg.role {
+ Role::System => {
+ // Prepend session summary to system prompt
+ let mut content = msg.content.clone().unwrap_or_default();
+ if let Some(ref s) = summary {
+ content = format!(
+ "{content}\n\n## Previous conversation summary:\n{}",
+ s.summary_text
+ );
+ }
+ chat_messages.push(ChatMessage {
+ role: Role::System,
+ content: Some(content),
+ tool_call_id: None,
+ tool_calls: None,
+ });
+ }
+ Role::User => {
+ chat_messages.push(ChatMessage {
+ role: Role::User,
+ content: msg.content.clone(),
+ tool_call_id: None,
+ tool_calls: None,
+ });
+ }
+ Role::Assistant => {
+ let tool_calls = msg.tool_calls.as_ref().map(|calls| {
+ calls
+ .iter()
+ .map(|tc| ToolCallResponse {
+ id: tc.id.clone(),
+ r#type: "function".to_string(),
+ function: FunctionCallResponse {
+ name: tc.name.clone(),
+ arguments: serde_json::to_string(&tc.arguments)
+ .unwrap_or_default(),
+ },
+ })
+ .collect()
+ });
+ chat_messages.push(ChatMessage {
+ role: Role::Assistant,
+ content: msg.content.clone(),
+ tool_call_id: None,
+ tool_calls,
+ });
+ }
+ Role::Tool => {
+ chat_messages.push(ChatMessage {
+ role: Role::Tool,
+ content: msg.content.clone(),
+ tool_call_id: msg.tool_call_id.clone(),
+ tool_calls: None,
+ });
+ }
+ }
+ }
+
+ Ok(chat_messages)
+ }
+
+    /// Build a windowed `Vec<ChatMessage>` optimized for multi-step workflows.
+ ///
+ /// Implements a 3-tier message strategy to minimize token waste:
+ /// - **Tier 1 (recent)**: Last `recent_window` messages sent verbatim
+ /// - **Tier 2 (middle)**: Tool results compressed to one-line summaries;
+ /// user/assistant messages kept verbatim
+ /// - **Tier 3 (evicted)**: Already handled by session summary
+ ///
+ /// This prevents stale tool results from consuming context. A 6,000-char
+ /// OCR result from round 2 is compressed to ~50 chars in rounds 4+.
+ pub fn build_windowed_chat_messages(
+ &self,
+ session_id: &str,
+ recent_window: usize,
+    ) -> Result<Vec<ChatMessage>, AgentError> {
+ let messages = self.db.get_messages(session_id)?;
+ let summary = self.db.get_session_summary(session_id)?;
+
+ let total = messages.len();
+ // Window start index: everything before this is Tier 2 (compressed)
+ // +1 to account for system prompt at index 0
+ let window_start = if total > recent_window + 1 {
+ total - recent_window
+ } else {
+ 1 // include everything after system prompt
+ };
+
+ let mut chat_messages = Vec::new();
+
+ for (i, msg) in messages.iter().enumerate() {
+ match msg.role {
+ Role::System => {
+ // Prepend session summary to system prompt (same as build_chat_messages)
+ let mut content = msg.content.clone().unwrap_or_default();
+ if let Some(ref s) = summary {
+ content = format!(
+ "{content}\n\n## Previous conversation summary:\n{}",
+ s.summary_text
+ );
+ }
+ chat_messages.push(ChatMessage {
+ role: Role::System,
+ content: Some(content),
+ tool_call_id: None,
+ tool_calls: None,
+ });
+ }
+ Role::Tool if i < window_start => {
+ // Tier 2: compress old tool results to one-line summary
+ let compressed = tokens::summarize_tool_result(
+ msg.tool_call_id.as_deref().unwrap_or("tool"),
+ &msg.tool_result.clone().unwrap_or(serde_json::Value::Null),
+ );
+ chat_messages.push(ChatMessage {
+ role: Role::Tool,
+ content: Some(compressed),
+ tool_call_id: msg.tool_call_id.clone(),
+ tool_calls: None,
+ });
+ }
+ Role::User => {
+ chat_messages.push(ChatMessage {
+ role: Role::User,
+ content: msg.content.clone(),
+ tool_call_id: None,
+ tool_calls: None,
+ });
+ }
+ Role::Assistant => {
+ let tool_calls = msg.tool_calls.as_ref().map(|calls| {
+ calls
+ .iter()
+ .map(|tc| ToolCallResponse {
+ id: tc.id.clone(),
+ r#type: "function".to_string(),
+ function: FunctionCallResponse {
+ name: tc.name.clone(),
+ arguments: serde_json::to_string(&tc.arguments)
+ .unwrap_or_default(),
+ },
+ })
+ .collect()
+ });
+ chat_messages.push(ChatMessage {
+ role: Role::Assistant,
+ content: msg.content.clone(),
+ tool_call_id: None,
+ tool_calls,
+ });
+ }
+ Role::Tool => {
+ // Tier 1: recent tool results — send verbatim
+ chat_messages.push(ChatMessage {
+ role: Role::Tool,
+ content: msg.content.clone(),
+ tool_call_id: msg.tool_call_id.clone(),
+ tool_calls: None,
+ });
+ }
+ }
+ }
+
+ Ok(chat_messages)
+ }
+
+ // ─── Undo Stack (delegates to DB) ───────────────────────────────────
+
+ /// Push a new entry onto the undo stack.
+ pub fn push_undo(
+ &self,
+ session_id: &str,
+ entry: &NewUndoEntry,
+    ) -> Result<i64, AgentError> {
+ self.db.push_undo_entry(session_id, entry)
+ }
+
+ /// Get the current undo stack for a session.
+ pub fn get_undo_stack(
+ &self,
+ session_id: &str,
+    ) -> Result<Vec<UndoEntry>, AgentError> {
+ self.db.get_undo_stack(session_id)
+ }
+
+ /// Mark an undo entry as undone.
+ pub fn mark_undone(&self, undo_id: i64) -> Result<(), AgentError> {
+ self.db.mark_undone(undo_id)
+ }
+
+ /// Get the session summary.
+ pub fn get_session_summary(
+ &self,
+ session_id: &str,
+ ) -> Result