fix(channel): use native tool calling to preserve conversation context

AnthropicProvider declared supports_native_tools() = true but did not
override chat_with_tools(). The default trait implementation drops all
conversation history (sends only system + last user message), breaking
multi-turn conversations on Telegram and other channels.

Changes:
- Override chat_with_tools() in AnthropicProvider: converts OpenAI-format
  tool JSON to ToolSpec and delegates to chat() which preserves full
  message history
- Skip build_tool_instructions() XML protocol when provider supports
  native tools (saves ~12k chars in system prompt)
- Remove duplicate Tool Use Protocol section from build_system_prompt()
  for native-tool providers
- Update the "Your Task" section to encourage conversational follow-ups
  instead of XML <tool_call> tags when the provider uses native tools
- Add tracing::warn for malformed tool definitions in chat_with_tools
This commit is contained in:
xj 2026-02-18 05:42:14 -08:00 committed by Chummy
parent 8c826e581c
commit 2d6205ee58
3 changed files with 264 additions and 13 deletions

View file

@ -1458,17 +1458,21 @@ pub async fn run(
} else { } else {
None None
}; };
let mut system_prompt = crate::channels::build_system_prompt( let native_tools = provider.supports_native_tools();
let mut system_prompt = crate::channels::build_system_prompt_with_mode(
&config.workspace_dir, &config.workspace_dir,
model_name, model_name,
&tool_descs, &tool_descs,
&skills, &skills,
Some(&config.identity), Some(&config.identity),
bootstrap_max_chars, bootstrap_max_chars,
native_tools,
); );
// Append structured tool-use instructions with schemas // Append structured tool-use instructions with schemas (only for non-native providers)
if !native_tools {
system_prompt.push_str(&build_tool_instructions(&tools_registry)); system_prompt.push_str(&build_tool_instructions(&tools_registry));
}
// ── Approval manager (supervised mode) ─────────────────────── // ── Approval manager (supervised mode) ───────────────────────
let approval_manager = ApprovalManager::from_config(&config.autonomy); let approval_manager = ApprovalManager::from_config(&config.autonomy);
@ -1823,15 +1827,19 @@ pub async fn process_message(config: Config, message: &str) -> Result<String> {
} else { } else {
None None
}; };
let mut system_prompt = crate::channels::build_system_prompt( let native_tools = provider.supports_native_tools();
let mut system_prompt = crate::channels::build_system_prompt_with_mode(
&config.workspace_dir, &config.workspace_dir,
&model_name, &model_name,
&tool_descs, &tool_descs,
&skills, &skills,
Some(&config.identity), Some(&config.identity),
bootstrap_max_chars, bootstrap_max_chars,
native_tools,
); );
if !native_tools {
system_prompt.push_str(&build_tool_instructions(&tools_registry)); system_prompt.push_str(&build_tool_instructions(&tools_registry));
}
let mem_context = build_context(mem.as_ref(), message, config.memory.min_relevance_score).await; let mem_context = build_context(mem.as_ref(), message, config.memory.min_relevance_score).await;
let rag_limit = if config.agent.compact_context { 2 } else { 5 }; let rag_limit = if config.agent.compact_context { 2 } else { 5 };

View file

@ -1558,6 +1558,26 @@ pub fn build_system_prompt(
skills: &[crate::skills::Skill], skills: &[crate::skills::Skill],
identity_config: Option<&crate::config::IdentityConfig>, identity_config: Option<&crate::config::IdentityConfig>,
bootstrap_max_chars: Option<usize>, bootstrap_max_chars: Option<usize>,
) -> String {
build_system_prompt_with_mode(
workspace_dir,
model_name,
tools,
skills,
identity_config,
bootstrap_max_chars,
false,
)
}
pub fn build_system_prompt_with_mode(
workspace_dir: &std::path::Path,
model_name: &str,
tools: &[(&str, &str)],
skills: &[crate::skills::Skill],
identity_config: Option<&crate::config::IdentityConfig>,
bootstrap_max_chars: Option<usize>,
native_tools: bool,
) -> String { ) -> String {
use std::fmt::Write; use std::fmt::Write;
let mut prompt = String::with_capacity(8192); let mut prompt = String::with_capacity(8192);
@ -1594,12 +1614,21 @@ pub fn build_system_prompt(
} }
// ── 1c. Action instruction (avoid meta-summary) ─────────────── // ── 1c. Action instruction (avoid meta-summary) ───────────────
if native_tools {
prompt.push_str(
"## Your Task\n\n\
When the user sends a message, respond naturally. Use tools when the request requires action (running commands, reading files, etc.).\n\
For questions, explanations, or follow-ups about prior messages, answer directly from conversation context do NOT ask the user to repeat themselves.\n\
Do NOT: summarize this configuration, describe your capabilities, or output step-by-step meta-commentary.\n\n",
);
} else {
prompt.push_str( prompt.push_str(
"## Your Task\n\n\ "## Your Task\n\n\
When the user sends a message, ACT on it. Use the tools to fulfill their request.\n\ When the user sends a message, ACT on it. Use the tools to fulfill their request.\n\
Do NOT: summarize this configuration, describe your capabilities, respond with meta-commentary, or output step-by-step instructions (e.g. \"1. First... 2. Next...\").\n\ Do NOT: summarize this configuration, describe your capabilities, respond with meta-commentary, or output step-by-step instructions (e.g. \"1. First... 2. Next...\").\n\
Instead: emit actual <tool_call> tags when you need to act. Just do what they ask.\n\n", Instead: emit actual <tool_call> tags when you need to act. Just do what they ask.\n\n",
); );
}
// ── 2. Safety ─────────────────────────────────────────────── // ── 2. Safety ───────────────────────────────────────────────
prompt.push_str("## Safety\n\n"); prompt.push_str("## Safety\n\n");
@ -2318,15 +2347,19 @@ pub async fn start_channels(config: Config) -> Result<()> {
} else { } else {
None None
}; };
let mut system_prompt = build_system_prompt( let native_tools = provider.supports_native_tools();
let mut system_prompt = build_system_prompt_with_mode(
&workspace, &workspace,
&model, &model,
&tool_descs, &tool_descs,
&skills, &skills,
Some(&config.identity), Some(&config.identity),
bootstrap_max_chars, bootstrap_max_chars,
native_tools,
); );
if !native_tools {
system_prompt.push_str(&build_tool_instructions(tools_registry.as_ref())); system_prompt.push_str(&build_tool_instructions(tools_registry.as_ref()));
}
if !skills.is_empty() { if !skills.is_empty() {
println!( println!(

View file

@ -497,6 +497,53 @@ impl Provider for AnthropicProvider {
true true
} }
async fn chat_with_tools(
    &self,
    messages: &[ChatMessage],
    tools: &[serde_json::Value],
    model: &str,
    temperature: f64,
) -> anyhow::Result<ProviderChatResponse> {
    // Rather than building an Anthropic request from scratch here, translate the
    // OpenAI-style tool JSON into `ToolSpec` values and delegate to `chat()`,
    // which already handles the full message history, system-prompt extraction,
    // caching, and native Anthropic formatting.
    let mut tool_specs: Vec<ToolSpec> = Vec::with_capacity(tools.len());
    for tool in tools {
        // OpenAI format nests the actual definition under a "function" key;
        // malformed entries are skipped (with a warning) rather than failing the call.
        let Some(func) = tool.get("function") else {
            tracing::warn!("Skipping malformed tool definition (missing 'function' key)");
            continue;
        };
        let Some(name) = func.get("name").and_then(|n| n.as_str()) else {
            tracing::warn!("Skipping tool with missing or non-string 'name'");
            continue;
        };
        let description = func
            .get("description")
            .and_then(|d| d.as_str())
            .unwrap_or("")
            .to_string();
        // Fall back to an empty object schema when "parameters" is absent.
        let parameters = match func.get("parameters") {
            Some(p) => p.clone(),
            None => serde_json::json!({"type": "object"}),
        };
        tool_specs.push(ToolSpec {
            name: name.to_string(),
            description,
            parameters,
        });
    }
    // `None` (rather than an empty list) when no tools survived conversion, so the
    // downstream request omits the tools field entirely.
    let request = ProviderChatRequest {
        messages,
        tools: if tool_specs.is_empty() {
            None
        } else {
            Some(&tool_specs)
        },
    };
    self.chat(request, model, temperature).await
}
async fn warmup(&self) -> anyhow::Result<()> { async fn warmup(&self) -> anyhow::Result<()> {
if let Some(credential) = self.credential.as_ref() { if let Some(credential) = self.credential.as_ref() {
let mut request = self let mut request = self
@ -1105,4 +1152,167 @@ mod tests {
let result = provider.warmup().await; let result = provider.warmup().await;
assert!(result.is_ok()); assert!(result.is_ok());
} }
#[test]
fn convert_messages_preserves_multi_turn_history() {
    // Build a system prompt plus a three-turn conversation and verify that
    // conversion hoists the system turn and drops nothing else.
    let mk = |role: &str, content: &str| ChatMessage {
        role: role.to_string(),
        content: content.to_string(),
    };
    let messages = vec![
        mk("system", "You are helpful."),
        mk("user", "gen a 2 sum in golang"),
        mk("assistant", "```go\nfunc twoSum(nums []int) {}\n```"),
        mk("user", "what's meaning of make here?"),
    ];
    let (system, native_msgs) = AnthropicProvider::convert_messages(&messages);
    // The system turn must be extracted to the top-level prompt, not discarded.
    assert!(system.is_some());
    // Every non-system turn survives, in the original order.
    let expected_roles = ["user", "assistant", "user"];
    assert_eq!(native_msgs.len(), expected_roles.len());
    for (msg, want) in native_msgs.iter().zip(expected_roles) {
        assert_eq!(msg.role, want);
    }
}
/// Integration test: spin up a mock Anthropic API server, call chat_with_tools
/// with a multi-turn conversation + tools, and verify the request body contains
/// ALL conversation turns and native tool definitions.
///
/// This guards against the regression fixed by this change: the default trait
/// implementation of chat_with_tools dropped conversation history, so a
/// follow-up question ("what's meaning of make here?") lost its context.
#[tokio::test]
async fn chat_with_tools_sends_full_history_and_native_tools() {
use axum::{routing::post, Json, Router};
use std::sync::{Arc, Mutex};
use tokio::net::TcpListener;
// Captured request body for assertion
// (shared between the handler closure and the test via Arc<Mutex<..>>).
let captured: Arc<Mutex<Option<serde_json::Value>>> = Arc::new(Mutex::new(None));
let captured_clone = captured.clone();
// Mock the Anthropic Messages endpoint: record the incoming JSON body and
// reply with a minimal well-formed response so chat() parses cleanly.
let app = Router::new().route(
"/v1/messages",
post(move |Json(body): Json<serde_json::Value>| {
let cap = captured_clone.clone();
async move {
*cap.lock().unwrap() = Some(body);
// Return a minimal valid Anthropic response
Json(serde_json::json!({
"id": "msg_test",
"type": "message",
"role": "assistant",
"content": [{"type": "text", "text": "The make function creates a map."}],
"model": "claude-opus-4-6",
"stop_reason": "end_turn",
"usage": {"input_tokens": 100, "output_tokens": 20}
}))
}
}),
);
// Bind to an ephemeral port so parallel tests never collide.
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
let addr = listener.local_addr().unwrap();
let server_handle = tokio::spawn(async move {
axum::serve(listener, app).await.unwrap();
});
// Create provider pointing at mock server
let provider = AnthropicProvider {
credential: Some("test-key".to_string()),
base_url: format!("http://{addr}"),
};
// Multi-turn conversation: system → user (Go code) → assistant (code response) → user (follow-up)
let messages = vec![
ChatMessage::system("You are a helpful assistant."),
ChatMessage::user("gen a 2 sum in golang"),
ChatMessage::assistant("```go\nfunc twoSum(nums []int, target int) []int {\n m := make(map[int]int)\n for i, n := range nums {\n if j, ok := m[target-n]; ok {\n return []int{j, i}\n }\n m[n] = i\n }\n return nil\n}\n```"),
ChatMessage::user("what's meaning of make here?"),
];
// One OpenAI-format tool definition, as produced by the tools registry.
let tools = vec![serde_json::json!({
"type": "function",
"function": {
"name": "shell",
"description": "Run a shell command",
"parameters": {
"type": "object",
"properties": {
"command": {"type": "string"}
},
"required": ["command"]
}
}
})];
let result = provider
.chat_with_tools(&messages, &tools, "claude-opus-4-6", 0.7)
.await;
assert!(result.is_ok(), "chat_with_tools failed: {:?}", result.err());
let body = captured
.lock()
.unwrap()
.take()
.expect("No request captured");
// Verify system prompt extracted to top-level field
// (Anthropic takes "system" separately, not as a messages[] entry).
let system = &body["system"];
assert!(
system.to_string().contains("helpful assistant"),
"System prompt missing: {system}"
);
// Verify ALL conversation turns present in messages array
let msgs = body["messages"].as_array().expect("messages not an array");
assert_eq!(
msgs.len(),
3,
"Expected 3 messages (2 user + 1 assistant), got {}",
msgs.len()
);
// Turn 1: user with Go request
assert_eq!(msgs[0]["role"], "user");
let turn1_text = msgs[0]["content"].to_string();
assert!(
turn1_text.contains("2 sum"),
"Turn 1 missing Go request: {turn1_text}"
);
// Turn 2: assistant with Go code
assert_eq!(msgs[1]["role"], "assistant");
let turn2_text = msgs[1]["content"].to_string();
assert!(
turn2_text.contains("make(map[int]int)"),
"Turn 2 missing Go code: {turn2_text}"
);
// Turn 3: user follow-up
assert_eq!(msgs[2]["role"], "user");
let turn3_text = msgs[2]["content"].to_string();
assert!(
turn3_text.contains("meaning of make"),
"Turn 3 missing follow-up: {turn3_text}"
);
// Verify native tools are present, translated from OpenAI format to
// Anthropic's shape (top-level "name" + "input_schema" instead of "function").
let api_tools = body["tools"].as_array().expect("tools not an array");
assert_eq!(api_tools.len(), 1);
assert_eq!(api_tools[0]["name"], "shell");
assert!(
api_tools[0]["input_schema"].is_object(),
"Missing input_schema"
);
// Tear down the mock server; aborting is fine since all assertions passed.
server_handle.abort();
}
} }