Skip to content

Commit 5b00812

Browse files
committed
feat: support single agent tool call
1 parent c51cc8c commit 5b00812

File tree

5 files changed

+521
-32
lines changed

5 files changed

+521
-32
lines changed

.gitignore

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,3 +18,4 @@ dist/
1818
node_modules/
1919
inner-docs/
2020
*.db
21+
frontend/

agentmesh/models/llm/base_model.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,19 +12,21 @@ class LLMRequest:
1212
"""
1313

1414
def __init__(self, messages: list,
15-
temperature=0.5, json_format=False, stream=False):
15+
temperature=0.5, json_format=False, stream=False, tools=None):
1616
"""
1717
Initialize the BaseRequest with the necessary fields.
1818
1919
:param messages: A list of messages to be sent to the model.
2020
:param temperature: The sampling temperature for the model.
2121
:param json_format: Whether to request JSON formatted response.
2222
:param stream: Whether to enable streaming for the response.
23+
:param tools: List of tools for function calling (OpenAI format).
2324
"""
2425
self.messages = messages
2526
self.temperature = temperature
2627
self.json_format = json_format
2728
self.stream = stream
29+
self.tools = tools
2830

2931

3032
class LLMResponse:

agentmesh/models/llm/claude_model.py

Lines changed: 127 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,10 @@
1-
from agentmesh.models.llm.base_model import LLMModel, LLMRequest, LLMResponse
2-
from agentmesh.common.enums import ModelApiBase
3-
import requests
41
import json
52

3+
import requests
4+
5+
from agentmesh.common.enums import ModelApiBase
6+
from agentmesh.models.llm.base_model import LLMModel, LLMRequest, LLMResponse
7+
68

79
class ClaudeModel(LLMModel):
810
def __init__(self, model: str, api_key: str, api_base: str = None):
@@ -44,6 +46,10 @@ def call(self, request: LLMRequest) -> LLMResponse:
4446
if system_prompt:
4547
data["system"] = system_prompt
4648

49+
# Add tools if present in request
50+
if hasattr(request, 'tools') and request.tools:
51+
data["tools"] = request.tools
52+
4753
try:
4854
response = requests.post(
4955
f"{self.api_base}/messages",
@@ -55,6 +61,32 @@ def call(self, request: LLMRequest) -> LLMResponse:
5561
if response.status_code == 200:
5662
claude_response = response.json()
5763

64+
# Extract content blocks
65+
content_blocks = claude_response.get("content", [])
66+
text_content = ""
67+
tool_calls = []
68+
69+
for block in content_blocks:
70+
if block.get("type") == "text":
71+
text_content = block.get("text", "")
72+
elif block.get("type") == "tool_use":
73+
tool_calls.append({
74+
"id": block.get("id", ""),
75+
"type": "function",
76+
"function": {
77+
"name": block.get("name", ""),
78+
"arguments": json.dumps(block.get("input", {}))
79+
}
80+
})
81+
82+
# Build message
83+
message = {
84+
"role": "assistant",
85+
"content": text_content
86+
}
87+
if tool_calls:
88+
message["tool_calls"] = tool_calls
89+
5890
# Format the response to match OpenAI's structure
5991
openai_format_response = {
6092
"id": claude_response.get("id", ""),
@@ -64,10 +96,7 @@ def call(self, request: LLMRequest) -> LLMResponse:
6496
"choices": [
6597
{
6698
"index": 0,
67-
"message": {
68-
"role": "assistant",
69-
"content": claude_response.get("content", [{}])[0].get("text", "")
70-
},
99+
"message": message,
71100
"finish_reason": claude_response.get("stop_reason", "stop")
72101
}
73102
],
@@ -154,10 +183,22 @@ def call_stream(self, request: LLMRequest):
154183
if system_prompt:
155184
data["system"] = system_prompt
156185

186+
# Add tools if present in request
187+
if hasattr(request, 'tools') and request.tools:
188+
data["tools"] = request.tools
189+
print(f"[DEBUG] Sending tools to Claude: {len(request.tools)} tools")
190+
for tool in request.tools:
191+
print(f" - {tool['name']}")
192+
157193
# Add response format if JSON is requested
158194
if request.json_format:
159195
data["response_format"] = {"type": "json_object"}
160196

197+
# Debug: print request
198+
print(f"[DEBUG] Claude API request to: {self.api_base}/messages")
199+
print(f"[DEBUG] Model: {self.model}")
200+
print(f"[DEBUG] Messages: {len(claude_messages)} messages")
201+
161202
try:
162203
response = requests.post(
163204
f"{self.api_base}/messages",
@@ -166,11 +207,16 @@ def call_stream(self, request: LLMRequest):
166207
stream=True
167208
)
168209

210+
print(f"[DEBUG] Response status: {response.status_code}")
211+
169212
# Check for error response
170213
if response.status_code != 200:
171214
# Try to extract error message
215+
error_text = response.text
216+
print(f"[DEBUG] Error response text: {error_text}")
217+
172218
try:
173-
error_data = json.loads(response.text)
219+
error_data = json.loads(error_text)
174220
if "error" in error_data:
175221
if isinstance(error_data["error"], dict) and "message" in error_data["error"]:
176222
error_msg = error_data["error"]["message"]
@@ -179,9 +225,11 @@ def call_stream(self, request: LLMRequest):
179225
elif "message" in error_data:
180226
error_msg = error_data["message"]
181227
else:
182-
error_msg = response.text
228+
error_msg = error_text
183229
except:
184-
error_msg = response.text or "Unknown error"
230+
error_msg = error_text or "Unknown error"
231+
232+
print(f"[DEBUG] Parsed error message: {error_msg}")
185233

186234
# Yield an error object that can be detected by the caller
187235
yield {
@@ -191,6 +239,10 @@ def call_stream(self, request: LLMRequest):
191239
}
192240
return
193241

242+
# Track tool use state
243+
current_tool_use_index = -1
244+
tool_uses_map = {} # {index: {id, name, input}}
245+
194246
for line in response.iter_lines():
195247
if line:
196248
line = line.decode('utf-8')
@@ -199,28 +251,72 @@ def call_stream(self, request: LLMRequest):
199251
if line == '[DONE]':
200252
break
201253
try:
202-
chunk = json.loads(line)
203-
# Extract content from the delta
204-
content = ""
205-
if "delta" in chunk and "text" in chunk["delta"]:
206-
content = chunk["delta"]["text"]
207-
208-
# Convert Claude streaming format to OpenAI format
209-
yield {
210-
"id": chunk.get("id", ""),
211-
"object": "chat.completion.chunk",
212-
"created": int(chunk.get("created_at", 0)),
213-
"model": self.model,
214-
"choices": [
215-
{
216-
"index": 0,
217-
"delta": {
218-
"content": content
219-
},
220-
"finish_reason": None
254+
event = json.loads(line)
255+
event_type = event.get("type")
256+
257+
# Handle different event types
258+
if event_type == "content_block_start":
259+
# New content block
260+
block = event.get("content_block", {})
261+
if block.get("type") == "tool_use":
262+
current_tool_use_index = event.get("index", 0)
263+
tool_uses_map[current_tool_use_index] = {
264+
"id": block.get("id", ""),
265+
"name": block.get("name", ""),
266+
"input": ""
221267
}
222-
]
223-
}
268+
269+
elif event_type == "content_block_delta":
270+
delta = event.get("delta", {})
271+
delta_type = delta.get("type")
272+
273+
if delta_type == "text_delta":
274+
# Text content
275+
content = delta.get("text", "")
276+
yield {
277+
"id": event.get("id", ""),
278+
"object": "chat.completion.chunk",
279+
"created": 0,
280+
"model": self.model,
281+
"choices": [{
282+
"index": 0,
283+
"delta": {"content": content},
284+
"finish_reason": None
285+
}]
286+
}
287+
288+
elif delta_type == "input_json_delta":
289+
# Tool input accumulation
290+
if current_tool_use_index >= 0:
291+
tool_uses_map[current_tool_use_index]["input"] += delta.get("partial_json", "")
292+
293+
elif event_type == "message_delta":
294+
# Message complete - yield tool calls if any
295+
if tool_uses_map:
296+
for idx in sorted(tool_uses_map.keys()):
297+
tool_data = tool_uses_map[idx]
298+
yield {
299+
"id": event.get("id", ""),
300+
"object": "chat.completion.chunk",
301+
"created": 0,
302+
"model": self.model,
303+
"choices": [{
304+
"index": 0,
305+
"delta": {
306+
"tool_calls": [{
307+
"index": idx,
308+
"id": tool_data["id"],
309+
"type": "function",
310+
"function": {
311+
"name": tool_data["name"],
312+
"arguments": tool_data["input"]
313+
}
314+
}]
315+
},
316+
"finish_reason": None
317+
}]
318+
}
319+
224320
except json.JSONDecodeError:
225321
continue
226322
except requests.RequestException as e:

agentmesh/protocol/agent.py

Lines changed: 43 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
from agentmesh.common.utils.log import logger
77
from agentmesh.common.utils.xml_util import XmlResParser
88
from agentmesh.models import LLMRequest, LLMModel
9+
from agentmesh.protocol.agent_stream import AgentStreamExecutor
910
from agentmesh.protocol.context import TeamContext, AgentOutput
1011
from agentmesh.protocol.result import AgentAction, AgentActionType, ToolResult, AgentResult
1112
from agentmesh.tools.base_tool import BaseTool
@@ -489,6 +490,48 @@ def capture_tool_use(self, tool_name, input_params, output, status, thought=None
489490

490491
return action
491492

493+
def run_stream(self, user_message: str, on_event=None) -> str:
494+
"""
495+
Execute single agent task with streaming (based on tool-call)
496+
497+
This is a new method for single agent mode, supporting:
498+
- Streaming output
499+
- Multi-turn reasoning based on tool-call
500+
- Event callbacks
501+
502+
Args:
503+
user_message: User message
504+
on_event: Event callback function callback(event: dict)
505+
event = {"type": str, "timestamp": float, "data": dict}
506+
507+
Returns:
508+
Final response text
509+
510+
Example:
511+
def print_event(event):
512+
if event["type"] == "message_update":
513+
print(event["data"]["delta"], end="", flush=True)
514+
515+
response = agent.run_stream("Hello", on_event=print_event)
516+
"""
517+
# Get model to use
518+
model_to_use = self.model if self.model else self.team_context.model if self.team_context else None
519+
if not model_to_use:
520+
raise ValueError("No model available for agent")
521+
522+
# Create stream executor
523+
executor = AgentStreamExecutor(
524+
agent=self,
525+
model=model_to_use,
526+
system_prompt=self.system_prompt,
527+
tools=self.tools,
528+
max_turns=self.max_steps if self.max_steps else 10,
529+
on_event=on_event
530+
)
531+
532+
# Execute
533+
return executor.run_stream(user_message)
534+
492535

493536
AGENT_REPLY_PROMPT = """You are part of the team, you only need to reply the part of user question related to your responsibilities
494537

0 commit comments

Comments (0)