Skip to content

Commit 4d69c8b

Browse files
authored
Merge pull request #42 from syncable-dev/feature/langgraph-integration
feat: update langgraph stdio demo output format
2 parents a0249a6 + 97a0a8d commit 4d69c8b

2 files changed

Lines changed: 32 additions & 6 deletions

File tree

.DS_Store

6 KB
Binary file not shown.

mcp-python-server-client/src/langgraph_stdio_demo.py

Lines changed: 32 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -3,11 +3,14 @@
33
import asyncio
44
import os
55
from dotenv import load_dotenv
6+
from collections import namedtuple
67

78
from langchain_mcp_adapters.client import MultiServerMCPClient
89
from langgraph.prebuilt import create_react_agent
910
import openai
1011

12+
from .utils import render_utility_result
13+
1114
load_dotenv()
1215
openai.api_key = os.getenv("OPENAI_API_KEY")
1316

@@ -27,7 +30,7 @@ async def main():
2730
for t in tools:
2831
print(f" • {t.name}")
2932

30-
agent = create_react_agent("openai:gpt-4o", tools)
33+
agent = create_react_agent("openai:gpt-4.1", tools)
3134

3235
tests = [
3336
("about_info", "Call the 'about_info' tool."),
@@ -36,12 +39,35 @@ async def main():
3639
("dependency_scan","Call 'dependency_scan' on path '../'."),
3740
]
3841

42+
TextContent = namedtuple('TextContent', ['text'])
43+
ToolResult = namedtuple('ToolResult', ['content', 'isError'])
44+
3945
for name, prompt in tests:
4046
print(f"\n--- {name}{prompt}")
41-
resp = await agent.ainvoke({
42-
"messages": [{"role": "user", "content": prompt}]
43-
})
44-
print(resp)
47+
tool_outputs = []
48+
agent_final_response = None
49+
50+
# Stream through the agent's steps
51+
async for chunk in agent.astream({"messages": [{"role": "user", "content": prompt}]}
52+
):
53+
if "tools" in chunk:
54+
tool_outputs.extend(chunk["tools"]["messages"])
55+
if "agent" in chunk:
56+
# The agent's message is the latest one in the list
57+
message = chunk["agent"]["messages"][-1]
58+
# If it's a final response (no more tool calls), we save it.
59+
if not message.tool_calls:
60+
agent_final_response = message
61+
62+
# Render the collected outputs. Prioritize tool output.
63+
if tool_outputs:
64+
# To make the output identical, we remove the "Tool output:" header
65+
for msg in tool_outputs:
66+
mock_result = ToolResult(content=[TextContent(text=msg.content)], isError=False)
67+
render_utility_result(mock_result)
68+
elif agent_final_response:
69+
# Only if no tool was called, print the agent's response.
70+
print(agent_final_response.content)
4571

4672
if __name__ == "__main__":
47-
asyncio.run(main())
73+
asyncio.run(main())

0 commit comments

Comments (0)