|
| 1 | +# src/langgraph_stdio_demo.py |
| 2 | + |
1 | 3 | import asyncio |
2 | 4 | import os |
3 | 5 | from dotenv import load_dotenv |
| 6 | + |
4 | 7 | from langchain_mcp_adapters.client import MultiServerMCPClient |
5 | 8 | from langgraph.prebuilt import create_react_agent |
6 | 9 | import openai |
7 | 10 |
|
# Pull environment variables (including OPENAI_API_KEY) in from a local .env file.
load_dotenv()
# Mirror the key onto the openai module for code paths that read it there.
openai.api_key = os.environ.get("OPENAI_API_KEY")
11 | 13 |
|
async def main(model: str = "openai:gpt-4o") -> None:
    """Smoke-test the syncable-cli MCP server over stdio via a ReAct agent.

    Spawns the compiled ``mcp-stdio`` binary as a subprocess, lists the tools
    it exposes, then drives a LangGraph ReAct agent through one prompt per
    tool and prints each raw response.

    Args:
        model: Chat model identifier handed to ``create_react_agent``.
            Defaults to ``"openai:gpt-4o"``.
    """
    # Let the binary location be overridden without editing the script;
    # falls back to the sibling Rust project's release build.
    server_bin = os.getenv(
        "MCP_STDIO_BIN",
        "../rust-mcp-server-syncable-cli/target/release/mcp-stdio",
    )

    client = MultiServerMCPClient({
        "syncable_cli": {
            "command": server_bin,
            "args": [],            # the binary takes no extra arguments
            "transport": "stdio",  # MCP over the child's stdin/stdout
        }
    })

    tools = await client.get_tools()
    print(f"Fetched {len(tools)} tools:")
    for t in tools:
        print(f"  • {t.name}")

    agent = create_react_agent(model, tools)

    # (tool name, user prompt) pairs — one smoke test per exposed tool.
    tests = [
        ("about_info", "Call the 'about_info' tool."),
        ("analysis_scan", "Call 'analysis_scan' on path '../' with display 'matrix'."),
        ("security_scan", "Call 'security_scan' on path '../'."),
        ("dependency_scan", "Call 'dependency_scan' on path '../'."),
    ]

    for name, prompt in tests:
        print(f"\n--- {name} → {prompt}")
        resp = await agent.ainvoke({
            "messages": [{"role": "user", "content": prompt}]
        })
        print(resp)
|
# Run the demo only when executed as a script, not on import.
if __name__ == "__main__":
    asyncio.run(main())
0 commit comments