-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathtest_sequential_fix.py
More file actions
56 lines (46 loc) · 1.82 KB
/
test_sequential_fix.py
File metadata and controls
56 lines (46 loc) · 1.82 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
#!/usr/bin/env python3
from praisonaiagents import Agent, MCP
import os
def test_agent_direct():
    """Exercise the agent.py code path by passing the bare model name "gpt-4o-mini".

    Builds an Agent wired to the sequential-thinking MCP server and runs a
    single prompt through it. Returns whatever ``agent.start`` produces.
    """
    banner = "=" * 50
    print(banner)
    print("Testing gpt-4o-mini (agent.py path)")
    print(banner)

    # Bare model name (no provider prefix) routes through agent.py's direct
    # OpenAI call path rather than the LiteLLM wrapper.
    direct_agent = Agent(
        instructions="You are a helpful assistant that can break down complex problems.",
        llm="gpt-4o-mini",
        tools=MCP("npx -y @modelcontextprotocol/server-sequential-thinking"),
    )
    response = direct_agent.start("What are 3 steps to make coffee?")
    print("✅ Agent direct path completed successfully")
    return response
def test_llm_class():
    """Exercise the llm.py code path by using the provider-prefixed "openai/gpt-4o-mini".

    Mirrors ``test_agent_direct`` but with a provider-prefixed model string,
    which routes through the LiteLLM wrapper. Returns the agent's result.
    """
    banner = "=" * 50
    print("\n" + banner)
    print("Testing openai/gpt-4o-mini (llm.py path)")
    print(banner)

    # The "openai/" prefix selects the llm.py LiteLLM-backed implementation.
    wrapped_agent = Agent(
        instructions="You are a helpful assistant that can break down complex problems.",
        llm="openai/gpt-4o-mini",
        tools=MCP("npx -y @modelcontextprotocol/server-sequential-thinking"),
    )
    response = wrapped_agent.start("What are 3 steps to make coffee?")
    print("✅ LLM class path completed successfully")
    return response
if __name__ == "__main__":
    try:
        # Run both model-string formats; each returns the agent's answer.
        direct_result = test_agent_direct()
        wrapped_result = test_llm_class()

        banner = "=" * 50
        print("\n" + banner)
        print("SUMMARY")
        print(banner)
        summary_lines = (
            "✅ Both formats work correctly!",
            "✅ gpt-4o-mini uses agent.py direct OpenAI calls",
            "✅ openai/gpt-4o-mini uses llm.py LiteLLM wrapper",
            "✅ Both support sequential tool calling and MCP integration",
            "✅ Sequential thinking tool works properly in both modes",
        )
        for line in summary_lines:
            print(line)
    except Exception as e:
        # Report any failure from either path with a full traceback.
        print(f"❌ Error occurred: {e}")
        import traceback
        traceback.print_exc()