Skip to content

Commit 8ba2ff4

Browse files
committed
example: multi-agent-debate lints
1 parent 46f1f05 commit 8ba2ff4

File tree

6 files changed

+101
-64
lines changed

6 files changed

+101
-64
lines changed

examples/multi-agent-debate/config.py

Lines changed: 21 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,3 @@
1-
from rich.prompt import Prompt
21
from typing import Optional
32
import langroid.language_models as lm
43
import langroid.utils.configuration
@@ -23,6 +22,7 @@
2322

2423
MISTRAL_MAX_OUTPUT_TOKENS = 16_000
2524

25+
2626
def get_global_settings(debug: bool = False, nocache: bool = True) -> Settings:
2727
"""
2828
Retrieve global Langroid settings.
@@ -40,7 +40,9 @@ def get_global_settings(debug: bool = False, nocache: bool = True) -> Settings:
4040
)
4141

4242

43-
def create_llm_config(chat_model_option: str, temperature: Optional[float] = None) -> OpenAIGPTConfig:
43+
def create_llm_config(
44+
chat_model_option: str, temperature: Optional[float] = None
45+
) -> OpenAIGPTConfig:
4446
"""
4547
Creates an LLM (Language Learning Model) configuration based on the selected model.
4648
@@ -57,32 +59,41 @@ def create_llm_config(chat_model_option: str, temperature: Optional[float] = Non
5759
Raises:
5860
ValueError: If the user-provided `chat_model_option` does not exist in `MODEL_MAP`.
5961
"""
62+
6063
chat_model = MODEL_MAP.get(chat_model_option)
6164
# Load generation configuration from JSON
62-
generation_config: GenerationConfig = load_generation_config("examples/multi-agent-debate/generation_config.json")
65+
generation_config: GenerationConfig = load_generation_config(
66+
"examples/multi-agent-debate/generation_config.json"
67+
)
6368

6469
if not chat_model:
6570
raise ValueError(f"Invalid model selection: {chat_model_option}")
6671

6772
# Determine max_output_tokens based on the selected model
68-
max_output_tokens = (
69-
MISTRAL_MAX_OUTPUT_TOKENS if chat_model_option == "7" else generation_config.max_output_tokens
73+
max_output_tokens_config = (
74+
MISTRAL_MAX_OUTPUT_TOKENS
75+
if chat_model_option == "7"
76+
else generation_config.max_output_tokens
7077
)
7178

7279
# Use passed temperature if provided; otherwise, use the one from the JSON config
73-
effective_temperature = temperature if temperature is not None else generation_config.temperature
80+
effective_temperature = (
81+
temperature if temperature is not None else generation_config.temperature
82+
)
7483

7584
# Create and return the LLM configuration
7685
return OpenAIGPTConfig(
7786
chat_model=chat_model,
7887
min_output_tokens=generation_config.min_output_tokens,
79-
max_output_tokens=generation_config.max_output_tokens,
88+
max_output_tokens=max_output_tokens_config,
8089
temperature=effective_temperature,
81-
seed=generation_config.seed
90+
seed=generation_config.seed,
8291
)
8392

8493

85-
def get_base_llm_config(chat_model_option: str, temperature: Optional[float] = None) -> OpenAIGPTConfig:
94+
def get_base_llm_config(
95+
chat_model_option: str, temperature: Optional[float] = None
96+
) -> OpenAIGPTConfig:
8697
"""
8798
Prompt the user to select a base LLM configuration and return it.
8899
@@ -96,4 +107,4 @@ def get_base_llm_config(chat_model_option: str, temperature: Optional[float] = N
96107
# Pass temperature only if it is provided
97108
if temperature is not None:
98109
return create_llm_config(chat_model_option, temperature)
99-
return create_llm_config(chat_model_option)
110+
return create_llm_config(chat_model_option)

examples/multi-agent-debate/generation_config_models.py

Lines changed: 17 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,15 +1,25 @@
1-
from typing import List, Optional
2-
from pydantic import BaseModel, Field
1+
from typing import Optional
2+
from langroid.pydantic_v1 import BaseModel, Field
33
import json
44

55

66
class GenerationConfig(BaseModel):
77
"""Represents configuration for text generation."""
8-
max_output_tokens: int = Field(default=1024, ge=1, description="Maximum output tokens.")
9-
min_output_tokens: int = Field(default=1, ge=0, description="Minimum output tokens.")
10-
temperature: float = Field(default=0.7, ge=0.0, le=1.0, description="Sampling temperature.")
11-
seed: Optional[int] = Field(default=42, description="Seed for reproducibility. If set, ensures deterministic "
12-
"outputs for the same input.")
8+
9+
max_output_tokens: int = Field(
10+
default=1024, ge=1, description="Maximum output tokens."
11+
)
12+
min_output_tokens: int = Field(
13+
default=1, ge=0, description="Minimum output tokens."
14+
)
15+
temperature: float = Field(
16+
default=0.7, ge=0.0, le=1.0, description="Sampling temperature."
17+
)
18+
seed: Optional[int] = Field(
19+
default=42,
20+
description="Seed for reproducibility. If set, ensures deterministic "
21+
"outputs for the same input.",
22+
)
1323

1424

1525
def load_generation_config(file_path: str) -> GenerationConfig:

examples/multi-agent-debate/main.py

Lines changed: 45 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@
2020
FEEDBACK_AGENT_SYSTEM_MESSAGE,
2121
generate_metaphor_search_agent_system_message,
2222
)
23+
2324
# Import from utils.py
2425
from utils import (
2526
select_model,
@@ -32,9 +33,7 @@
3233

3334

3435
class MetaphorSearchChatAgent(ChatAgent):
35-
def handle_message_fallback(
36-
self, msg: str | ChatDocument
37-
) -> str | None:
36+
def handle_message_fallback(self, msg: str | ChatDocument) -> str | None:
3837
"""Handle scenario where LLM did not generate any Tool"""
3938
if isinstance(msg, ChatDocument) and msg.metadata.sender == Entity.LLM:
4039
return f"""
@@ -82,20 +81,18 @@ def parse_and_format_message_history(message_history: List[Any]) -> str:
8281

8382

8483
def create_chat_agent(
85-
name: str,
86-
llm_config: OpenAIGPTConfig,
87-
system_message: str
84+
name: str, llm_config: OpenAIGPTConfig, system_message: str
8885
) -> ChatAgent:
8986
"""creates a ChatAgent with the given parameters.
9087
91-
Args:
92-
name (str): The name of the agent.
93-
llm_config (OpenAIGPTConfig): The LLM configuration for the agent.
94-
system_message (str): The system message to guide the agent's LLM.
88+
Args:
89+
name (str): The name of the agent.
90+
llm_config (OpenAIGPTConfig): The LLM configuration for the agent.
91+
system_message (str): The system message to guide the agent's LLM.
9592
96-
Returns:
97-
ChatAgent: A configured ChatAgent instance.
98-
"""
93+
Returns:
94+
ChatAgent: A configured ChatAgent instance.
95+
"""
9996
return ChatAgent(
10097
ChatAgentConfig(
10198
llm=llm_config,
@@ -131,9 +128,9 @@ def run_debate() -> None:
131128

132129
# Get base LLM configuration
133130
if same_llm:
134-
135131
shared_agent_config: OpenAIGPTConfig = get_base_llm_config(
136-
select_model("main LLM"))
132+
select_model("main LLM")
133+
)
137134
pro_agent_config = con_agent_config = shared_agent_config
138135

139136
# Create feedback_agent_config by modifying shared_agent_config
@@ -146,29 +143,40 @@ def run_debate() -> None:
146143
)
147144
metaphor_search_agent_config = feedback_agent_config
148145
else:
149-
pro_agent_config: OpenAIGPTConfig = get_base_llm_config(select_model("for Pro Agent"))
150-
con_agent_config: OpenAIGPTConfig = get_base_llm_config(select_model("for Con Agent"))
151-
feedback_agent_config: OpenAIGPTConfig = get_base_llm_config(select_model("feedback"),temperature=0.2)
146+
pro_agent_config: OpenAIGPTConfig = get_base_llm_config(
147+
select_model("for Pro Agent")
148+
)
149+
con_agent_config: OpenAIGPTConfig = get_base_llm_config(
150+
select_model("for Con Agent")
151+
)
152+
feedback_agent_config: OpenAIGPTConfig = get_base_llm_config(
153+
select_model("feedback"), temperature=0.2
154+
)
152155
metaphor_search_agent_config = feedback_agent_config
153156

154-
system_messages: SystemMessages = load_system_messages("examples/multi-agent-debate/system_messages.json")
157+
system_messages: SystemMessages = load_system_messages(
158+
"examples/multi-agent-debate/system_messages.json"
159+
)
155160
topic_name, pro_key, con_key, side = select_topic_and_setup_side(system_messages)
156161

157162
# Generate the system message
158-
metaphor_search_agent_system_message = (generate_metaphor_search_agent_system_message(
159-
system_messages, pro_key, con_key))
160-
161-
pro_agent = create_chat_agent("Pro",
162-
pro_agent_config,
163-
system_messages.messages[pro_key].message +
164-
DEFAULT_SYSTEM_MESSAGE_ADDITION)
165-
con_agent = create_chat_agent("Con",
166-
con_agent_config,
167-
system_messages.messages[con_key].message +
168-
DEFAULT_SYSTEM_MESSAGE_ADDITION)
169-
feedback_agent = create_chat_agent("Feedback",
170-
feedback_agent_config,
171-
FEEDBACK_AGENT_SYSTEM_MESSAGE)
163+
metaphor_search_agent_system_message = (
164+
generate_metaphor_search_agent_system_message(system_messages, pro_key, con_key)
165+
)
166+
167+
pro_agent = create_chat_agent(
168+
"Pro",
169+
pro_agent_config,
170+
system_messages.messages[pro_key].message + DEFAULT_SYSTEM_MESSAGE_ADDITION,
171+
)
172+
con_agent = create_chat_agent(
173+
"Con",
174+
con_agent_config,
175+
system_messages.messages[con_key].message + DEFAULT_SYSTEM_MESSAGE_ADDITION,
176+
)
177+
feedback_agent = create_chat_agent(
178+
"Feedback", feedback_agent_config, FEEDBACK_AGENT_SYSTEM_MESSAGE
179+
)
172180
metaphor_search_agent = MetaphorSearchChatAgent( # Use the subclass here
173181
ChatAgentConfig(
174182
llm=metaphor_search_agent_config,
@@ -180,7 +188,10 @@ def run_debate() -> None:
180188
logger.info("Pro, Con, feedback, and metaphor_search agents created.")
181189

182190
# Determine user's side and assign user_agent and ai_agent based on user selection
183-
agents = {"pro": (pro_agent, con_agent, "Pro", "Con"), "con": (con_agent, pro_agent, "Con", "Pro")}
191+
agents = {
192+
"pro": (pro_agent, con_agent, "Pro", "Con"),
193+
"con": (con_agent, pro_agent, "Con", "Pro"),
194+
}
184195
user_agent, ai_agent, user_side, ai_side = agents[side]
185196
logger.info(
186197
f"Starting debate on topic: {topic_name}, taking the {user_side} side. "

examples/multi-agent-debate/models.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
from typing import Dict
2-
from pydantic import BaseModel
2+
from langroid.pydantic_v1 import BaseModel
33
import json
44
import logging
55
from langroid.utils.logging import setup_logger
@@ -16,6 +16,7 @@ class Message(BaseModel):
1616
topic (str): The topic of the message.
1717
message (str): The content of the message.
1818
"""
19+
1920
topic: str
2021
message: str
2122

@@ -27,6 +28,7 @@ class SystemMessages(BaseModel):
2728
messages (Dict[str, Message]): A dictionary where the key is the message
2829
identifier (e.g., 'pro_ai') and the value is a `Message` object.
2930
"""
31+
3032
messages: Dict[str, Message]
3133

3234

examples/multi-agent-debate/system_messages.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2,11 +2,11 @@
22
from langroid.agent.tools.orchestration import DoneTool
33

44

5-
DEFAULT_SYSTEM_MESSAGE_ADDITION = f"""
5+
DEFAULT_SYSTEM_MESSAGE_ADDITION = """
66
DO NOT REPEAT ARGUMENTS THAT HAVE BEEN PREVIOUSLY GENERATED
77
AND CAN BE SEEN IN THE DEBATE HISTORY PROVIDED.
88
"""
9-
FEEDBACK_AGENT_SYSTEM_MESSAGE = f"""
9+
FEEDBACK_AGENT_SYSTEM_MESSAGE = """
1010
You are an expert and experienced judge specializing in Lincoln-Douglas style debates.
1111
Your goal is to evaluate the debate thoroughly based on the following criteria:
1212
1. Clash of Values: Assess how well each side upholds their stated value (e.g., justice, morality)
@@ -67,5 +67,5 @@ def generate_metaphor_search_agent_system_message(system_messages, pro_key, con_
6767
metaphor_tool_name=MetaphorSearchTool.name(),
6868
pro_message=system_messages.messages[pro_key].message,
6969
con_message=system_messages.messages[con_key].message,
70-
done_tool_name=DoneTool.name()
70+
done_tool_name=DoneTool.name(),
7171
)

examples/multi-agent-debate/utils.py

Lines changed: 12 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -31,9 +31,7 @@ def extract_topics(system_messages: SystemMessages) -> List[Tuple[str, str, str]
3131
for key, message in system_messages.messages.items():
3232
# Process only "pro_" keys to avoid duplicates
3333
if key.startswith("pro_"):
34-
con_key = key.replace("pro_",
35-
"con_",
36-
1) # Match "con_" dynamically
34+
con_key = key.replace("pro_", "con_", 1) # Match "con_" dynamically
3735
if con_key in system_messages.messages: # Ensure "con_" exists
3836
topics.append((message.topic, key, con_key))
3937
return topics
@@ -134,7 +132,9 @@ def select_side(topic_name: str) -> Literal["pro", "con"]:
134132
return "pro" if side == "1" else "con"
135133

136134

137-
def select_topic_and_setup_side(system_messages: SystemMessages) -> Tuple[str, str, str, str]:
135+
def select_topic_and_setup_side(
136+
system_messages: SystemMessages,
137+
) -> Tuple[str, str, str, str]:
138138
"""Prompt the user to select a debate topic and sets up the respective side.
139139
140140
This function handles the user interaction for selecting a debate topic and the side
@@ -199,11 +199,11 @@ def is_metaphor_search_key_set() -> bool:
199199
def is_same_llm_for_all_agents() -> bool:
200200
"""Prompt the user to decide if same LLM should be used for all agents.
201201
202-
Asks the user whether the same LLM should be configured for all agents.
202+
Asks the user whether the same LLM should be configured for all agents.
203203
204-
Returns:
205-
bool: True if the user chooses same LLM for all agents, otherwise return False.
206-
"""
204+
Returns:
205+
bool: True if the user chooses same LLM for all agents, otherwise return False.
206+
"""
207207
# Ask the user if they want to use the same LLM configuration for all agents
208208
return Confirm.ask(
209209
"Do you want to use the same LLM for all agents?",
@@ -214,7 +214,10 @@ def is_same_llm_for_all_agents() -> bool:
214214
def select_max_debate_turns() -> int:
215215
# Prompt for number of debate turns
216216
while True:
217-
max_turns = Prompt.ask(f"How many turns should the debate continue for?", default=str(DEFAULT_TURN_COUNT))
217+
max_turns = Prompt.ask(
218+
"How many turns should the debate continue for?",
219+
default=str(DEFAULT_TURN_COUNT),
220+
)
218221
try:
219222
return int(max_turns)
220223
except ValueError:

0 commit comments

Comments
 (0)