Skip to content

feat: Replace ChatAgent single_iteration with max_iteration #2483

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 2 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 16 additions & 7 deletions camel/agents/chat_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -160,8 +160,13 @@ class ChatAgent(BaseAgent):
(default: :obj:`None`)
scheduling_strategy (str): Name of the function that defines how to select
the next model in ModelManager. (default: :obj:`"round_robin"`)
single_iteration (bool): Whether to let the agent perform only one
model calling at each step. (default: :obj:`False`)
max_iteration (Optional[int], optional): Maximum number of model
calling iterations allowed per step. If `None` (default), there is
no explicit limit and the agent behaves like the old
`single_iteration=False`: it keeps calling tools/the model until a
content response or another termination condition is reached. If
`1`, it performs a single model call (like the old
`single_iteration=True`). If `N > 1`, it allows up to N model
calls. (default: :obj:`None`)
agent_id (str, optional): The ID of the agent. If not provided, a
random UUID will be generated. (default: :obj:`None`)
stop_event (Optional[threading.Event], optional): Event to signal
Expand Down Expand Up @@ -197,7 +202,7 @@ def __init__(
] = None,
response_terminators: Optional[List[ResponseTerminator]] = None,
scheduling_strategy: str = "round_robin",
single_iteration: bool = False,
max_iteration: Optional[int] = None,
agent_id: Optional[str] = None,
stop_event: Optional[threading.Event] = None,
) -> None:
Expand Down Expand Up @@ -272,7 +277,7 @@ def __init__(
# Set up other properties
self.terminated = False
self.response_terminators = response_terminators or []
self.single_iteration = single_iteration
self.max_iteration = max_iteration
self.stop_event = stop_event

def reset(self):
Expand Down Expand Up @@ -789,6 +794,7 @@ def step(

# Initialize token usage tracker
step_token_usage = self._create_token_usage_tracker()
iteration_count = 0

while True:
try:
Expand All @@ -805,6 +811,7 @@ def step(
response_format,
self._get_full_tool_schemas(),
)
iteration_count += 1

# Accumulate API token usage
self._update_token_usage_tracker(
Expand Down Expand Up @@ -839,7 +846,7 @@ def step(
if external_tool_call_requests:
break

if self.single_iteration:
if self.max_iteration is not None and iteration_count >= self.max_iteration:
break

# If we're still here, continue the loop
Expand Down Expand Up @@ -914,6 +921,7 @@ async def astep(

# Initialize token usage tracker
step_token_usage = self._create_token_usage_tracker()
iteration_count = 0
while True:
try:
openai_messages, num_tokens = self.memory.get_context()
Expand All @@ -929,6 +937,7 @@ async def astep(
response_format,
self._get_full_tool_schemas(),
)
iteration_count += 1

# Terminate Agent if stop_event is set
if self.stop_event and self.stop_event.is_set():
Expand Down Expand Up @@ -959,7 +968,7 @@ async def astep(
if external_tool_call_requests:
break

if self.single_iteration:
if self.max_iteration is not None and iteration_count >= self.max_iteration:
break

# If we're still here, continue the loop
Expand Down Expand Up @@ -1754,7 +1763,7 @@ def clone(self, with_memory: bool = False) -> ChatAgent:
],
response_terminators=self.response_terminators,
scheduling_strategy=self.model_backend.scheduling_strategy.__name__,
single_iteration=self.single_iteration,
max_iteration=self.max_iteration,
stop_event=self.stop_event,
)

Expand Down
6 changes: 3 additions & 3 deletions camel/benchmarks/browsecomp.py
Original file line number Diff line number Diff line change
Expand Up @@ -598,7 +598,7 @@ def process_benchmark_row(row: Dict[str, Any]) -> Dict[str, Any]:
formatter_in_process = task_json_formatter.clone()
else:
formatter_in_process = ChatAgent(
"You are a helpful assistant."
"You are a helpful assistant.", max_iteration=None
)
response_text = formatter_in_process.step(
FORMAT_JSON_TEMPLATE.format(content=task.result),
Expand Down Expand Up @@ -639,7 +639,7 @@ def process_benchmark_row(row: Dict[str, Any]) -> Dict[str, Any]:
summarizer_in_process = roleplaying_summarizer.clone()
else:
summarizer_in_process = ChatAgent(
"You are a helpful assistant."
"You are a helpful assistant.", max_iteration=None
)

summarize_prompt = SUMMARIZE_TEMPLATE.format(
Expand Down Expand Up @@ -738,7 +738,7 @@ def validate_each_one(raw_result: Dict[str, Any]) -> SingleEvalResult:
if grader:
grader_in_process = grader.clone()
else:
grader_in_process = ChatAgent("You are a helpful assistant.")
grader_in_process = ChatAgent("You are a helpful assistant.", max_iteration=None)

# Create a conversation list for the result
convo = [
Expand Down
2 changes: 1 addition & 1 deletion camel/datagen/evol_instruct/evol_instruct.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ def __init__(
(default: :obj:`None`)
"""
self.templates = templates
self.agent = agent or ChatAgent()
self.agent = agent or ChatAgent(max_iteration=None)

def _resolve_evolution_method(self, method_key: str) -> str:
r"""Resolve evolution method key to concrete implementation.
Expand Down
4 changes: 2 additions & 2 deletions camel/datagen/evol_instruct/scorer.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ def __init__(self, agent: Optional[ChatAgent] = None):
Respond with a JSON object like:
{ "solution": ..., "diversity": ..., "difficulty": ..., "solvability": ... }
"""
self.agent = agent or ChatAgent(self.system_msg)
self.agent = agent or ChatAgent(self.system_msg, max_iteration=None)

class MathScoreSchema(BaseModel):
diversity: int = Field(
Expand Down Expand Up @@ -117,7 +117,7 @@ def __init__(self, agent: Optional[ChatAgent] = None):
"Respond with a JSON object like: "
"{ \"diversity\": ..., \"complexity\": ..., \"validity\": ... }"
)
self.agent = agent or ChatAgent(self.system_msg)
self.agent = agent or ChatAgent(self.system_msg, max_iteration=None)

class GeneralScoreSchema(BaseModel):
diversity: int = Field(
Expand Down
2 changes: 2 additions & 0 deletions camel/datasets/self_instruct_generator.py
Original file line number Diff line number Diff line change
Expand Up @@ -181,6 +181,7 @@ def default_instruction_agent(self) -> ChatAgent:
return ChatAgent(
DEFAULT_INSTRUCTION_SYSTEM_PROMPT,
model=model,
max_iteration=None,
)

def default_rationale_agent(self) -> ChatAgent:
Expand All @@ -201,6 +202,7 @@ def default_rationale_agent(self) -> ChatAgent:
return ChatAgent(
DEFAULT_RATIONALE_SYSTEM_PROMPT.format(package_list=self.packages),
model=model,
max_iteration=None,
)

@staticmethod
Expand Down
4 changes: 2 additions & 2 deletions camel/personas/persona_hub.py
Original file line number Diff line number Diff line change
Expand Up @@ -120,7 +120,7 @@ def text_to_persona(

# Set Agent to generate personal
t2p_agent = ChatAgent(
system_message="You are a helpful assistant", model=self.model
system_message="You are a helpful assistant", model=self.model, max_iteration=None
)
t2p_agent.reset()

Expand Down Expand Up @@ -170,7 +170,7 @@ def persona_to_persona(self, persona: Persona) -> Dict[uuid.UUID, Persona]:
)

p2p_agent = ChatAgent(
system_message="You're a helpful assistant.", model=self.model
system_message="You're a helpful assistant.", model=self.model, max_iteration=None
)
p2p_agent.reset()

Expand Down
6 changes: 3 additions & 3 deletions camel/societies/workforce/workforce.py
Original file line number Diff line number Diff line change
Expand Up @@ -163,14 +163,14 @@ def __init__(
"a new worker for a task, etc.",
)
self.coordinator_agent = ChatAgent(
coord_agent_sys_msg, **(coordinator_agent_kwargs or {})
coord_agent_sys_msg, **(coordinator_agent_kwargs or {}), max_iteration=None
)

task_sys_msg = BaseMessage.make_assistant_message(
role_name="Task Planner",
content="You are going to compose and decompose tasks.",
)
self.task_agent = ChatAgent(task_sys_msg, **(task_agent_kwargs or {}))
self.task_agent = ChatAgent(task_sys_msg, **(task_agent_kwargs or {}), max_iteration=None)

# If there is one, will set by the workforce class wrapping this
self._task: Optional[Task] = None
Expand Down Expand Up @@ -434,7 +434,7 @@ def _create_new_agent(self, role: str, sys_msg: str) -> ChatAgent:
model_config_dict={"temperature": 0},
)

return ChatAgent(worker_sys_msg, model=model, tools=function_list) # type: ignore[arg-type]
return ChatAgent(worker_sys_msg, model=model, tools=function_list, max_iteration=None) # type: ignore[arg-type]

async def _get_returned_task(self) -> Task:
r"""Get the task that's published by this node and just get returned
Expand Down
2 changes: 2 additions & 0 deletions camel/toolkits/async_browser_toolkit.py
Original file line number Diff line number Diff line change
Expand Up @@ -1017,6 +1017,7 @@ def _initialize_agent(self) -> Tuple["ChatAgent", "ChatAgent"]:
system_message=system_prompt,
model=web_agent_model,
output_language=self.output_language,
max_iteration=None,
)

planning_system_prompt = PLANNING_AGENT_SYSTEM_PROMPT
Expand All @@ -1025,6 +1026,7 @@ def _initialize_agent(self) -> Tuple["ChatAgent", "ChatAgent"]:
system_message=planning_system_prompt,
model=planning_model,
output_language=self.output_language,
max_iteration=None,
)

return web_agent, planning_agent
Expand Down
4 changes: 2 additions & 2 deletions camel/toolkits/audio_analysis_toolkit.py
Original file line number Diff line number Diff line change
Expand Up @@ -126,9 +126,9 @@ def __init__(
from camel.agents import ChatAgent

if audio_reasoning_model:
self.audio_agent = ChatAgent(model=audio_reasoning_model)
self.audio_agent = ChatAgent(model=audio_reasoning_model, max_iteration=None)
else:
self.audio_agent = ChatAgent()
self.audio_agent = ChatAgent(max_iteration=None)
logger.warning(
"No audio reasoning model provided. Using default model in"
" ChatAgent."
Expand Down
2 changes: 2 additions & 0 deletions camel/toolkits/browser_toolkit.py
Original file line number Diff line number Diff line change
Expand Up @@ -864,6 +864,7 @@ def _initialize_agent(
system_message=system_prompt,
model=web_agent_model_instance,
output_language=self.output_language,
max_iteration=None,
)

planning_system_prompt = PLANNING_AGENT_SYSTEM_PROMPT
Expand All @@ -872,6 +873,7 @@ def _initialize_agent(
system_message=planning_system_prompt,
model=planning_model,
output_language=self.output_language,
max_iteration=None,
)

return web_agent, planning_agent
Expand Down
3 changes: 2 additions & 1 deletion camel/toolkits/function_tool.py
Original file line number Diff line number Diff line change
Expand Up @@ -276,7 +276,7 @@ def add(a: int, b: int) -> int:
)
# Initialize assistant with system message and model
assistant_sys_msg = "You are a helpful assistant."
docstring_assistant = ChatAgent(assistant_sys_msg, model=model)
docstring_assistant = ChatAgent(assistant_sys_msg, model=model, max_iteration=None)

# Create user message to prompt the assistant
user_msg = docstring_prompt + code
Expand Down Expand Up @@ -747,6 +747,7 @@ def sum(a, b, c=0):
synthesis_agent = ChatAgent(
assistant_sys_msg,
model=self.synthesize_output_model,
max_iteration=None,
)

# User message combining function string and additional context
Expand Down
1 change: 1 addition & 0 deletions camel/toolkits/image_analysis_toolkit.py
Original file line number Diff line number Diff line change
Expand Up @@ -182,6 +182,7 @@ def _analyze_image(
agent = ChatAgent(
system_message=system_message,
model=self.model,
max_iteration=None,
)

user_msg = BaseMessage.make_user_message(
Expand Down
4 changes: 2 additions & 2 deletions camel/toolkits/video_analysis_toolkit.py
Original file line number Diff line number Diff line change
Expand Up @@ -150,14 +150,14 @@ def __init__(
from camel.agents import ChatAgent

self.vl_agent = ChatAgent(
model=self.vl_model, output_language=self.output_language
model=self.vl_model, output_language=self.output_language, max_iteration=None
)
else:
# If no model is provided, use default model in ChatAgent
# Import ChatAgent at runtime to avoid circular imports
from camel.agents import ChatAgent

self.vl_agent = ChatAgent(output_language=self.output_language)
self.vl_agent = ChatAgent(output_language=self.output_language, max_iteration=None)
logger.warning(
"No vision-language model provided. Using default model in "
"ChatAgent."
Expand Down
2 changes: 1 addition & 1 deletion examples/models/cohere_model_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@
sys_msg = "You are a helpful assistant."

# Set agent
camel_agent = ChatAgent(system_message=sys_msg, model=model)
camel_agent = ChatAgent(system_message=sys_msg, model=model, max_iteration=None)

user_msg = """Who is the best"""

Expand Down
6 changes: 3 additions & 3 deletions examples/models/mistral_model_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@
sys_msg = "You are a helpful assistant."

# Set agent
camel_agent = ChatAgent(system_message=sys_msg, model=model)
camel_agent = ChatAgent(system_message=sys_msg, model=model, max_iteration=None)

user_msg = """Say hi to CAMEL AI, one open-source community dedicated to the
study of autonomous and communicative agents."""
Expand All @@ -54,7 +54,7 @@
)

# Set agent
camel_agent = ChatAgent(system_message=sys_msg, model=model)
camel_agent = ChatAgent(system_message=sys_msg, model=model, max_iteration=None)

# URL of the image
url = "https://raw.githubusercontent.com/camel-ai/camel/master/misc/logo_light.png"
Expand Down Expand Up @@ -87,7 +87,7 @@
sys_msg = "You are a helpful assistant."

# Set agent
camel_agent = ChatAgent(system_message=sys_msg, model=model)
camel_agent = ChatAgent(system_message=sys_msg, model=model, max_iteration=None)

user_msg = """Say hi to CAMEL AI, one open-source community dedicated to the
study of autonomous and communicative agents."""
Expand Down
4 changes: 2 additions & 2 deletions examples/models/nvidia_model_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@
sys_msg = "You are a helpful assistant."

# Set agent
camel_agent = ChatAgent(system_message=sys_msg, model=model)
camel_agent = ChatAgent(system_message=sys_msg, model=model, max_iteration=None)

user_msg = """give me python code to develop a trading bot"""

Expand Down Expand Up @@ -154,7 +154,7 @@ def trade(exchange, symbol, amount, strategy):
sys_msg = "You are a helpful assistant."

# Set agent
camel_agent = ChatAgent(system_message=sys_msg, model=model)
camel_agent = ChatAgent(system_message=sys_msg, model=model, max_iteration=None)

user_msg = """Say hi to CAMEL AI, one open-source community
dedicated to the study of autonomous and communicative agents."""
Expand Down
2 changes: 1 addition & 1 deletion examples/models/openai_gpt_4.1_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@
)

# Set agent
camel_agent = ChatAgent(model=gpt_4_1_model)
camel_agent = ChatAgent(model=gpt_4_1_model, max_iteration=None)

# Set user message
user_msg = """Say hi to CAMEL AI, one open-source community
Expand Down
2 changes: 1 addition & 1 deletion examples/models/watsonx_model_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@
sys_msg = "You are a helpful assistant."

# Set agent
camel_agent = ChatAgent(system_message=sys_msg, model=model, tools=tools)
camel_agent = ChatAgent(system_message=sys_msg, model=model, tools=tools, max_iteration=None)

user_msg = "Assume now is 2024 in the Gregorian calendar, University of Oxford was set up in 1096, estimate the current age of University of Oxford" # noqa: E501

Expand Down
5 changes: 4 additions & 1 deletion services/agent_mcp/agent_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,7 @@
chat_agent = ChatAgent(
model=chat_model,
system_message="You are a helpful assistant.",
max_iteration=None,
# Uncomment to set a specific output language
# output_language="en", # or "zh", "es", "fr", etc.
)
Expand All @@ -66,6 +67,7 @@
reasoning_agent = ChatAgent(
model=reasoning_model,
system_message="You are a helpful assistant.",
max_iteration=None,
)

reasoning_agent_description = """
Expand All @@ -78,7 +80,8 @@
search_agent = ChatAgent(
model=search_model,
system_message="You are a helpful assistant.",
tools=toolkit.get_tools(), # type: ignore[arg-type]
tools=SearchToolkit().get_tools(), # Add search tool
max_iteration=None,
)

search_agent_description = """
Expand Down
Loading
Loading