Skip to content

Commit bb6619f

Browse files
perf: improve tool calls in reasoning and multiple tool calls display (#7742)
* perf: improve tool calls in reasoning and multiple tool calls display
  - Updated LiveChatRoute and OpenApiRoute to replace manual message accumulation with BotMessageAccumulator.
  - Simplified message saving logic by using build_bot_history_content and collect_plain_text_from_message_parts.
  - Enhanced message processing to handle various message types (plain, image, record, file, video) more efficiently.
  - Improved reasoning handling by extracting thinking parts and displaying them correctly in the UI components.
  - Refactored message normalization and reasoning extraction logic in useMessages composable for better clarity and maintainability.
  - Updated ChatMessageList, MessageList, StandaloneChat, and ReasoningBlock components to accommodate new message structure and rendering logic.

* feat(chat): reasoning activity panel
  - Introduced a new ReasoningSidebar component for displaying reasoning details.
  - Refactored MessageList and StandaloneChat components to utilize renderBlocks for improved message part handling.
  - Added ReasoningTimeline component to visualize reasoning steps.
  - Updated message handling logic to differentiate between thinking and content blocks.
  - Enhanced localization for reasoning-related terms in English, Russian, and Chinese.
  - Improved styling for various components to ensure consistency and readability.

* Update astrbot/dashboard/routes/chat.py

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>

---------

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
1 parent 2f479b5 commit bb6619f

20 files changed

Lines changed: 1499 additions & 749 deletions

File tree

astrbot/core/agent/runners/tool_loop_agent_runner.py

Lines changed: 45 additions & 44 deletions
Original file line numberDiff line numberDiff line change
@@ -717,6 +717,15 @@ async def step(self):
717717
if self.stats.time_to_first_token == 0:
718718
self.stats.time_to_first_token = time.time() - self.stats.start_time
719719

720+
if llm_response.reasoning_content:
721+
yield AgentResponse(
722+
type="streaming_delta",
723+
data=AgentResponseData(
724+
chain=MessageChain(type="reasoning").message(
725+
llm_response.reasoning_content,
726+
),
727+
),
728+
)
720729
if llm_response.result_chain:
721730
yield AgentResponse(
722731
type="streaming_delta",
@@ -729,15 +738,6 @@ async def step(self):
729738
chain=MessageChain().message(llm_response.completion_text),
730739
),
731740
)
732-
if llm_response.reasoning_content:
733-
yield AgentResponse(
734-
type="streaming_delta",
735-
data=AgentResponseData(
736-
chain=MessageChain(type="reasoning").message(
737-
llm_response.reasoning_content,
738-
),
739-
),
740-
)
741741
if self._is_stop_requested():
742742
llm_resp_result = LLMResponse(
743743
role="assistant",
@@ -791,6 +791,15 @@ async def step(self):
791791
await self._complete_with_assistant_response(llm_resp)
792792

793793
# 返回 LLM 结果
794+
if llm_resp.reasoning_content:
795+
yield AgentResponse(
796+
type="llm_result",
797+
data=AgentResponseData(
798+
chain=MessageChain(type="reasoning").message(
799+
llm_resp.reasoning_content,
800+
),
801+
),
802+
)
794803
if llm_resp.result_chain:
795804
yield AgentResponse(
796805
type="llm_result",
@@ -803,15 +812,6 @@ async def step(self):
803812
chain=MessageChain().message(llm_resp.completion_text),
804813
),
805814
)
806-
if llm_resp.reasoning_content:
807-
yield AgentResponse(
808-
type="llm_result",
809-
data=AgentResponseData(
810-
chain=MessageChain(type="reasoning").message(
811-
llm_resp.reasoning_content,
812-
),
813-
),
814-
)
815815

816816
# 如果有工具调用,还需处理工具调用
817817
if llm_resp.tools_call_name:
@@ -821,6 +821,15 @@ async def step(self):
821821
logger.warning(
822822
"skills_like tool re-query returned no tool calls; fallback to assistant response."
823823
)
824+
if llm_resp.reasoning_content:
825+
yield AgentResponse(
826+
type="llm_result",
827+
data=AgentResponseData(
828+
chain=MessageChain(type="reasoning").message(
829+
llm_resp.reasoning_content,
830+
),
831+
),
832+
)
824833
if llm_resp.result_chain:
825834
yield AgentResponse(
826835
type="llm_result",
@@ -833,15 +842,7 @@ async def step(self):
833842
chain=MessageChain().message(llm_resp.completion_text),
834843
),
835844
)
836-
if llm_resp.reasoning_content:
837-
yield AgentResponse(
838-
type="llm_result",
839-
data=AgentResponseData(
840-
chain=MessageChain(type="reasoning").message(
841-
llm_resp.reasoning_content,
842-
),
843-
),
844-
)
845+
845846
await self._complete_with_assistant_response(llm_resp)
846847
return
847848

@@ -988,6 +989,7 @@ def _append_tool_call_result(tool_call_id: str, content: str) -> None:
988989
llm_response.tools_call_args,
989990
llm_response.tools_call_ids,
990991
):
992+
tool_result_blocks_start = len(tool_call_result_blocks)
991993
tool_call_streak = self._track_tool_call_streak(func_tool_name)
992994
yield _HandleFunctionToolsResult.from_message_chain(
993995
MessageChain(
@@ -1201,24 +1203,23 @@ def _append_tool_call_result(tool_call_id: str, content: str) -> None:
12011203
),
12021204
)
12031205

1204-
# yield the last tool call result
1205-
if tool_call_result_blocks:
1206-
last_tcr_content = str(tool_call_result_blocks[-1].content)
1207-
yield _HandleFunctionToolsResult.from_message_chain(
1208-
MessageChain(
1209-
type="tool_call_result",
1210-
chain=[
1211-
Json(
1212-
data={
1213-
"id": func_tool_id,
1214-
"ts": time.time(),
1215-
"result": last_tcr_content,
1216-
}
1217-
)
1218-
],
1206+
if len(tool_call_result_blocks) > tool_result_blocks_start:
1207+
tool_result_content = str(tool_call_result_blocks[-1].content)
1208+
yield _HandleFunctionToolsResult.from_message_chain(
1209+
MessageChain(
1210+
type="tool_call_result",
1211+
chain=[
1212+
Json(
1213+
data={
1214+
"id": func_tool_id,
1215+
"ts": time.time(),
1216+
"result": tool_result_content,
1217+
}
1218+
)
1219+
],
1220+
)
12191221
)
1220-
)
1221-
logger.info(f"Tool `{func_tool_name}` Result: {last_tcr_content}")
1222+
logger.info(f"Tool `{func_tool_name}` Result: {tool_result_content}")
12221223

12231224
# 处理函数调用响应
12241225
if tool_call_result_blocks:

astrbot/core/astr_agent_run_util.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -235,6 +235,12 @@ async def run_agent(
235235
)
236236
await astr_event.send(chain)
237237
continue
238+
elif resp.type == "llm_result":
239+
chain = resp.data["chain"]
240+
if chain.type == "reasoning":
241+
# For non-streaming mode, we handle reasoning in astrbot/core/astr_agent_hooks.py.
242+
# For streaming mode, we yield content immediately when received a reasoning chunk but not in here, see below.
243+
continue
238244

239245
if stream_to_general and resp.type == "streaming_delta":
240246
continue

astrbot/core/provider/sources/openai_source.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -652,6 +652,8 @@ async def _query_stream(
652652
reasoning = self._extract_reasoning_content(chunk)
653653
_y = False
654654
llm_response.id = chunk.id
655+
llm_response.reasoning_content = ""
656+
llm_response.completion_text = ""
655657
if reasoning:
656658
llm_response.reasoning_content = reasoning
657659
_y = True

0 commit comments

Comments (0)