From a2edee145fa392e8535e4814a13fc3f67c60c3db Mon Sep 17 00:00:00 2001
From: "xzq.xu"
Date: Wed, 18 Mar 2026 17:33:22 +0800
Subject: [PATCH] fix(loop): add return_exceptions=True to parallel tool gather

Without this flag, a BaseException (e.g. CancelledError from /stop) in
one tool would propagate immediately and discard results from the other
concurrent tools, corrupting the OpenAI message format. With
return_exceptions=True, all tool results are collected; any exception is
converted to an error string for the LLM.

Made-with: Cursor
---
 nanobot/agent/loop.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/nanobot/agent/loop.py b/nanobot/agent/loop.py
index 368a240ef..7c1af3add 100644
--- a/nanobot/agent/loop.py
+++ b/nanobot/agent/loop.py
@@ -261,12 +261,16 @@ class AgentLoop:
 
         # Execute all tool calls concurrently — the LLM batches
         # independent calls in a single response on purpose.
+        # return_exceptions=True ensures all results are collected
+        # even if one tool is cancelled or raises BaseException.
         results = await asyncio.gather(*(
             self.tools.execute(tc.name, tc.arguments)
             for tc in response.tool_calls
-        ))
+        ), return_exceptions=True)
 
         for tool_call, result in zip(response.tool_calls, results):
+            if isinstance(result, BaseException):
+                result = f"Error: {type(result).__name__}: {result}"
             messages = self.context.add_tool_result(
                 messages, tool_call.id, tool_call.name, result
             )