mirror of
https://github.com/HKUDS/nanobot.git
synced 2026-04-15 07:29:52 +00:00
fix(runner): preserve injection flag after max-iteration drain
Keep late follow-up injections observable when they are drained during max-iteration shutdown so loop-level response suppression still makes the right decision. Made-with: Cursor
This commit is contained in:
parent
a1e1eed2f1
commit
a38bc637bd
@ -515,10 +515,12 @@ class AgentRunner:
|
||||
# independent inbound messages by _dispatch's finally block.
|
||||
# We ignore should_continue here because the for-loop has already
|
||||
# exhausted all iterations.
|
||||
_, injection_cycles = await self._try_drain_injections(
|
||||
drained_after_max_iterations, injection_cycles = await self._try_drain_injections(
|
||||
spec, messages, None, injection_cycles,
|
||||
phase="after max_iterations",
|
||||
)
|
||||
if drained_after_max_iterations:
|
||||
had_injections = True
|
||||
|
||||
return AgentRunResult(
|
||||
final_content=final_content,
|
||||
|
||||
@ -2610,6 +2610,7 @@ async def test_drain_injections_on_max_iterations():
|
||||
))
|
||||
|
||||
assert result.stop_reason == "max_iterations"
|
||||
assert result.had_injections is True
|
||||
# The injection was consumed from the queue (preventing re-publish)
|
||||
assert injection_queue.empty()
|
||||
# The injection message is appended to conversation history
|
||||
@ -2620,6 +2621,69 @@ async def test_drain_injections_on_max_iterations():
|
||||
assert len(injected) == 1
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_drain_injections_set_flag_when_followup_arrives_after_last_iteration():
    """A follow-up injected right as the loop exhausts max_iterations must
    still be drained AND reflected in ``had_injections`` on the result."""
    from nanobot.agent.hook import AgentHook
    from nanobot.agent.runner import AgentRunSpec, AgentRunner
    from nanobot.bus.events import InboundMessage

    # LLM stub: every turn requests another tool call so the runner never
    # finishes naturally and must stop on the iteration cap.
    provider = MagicMock()
    llm_calls = [0]

    async def chat_with_retry(*, messages, **kwargs):
        llm_calls[0] += 1
        request = ToolCallRequest(
            id=f"c{llm_calls[0]}",
            name="read_file",
            arguments={"path": "x"},
        )
        return LLMResponse(content="", tool_calls=[request], usage={})

    provider.chat_with_retry = chat_with_retry

    tools = MagicMock()
    tools.get_definitions.return_value = []
    tools.execute = AsyncMock(return_value="file content")

    injection_queue = asyncio.Queue()
    inject_cb = _make_injection_callback(injection_queue)

    class _LateInjectionHook(AgentHook):
        """Publishes an inbound message only after the final (2nd) iteration."""

        def __init__(self) -> None:
            self.after_iteration_calls = 0

        async def after_iteration(self, context) -> None:
            self.after_iteration_calls += 1
            if self.after_iteration_calls != 2:
                return
            message = InboundMessage(
                channel="cli",
                sender_id="u",
                chat_id="c",
                content="late follow-up after max iters",
            )
            await injection_queue.put(message)

    runner = AgentRunner(provider)
    spec = AgentRunSpec(
        initial_messages=[{"role": "user", "content": "hello"}],
        tools=tools,
        model="test-model",
        max_iterations=2,
        max_tool_result_chars=_MAX_TOOL_RESULT_CHARS,
        injection_callback=inject_cb,
        hook=_LateInjectionHook(),
    )
    result = await runner.run(spec)

    assert result.stop_reason == "max_iterations"
    # The late injection was drained, so the flag must be set...
    assert result.had_injections is True
    # ...the queue consumed (no re-publish)...
    assert injection_queue.empty()
    # ...and the drained message appended to the conversation.
    injected = [
        m for m in result.messages
        if m.get("role") == "user" and m.get("content") == "late follow-up after max iters"
    ]
    assert len(injected) == 1
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_injection_cycle_cap_on_error_path():
|
||||
"""Injection cycles should be capped even when every iteration hits an LLM error."""
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user