feat(agent): add debug logging for LLM usage including cached tokens

This commit is contained in:
chengyongru 2026-03-30 16:56:31 +08:00
parent f6575effa5
commit 720cf222a3

View File

@@ -3,6 +3,7 @@
from __future__ import annotations
import asyncio
import logging
from dataclasses import dataclass, field
from typing import Any
@@ -11,6 +12,8 @@ from nanobot.agent.tools.registry import ToolRegistry
from nanobot.providers.base import LLMProvider, ToolCallRequest
from nanobot.utils.helpers import build_assistant_message
logger = logging.getLogger(__name__)
_DEFAULT_MAX_ITERATIONS_MESSAGE = (
"I reached the maximum number of tool call iterations ({max_iterations}) "
"without completing the task. You can try breaking the task into smaller steps."
@@ -106,6 +109,12 @@ class AgentRunner:
usage["cached_tokens"] = usage.get("cached_tokens", 0) + iter_usage["cached_tokens"]
context.response = response
context.usage = iter_usage
logger.debug(
"LLM usage: prompt=%d completion=%d cached=%s",
iter_usage["prompt_tokens"],
iter_usage["completion_tokens"],
iter_usage.get("cached_tokens", 0),
)
context.tool_calls = list(response.tool_calls)
if response.has_tool_calls: