fix(agent): use loguru instead of stdlib logging in runner
The project uses loguru consistently across all 32 modules; the stdlib
logging import was an oversight left over from the cached_tokens feature.
Also fixes the log format string, replacing printf-style %d/%s placeholders
with the {} braces that loguru expects.
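
Below is a minimal sketch (not part of the commit; the token counts are
made-up illustration values) of why the placeholder fix matters: stdlib
logging interpolates printf-style %d/%s placeholders, while loguru runs the
message through str.format(), so %-style placeholders in a loguru call are
never substituted and show up verbatim in the log.

    from loguru import logger

    prompt, completion, cached = 512, 64, 0  # made-up illustration values

    # Buggy pre-fix call: loguru formats the message via str.format(),
    # which ignores %d/%s, so the placeholders appear literally in the output.
    logger.debug("LLM usage: prompt=%d completion=%d cached=%s", prompt, completion, cached)

    # Fixed call: {} braces are interpolated, logging "prompt=512 completion=64 cached=0".
    logger.debug("LLM usage: prompt={} completion={} cached={}", prompt, completion, cached)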
parent: cd2c7eb26f
commit: 07f216b13f
@@ -3,17 +3,16 @@
 from __future__ import annotations
 
 import asyncio
-import logging
 from dataclasses import dataclass, field
 from typing import Any
 
+from loguru import logger
+
 from nanobot.agent.hook import AgentHook, AgentHookContext
 from nanobot.agent.tools.registry import ToolRegistry
 from nanobot.providers.base import LLMProvider, ToolCallRequest
 from nanobot.utils.helpers import build_assistant_message
 
-logger = logging.getLogger(__name__)
-
 _DEFAULT_MAX_ITERATIONS_MESSAGE = (
     "I reached the maximum number of tool call iterations ({max_iterations}) "
     "without completing the task. You can try breaking the task into smaller steps."
@@ -110,7 +109,7 @@ class AgentRunner:
         context.response = response
         context.usage = iter_usage
         logger.debug(
-            "LLM usage: prompt=%d completion=%d cached=%s",
+            "LLM usage: prompt={} completion={} cached={}",
             iter_usage["prompt_tokens"],
             iter_usage["completion_tokens"],
             iter_usage.get("cached_tokens", 0),
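
A design note on the getLogger removal, with a minimal sketch below (the
sink configuration is hypothetical, not nanobot's actual setup): loguru
exposes a single pre-configured logger object instead of a per-module
factory, and it records the calling module in each log record on its own,
which is why the logging.getLogger(__name__) line is deleted rather than
translated.

    import sys
    from loguru import logger

    # Hypothetical sink setup, for illustration only.
    logger.remove()  # drop loguru's default stderr sink
    logger.add(sys.stderr, format="{time:HH:mm:ss} | {level} | {name}:{line} - {message}")

    # {name} is filled in from the calling module automatically,
    # so no getLogger(__name__) is needed at the top of each file.
    logger.debug("LLM usage: prompt={} completion={} cached={}", 512, 64, 0)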