mirror of
https://github.com/HKUDS/nanobot.git
synced 2026-04-02 09:22:36 +00:00
fix(agent): use loguru instead of stdlib logging in runner
The project consistently uses loguru across all 32 modules. The
stdlib logging import was an oversight from the cached_tokens feature.
Also fixes log format from %d/%s placeholders to {} for loguru.
This commit is contained in:
parent
cd2c7eb26f
commit
07f216b13f
@@ -3,17 +3,16 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Any
|
||||
|
||||
from loguru import logger
|
||||
|
||||
from nanobot.agent.hook import AgentHook, AgentHookContext
|
||||
from nanobot.agent.tools.registry import ToolRegistry
|
||||
from nanobot.providers.base import LLMProvider, ToolCallRequest
|
||||
from nanobot.utils.helpers import build_assistant_message
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
_DEFAULT_MAX_ITERATIONS_MESSAGE = (
|
||||
"I reached the maximum number of tool call iterations ({max_iterations}) "
|
||||
"without completing the task. You can try breaking the task into smaller steps."
|
||||
@@ -110,7 +109,7 @@ class AgentRunner:
|
||||
context.response = response
|
||||
context.usage = iter_usage
|
||||
logger.debug(
|
||||
"LLM usage: prompt=%d completion=%d cached=%s",
|
||||
"LLM usage: prompt={} completion={} cached={}",
|
||||
iter_usage["prompt_tokens"],
|
||||
iter_usage["completion_tokens"],
|
||||
iter_usage.get("cached_tokens", 0),
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user