mirror of
https://github.com/HKUDS/nanobot.git
synced 2026-04-09 04:33:38 +00:00
feat(status): display cache hit rate in /status command
This commit is contained in:
parent
9a2f38d7a2
commit
f6575effa5
@ -255,14 +255,18 @@ def build_status_content(
|
||||
)
|
||||
last_in = last_usage.get("prompt_tokens", 0)
|
||||
last_out = last_usage.get("completion_tokens", 0)
|
||||
cached = last_usage.get("cached_tokens", 0)
|
||||
ctx_total = max(context_window_tokens, 0)
|
||||
ctx_pct = int((context_tokens_estimate / ctx_total) * 100) if ctx_total > 0 else 0
|
||||
ctx_used_str = f"{context_tokens_estimate // 1000}k" if context_tokens_estimate >= 1000 else str(context_tokens_estimate)
|
||||
ctx_total_str = f"{ctx_total // 1024}k" if ctx_total > 0 else "n/a"
|
||||
token_line = f"\U0001f4ca Tokens: {last_in} in / {last_out} out"
|
||||
if cached and last_in:
|
||||
token_line += f" ({cached * 100 // last_in}% cached)"
|
||||
return "\n".join([
|
||||
f"\U0001f408 nanobot v{version}",
|
||||
f"\U0001f9e0 Model: {model}",
|
||||
f"\U0001f4ca Tokens: {last_in} in / {last_out} out",
|
||||
token_line,
|
||||
f"\U0001f4da Context: {ctx_used_str}/{ctx_total_str} ({ctx_pct}%)",
|
||||
f"\U0001f4ac Session: {session_msg_count} messages",
|
||||
f"\u23f1 Uptime: {uptime}",
|
||||
|
||||
59
tests/test_build_status.py
Normal file
59
tests/test_build_status.py
Normal file
@ -0,0 +1,59 @@
|
||||
"""Tests for build_status_content cache hit rate display."""
|
||||
|
||||
from nanobot.utils.helpers import build_status_content
|
||||
|
||||
|
||||
def test_status_shows_cache_hit_rate():
    """A non-zero cached_tokens value is rendered as a percentage of prompt_tokens."""
    usage = {"prompt_tokens": 2000, "completion_tokens": 300, "cached_tokens": 1200}
    rendered = build_status_content(
        version="0.1.0",
        model="glm-4-plus",
        start_time=1000000.0,
        last_usage=usage,
        context_window_tokens=128000,
        session_msg_count=10,
        context_tokens_estimate=5000,
    )
    # 1200 cached out of 2000 prompt tokens -> integer 60%.
    assert "60% cached" in rendered
    assert "2000 in / 300 out" in rendered
|
||||
|
||||
|
||||
def test_status_no_cache_info():
    """Without cached_tokens, display should not show cache percentage."""
    usage = {"prompt_tokens": 2000, "completion_tokens": 300}
    rendered = build_status_content(
        version="0.1.0",
        model="glm-4-plus",
        start_time=1000000.0,
        last_usage=usage,
        context_window_tokens=128000,
        session_msg_count=10,
        context_tokens_estimate=5000,
    )
    # No cached_tokens key at all -> the "% cached" suffix must be absent.
    assert "cached" not in rendered.lower()
    assert "2000 in / 300 out" in rendered
|
||||
|
||||
|
||||
def test_status_zero_cached_tokens():
    """cached_tokens=0 should not show cache percentage."""
    usage = {"prompt_tokens": 2000, "completion_tokens": 300, "cached_tokens": 0}
    rendered = build_status_content(
        version="0.1.0",
        model="glm-4-plus",
        start_time=1000000.0,
        last_usage=usage,
        context_window_tokens=128000,
        session_msg_count=10,
        context_tokens_estimate=5000,
    )
    # An explicit zero is treated the same as a missing key: no suffix.
    assert "cached" not in rendered.lower()
|
||||
|
||||
|
||||
def test_status_100_percent_cached():
    """Every prompt token cached -> the display reads exactly 100%."""
    usage = {"prompt_tokens": 1000, "completion_tokens": 100, "cached_tokens": 1000}
    rendered = build_status_content(
        version="0.1.0",
        model="glm-4-plus",
        start_time=1000000.0,
        last_usage=usage,
        context_window_tokens=128000,
        session_msg_count=5,
        context_tokens_estimate=3000,
    )
    # 1000 * 100 // 1000 == 100 — boundary of the percentage range.
    assert "100% cached" in rendered
|
||||
Loading…
x
Reference in New Issue
Block a user