mirror of
https://github.com/HKUDS/nanobot.git
synced 2026-04-02 17:32:39 +00:00
278 lines
9.9 KiB
Python
278 lines
9.9 KiB
Python
"""Tests for OpenAICompatProvider spec-driven behavior.
|
|
|
|
Validates that:
|
|
- OpenRouter (no strip) keeps model names intact.
|
|
- AiHubMix (strip_model_prefix=True) strips provider prefixes.
|
|
- Standard providers pass model names through as-is.
|
|
"""
|
|
|
|
from __future__ import annotations
|
|
|
|
import asyncio
|
|
from types import SimpleNamespace
|
|
from unittest.mock import AsyncMock, patch
|
|
|
|
import pytest
|
|
|
|
from nanobot.providers.openai_compat_provider import OpenAICompatProvider
|
|
from nanobot.providers.registry import find_by_name
|
|
|
|
|
|
def _fake_chat_response(content: str = "ok") -> SimpleNamespace:
|
|
"""Build a minimal OpenAI chat completion response."""
|
|
message = SimpleNamespace(
|
|
content=content,
|
|
tool_calls=None,
|
|
reasoning_content=None,
|
|
)
|
|
choice = SimpleNamespace(message=message, finish_reason="stop")
|
|
usage = SimpleNamespace(prompt_tokens=10, completion_tokens=5, total_tokens=15)
|
|
return SimpleNamespace(choices=[choice], usage=usage)
|
|
|
|
|
|
def _fake_tool_call_response() -> SimpleNamespace:
|
|
"""Build a minimal chat response that includes Gemini-style extra_content."""
|
|
function = SimpleNamespace(
|
|
name="exec",
|
|
arguments='{"cmd":"ls"}',
|
|
provider_specific_fields={"inner": "value"},
|
|
)
|
|
tool_call = SimpleNamespace(
|
|
id="call_123",
|
|
index=0,
|
|
type="function",
|
|
function=function,
|
|
extra_content={"google": {"thought_signature": "signed-token"}},
|
|
)
|
|
message = SimpleNamespace(
|
|
content=None,
|
|
tool_calls=[tool_call],
|
|
reasoning_content=None,
|
|
)
|
|
choice = SimpleNamespace(message=message, finish_reason="tool_calls")
|
|
usage = SimpleNamespace(prompt_tokens=10, completion_tokens=5, total_tokens=15)
|
|
return SimpleNamespace(choices=[choice], usage=usage)
|
|
|
|
|
|
class _StalledStream:
    # Async iterator that never yields a chunk: simulates a provider stream
    # that stalls indefinitely, so the idle-timeout watchdog can be exercised.

    def __aiter__(self):
        return self

    async def __anext__(self):
        # Sleep far longer than any sane idle timeout; the watchdog is
        # expected to cancel this long before the sleep completes, so the
        # StopAsyncIteration below is effectively unreachable in tests.
        await asyncio.sleep(3600)
        raise StopAsyncIteration
|
|
|
|
|
|
def test_openrouter_spec_is_gateway() -> None:
    """The registry entry for OpenRouter is a gateway with the expected base URL."""
    openrouter_spec = find_by_name("openrouter")

    assert openrouter_spec is not None
    assert openrouter_spec.is_gateway is True
    assert openrouter_spec.default_api_base == "https://openrouter.ai/api/v1"
|
|
|
|
|
|
def test_openrouter_sets_default_attribution_headers() -> None:
    """Constructing an OpenRouter provider injects attribution headers by default."""
    openrouter_spec = find_by_name("openrouter")
    with patch("nanobot.providers.openai_compat_provider.AsyncOpenAI") as client_cls:
        OpenAICompatProvider(
            api_key="sk-or-test-key",
            api_base="https://openrouter.ai/api/v1",
            default_model="anthropic/claude-sonnet-4-5",
            spec=openrouter_spec,
        )

    sent_headers = client_cls.call_args.kwargs["default_headers"]
    assert sent_headers["HTTP-Referer"] == "https://github.com/HKUDS/nanobot"
    assert sent_headers["X-OpenRouter-Title"] == "nanobot"
    assert sent_headers["X-OpenRouter-Categories"] == "cli-agent,personal-agent"
    assert "x-session-affinity" in sent_headers
|
|
|
|
|
|
def test_openrouter_user_headers_override_default_attribution() -> None:
    """User-supplied extra_headers win over defaults; untouched defaults remain."""
    openrouter_spec = find_by_name("openrouter")
    with patch("nanobot.providers.openai_compat_provider.AsyncOpenAI") as client_cls:
        OpenAICompatProvider(
            api_key="sk-or-test-key",
            api_base="https://openrouter.ai/api/v1",
            default_model="anthropic/claude-sonnet-4-5",
            extra_headers={
                "HTTP-Referer": "https://nanobot.ai",
                "X-OpenRouter-Title": "Nanobot Pro",
                "X-Custom-App": "enabled",
            },
            spec=openrouter_spec,
        )

    sent_headers = client_cls.call_args.kwargs["default_headers"]
    # Overridden by the user:
    assert sent_headers["HTTP-Referer"] == "https://nanobot.ai"
    assert sent_headers["X-OpenRouter-Title"] == "Nanobot Pro"
    # Default kept, plus the user's custom header passed through:
    assert sent_headers["X-OpenRouter-Categories"] == "cli-agent,personal-agent"
    assert sent_headers["X-Custom-App"] == "enabled"
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_openrouter_keeps_model_name_intact() -> None:
    """OpenRouter gateway keeps the full model name (gateway does its own routing)."""
    create_mock = AsyncMock(return_value=_fake_chat_response())
    openrouter_spec = find_by_name("openrouter")

    with patch("nanobot.providers.openai_compat_provider.AsyncOpenAI") as client_cls:
        client_cls.return_value.chat.completions.create = create_mock

        gateway = OpenAICompatProvider(
            api_key="sk-or-test-key",
            api_base="https://openrouter.ai/api/v1",
            default_model="anthropic/claude-sonnet-4-5",
            spec=openrouter_spec,
        )
        await gateway.chat(
            messages=[{"role": "user", "content": "hello"}],
            model="anthropic/claude-sonnet-4-5",
        )

    # The provider-prefixed name must reach the wire unchanged.
    assert create_mock.call_args.kwargs["model"] == "anthropic/claude-sonnet-4-5"
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_aihubmix_strips_model_prefix() -> None:
    """AiHubMix strips the provider prefix (strip_model_prefix=True)."""
    create_mock = AsyncMock(return_value=_fake_chat_response())
    aihubmix_spec = find_by_name("aihubmix")

    with patch("nanobot.providers.openai_compat_provider.AsyncOpenAI") as client_cls:
        client_cls.return_value.chat.completions.create = create_mock

        aihubmix = OpenAICompatProvider(
            api_key="sk-aihub-test-key",
            api_base="https://aihubmix.com/v1",
            default_model="claude-sonnet-4-5",
            spec=aihubmix_spec,
        )
        await aihubmix.chat(
            messages=[{"role": "user", "content": "hello"}],
            model="anthropic/claude-sonnet-4-5",
        )

    # "anthropic/" prefix is stripped before the request is sent.
    assert create_mock.call_args.kwargs["model"] == "claude-sonnet-4-5"
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_standard_provider_passes_model_through() -> None:
    """Standard provider (e.g. deepseek) passes model name through as-is."""
    create_mock = AsyncMock(return_value=_fake_chat_response())
    deepseek_spec = find_by_name("deepseek")

    with patch("nanobot.providers.openai_compat_provider.AsyncOpenAI") as client_cls:
        client_cls.return_value.chat.completions.create = create_mock

        deepseek = OpenAICompatProvider(
            api_key="sk-deepseek-test-key",
            default_model="deepseek-chat",
            spec=deepseek_spec,
        )
        await deepseek.chat(
            messages=[{"role": "user", "content": "hello"}],
            model="deepseek-chat",
        )

    assert create_mock.call_args.kwargs["model"] == "deepseek-chat"
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_openai_compat_preserves_extra_content_on_tool_calls() -> None:
    """Gemini extra_content (thought signatures) must survive parse→serialize round-trip."""
    create_mock = AsyncMock(return_value=_fake_tool_call_response())
    gemini_spec = find_by_name("gemini")

    with patch("nanobot.providers.openai_compat_provider.AsyncOpenAI") as client_cls:
        client_cls.return_value.chat.completions.create = create_mock

        gemini = OpenAICompatProvider(
            api_key="test-key",
            api_base="https://generativelanguage.googleapis.com/v1beta/openai/",
            default_model="google/gemini-3.1-pro-preview",
            spec=gemini_spec,
        )
        result = await gemini.chat(
            messages=[{"role": "user", "content": "run exec"}],
            model="google/gemini-3.1-pro-preview",
        )

    # Parse side: both vendor payloads survived into the parsed tool call.
    assert len(result.tool_calls) == 1
    parsed_call = result.tool_calls[0]
    assert parsed_call.extra_content == {"google": {"thought_signature": "signed-token"}}
    assert parsed_call.function_provider_specific_fields == {"inner": "value"}

    # Serialize side: round-tripping back to the OpenAI wire format keeps them too.
    serialized = parsed_call.to_openai_tool_call()
    assert serialized["extra_content"] == {"google": {"thought_signature": "signed-token"}}
    assert serialized["function"]["provider_specific_fields"] == {"inner": "value"}
|
|
|
|
|
|
def test_openai_model_passthrough() -> None:
    """OpenAI models pass through unchanged."""
    openai_spec = find_by_name("openai")
    with patch("nanobot.providers.openai_compat_provider.AsyncOpenAI"):
        openai_provider = OpenAICompatProvider(
            api_key="sk-test-key",
            default_model="gpt-4o",
            spec=openai_spec,
        )
        assert openai_provider.get_default_model() == "gpt-4o"
|
|
|
|
|
|
def test_openai_compat_strips_message_level_reasoning_fields() -> None:
    """Message-level reasoning fields are dropped; tool-call extra_content survives."""
    with patch("nanobot.providers.openai_compat_provider.AsyncOpenAI"):
        compat = OpenAICompatProvider()

    assistant_msg = {
        "role": "assistant",
        "content": "done",
        "reasoning_content": "hidden",
        "extra_content": {"debug": True},
        "tool_calls": [
            {
                "id": "call_1",
                "type": "function",
                "function": {"name": "fn", "arguments": "{}"},
                "extra_content": {"google": {"thought_signature": "sig"}},
            }
        ],
    }
    cleaned = compat._sanitize_messages([assistant_msg])

    # Message-level reasoning/extra fields must not reach the wire...
    assert "reasoning_content" not in cleaned[0]
    assert "extra_content" not in cleaned[0]
    # ...but the per-tool-call extra_content is intentionally preserved.
    assert cleaned[0]["tool_calls"][0]["extra_content"] == {"google": {"thought_signature": "sig"}}
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_openai_compat_stream_watchdog_returns_error_on_stall(monkeypatch) -> None:
    """A stream that never yields a chunk is cut off and reported as an error."""
    # Zero idle timeout makes the watchdog fire immediately on the stalled stream.
    monkeypatch.setenv("NANOBOT_STREAM_IDLE_TIMEOUT_S", "0")
    stalled_create = AsyncMock(return_value=_StalledStream())
    openai_spec = find_by_name("openai")

    with patch("nanobot.providers.openai_compat_provider.AsyncOpenAI") as client_cls:
        client_cls.return_value.chat.completions.create = stalled_create

        streaming = OpenAICompatProvider(
            api_key="sk-test-key",
            default_model="gpt-4o",
            spec=openai_spec,
        )
        outcome = await streaming.chat_stream(
            messages=[{"role": "user", "content": "hello"}],
            model="gpt-4o",
        )

    assert outcome.finish_reason == "error"
    assert outcome.content is not None
    assert "stream stalled" in outcome.content
|