- Remove litellm dependency entirely (supply chain risk mitigation)
- Add AnthropicProvider (native SDK) and OpenAICompatProvider (unified)
- Merge CustomProvider into OpenAICompatProvider, delete custom_provider.py
- Add ProviderSpec.backend field for declarative provider routing
- Remove _resolve_model, find_gateway, find_by_model (dead heuristics)
- Pass resolved spec directly into provider (zero internal lookups)
- Stub out litellm-dependent model database (cli/models.py)
- Add anthropic>=0.45.0 to dependencies, remove litellm
- 593 tests passed, net -1034 lines
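The spec-driven routing described above reduces to one declarative decision per provider spec. A minimal self-contained sketch of that behavior follows; the ProviderSpec constructor and the resolve_model_name helper here are illustrative assumptions, not nanobot's actual API (only the strip_model_prefix and is_gateway flags are taken from the tests in this file):

# Illustrative sketch only: ProviderSpec fields and resolve_model_name are
# assumptions for demonstration, not nanobot's real implementation.
from dataclasses import dataclass


@dataclass(frozen=True)
class ProviderSpec:
    name: str
    is_gateway: bool = False          # gateway routes provider-prefixed names itself
    strip_model_prefix: bool = False  # drop the "vendor/" prefix before calling upstream


def resolve_model_name(spec: ProviderSpec, model: str) -> str:
    """Return the model name to send upstream, driven purely by the spec."""
    if spec.strip_model_prefix and "/" in model:
        return model.split("/", 1)[1]
    return model


# Gateways keep the prefixed name; prefix-stripping specs drop it; others pass through.
assert resolve_model_name(ProviderSpec("openrouter", is_gateway=True),
                          "anthropic/claude-sonnet-4-5") == "anthropic/claude-sonnet-4-5"
assert resolve_model_name(ProviderSpec("aihubmix", strip_model_prefix=True),
                          "anthropic/claude-sonnet-4-5") == "claude-sonnet-4-5"
assert resolve_model_name(ProviderSpec("deepseek"), "deepseek-chat") == "deepseek-chat"

Because the resolved spec is passed straight into the provider, no lookup heuristics are needed at call time; the tests below verify exactly these three cases against OpenAICompatProvider.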
123 lines · 4.2 KiB · Python
"""Tests for OpenAICompatProvider spec-driven behavior.
|
|
|
|
Validates that:
|
|
- OpenRouter (no strip) keeps model names intact.
|
|
- AiHubMix (strip_model_prefix=True) strips provider prefixes.
|
|
- Standard providers pass model names through as-is.
|
|
"""
|
|
|
|
from __future__ import annotations
|
|
|
|
from types import SimpleNamespace
|
|
from unittest.mock import AsyncMock, patch
|
|
|
|
import pytest
|
|
|
|
from nanobot.providers.openai_compat_provider import OpenAICompatProvider
|
|
from nanobot.providers.registry import find_by_name
|
|
|
|
|
|
def _fake_chat_response(content: str = "ok") -> SimpleNamespace:
|
|
"""Build a minimal OpenAI chat completion response."""
|
|
message = SimpleNamespace(
|
|
content=content,
|
|
tool_calls=None,
|
|
reasoning_content=None,
|
|
)
|
|
choice = SimpleNamespace(message=message, finish_reason="stop")
|
|
usage = SimpleNamespace(prompt_tokens=10, completion_tokens=5, total_tokens=15)
|
|
return SimpleNamespace(choices=[choice], usage=usage)
|
|
|
|
|
|
def test_openrouter_spec_is_gateway() -> None:
|
|
spec = find_by_name("openrouter")
|
|
assert spec is not None
|
|
assert spec.is_gateway is True
|
|
assert spec.default_api_base == "https://openrouter.ai/api/v1"
|
|
|
|
|
|
@pytest.mark.asyncio
|
|
async def test_openrouter_keeps_model_name_intact() -> None:
|
|
"""OpenRouter gateway keeps the full model name (gateway does its own routing)."""
|
|
mock_create = AsyncMock(return_value=_fake_chat_response())
|
|
spec = find_by_name("openrouter")
|
|
|
|
with patch("nanobot.providers.openai_compat_provider.AsyncOpenAI") as MockClient:
|
|
client_instance = MockClient.return_value
|
|
client_instance.chat.completions.create = mock_create
|
|
|
|
provider = OpenAICompatProvider(
|
|
api_key="sk-or-test-key",
|
|
api_base="https://openrouter.ai/api/v1",
|
|
default_model="anthropic/claude-sonnet-4-5",
|
|
spec=spec,
|
|
)
|
|
await provider.chat(
|
|
messages=[{"role": "user", "content": "hello"}],
|
|
model="anthropic/claude-sonnet-4-5",
|
|
)
|
|
|
|
call_kwargs = mock_create.call_args.kwargs
|
|
assert call_kwargs["model"] == "anthropic/claude-sonnet-4-5"
|
|
|
|
|
|
@pytest.mark.asyncio
|
|
async def test_aihubmix_strips_model_prefix() -> None:
|
|
"""AiHubMix strips the provider prefix (strip_model_prefix=True)."""
|
|
mock_create = AsyncMock(return_value=_fake_chat_response())
|
|
spec = find_by_name("aihubmix")
|
|
|
|
with patch("nanobot.providers.openai_compat_provider.AsyncOpenAI") as MockClient:
|
|
client_instance = MockClient.return_value
|
|
client_instance.chat.completions.create = mock_create
|
|
|
|
provider = OpenAICompatProvider(
|
|
api_key="sk-aihub-test-key",
|
|
api_base="https://aihubmix.com/v1",
|
|
default_model="claude-sonnet-4-5",
|
|
spec=spec,
|
|
)
|
|
await provider.chat(
|
|
messages=[{"role": "user", "content": "hello"}],
|
|
model="anthropic/claude-sonnet-4-5",
|
|
)
|
|
|
|
call_kwargs = mock_create.call_args.kwargs
|
|
assert call_kwargs["model"] == "claude-sonnet-4-5"
|
|
|
|
|
|
@pytest.mark.asyncio
|
|
async def test_standard_provider_passes_model_through() -> None:
|
|
"""Standard provider (e.g. deepseek) passes model name through as-is."""
|
|
mock_create = AsyncMock(return_value=_fake_chat_response())
|
|
spec = find_by_name("deepseek")
|
|
|
|
with patch("nanobot.providers.openai_compat_provider.AsyncOpenAI") as MockClient:
|
|
client_instance = MockClient.return_value
|
|
client_instance.chat.completions.create = mock_create
|
|
|
|
provider = OpenAICompatProvider(
|
|
api_key="sk-deepseek-test-key",
|
|
default_model="deepseek-chat",
|
|
spec=spec,
|
|
)
|
|
await provider.chat(
|
|
messages=[{"role": "user", "content": "hello"}],
|
|
model="deepseek-chat",
|
|
)
|
|
|
|
call_kwargs = mock_create.call_args.kwargs
|
|
assert call_kwargs["model"] == "deepseek-chat"
|
|
|
|
|
|
def test_openai_model_passthrough() -> None:
|
|
"""OpenAI models pass through unchanged."""
|
|
spec = find_by_name("openai")
|
|
with patch("nanobot.providers.openai_compat_provider.AsyncOpenAI"):
|
|
provider = OpenAICompatProvider(
|
|
api_key="sk-test-key",
|
|
default_model="gpt-4o",
|
|
spec=spec,
|
|
)
|
|
assert provider.get_default_model() == "gpt-4o"
|