mirror of
https://github.com/HKUDS/nanobot.git
synced 2026-05-10 11:45:53 +00:00
test(providers): cover reasoning_effort="none" and gemma auto-routing
- Anthropic: "none" must not enable extended thinking
- Azure: "none" must not suppress temperature or inject reasoning body
- DeepSeek/DashScope/Kimi: "none" sends thinking disabled, skips reasoning_effort field
- Gemini: gemma keyword enables auto-routing for gemma models
This commit is contained in:
parent
b94bc18e59
commit
2b9b41f9c3
@ -83,3 +83,10 @@ def test_opus_4_7_omits_temperature_none() -> None:
|
||||
kw = _build(_make_provider("claude-opus-4-7"), None)
|
||||
assert "temperature" not in kw
|
||||
assert "thinking" not in kw
|
||||
|
||||
|
||||
def test_reasoning_effort_string_none_does_not_enable_thinking() -> None:
    """The literal string 'none' behaves exactly like thinking being disabled.

    The built kwargs must carry no 'thinking' block, and the provider's
    default temperature of 0.7 must remain in effect.
    """
    built = _build(_make_provider(), "none")
    assert built["temperature"] == 0.7
    assert "thinking" not in built
|
||||
|
||||
@ -78,6 +78,11 @@ def test_supports_temperature_with_reasoning_effort():
|
||||
assert AzureOpenAIProvider._supports_temperature("gpt-4o", reasoning_effort="medium") is False
|
||||
|
||||
|
||||
def test_supports_temperature_with_reasoning_effort_none_string():
    """A reasoning_effort of 'none' means thinking is off, so temperature stays allowed."""
    supported = AzureOpenAIProvider._supports_temperature("gpt-4o", reasoning_effort="none")
    assert supported is True
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# _build_body — Responses API body construction
|
||||
# ---------------------------------------------------------------------------
|
||||
@ -131,6 +136,16 @@ def test_build_body_with_reasoning():
|
||||
assert "temperature" not in body
|
||||
|
||||
|
||||
def test_build_body_reasoning_effort_none_string_omits_reasoning():
    """With reasoning_effort='none' the body gets no 'reasoning' and keeps temperature."""
    azure = AzureOpenAIProvider(api_key="k", api_base="https://r.com", default_model="gpt-4o")
    user_turn = [{"role": "user", "content": "hi"}]
    body = azure._build_body(user_turn, None, "gpt-4o", 4096, 0.7, "none", None)
    assert body["temperature"] == 0.7
    assert "reasoning" not in body
|
||||
|
||||
|
||||
def test_build_body_image_conversion():
|
||||
"""image_url content blocks should be converted to input_image."""
|
||||
provider = AzureOpenAIProvider(api_key="k", api_base="https://r.com", default_model="gpt-4o")
|
||||
|
||||
@ -121,6 +121,14 @@ def test_openrouter_spec_is_gateway() -> None:
|
||||
assert spec.default_api_base == "https://openrouter.ai/api/v1"
|
||||
|
||||
|
||||
def test_gemma_routes_to_gemini_provider() -> None:
    """The Gemini provider spec must list 'gemma' among its routing keywords.

    gemma models (e.g. gemma-3-27b-it) served through the Gemini API endpoint
    should be auto-detected as Gemini whenever GEMINI_API_KEY is set.
    """
    gemini_spec = find_by_name("gemini")
    assert gemini_spec is not None
    assert "gemma" in gemini_spec.keywords
|
||||
|
||||
|
||||
def test_openrouter_sets_default_attribution_headers() -> None:
|
||||
spec = find_by_name("openrouter")
|
||||
with patch("nanobot.providers.openai_compat_provider.AsyncOpenAI") as MockClient:
|
||||
@ -1050,3 +1058,46 @@ def test_kimi_k2_thinking_series_no_thinking_injection() -> None:
|
||||
"""kimi-k2-thinking series models must NOT receive extra_body.thinking."""
|
||||
kw = _build_kwargs_for("moonshot", "kimi-k2-thinking", reasoning_effort="high")
|
||||
assert "extra_body" not in kw
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# reasoning_effort="none" — treated as thinking disabled
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def test_deepseek_thinking_disabled_for_none_string() -> None:
    """DeepSeek with reasoning_effort='none' sends thinking.type=disabled and no reasoning_effort key."""
    kwargs = _build_kwargs_for("deepseek", "deepseek-v4-pro", reasoning_effort="none")
    assert "reasoning_effort" not in kwargs
    assert kwargs.get("extra_body") == {"thinking": {"type": "disabled"}}
|
||||
|
||||
|
||||
def test_kimi_k25_thinking_disabled_for_none_string() -> None:
    """For kimi-k2.5, reasoning_effort='none' translates into a disabled thinking block."""
    kwargs = _build_kwargs_for("moonshot", "kimi-k2.5", reasoning_effort="none")
    expected_extra = {"thinking": {"type": "disabled"}}
    assert kwargs.get("extra_body") == expected_extra
|
||||
|
||||
|
||||
def test_dashscope_thinking_disabled_for_none_string() -> None:
    """DashScope with reasoning_effort='none' disables thinking and omits reasoning_effort."""
    kwargs = _build_kwargs_for("dashscope", "qwen3.6-plus", reasoning_effort="none")
    assert "reasoning_effort" not in kwargs
    assert kwargs.get("extra_body") == {"enable_thinking": False}
|
||||
|
||||
|
||||
def test_deepseek_no_backfill_when_reasoning_effort_none_string() -> None:
    """With reasoning_effort='none', no reasoning_content is backfilled onto assistant turns.

    Thinking is inactive, so the assistant message must pass through untouched.
    """
    deepseek_spec = find_by_name("deepseek")
    # AsyncOpenAI is mocked so the provider constructs without a real client.
    with patch("nanobot.providers.openai_compat_provider.AsyncOpenAI"):
        provider = OpenAICompatProvider(
            api_key="k", default_model="deepseek-v4-pro", spec=deepseek_spec,
        )
        history = [
            {"role": "user", "content": "hi"},
            {"role": "assistant", "content": "ok"},
            {"role": "user", "content": "continue"},
        ]
        kwargs = provider._build_kwargs(
            messages=list(history),
            tools=None,
            model="deepseek-v4-pro",
            max_tokens=1024,
            temperature=0.7,
            reasoning_effort="none",
            tool_choice=None,
        )
    assistant_msg = kwargs["messages"][1]
    assert "reasoning_content" not in assistant_msg
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user