Mirror of https://github.com/HKUDS/nanobot.git — synced 2026-04-05 10:52:36 +00:00
fix(openai): use max_completion_tokens for OpenAI provider (#2758)
This commit is contained in:
parent
b2598270bf
commit
475ea06294
@ -200,6 +200,7 @@ PROVIDERS: tuple[ProviderSpec, ...] = (
|
||||
env_key="OPENAI_API_KEY",
|
||||
display_name="OpenAI",
|
||||
backend="openai_compat",
|
||||
supports_max_completion_tokens=True,
|
||||
),
|
||||
# OpenAI Codex: OAuth-based, dedicated provider
|
||||
ProviderSpec(
|
||||
|
||||
34 lines added — tests/providers/test_openai_compat_max_completion_tokens.py (new file)
@ -0,0 +1,34 @@
|
||||
"""Regression tests for max_completion_tokens selection in OpenAI-compatible providers."""

from unittest.mock import patch

from nanobot.providers.openai_compat_provider import OpenAICompatProvider
from nanobot.providers.registry import find_by_name


def test_openai_provider_uses_max_completion_tokens_when_supported():
    """OpenAI registry spec should drive max_completion_tokens payload selection."""
    # The registry must expose an "openai" entry flagged as supporting the
    # newer max_completion_tokens parameter.
    provider_spec = find_by_name("openai")
    assert provider_spec is not None
    assert provider_spec.supports_max_completion_tokens is True

    # Patch out the real OpenAI client so constructing the provider performs
    # no network setup.
    with patch("nanobot.providers.openai_compat_provider.AsyncOpenAI"):
        compat_provider = OpenAICompatProvider(
            api_key="test-key",
            api_base=None,
            default_model="gpt-4.1",
            spec=provider_spec,
        )

        request_kwargs = compat_provider._build_kwargs(
            messages=[{"role": "user", "content": "Hello"}],
            tools=None,
            model="gpt-4.1",
            max_tokens=1234,
            temperature=0.2,
            reasoning_effort=None,
            tool_choice=None,
        )

    # The spec flag should route the token budget into max_completion_tokens
    # and suppress the legacy max_tokens key entirely.
    assert request_kwargs["max_completion_tokens"] == 1234
    assert "max_tokens" not in request_kwargs
|
||||
Loading…
x
Reference in New Issue
Block a user