fix(openai): use max_completion_tokens for OpenAI provider (#2758)

This commit is contained in:
Bob Johnson 2026-04-02 21:24:03 -05:00 committed by GitHub
parent b2598270bf
commit 475ea06294
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 35 additions and 0 deletions

View File

@ -200,6 +200,7 @@ PROVIDERS: tuple[ProviderSpec, ...] = (
env_key="OPENAI_API_KEY",
display_name="OpenAI",
backend="openai_compat",
supports_max_completion_tokens=True,
),
# OpenAI Codex: OAuth-based, dedicated provider
ProviderSpec(

View File

@ -0,0 +1,34 @@
"""Regression tests for max_completion_tokens selection in OpenAI-compatible providers."""
from unittest.mock import patch
from nanobot.providers.openai_compat_provider import OpenAICompatProvider
from nanobot.providers.registry import find_by_name
def test_openai_provider_uses_max_completion_tokens_when_supported():
    """OpenAI registry spec should drive max_completion_tokens payload selection."""
    # The registry entry for OpenAI must advertise support for the newer
    # max_completion_tokens parameter.
    provider_spec = find_by_name("openai")
    assert provider_spec is not None
    assert provider_spec.supports_max_completion_tokens is True

    # Patch out the real client so constructing the provider performs no I/O.
    with patch("nanobot.providers.openai_compat_provider.AsyncOpenAI"):
        provider = OpenAICompatProvider(
            api_key="test-key",
            api_base=None,
            default_model="gpt-4.1",
            spec=provider_spec,
        )

        request_kwargs = provider._build_kwargs(
            messages=[{"role": "user", "content": "Hello"}],
            tools=None,
            model="gpt-4.1",
            max_tokens=1234,
            temperature=0.2,
            reasoning_effort=None,
            tool_choice=None,
        )

    # The token budget must be forwarded under the new key only; the legacy
    # max_tokens key must not appear alongside it.
    assert request_kwargs["max_completion_tokens"] == 1234
    assert "max_tokens" not in request_kwargs