diff --git a/nanobot/providers/registry.py b/nanobot/providers/registry.py
index 9b0b5f74f..d73d8271e 100644
--- a/nanobot/providers/registry.py
+++ b/nanobot/providers/registry.py
@@ -200,6 +200,7 @@ PROVIDERS: tuple[ProviderSpec, ...] = (
         env_key="OPENAI_API_KEY",
         display_name="OpenAI",
         backend="openai_compat",
+        supports_max_completion_tokens=True,
     ),
     # OpenAI Codex: OAuth-based, dedicated provider
     ProviderSpec(
diff --git a/tests/providers/test_openai_compat_max_completion_tokens.py b/tests/providers/test_openai_compat_max_completion_tokens.py
new file mode 100644
index 000000000..ed6692848
--- /dev/null
+++ b/tests/providers/test_openai_compat_max_completion_tokens.py
@@ -0,0 +1,34 @@
+"""Regression tests for max_completion_tokens selection in OpenAI-compatible providers."""
+
+from unittest.mock import patch
+
+from nanobot.providers.openai_compat_provider import OpenAICompatProvider
+from nanobot.providers.registry import find_by_name
+
+
+def test_openai_provider_uses_max_completion_tokens_when_supported():
+    """OpenAI registry spec should drive max_completion_tokens payload selection."""
+    spec = find_by_name("openai")
+    assert spec is not None
+    assert spec.supports_max_completion_tokens is True
+
+    with patch("nanobot.providers.openai_compat_provider.AsyncOpenAI"):
+        provider = OpenAICompatProvider(
+            api_key="test-key",
+            api_base=None,
+            default_model="gpt-4.1",
+            spec=spec,
+        )
+
+        payload = provider._build_kwargs(
+            messages=[{"role": "user", "content": "Hello"}],
+            tools=None,
+            model="gpt-4.1",
+            max_tokens=1234,
+            temperature=0.2,
+            reasoning_effort=None,
+            tool_choice=None,
+        )
+
+    assert payload["max_completion_tokens"] == 1234
+    assert "max_tokens" not in payload