diff --git a/nanobot/providers/github_copilot_provider.py b/nanobot/providers/github_copilot_provider.py
index eb8b922af..8d50006a0 100644
--- a/nanobot/providers/github_copilot_provider.py
+++ b/nanobot/providers/github_copilot_provider.py
@@ -164,7 +164,7 @@ class GitHubCopilotProvider(OpenAICompatProvider):
         self._copilot_access_token: str | None = None
         self._copilot_expires_at: float = 0.0
         super().__init__(
-            api_key=self._get_copilot_access_token,
+            api_key="no-key",
             api_base=DEFAULT_COPILOT_BASE_URL,
             default_model=default_model,
             extra_headers={
@@ -205,3 +205,53 @@ class GitHubCopilotProvider(OpenAICompatProvider):
         self._copilot_expires_at = time.time() + int(refresh_in)
         self._copilot_access_token = str(token)
         return self._copilot_access_token
+
+    async def _refresh_client_api_key(self) -> str:
+        token = await self._get_copilot_access_token()
+        self.api_key = token
+        self._client.api_key = token
+        return token
+
+    async def chat(
+        self,
+        messages: list[dict[str, object]],
+        tools: list[dict[str, object]] | None = None,
+        model: str | None = None,
+        max_tokens: int = 4096,
+        temperature: float = 0.7,
+        reasoning_effort: str | None = None,
+        tool_choice: str | dict[str, object] | None = None,
+    ):
+        await self._refresh_client_api_key()
+        return await super().chat(
+            messages=messages,
+            tools=tools,
+            model=model,
+            max_tokens=max_tokens,
+            temperature=temperature,
+            reasoning_effort=reasoning_effort,
+            tool_choice=tool_choice,
+        )
+
+    async def chat_stream(
+        self,
+        messages: list[dict[str, object]],
+        tools: list[dict[str, object]] | None = None,
+        model: str | None = None,
+        max_tokens: int = 4096,
+        temperature: float = 0.7,
+        reasoning_effort: str | None = None,
+        tool_choice: str | dict[str, object] | None = None,
+        on_content_delta: Callable[[str], None] | None = None,
+    ):
+        await self._refresh_client_api_key()
+        return await super().chat_stream(
+            messages=messages,
+            tools=tools,
+            model=model,
+            max_tokens=max_tokens,
+            temperature=temperature,
+            reasoning_effort=reasoning_effort,
+            tool_choice=tool_choice,
+            on_content_delta=on_content_delta,
+        )
diff --git a/tests/cli/test_commands.py b/tests/cli/test_commands.py
index b9869e74d..0f6ff8177 100644
--- a/tests/cli/test_commands.py
+++ b/tests/cli/test_commands.py
@@ -357,6 +357,35 @@ def test_github_copilot_provider_strips_prefixed_model_name():
     assert kwargs["model"] == "gpt-5.1"
 
 
+@pytest.mark.asyncio
+async def test_github_copilot_provider_refreshes_client_api_key_before_chat():
+    from nanobot.providers.github_copilot_provider import GitHubCopilotProvider
+
+    mock_client = MagicMock()
+    mock_client.api_key = "no-key"
+    mock_client.chat.completions.create = AsyncMock(return_value={
+        "choices": [{"message": {"content": "ok"}, "finish_reason": "stop"}],
+        "usage": {"prompt_tokens": 1, "completion_tokens": 1, "total_tokens": 2},
+    })
+
+    with patch("nanobot.providers.openai_compat_provider.AsyncOpenAI", return_value=mock_client):
+        provider = GitHubCopilotProvider(default_model="github-copilot/gpt-5.1")
+
+    provider._get_copilot_access_token = AsyncMock(return_value="copilot-access-token")
+
+    response = await provider.chat(
+        messages=[{"role": "user", "content": "hi"}],
+        model="github-copilot/gpt-5.1",
+        max_tokens=16,
+        temperature=0.1,
+    )
+
+    assert response.content == "ok"
+    assert provider._client.api_key == "copilot-access-token"
+    provider._get_copilot_access_token.assert_awaited_once()
+    mock_client.chat.completions.create.assert_awaited_once()
+
+
 def test_openai_codex_strip_prefix_supports_hyphen_and_underscore():
     assert _strip_model_prefix("openai-codex/gpt-5.1-codex") == "gpt-5.1-codex"
     assert _strip_model_prefix("openai_codex/gpt-5.1-codex") == "gpt-5.1-codex"