mirror of
https://github.com/HKUDS/nanobot.git
synced 2026-04-02 09:22:36 +00:00
fix: refresh copilot token before requests
Address PR review feedback by avoiding an async method reference as the OpenAI client api_key. Initialize the client with a placeholder key, refresh the Copilot token before each chat/chat_stream call, and update the runtime client api_key before dispatch. Add a regression test that verifies the client api_key is refreshed to a real string before chat requests. Generated with GitHub Copilot, GPT-5.4.
This commit is contained in:
parent
a37bc26ed3
commit
c5f0997381
@ -164,7 +164,7 @@ class GitHubCopilotProvider(OpenAICompatProvider):
|
||||
self._copilot_access_token: str | None = None
|
||||
self._copilot_expires_at: float = 0.0
|
||||
super().__init__(
|
||||
api_key=self._get_copilot_access_token,
|
||||
api_key="no-key",
|
||||
api_base=DEFAULT_COPILOT_BASE_URL,
|
||||
default_model=default_model,
|
||||
extra_headers={
|
||||
@ -205,3 +205,53 @@ class GitHubCopilotProvider(OpenAICompatProvider):
|
||||
self._copilot_expires_at = time.time() + int(refresh_in)
|
||||
self._copilot_access_token = str(token)
|
||||
return self._copilot_access_token
|
||||
|
||||
async def _refresh_client_api_key(self) -> str:
|
||||
token = await self._get_copilot_access_token()
|
||||
self.api_key = token
|
||||
self._client.api_key = token
|
||||
return token
|
||||
|
||||
async def chat(
    self,
    messages: list[dict[str, object]],
    tools: list[dict[str, object]] | None = None,
    model: str | None = None,
    max_tokens: int = 4096,
    temperature: float = 0.7,
    reasoning_effort: str | None = None,
    tool_choice: str | dict[str, object] | None = None,
):
    """Issue a chat completion request via the parent provider.

    Ensures the runtime client carries a current Copilot access token
    before delegating, since the placeholder key set at construction time
    is never valid for real requests.
    """
    # Copilot tokens are short-lived; refresh onto the client before dispatch.
    await self._refresh_client_api_key()
    request_kwargs = dict(
        messages=messages,
        tools=tools,
        model=model,
        max_tokens=max_tokens,
        temperature=temperature,
        reasoning_effort=reasoning_effort,
        tool_choice=tool_choice,
    )
    return await super().chat(**request_kwargs)
|
||||
|
||||
async def chat_stream(
    self,
    messages: list[dict[str, object]],
    tools: list[dict[str, object]] | None = None,
    model: str | None = None,
    max_tokens: int = 4096,
    temperature: float = 0.7,
    reasoning_effort: str | None = None,
    tool_choice: str | dict[str, object] | None = None,
    on_content_delta: Callable[[str], None] | None = None,
):
    """Issue a streaming chat completion request via the parent provider.

    Mirrors ``chat``: the Copilot access token is refreshed onto the
    runtime client first, then the call is forwarded unchanged.
    """
    # Copilot tokens are short-lived; refresh onto the client before dispatch.
    await self._refresh_client_api_key()
    request_kwargs = dict(
        messages=messages,
        tools=tools,
        model=model,
        max_tokens=max_tokens,
        temperature=temperature,
        reasoning_effort=reasoning_effort,
        tool_choice=tool_choice,
        on_content_delta=on_content_delta,
    )
    return await super().chat_stream(**request_kwargs)
|
||||
|
||||
@ -357,6 +357,35 @@ def test_github_copilot_provider_strips_prefixed_model_name():
|
||||
assert kwargs["model"] == "gpt-5.1"
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_github_copilot_provider_refreshes_client_api_key_before_chat():
    """Verify the placeholder key is replaced by a real token before a chat call."""
    from nanobot.providers.github_copilot_provider import GitHubCopilotProvider

    # Stub out the underlying OpenAI client so no network traffic occurs.
    stub_client = MagicMock()
    stub_client.api_key = "no-key"
    stub_client.chat.completions.create = AsyncMock(return_value={
        "choices": [{"message": {"content": "ok"}, "finish_reason": "stop"}],
        "usage": {"prompt_tokens": 1, "completion_tokens": 1, "total_tokens": 2},
    })

    with patch("nanobot.providers.openai_compat_provider.AsyncOpenAI", return_value=stub_client):
        provider = GitHubCopilotProvider(default_model="github-copilot/gpt-5.1")

        # Short-circuit the real token exchange with a canned token.
        provider._get_copilot_access_token = AsyncMock(return_value="copilot-access-token")

        result = await provider.chat(
            messages=[{"role": "user", "content": "hi"}],
            model="github-copilot/gpt-5.1",
            max_tokens=16,
            temperature=0.1,
        )

    assert result.content == "ok"
    # The runtime client must now carry the real token, not the placeholder.
    assert provider._client.api_key == "copilot-access-token"
    provider._get_copilot_access_token.assert_awaited_once()
    stub_client.chat.completions.create.assert_awaited_once()
|
||||
|
||||
|
||||
def test_openai_codex_strip_prefix_supports_hyphen_and_underscore():
    """Both hyphenated and underscored provider prefixes must be stripped."""
    for prefixed in ("openai-codex/gpt-5.1-codex", "openai_codex/gpt-5.1-codex"):
        assert _strip_model_prefix(prefixed) == "gpt-5.1-codex"
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user