mirror of
https://github.com/HKUDS/nanobot.git
synced 2026-04-15 07:29:52 +00:00
fix(provider): clarify local 502 recovery hints
This commit is contained in:
parent
f879d81b28
commit
c68b3edb9d
@ -798,8 +798,7 @@ class OpenAICompatProvider(LLMProvider):
|
||||
"error_should_retry": should_retry,
|
||||
}
|
||||
|
||||
@staticmethod
|
||||
def _handle_error(e: Exception) -> LLMResponse:
|
||||
def _handle_error(self, e: Exception) -> LLMResponse:
|
||||
body = (
|
||||
getattr(e, "doc", None)
|
||||
or getattr(e, "body", None)
|
||||
@ -807,6 +806,16 @@ class OpenAICompatProvider(LLMProvider):
|
||||
)
|
||||
body_text = body if isinstance(body, str) else str(body) if body is not None else ""
|
||||
msg = f"Error: {body_text.strip()[:500]}" if body_text.strip() else f"Error calling LLM: {e}"
|
||||
|
||||
spec = self._spec
|
||||
text = f"{body_text} {e}".lower()
|
||||
if spec and spec.is_local and ("502" in text or "connection" in text or "refused" in text):
|
||||
msg += (
|
||||
"\nHint: this is a local model endpoint. Check that the local server is reachable at "
|
||||
f"{self.api_base or spec.default_api_base}, and if you are using a proxy/tunnel, make sure it "
|
||||
"can reach your local Ollama/vLLM service instead of routing localhost through the remote host."
|
||||
)
|
||||
|
||||
response = getattr(e, "response", None)
|
||||
retry_after = LLMProvider._extract_retry_after_from_headers(getattr(response, "headers", None))
|
||||
if retry_after is None:
|
||||
|
||||
@ -4,6 +4,7 @@ from types import SimpleNamespace
|
||||
from unittest.mock import patch
|
||||
|
||||
from nanobot.providers.openai_compat_provider import OpenAICompatProvider
|
||||
from nanobot.providers.registry import find_by_name
|
||||
|
||||
|
||||
def test_custom_provider_parse_handles_empty_choices() -> None:
|
||||
@ -53,3 +54,16 @@ def test_custom_provider_parse_chunks_accepts_plain_text_chunks() -> None:
|
||||
|
||||
assert result.finish_reason == "stop"
|
||||
assert result.content == "hello world"
|
||||
|
||||
|
||||
def test_local_provider_502_error_includes_reachability_hint() -> None:
    """A 502 from a local provider surfaces a reachability hint in the error text.

    Builds an Ollama-backed provider (AsyncOpenAI client patched out so no
    network/client setup runs), feeds ``_handle_error`` a 502-style exception,
    and checks the response carries the local-endpoint hint: the endpoint URL
    and the proxy/tunnel guidance.
    """
    spec = find_by_name("ollama")
    # Patch the SDK client so constructing the provider performs no real I/O.
    with patch("nanobot.providers.openai_compat_provider.AsyncOpenAI"):
        provider = OpenAICompatProvider(api_base="http://localhost:11434/v1", spec=spec)

    result = provider._handle_error(Exception("Error code: 502"))

    assert result.finish_reason == "error"
    # Hint text added for local specs on 502/connection failures.
    assert "local model endpoint" in result.content
    assert "http://localhost:11434/v1" in result.content
    assert "proxy/tunnel" in result.content
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user