mirror of https://github.com/HKUDS/nanobot.git
synced 2026-04-05 02:42:41 +00:00

- Remove litellm dependency entirely (supply chain risk mitigation)
- Add AnthropicProvider (native SDK) and OpenAICompatProvider (unified)
- Merge CustomProvider into OpenAICompatProvider, delete custom_provider.py
- Add ProviderSpec.backend field for declarative provider routing
- Remove _resolve_model, find_gateway, find_by_model (dead heuristics)
- Pass resolved spec directly into provider — zero internal lookups
- Stub out litellm-dependent model database (cli/models.py)
- Add anthropic>=0.45.0 to dependencies, remove litellm
- 593 tests passed, net -1034 lines

32 lines
777 B
Python
"""Model information helpers for the onboard wizard.
|
|
|
|
Model database / autocomplete is temporarily disabled while litellm is
|
|
being replaced. All public function signatures are preserved so callers
|
|
continue to work without changes.
|
|
"""
|
|
|
|
from __future__ import annotations

from typing import Any

def get_all_models() -> list[str]:
    """Return every known model name.

    Currently always empty: the litellm-backed model database is disabled,
    and this stub exists only to keep the public API stable.
    """
    return []
def find_model_info(model_name: str) -> dict[str, Any] | None:
|
|
return None
|
|
|
|
|
|
def get_model_context_limit(model: str, provider: str = "auto") -> int | None:
|
|
return None
|
|
|
|
|
|
def get_model_suggestions(partial: str, provider: str = "auto", limit: int = 20) -> list[str]:
    """Return autocomplete candidates matching *partial*.

    Autocomplete is disabled along with the model database, so this always
    yields an empty list regardless of *provider* or *limit*.
    """
    return []
def format_token_count(tokens: int) -> str:
    """Render *tokens* with thousands separators (e.g. 200000 -> '200,000')."""
    # format(x, ",") is equivalent to f"{x:,}" — grouped decimal notation.
    return format(tokens, ",")
|