mirror of
https://github.com/HKUDS/nanobot.git
synced 2026-04-27 21:35:51 +00:00
Merge branch 'main' into nightly
Resolved conflicts in onboard command to support both interactive and non-interactive modes: - Added --non-interactive flag to skip wizard - Kept --workspace and --config options - Updated tests to use --non-interactive for non-interactive tests
This commit is contained in:
commit
c191fb3708
22
README.md
22
README.md
@ -1162,10 +1162,27 @@ MCP tools are automatically discovered and registered on startup. The LLM can us
|
|||||||
|
|
||||||
## 🧩 Multiple Instances
|
## 🧩 Multiple Instances
|
||||||
|
|
||||||
Run multiple nanobot instances simultaneously with separate configs and runtime data. Use `--config` as the main entrypoint, and optionally use `--workspace` to override the workspace for a specific run.
|
Run multiple nanobot instances simultaneously with separate configs and runtime data. Use `--config` as the main entrypoint. Optionally pass `--workspace` during `onboard` when you want to initialize or update the saved workspace for a specific instance.
|
||||||
|
|
||||||
### Quick Start
|
### Quick Start
|
||||||
|
|
||||||
|
If you want each instance to have its own dedicated workspace from the start, pass both `--config` and `--workspace` during onboarding.
|
||||||
|
|
||||||
|
**Initialize instances:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Create separate instance configs and workspaces
|
||||||
|
nanobot onboard --config ~/.nanobot-telegram/config.json --workspace ~/.nanobot-telegram/workspace
|
||||||
|
nanobot onboard --config ~/.nanobot-discord/config.json --workspace ~/.nanobot-discord/workspace
|
||||||
|
nanobot onboard --config ~/.nanobot-feishu/config.json --workspace ~/.nanobot-feishu/workspace
|
||||||
|
```
|
||||||
|
|
||||||
|
**Configure each instance:**
|
||||||
|
|
||||||
|
Edit `~/.nanobot-telegram/config.json`, `~/.nanobot-discord/config.json`, etc. with different channel settings. The workspace you passed during `onboard` is saved into each config as that instance's default workspace.
|
||||||
|
|
||||||
|
**Run instances:**
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Instance A - Telegram bot
|
# Instance A - Telegram bot
|
||||||
nanobot gateway --config ~/.nanobot-telegram/config.json
|
nanobot gateway --config ~/.nanobot-telegram/config.json
|
||||||
@ -1265,7 +1282,8 @@ nanobot gateway --config ~/.nanobot-telegram/config.json --workspace /tmp/nanobo
|
|||||||
|
|
||||||
| Command | Description |
|
| Command | Description |
|
||||||
|---------|-------------|
|
|---------|-------------|
|
||||||
| `nanobot onboard` | Initialize config & workspace |
|
| `nanobot onboard` | Initialize config & workspace at `~/.nanobot/` |
|
||||||
|
| `nanobot onboard -c <config> -w <workspace>` | Initialize or refresh a specific instance config and workspace |
|
||||||
| `nanobot agent -m "..."` | Chat with the agent |
|
| `nanobot agent -m "..."` | Chat with the agent |
|
||||||
| `nanobot agent -w <workspace>` | Chat against a specific workspace |
|
| `nanobot agent -w <workspace>` | Chat against a specific workspace |
|
||||||
| `nanobot agent -w <workspace> -c <config>` | Chat against a specific workspace/config |
|
| `nanobot agent -w <workspace> -c <config>` | Chat against a specific workspace/config |
|
||||||
|
|||||||
@ -437,16 +437,39 @@ class FeishuChannel(BaseChannel):
|
|||||||
|
|
||||||
_CODE_BLOCK_RE = re.compile(r"(```[\s\S]*?```)", re.MULTILINE)
|
_CODE_BLOCK_RE = re.compile(r"(```[\s\S]*?```)", re.MULTILINE)
|
||||||
|
|
||||||
@staticmethod
|
# Markdown formatting patterns that should be stripped from plain-text
|
||||||
def _parse_md_table(table_text: str) -> dict | None:
|
# surfaces like table cells and heading text.
|
||||||
|
_MD_BOLD_RE = re.compile(r"\*\*(.+?)\*\*")
|
||||||
|
_MD_BOLD_UNDERSCORE_RE = re.compile(r"__(.+?)__")
|
||||||
|
_MD_ITALIC_RE = re.compile(r"(?<!\*)\*(?!\*)(.+?)(?<!\*)\*(?!\*)")
|
||||||
|
_MD_STRIKE_RE = re.compile(r"~~(.+?)~~")
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def _strip_md_formatting(cls, text: str) -> str:
|
||||||
|
"""Strip markdown formatting markers from text for plain display.
|
||||||
|
|
||||||
|
Feishu table cells do not support markdown rendering, so we remove
|
||||||
|
the formatting markers to keep the text readable.
|
||||||
|
"""
|
||||||
|
# Remove bold markers
|
||||||
|
text = cls._MD_BOLD_RE.sub(r"\1", text)
|
||||||
|
text = cls._MD_BOLD_UNDERSCORE_RE.sub(r"\1", text)
|
||||||
|
# Remove italic markers
|
||||||
|
text = cls._MD_ITALIC_RE.sub(r"\1", text)
|
||||||
|
# Remove strikethrough markers
|
||||||
|
text = cls._MD_STRIKE_RE.sub(r"\1", text)
|
||||||
|
return text
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def _parse_md_table(cls, table_text: str) -> dict | None:
|
||||||
"""Parse a markdown table into a Feishu table element."""
|
"""Parse a markdown table into a Feishu table element."""
|
||||||
lines = [_line.strip() for _line in table_text.strip().split("\n") if _line.strip()]
|
lines = [_line.strip() for _line in table_text.strip().split("\n") if _line.strip()]
|
||||||
if len(lines) < 3:
|
if len(lines) < 3:
|
||||||
return None
|
return None
|
||||||
def split(_line: str) -> list[str]:
|
def split(_line: str) -> list[str]:
|
||||||
return [c.strip() for c in _line.strip("|").split("|")]
|
return [c.strip() for c in _line.strip("|").split("|")]
|
||||||
headers = split(lines[0])
|
headers = [cls._strip_md_formatting(h) for h in split(lines[0])]
|
||||||
rows = [split(_line) for _line in lines[2:]]
|
rows = [[cls._strip_md_formatting(c) for c in split(_line)] for _line in lines[2:]]
|
||||||
columns = [{"tag": "column", "name": f"c{i}", "display_name": h, "width": "auto"}
|
columns = [{"tag": "column", "name": f"c{i}", "display_name": h, "width": "auto"}
|
||||||
for i, h in enumerate(headers)]
|
for i, h in enumerate(headers)]
|
||||||
return {
|
return {
|
||||||
@ -512,12 +535,13 @@ class FeishuChannel(BaseChannel):
|
|||||||
before = protected[last_end:m.start()].strip()
|
before = protected[last_end:m.start()].strip()
|
||||||
if before:
|
if before:
|
||||||
elements.append({"tag": "markdown", "content": before})
|
elements.append({"tag": "markdown", "content": before})
|
||||||
text = m.group(2).strip()
|
text = self._strip_md_formatting(m.group(2).strip())
|
||||||
|
display_text = f"**{text}**" if text else ""
|
||||||
elements.append({
|
elements.append({
|
||||||
"tag": "div",
|
"tag": "div",
|
||||||
"text": {
|
"text": {
|
||||||
"tag": "lark_md",
|
"tag": "lark_md",
|
||||||
"content": f"**{text}**",
|
"content": display_text,
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
last_end = m.end()
|
last_end = m.end()
|
||||||
@ -961,10 +985,13 @@ class FeishuChannel(BaseChannel):
|
|||||||
else:
|
else:
|
||||||
key = await loop.run_in_executor(None, self._upload_file_sync, file_path)
|
key = await loop.run_in_executor(None, self._upload_file_sync, file_path)
|
||||||
if key:
|
if key:
|
||||||
# Use msg_type "media" for audio/video so users can play inline;
|
# Use msg_type "audio" for audio, "video" for video, "file" for documents.
|
||||||
# "file" for everything else (documents, archives, etc.)
|
# Feishu requires these specific msg_types for inline playback.
|
||||||
if ext in self._AUDIO_EXTS or ext in self._VIDEO_EXTS:
|
# Note: "media" is only valid as a tag inside "post" messages, not as a standalone msg_type.
|
||||||
media_type = "media"
|
if ext in self._AUDIO_EXTS:
|
||||||
|
media_type = "audio"
|
||||||
|
elif ext in self._VIDEO_EXTS:
|
||||||
|
media_type = "video"
|
||||||
else:
|
else:
|
||||||
media_type = "file"
|
media_type = "file"
|
||||||
await loop.run_in_executor(
|
await loop.run_in_executor(
|
||||||
|
|||||||
@ -38,6 +38,7 @@ class SlackConfig(Base):
|
|||||||
user_token_read_only: bool = True
|
user_token_read_only: bool = True
|
||||||
reply_in_thread: bool = True
|
reply_in_thread: bool = True
|
||||||
react_emoji: str = "eyes"
|
react_emoji: str = "eyes"
|
||||||
|
done_emoji: str = "white_check_mark"
|
||||||
allow_from: list[str] = Field(default_factory=list)
|
allow_from: list[str] = Field(default_factory=list)
|
||||||
group_policy: str = "mention"
|
group_policy: str = "mention"
|
||||||
group_allow_from: list[str] = Field(default_factory=list)
|
group_allow_from: list[str] = Field(default_factory=list)
|
||||||
@ -136,6 +137,12 @@ class SlackChannel(BaseChannel):
|
|||||||
)
|
)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.error("Failed to upload file {}: {}", media_path, e)
|
logger.error("Failed to upload file {}: {}", media_path, e)
|
||||||
|
|
||||||
|
# Update reaction emoji when the final (non-progress) response is sent
|
||||||
|
if not (msg.metadata or {}).get("_progress"):
|
||||||
|
event = slack_meta.get("event", {})
|
||||||
|
await self._update_react_emoji(msg.chat_id, event.get("ts"))
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.error("Error sending Slack message: {}", e)
|
logger.error("Error sending Slack message: {}", e)
|
||||||
|
|
||||||
@ -233,6 +240,28 @@ class SlackChannel(BaseChannel):
|
|||||||
except Exception:
|
except Exception:
|
||||||
logger.exception("Error handling Slack message from {}", sender_id)
|
logger.exception("Error handling Slack message from {}", sender_id)
|
||||||
|
|
||||||
|
async def _update_react_emoji(self, chat_id: str, ts: str | None) -> None:
|
||||||
|
"""Remove the in-progress reaction and optionally add a done reaction."""
|
||||||
|
if not self._web_client or not ts:
|
||||||
|
return
|
||||||
|
try:
|
||||||
|
await self._web_client.reactions_remove(
|
||||||
|
channel=chat_id,
|
||||||
|
name=self.config.react_emoji,
|
||||||
|
timestamp=ts,
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
logger.debug("Slack reactions_remove failed: {}", e)
|
||||||
|
if self.config.done_emoji:
|
||||||
|
try:
|
||||||
|
await self._web_client.reactions_add(
|
||||||
|
channel=chat_id,
|
||||||
|
name=self.config.done_emoji,
|
||||||
|
timestamp=ts,
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
logger.debug("Slack done reaction failed: {}", e)
|
||||||
|
|
||||||
def _is_allowed(self, sender_id: str, chat_id: str, channel_type: str) -> bool:
|
def _is_allowed(self, sender_id: str, chat_id: str, channel_type: str) -> bool:
|
||||||
if channel_type == "im":
|
if channel_type == "im":
|
||||||
if not self.config.dm.enabled:
|
if not self.config.dm.enabled:
|
||||||
|
|||||||
@ -261,47 +261,90 @@ def main(
|
|||||||
|
|
||||||
|
|
||||||
@app.command()
|
@app.command()
|
||||||
def onboard():
|
def onboard(
|
||||||
"""Initialize nanobot configuration and workspace with interactive wizard."""
|
workspace: str | None = typer.Option(None, "--workspace", "-w", help="Workspace directory"),
|
||||||
from nanobot.config.loader import get_config_path, load_config, save_config
|
config: str | None = typer.Option(None, "--config", "-c", help="Path to config file"),
|
||||||
|
non_interactive: bool = typer.Option(False, "--non-interactive", help="Skip interactive wizard"),
|
||||||
|
):
|
||||||
|
"""Initialize nanobot configuration and workspace."""
|
||||||
|
from nanobot.config.loader import get_config_path, load_config, save_config, set_config_path
|
||||||
from nanobot.config.schema import Config
|
from nanobot.config.schema import Config
|
||||||
|
|
||||||
config_path = get_config_path()
|
if config:
|
||||||
|
config_path = Path(config).expanduser().resolve()
|
||||||
if config_path.exists():
|
set_config_path(config_path)
|
||||||
config = load_config()
|
console.print(f"[dim]Using config: {config_path}[/dim]")
|
||||||
else:
|
else:
|
||||||
config = Config()
|
config_path = get_config_path()
|
||||||
save_config(config)
|
|
||||||
console.print(f"[green]✓[/green] Created config at {config_path}")
|
|
||||||
|
|
||||||
# Run interactive wizard
|
def _apply_workspace_override(loaded: Config) -> Config:
|
||||||
from nanobot.cli.onboard_wizard import run_onboard
|
if workspace:
|
||||||
|
loaded.agents.defaults.workspace = workspace
|
||||||
|
return loaded
|
||||||
|
|
||||||
try:
|
# Non-interactive mode: simple config creation/update
|
||||||
config = run_onboard()
|
if non_interactive:
|
||||||
save_config(config)
|
if config_path.exists():
|
||||||
console.print(f"[green]✓[/green] Config saved at {config_path}")
|
console.print(f"[yellow]Config already exists at {config_path}[/yellow]")
|
||||||
except Exception as e:
|
console.print(" [bold]y[/bold] = overwrite with defaults (existing values will be lost)")
|
||||||
console.print(f"[red]✗[/red] Error during configuration: {e}")
|
console.print(" [bold]N[/bold] = refresh config, keeping existing values and adding new fields")
|
||||||
console.print("[yellow]Please run 'nanobot onboard' again to complete setup.[/yellow]")
|
if typer.confirm("Overwrite?"):
|
||||||
raise typer.Exit(1)
|
config = _apply_workspace_override(Config())
|
||||||
|
save_config(config, config_path)
|
||||||
|
console.print(f"[green]✓[/green] Config reset to defaults at {config_path}")
|
||||||
|
else:
|
||||||
|
config = _apply_workspace_override(load_config(config_path))
|
||||||
|
save_config(config, config_path)
|
||||||
|
console.print(f"[green]✓[/green] Config refreshed at {config_path} (existing values preserved)")
|
||||||
|
else:
|
||||||
|
config = _apply_workspace_override(Config())
|
||||||
|
save_config(config, config_path)
|
||||||
|
console.print(f"[green]✓[/green] Created config at {config_path}")
|
||||||
|
console.print("[dim]Config template now uses `maxTokens` + `contextWindowTokens`; `memoryWindow` is no longer a runtime setting.[/dim]")
|
||||||
|
else:
|
||||||
|
# Interactive mode: use wizard
|
||||||
|
if config_path.exists():
|
||||||
|
config = load_config()
|
||||||
|
else:
|
||||||
|
config = Config()
|
||||||
|
save_config(config)
|
||||||
|
console.print(f"[green]✓[/green] Created config at {config_path}")
|
||||||
|
|
||||||
|
# Run interactive wizard
|
||||||
|
from nanobot.cli.onboard_wizard import run_onboard
|
||||||
|
|
||||||
|
try:
|
||||||
|
config = run_onboard()
|
||||||
|
save_config(config)
|
||||||
|
console.print(f"[green]✓[/green] Config saved at {config_path}")
|
||||||
|
except Exception as e:
|
||||||
|
console.print(f"[red]✗[/red] Error during configuration: {e}")
|
||||||
|
console.print("[yellow]Please run 'nanobot onboard' again to complete setup.[/yellow]")
|
||||||
|
raise typer.Exit(1)
|
||||||
|
|
||||||
_onboard_plugins(config_path)
|
_onboard_plugins(config_path)
|
||||||
|
|
||||||
# Create workspace
|
# Create workspace, preferring the configured workspace path.
|
||||||
workspace = get_workspace_path()
|
workspace = get_workspace_path(config.workspace_path)
|
||||||
|
|
||||||
if not workspace.exists():
|
if not workspace.exists():
|
||||||
workspace.mkdir(parents=True, exist_ok=True)
|
workspace.mkdir(parents=True, exist_ok=True)
|
||||||
console.print(f"[green]✓[/green] Created workspace at {workspace}")
|
console.print(f"[green]✓[/green] Created workspace at {workspace}")
|
||||||
|
|
||||||
sync_workspace_templates(workspace)
|
sync_workspace_templates(workspace)
|
||||||
|
|
||||||
|
agent_cmd = 'nanobot agent -m "Hello!"'
|
||||||
|
if config:
|
||||||
|
agent_cmd += f" --config {config_path}"
|
||||||
|
|
||||||
console.print(f"\n{__logo__} nanobot is ready!")
|
console.print(f"\n{__logo__} nanobot is ready!")
|
||||||
console.print("\nNext steps:")
|
console.print("\nNext steps:")
|
||||||
console.print(" 1. Chat: [cyan]nanobot agent -m \"Hello!\"[/cyan]")
|
if non_interactive:
|
||||||
console.print(" 2. Start gateway: [cyan]nanobot gateway[/cyan]")
|
console.print(f" 1. Add your API key to [cyan]{config_path}[/cyan]")
|
||||||
|
console.print(" Get one at: https://openrouter.ai/keys")
|
||||||
|
console.print(f" 2. Chat: [cyan]{agent_cmd}[/cyan]")
|
||||||
|
else:
|
||||||
|
console.print(" 1. Chat: [cyan]nanobot agent -m \"Hello!\"[/cyan]")
|
||||||
|
console.print(" 2. Start gateway: [cyan]nanobot gateway[/cyan]")
|
||||||
console.print("\n[dim]Want Telegram/WhatsApp? See: https://github.com/HKUDS/nanobot#-chat-apps[/dim]")
|
console.print("\n[dim]Want Telegram/WhatsApp? See: https://github.com/HKUDS/nanobot#-chat-apps[/dim]")
|
||||||
|
|
||||||
|
|
||||||
@ -363,6 +406,7 @@ def _make_provider(config: Config):
|
|||||||
api_key=p.api_key if p else "no-key",
|
api_key=p.api_key if p else "no-key",
|
||||||
api_base=config.get_api_base(model) or "http://localhost:8000/v1",
|
api_base=config.get_api_base(model) or "http://localhost:8000/v1",
|
||||||
default_model=model,
|
default_model=model,
|
||||||
|
extra_headers=p.extra_headers if p else None,
|
||||||
)
|
)
|
||||||
# Azure OpenAI: direct Azure OpenAI endpoint with deployment name
|
# Azure OpenAI: direct Azure OpenAI endpoint with deployment name
|
||||||
elif provider_name == "azure_openai":
|
elif provider_name == "azure_openai":
|
||||||
|
|||||||
@ -13,7 +13,6 @@ class Base(BaseModel):
|
|||||||
|
|
||||||
model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True)
|
model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True)
|
||||||
|
|
||||||
|
|
||||||
class ChannelsConfig(Base):
|
class ChannelsConfig(Base):
|
||||||
"""Configuration for chat channels.
|
"""Configuration for chat channels.
|
||||||
|
|
||||||
|
|||||||
@ -13,14 +13,25 @@ from nanobot.providers.base import LLMProvider, LLMResponse, ToolCallRequest
|
|||||||
|
|
||||||
class CustomProvider(LLMProvider):
|
class CustomProvider(LLMProvider):
|
||||||
|
|
||||||
def __init__(self, api_key: str = "no-key", api_base: str = "http://localhost:8000/v1", default_model: str = "default"):
|
def __init__(
|
||||||
|
self,
|
||||||
|
api_key: str = "no-key",
|
||||||
|
api_base: str = "http://localhost:8000/v1",
|
||||||
|
default_model: str = "default",
|
||||||
|
extra_headers: dict[str, str] | None = None,
|
||||||
|
):
|
||||||
super().__init__(api_key, api_base)
|
super().__init__(api_key, api_base)
|
||||||
self.default_model = default_model
|
self.default_model = default_model
|
||||||
# Keep affinity stable for this provider instance to improve backend cache locality.
|
# Keep affinity stable for this provider instance to improve backend cache locality,
|
||||||
|
# while still letting users attach provider-specific headers for custom gateways.
|
||||||
|
default_headers = {
|
||||||
|
"x-session-affinity": uuid.uuid4().hex,
|
||||||
|
**(extra_headers or {}),
|
||||||
|
}
|
||||||
self._client = AsyncOpenAI(
|
self._client = AsyncOpenAI(
|
||||||
api_key=api_key,
|
api_key=api_key,
|
||||||
base_url=api_base,
|
base_url=api_base,
|
||||||
default_headers={"x-session-affinity": uuid.uuid4().hex},
|
default_headers=default_headers,
|
||||||
)
|
)
|
||||||
|
|
||||||
async def chat(self, messages: list[dict[str, Any]], tools: list[dict[str, Any]] | None = None,
|
async def chat(self, messages: list[dict[str, Any]], tools: list[dict[str, Any]] | None = None,
|
||||||
@ -43,6 +54,11 @@ class CustomProvider(LLMProvider):
|
|||||||
return LLMResponse(content=f"Error: {e}", finish_reason="error")
|
return LLMResponse(content=f"Error: {e}", finish_reason="error")
|
||||||
|
|
||||||
def _parse(self, response: Any) -> LLMResponse:
|
def _parse(self, response: Any) -> LLMResponse:
|
||||||
|
if not response.choices:
|
||||||
|
return LLMResponse(
|
||||||
|
content="Error: API returned empty choices. This may indicate a temporary service issue or an invalid model response.",
|
||||||
|
finish_reason="error"
|
||||||
|
)
|
||||||
choice = response.choices[0]
|
choice = response.choices[0]
|
||||||
msg = choice.message
|
msg = choice.message
|
||||||
tool_calls = [
|
tool_calls = [
|
||||||
|
|||||||
@ -1,11 +1,13 @@
|
|||||||
|
import json
|
||||||
import re
|
import re
|
||||||
|
import shutil
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from unittest.mock import AsyncMock, MagicMock, patch
|
from unittest.mock import AsyncMock, MagicMock, patch
|
||||||
|
|
||||||
import pytest
|
import pytest
|
||||||
from typer.testing import CliRunner
|
from typer.testing import CliRunner
|
||||||
|
|
||||||
from nanobot.cli.commands import app
|
from nanobot.cli.commands import _make_provider, app
|
||||||
from nanobot.config.schema import Config
|
from nanobot.config.schema import Config
|
||||||
from nanobot.providers.litellm_provider import LiteLLMProvider
|
from nanobot.providers.litellm_provider import LiteLLMProvider
|
||||||
from nanobot.providers.openai_codex_provider import _strip_model_prefix
|
from nanobot.providers.openai_codex_provider import _strip_model_prefix
|
||||||
@ -24,6 +26,132 @@ def _strip_ansi(text):
|
|||||||
return ansi_escape.sub('', text)
|
return ansi_escape.sub('', text)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def mock_paths():
|
||||||
|
"""Mock config/workspace paths for test isolation."""
|
||||||
|
with patch("nanobot.config.loader.get_config_path") as mock_cp, \
|
||||||
|
patch("nanobot.config.loader.save_config") as mock_sc, \
|
||||||
|
patch("nanobot.config.loader.load_config") as mock_lc, \
|
||||||
|
patch("nanobot.cli.commands.get_workspace_path") as mock_ws:
|
||||||
|
|
||||||
|
base_dir = Path("./test_onboard_data")
|
||||||
|
if base_dir.exists():
|
||||||
|
shutil.rmtree(base_dir)
|
||||||
|
base_dir.mkdir()
|
||||||
|
|
||||||
|
config_file = base_dir / "config.json"
|
||||||
|
workspace_dir = base_dir / "workspace"
|
||||||
|
|
||||||
|
mock_cp.return_value = config_file
|
||||||
|
mock_ws.return_value = workspace_dir
|
||||||
|
mock_lc.side_effect = lambda _config_path=None: Config()
|
||||||
|
|
||||||
|
def _save_config(config: Config, config_path: Path | None = None):
|
||||||
|
target = config_path or config_file
|
||||||
|
target.parent.mkdir(parents=True, exist_ok=True)
|
||||||
|
target.write_text(json.dumps(config.model_dump(by_alias=True)), encoding="utf-8")
|
||||||
|
|
||||||
|
mock_sc.side_effect = _save_config
|
||||||
|
|
||||||
|
yield config_file, workspace_dir, mock_ws
|
||||||
|
|
||||||
|
if base_dir.exists():
|
||||||
|
shutil.rmtree(base_dir)
|
||||||
|
|
||||||
|
|
||||||
|
def test_onboard_fresh_install_non_interactive(mock_paths):
|
||||||
|
"""No existing config — should create from scratch in non-interactive mode."""
|
||||||
|
config_file, workspace_dir, mock_ws = mock_paths
|
||||||
|
|
||||||
|
result = runner.invoke(app, ["onboard", "--non-interactive"])
|
||||||
|
|
||||||
|
assert result.exit_code == 0
|
||||||
|
assert "Created config" in result.stdout
|
||||||
|
assert "Created workspace" in result.stdout
|
||||||
|
assert "nanobot is ready" in result.stdout
|
||||||
|
assert config_file.exists()
|
||||||
|
assert (workspace_dir / "AGENTS.md").exists()
|
||||||
|
assert (workspace_dir / "memory" / "MEMORY.md").exists()
|
||||||
|
expected_workspace = Config().workspace_path
|
||||||
|
assert mock_ws.call_args.args == (expected_workspace,)
|
||||||
|
|
||||||
|
|
||||||
|
def test_onboard_existing_config_refresh_non_interactive(mock_paths):
|
||||||
|
"""Config exists, user declines overwrite — should refresh (load-merge-save)."""
|
||||||
|
config_file, workspace_dir, _ = mock_paths
|
||||||
|
config_file.write_text('{"existing": true}')
|
||||||
|
|
||||||
|
result = runner.invoke(app, ["onboard", "--non-interactive"], input="n\n")
|
||||||
|
|
||||||
|
assert result.exit_code == 0
|
||||||
|
assert "Config already exists" in result.stdout
|
||||||
|
assert "existing values preserved" in result.stdout
|
||||||
|
assert workspace_dir.exists()
|
||||||
|
assert (workspace_dir / "AGENTS.md").exists()
|
||||||
|
|
||||||
|
|
||||||
|
def test_onboard_existing_config_overwrite_non_interactive(mock_paths):
|
||||||
|
"""Config exists, user confirms overwrite — should reset to defaults."""
|
||||||
|
config_file, workspace_dir, _ = mock_paths
|
||||||
|
config_file.write_text('{"existing": true}')
|
||||||
|
|
||||||
|
result = runner.invoke(app, ["onboard", "--non-interactive"], input="y\n")
|
||||||
|
|
||||||
|
assert result.exit_code == 0
|
||||||
|
assert "Config already exists" in result.stdout
|
||||||
|
assert "Config reset to defaults" in result.stdout
|
||||||
|
assert workspace_dir.exists()
|
||||||
|
|
||||||
|
|
||||||
|
def test_onboard_existing_workspace_safe_create_non_interactive(mock_paths):
|
||||||
|
"""Workspace exists — should not recreate, but still add missing templates."""
|
||||||
|
config_file, workspace_dir, _ = mock_paths
|
||||||
|
workspace_dir.mkdir(parents=True)
|
||||||
|
config_file.write_text("{}")
|
||||||
|
|
||||||
|
result = runner.invoke(app, ["onboard", "--non-interactive"], input="n\n")
|
||||||
|
|
||||||
|
assert result.exit_code == 0
|
||||||
|
assert "Created workspace" not in result.stdout
|
||||||
|
assert "Created AGENTS.md" in result.stdout
|
||||||
|
assert (workspace_dir / "AGENTS.md").exists()
|
||||||
|
|
||||||
|
|
||||||
|
def test_onboard_help_shows_workspace_and_config_options():
|
||||||
|
result = runner.invoke(app, ["onboard", "--help"])
|
||||||
|
|
||||||
|
assert result.exit_code == 0
|
||||||
|
stripped_output = _strip_ansi(result.stdout)
|
||||||
|
assert "--workspace" in stripped_output
|
||||||
|
assert "-w" in stripped_output
|
||||||
|
assert "--config" in stripped_output
|
||||||
|
assert "-c" in stripped_output
|
||||||
|
assert "--non-interactive" in stripped_output
|
||||||
|
assert "--dir" not in stripped_output
|
||||||
|
|
||||||
|
|
||||||
|
def test_onboard_uses_explicit_config_and_workspace_paths(tmp_path, monkeypatch):
|
||||||
|
config_path = tmp_path / "instance" / "config.json"
|
||||||
|
workspace_path = tmp_path / "workspace"
|
||||||
|
|
||||||
|
monkeypatch.setattr("nanobot.channels.registry.discover_all", lambda: {})
|
||||||
|
|
||||||
|
result = runner.invoke(
|
||||||
|
app,
|
||||||
|
["onboard", "--config", str(config_path), "--workspace", str(workspace_path), "--non-interactive"],
|
||||||
|
)
|
||||||
|
|
||||||
|
assert result.exit_code == 0
|
||||||
|
saved = Config.model_validate(json.loads(config_path.read_text(encoding="utf-8")))
|
||||||
|
assert saved.workspace_path == workspace_path
|
||||||
|
assert (workspace_path / "AGENTS.md").exists()
|
||||||
|
stripped_output = _strip_ansi(result.stdout)
|
||||||
|
compact_output = stripped_output.replace("\n", "")
|
||||||
|
resolved_config = str(config_path.resolve())
|
||||||
|
assert resolved_config in compact_output
|
||||||
|
assert f"--config {resolved_config}" in compact_output
|
||||||
|
|
||||||
|
|
||||||
def test_config_matches_github_copilot_codex_with_hyphen_prefix():
|
def test_config_matches_github_copilot_codex_with_hyphen_prefix():
|
||||||
config = Config()
|
config = Config()
|
||||||
config.agents.defaults.model = "github-copilot/gpt-5.3-codex"
|
config.agents.defaults.model = "github-copilot/gpt-5.3-codex"
|
||||||
@ -116,6 +244,33 @@ def test_openai_codex_strip_prefix_supports_hyphen_and_underscore():
|
|||||||
assert _strip_model_prefix("openai_codex/gpt-5.1-codex") == "gpt-5.1-codex"
|
assert _strip_model_prefix("openai_codex/gpt-5.1-codex") == "gpt-5.1-codex"
|
||||||
|
|
||||||
|
|
||||||
|
def test_make_provider_passes_extra_headers_to_custom_provider():
|
||||||
|
config = Config.model_validate(
|
||||||
|
{
|
||||||
|
"agents": {"defaults": {"provider": "custom", "model": "gpt-4o-mini"}},
|
||||||
|
"providers": {
|
||||||
|
"custom": {
|
||||||
|
"apiKey": "test-key",
|
||||||
|
"apiBase": "https://example.com/v1",
|
||||||
|
"extraHeaders": {
|
||||||
|
"APP-Code": "demo-app",
|
||||||
|
"x-session-affinity": "sticky-session",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
with patch("nanobot.providers.custom_provider.AsyncOpenAI") as mock_async_openai:
|
||||||
|
_make_provider(config)
|
||||||
|
|
||||||
|
kwargs = mock_async_openai.call_args.kwargs
|
||||||
|
assert kwargs["api_key"] == "test-key"
|
||||||
|
assert kwargs["base_url"] == "https://example.com/v1"
|
||||||
|
assert kwargs["default_headers"]["APP-Code"] == "demo-app"
|
||||||
|
assert kwargs["default_headers"]["x-session-affinity"] == "sticky-session"
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture
|
@pytest.fixture
|
||||||
def mock_agent_runtime(tmp_path):
|
def mock_agent_runtime(tmp_path):
|
||||||
"""Mock agent command dependencies for focused CLI tests."""
|
"""Mock agent command dependencies for focused CLI tests."""
|
||||||
|
|||||||
@ -1,7 +1,13 @@
|
|||||||
import json
|
import json
|
||||||
|
from types import SimpleNamespace
|
||||||
|
|
||||||
|
from typer.testing import CliRunner
|
||||||
|
|
||||||
|
from nanobot.cli.commands import app
|
||||||
from nanobot.config.loader import load_config, save_config
|
from nanobot.config.loader import load_config, save_config
|
||||||
|
|
||||||
|
runner = CliRunner()
|
||||||
|
|
||||||
|
|
||||||
def test_load_config_keeps_max_tokens_and_warns_on_legacy_memory_window(tmp_path) -> None:
|
def test_load_config_keeps_max_tokens_and_warns_on_legacy_memory_window(tmp_path) -> None:
|
||||||
config_path = tmp_path / "config.json"
|
config_path = tmp_path / "config.json"
|
||||||
@ -50,3 +56,77 @@ def test_save_config_writes_context_window_tokens_but_not_memory_window(tmp_path
|
|||||||
assert defaults["maxTokens"] == 2222
|
assert defaults["maxTokens"] == 2222
|
||||||
assert defaults["contextWindowTokens"] == 65_536
|
assert defaults["contextWindowTokens"] == 65_536
|
||||||
assert "memoryWindow" not in defaults
|
assert "memoryWindow" not in defaults
|
||||||
|
|
||||||
|
|
||||||
|
def test_onboard_refresh_rewrites_legacy_config_template(tmp_path, monkeypatch) -> None:
|
||||||
|
config_path = tmp_path / "config.json"
|
||||||
|
workspace = tmp_path / "workspace"
|
||||||
|
config_path.write_text(
|
||||||
|
json.dumps(
|
||||||
|
{
|
||||||
|
"agents": {
|
||||||
|
"defaults": {
|
||||||
|
"maxTokens": 3333,
|
||||||
|
"memoryWindow": 50,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
),
|
||||||
|
encoding="utf-8",
|
||||||
|
)
|
||||||
|
|
||||||
|
monkeypatch.setattr("nanobot.config.loader.get_config_path", lambda: config_path)
|
||||||
|
monkeypatch.setattr("nanobot.cli.commands.get_workspace_path", lambda _workspace=None: workspace)
|
||||||
|
|
||||||
|
result = runner.invoke(app, ["onboard", "--non-interactive"], input="n\n")
|
||||||
|
|
||||||
|
assert result.exit_code == 0
|
||||||
|
assert "contextWindowTokens" in result.stdout
|
||||||
|
saved = json.loads(config_path.read_text(encoding="utf-8"))
|
||||||
|
defaults = saved["agents"]["defaults"]
|
||||||
|
assert defaults["maxTokens"] == 3333
|
||||||
|
assert defaults["contextWindowTokens"] == 65_536
|
||||||
|
assert "memoryWindow" not in defaults
|
||||||
|
|
||||||
|
|
||||||
|
def test_onboard_refresh_backfills_missing_channel_fields(tmp_path, monkeypatch) -> None:
|
||||||
|
config_path = tmp_path / "config.json"
|
||||||
|
workspace = tmp_path / "workspace"
|
||||||
|
config_path.write_text(
|
||||||
|
json.dumps(
|
||||||
|
{
|
||||||
|
"channels": {
|
||||||
|
"qq": {
|
||||||
|
"enabled": False,
|
||||||
|
"appId": "",
|
||||||
|
"secret": "",
|
||||||
|
"allowFrom": [],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
),
|
||||||
|
encoding="utf-8",
|
||||||
|
)
|
||||||
|
|
||||||
|
monkeypatch.setattr("nanobot.config.loader.get_config_path", lambda: config_path)
|
||||||
|
monkeypatch.setattr("nanobot.cli.commands.get_workspace_path", lambda _workspace=None: workspace)
|
||||||
|
monkeypatch.setattr(
|
||||||
|
"nanobot.channels.registry.discover_all",
|
||||||
|
lambda: {
|
||||||
|
"qq": SimpleNamespace(
|
||||||
|
default_config=lambda: {
|
||||||
|
"enabled": False,
|
||||||
|
"appId": "",
|
||||||
|
"secret": "",
|
||||||
|
"allowFrom": [],
|
||||||
|
"msgFormat": "plain",
|
||||||
|
}
|
||||||
|
)
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
result = runner.invoke(app, ["onboard", "--non-interactive"], input="n\n")
|
||||||
|
|
||||||
|
assert result.exit_code == 0
|
||||||
|
saved = json.loads(config_path.read_text(encoding="utf-8"))
|
||||||
|
assert saved["channels"]["qq"]["msgFormat"] == "plain"
|
||||||
|
|||||||
13
tests/test_custom_provider.py
Normal file
13
tests/test_custom_provider.py
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
from types import SimpleNamespace
|
||||||
|
|
||||||
|
from nanobot.providers.custom_provider import CustomProvider
|
||||||
|
|
||||||
|
|
||||||
|
def test_custom_provider_parse_handles_empty_choices() -> None:
    """_parse must report an error result instead of raising when choices is empty."""
    empty_response = SimpleNamespace(choices=[])

    parsed = CustomProvider()._parse(empty_response)

    assert parsed.finish_reason == "error"
    assert "empty choices" in parsed.content
|
||||||
57
tests/test_feishu_markdown_rendering.py
Normal file
57
tests/test_feishu_markdown_rendering.py
Normal file
@ -0,0 +1,57 @@
|
|||||||
|
from nanobot.channels.feishu import FeishuChannel
|
||||||
|
|
||||||
|
|
||||||
|
def test_parse_md_table_strips_markdown_formatting_in_headers_and_cells() -> None:
    """Bold/underline/italic/strikethrough markers are stripped from headers and cells."""
    markdown_table = """
| **Name** | __Status__ | *Notes* | ~~State~~ |
| --- | --- | --- | --- |
| **Alice** | __Ready__ | *Fast* | ~~Old~~ |
"""
    table = FeishuChannel._parse_md_table(markdown_table)

    assert table is not None
    header_names = [column["display_name"] for column in table["columns"]]
    assert header_names == ["Name", "Status", "Notes", "State"]
    assert table["rows"] == [{"c0": "Alice", "c1": "Ready", "c2": "Fast", "c3": "Old"}]
|
||||||
|
|
||||||
|
|
||||||
|
def test_split_headings_strips_embedded_markdown_before_bolding() -> None:
    """Inline markdown inside a heading is flattened before the whole heading is bolded."""
    # __new__ skips __init__ so no real Feishu client/config is needed.
    chan = FeishuChannel.__new__(FeishuChannel)

    rendered = chan._split_headings("# **Important** *status* ~~update~~")

    expected_div = {
        "tag": "div",
        "text": {"tag": "lark_md", "content": "**Important status update**"},
    }
    assert rendered == [expected_div]
|
||||||
|
|
||||||
|
|
||||||
|
def test_split_headings_keeps_markdown_body_and_code_blocks_intact() -> None:
    """Body markdown and fenced code blocks survive heading extraction untouched."""
    chan = FeishuChannel.__new__(FeishuChannel)
    source_md = "# **Heading**\n\nBody with **bold** text.\n\n```python\nprint('hi')\n```"

    rendered = chan._split_headings(source_md)

    heading_element = rendered[0]
    assert heading_element == {
        "tag": "div",
        "text": {"tag": "lark_md", "content": "**Heading**"},
    }
    body_element = rendered[1]
    assert body_element["tag"] == "markdown"
    assert "Body with **bold** text." in body_element["content"]
    assert "```python\nprint('hi')\n```" in body_element["content"]
|
||||||
@ -1,6 +1,7 @@
|
|||||||
"""Tests for Feishu message reply (quote) feature."""
|
"""Tests for Feishu message reply (quote) feature."""
|
||||||
import asyncio
|
import asyncio
|
||||||
import json
|
import json
|
||||||
|
from pathlib import Path
|
||||||
from types import SimpleNamespace
|
from types import SimpleNamespace
|
||||||
from unittest.mock import MagicMock, patch
|
from unittest.mock import MagicMock, patch
|
||||||
|
|
||||||
@ -186,6 +187,48 @@ def test_reply_message_sync_returns_false_on_exception() -> None:
|
|||||||
assert ok is False
|
assert ok is False
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
@pytest.mark.parametrize(
    ("filename", "expected_msg_type"),
    [
        ("voice.opus", "audio"),
        ("clip.mp4", "video"),
        ("report.pdf", "file"),
    ],
)
async def test_send_uses_expected_feishu_msg_type_for_uploaded_files(
    tmp_path: Path, filename: str, expected_msg_type: str
) -> None:
    """Uploaded media must be sent with the Feishu msg_type matching the file extension."""
    channel = _make_feishu_channel()
    media_file = tmp_path / filename
    media_file.write_bytes(b"demo")

    recorded: list[tuple[str, str, str, str]] = []

    def _capture(receive_id_type: str, receive_id: str, msg_type: str, content: str) -> None:
        recorded.append((receive_id_type, receive_id, msg_type, content))

    upload_patch = patch.object(channel, "_upload_file_sync", return_value="file-key")
    send_patch = patch.object(channel, "_send_message_sync", side_effect=_capture)
    with upload_patch, send_patch:
        message = OutboundMessage(
            channel="feishu",
            chat_id="oc_test",
            content="",
            media=[str(media_file)],
            metadata={},
        )
        await channel.send(message)

    assert len(recorded) == 1
    id_type, target_id, msg_type, payload = recorded[0]
    assert id_type == "chat_id"
    assert target_id == "oc_test"
    assert msg_type == expected_msg_type
    assert json.loads(payload) == {"file_key": "file-key"}
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
# ---------------------------------------------------------------------------
|
||||||
# send() — reply routing tests
|
# send() — reply routing tests
|
||||||
# ---------------------------------------------------------------------------
|
# ---------------------------------------------------------------------------
|
||||||
|
|||||||
@ -12,6 +12,8 @@ class _FakeAsyncWebClient:
|
|||||||
def __init__(self) -> None:
|
def __init__(self) -> None:
|
||||||
self.chat_post_calls: list[dict[str, object | None]] = []
|
self.chat_post_calls: list[dict[str, object | None]] = []
|
||||||
self.file_upload_calls: list[dict[str, object | None]] = []
|
self.file_upload_calls: list[dict[str, object | None]] = []
|
||||||
|
self.reactions_add_calls: list[dict[str, object | None]] = []
|
||||||
|
self.reactions_remove_calls: list[dict[str, object | None]] = []
|
||||||
|
|
||||||
async def chat_postMessage(
|
async def chat_postMessage(
|
||||||
self,
|
self,
|
||||||
@ -43,6 +45,36 @@ class _FakeAsyncWebClient:
|
|||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
async def reactions_add(
    self,
    *,
    channel: str,
    name: str,
    timestamp: str,
) -> None:
    """Record a reactions.add call for later assertions instead of hitting Slack."""
    call = {"channel": channel, "name": name, "timestamp": timestamp}
    self.reactions_add_calls.append(call)
|
||||||
|
|
||||||
|
async def reactions_remove(
    self,
    *,
    channel: str,
    name: str,
    timestamp: str,
) -> None:
    """Record a reactions.remove call for later assertions instead of hitting Slack."""
    call = {"channel": channel, "name": name, "timestamp": timestamp}
    self.reactions_remove_calls.append(call)
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.asyncio
|
@pytest.mark.asyncio
|
||||||
async def test_send_uses_thread_for_channel_messages() -> None:
|
async def test_send_uses_thread_for_channel_messages() -> None:
|
||||||
@ -88,3 +120,28 @@ async def test_send_omits_thread_for_dm_messages() -> None:
|
|||||||
assert fake_web.chat_post_calls[0]["thread_ts"] is None
|
assert fake_web.chat_post_calls[0]["thread_ts"] is None
|
||||||
assert len(fake_web.file_upload_calls) == 1
|
assert len(fake_web.file_upload_calls) == 1
|
||||||
assert fake_web.file_upload_calls[0]["thread_ts"] is None
|
assert fake_web.file_upload_calls[0]["thread_ts"] is None
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
async def test_send_updates_reaction_when_final_response_sent() -> None:
    """Sending the final reply swaps the in-progress emoji for a checkmark on the trigger message."""
    channel = SlackChannel(SlackConfig(enabled=True, react_emoji="eyes"), MessageBus())
    fake_web = _FakeAsyncWebClient()
    channel._web_client = fake_web

    outbound = OutboundMessage(
        channel="slack",
        chat_id="C123",
        content="done",
        metadata={
            "slack": {"event": {"ts": "1700000000.000100"}, "channel_type": "channel"},
        },
    )
    await channel.send(outbound)

    removed = fake_web.reactions_remove_calls
    added = fake_web.reactions_add_calls
    assert removed == [{"channel": "C123", "name": "eyes", "timestamp": "1700000000.000100"}]
    assert added == [
        {"channel": "C123", "name": "white_check_mark", "timestamp": "1700000000.000100"}
    ]
|
||||||
|
|||||||
Loading…
x
Reference in New Issue
Block a user