Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
59 changes: 58 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
</p>
</div>

🐈 **nanobot** is an **ultra-lightweight** personal AI assistant inspired by [Clawdbot](https://github.com/openclaw/openclaw)

⚡️ Delivers core agent functionality in just **~4,000** lines of code — **99% smaller** than Clawdbot's 430k+ lines.

Expand Down Expand Up @@ -571,6 +571,63 @@ That's it! Environment variables, model prefixing, config matching, and `nanobot

</details>

### Model Aliases

> [!TIP]
> Model aliases let you define short, memorable names for complex model identifiers. The provider prefix is added automatically based on which provider you configure the alias under.

**Example: NVIDIA NIM with aliases**

```json
{
"providers": {
"vllm": {
"apiKey": "nvapi-xxx",
"apiBase": "https://integrate.api.nvidia.com/v1",
"models": {
"glm4": "z-ai/glm4.7",
"llama": "meta-llama/Llama-3.1-8B-Instruct",
"mistral": "mistralai/Mistral-7B-Instruct-v0.3"
}
}
},
"agents": {
"defaults": {
"model": "glm4"
}
}
}
```

**Example: Business-friendly aliases**

```json
{
"providers": {
"openrouter": {
"apiKey": "sk-or-xxx",
"models": {
"fast": "anthropic/claude-haiku-3.5",
"smart": "anthropic/claude-opus-4-5",
"code": "openai/gpt-4-turbo"
}
}
},
"agents": {
"defaults": {
"model": "smart"
}
}
}
```

**How it works:**

1. When you specify a model (e.g., `"glm4"`), nanobot checks all provider `models` dictionaries
2. If found as an alias under `providers.vllm`, it adds the `hosted_vllm/` prefix → `hosted_vllm/z-ai/glm4.7`
3. If found under `providers.openrouter`, it adds the `openrouter/` prefix → `openrouter/anthropic/claude-haiku-3.5`
4. If no alias matches, nanobot falls back to keyword matching (the existing behavior)


### Security

Expand Down
19 changes: 16 additions & 3 deletions nanobot/cli/commands.py
Original file line number Diff line number Diff line change
Expand Up @@ -298,8 +298,16 @@ def _create_workspace_templates(workspace: Path):
def _make_provider(config):
"""Create LiteLLMProvider from config. Exits if no API key found."""
from nanobot.providers.litellm_provider import LiteLLMProvider
p = config.get_provider()
model = config.agents.defaults.model
# Resolve alias and check if it's an alias
actual_model, provider_config = config.resolve_model(model)
if provider_config:
p = provider_config
model = actual_model
is_resolved = True
else:
p = config.get_provider()
is_resolved = False
if not (p and p.api_key) and not model.startswith("bedrock/"):
console.print("[red]Error: No API key configured.[/red]")
console.print("Set one in ~/.nanobot/config.json under providers section")
Expand All @@ -310,6 +318,7 @@ def _make_provider(config):
default_model=model,
extra_headers=p.extra_headers if p else None,
provider_name=config.get_provider_name(),
is_resolved=is_resolved,
)


Expand Down Expand Up @@ -353,7 +362,6 @@ def gateway(
bus=bus,
provider=provider,
workspace=config.workspace_path,
model=config.agents.defaults.model,
max_iterations=config.agents.defaults.max_tool_iterations,
brave_api_key=config.tools.web.search.api_key or None,
exec_config=config.tools.exec,
Expand Down Expand Up @@ -838,7 +846,12 @@ def status():
if config_path.exists():
from nanobot.providers.registry import PROVIDERS

console.print(f"Model: {config.agents.defaults.model}")
raw_model = config.agents.defaults.model
resolved_model, _ = config.resolve_model(raw_model)
if raw_model != resolved_model:
console.print(f"Model: {raw_model} [dim]→ {resolved_model}[/dim]")
else:
console.print(f"Model: {raw_model}")

# Check API keys from registry
for spec in PROVIDERS:
Expand Down
54 changes: 49 additions & 5 deletions nanobot/config/schema.py
Original file line number Diff line number Diff line change
Expand Up @@ -136,6 +136,7 @@ class ProviderConfig(BaseModel):
api_key: str = ""
api_base: str | None = None
extra_headers: dict[str, str] | None = None # Custom headers (e.g. APP-Code for AiHubMix)
models: dict[str, str] = Field(default_factory=dict) # Model aliases: alias -> actual model name


class ProvidersConfig(BaseModel):
Expand Down Expand Up @@ -189,12 +190,12 @@ class Config(BaseSettings):
providers: ProvidersConfig = Field(default_factory=ProvidersConfig)
gateway: GatewayConfig = Field(default_factory=GatewayConfig)
tools: ToolsConfig = Field(default_factory=ToolsConfig)

@property
def workspace_path(self) -> Path:
"""Get expanded workspace path."""
return Path(self.agents.defaults.workspace).expanduser()

def _match_provider(self, model: str | None = None) -> tuple["ProviderConfig | None", str | None]:
"""Match provider config and its registry name. Returns (config, spec_name)."""
from nanobot.providers.registry import PROVIDERS
Expand All @@ -213,9 +214,52 @@ def _match_provider(self, model: str | None = None) -> tuple["ProviderConfig | N
return p, spec.name
return None, None

def resolve_model(self, model: str) -> tuple[str, ProviderConfig | None]:
    """Translate a model alias into a fully-prefixed model identifier.

    Each provider section may define a ``models`` mapping of short
    aliases to real model names (configured WITHOUT a provider prefix).
    When *model* matches one of those aliases, the owning provider's
    route prefix is prepended to the configured model name.

    Args:
        model: A model name or a configured alias.

    Returns:
        ``(resolved_model, provider_config)`` if the alias was found,
        otherwise ``(model, None)`` with the input passed through.
    """
    cfg = self.providers
    # Pairs of (ProvidersConfig attribute name, route prefix to prepend).
    alias_sources = (
        ("vllm", "hosted_vllm"),
        ("openrouter", "openrouter"),
        ("aihubmix", "openai"),
        ("anthropic", "anthropic"),
        ("openai", "openai"),
        ("deepseek", "deepseek"),
        ("gemini", "gemini"),
        ("zhipu", "zhipu"),
        ("dashscope", "dashscope"),
        ("moonshot", "moonshot"),
        ("groq", "groq"),
    )
    for attr_name, prefix in alias_sources:
        source = getattr(cfg, attr_name)
        if model not in source.models:
            continue
        resolved = source.models[model]
        # Only prepend the prefix when the configured name lacks it.
        if not resolved.startswith(f"{prefix}/"):
            resolved = f"{prefix}/{resolved}"
        return resolved, source
    return model, None

def get_provider(self, model: str | None = None) -> ProviderConfig | None:
"""Get matched provider config (api_key, api_base, extra_headers). Falls back to first available."""
p, _ = self._match_provider(model)
model = model or self.agents.defaults.model
actual_model, actual_provider = self.resolve_model(model)
if actual_provider:
return actual_provider
p, _ = self._match_provider(actual_model)
return p

def get_provider_name(self, model: str | None = None) -> str | None:
Expand All @@ -227,7 +271,7 @@ def get_api_key(self, model: str | None = None) -> str | None:
"""Get API key for the given model. Falls back to first available key."""
p = self.get_provider(model)
return p.api_key if p else None

def get_api_base(self, model: str | None = None) -> str | None:
"""Get API base URL for the given model. Applies default URLs for known gateways."""
from nanobot.providers.registry import find_by_name
Expand All @@ -242,7 +286,7 @@ def get_api_base(self, model: str | None = None) -> str | None:
if spec and spec.is_gateway and spec.default_api_base:
return spec.default_api_base
return None

class Config:
env_prefix = "NANOBOT_"
env_nested_delimiter = "__"
25 changes: 16 additions & 9 deletions nanobot/providers/litellm_provider.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,24 +14,26 @@
class LiteLLMProvider(LLMProvider):
"""
LLM provider using LiteLLM for multi-provider support.

Supports OpenRouter, Anthropic, OpenAI, Gemini, and many other providers through
a unified interface. Provider-specific logic is driven by the registry
(see providers/registry.py) — no if-elif chains needed here.
"""

def __init__(
self,
api_key: str | None = None,
self,
api_key: str | None = None,
api_base: str | None = None,
default_model: str = "anthropic/claude-opus-4-5",
extra_headers: dict[str, str] | None = None,
provider_name: str | None = None,
is_resolved: bool = False,
):
super().__init__(api_key, api_base)
self.default_model = default_model
self.extra_headers = extra_headers or {}

self.is_resolved = is_resolved

# Detect gateway / local deployment.
# provider_name (from config key) is the primary signal;
# api_key / api_base are fallback for auto-detection.
Expand Down Expand Up @@ -109,19 +111,24 @@ async def chat(
) -> LLMResponse:
"""
Send a chat completion request via LiteLLM.

Args:
messages: List of message dicts with 'role' and 'content'.
tools: Optional list of tool definitions in OpenAI format.
model: Model identifier (e.g., 'anthropic/claude-sonnet-4-5').
max_tokens: Maximum tokens in response.
temperature: Sampling temperature.

Returns:
LLMResponse with content and/or tool calls.
"""
model = self._resolve_model(model or self.default_model)

model = model or self.default_model

# Auto-prefix model names for known providers
# Skip for model aliases (is_resolved=True) - model already has prefix in config
if not self.is_resolved:
model = self._resolve_model(model)

kwargs: dict[str, Any] = {
"model": model,
"messages": messages,
Expand Down