diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..8f26ea2
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,30 @@
+# Development tasks for debot (Python package + Rust extension via maturin).
+# All tool names are overridable, e.g. `make PYTHON=python3.12 test`.
+PYTHON ?= python3
+PIP ?= $(PYTHON) -m pip
+RUFF ?= ruff
+PYTEST ?= pytest
+MATURIN ?= maturin
+PYO3_USE_ABI3_FORWARD_COMPATIBILITY ?= 1
+
+.PHONY: lint test install build deps-build
+
+lint:
+	$(RUFF) check debot/
+	cargo fmt --check --manifest-path rust/Cargo.toml
+
+deps-build:
+	$(PIP) install maturin
+
+build: deps-build
+	PYO3_USE_ABI3_FORWARD_COMPATIBILITY=$(PYO3_USE_ABI3_FORWARD_COMPATIBILITY) \
+	$(MATURIN) build --release -m rust/Cargo.toml
+
+install:
+	PYO3_USE_ABI3_FORWARD_COMPATIBILITY=$(PYO3_USE_ABI3_FORWARD_COMPATIBILITY) \
+	$(PIP) install .
+
+test: build
+	@WHEEL=$$(ls -1t rust/target/wheels/*.whl | head -n 1); \
+	PYO3_USE_ABI3_FORWARD_COMPATIBILITY=$(PYO3_USE_ABI3_FORWARD_COMPATIBILITY) \
+	$(PIP) install $$WHEEL
+	$(PYTEST) tests/ -v --tb=short
diff --git a/README.md b/README.md
index 2b60bd6..0618bf7 100644
--- a/README.md
+++ b/README.md
@@ -42,29 +42,6 @@ If you need to specify a particular Python executable for maturin builds, set `P
-## ✨ Features
-
-
-
- 📈 24/7 Real-Time Market Analysis |
- 🚀 Full-Stack Software Engineer |
- 📅 Smart Daily Routine Manager |
- 📚 Personal Knowledge Assistant |
-
-
- 
|
- 
|
- 
|
- 
|
-
-
- | Discovery • Insights • Trends |
- Develop • Deploy • Scale |
- Schedule • Automate • Organize |
- Learn • Memory • Reasoning |
-
-
-
### Core Capabilities
| Category | What Debot Can Do |
@@ -123,6 +100,12 @@ debot onboard
"providers": {
"openrouter": {
"apiKey": "sk-or-v1-xxx"
+ },
+ "anthropic": {
+ "apiKey": "sk-ant-xxx"
+ },
+ "groq": {
+ "apiKey": "gsk_xxx"
}
},
"agents": {
@@ -136,6 +119,9 @@ debot onboard
}
```
+> [!TIP]
+> Adding multiple provider keys enables **cross-provider fallback**. If one provider's credits run out, Debot automatically routes to another.
+
**3. Chat**
@@ -231,6 +217,17 @@ Debot includes a **built-in intelligent router** (powered by Rust) that automati
The router runs automatically — no configuration needed. You can customize the tier-to-model mapping by editing the Rust router config (see `rust/src/router/config.rs`).
+**Automatic Fallback & Escalation:**
+
+When a model fails, Debot doesn't just give up — it automatically retries with alternative models:
+
+1. **Pre-check**: Before calling the API, estimates token count and compares against the model's context window. If the prompt is too large, skips straight to a bigger model.
+2. **Billing fallback (402 / insufficient credits)**: Tries same-tier alternatives from cheaper providers first (e.g. Groq free tier → DeepSeek → OpenAI), then escalates to the next tier.
+3. **Context window exceeded**: Escalates to the next tier with a larger context window.
+4. **Cross-provider routing**: If your OpenRouter credits run out, Debot automatically routes to providers where you have direct API keys (Anthropic, Groq, OpenAI, etc.).
+
+> Configure multiple provider keys in `~/.debot/config.json` to enable cross-provider fallback — see [Configuration](#%EF%B8%8F-configuration).
+
**Cost savings benchmark:**
We ran 33 representative prompts (greetings, code tasks, architecture design, formal proofs) through the router and simulated a typical daily workload of 70 queries (see `experiments/router_cost_savings.py`):
@@ -506,8 +503,17 @@ Config file: `~/.debot/config.json`
"openrouter": {
"apiKey": "sk-or-v1-xxx"
},
+ "anthropic": {
+ "apiKey": "sk-ant-xxx"
+ },
+ "openai": {
+ "apiKey": "sk-xxx"
+ },
"groq": {
"apiKey": "gsk_xxx"
+ },
+ "gemini": {
+ "apiKey": "AIza-xxx"
}
},
"channels": {
@@ -615,6 +621,36 @@ docker pull ghcr.io/BotMesh/debot:v1.0.0
For more info, see [Container Publishing Guide](./.github/CONTAINER_PUBLISHING.md)
+## 🛠️ Development
+
+A `Makefile` is provided for common development tasks:
+
+```bash
+make install # Install debot (builds Rust extension via maturin)
+make build # Build the Rust extension only (release mode)
+make test # Build + install + run pytest
+make lint # Run ruff linter
+```
+
+**First-time setup:**
+
+```bash
+git clone https://github.com/BotMesh/debot.git
+cd debot
+python3 -m venv .venv
+source .venv/bin/activate
+pip install patchelf # Linux only
+make install
+```
+
+**Running tests:**
+
+```bash
+make test
+```
+
+This builds the Rust extension, installs the wheel, installs dev dependencies, and runs the full test suite.
+
## 🤝 Contribute & Roadmap
PRs welcome! The codebase is intentionally small and readable. 🤗
diff --git a/case/code.gif b/case/code.gif
deleted file mode 100644
index 159dad8..0000000
Binary files a/case/code.gif and /dev/null differ
diff --git a/case/memory.gif b/case/memory.gif
deleted file mode 100644
index fc91f55..0000000
Binary files a/case/memory.gif and /dev/null differ
diff --git a/case/scedule.gif b/case/scedule.gif
deleted file mode 100644
index a2e3073..0000000
Binary files a/case/scedule.gif and /dev/null differ
diff --git a/case/search.gif b/case/search.gif
deleted file mode 100644
index fd3d067..0000000
Binary files a/case/search.gif and /dev/null differ
diff --git a/debot/agent/_context_py.py b/debot/agent/_context_py.py
index 1c3210f..6c446d5 100644
--- a/debot/agent/_context_py.py
+++ b/debot/agent/_context_py.py
@@ -180,9 +180,7 @@ def add_tool_result(
Returns:
Updated message list.
"""
- messages.append(
- {"role": "tool", "tool_call_id": tool_call_id, "name": tool_name, "content": result}
- )
+ messages.append({"role": "tool", "tool_call_id": tool_call_id, "name": tool_name, "content": result})
return messages
def add_assistant_message(
diff --git a/debot/agent/_memory_py.py b/debot/agent/_memory_py.py
index ab7d474..7418baa 100644
--- a/debot/agent/_memory_py.py
+++ b/debot/agent/_memory_py.py
@@ -178,9 +178,7 @@ def search(self, query: str, max_results: int = 5, min_score: float = 0.0) -> Li
scored.sort(key=lambda x: x[0], reverse=True)
results = []
for score, e in scored[:max_results]:
- results.append(
- {"path": e.get("path", ""), "snippet": e.get("text", ""), "score": score}
- )
+ results.append({"path": e.get("path", ""), "snippet": e.get("text", ""), "score": score})
return results
diff --git a/debot/agent/_skills_py.py b/debot/agent/_skills_py.py
index a368137..dff29e3 100644
--- a/debot/agent/_skills_py.py
+++ b/debot/agent/_skills_py.py
@@ -41,9 +41,7 @@ def list_skills(self, filter_unavailable: bool = True) -> list[dict[str, str]]:
if skill_dir.is_dir():
skill_file = skill_dir / "SKILL.md"
if skill_file.exists():
- skills.append(
- {"name": skill_dir.name, "path": str(skill_file), "source": "workspace"}
- )
+ skills.append({"name": skill_dir.name, "path": str(skill_file), "source": "workspace"})
# Built-in skills
if self.builtin_skills and self.builtin_skills.exists():
@@ -51,9 +49,7 @@ def list_skills(self, filter_unavailable: bool = True) -> list[dict[str, str]]:
if skill_dir.is_dir():
skill_file = skill_dir / "SKILL.md"
if skill_file.exists() and not any(s["name"] == skill_dir.name for s in skills):
- skills.append(
- {"name": skill_dir.name, "path": str(skill_file), "source": "builtin"}
- )
+ skills.append({"name": skill_dir.name, "path": str(skill_file), "source": "builtin"})
# Filter by requirements
if filter_unavailable:
diff --git a/debot/agent/loop.py b/debot/agent/loop.py
index 0289b56..451b4e7 100644
--- a/debot/agent/loop.py
+++ b/debot/agent/loop.py
@@ -193,19 +193,13 @@ async def _process_message(self, msg: InboundMessage) -> OutboundMessage | None:
if compaction_enabled:
# Naive token estimate: 1 token ~= chars_per_token characters
- estimated_tokens = sum(len(str(m.get("content", ""))) for m in messages) // max(
- 1, chars_per_token
- )
+ estimated_tokens = sum(len(str(m.get("content", ""))) for m in messages) // max(1, chars_per_token)
if estimated_tokens >= int(max_tokens * compaction_trigger_ratio):
if not compaction_silent:
- logger.info(
- f"Context near limit ({estimated_tokens}/{max_tokens} tokens). Running compaction."
- )
+ logger.info(f"Context near limit ({estimated_tokens}/{max_tokens} tokens). Running compaction.")
# Compact the session using configured keep_last
try:
- compacted = self.sessions.compact_session(
- msg.session_key, keep_last=compaction_keep_last
- )
+ compacted = self.sessions.compact_session(msg.session_key, keep_last=compaction_keep_last)
if compacted > 0:
# Rebuild messages from the compacted history
session = self.sessions.get_or_create(msg.session_key)
@@ -215,9 +209,7 @@ async def _process_message(self, msg: InboundMessage) -> OutboundMessage | None:
media=msg.media if msg.media else None,
)
if not compaction_silent:
- logger.info(
- f"Auto-compaction completed: {compacted} messages compacted."
- )
+ logger.info(f"Auto-compaction completed: {compacted} messages compacted.")
except Exception as e:
logger.warning(f"Auto-compaction failed: {e}")
@@ -304,8 +296,10 @@ async def _process_message(self, msg: InboundMessage) -> OutboundMessage | None:
tried.add(alt["model"])
logger.warning(
"Billing fallback: {} failed [{}] → trying same-tier {} (${:.2f}/M)",
- chosen_model, response.finish_reason,
- alt["model"], alt["cost"],
+ chosen_model,
+ response.finish_reason,
+ alt["model"],
+ alt["cost"],
)
try:
_debot_rust.record_escalation()
@@ -313,7 +307,9 @@ async def _process_message(self, msg: InboundMessage) -> OutboundMessage | None:
pass
chosen_model = alt["model"]
response = await self.provider.chat(
- messages=messages, tools=self.tools.get_definitions(), model=chosen_model
+ messages=messages,
+ tools=self.tools.get_definitions(),
+ model=chosen_model,
)
if response.finish_reason not in _fail_reasons:
rerouted = True
@@ -332,7 +328,8 @@ async def _process_message(self, msg: InboundMessage) -> OutboundMessage | None:
tried.add(fb["model"])
logger.warning(
"Billing fallback: same-tier exhausted, escalating → {} ({})",
- fb["model"], fb["tier"],
+ fb["model"],
+ fb["tier"],
)
try:
_debot_rust.record_escalation()
@@ -342,7 +339,9 @@ async def _process_message(self, msg: InboundMessage) -> OutboundMessage | None:
current_tier = fb["tier"]
esc_tier = fb["tier"]
response = await self.provider.chat(
- messages=messages, tools=self.tools.get_definitions(), model=chosen_model
+ messages=messages,
+ tools=self.tools.get_definitions(),
+ model=chosen_model,
)
if response.finish_reason not in _fail_reasons:
break
@@ -355,8 +354,11 @@ async def _process_message(self, msg: InboundMessage) -> OutboundMessage | None:
fb = json.loads(fb_json)
logger.warning(
"Escalating: {} ({}) failed [{}] → {} ({})",
- chosen_model, current_tier, response.finish_reason,
- fb["model"], fb["tier"],
+ chosen_model,
+ current_tier,
+ response.finish_reason,
+ fb["model"],
+ fb["tier"],
)
try:
_debot_rust.record_escalation()
@@ -365,7 +367,9 @@ async def _process_message(self, msg: InboundMessage) -> OutboundMessage | None:
chosen_model = fb["model"]
current_tier = fb["tier"]
response = await self.provider.chat(
- messages=messages, tools=self.tools.get_definitions(), model=chosen_model
+ messages=messages,
+ tools=self.tools.get_definitions(),
+ model=chosen_model,
)
if response.finish_reason not in _fail_reasons:
break
@@ -397,18 +401,14 @@ async def _process_message(self, msg: InboundMessage) -> OutboundMessage | None:
}
for tc in response.tool_calls
]
- messages = self.context.add_assistant_message(
- messages, response.content, tool_call_dicts
- )
+ messages = self.context.add_assistant_message(messages, response.content, tool_call_dicts)
# Execute tools
for tool_call in response.tool_calls:
args_str = json.dumps(tool_call.arguments)
logger.debug(f"Executing tool: {tool_call.name} with arguments: {args_str}")
result = await self.tools.execute(tool_call.name, tool_call.arguments)
- messages = self.context.add_tool_result(
- messages, tool_call.id, tool_call.name, result
- )
+ messages = self.context.add_tool_result(messages, tool_call.id, tool_call.name, result)
else:
# No tool calls, we're done
final_content = response.content
@@ -457,9 +457,7 @@ async def _process_system_message(self, msg: InboundMessage) -> OutboundMessage
spawn_tool.set_context(origin_channel, origin_chat_id)
# Build messages with the announce content
- messages = self.context.build_messages(
- history=session.get_history(), current_message=msg.content
- )
+ messages = self.context.build_messages(history=session.get_history(), current_message=msg.content)
# Agent loop (limited for announce handling)
iteration = 0
@@ -468,9 +466,7 @@ async def _process_system_message(self, msg: InboundMessage) -> OutboundMessage
while iteration < self.max_iterations:
iteration += 1
- response = await self.provider.chat(
- messages=messages, tools=self.tools.get_definitions(), model=self.model
- )
+ response = await self.provider.chat(messages=messages, tools=self.tools.get_definitions(), model=self.model)
if response.has_tool_calls:
tool_call_dicts = [
@@ -481,17 +477,13 @@ async def _process_system_message(self, msg: InboundMessage) -> OutboundMessage
}
for tc in response.tool_calls
]
- messages = self.context.add_assistant_message(
- messages, response.content, tool_call_dicts
- )
+ messages = self.context.add_assistant_message(messages, response.content, tool_call_dicts)
for tool_call in response.tool_calls:
args_str = json.dumps(tool_call.arguments)
logger.debug(f"Executing tool: {tool_call.name} with arguments: {args_str}")
result = await self.tools.execute(tool_call.name, tool_call.arguments)
- messages = self.context.add_tool_result(
- messages, tool_call.id, tool_call.name, result
- )
+ messages = self.context.add_tool_result(messages, tool_call.id, tool_call.name, result)
else:
final_content = response.content
break
@@ -504,9 +496,7 @@ async def _process_system_message(self, msg: InboundMessage) -> OutboundMessage
session.add_message("assistant", final_content)
self.sessions.save(session)
- return OutboundMessage(
- channel=origin_channel, chat_id=origin_chat_id, content=final_content
- )
+ return OutboundMessage(channel=origin_channel, chat_id=origin_chat_id, content=final_content)
async def process_direct(self, content: str, session_key: str = "cli:direct") -> str:
"""
diff --git a/debot/agent/subagent.py b/debot/agent/subagent.py
index c663235..20c527f 100644
--- a/debot/agent/subagent.py
+++ b/debot/agent/subagent.py
@@ -201,9 +201,7 @@ async def _announce_result(
)
await self.bus.publish_inbound(msg)
- logger.debug(
- f"Subagent [{task_id}] announced result to {origin['channel']}:{origin['chat_id']}"
- )
+ logger.debug(f"Subagent [{task_id}] announced result to {origin['channel']}:{origin['chat_id']}")
def _build_subagent_prompt(self, task: str) -> str:
"""Build a focused system prompt for the subagent."""
diff --git a/debot/agent/tools/_web_py.py b/debot/agent/tools/_web_py.py
index 478b60e..1ef9877 100644
--- a/debot/agent/tools/_web_py.py
+++ b/debot/agent/tools/_web_py.py
@@ -113,9 +113,7 @@ class WebFetchTool(Tool):
def __init__(self, max_chars: int = 50000):
self.max_chars = max_chars
- async def execute(
- self, url: str, extractMode: str = "markdown", maxChars: int | None = None, **kwargs: Any
- ) -> str:
+ async def execute(self, url: str, extractMode: str = "markdown", maxChars: int | None = None, **kwargs: Any) -> str:
from readability import Document
max_chars = maxChars or self.max_chars
@@ -126,9 +124,7 @@ async def execute(
return json.dumps({"error": f"URL validation failed: {error_msg}", "url": url})
try:
- async with httpx.AsyncClient(
- follow_redirects=True, max_redirects=MAX_REDIRECTS, timeout=30.0
- ) as client:
+ async with httpx.AsyncClient(follow_redirects=True, max_redirects=MAX_REDIRECTS, timeout=30.0) as client:
r = await client.get(url, headers={"User-Agent": USER_AGENT})
r.raise_for_status()
@@ -140,11 +136,7 @@ async def execute(
# HTML
elif "text/html" in ctype or r.text[:256].lower().startswith((" str:
text,
flags=re.I,
)
- text = re.sub(
- r"]*>([\s\S]*?)", lambda m: f"\n- {_strip_tags(m[1])}", text, flags=re.I
- )
+ text = re.sub(r"]*>([\s\S]*?)", lambda m: f"\n- {_strip_tags(m[1])}", text, flags=re.I)
text = re.sub(r"(p|div|section|article)>", "\n\n", text, flags=re.I)
text = re.sub(r"<(br|hr)\s*/?>", "\n", text, flags=re.I)
return _normalize(_strip_tags(text))
diff --git a/debot/agent/tools/message.py b/debot/agent/tools/message.py
index 2d96ae7..ce50de1 100644
--- a/debot/agent/tools/message.py
+++ b/debot/agent/tools/message.py
@@ -51,9 +51,7 @@ def parameters(self) -> dict[str, Any]:
"required": ["content"],
}
- async def execute(
- self, content: str, channel: str | None = None, chat_id: str | None = None, **kwargs: Any
- ) -> str:
+ async def execute(self, content: str, channel: str | None = None, chat_id: str | None = None, **kwargs: Any) -> str:
channel = channel or self._default_channel
chat_id = chat_id or self._default_chat_id
diff --git a/debot/bus/_queue_py.py b/debot/bus/_queue_py.py
index c1a5ad3..d3d1a73 100644
--- a/debot/bus/_queue_py.py
+++ b/debot/bus/_queue_py.py
@@ -19,9 +19,7 @@ class MessageBus:
def __init__(self):
self.inbound: asyncio.Queue[InboundMessage] = asyncio.Queue()
self.outbound: asyncio.Queue[OutboundMessage] = asyncio.Queue()
- self._outbound_subscribers: dict[
- str, list[Callable[[OutboundMessage], Awaitable[None]]]
- ] = {}
+ self._outbound_subscribers: dict[str, list[Callable[[OutboundMessage], Awaitable[None]]]] = {}
self._running = False
async def publish_inbound(self, msg: InboundMessage) -> None:
@@ -40,9 +38,7 @@ async def consume_outbound(self) -> OutboundMessage:
"""Consume the next outbound message (blocks until available)."""
return await self.outbound.get()
- def subscribe_outbound(
- self, channel: str, callback: Callable[[OutboundMessage], Awaitable[None]]
- ) -> None:
+ def subscribe_outbound(self, channel: str, callback: Callable[[OutboundMessage], Awaitable[None]]) -> None:
"""Subscribe to outbound messages for a specific channel."""
if channel not in self._outbound_subscribers:
self._outbound_subscribers[channel] = []
diff --git a/debot/channels/manager.py b/debot/channels/manager.py
index 75d44ed..fb83178 100644
--- a/debot/channels/manager.py
+++ b/debot/channels/manager.py
@@ -121,10 +121,7 @@ def get_channel(self, name: str) -> BaseChannel | None:
def get_status(self) -> dict[str, Any]:
"""Get status of all channels."""
- return {
- name: {"enabled": True, "running": channel.is_running}
- for name, channel in self.channels.items()
- }
+ return {name: {"enabled": True, "running": channel.is_running} for name, channel in self.channels.items()}
@property
def enabled_channels(self) -> list[str]:
diff --git a/debot/cli/commands.py b/debot/cli/commands.py
index 1b66681..0024d22 100644
--- a/debot/cli/commands.py
+++ b/debot/cli/commands.py
@@ -68,9 +68,7 @@ def onboard():
console.print(" 1. Add your API key to [cyan]~/.debot/config.json[/cyan]")
console.print(" Get one at: https://openrouter.ai/keys")
console.print(' 2. Chat: [cyan]debot agent -m "Hello!"[/cyan]')
- console.print(
- "\n[dim]Want Telegram/WhatsApp? See: https://github.com/BotMesh/debot#-chat-apps[/dim]"
- )
+ console.print("\n[dim]Want Telegram/WhatsApp? See: https://github.com/BotMesh/debot#-chat-apps[/dim]")
def _create_workspace_templates(workspace: Path):
@@ -155,18 +153,10 @@ def _create_workspace_templates(workspace: Path):
@config_app.command("compaction")
def config_compaction(
show: bool = typer.Option(True, "--show/--no-show", help="Show current compaction settings"),
- enabled: bool | None = typer.Option(
- None, "--enabled/--disabled", help="Enable or disable auto-compaction"
- ),
- keep_last: int | None = typer.Option(
- None, "--keep-last", "-k", help="Number of recent messages to keep"
- ),
- trigger_ratio: float | None = typer.Option(
- None, "--trigger-ratio", help="Trigger ratio (0.0-1.0) of model tokens"
- ),
- silent: bool | None = typer.Option(
- None, "--silent/--no-silent", help="Run compactions silently (no logs)"
- ),
+ enabled: bool | None = typer.Option(None, "--enabled/--disabled", help="Enable or disable auto-compaction"),
+ keep_last: int | None = typer.Option(None, "--keep-last", "-k", help="Number of recent messages to keep"),
+ trigger_ratio: float | None = typer.Option(None, "--trigger-ratio", help="Trigger ratio (0.0-1.0) of model tokens"),
+ silent: bool | None = typer.Option(None, "--silent/--no-silent", help="Run compactions silently (no logs)"),
chars_per_token: int | None = typer.Option(
None, "--chars-per-token", help="Characters per token for naive estimator"
),
@@ -212,18 +202,10 @@ def config_compaction(
@config_app.command("compaction-model")
def config_compaction_model(
model: str = typer.Argument(..., help="Model name (e.g., anthropic/claude-opus-4-5)"),
- show: bool = typer.Option(
- True, "--show/--no-show", help="Show current model-specific settings"
- ),
- keep_last: int | None = typer.Option(
- None, "--keep-last", "-k", help="Override keep_last for this model"
- ),
- trigger_ratio: float | None = typer.Option(
- None, "--trigger-ratio", help="Override trigger_ratio for this model"
- ),
- silent: bool | None = typer.Option(
- None, "--silent/--no-silent", help="Override silent for this model"
- ),
+ show: bool = typer.Option(True, "--show/--no-show", help="Show current model-specific settings"),
+ keep_last: int | None = typer.Option(None, "--keep-last", "-k", help="Override keep_last for this model"),
+ trigger_ratio: float | None = typer.Option(None, "--trigger-ratio", help="Override trigger_ratio for this model"),
+ silent: bool | None = typer.Option(None, "--silent/--no-silent", help="Override silent for this model"),
clear: bool = typer.Option(False, "--clear", help="Remove all overrides for this model"),
):
"""View or set per-model compaction overrides."""
@@ -315,7 +297,9 @@ def gateway(
raise typer.Exit(1)
provider = LiteLLMProvider(
- api_key=api_key, api_base=api_base, default_model=config.agents.defaults.model,
+ api_key=api_key,
+ api_base=api_base,
+ default_model=config.agents.defaults.model,
all_api_keys=config.get_all_api_keys(),
)
@@ -424,7 +408,9 @@ def agent(
bus = MessageBus()
provider = LiteLLMProvider(
- api_key=api_key, api_base=api_base, default_model=config.agents.defaults.model,
+ api_key=api_key,
+ api_base=api_base,
+ default_model=config.agents.defaults.model,
all_api_keys=config.get_all_api_keys(),
)
@@ -493,27 +479,19 @@ async def run_interactive():
@sessions_app.command("compact")
def sessions_compact(
session_key: str = typer.Argument(..., help="Session key, e.g. telegram:12345"),
- keep_last: int = typer.Option(
- 50, "--keep-last", "-k", help="Number of recent messages to keep"
- ),
- instruction: str | None = typer.Option(
- None, "--instruction", "-i", help="Optional compaction instruction"
- ),
+ keep_last: int = typer.Option(50, "--keep-last", "-k", help="Number of recent messages to keep"),
+ instruction: str | None = typer.Option(None, "--instruction", "-i", help="Optional compaction instruction"),
):
"""Compact an existing session's history into a compact summary entry."""
from debot.config.loader import load_config
config = load_config()
- sm = __import__("debot.session._manager_py", fromlist=["SessionManager"]).SessionManager(
- config.workspace_path
- )
+ sm = __import__("debot.session._manager_py", fromlist=["SessionManager"]).SessionManager(config.workspace_path)
try:
compacted = sm.compact_session(session_key, keep_last=keep_last, instruction=instruction)
if compacted:
- console.print(
- f"[green]✓[/green] Compacted {compacted} messages for session {session_key}"
- )
+ console.print(f"[green]✓[/green] Compacted {compacted} messages for session {session_key}")
else:
console.print(f"[yellow]No messages to compact for session {session_key}[/yellow]")
except Exception as e:
@@ -747,9 +725,7 @@ def cron_list(
# Format next run
next_run = ""
if job.state.next_run_at_ms:
- next_time = time.strftime(
- "%Y-%m-%d %H:%M", time.localtime(job.state.next_run_at_ms / 1000)
- )
+ next_time = time.strftime("%Y-%m-%d %H:%M", time.localtime(job.state.next_run_at_ms / 1000))
next_run = next_time
status = "[green]enabled[/green]" if job.enabled else "[dim]disabled[/dim]"
@@ -768,9 +744,7 @@ def cron_add(
at: str = typer.Option(None, "--at", help="Run once at time (ISO format)"),
deliver: bool = typer.Option(False, "--deliver", "-d", help="Deliver response to channel"),
to: str = typer.Option(None, "--to", help="Recipient for delivery"),
- channel: str = typer.Option(
- None, "--channel", help="Channel for delivery (e.g. 'telegram', 'whatsapp')"
- ),
+ channel: str = typer.Option(None, "--channel", help="Channel for delivery (e.g. 'telegram', 'whatsapp')"),
):
"""Add a scheduled job."""
from debot.config.loader import get_data_dir
@@ -879,12 +853,8 @@ def status():
console.print(f"{__logo__} debot Status\n")
- console.print(
- f"Config: {config_path} {'[green]✓[/green]' if config_path.exists() else '[red]✗[/red]'}"
- )
- console.print(
- f"Workspace: {workspace} {'[green]✓[/green]' if workspace.exists() else '[red]✗[/red]'}"
- )
+ console.print(f"Config: {config_path} {'[green]✓[/green]' if config_path.exists() else '[red]✗[/red]'}")
+ console.print(f"Workspace: {workspace} {'[green]✓[/green]' if workspace.exists() else '[red]✗[/red]'}")
if config_path.exists():
console.print(f"Model: {config.agents.defaults.model}")
@@ -896,19 +866,11 @@ def status():
has_gemini = bool(config.providers.gemini.api_key)
has_vllm = bool(config.providers.vllm.api_base)
- console.print(
- f"OpenRouter API: {'[green]✓[/green]' if has_openrouter else '[dim]not set[/dim]'}"
- )
- console.print(
- f"Anthropic API: {'[green]✓[/green]' if has_anthropic else '[dim]not set[/dim]'}"
- )
+ console.print(f"OpenRouter API: {'[green]✓[/green]' if has_openrouter else '[dim]not set[/dim]'}")
+ console.print(f"Anthropic API: {'[green]✓[/green]' if has_anthropic else '[dim]not set[/dim]'}")
console.print(f"OpenAI API: {'[green]✓[/green]' if has_openai else '[dim]not set[/dim]'}")
console.print(f"Gemini API: {'[green]✓[/green]' if has_gemini else '[dim]not set[/dim]'}")
- vllm_status = (
- f"[green]✓ {config.providers.vllm.api_base}[/green]"
- if has_vllm
- else "[dim]not set[/dim]"
- )
+ vllm_status = f"[green]✓ {config.providers.vllm.api_base}[/green]" if has_vllm else "[dim]not set[/dim]"
console.print(f"vLLM/Local: {vllm_status}")
# Router status
@@ -981,14 +943,7 @@ def router_test(
for dim, score in sorted(scores.items(), key=lambda x: -x[1]):
bar_len = int(score * 20)
- bar = (
- "[green]"
- + "\u2588" * bar_len
- + "[/green]"
- + "[dim]"
- + "\u2591" * (20 - bar_len)
- + "[/dim]"
- )
+ bar = "[green]" + "\u2588" * bar_len + "[/green]" + "[dim]" + "\u2591" * (20 - bar_len) + "[/dim]"
table.add_row(dim, f"{score:.3f}", bar)
console.print(table)
@@ -1040,8 +995,7 @@ def router_metrics():
last = metrics.get("last_decision")
if last:
console.print(
- f"\nLast: [cyan]{last['model']}[/cyan] "
- f"(tier={last['tier']}, confidence={last['confidence']:.2f})"
+ f"\nLast: [cyan]{last['model']}[/cyan] (tier={last['tier']}, confidence={last['confidence']:.2f})"
)
except ImportError:
diff --git a/debot/cron/_service_py.py b/debot/cron/_service_py.py
index c80f654..cf1444b 100644
--- a/debot/cron/_service_py.py
+++ b/debot/cron/_service_py.py
@@ -154,9 +154,7 @@ async def start(self) -> None:
self._recompute_next_runs()
self._save_store()
self._arm_timer()
- logger.info(
- f"Cron service started with {len(self._store.jobs if self._store else [])} jobs"
- )
+ logger.info(f"Cron service started with {len(self._store.jobs if self._store else [])} jobs")
def stop(self) -> None:
"""Stop the cron service."""
@@ -178,9 +176,7 @@ def _get_next_wake_ms(self) -> int | None:
"""Get the earliest next run time across all jobs."""
if not self._store:
return None
- times = [
- j.state.next_run_at_ms for j in self._store.jobs if j.enabled and j.state.next_run_at_ms
- ]
+ times = [j.state.next_run_at_ms for j in self._store.jobs if j.enabled and j.state.next_run_at_ms]
return min(times) if times else None
def _arm_timer(self) -> None:
@@ -209,9 +205,7 @@ async def _on_timer(self) -> None:
now = _now_ms()
due_jobs = [
- j
- for j in self._store.jobs
- if j.enabled and j.state.next_run_at_ms and now >= j.state.next_run_at_ms
+ j for j in self._store.jobs if j.enabled and j.state.next_run_at_ms and now >= j.state.next_run_at_ms
]
for job in due_jobs:
diff --git a/debot/providers/litellm_provider.py b/debot/providers/litellm_provider.py
index 5ea5bb1..157f0ab 100644
--- a/debot/providers/litellm_provider.py
+++ b/debot/providers/litellm_provider.py
@@ -28,9 +28,7 @@ def __init__(
self.default_model = default_model
# Detect OpenRouter by api_key prefix or explicit api_base
- self.is_openrouter = (api_key and api_key.startswith("sk-or-")) or (
- api_base and "openrouter" in api_base
- )
+ self.is_openrouter = (api_key and api_key.startswith("sk-or-")) or (api_base and "openrouter" in api_base)
# Track if using custom endpoint (vLLM, etc.)
self.is_vllm = bool(api_base) and not self.is_openrouter
@@ -114,9 +112,7 @@ async def chat(
# For Zhipu/Z.ai, ensure prefix is present
# Handle cases like "glm-4.7-flash" -> "zhipu/glm-4.7-flash"
if ("glm" in model.lower() or "zhipu" in model.lower()) and not (
- model.startswith("zhipu/")
- or model.startswith("zai/")
- or model.startswith("openrouter/")
+ model.startswith("zhipu/") or model.startswith("zai/") or model.startswith("openrouter/")
):
model = f"zhipu/{model}"
@@ -155,13 +151,22 @@ async def chat(
# words like "max_tokens" (e.g. "requires more credits, or fewer
# max_tokens"), so must be checked before context keywords.
billing_keywords = (
- "credits", "afford", "402", "billing",
- "payment", "quota", "budget",
+ "credits",
+ "afford",
+ "402",
+ "billing",
+ "payment",
+ "quota",
+ "budget",
)
context_keywords = (
- "context_length", "context window", "maximum context",
- "token limit", "too many tokens",
- "input too long", "reduce your prompt",
+ "context_length",
+ "context window",
+ "maximum context",
+ "token limit",
+ "too many tokens",
+ "input too long",
+ "reduce your prompt",
)
if any(kw in err_str for kw in billing_keywords):
finish_reason = "insufficient_credits"
diff --git a/debot/providers/transcription.py b/debot/providers/transcription.py
index 24b365a..2ee74e5 100644
--- a/debot/providers/transcription.py
+++ b/debot/providers/transcription.py
@@ -48,9 +48,7 @@ async def transcribe(self, file_path: str | Path) -> str:
"Authorization": f"Bearer {self.api_key}",
}
- response = await client.post(
- self.api_url, headers=headers, files=files, timeout=60.0
- )
+ response = await client.post(self.api_url, headers=headers, files=files, timeout=60.0)
response.raise_for_status()
data = response.json()
diff --git a/debot/session/_manager_py.py b/debot/session/_manager_py.py
index e5202f0..05a6321 100644
--- a/debot/session/_manager_py.py
+++ b/debot/session/_manager_py.py
@@ -104,9 +104,7 @@ def get_history(self, max_messages: int = 50) -> list[dict[str, Any]]:
List of messages in LLM format.
"""
# Get recent messages
- recent = (
- self.messages[-max_messages:] if len(self.messages) > max_messages else self.messages
- )
+ recent = self.messages[-max_messages:] if len(self.messages) > max_messages else self.messages
# Convert to LLM format (just role and content)
return [{"role": m["role"], "content": m["content"]} for m in recent]
@@ -178,11 +176,7 @@ def _load(self, key: str) -> Session | None:
if data.get("_type") == "metadata":
metadata = data.get("metadata", {})
- created_at = (
- datetime.fromisoformat(data["created_at"])
- if data.get("created_at")
- else None
- )
+ created_at = datetime.fromisoformat(data["created_at"]) if data.get("created_at") else None
else:
messages.append(data)
diff --git a/debot/skills/installer.py b/debot/skills/installer.py
index 1734977..737b379 100644
--- a/debot/skills/installer.py
+++ b/debot/skills/installer.py
@@ -82,9 +82,7 @@ def install_from_system(name: str, workspace: Optional[Path] = None) -> Path:
src = pkg_skills / alt
if not src.exists() or not src.is_dir():
# list available system skills
- available = sorted(
- [p.name for p in pkg_skills.iterdir() if p.is_dir() and (p / "SKILL.md").exists()]
- )
+ available = sorted([p.name for p in pkg_skills.iterdir() if p.is_dir() and (p / "SKILL.md").exists()])
raise RuntimeError(f"System skill '{name}' not found. Available: {', '.join(available)}")
dest = skills_dir / name
diff --git a/pyproject.toml b/pyproject.toml
index 6fde117..7498f24 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -60,8 +60,8 @@ include = [
]
[tool.ruff]
-line-length = 100
target-version = "py311"
+line-length = 120
[tool.ruff.lint]
select = ["E", "F", "I", "N", "W"]
diff --git a/rust/src/router/config.rs b/rust/src/router/config.rs
index b034686..ac408dd 100644
--- a/rust/src/router/config.rs
+++ b/rust/src/router/config.rs
@@ -33,43 +33,45 @@ pub fn next_tier(current: &str) -> Option<&'static str> {
TIER_ORDER.get(idx + 1).copied()
}
-/// Returns the next cheaper tier for downgrade, or None if already at bottom.
-pub fn prev_tier(current: &str) -> Option<&'static str> {
- let idx = TIER_ORDER.iter().position(|t| *t == current)?;
- if idx == 0 {
- None
- } else {
- Some(TIER_ORDER[idx - 1])
- }
-}
-
/// Alternative models per tier, sorted by cost ascending (cheapest first).
/// Includes models from multiple providers for cross-provider billing fallback.
pub fn tier_alternatives() -> HashMap<&'static str, Vec<&'static str>> {
let mut m = HashMap::new();
- m.insert("SIMPLE", vec![
- "groq/llama-3.3-70b-versatile", // free tier
- "deepseek/deepseek-chat", // $0.42
- "openai/gpt-4o-mini", // $0.60
- "openai/gpt-3.5-turbo", // $1.50
- ]);
- m.insert("MEDIUM", vec![
- "groq/llama-3.3-70b-versatile", // free tier
- "deepseek/deepseek-chat", // $0.42
- "openai/gpt-4o-mini", // $0.60
- "minimax/minimax-m2", // $1.20
- ]);
- m.insert("COMPLEX", vec![
- "groq/llama-3.3-70b-versatile", // free tier (best-effort)
- "anthropic/claude-sonnet-4-5", // $15.00
- "openai/gpt-4o", // $10.00
- "anthropic/claude-opus-4-5", // $25.00
- ]);
- m.insert("REASONING", vec![
- "groq/llama-3.3-70b-versatile", // free tier (best-effort)
- "deepseek/deepseek-reasoner", // $2.19
- "openai/o3-mini", // $4.40
- "openai/o3", // $8.00
- ]);
+ m.insert(
+ "SIMPLE",
+ vec![
+ "groq/llama-3.3-70b-versatile", // free tier
+ "deepseek/deepseek-chat", // $0.42
+ "openai/gpt-4o-mini", // $0.60
+ "openai/gpt-3.5-turbo", // $1.50
+ ],
+ );
+ m.insert(
+ "MEDIUM",
+ vec![
+ "groq/llama-3.3-70b-versatile", // free tier
+ "deepseek/deepseek-chat", // $0.42
+ "openai/gpt-4o-mini", // $0.60
+ "minimax/minimax-m2", // $1.20
+ ],
+ );
+ m.insert(
+ "COMPLEX",
+ vec![
+ "groq/llama-3.3-70b-versatile", // free tier (best-effort)
+ "anthropic/claude-sonnet-4-5", // $15.00
+ "openai/gpt-4o", // $10.00
+ "anthropic/claude-opus-4-5", // $25.00
+ ],
+ );
+ m.insert(
+ "REASONING",
+ vec![
+ "groq/llama-3.3-70b-versatile", // free tier (best-effort)
+ "deepseek/deepseek-reasoner", // $2.19
+ "openai/o3-mini", // $4.40
+ "openai/o3", // $8.00
+ ],
+ );
m
}
diff --git a/test_docker.sh b/test_docker.sh
deleted file mode 100755
index dc4579f..0000000
--- a/test_docker.sh
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/usr/bin/env bash
-set -euo pipefail
-
-IMAGE_NAME="debot-test"
-
-echo "=== Building Docker image ==="
-docker build -t "$IMAGE_NAME" .
-
-echo ""
-echo "=== Running 'debot onboard' ==="
-docker run --name debot-test-run "$IMAGE_NAME" onboard
-STATUS_OUTPUT=$(docker commit debot-test-run debot-test-onboarded > /dev/null && \
- docker run --rm debot-test-onboarded status 2>&1) || true
-
-echo "$STATUS_OUTPUT"
-
-echo ""
-echo "=== Validating output ==="
-PASS=true
-
-check() {
- if echo "$STATUS_OUTPUT" | grep -q "$1"; then
- echo " PASS: found '$1'"
- else
- echo " FAIL: missing '$1'"
- PASS=false
- fi
-}
-
-check "debot Status"
-check "Config:"
-check "Workspace:"
-check "Model:"
-check "OpenRouter API:"
-check "Anthropic API:"
-check "OpenAI API:"
-
-echo ""
-if $PASS; then
- echo "=== All checks passed ==="
-else
- echo "=== Some checks FAILED ==="
- exit 1
-fi
-
-# Cleanup
-echo ""
-echo "=== Cleanup ==="
-docker rm -f debot-test-run 2>/dev/null || true
-docker rmi -f debot-test-onboarded 2>/dev/null || true
-docker rmi -f "$IMAGE_NAME" 2>/dev/null || true
-echo "Done."