Add three-tier model routing with VRAM management and benchmark suite

- Three-tier routing: light (router answers directly ~3s), medium (qwen3:4b
  + tools ~60s), complex (/think prefix → qwen3:8b + subagents ~140s)
- Router: qwen2.5:1.5b, temp=0, regex pre-classifier + raw-text LLM classify
- VRAMManager: explicit flush/poll/prewarm to prevent Ollama CPU-spill bug
- agent_factory: build_medium_agent and build_complex_agent using deepagents
  (TodoListMiddleware + SubAgentMiddleware with research/memory subagents)
- Fix: split Telegram replies >4000 chars into multiple messages
- Benchmark: 30 questions (easy/medium/hard) — 10/10/10 verified passing
  easy→light, medium→medium, hard→complex with VRAM flush confirmed

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
Alvis
2026-02-28 17:54:51 +00:00
parent 1718d70203
commit ea77b2308b
8 changed files with 1400 additions and 308 deletions

54
agent_factory.py Normal file
View File

@@ -0,0 +1,54 @@
from deepagents import create_deep_agent, SubAgent
def build_medium_agent(model, agent_tools: list, system_prompt: str):
    """Construct the medium-tier agent.

    A plain ``create_deep_agent`` with TodoList planning and the full tool
    list, but no subagents attached (those belong to the complex tier).

    Args:
        model: Chat model instance to drive the agent.
        agent_tools: Every tool the agent may call.
        system_prompt: System prompt for the agent.

    Returns:
        The agent produced by ``create_deep_agent``.
    """
    agent_config = {
        "model": model,
        "tools": agent_tools,
        "system_prompt": system_prompt,
    }
    return create_deep_agent(**agent_config)
def build_complex_agent(model, agent_tools: list, system_prompt: str):
    """Construct the complex-tier agent with research and memory subagents.

    Tools are partitioned by name so each subagent only sees what it needs:
    the research subagent gets web search, the memory subagent gets the
    memory-retrieval tools. The parent agent keeps the full tool list.

    Args:
        model: Chat model instance shared by the parent agent and subagents.
        agent_tools: Every tool available; filtered per subagent by tool name.
        system_prompt: System prompt for the parent agent.

    Returns:
        The agent produced by ``create_deep_agent`` with two subagents wired in.
    """
    def _tool_name(tool) -> str:
        # Tools without a ``name`` attribute match nothing.
        return getattr(tool, "name", "")

    search_tools = [t for t in agent_tools if _tool_name(t) == "web_search"]
    memory_tool_names = ("search_memory", "get_all_memories")
    recall_tools = [t for t in agent_tools if _tool_name(t) in memory_tool_names]

    researcher: SubAgent = {
        "name": "research",
        "description": (
            "Runs multiple web searches in parallel and synthesizes findings. "
            "Use for thorough research tasks requiring several queries."
        ),
        "system_prompt": (
            "You are a research specialist. Search the web thoroughly using multiple queries. "
            "Cite sources and synthesize information into a clear summary."
        ),
        "tools": search_tools,
        "model": model,
    }
    recaller: SubAgent = {
        "name": "memory",
        "description": (
            "Searches and retrieves all relevant memories about the user comprehensively. "
            "Use to gather full context from past conversations."
        ),
        "system_prompt": (
            "You are a memory specialist. Search broadly using multiple queries. "
            "Return all relevant facts and context you find."
        ),
        "tools": recall_tools,
        "model": model,
    }

    return create_deep_agent(
        model=model,
        tools=agent_tools,
        system_prompt=system_prompt,
        subagents=[researcher, recaller],
    )