import re
from typing import Optional
from langchain_core.messages import SystemMessage, HumanMessage
# ── Regex pre-classifier ──────────────────────────────────────────────────────
# Catches obvious light-tier messages before spending an LLM call.
# A single compiled alternation, anchored at both ends: the WHOLE message must
# match one alternative (plus optional trailing punctuation/whitespace),
# case-insensitively. Used via .match() in Router.route().
_LIGHT_PATTERNS = re.compile(
    r"^("
    # Greetings / farewells
    r"hi|hello|hey|yo|sup|howdy|good morning|good evening|good night|good afternoon"
    r"|bye|goodbye|see you|cya|later|ttyl"
    # Acknowledgements / small talk
    r"|thanks?|thank you|thx|ty|ok|okay|k|cool|great|awesome|perfect|sounds good|got it|nice|sure"
    # NOTE(review): the "how are you\?" alternative is redundant — the
    # [\s!.?]*$ tail already absorbs trailing punctuation for "how are you".
    r"|how are you|how are you\?|how are you doing(\s+today)?[?!.]*"
    r"|what.?s up"
    # Calendar facts: "what day comes after X?" / "what comes after X?"
    r"|what\s+day\s+(comes\s+after|follows|is\s+after)\s+\w+[?!.]*"
    r"|what\s+comes\s+after\s+\w+[?!.]*"
    # Acronym expansions: "what does X stand for?"
    r"|what\s+does\s+\w+\s+stand\s+for[?!.]*"
    r")[\s!.?]*$",
    re.IGNORECASE,
)
# Queries that require live data — never answer from static knowledge.
# Unanchored word-boundary .search(): a hit ANYWHERE in the message forces the
# medium tier (web search) before the LLM classifier is even consulted.
# NOTE(review): the bare currency tokens ("usd", "eur", "btc") are broad — any
# message merely mentioning them is forced to medium; confirm that's intended.
_MEDIUM_FORCE_PATTERNS = re.compile(
    r"\b(weather|forecast|temperature|rain(ing)?|snow(ing)?|humidity|wind speed"
    r"|today.?s news|breaking news|latest news|news today|current events"
    r"|bitcoin price|crypto price|stock price|exchange rate|usd|eur|btc"
    r"|right now|currently|at the moment|live score|score now|score today"
    r"|open now|hours today|is .+ open)\b",
    re.IGNORECASE,
)
# ── LLM classification prompt ─────────────────────────────────────────────────
# Few-shot prompt sent to the router model when neither regex pre-filter
# fires. The model is asked for a single word; _parse_tier() tolerates
# noisier output. {message} is filled via str.format in Router.route().
CLASSIFY_PROMPT = """Classify the message. Output ONLY one word: light, medium, or complex.
LIGHT = answerable from general knowledge, no internet needed:
what is 2+2 / what is the capital of France / name the three primary colors
tell me a short joke / is the sky blue / is water wet
MEDIUM = requires web search or the user's stored memories:
current weather / today's news / Bitcoin price / what did we talk about
what is my name / where do I live / what is my job / do I have any pets
what do you know about me / what are my preferences / what did I tell you
COMPLEX = /think prefix only:
/think compare frameworks / /think plan a trip
Message: {message}
Output (one word only — light, medium, or complex):"""
# System prompt used to answer light-tier messages directly with the router
# model (1-3 sentences); conversation history is appended when available.
LIGHT_REPLY_PROMPT = """You are a helpful Telegram assistant. Answer briefly and naturally (1-3 sentences). Be friendly."""
def _format_history(history: list[dict]) -> str:
if not history:
return "(none)"
lines = []
for msg in history:
role = msg.get("role", "?")
content = str(msg.get("content", ""))[:200]
lines.append(f"{role}: {content}")
return "\n".join(lines)
def _parse_tier(text: str) -> str:
"""Extract tier from raw model output. Default to medium."""
t = text.strip().lower()
snippet = t[:60]
if "complex" in snippet:
return "complex"
if "medium" in snippet:
return "medium"
if "light" in snippet:
return "light"
# Model invented a descriptive category (e.g. "simplefact", "trivial", "basic") →
# treat as light since it recognised the question doesn't need tools
if any(w in snippet for w in ("simple", "fact", "trivial", "basic", "easy", "general")):
return "light"
return "medium" # safe default
class Router:
    """Tier router for incoming chat messages.

    Decides whether a message is "light" (answer directly from general
    knowledge), "medium" (needs web search / stored memories), or "complex"
    (explicit /think request). Cheap regex pre-filters run first; an LLM
    classification call is the fallback. Light-tier messages are also
    answered here; heavier tiers are handed back to the caller.
    """

    # Strips <think>...</think> reasoning blocks some models emit before the
    # actual answer. NOTE(review): the original expression here was
    # re.sub(r".*?", "", ...), which is a no-op (an empty lazy match replaced
    # by the empty string removes nothing) — the tag literals were almost
    # certainly lost; confirm the router model emits <think> tags.
    _THINK_TAG_RE = re.compile(r"<think>.*?</think>", re.DOTALL)

    def __init__(self, model):
        # `model` is any object exposing an async `ainvoke(messages)` API
        # (e.g. a langchain chat model) returning an object with `.content`.
        self.model = model

    @classmethod
    def _strip_think(cls, text: str) -> str:
        """Remove reasoning tags and surrounding whitespace from model output."""
        return cls._THINK_TAG_RE.sub("", text).strip()

    async def route(
        self,
        message: str,
        history: list[dict],
        force_complex: bool = False,
    ) -> tuple[str, Optional[str]]:
        """Classify `message`; for the light tier also generate the reply.

        Returns (tier, reply_or_None):
          * ("light", reply_text) — answered inline with a second model call
          * ("medium", None) / ("complex", None) — caller handles the message
        Any classification failure degrades to ("medium", None).
        """
        if force_complex:
            return "complex", None

        text = message.strip()

        # Step 0a: live-data queries must never be answered from static
        # knowledge — force the medium tier (web search).
        if _MEDIUM_FORCE_PATTERNS.search(text):
            print("[router] regex→medium (real-time query)", flush=True)
            return "medium", None

        # Step 0b: obvious small talk / trivia — skip the classification call.
        if _LIGHT_PATTERNS.match(text):
            print("[router] regex→light", flush=True)
            return await self._generate_light_reply(message, history)

        # Step 1: LLM classification with raw one-word output.
        try:
            classify_response = await self.model.ainvoke([
                HumanMessage(content=CLASSIFY_PROMPT.format(message=message)),
            ])
            raw = self._strip_think(classify_response.content or "")
            tier = _parse_tier(raw)
            # "complex" is reserved for explicit /think requests; downgrade
            # anything the model over-classifies.
            if tier == "complex" and not message.startswith("/think"):
                tier = "medium"
            print(f"[router] raw={raw[:30]!r} → tier={tier}", flush=True)
        except Exception as e:
            # Best-effort: any model/transport error falls back to medium.
            print(f"[router] classify error, defaulting to medium: {e}", flush=True)
            return "medium", None

        if tier != "light":
            return tier, None
        return await self._generate_light_reply(message, history)

    async def _generate_light_reply(
        self, message: str, history: list[dict]
    ) -> tuple[str, Optional[str]]:
        """Generate a short reply with the router model for a light message.

        Falls back to ("medium", None) when the model errors out or returns
        an empty reply, so the caller's heavier pipeline takes over.
        """
        history_text = _format_history(history)
        context = f"\nConversation history:\n{history_text}" if history else ""
        try:
            reply_response = await self.model.ainvoke([
                SystemMessage(content=LIGHT_REPLY_PROMPT + context),
                HumanMessage(content=message),
            ])
            reply_text = self._strip_think(reply_response.content or "")
            if not reply_text:
                print("[router] light reply empty, falling back to medium", flush=True)
                return "medium", None
            print(f"[router] light reply: {len(reply_text)} chars", flush=True)
            return "light", reply_text
        except Exception as e:
            print(f"[router] light reply error, falling back to medium: {e}", flush=True)
            return "medium", None