Skip preflight IO (memory/URL/fast-tools) when no_inference=True

In no_inference mode only the routing decision matters — fetching
memories and URLs adds latency without affecting the classification.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
Committed: 2026-03-24 07:37:55 +00:00
parent b7d5896076
commit 4d37ac65b2

View File

@@ -446,16 +446,20 @@ async def _run_agent_pipeline(
print(f"[agent] running: {clean_message[:80]!r}", flush=True) print(f"[agent] running: {clean_message[:80]!r}", flush=True)
# Fetch URL content, memories, and fast-tool context concurrently # Fetch URL content, memories, and fast-tool context concurrently
url_context, memories, fast_context = await asyncio.gather( # Skip preflight IO in no_inference mode — only routing decision needed
_fetch_urls_from_message(clean_message), if no_inference:
_retrieve_memories(clean_message, session_id), url_context = memories = fast_context = None
_fast_tool_runner.run_matching(clean_message), else:
) url_context, memories, fast_context = await asyncio.gather(
if url_context: _fetch_urls_from_message(clean_message),
print(f"[agent] crawl4ai: {len(url_context)} chars fetched", flush=True) _retrieve_memories(clean_message, session_id),
if fast_context: _fast_tool_runner.run_matching(clean_message),
names = _fast_tool_runner.matching_names(clean_message) )
print(f"[agent] fast_tools={names}: {len(fast_context)} chars injected", flush=True) if url_context:
print(f"[agent] crawl4ai: {len(url_context)} chars fetched", flush=True)
if fast_context:
names = _fast_tool_runner.matching_names(clean_message)
print(f"[agent] fast_tools={names}: {len(fast_context)} chars injected", flush=True)
# Build enriched history # Build enriched history
enriched_history = list(history) enriched_history = list(history)