# atlasbot/testing/fakes.py
"""Reusable test doubles and settings factories."""
from __future__ import annotations
import asyncio
from atlasbot.config import Settings
class FakeLLM:
    """Deterministic LLM double for pipeline tests.

    Why:
    - keeps the answer engine tests fast and predictable.
    Input/Output:
    - accepts the same `chat()` signature as the real client;
    - returns canned JSON or text snippets based on the prompt content.
    """

    # Ordered (required substrings, canned reply) rules; first match wins.
    # Order is significant: "sub-questions" must be tested before its
    # substring "sub-question", mirroring the original if-chain.
    _RULES: tuple[tuple[tuple[str, ...], str], ...] = (
        (("normalized", "keywords"), '{"normalized":"What is Atlas?","keywords":["atlas"]}'),
        (("needs_snapshot",), '{"needs_snapshot": true, "answer_style": "direct"}'),
        (("sub-questions",), '[{"id":"q1","question":"What is Atlas?","priority":1}]'),
        (("sub-question",), "Atlas has 22 nodes."),
        (("Answer using only the Fact Sheet",), "Atlas has 22 nodes."),
        (("final response",), "Atlas has 22 nodes."),
        (("Score response quality",), '{"confidence":80,"relevance":90,"satisfaction":85,"hallucination_risk":"low"}'),
        (("claims list",), '{"claims": []}'),
    )

    def __init__(self) -> None:
        # Prompts seen so far, in call order, for test assertions.
        self.calls: list[str] = []

    async def chat(self, messages, *, model=None, timeout_sec=None):
        """Return a prompt-shaped response and remember the last user prompt."""
        prompt = messages[-1]["content"]
        self.calls.append(prompt)
        for needles, reply in self._RULES:
            if all(needle in prompt for needle in needles):
                return reply
        # Fallback when no rule matches: an empty JSON object.
        return "{}"
class SlowFakeLLM(FakeLLM):
    """Variant that sleeps briefly so timeout guards can be exercised."""

    # Fixed artificial latency, in seconds.
    _DELAY_SEC = 0.02

    async def chat(self, messages, *, model=None, timeout_sec=None):
        """Delay before answering to make budget handling deterministic."""
        await asyncio.sleep(self._DELAY_SEC)
        response = await super().chat(messages, model=model, timeout_sec=timeout_sec)
        return response
def build_test_settings() -> Settings:
    """Create a fully populated `Settings` instance for unit tests."""
    # Assemble every field explicitly, then unpack in one place so the
    # full test configuration reads as a flat table of key/value pairs.
    values: dict[str, object] = {
        # Matrix / auth endpoints are blanked: tests never hit the network.
        "matrix_base": "",
        "auth_base": "",
        "bot_user": "",
        "bot_pass": "",
        "room_alias": "",
        "server_name": "",
        "bot_mentions": (),
        "matrix_bots": (),
        # Model tiers get distinct sentinel names so tier routing is observable.
        "ollama_url": "",
        "ollama_model": "base",
        "ollama_model_fast": "fast",
        "ollama_model_smart": "smart",
        "ollama_model_genius": "genius",
        "ollama_fallback_model": "",
        "ollama_timeout_sec": 1.0,
        "ollama_retries": 0,
        "ollama_api_key": "",
        "http_port": 8090,
        "internal_token": "",
        "kb_dir": "",
        "vm_url": "",
        "ariadne_state_url": "",
        "ariadne_state_token": "",
        "snapshot_ttl_sec": 30,
        "thinking_interval_sec": 30,
        # Time budgets per answer tier.
        "quick_time_budget_sec": 15.0,
        "smart_time_budget_sec": 45.0,
        "genius_time_budget_sec": 180.0,
        "conversation_ttl_sec": 300,
        "snapshot_pin_enabled": False,
        "queue_enabled": False,
        "nats_url": "",
        "nats_stream": "",
        "nats_subject": "",
        "nats_result_bucket": "",
        # Keep fan-out minimal so pipeline tests stay fast.
        "fast_max_angles": 1,
        "smart_max_angles": 1,
        "genius_max_angles": 1,
        "fast_max_candidates": 1,
        "smart_max_candidates": 1,
        "genius_max_candidates": 1,
        "fast_llm_calls_max": 9,
        "smart_llm_calls_max": 17,
        "genius_llm_calls_max": 32,
        "llm_limit_multiplier": 1.5,
        "state_db_path": "/tmp/atlasbot_test_state.db",
    }
    return Settings(**values)