atlasbot: refine insightful answers

Brad Stein 2026-01-29 20:53:28 -03:00
parent 010cfdb07c
commit 0e471ecc37
3 changed files with 41 additions and 3 deletions

View File

@@ -85,7 +85,7 @@ class AnswerEngine:
         )
         if observer:
             observer("candidates", "drafting answers")
-        candidates = await self._candidates(question, angles, base_context, mode)
+        candidates = await self._candidates(question, angles, base_context, classify, mode)
         log.info(
             "atlasbot_candidates",
             extra={"extra": {"mode": mode, "count": len(candidates)}},
@@ -131,6 +131,9 @@
         angles = _parse_json_list(raw)
         if not angles:
             return [{"name": "primary", "question": question, "relevance": 100}]
+        if classify.get("answer_style") == "insightful":
+            if not any("implication" in (a.get("name") or "").lower() for a in angles):
+                angles.append({"name": "implications", "question": f"What are the implications of the data for: {question}", "relevance": 85})
         return angles[:max_angles]

     async def _candidates(
@@ -138,6 +141,7 @@
         question: str,
         angles: list[dict[str, Any]],
         context: str,
+        classify: dict[str, Any],
         mode: str,
     ) -> list[dict[str, Any]]:
         limit = _candidates_limit(self._settings, mode)
@@ -147,6 +151,8 @@
         for angle in selected:
             angle_q = angle.get("question") or question
             prompt = prompts.CANDIDATE_PROMPT + "\nQuestion: " + angle_q
+            if classify.get("answer_style"):
+                prompt += f"\nAnswerStyle: {classify.get('answer_style')}"
             messages = build_messages(prompts.CLUSTER_SYSTEM, prompt, context=context)
             tasks.append(self._llm.chat(messages, model=model))
         replies = await asyncio.gather(*tasks)
@@ -203,7 +209,12 @@
         messages = build_messages(prompts.CLUSTER_SYSTEM, prompt, context=context)
         model = _synthesis_model(self._settings, mode)
         reply = await self._llm.chat(messages, model=model)
-        return reply
+        needs_refine = _needs_refine(reply, classify)
+        if not needs_refine:
+            return reply
+        refine_prompt = prompts.REFINE_PROMPT + "\nQuestion: " + question + "\nDraft: " + reply
+        refine_messages = build_messages(prompts.CLUSTER_SYSTEM, refine_prompt, context=context)
+        return await self._llm.chat(refine_messages, model=model)


 def _join_context(parts: list[str]) -> str:
@@ -261,6 +272,19 @@ def _parse_json_block(text: str, *, fallback: dict[str, Any]) -> dict[str, Any]:
     return parse_json(raw, fallback=fallback)


+def _needs_refine(reply: str, classify: dict[str, Any]) -> bool:
+    if not reply:
+        return False
+    style = classify.get("answer_style") if isinstance(classify, dict) else None
+    if style != "insightful":
+        return False
+    metric_markers = ["cpu", "ram", "pods", "connections", "%"]
+    lower = reply.lower()
+    metric_hits = sum(1 for m in metric_markers if m in lower)
+    sentence_count = reply.count(".") + reply.count("!") + reply.count("?")
+    return metric_hits >= 2 and sentence_count <= 2
+
+
 def _parse_json_list(text: str) -> list[dict[str, Any]]:
     raw = text.strip()
     match = re.search(r"\[.*\]", raw, flags=re.S)
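
A quick sketch of how the new refine gate is meant to behave; the sample replies below are invented for illustration and are not taken from the repo or its tests.

# Illustration only: shows when _needs_refine should trigger the extra refine pass.
metric_dump = "CPU is at 82% and RAM is at 71%."  # two metric markers, one sentence
narrative = (
    "CPU sits at 82%, which suggests the batch jobs are competing with the API pods. "
    "Consider spreading their schedule. RAM is not a concern right now."
)  # already interpretive, three sentences

assert _needs_refine(metric_dump, {"answer_style": "insightful"})    # terse metric dump -> refine
assert not _needs_refine(narrative, {"answer_style": "insightful"})  # has interpretation -> keep draft
assert not _needs_refine(metric_dump, {"answer_style": "direct"})    # only "insightful" answers are gated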

View File

@@ -6,6 +6,8 @@ CLUSTER_SYSTEM = (
     "If the question is about Atlas, respond in short paragraphs. "
     "Avoid commands unless explicitly asked. "
     "If information is missing, say so clearly and avoid guessing. "
+    "If the question is open-ended, provide grounded interpretation or implications, "
+    "not just a list of metrics. "
     "Do not mention the context, snapshot, or knowledge base unless the user asks about sources. "
 )
@@ -13,17 +15,21 @@ CLASSIFY_PROMPT = (
     "Classify the user question. Return JSON with fields: "
     "needs_snapshot (bool), needs_kb (bool), needs_metrics (bool), "
     "needs_general (bool), intent (short string), ambiguity (0-1), "
-    "answer_style (direct|insightful)."
+    "answer_style (direct|insightful), topic_summary (short string), "
+    "follow_up (bool), question_type (metric|diagnostic|planning|open_ended)."
 )

 ANGLE_PROMPT = (
     "Generate up to {max_angles} possible angles to answer the question. "
+    "If the question is open-ended, include at least one angle that focuses on implications. "
     "Return JSON list of objects with: name, question, relevance (0-100)."
 )

 CANDIDATE_PROMPT = (
     "Answer this angle using the provided context. "
     "Keep it concise, 2-4 sentences. "
+    "If the question is open-ended, include one grounded interpretation or implication. "
+    "Avoid dumping raw metrics unless asked; prefer what the numbers imply. "
     "Do not mention the context or snapshot unless explicitly asked."
 )
@@ -43,6 +49,11 @@ SYNTHESIZE_PROMPT = (
     "Do not include confidence scores or evaluation metadata."
 )

+REFINE_PROMPT = (
+    "Improve the answer if it reads like a raw metric dump or ignores the question's intent. "
+    "Keep it grounded in the context. If you cannot add insight, say so explicitly."
+)
+
 STOCK_SYSTEM = (
     "You are Atlas, a helpful assistant. "
     "Be concise and truthful. "

View File

@@ -30,6 +30,7 @@ def _settings() -> Settings:
         ollama_model="base",
         ollama_model_fast="fast",
         ollama_model_smart="smart",
+        ollama_model_genius="genius",
         ollama_fallback_model="",
         ollama_timeout_sec=1.0,
         ollama_retries=0,
@@ -49,8 +50,10 @@
         nats_result_bucket="",
         fast_max_angles=1,
         smart_max_angles=1,
+        genius_max_angles=1,
         fast_max_candidates=1,
         smart_max_candidates=1,
+        genius_max_candidates=1,
     )