atlasbot: strengthen evidence and units

Brad Stein 2026-02-02 12:38:11 -03:00
parent 4e45e1985d
commit 3885abd7fe
3 changed files with 25 additions and 4 deletions

View File

@@ -163,6 +163,7 @@ class AnswerEngine:
        lexicon_ctx = _lexicon_context(summary)
        key_facts: list[str] = []
        metric_facts: list[str] = []
        facts_used: list[str] = []
        started = time.monotonic()
        reply = ""
@@ -385,6 +386,7 @@ class AnswerEngine:
                "top_scored": scored_preview,
            },
        )
        facts_used = list(dict.fromkeys(key_facts)) if key_facts else list(dict.fromkeys(metric_facts))
        snapshot_context = "ClusterSnapshot:\n" + "\n".join([chunk["text"] for chunk in selected])
        if key_facts:
            snapshot_context = "KeyFacts:\n" + "\n".join(key_facts) + "\n\n" + snapshot_context
@@ -417,6 +419,8 @@ class AnswerEngine:
        runbook_fix = _needs_runbook_fix(reply, runbook_paths)
        runbook_needed = _needs_runbook_reference(normalized, runbook_paths, reply)
        needs_evidence = _needs_evidence_fix(reply, classify)
        if classify.get("question_type") in {"open_ended", "planning"} and metric_facts:
            needs_evidence = True
        resolved_runbook = None
        if runbook_paths and (runbook_fix or runbook_needed):
            resolver_prompt = prompts.RUNBOOK_SELECT_PROMPT + "\nQuestion: " + normalized
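The added gate widens the evidence-fix pass: any open-ended or planning answer backed by metric facts goes through it, even when _needs_evidence_fix said no. A hypothetical restatement of the predicate (classify's shape is assumed from the call above, not confirmed elsewhere):

def forces_evidence(classify: dict, metric_facts: list[str]) -> bool:
    # Sketch only: metric-backed open-ended/planning answers must cite evidence.
    return bool(metric_facts) and classify.get("question_type") in {"open_ended", "planning"}

assert forces_evidence({"question_type": "planning"}, ["node-a cpu 91%"])
assert not forces_evidence({"question_type": "factual"}, ["node-a cpu 91%"])
assert not forces_evidence({"question_type": "planning"}, [])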
@@ -640,7 +644,7 @@ class AnswerEngine:
            reply = await self._dedup_reply(reply, plan, call_llm, tag="dedup")
            scores = await self._score_answer(normalized, reply, plan, call_llm)
            claims = await self._extract_claims(normalized, reply, summary, call_llm)
            claims = await self._extract_claims(normalized, reply, summary, facts_used, call_llm)
        except LLMLimitReached:
            if not reply:
                reply = "I started working on this but hit my reasoning limit. Ask again with 'Run limitless' for a deeper pass."
@@ -736,13 +740,24 @@ class AnswerEngine:
        question: str,
        reply: str,
        summary: dict[str, Any],
        facts_used: list[str],
        call_llm: Callable[..., Any],
    ) -> list[ClaimItem]:
        if not reply or not summary:
            return []
        summary_json = _json_excerpt(summary)
        prompt = prompts.CLAIM_MAP_PROMPT + "\nQuestion: " + question + "\nAnswer: " + reply
        raw = await call_llm(prompts.CLAIM_SYSTEM, prompt, context=f"SnapshotSummaryJson:{summary_json}", model=self._settings.ollama_model_fast, tag="claim_map")
        facts_used = [line.strip() for line in (facts_used or []) if line and line.strip()]
        facts_block = ""
        if facts_used:
            facts_block = "\nFactsUsed:\n" + "\n".join([f"- {line}" for line in facts_used[:12]])
        prompt = prompts.CLAIM_MAP_PROMPT + "\nQuestion: " + question + "\nAnswer: " + reply + facts_block
        raw = await call_llm(
            prompts.CLAIM_SYSTEM,
            prompt,
            context=f"SnapshotSummaryJson:{summary_json}",
            model=self._settings.ollama_model_fast,
            tag="claim_map",
        )
        data = _parse_json_block(raw, fallback={})
        claims_raw = data.get("claims") if isinstance(data, dict) else None
        claims: list[ClaimItem] = []
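With FactsUsed appended to the prompt, the claim-mapping model can quote a fact verbatim instead of hunting for a JSON path. A hypothetical claim_map reply under the new contract (fact text invented):

{
  "claims": [
    {
      "id": "c1",
      "claim": "node-a is the CPU hot spot",
      "evidence": [
        {"path": "line:node-a cpu 91%", "reason": "quoted from FactsUsed"}
      ]
    }
  ]
}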
@@ -1745,6 +1760,8 @@ def _best_runbook_match(candidate: str, allowed: list[str]) -> str | None:
def _resolve_path(data: Any, path: str) -> Any | None:
    if path.startswith("line:"):
        return path.split("line:", 1)[1].strip()
    cursor = data
    for part in re.split(r"\.(?![^\[]*\])", path):
        if not part:
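The line: branch makes the new evidence paths self-resolving: the quoted fact is the evidence, no traversal needed. The regex split still serves the existing dotted paths, skipping dots that sit inside brackets. A simplified standalone sketch (dict-only traversal; the real function continues past this excerpt):

import re

def resolve_path_sketch(data, path):
    # line:<exact fact> resolves to the quoted text itself.
    if path.startswith("line:"):
        return path.split("line:", 1)[1].strip()
    cursor = data
    # Split on dots, but not on dots inside [...], so a path like
    # nodes[gpu.0].cpu keeps its bracketed segment intact.
    for part in re.split(r"\.(?![^\[]*\])", path):
        if not part:
            continue
        if not isinstance(cursor, dict):
            return None
        cursor = cursor.get(part)
    return cursor

assert resolve_path_sketch({}, "line: node-a cpu 91%") == "node-a cpu 91%"
assert resolve_path_sketch({"nodes": {"a": {"cpu": 91}}}, "nodes.a.cpu") == 91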

View File

@@ -187,7 +187,9 @@ CLAIM_SYSTEM = (
)
CLAIM_MAP_PROMPT = (
    "Return JSON with claims list; each claim: id, claim, evidence (list of {path, reason})."
    "Return JSON with claims list; each claim: id, claim, evidence (list of {path, reason}). "
    "If FactsUsed is provided, prefer evidence paths of the form line:<exact line> from FactsUsed. "
    "Otherwise use SnapshotSummaryJson paths."
)
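Concretely, a claim_map prompt assembled under the new instruction would look roughly like this (question, answer, and facts invented):

Return JSON with claims list; each claim: id, claim, evidence (list of {path, reason}). If FactsUsed is provided, prefer evidence paths of the form line:<exact line> from FactsUsed. Otherwise use SnapshotSummaryJson paths.
Question: which node is running hot?
Answer: node-a is the hot spot at 91% CPU.
FactsUsed:
- node-a cpu 91%
- node-b disk 72%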
FOLLOWUP_SYSTEM = (

View File

@@ -1364,6 +1364,8 @@ def _append_hottest(lines: list[str], summary: dict[str, Any]) -> None:
            value = _format_rate_bytes(entry.get("value"))
        else:
            value = _format_float(entry.get("value"))
        if value and key in {"cpu", "ram", "disk"}:
            value = f"{value}%"
        if node:
            label = node
            if hardware:
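The unit fix only touches the percentage-style metrics; byte rates keep whatever _format_rate_bytes produced, and empty values stay empty. A rough restatement with a stand-in formatter (the helper below is invented, not the real _format_float):

def fmt(value) -> str:
    # Stand-in formatter: one decimal place, empty string for missing data.
    return "" if value is None else f"{float(value):.1f}"

def hottest_value(key: str, value) -> str:
    text = fmt(value)
    if text and key in {"cpu", "ram", "disk"}:
        text = f"{text}%"  # bare "91.0" is ambiguous; "91.0%" is not
    return text

print(hottest_value("cpu", 91.04))    # 91.0%
print(hottest_value("cpu", None))     # empty: no unit appended to nothing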