atlasbot: strengthen evidence and units
commit 3885abd7fe
parent 4e45e1985d

@@ -163,6 +163,7 @@ class AnswerEngine:
         lexicon_ctx = _lexicon_context(summary)
         key_facts: list[str] = []
         metric_facts: list[str] = []
+        facts_used: list[str] = []

         started = time.monotonic()
         reply = ""

@@ -385,6 +386,7 @@ class AnswerEngine:
                 "top_scored": scored_preview,
             },
         )
+        facts_used = list(dict.fromkeys(key_facts)) if key_facts else list(dict.fromkeys(metric_facts))
         snapshot_context = "ClusterSnapshot:\n" + "\n".join([chunk["text"] for chunk in selected])
         if key_facts:
             snapshot_context = "KeyFacts:\n" + "\n".join(key_facts) + "\n\n" + snapshot_context

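The list(dict.fromkeys(...)) idiom above deduplicates the fact lines while preserving their original order, falling back to the metric facts when no key facts were collected. A minimal sketch with invented fact strings:

# Sketch only; the fact strings are invented for illustration.
key_facts = ["node01 cpu 93", "node02 ram 71", "node01 cpu 93"]
metric_facts = ["net_rx 1.5 MB/s"]

# dict.fromkeys() keeps first-seen order and drops later duplicates.
facts_used = list(dict.fromkeys(key_facts)) if key_facts else list(dict.fromkeys(metric_facts))
print(facts_used)  # ['node01 cpu 93', 'node02 ram 71']
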
@@ -417,6 +419,8 @@ class AnswerEngine:
         runbook_fix = _needs_runbook_fix(reply, runbook_paths)
         runbook_needed = _needs_runbook_reference(normalized, runbook_paths, reply)
         needs_evidence = _needs_evidence_fix(reply, classify)
+        if classify.get("question_type") in {"open_ended", "planning"} and metric_facts:
+            needs_evidence = True
         resolved_runbook = None
         if runbook_paths and (runbook_fix or runbook_needed):
             resolver_prompt = prompts.RUNBOOK_SELECT_PROMPT + "\nQuestion: " + normalized

@@ -640,7 +644,7 @@ class AnswerEngine:
             reply = await self._dedup_reply(reply, plan, call_llm, tag="dedup")

             scores = await self._score_answer(normalized, reply, plan, call_llm)
-            claims = await self._extract_claims(normalized, reply, summary, call_llm)
+            claims = await self._extract_claims(normalized, reply, summary, facts_used, call_llm)
         except LLMLimitReached:
             if not reply:
                 reply = "I started working on this but hit my reasoning limit. Ask again with 'Run limitless' for a deeper pass."

@@ -736,13 +740,24 @@ class AnswerEngine:
         question: str,
         reply: str,
         summary: dict[str, Any],
+        facts_used: list[str],
         call_llm: Callable[..., Any],
     ) -> list[ClaimItem]:
         if not reply or not summary:
             return []
         summary_json = _json_excerpt(summary)
-        prompt = prompts.CLAIM_MAP_PROMPT + "\nQuestion: " + question + "\nAnswer: " + reply
-        raw = await call_llm(prompts.CLAIM_SYSTEM, prompt, context=f"SnapshotSummaryJson:{summary_json}", model=self._settings.ollama_model_fast, tag="claim_map")
+        facts_used = [line.strip() for line in (facts_used or []) if line and line.strip()]
+        facts_block = ""
+        if facts_used:
+            facts_block = "\nFactsUsed:\n" + "\n".join([f"- {line}" for line in facts_used[:12]])
+        prompt = prompts.CLAIM_MAP_PROMPT + "\nQuestion: " + question + "\nAnswer: " + reply + facts_block
+        raw = await call_llm(
+            prompts.CLAIM_SYSTEM,
+            prompt,
+            context=f"SnapshotSummaryJson:{summary_json}",
+            model=self._settings.ollama_model_fast,
+            tag="claim_map",
+        )
         data = _parse_json_block(raw, fallback={})
         claims_raw = data.get("claims") if isinstance(data, dict) else None
         claims: list[ClaimItem] = []

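For reference, a rough sketch of how the new FactsUsed block renders before it is appended to the claim-mapping prompt; the fact lines are invented and the rest of the prompt is omitted:

# Illustration only; the fact lines are made up.
facts_used = ["node01 cpu 93%", "node02 ram 71%"]
facts_block = "\nFactsUsed:\n" + "\n".join([f"- {line}" for line in facts_used[:12]])
print(facts_block.strip())
# FactsUsed:
# - node01 cpu 93%
# - node02 ram 71%
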
@@ -1745,6 +1760,8 @@ def _best_runbook_match(candidate: str, allowed: list[str]) -> str | None:


 def _resolve_path(data: Any, path: str) -> Any | None:
+    if path.startswith("line:"):
+        return path.split("line:", 1)[1].strip()
     cursor = data
     for part in re.split(r"\.(?![^\[]*\])", path):
         if not part:

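With this change, an evidence path beginning with line: is returned verbatim instead of being walked through the summary dict, so a claim can cite an exact FactsUsed line. A standalone sketch of just the new branch (the existing dotted-path handling is unchanged and omitted):

def _resolve_line_path(path: str) -> str | None:
    # Mirrors only the branch added to _resolve_path(); not the full function.
    if path.startswith("line:"):
        return path.split("line:", 1)[1].strip()
    return None

print(_resolve_line_path("line: node01 cpu 93%"))  # 'node01 cpu 93%'
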
@@ -187,7 +187,9 @@ CLAIM_SYSTEM = (
 )

 CLAIM_MAP_PROMPT = (
-    "Return JSON with claims list; each claim: id, claim, evidence (list of {path, reason})."
+    "Return JSON with claims list; each claim: id, claim, evidence (list of {path, reason}). "
+    "If FactsUsed is provided, prefer evidence paths of the form line:<exact line> from FactsUsed. "
+    "Otherwise use SnapshotSummaryJson paths."
 )

 FOLLOWUP_SYSTEM = (

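An example of the JSON shape the updated prompt asks the model for, assuming a FactsUsed block was supplied; the claim text and fact line are invented:

# Invented example of the expected claims payload.
example = {
    "claims": [
        {
            "id": "c1",
            "claim": "node01 has the highest CPU usage",
            "evidence": [{"path": "line:node01 cpu 93%", "reason": "quoted from FactsUsed"}],
        }
    ]
}
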
@@ -1364,6 +1364,8 @@ def _append_hottest(lines: list[str], summary: dict[str, Any]) -> None:
             value = _format_rate_bytes(entry.get("value"))
         else:
             value = _format_float(entry.get("value"))
+        if value and key in {"cpu", "ram", "disk"}:
+            value = f"{value}%"
         if node:
             label = node
             if hardware:

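A small sketch of the unit change: cpu, ram, and disk values now carry an explicit percent sign, while other metrics are left as-is. The data and the stand-in for _format_float are invented:

# Sketch with invented data; _format_float stands in for the real helper.
def _format_float(value):
    return f"{float(value):g}" if value is not None else ""

for key, raw in {"cpu": 93.2, "ram": 71.0, "load": 4.1}.items():
    value = _format_float(raw)
    if value and key in {"cpu", "ram", "disk"}:
        value = f"{value}%"  # cpu/ram/disk are percentages, so attach the unit
    print(key, value)
# cpu 93.2%
# ram 71%
# load 4.1
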