atlasbot: add metric fallback for direct questions
parent f6bb0b90dd
commit 9e15ced721
@@ -161,6 +161,7 @@ class AnswerEngine:
         history_ctx = _format_history(history)
         lexicon_ctx = _lexicon_context(summary)
         key_facts: list[str] = []
+        metric_facts: list[str] = []
         started = time.monotonic()
         reply = ""
@@ -257,6 +258,7 @@ class AnswerEngine:
         scored = await _score_chunks(call_llm, chunks, normalized, sub_questions, plan)
         selected = _select_chunks(chunks, scored, plan, keyword_tokens)
         key_facts = _key_fact_lines(summary_lines, keyword_tokens)
+        metric_facts = [line for line in key_facts if re.search(r"\d", line)]
         if self._settings.debug_pipeline:
             scored_preview = sorted(
                 [{"id": c["id"], "score": scored.get(c["id"], 0.0), "summary": c["summary"]} for c in chunks],
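Note: the filter added above is the whole definition of a "metric fact": any key-fact line that contains at least one digit. A minimal, self-contained sketch of that step (the sample fact lines are made up for illustration and are not real _key_fact_lines output):

import re

# Hypothetical key facts, shaped like what _key_fact_lines might return (illustrative only).
key_facts = [
    "node worker-3 reports 92% memory utilisation",
    "ingress latency is elevated",
    "pod restarts in the last hour: 14",
]

# Same filter as in the diff: keep only lines containing at least one digit.
metric_facts = [line for line in key_facts if re.search(r"\d", line)]
print(metric_facts)
# ['node worker-3 reports 92% memory utilisation', 'pod restarts in the last hour: 14']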
@@ -328,10 +330,8 @@ class AnswerEngine:
             extra_bits.append("AllowedRunbooks: " + ", ".join(runbook_paths))
         if resolved_runbook:
             extra_bits.append("ResolvedRunbook: " + resolved_runbook)
-        if key_facts:
-            metric_facts = [line for line in key_facts if re.search(r"\d", line)]
-            if metric_facts:
-                extra_bits.append("MustUseFacts: " + "; ".join(metric_facts[:4]))
+        if metric_facts:
+            extra_bits.append("MustUseFacts: " + "; ".join(metric_facts[:4]))
         if allowed_nodes:
             extra_bits.append("AllowedNodes: " + ", ".join(allowed_nodes))
         if allowed_namespaces:
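Note: on the prompt side, the hint is capped at four facts and joined with semicolons. A quick sketch of what the MustUseFacts line looks like once it lands in extra_bits (the join and the cap come from the diff; the sample facts are illustrative):

metric_facts = [
    "pod restarts in the last hour: 14",
    "node worker-3 reports 92% memory utilisation",
]

extra_bits: list[str] = []
if metric_facts:
    # At most four facts, semicolon-separated, as in the diff.
    extra_bits.append("MustUseFacts: " + "; ".join(metric_facts[:4]))

print(extra_bits[0])
# MustUseFacts: pod restarts in the last hour: 14; node worker-3 reports 92% memory utilisation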
@@ -403,6 +403,15 @@ class AnswerEngine:
             model=plan.model,
             tag="focus_fix",
         )
+        if classify.get("question_type") in {"metric", "diagnostic"} and metric_facts and not re.search(r"\d", reply):
+            best_line = None
+            lowered = normalized.lower()
+            for line in metric_facts:
+                if any(token in line.lower() for token in lowered.split()):
+                    best_line = line
+                    break
+            best_line = best_line or metric_facts[0]
+            reply = f"From the latest snapshot: {best_line}."

         if plan.use_critic:
             if observer:
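Note: pulled out of AnswerEngine, the fallback itself is small: for a metric or diagnostic question whose reply contains no digit, prefer the metric fact that shares a token with the question, otherwise fall back to the first fact. A self-contained sketch under those assumptions (the function name and sample inputs are illustrative, not part of the codebase):

import re


def metric_fallback(question_type: str, question: str, reply: str, metric_facts: list[str]) -> str:
    """Return a fact-based reply when the model answer contains no numbers."""
    if question_type not in {"metric", "diagnostic"}:
        return reply
    if not metric_facts or re.search(r"\d", reply):
        return reply
    lowered = question.lower()
    best_line = None
    # Prefer a fact that shares at least one token with the question.
    for line in metric_facts:
        if any(token in line.lower() for token in lowered.split()):
            best_line = line
            break
    best_line = best_line or metric_facts[0]
    return f"From the latest snapshot: {best_line}."


print(metric_fallback(
    "metric",
    "how many pod restarts happened",
    "Restart activity looks normal.",
    ["pod restarts in the last hour: 14", "node worker-3 reports 92% memory utilisation"],
))
# From the latest snapshot: pod restarts in the last hour: 14.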