atlasbot: tighten runbook selection and chunk hits
parent d6d9ac442a
commit eea9003d69
@@ -308,7 +308,9 @@ class AnswerEngine:
                 tag="runbook_select",
             )
             resolver = _parse_json_block(resolver_raw, fallback={})
-            resolved_runbook = resolver.get("path") if isinstance(resolver.get("path"), str) else None
+            candidate = resolver.get("path") if isinstance(resolver.get("path"), str) else None
+            if candidate and candidate in runbook_paths:
+                resolved_runbook = candidate
         if (snapshot_context and needs_evidence) or unknown_nodes or unknown_namespaces or runbook_fix or runbook_needed:
             if observer:
                 observer("evidence_fix", "repairing missing evidence")
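
The change above only trusts the resolver's suggestion when it names a runbook the bot already knows. A minimal standalone sketch of that guard, assuming a JSON-ish resolver payload (the helper name pick_allowed_runbook and the sample data are illustrative, not part of atlasbot):

def pick_allowed_runbook(resolver: dict, runbook_paths: list[str]) -> str | None:
    # Accept the resolver's "path" only when it is a string and a known runbook.
    candidate = resolver.get("path") if isinstance(resolver.get("path"), str) else None
    if candidate and candidate in runbook_paths:
        return candidate
    return None

allowed = ["runbooks/restart-api.md"]
print(pick_allowed_runbook({"path": "runbooks/restart-api.md"}, allowed))  # accepted
print(pick_allowed_runbook({"path": "runbooks/made-up.md"}, allowed))      # None: rejected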
@@ -351,6 +353,34 @@ class AnswerEngine:
                 model=plan.model,
                 tag="runbook_enforce",
             )
+            if runbook_paths:
+                invalid = [
+                    token
+                    for token in re.findall(r"runbooks/[A-Za-z0-9._-]+", reply)
+                    if token.lower() not in {p.lower() for p in runbook_paths}
+                ]
+                if invalid:
+                    if observer:
+                        observer("runbook_enforce", "replacing invalid runbook path")
+                    resolver_prompt = prompts.RUNBOOK_SELECT_PROMPT + "\nQuestion: " + normalized
+                    resolver_raw = await call_llm(
+                        prompts.RUNBOOK_SELECT_SYSTEM,
+                        resolver_prompt,
+                        context="AllowedRunbooks:\n" + "\n".join(runbook_paths),
+                        model=plan.fast_model,
+                        tag="runbook_select",
+                    )
+                    resolver = _parse_json_block(resolver_raw, fallback={})
+                    candidate = resolver.get("path") if isinstance(resolver.get("path"), str) else None
+                    if candidate and candidate in runbook_paths:
+                        enforce_prompt = prompts.RUNBOOK_ENFORCE_PROMPT.format(path=candidate)
+                        reply = await call_llm(
+                            prompts.RUNBOOK_ENFORCE_SYSTEM,
+                            enforce_prompt + "\nAnswer: " + reply,
+                            context=context,
+                            model=plan.model,
+                            tag="runbook_enforce",
+                        )

         if _needs_focus_fix(normalized, reply, classify):
             if observer:
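
The enforcement pass above scans the drafted reply for runbook-style tokens and treats any path outside the allowed list as invalid, comparing case-insensitively, before re-resolving and rewriting the answer. A standalone sketch of just the scan, with made-up reply text and paths:

import re

reply = "See runbooks/db-failover.md and runbooks/Restart-API.md for details"
runbook_paths = ["runbooks/restart-api.md", "runbooks/scale-workers.md"]

allowed = {p.lower() for p in runbook_paths}
invalid = [
    token
    for token in re.findall(r"runbooks/[A-Za-z0-9._-]+", reply)
    if token.lower() not in allowed
]
print(invalid)  # ['runbooks/db-failover.md'] -- only the unknown path is flagged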
@@ -746,6 +776,7 @@ def _select_chunks(
     head = chunks[0]
     selected.append(head)
     keyword_hits: list[dict[str, Any]] = []
+    raw_keywords = [kw.lower() for kw in (keywords or []) if kw]
     focused = _focused_keywords(keywords or [])
     if focused:
         lowered = [kw.lower() for kw in focused if kw]
@@ -753,6 +784,13 @@ def _select_chunks(
             text = item.get("text", "").lower()
             if any(kw in text for kw in lowered):
                 keyword_hits.append(item)
+    if raw_keywords:
+        for item in ranked:
+            if len(keyword_hits) >= plan.chunk_top:
+                break
+            text = item.get("text", "").lower()
+            if any(kw in text for kw in raw_keywords):
+                keyword_hits.append(item)
     for item in keyword_hits:
         if len(selected) >= plan.chunk_top:
             break
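
Chunk selection now builds a raw, lowercased keyword list up front and, after the focused-keyword pass, tops keyword_hits up from that broader list until plan.chunk_top hits are collected. A rough standalone sketch of the two-pass idea (the function name, sample chunks, and the assumption that keywords arrive lowercased are all illustrative):

from typing import Any

def keyword_hits_two_pass(
    ranked: list[dict[str, Any]],
    focused: list[str],
    raw_keywords: list[str],
    chunk_top: int,
) -> list[dict[str, Any]]:
    hits: list[dict[str, Any]] = []
    # Pass 1: precise, focused keywords.
    for item in ranked:
        text = item.get("text", "").lower()
        if any(kw in text for kw in focused):
            hits.append(item)
    # Pass 2: broader raw keywords, only until the cap is reached.
    for item in ranked:
        if len(hits) >= chunk_top:
            break
        text = item.get("text", "").lower()
        if any(kw in text for kw in raw_keywords):
            hits.append(item)
    return hits

chunks = [{"text": "Restart the API pod"}, {"text": "Scale the worker deployment"}]
print(keyword_hits_two_pass(chunks, ["api pod"], ["scale"], 2))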