atlasbot: simplify insight guard inputs

This commit is contained in:
Brad Stein 2026-02-04 22:23:52 -03:00
parent 6def6c167c
commit 7174f42895

View File

@ -46,6 +46,17 @@ class AnswerResult:
meta: dict[str, Any] meta: dict[str, Any]
@dataclass(frozen=True)
class InsightGuardInput:
    """Immutable bundle of everything ``_apply_insight_guard`` needs for one check."""

    question: str  # question text (the caller passes the normalized form)
    reply: str  # drafted answer to be vetted and possibly rewritten
    classify: dict[str, Any]  # classification result consulted by _should_use_insight_guard
    context: str  # retrieval context forwarded verbatim to call_llm
    plan: "ModePlan"  # supplies fast_model (guard pass) and model (fix pass)
    call_llm: Callable[..., Awaitable[str]]  # async LLM invoker shared with the engine
    facts: list[str]  # key facts; the first 6 are appended to the fix prompt
@dataclass @dataclass
class EvidenceItem: class EvidenceItem:
path: str path: str
@ -783,13 +794,15 @@ class AnswerEngine:
if observer: if observer:
observer("insight_guard", "checking for concrete signals") observer("insight_guard", "checking for concrete signals")
reply = await _apply_insight_guard( reply = await _apply_insight_guard(
normalized, InsightGuardInput(
reply, question=normalized,
classify, reply=reply,
context, classify=classify,
plan, context=context,
call_llm, plan=plan,
metric_facts or key_facts, call_llm=call_llm,
facts=metric_facts or key_facts,
)
) )
if plan.use_critic: if plan.use_critic:
@ -2447,36 +2460,28 @@ def _should_use_insight_guard(classify: dict[str, Any]) -> bool:
return style == "insightful" or qtype in {"open_ended", "planning"} return style == "insightful" or qtype in {"open_ended", "planning"}
async def _apply_insight_guard(inputs: InsightGuardInput) -> str:
    """Vet a drafted reply for concrete insight; rewrite it when the guard objects.

    Runs a fast guard pass over ``inputs.reply`` and, if the guard does not
    report ``ok``, asks the full model to produce a fixed answer (optionally
    grounded on up to six of ``inputs.facts``).  Returns the original reply
    untouched when it is empty, when the guard is not applicable to this
    classification, or when the guard approves it.
    """
    answer = inputs.reply
    # Guard clauses: nothing to check, or this question type opts out.
    if not answer:
        return answer
    if not _should_use_insight_guard(inputs.classify):
        return answer

    # First pass: cheap model decides whether the answer has concrete signals.
    verdict_raw = await inputs.call_llm(
        prompts.INSIGHT_GUARD_SYSTEM,
        prompts.INSIGHT_GUARD_PROMPT.format(question=inputs.question, answer=answer),
        context=inputs.context,
        model=inputs.plan.fast_model,
        tag="insight_guard",
    )
    verdict = _parse_json_block(verdict_raw, fallback={})
    if verdict.get("ok") is True:
        return answer

    # Second pass: full model rewrites the answer, grounded on key facts.
    rewrite_prompt = prompts.INSIGHT_FIX_PROMPT.format(question=inputs.question, answer=answer)
    if inputs.facts:
        fact_lines = "\n".join(inputs.facts[:6])
        rewrite_prompt = f"{rewrite_prompt}\nFacts:\n{fact_lines}"
    return await inputs.call_llm(
        prompts.INSIGHT_FIX_SYSTEM,
        rewrite_prompt,
        context=inputs.context,
        model=inputs.plan.model,
        tag="insight_fix",
    )