atlasbot: fix metric fact detection
commit d8249bba37
parent 9827db8a49
@@ -260,7 +260,7 @@ class AnswerEngine:
         scored = await _score_chunks(call_llm, chunks, normalized, sub_questions, plan)
         selected = _select_chunks(chunks, scored, plan, keyword_tokens)
         key_facts = _key_fact_lines(summary_lines, keyword_tokens)
-        metric_facts = [line for line in key_facts if re.search(r"\\d", line)]
+        metric_facts = [line for line in key_facts if re.search(r"\d", line)]
         if self._settings.debug_pipeline:
             scored_preview = sorted(
                 [{"id": c["id"], "score": scored.get(c["id"], 0.0), "summary": c["summary"]} for c in chunks],
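For context, a minimal sketch of why the old pattern missed metric lines: in a raw string, r"\\d" is two characters, a literal backslash followed by "d", so re.search looks for the text "\d" instead of a digit. The sample key_facts list below is made up for illustration and is not data from this repository.

import re

key_facts = ["Latency dropped to 120 ms", "No numeric data in this line"]

# Buggy pattern: r"\\d" matches a literal backslash plus "d",
# which never appears in these lines, so the result is empty.
buggy = [line for line in key_facts if re.search(r"\\d", line)]

# Fixed pattern: r"\d" is the regex digit class.
fixed = [line for line in key_facts if re.search(r"\d", line)]

print(buggy)  # []
print(fixed)  # ['Latency dropped to 120 ms']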