atlasbot: improve fast fallback and usage filtering
commit da94cc6f97
parent 7c0a25a0eb
@@ -16,7 +16,7 @@ spec:
       labels:
         app: atlasbot
       annotations:
-        checksum/atlasbot-configmap: manual-atlasbot-94
+        checksum/atlasbot-configmap: manual-atlasbot-95
         vault.hashicorp.com/agent-inject: "true"
         vault.hashicorp.com/role: "comms"
         vault.hashicorp.com/agent-inject-secret-turn-secret: "kv/data/atlas/comms/turn-shared-secret"
@@ -923,7 +923,7 @@ def _nodes_by_arch(inventory: list[dict[str, Any]]) -> dict[str, list[str]]:
         grouped[(node.get("arch") or "unknown")].append(node["name"])
     return {k: sorted(v) for k, v in grouped.items()}
 
-def _node_usage_table(metrics: dict[str, Any]) -> list[dict[str, Any]]:
+def _node_usage_table(metrics: dict[str, Any], *, allowed_nodes: set[str] | None = None) -> list[dict[str, Any]]:
     usage = metrics.get("node_usage") if isinstance(metrics.get("node_usage"), dict) else {}
     per_node: dict[str, dict[str, Any]] = {}
     for metric_name, entries in usage.items() if isinstance(usage, dict) else []:
@@ -935,6 +935,8 @@ def _node_usage_table(metrics: dict[str, Any]) -> list[dict[str, Any]]:
             node = entry.get("node")
             if not isinstance(node, str) or not node:
                 continue
+            if allowed_nodes and node not in allowed_nodes:
+                continue
             per_node.setdefault(node, {})[metric_name] = entry.get("value")
     return [{"node": node, **vals} for node, vals in sorted(per_node.items())]
 
@@ -1139,7 +1141,8 @@ def facts_context(
     if items:
         lines.append(f"- top_restarts_1h: {', '.join(items)}")
 
-    usage_table = _node_usage_table(metrics)
+    allowed_nodes = {node.get("name") for node in inv if isinstance(node, dict) and node.get("name")}
+    usage_table = _node_usage_table(metrics, allowed_nodes=allowed_nodes or None)
     if usage_table:
         lines.append("- node_usage (cpu/ram/net/io):")
         for entry in usage_table:
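
Taken together, the two hunks above thread an `allowed_nodes` set built from the inventory in `facts_context` down into `_node_usage_table`, so usage rows for nodes that are no longer in the inventory are dropped. A minimal, self-contained sketch of that behaviour (the function name, sample metrics payload, and node names here are made up for illustration):

```python
from typing import Any


def node_usage_table(metrics: dict[str, Any], *, allowed_nodes: set[str] | None = None) -> list[dict[str, Any]]:
    # Same filtering idea as the diff: collect one row per node, skipping
    # nodes that are not part of the supplied inventory set.
    usage = metrics.get("node_usage") if isinstance(metrics.get("node_usage"), dict) else {}
    per_node: dict[str, dict[str, Any]] = {}
    for metric_name, entries in usage.items():
        for entry in entries:
            node = entry.get("node")
            if not isinstance(node, str) or not node:
                continue
            if allowed_nodes and node not in allowed_nodes:
                continue  # metric refers to a node outside the inventory
            per_node.setdefault(node, {})[metric_name] = entry.get("value")
    return [{"node": node, **vals} for node, vals in sorted(per_node.items())]


# Hypothetical data: "ghost-node" still reports metrics but has left the inventory.
metrics = {"node_usage": {"cpu": [{"node": "atlas-a", "value": 0.42},
                                  {"node": "ghost-node", "value": 0.99}]}}
print(node_usage_table(metrics, allowed_nodes={"atlas-a"}))
# [{'node': 'atlas-a', 'cpu': 0.42}]
```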
@@ -3906,6 +3909,31 @@ def _is_quantitative_prompt(prompt: str) -> bool:
     return False
 
 
+def _is_list_prompt(prompt: str) -> bool:
+    q = normalize_query(prompt)
+    if not q:
+        return False
+    if any(phrase in q for phrase in ("list", "names", "name", "show")):
+        return True
+    if any(phrase in q for phrase in ("which nodes", "what nodes", "what are the nodes")):
+        return True
+    return False
+
+
+def _needs_full_fact_pack(prompt: str) -> bool:
+    q = normalize_query(prompt)
+    tokens = set(_tokens(prompt))
+    if _is_quantitative_prompt(prompt) or _is_list_prompt(prompt):
+        return True
+    if tokens & {"workload", "pods", "namespace"}:
+        return True
+    if _NAME_INDEX and tokens & _NAME_INDEX:
+        return True
+    if any(phrase in q for phrase in ("where does", "where is", "where are", "running", "run on", "hosted on", "primary node")):
+        return True
+    return False
+
+
 def _open_ended_fast_single(
     prompt: str,
     *,
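
The new `_is_list_prompt` and `_needs_full_fact_pack` helpers broaden which prompts receive the full fact pack (see the last hunk below). A runnable sketch of the routing they enable; `normalize_query`, `_tokens`, `_NAME_INDEX`, and `_is_quantitative_prompt` are not part of this diff, so the stub implementations here are assumptions:

```python
import re

_NAME_INDEX = {"atlasbot", "comms"}  # assumed: index of known workload/node names


def normalize_query(prompt: str) -> str:
    return " ".join(prompt.lower().split())


def _tokens(prompt: str) -> list[str]:
    return re.findall(r"[a-z0-9-]+", prompt.lower())


def _is_quantitative_prompt(prompt: str) -> bool:  # stub for the existing helper
    return any(w in normalize_query(prompt) for w in ("how many", "count", "total"))


def _is_list_prompt(prompt: str) -> bool:
    q = normalize_query(prompt)
    if not q:
        return False
    if any(phrase in q for phrase in ("list", "names", "name", "show")):
        return True
    if any(phrase in q for phrase in ("which nodes", "what nodes", "what are the nodes")):
        return True
    return False


def _needs_full_fact_pack(prompt: str) -> bool:
    q = normalize_query(prompt)
    tokens = set(_tokens(prompt))
    if _is_quantitative_prompt(prompt) or _is_list_prompt(prompt):
        return True
    if tokens & {"workload", "pods", "namespace"}:
        return True
    if _NAME_INDEX and tokens & _NAME_INDEX:
        return True
    if any(phrase in q for phrase in ("where does", "where is", "where are", "running", "run on", "hosted on", "primary node")):
        return True
    return False


for p in ("list the nodes", "where does atlasbot run", "is the cluster healthy?"):
    print(p, "->", "full fact pack" if _needs_full_fact_pack(p) else "selected lines only")
```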
@@ -3937,6 +3965,8 @@ def _open_ended_fast_single(
     fallback = _fallback_fact_answer(prompt, context)
     if fallback and (_is_quantitative_prompt(prompt) or not _has_body_lines(reply)):
         reply = fallback
+    if not _has_body_lines(reply):
+        reply = "I don't have enough data in the current snapshot to answer that."
     if state:
         state.update("done", step=_open_ended_total_steps("fast"))
     return _ensure_scores(reply)
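
The fast single-model path now ends with a guard: if neither the model reply nor the fact fallback produced any body lines, a fixed "not enough data" answer is returned instead of an empty reply. A small sketch of that fallback ladder; `has_body_lines` below is an assumed stand-in for the real `_has_body_lines`:

```python
def has_body_lines(reply: str) -> bool:
    # Assumed behaviour: any non-empty, non-heading line counts as body text.
    return any(line.strip() and not line.lstrip().startswith("#") for line in reply.splitlines())


def finalize_reply(reply: str, fallback: str | None, *, quantitative: bool) -> str:
    if fallback and (quantitative or not has_body_lines(reply)):
        reply = fallback
    if not has_body_lines(reply):
        reply = "I don't have enough data in the current snapshot to answer that."
    return reply


print(finalize_reply("", None, quantitative=False))
# I don't have enough data in the current snapshot to answer that.
```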
@@ -3967,7 +3997,7 @@ def _open_ended_fast(
     )
     selected_meta = _fact_pack_meta(selected_lines)
     selected_pack = _fact_pack_text(selected_lines, selected_meta)
-    if _is_quantitative_prompt(prompt) or not selected_lines:
+    if _needs_full_fact_pack(prompt) or not selected_lines:
         selected_pack = fact_pack
     if state:
         state.total_steps = _open_ended_total_steps("fast")
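
Finally, `_open_ended_fast` swaps its gate from `_is_quantitative_prompt` to the broader `_needs_full_fact_pack`, so list- and placement-style prompts also get the full fact pack rather than only the retrieval-selected lines. A tiny illustration of the gate, with a boolean standing in for the real predicate:

```python
def choose_pack(needs_full_fact_pack: bool, selected_lines: list[str], selected_pack: str, fact_pack: str) -> str:
    # Before this commit the gate was "is the prompt quantitative?";
    # now any prompt flagged by _needs_full_fact_pack falls back to the full pack.
    if needs_full_fact_pack or not selected_lines:
        return fact_pack
    return selected_pack


# e.g. "list the nodes" is not quantitative, but it is a list prompt:
print(choose_pack(True, ["some selected line"], "selected pack", "full fact pack"))
# full fact pack
```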