# Formatting helpers that render cluster summary dicts into plain-text report lines.
from __future__ import annotations
|
|
|
|
from typing import Any
|
|
|
|
from .format_b import *
|
|
def _append_signals(lines: list[str], summary: dict[str, Any]) -> None:
|
|
signals = summary.get("signals") if isinstance(summary.get("signals"), list) else []
|
|
if not signals:
|
|
return
|
|
lines.append("signals:")
|
|
for entry in signals[:8]:
|
|
if not isinstance(entry, dict):
|
|
continue
|
|
scope = entry.get("scope") or ""
|
|
target = entry.get("target") or ""
|
|
metric = entry.get("metric") or ""
|
|
current = entry.get("current")
|
|
delta = entry.get("delta_pct")
|
|
severity = entry.get("severity") or ""
|
|
detail = f"{scope}:{target} {metric}={current}"
|
|
if delta is not None:
|
|
detail += f" delta={delta}%"
|
|
if severity:
|
|
detail += f" severity={severity}"
|
|
lines.append(f"- {detail}")
|
|
|
|
|
|
def _append_profiles(lines: list[str], summary: dict[str, Any]) -> None: # noqa: C901
|
|
profiles = summary.get("profiles") if isinstance(summary.get("profiles"), dict) else {}
|
|
if not profiles:
|
|
return
|
|
nodes = profiles.get("nodes") if isinstance(profiles.get("nodes"), list) else []
|
|
namespaces = profiles.get("namespaces") if isinstance(profiles.get("namespaces"), list) else []
|
|
workloads = profiles.get("workloads") if isinstance(profiles.get("workloads"), list) else []
|
|
if nodes:
|
|
lines.append("node_profiles:")
|
|
for entry in nodes[:3]:
|
|
if not isinstance(entry, dict):
|
|
continue
|
|
lines.append(
|
|
f"- {entry.get('node')}: load={entry.get('load_index')} cpu={entry.get('cpu')} ram={entry.get('ram')} "
|
|
f"pods={entry.get('pods_total')} hw={entry.get('hardware')}"
|
|
)
|
|
if namespaces:
|
|
lines.append("namespace_profiles:")
|
|
for entry in namespaces[:3]:
|
|
if not isinstance(entry, dict):
|
|
continue
|
|
lines.append(
|
|
f"- {entry.get('namespace')}: pods={entry.get('pods_total')} cpu={entry.get('cpu_usage')} "
|
|
f"mem={entry.get('mem_usage')} primary={entry.get('primary_node')}"
|
|
)
|
|
if workloads:
|
|
lines.append("workload_profiles:")
|
|
for entry in workloads[:3]:
|
|
if not isinstance(entry, dict):
|
|
continue
|
|
lines.append(
|
|
f"- {entry.get('namespace')}/{entry.get('workload')}: pods={entry.get('pods_total')} "
|
|
f"running={entry.get('pods_running')} node={entry.get('primary_node')}"
|
|
)
|
|
|
|
|
|
def _append_units_windows(lines: list[str], summary: dict[str, Any]) -> None:
|
|
metrics = summary.get("metrics") if isinstance(summary.get("metrics"), dict) else {}
|
|
units = metrics.get("units") if isinstance(metrics.get("units"), dict) else {}
|
|
windows = metrics.get("windows") if isinstance(metrics.get("windows"), dict) else {}
|
|
if units:
|
|
lines.append("units: " + _format_kv_map(units))
|
|
else:
|
|
lines.append("units: cpu_pct, ram_pct, net=bytes_per_sec, io=bytes_per_sec")
|
|
if windows:
|
|
lines.append("windows: " + _format_kv_map(windows))
|
|
else:
|
|
lines.append("windows: rates=5m, restarts=1h")
|
|
|
|
|
|
def _append_node_load_summary(lines: list[str], summary: dict[str, Any]) -> None:
|
|
node_load = summary.get("node_load_summary")
|
|
if not isinstance(node_load, dict) or not node_load:
|
|
return
|
|
hardware_by_node = summary.get("hardware_by_node")
|
|
hardware_by_node = hardware_by_node if isinstance(hardware_by_node, dict) else {}
|
|
top = node_load.get("top")
|
|
if isinstance(top, list) and top:
|
|
parts = []
|
|
for entry in top[:5]:
|
|
if not isinstance(entry, dict):
|
|
continue
|
|
node = entry.get("node") or ""
|
|
load = entry.get("load_index")
|
|
cpu = entry.get("cpu")
|
|
ram = entry.get("ram")
|
|
io = entry.get("io")
|
|
net = entry.get("net")
|
|
pods_total = entry.get("pods_total")
|
|
label = f"{node} idx={_format_float(load)}"
|
|
if node and node in hardware_by_node:
|
|
label += f" hw={hardware_by_node.get(node)}"
|
|
if isinstance(pods_total, (int, float)):
|
|
label += f" pods={int(pods_total)}"
|
|
label += f" cpu={_format_float(cpu)} ram={_format_float(ram)}"
|
|
label += f" io={_format_rate_bytes(io)} net={_format_rate_bytes(net)}"
|
|
parts.append(label)
|
|
if parts:
|
|
lines.append("node_load_top: " + "; ".join(parts))
|
|
outliers = node_load.get("outliers")
|
|
if isinstance(outliers, list) and outliers:
|
|
names = [entry.get("node") for entry in outliers if isinstance(entry, dict)]
|
|
names = [name for name in names if isinstance(name, str) and name]
|
|
if names:
|
|
lines.append("node_load_outliers: " + _format_names(names))
|
|
|
|
|
|
def _append_hardware_usage(lines: list[str], summary: dict[str, Any]) -> None: # noqa: C901
|
|
usage = summary.get("hardware_usage_avg")
|
|
if not isinstance(usage, list) or not usage:
|
|
return
|
|
parts = []
|
|
tops: dict[str, tuple[str, float]] = {}
|
|
for entry in usage[:5]:
|
|
if not isinstance(entry, dict):
|
|
continue
|
|
hardware = entry.get("hardware")
|
|
load = entry.get("load_index")
|
|
cpu = entry.get("cpu")
|
|
ram = entry.get("ram")
|
|
io = entry.get("io")
|
|
net = entry.get("net")
|
|
if not hardware:
|
|
continue
|
|
label = f"{hardware} idx={_format_float(load)}"
|
|
label += f" cpu={_format_float(cpu)} ram={_format_float(ram)}"
|
|
label += f" io={_format_rate_bytes(io)} net={_format_rate_bytes(net)}"
|
|
parts.append(label)
|
|
for metric, value in (("cpu", cpu), ("ram", ram), ("io", io), ("net", net), ("load", load)):
|
|
if isinstance(value, (int, float)):
|
|
current = tops.get(metric)
|
|
if current is None or float(value) > current[1]:
|
|
tops[metric] = (hardware, float(value))
|
|
if parts:
|
|
lines.append("hardware_usage_avg: " + "; ".join(parts))
|
|
if tops:
|
|
top_parts = []
|
|
for metric in ("cpu", "ram", "io", "net", "load"):
|
|
entry = tops.get(metric)
|
|
if not entry:
|
|
continue
|
|
hardware, value = entry
|
|
if metric in {"io", "net"}:
|
|
rendered = _format_rate_bytes(value)
|
|
else:
|
|
rendered = _format_float(value)
|
|
top_parts.append(f"{metric}={hardware} ({rendered})")
|
|
if top_parts:
|
|
lines.append("hardware_usage_top: " + "; ".join(top_parts))
|
|
|
|
|
|
def _append_cluster_watchlist(lines: list[str], summary: dict[str, Any]) -> None:
|
|
watchlist = summary.get("cluster_watchlist")
|
|
if not isinstance(watchlist, list) or not watchlist:
|
|
return
|
|
lines.append("cluster_watchlist: " + "; ".join(watchlist))
|
|
|
|
|
|
def _append_baseline_deltas(lines: list[str], summary: dict[str, Any]) -> None:
|
|
deltas = summary.get("baseline_deltas") if isinstance(summary.get("baseline_deltas"), dict) else {}
|
|
nodes = deltas.get("nodes") if isinstance(deltas.get("nodes"), dict) else {}
|
|
namespaces = deltas.get("namespaces") if isinstance(deltas.get("namespaces"), dict) else {}
|
|
for scope, block in (("nodes", nodes), ("namespaces", namespaces)):
|
|
if not isinstance(block, dict):
|
|
continue
|
|
for metric, entries in block.items():
|
|
if not isinstance(entries, list) or not entries:
|
|
continue
|
|
parts: list[str] = []
|
|
for entry in entries[:5]:
|
|
if not isinstance(entry, dict):
|
|
continue
|
|
name = entry.get("node") if scope == "nodes" else entry.get("namespace")
|
|
delta = entry.get("delta")
|
|
severity = entry.get("severity")
|
|
if not isinstance(name, str) or not name or not isinstance(delta, (int, float)):
|
|
continue
|
|
suffix = f" ({severity})" if isinstance(severity, str) and severity else ""
|
|
parts.append(f"{name}={_format_float(delta)}%{suffix}")
|
|
if parts:
|
|
lines.append(f"{scope}_baseline_delta_{metric}: " + "; ".join(parts))
|
|
|
|
|
|
def _append_pod_issue_summary(lines: list[str], summary: dict[str, Any]) -> None:
    """Append top pod waiting/phase reasons plus per-namespace issue lines."""
    raw = summary.get("pod_issue_summary")
    issues = raw if isinstance(raw, dict) else {}

    raw_waiting = issues.get("waiting_reasons_top")
    waiting = raw_waiting if isinstance(raw_waiting, list) else []
    raw_phases = issues.get("phase_reasons_top")
    phases = raw_phases if isinstance(raw_phases, list) else []
    raw_ns = issues.get("namespace_issue_top")
    namespace_issue = raw_ns if isinstance(raw_ns, dict) else {}

    # A reason line is only appended when it rendered to something non-empty.
    for block, label in ((waiting, "pod_waiting_reasons_top"), (phases, "pod_phase_reasons_top")):
        line = _reason_line(block, label)
        if line:
            lines.append(line)
    if namespace_issue:
        _append_namespace_issue_lines(lines, namespace_issue)
|
|
|
|
|
|
def _reason_line(entries: list[dict[str, Any]], label: str) -> str:
|
|
parts = []
|
|
for entry in entries[:5]:
|
|
if not isinstance(entry, dict):
|
|
continue
|
|
reason = entry.get("reason")
|
|
count = entry.get("count")
|
|
if reason:
|
|
parts.append(f"{reason}={count}")
|
|
if parts:
|
|
return f"{label}: " + "; ".join(parts)
|
|
return ""
|
|
|
|
|
|
def _append_namespace_issue_lines(lines: list[str], namespace_issue: dict[str, Any]) -> None:
|
|
for key, entries in namespace_issue.items():
|
|
if not isinstance(entries, list) or not entries:
|
|
continue
|
|
parts: list[str] = []
|
|
for entry in entries[:5]:
|
|
if not isinstance(entry, dict):
|
|
continue
|
|
ns = entry.get("namespace")
|
|
value = entry.get("value")
|
|
if ns:
|
|
parts.append(f"{ns}={value}")
|
|
if parts:
|
|
lines.append(f"namespace_issue_top_{key}: " + "; ".join(parts))
|
|
|
|
|
|
def _build_cluster_watchlist(summary: dict[str, Any]) -> dict[str, Any]:
|
|
items: list[str] = []
|
|
nodes_summary = summary.get("nodes_summary") if isinstance(summary.get("nodes_summary"), dict) else {}
|
|
not_ready = int(nodes_summary.get("not_ready") or 0)
|
|
if not_ready > 0:
|
|
items.append(f"not_ready_nodes={not_ready}")
|
|
pressure = summary.get("pressure_nodes") if isinstance(summary.get("pressure_nodes"), dict) else {}
|
|
pressure_nodes = pressure.get("names") if isinstance(pressure.get("names"), list) else []
|
|
if pressure_nodes:
|
|
items.append(f"pressure_nodes={len(pressure_nodes)}")
|
|
pod_issues = summary.get("pod_issues") if isinstance(summary.get("pod_issues"), dict) else {}
|
|
pending_over = int(pod_issues.get("pending_over_15m") or 0)
|
|
if pending_over > 0:
|
|
items.append(f"pods_pending_over_15m={pending_over}")
|
|
workloads = summary.get("workloads_health") if isinstance(summary.get("workloads_health"), dict) else {}
|
|
deployments = workloads.get("deployments") if isinstance(workloads.get("deployments"), dict) else {}
|
|
statefulsets = workloads.get("statefulsets") if isinstance(workloads.get("statefulsets"), dict) else {}
|
|
daemonsets = workloads.get("daemonsets") if isinstance(workloads.get("daemonsets"), dict) else {}
|
|
total_not_ready = int(deployments.get("not_ready") or 0) + int(statefulsets.get("not_ready") or 0) + int(daemonsets.get("not_ready") or 0)
|
|
if total_not_ready > 0:
|
|
items.append(f"workloads_not_ready={total_not_ready}")
|
|
flux = summary.get("flux") if isinstance(summary.get("flux"), dict) else {}
|
|
flux_not_ready = int(flux.get("not_ready") or 0)
|
|
if flux_not_ready > 0:
|
|
items.append(f"flux_not_ready={flux_not_ready}")
|
|
pvc_usage = summary.get("pvc_usage_top") if isinstance(summary.get("pvc_usage_top"), list) else []
|
|
high_pvc = [
|
|
entry for entry in pvc_usage if isinstance(entry, dict) and (entry.get("value") or 0) >= PVC_USAGE_CRITICAL
|
|
]
|
|
if high_pvc:
|
|
items.append(f"pvc_usage>={PVC_USAGE_CRITICAL}%")
|
|
return {"cluster_watchlist": items} if items else {}
|
|
|
|
|
|
def _capacity_ratio_parts(entries: list[dict[str, Any]], ratio_key: str, usage_key: str, req_key: str) -> list[str]:
|
|
parts: list[str] = []
|
|
for entry in entries[:5]:
|
|
if not isinstance(entry, dict):
|
|
continue
|
|
ns = entry.get("namespace") or ""
|
|
ratio = entry.get(ratio_key)
|
|
usage = entry.get(usage_key)
|
|
req = entry.get(req_key)
|
|
if ns:
|
|
parts.append(
|
|
f"{ns}={_format_float(ratio)} (usage={_format_float(usage)} req={_format_float(req)})"
|
|
)
|
|
return parts
|
|
|
|
|
|
def _capacity_headroom_parts(entries: list[dict[str, Any]]) -> list[str]:
|
|
parts: list[str] = []
|
|
for entry in entries[:5]:
|
|
if not isinstance(entry, dict):
|
|
continue
|
|
ns = entry.get("namespace") or ""
|
|
headroom = entry.get("headroom")
|
|
if ns:
|
|
parts.append(f"{ns}={_format_float(headroom)}")
|
|
return parts
|
|
|
|
|
|
def _append_namespace_capacity_summary( # noqa: C901
|
|
lines: list[str],
|
|
summary: dict[str, Any],
|
|
) -> None:
|
|
cap = summary.get("namespace_capacity_summary")
|
|
if not isinstance(cap, dict) or not cap:
|
|
return
|
|
cpu_ratio = cap.get("cpu_ratio_top")
|
|
if isinstance(cpu_ratio, list):
|
|
parts = _capacity_ratio_parts(cpu_ratio, "cpu_usage_ratio", "cpu_usage", "cpu_requests")
|
|
if parts:
|
|
lines.append("namespace_cpu_ratio_top: " + "; ".join(parts))
|
|
mem_ratio = cap.get("mem_ratio_top")
|
|
if isinstance(mem_ratio, list):
|
|
parts = _capacity_ratio_parts(mem_ratio, "mem_usage_ratio", "mem_usage", "mem_requests")
|
|
if parts:
|
|
lines.append("namespace_mem_ratio_top: " + "; ".join(parts))
|
|
cpu_headroom = cap.get("cpu_headroom_low")
|
|
if isinstance(cpu_headroom, list):
|
|
parts = _capacity_headroom_parts(cpu_headroom)
|
|
if parts:
|
|
lines.append("namespace_cpu_headroom_low: " + "; ".join(parts))
|
|
mem_headroom = cap.get("mem_headroom_low")
|
|
if isinstance(mem_headroom, list):
|
|
parts = _capacity_headroom_parts(mem_headroom)
|
|
if parts:
|
|
lines.append("namespace_mem_headroom_low: " + "; ".join(parts))
|
|
cpu_over = cap.get("cpu_overcommitted")
|
|
mem_over = cap.get("mem_overcommitted")
|
|
if cpu_over is not None or mem_over is not None:
|
|
lines.append(f"namespace_overcommitted: cpu={cpu_over} mem={mem_over}")
|
|
cpu_over_names = cap.get("cpu_overcommitted_names")
|
|
if isinstance(cpu_over_names, list) and cpu_over_names:
|
|
names = [name for name in cpu_over_names if isinstance(name, str) and name]
|
|
if names:
|
|
lines.append("namespace_cpu_overcommitted_names: " + _format_names(names))
|
|
mem_over_names = cap.get("mem_overcommitted_names")
|
|
if isinstance(mem_over_names, list) and mem_over_names:
|
|
names = [name for name in mem_over_names if isinstance(name, str) and name]
|
|
if names:
|
|
lines.append("namespace_mem_overcommitted_names: " + _format_names(names))
|
|
|
|
|
|
def _append_workloads_by_namespace(lines: list[str], summary: dict[str, Any]) -> None:
|
|
workloads = summary.get("workloads")
|
|
if not isinstance(workloads, list) or not workloads:
|
|
return
|
|
by_ns: dict[str, list[dict[str, Any]]] = {}
|
|
for item in workloads:
|
|
if not isinstance(item, dict):
|
|
continue
|
|
ns = item.get("namespace") or ""
|
|
name = item.get("workload") or ""
|
|
if not ns or not name:
|
|
continue
|
|
by_ns.setdefault(ns, []).append(item)
|
|
for ns, items in sorted(by_ns.items()):
|
|
items.sort(
|
|
key=lambda item: (-int(item.get("pods_total") or 0), item.get("workload") or "")
|
|
)
|
|
parts = []
|
|
for entry in items[:2]:
|
|
name = entry.get("workload") or ""
|
|
pods = entry.get("pods_total")
|
|
primary = entry.get("primary_node")
|
|
label = f"{name}({pods})" if pods is not None else name
|
|
if primary:
|
|
label = f"{label}@{primary}"
|
|
if label:
|
|
parts.append(label)
|
|
if parts:
|
|
lines.append(f"workloads_top_{ns}: " + "; ".join(parts))
|
|
|
|
|
|
def _append_lexicon(lines: list[str], summary: dict[str, Any]) -> None:
|
|
lexicon = summary.get("lexicon")
|
|
if not isinstance(lexicon, dict):
|
|
return
|
|
terms = lexicon.get("terms") if isinstance(lexicon.get("terms"), list) else []
|
|
aliases = lexicon.get("aliases") if isinstance(lexicon.get("aliases"), dict) else {}
|
|
for entry in terms[:8]:
|
|
if not isinstance(entry, dict):
|
|
continue
|
|
term = entry.get("term")
|
|
meaning = entry.get("meaning")
|
|
if term and meaning:
|
|
lines.append(f"lexicon_term: {term} => {meaning}")
|
|
for key, value in list(aliases.items())[:6]:
|
|
if key and value:
|
|
lines.append(f"lexicon_alias: {key} => {value}")
|
|
|
|
|
|
def _append_cross_stats(lines: list[str], summary: dict[str, Any]) -> None: # noqa: C901
|
|
cross_stats = summary.get("cross_stats")
|
|
if not isinstance(cross_stats, dict):
|
|
return
|
|
node_entries = cross_stats.get("node_metric_top") if isinstance(cross_stats.get("node_metric_top"), list) else []
|
|
for entry in node_entries[:10]:
|
|
if not isinstance(entry, dict):
|
|
continue
|
|
metric = entry.get("metric")
|
|
node = entry.get("node")
|
|
value = entry.get("value")
|
|
cpu = entry.get("cpu")
|
|
ram = entry.get("ram")
|
|
net = entry.get("net")
|
|
io = entry.get("io")
|
|
pods = entry.get("pods_total")
|
|
if metric and node:
|
|
parts = [
|
|
f"value={_format_float(value)}",
|
|
f"cpu={_format_float(cpu)}",
|
|
f"ram={_format_float(ram)}",
|
|
f"net={_format_float(net)}",
|
|
f"io={_format_float(io)}",
|
|
]
|
|
if pods is not None:
|
|
parts.append(f"pods={pods}")
|
|
lines.append(f"cross_node_{metric}: {node} " + " ".join(parts))
|
|
ns_entries = cross_stats.get("namespace_metric_top") if isinstance(cross_stats.get("namespace_metric_top"), list) else []
|
|
for entry in ns_entries[:10]:
|
|
if not isinstance(entry, dict):
|
|
continue
|
|
metric = entry.get("metric")
|
|
namespace = entry.get("namespace")
|
|
value = entry.get("value")
|
|
pods = entry.get("pods_total")
|
|
cpu_ratio = entry.get("cpu_ratio")
|
|
mem_ratio = entry.get("mem_ratio")
|
|
if metric and namespace:
|
|
parts = [
|
|
f"value={_format_float(value)}",
|
|
f"cpu_ratio={_format_float(cpu_ratio)}",
|
|
f"mem_ratio={_format_float(mem_ratio)}",
|
|
]
|
|
if pods is not None:
|
|
parts.append(f"pods={pods}")
|
|
lines.append(f"cross_namespace_{metric}: {namespace} " + " ".join(parts))
|
|
pvc_entries = cross_stats.get("pvc_top") if isinstance(cross_stats.get("pvc_top"), list) else []
|
|
for entry in pvc_entries[:5]:
|
|
if not isinstance(entry, dict):
|
|
continue
|
|
namespace = entry.get("namespace")
|
|
pvc = entry.get("pvc")
|
|
used = entry.get("used_percent")
|
|
if namespace and pvc:
|
|
lines.append(f"cross_pvc_usage: {namespace}/{pvc} used={_format_float(used)}")
|
|
|
|
|
|
# Re-export every module-level name without a leading dunder. NOTE(review):
# this also re-exports everything pulled in by `from .format_b import *` and
# `typing.Any` — presumably intentional for this formatting package; confirm.
__all__ = [name for name in globals() if not name.startswith("__")]
|