ai: use atlasbot internal answers before LLM

This commit is contained in:
Brad Stein 2026-01-26 22:44:16 -03:00
parent 6e970c3b56
commit caf6d87c5d
2 changed files with 27 additions and 1 deletions

View File

@ -23,6 +23,11 @@ def register(app) -> None:
if not user_message:
return jsonify({"error": "message required"}), 400
atlasbot_reply = _atlasbot_answer(user_message)
if atlasbot_reply:
elapsed_ms = int((time.time() - started) * 1000)
return jsonify({"reply": atlasbot_reply, "latency_ms": elapsed_ms, "source": "atlasbot"})
messages: list[dict[str, str]] = []
if settings.AI_CHAT_SYSTEM_PROMPT:
messages.append({"role": "system", "content": settings.AI_CHAT_SYSTEM_PROMPT})
@ -58,6 +63,25 @@ def register(app) -> None:
_start_keep_warm()
def _atlasbot_answer(message: str) -> str:
    """Ask the internal AtlasBot service to answer *message*.

    Returns the answer text, or "" when the integration is disabled, the
    request fails, or the response payload is unusable. Callers treat ""
    as "no internal answer — fall through to the LLM", so this function
    must never raise for a bad upstream response.
    """
    endpoint = settings.AI_ATLASBOT_ENDPOINT
    if not endpoint:
        # Integration disabled via configuration.
        return ""
    headers: dict[str, str] = {}
    if settings.AI_ATLASBOT_TOKEN:
        headers["X-Internal-Token"] = settings.AI_ATLASBOT_TOKEN
    try:
        with httpx.Client(timeout=settings.AI_ATLASBOT_TIMEOUT_SEC) as client:
            resp = client.post(endpoint, json={"prompt": message}, headers=headers)
            if resp.status_code != 200:
                return ""
            data = resp.json()
            # resp.json() can legally return a list/str/number; the previous
            # `data.get("answer")` chain raised AttributeError on those (and
            # `.strip()` raised on a truthy non-string "answer"), crashing the
            # request instead of falling back to the LLM. Validate both.
            if not isinstance(data, dict):
                return ""
            answer = data.get("answer")
            if not isinstance(answer, str):
                return ""
            return answer.strip()
    except (httpx.RequestError, ValueError):
        # Network error or malformed JSON body: best-effort, so swallow
        # and let the caller fall back to the LLM.
        return ""
def _discover_ai_meta() -> dict[str, str]:
meta = {
"node": settings.AI_NODE_NAME,
@ -136,4 +160,3 @@ def _start_keep_warm() -> None:
continue
threading.Thread(target=loop, daemon=True, name="ai-keep-warm").start()

View File

@ -26,6 +26,9 @@ AI_CHAT_SYSTEM_PROMPT = os.getenv(
"You are the Titan Lab assistant for bstein.dev. Be concise and helpful.",
)
# Timeout (seconds) for a single LLM chat request.
AI_CHAT_TIMEOUT_SEC = float(os.getenv("AI_CHAT_TIMEOUT_SEC", "20"))
# Internal AtlasBot answer service, consulted before falling back to the
# LLM; an empty endpoint disables the integration.
AI_ATLASBOT_ENDPOINT = os.getenv("AI_ATLASBOT_ENDPOINT", "").strip()
# Optional shared secret, sent to AtlasBot as the X-Internal-Token header.
AI_ATLASBOT_TOKEN = os.getenv("AI_ATLASBOT_TOKEN", "").strip()
# Timeout (seconds) for the AtlasBot HTTP call; note the default is much
# shorter than AI_CHAT_TIMEOUT_SEC.
AI_ATLASBOT_TIMEOUT_SEC = float(os.getenv("AI_ATLASBOT_TIMEOUT_SEC", "5"))
# Display name of the serving node; AI_CHAT_NODE_NAME takes precedence
# over the legacy AI_NODE_NAME variable.
AI_NODE_NAME = os.getenv("AI_CHAT_NODE_NAME") or os.getenv("AI_NODE_NAME") or "ai-cluster"
# Human-readable GPU description surfaced in chat metadata.
AI_GPU_DESC = os.getenv("AI_CHAT_GPU_DESC") or "local GPU (dynamic)"
# Publicly advertised URL of the chat API endpoint.
AI_PUBLIC_ENDPOINT = os.getenv("AI_PUBLIC_CHAT_ENDPOINT", "https://chat.ai.bstein.dev/api/chat")