From 497ac908588132d096cea70443f426cf2d164e87 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 20 Dec 2025 14:24:52 -0300 Subject: [PATCH] ai-llm, bstein-dev-home: use phi3 mini model --- services/ai-llm/deployment.yaml | 2 +- services/bstein-dev-home/backend-deployment.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/services/ai-llm/deployment.yaml b/services/ai-llm/deployment.yaml index 71a54ed1..25a7f2fa 100644 --- a/services/ai-llm/deployment.yaml +++ b/services/ai-llm/deployment.yaml @@ -35,7 +35,7 @@ spec: - name: OLLAMA_MODELS value: /root/.ollama - name: OLLAMA_MODEL - value: phi3:mini-4k-instruct-q4_0 + value: phi3:mini command: - /bin/sh - -c diff --git a/services/bstein-dev-home/backend-deployment.yaml b/services/bstein-dev-home/backend-deployment.yaml index c80a9acc..4044efee 100644 --- a/services/bstein-dev-home/backend-deployment.yaml +++ b/services/bstein-dev-home/backend-deployment.yaml @@ -28,7 +28,7 @@ spec: - name: AI_CHAT_API value: http://ollama.ai.svc.cluster.local:11434 - name: AI_CHAT_MODEL - value: phi3:mini-4k-instruct-q4_0 + value: phi3:mini - name: AI_CHAT_TIMEOUT_SEC value: "20" ports: