From 39a914effd8ae9d35f9726befacc19894bbe472c Mon Sep 17 00:00:00 2001
From: Brad Stein
Date: Sat, 20 Dec 2025 14:24:52 -0300
Subject: [PATCH] ai-llm: use phi3 mini model

---
 services/ai-llm/deployment.yaml                  | 2 +-
 services/bstein-dev-home/backend-deployment.yaml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/services/ai-llm/deployment.yaml b/services/ai-llm/deployment.yaml
index 71a54ed..25a7f2f 100644
--- a/services/ai-llm/deployment.yaml
+++ b/services/ai-llm/deployment.yaml
@@ -35,7 +35,7 @@ spec:
         - name: OLLAMA_MODELS
           value: /root/.ollama
         - name: OLLAMA_MODEL
-          value: phi3:mini-4k-instruct-q4_0
+          value: phi3:mini
         command:
         - /bin/sh
         - -c
diff --git a/services/bstein-dev-home/backend-deployment.yaml b/services/bstein-dev-home/backend-deployment.yaml
index c80a9ac..4044efe 100644
--- a/services/bstein-dev-home/backend-deployment.yaml
+++ b/services/bstein-dev-home/backend-deployment.yaml
@@ -28,7 +28,7 @@ spec:
         - name: AI_CHAT_API
           value: http://ollama.ai.svc.cluster.local:11434
         - name: AI_CHAT_MODEL
-          value: phi3:mini-4k-instruct-q4_0
+          value: phi3:mini
         - name: AI_CHAT_TIMEOUT_SEC
           value: "20"
         ports: