# docker-compose/Local Homelab/ollama/compose/compose.yml

version: '3.8'
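
# Deployment note: the `deploy:` keys below are Swarm-only and are ignored by
# plain `docker compose up`. A minimal sketch, assuming a stack name of "ai"
# (the name is arbitrary):
#   docker stack deploy -c compose.yml ai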
services:
  # --- THE BRAIN (Backend Only - P52) ---
  ollama:
    image: ollama/ollama:rocm
    networks:
      - ai_internal
    deploy:
      replicas: 1
      placement:
        constraints:
          - node.labels.gpu == true
      restart_policy:
        condition: on-failure
    user: root
    volumes:
      # P52 Bind Mount (Already working)
      - /mnt/local-ssd/ollama:/root/.ollama
      # Swarm has no `devices:` key, so the ROCm device nodes are bind-mounted
      - /dev/kfd:/dev/kfd
      - /dev/dri:/dev/dri
    environment:
      - HSA_OVERRIDE_GFX_VERSION=10.3.0 # report gfx1030 so ROCm accepts otherwise-unsupported RDNA2-class GPUs
      - ROCR_VISIBLE_DEVICES=all
      - OLLAMA_HOST=0.0.0.0
      - OLLAMA_KEEP_ALIVE=5m # unload idle models after 5 minutes
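    # Sanity check once the task is running (a sketch - Swarm generates the
    # container name, e.g. <stack>_ollama.1.<task-id>; find it with `docker ps`
    # on the GPU node):
    #   docker exec -it <ollama-container> ollama run llama3 "hello"
    # `ollama run` pulls the model on first use, and the service logs show
    # whether the ROCm runtime detected the GPU.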

  # --- UI 1: Open WebUI (HP Mini 1) ---
  openwebui:
    image: ghcr.io/open-webui/open-webui:main
    networks:
      - ai_internal
      - Proxy
    ports:
      - "3000:8080" # Open WebUI serves on 8080 inside the container
    environment:
      - OLLAMA_BASE_URL=http://ollama:11434
    volumes:
      # BIND MOUNT: Easy to backup config/users
      - /home/phunter/docker-data/openwebui:/app/backend/data
    deploy:
      placement:
        constraints:
          - node.role == manager
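    # Swarm's ingress routing mesh publishes each port above on every node's
    # IP, not just the manager - e.g. http://<node-ip>:3000 reaches Open WebUI
    # (assuming nothing upstream firewalls the ingress ports).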

  # --- UI 2: AnythingLLM (HP Mini 1) ---
  anythingllm:
    image: mintplexlabs/anythingllm
    networks:
      - ai_internal
      - Proxy
    ports:
      - "3001:3001"
    environment:
      - STORAGE_DIR=/app/server/storage
      - LLM_PROVIDER=ollama
      - OLLAMA_BASE_PATH=http://ollama:11434
      - OLLAMA_MODEL_PREF=llama3
    volumes:
      # BIND MOUNT: Easy to backup PDFs/Workspaces
      - /home/phunter/docker-data/anythingllm:/app/server/storage
    deploy:
      placement:
        constraints:
          - node.role == manager
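    # Connectivity sketch: the UIs resolve the backend by service name over the
    # ai_internal overlay. To test that path by hand (assumes curl exists in
    # the image; substitute wget if it does not):
    #   docker exec <anythingllm-container> curl http://ollama:11434/api/tags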

  # --- UI 3: Lobe Chat (HP Mini 1) ---
  lobechat:
    image: lobehub/lobe-chat
    networks:
      - ai_internal
      - Proxy
    ports:
      - "3210:3210"
    environment:
      # /v1 is Ollama's OpenAI-compatible endpoint
      - OLLAMA_PROXY_URL=http://ollama:11434/v1
    deploy:
      placement:
        constraints:
          - node.role == manager
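    # Note: all three UIs pin to the manager (HP Mini 1) so any bind-mounted
    # data stays on one box; only ollama chases the gpu=true node label.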

networks:
  ai_internal:
    driver: overlay
    attachable: true
  Proxy:
    external: true
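
# One-time prerequisites (sketches - the node name is an assumption; adjust to
# `docker node ls` output):
#   docker node update --label-add gpu=true <p52-node-name>
#   docker network create --driver overlay --attachable Proxy
# Proxy is declared external, so it must exist before the stack deploys -
# typically it is owned by a reverse-proxy stack.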