Add web UIs

2025-12-24 00:50:47 -05:00
parent 8095c7b951
commit 9323a0624a


@@ -1,8 +1,11 @@
 version: '3.8'
 services:
+  # --- THE BRAIN (Backend Only - P52) ---
   ollama:
     image: ollama/ollama:rocm
+    networks:
+      - ai_internal
     deploy:
       replicas: 1
       placement:
@@ -10,28 +13,75 @@ services:
           - node.labels.gpu == true
       restart_policy:
         condition: on-failure
+    # We use "host" mode for devices in Swarm to avoid permission headaches
+    # This allows the container to see the /dev/kfd and /dev/dri paths we mount below
     user: root
     volumes:
-      - /mnt/local-ssd/ollama:/root/.ollama
+      - /mnt/local-ssd/docker/ollama:/root/.ollama # P52 Bind Mount (Already working)
+      # Mount the Kernel Fusion Driver (Required for ROCm)
       - /dev/kfd:/dev/kfd
+      # Mount the Direct Rendering Interface (The actual cards)
       - /dev/dri:/dev/dri
-    networks:
-      - proxy
     environment:
+      # Force support for Navi 21 (6900 XT)
      - HSA_OVERRIDE_GFX_VERSION=10.3.0
+      # Tell ROCm to verify usage
       - ROCR_VISIBLE_DEVICES=all
       - OLLAMA_HOST=0.0.0.0
+      - OLLAMA_KEEP_ALIVE=5m
+  # --- UI 1: Open WebUI (HP Mini 1) ---
+  openwebui:
+    image: ghcr.io/open-webui/open-webui:main
+    networks:
+      - ai_internal
+      - Proxy
+    ports:
+      - "3000:8080"
+    environment:
+      - OLLAMA_BASE_URL=http://ollama:11434
+    volumes:
+      # BIND MOUNT: Easy to backup config/users
+      - /home/phunter/docker-data/openwebui:/app/backend/data
+    deploy:
+      placement:
+        constraints:
+          - node.role == manager
+  # --- UI 2: AnythingLLM (HP Mini 1) ---
+  anythingllm:
+    image: mintplexlabs/anythingllm
+    networks:
+      - ai_internal
+      - Proxy
+    ports:
+      - "3001:3001"
+    environment:
+      - STORAGE_DIR=/app/server/storage
+      - LLM_PROVIDER=ollama
+      - OLLAMA_BASE_PATH=http://ollama:11434
+      - OLLAMA_MODEL_PREF=llama3
+    volumes:
+      # BIND MOUNT: Easy to backup PDFs/Workspaces
+      - /home/phunter/docker-data/anythingllm:/app/server/storage
+    deploy:
+      placement:
+        constraints:
+          - node.role == manager
+  # --- UI 3: Lobe Chat (HP Mini 1) ---
+  lobechat:
+    image: lobehub/lobe-chat
+    networks:
+      - ai_internal
+      - Proxy
+    ports:
+      - "3210:3210"
+    environment:
+      - OLLAMA_PROXY_URL=http://ollama:11434/v1
+    deploy:
+      placement:
+        constraints:
+          - node.role == manager
 networks:
-  proxy:
+  ai_internal:
+    driver: overlay
+    attachable: true
+  Proxy:
     external: true
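
Deployment note (not part of the commit): the Proxy network is declared external, so Swarm will refuse the stack until a network with that exact capitalized name exists. A minimal sketch of bringing the stack up, assuming the file is saved as docker-compose.yml and the stack is named ai (both names are placeholders, not from this repo):

# One-time: create the external overlay network, unless the
# reverse-proxy stack that owns "Proxy" already created it.
docker network create --driver overlay --attachable Proxy

# Deploy (or update) the stack from a manager node.
docker stack deploy -c docker-compose.yml ai

# All four services should converge to 1/1 replicas.
docker service ls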
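
To verify the split placement (ollama on the GPU-labeled node, the three UIs on the manager), the checks below are one way to do it; service names assume the stack was deployed as ai, as above:

# The ROCm build of ollama logs the GPUs it detects at startup, which
# confirms the /dev/kfd and /dev/dri mounts plus the
# HSA_OVERRIDE_GFX_VERSION=10.3.0 override took effect.
docker service logs ai_ollama

# Each UI publishes through the swarm ingress, so any node answers:
curl -s -o /dev/null -w '%{http_code}\n' http://localhost:3000   # Open WebUI
curl -s -o /dev/null -w '%{http_code}\n' http://localhost:3001   # AnythingLLM
curl -s -o /dev/null -w '%{http_code}\n' http://localhost:3210   # Lobe Chat

# The UIs resolve the backend by service name over the ai_internal
# overlay; a GET to http://ollama:11434 from inside any UI task
# replies "Ollama is running".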