version: '3.8'

services:
  # --- THE BRAIN (Backend Only - P52) ---
  ollama:
    image: ollama/ollama:rocm
    networks:
      - ai_internal
    deploy:
      replicas: 1
      placement:
        constraints:
          - node.labels.gpu == true
      restart_policy:
        condition: on-failure
    user: root
    devices:
      # 1. Map the AMD Card Node (Control) and the ROCm compute interface
      - /dev/dri/card0:/dev/dri/card0
      - /dev/kfd:/dev/kfd
      # 2. Mount ONLY the AMD Render Node (The Blinders)
      - /dev/dri/renderD128:/dev/dri/renderD128
    volumes:
      # P52 Bind Mount (Already working)
      - /mnt/local-ssd/docker/ollama:/root/.ollama
    environment:
      - HSA_OVERRIDE_GFX_VERSION=10.3.0
      - ROCR_VISIBLE_DEVICES=0
      - OLLAMA_HOST=0.0.0.0
      - OLLAMA_KEEP_ALIVE=5m
      - OLLAMA_DEBUG=1

  # --- UI 1: Open WebUI (HP Mini 1) ---
  openwebui:
    image: ghcr.io/open-webui/open-webui:main
    networks:
      - ai_internal
      - proxy
    ports:
      - "3000:8080"
    environment:
      - OLLAMA_BASE_URL=http://ollama:11434
    volumes:
      # BIND MOUNT: Easy to back up config/users
      - /home/phunter/docker-data/openwebui:/app/backend/data
    deploy:
      placement:
        constraints:
          - node.role == manager

  # --- UI 2: AnythingLLM (HP Mini 1) ---
  anythingllm:
    image: mintplexlabs/anythingllm
    networks:
      - ai_internal
      - proxy
    ports:
      - "3001:3001"
    environment:
      - STORAGE_DIR=/app/server/storage
      - LLM_PROVIDER=ollama
      - OLLAMA_BASE_PATH=http://ollama:11434
      - OLLAMA_MODEL_PREF=llama3
    volumes:
      # BIND MOUNT: Easy to back up PDFs/Workspaces
      - /home/phunter/docker-data/anythingllm:/app/server/storage
    deploy:
      placement:
        constraints:
          - node.role == manager

  # --- UI 3: Lobe Chat (HP Mini 1) ---
  lobechat:
    image: lobehub/lobe-chat
    networks:
      - ai_internal
      - proxy
    ports:
      - "3210:3210"
    environment:
      - OLLAMA_PROXY_URL=http://ollama:11434/v1
    deploy:
      placement:
        constraints:
          - node.role == manager

networks:
  ai_internal:
    driver: overlay
    attachable: true
  proxy:
    external: true
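
# --- DEPLOYMENT (sketch) ---
# A minimal deploy sequence, assuming this file is saved as ai-stack.yml,
# the stack is named "ai", and the P52's node hostname is "p52" (all three
# names are placeholders; substitute your own):
#
#   # Label the GPU node so the ollama placement constraint can match it
#   docker node update --label-add gpu=true p52
#
#   # Create the external "proxy" overlay network once, if it doesn't exist yet
#   docker network create --driver overlay --attachable proxy
#
#   # Deploy the stack from a manager node
#   docker stack deploy -c ai-stack.yml ai
#
#   # Verify placement and watch the Ollama service logs
#   docker service ls
#   docker service logs -f ai_ollama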