job "ai-frontend" { datacenters = ["Homelab-PTECH-DC"] region = "home" type = "service" # --- OpenWebUI (Keep as is) --- group "openwebui" { count = 1 constraint { attribute = "${attr.unique.hostname}" value = "hp-mini-1" } network { port "http" { static = 8080 to = 8080 } } service { name = "openwebui" port = "http" tags = ["traefik.enable=true"] check { type = "http" path = "/health" interval = "20s" timeout = "2s" } } task "server" { driver = "podman" env { OLLAMA_BASE_URL = "http://ollama.service.consul:11434" } config { image = "ghcr.io/open-webui/open-webui:main" ports = ["http"] volumes = [ "/mnt/local-ssd/nomad/stacks/ai/ai-frontend/openwebui:/app/backend/data" ] } resources { cpu = 1000; memory = 1024 } } } # --- LobeChat (UPDATED) --- group "lobechat" { count = 1 constraint { attribute = "${attr.unique.hostname}" value = "hp-mini-1" } network { port "http" { static = 3210 to = 3210 } } service { name = "lobechat" port = "http" tags = ["traefik.enable=true"] check { type = "http" path = "/api/health" interval = "20s" timeout = "2s" } } task "server" { driver = "podman" env { OLLAMA_PROXY_URL = "http://ollama.service.consul:11434" ACCESS_CODE = "securepassword123" } config { image = "docker.io/lobehub/lobe-chat" ports = ["http"] } resources { cpu = 500 # --- THE FIX --- # Increased from 512 to 1024 to prevent OOM crashes memory = 1024 } } } }