nomad/stacks/ai/ai-backend.nomad

job "ai-backend" {
datacenters = ["Homelab-PTECH-DC"]
region = "home"
type = "service"
group "ollama-group" {
count = 1
# Pin to P52 Laptop (eGPU Host)
constraint {
attribute = "${meta.device}"
value = "p52-laptop"
}
network {
port "api" { static = 11434 }
}
task "ollama" {
driver = "podman"
env {
OLLAMA_HOST = "0.0.0.0"
OLLAMA_ORIGINS = "*"
# CRITICAL FOR 6900XT:
# This tells ROCm to treat the card like a supported Pro workstation card
HSA_OVERRIDE_GFX_VERSION = "10.3.0"
}
config {
# Standard image (contains ROCm libraries)
image = "docker.io/ollama/ollama:latest"
ports = ["api"]
# Required for hardware access
privileged = true
# Pass the graphics hardware to the container
volumes = [
"/opt/homelab/ollama:/root/.ollama",
"/dev/kfd:/dev/kfd",
"/dev/dri:/dev/dri"
]
}
service {
name = "ollama"
port = "api"
check {
type = "http"
path = "/"
interval = "20s"
timeout = "2s"
}
}
resources {
cpu = 2000
memory = 8192 # 8GB System RAM (The GPU has its own VRAM)
}
}
}
}
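
The job exposes the GPU by bind-mounting the ROCm device nodes (/dev/kfd, /dev/dri) as volumes and running the container privileged. As a non-authoritative alternative, newer releases of nomad-driver-podman expose a devices list in the task config; a minimal sketch of that approach, assuming the installed driver version supports the option, could look like this:

config {
  image = "docker.io/ollama/ollama:latest"
  ports = ["api"]

  # Hypothetical: only works if the installed podman driver supports "devices"
  devices = ["/dev/kfd", "/dev/dri"]

  volumes = [
    "/opt/homelab/ollama:/root/.ollama"
  ]
}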
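
Because the task registers an "ollama" service with an HTTP check, downstream jobs can discover the API endpoint rather than hard-coding the static port. A minimal sketch, assuming Consul is the service provider and using a hypothetical consumer task, renders the address into an environment variable via a template block:

# Hypothetical consumer task in another job
template {
  destination = "local/ollama.env"
  env         = true
  data        = <<EOF
{{ range service "ollama" }}OLLAMA_API_URL=http://{{ .Address }}:{{ .Port }}{{ end }}
EOF
}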