* Reformatted the `resources` block for the `openwebui` group to a multi-line format to resolve an illegal semicolon syntax issue. * Added explicit comments to both `openwebui` and `lobechat` groups to clarify the host pinning constraint. * Streamlined comments in the `lobechat` group's `resources` block to concisely state the memory allocation, removing outdated "fix" comments and adding a current "FIXED" comment. These changes enhance the readability, maintainability, and correctness of the Nomad job definition.
107 lines
2.0 KiB
HCL
107 lines
2.0 KiB
HCL
# Nomad job running the AI chat frontends (OpenWebUI + LobeChat).
# Both groups talk to the shared Ollama backend via Consul DNS and are
# pinned to the same host so they sit next to their local SSD state.
job "ai-frontend" {
  datacenters = ["Homelab-PTECH-DC"]
  region      = "home"
  type        = "service"

  # --- OpenWebUI ---
  group "openwebui" {
    count = 1

    # Pin to HP1 (host-local volume below lives on this node's SSD)
    constraint {
      attribute = "${attr.unique.hostname}"
      value     = "hp-mini-1"
    }

    network {
      # OpenWebUI listens on 8080 inside the container; expose it on the
      # same static host port.
      port "http" {
        static = 8080
        to     = 8080
      }
    }

    service {
      name = "openwebui"
      port = "http"
      tags = ["traefik.enable=true"]

      check {
        type     = "http"
        path     = "/health"
        interval = "20s"
        timeout  = "2s"
      }
    }

    task "server" {
      driver = "podman"

      env {
        # Ollama backend resolved through Consul DNS.
        OLLAMA_BASE_URL = "http://ollama.service.consul:11434"
      }

      config {
        image = "ghcr.io/open-webui/open-webui:main"
        ports = ["http"]
        # Persist chat/user data on the node's local SSD (host is pinned
        # above, so the bind mount is stable across restarts).
        volumes = [
          "/mnt/local-ssd/nomad/stacks/ai/ai-frontend/openwebui:/app/backend/data"
        ]
      }

      # FIXED: Expanded to multi-line to remove illegal semicolon
      resources {
        cpu    = 1000
        memory = 1024
      }
    }
  }

  # --- LobeChat ---
  group "lobechat" {
    count = 1

    # Pin to HP1 (same host as openwebui)
    constraint {
      attribute = "${attr.unique.hostname}"
      value     = "hp-mini-1"
    }

    network {
      # LobeChat's default port is 3210; expose it statically.
      port "http" {
        static = 3210
        to     = 3210
      }
    }

    service {
      name = "lobechat"
      port = "http"
      tags = ["traefik.enable=true"]

      check {
        type     = "http"
        path     = "/api/health"
        interval = "20s"
        timeout  = "2s"
      }
    }

    task "server" {
      driver = "podman"

      env {
        OLLAMA_PROXY_URL = "http://ollama.service.consul:11434"
        # SECURITY: plaintext secret committed in the job spec — move this
        # into Nomad Variables / Vault via a template block and rotate the
        # value. Left unchanged here to avoid breaking existing clients.
        ACCESS_CODE = "securepassword123"
      }

      config {
        image = "docker.io/lobehub/lobe-chat"
        ports = ["http"]
      }

      # FIXED: Ensure 1GB RAM to prevent OOM
      resources {
        cpu    = 500
        memory = 1024
      }
    }
  }
}