From 6a9d37dbbff07d506b12339b203c8c8dc2186d42 Mon Sep 17 00:00:00 2001 From: wedge22 <34723349+wedge22@users.noreply.github.com> Date: Fri, 3 Apr 2026 19:39:20 -0700 Subject: [PATCH 1/9] Create compose.yaml --- services/open-webui/compose.yaml | 42 ++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 services/open-webui/compose.yaml diff --git a/services/open-webui/compose.yaml b/services/open-webui/compose.yaml new file mode 100644 index 0000000..a2e4f5c --- /dev/null +++ b/services/open-webui/compose.yaml @@ -0,0 +1,42 @@ +services: + tailscale-open-webui: + image: tailscale/tailscale:latest + container_name: tailscale-open-webui + hostname: ${TS_HOSTNAME} + restart: unless-stopped + environment: + - TS_AUTHKEY=${TS_AUTHKEY} + - TS_STATE_DIR=/var/lib/tailscale + - TS_SERVE_CONFIG=/config/serve.json + - TS_USERSPACE=true + - TS_EXTRA_ARGS=--advertise-tags=tag:container + volumes: + - ./ts/state:/var/lib/tailscale + - ./ts/config:/config + - /dev/net/tun:/dev/net/tun + cap_add: + - NET_ADMIN + - SYS_MODULE + healthcheck: + test: ["CMD", "tailscale", "status"] + interval: 1m + timeout: 10s + retries: 3 + start_period: 10s + + app-open-webui: + image: ghcr.io/open-webui/open-webui:main + container_name: app-open-webui + restart: unless-stopped + depends_on: + tailscale-open-webui: + condition: service_healthy + network_mode: service:tailscale-open-webui + environment: + - OLLAMA_BASE_URL=${OLLAMA_BASE_URL} + - WEBUI_SECRET_KEY=${WEBUI_SECRET_KEY} + - TZ=${TZ} + volumes: + - ./data:/app/backend/data + # ports: + # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Uncomment to expose on LAN in addition to Tailnet From 7514917faf3e5d93edc7a916a4a1042b1bd4a820 Mon Sep 17 00:00:00 2001 From: wedge22 <34723349+wedge22@users.noreply.github.com> Date: Fri, 3 Apr 2026 19:40:40 -0700 Subject: [PATCH 2/9] Create .env.example --- services/open-webui/.env.example | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 
services/open-webui/.env.example diff --git a/services/open-webui/.env.example b/services/open-webui/.env.example new file mode 100644 index 0000000..cba9502 --- /dev/null +++ b/services/open-webui/.env.example @@ -0,0 +1,17 @@ +# Tailscale +TS_AUTHKEY=tskey-auth-your-key-here +TS_HOSTNAME=open-webui + +# Open WebUI +# Point to your Ollama instance - can be local or remote +# Examples: +# Local Ollama on same host: http://host.docker.internal:11434 +# Remote Ollama on LAN: http://192.168.1.x:11434 +# Remote Ollama over Tailnet: http://100.x.x.x:11434 +# Use OpenAI API instead: leave blank and configure in the UI +OLLAMA_BASE_URL=http://host.docker.internal:11434 +WEBUI_SECRET_KEY=change-me-to-a-random-secret +TZ=America/New_York + +# Optional: uncomment port in compose.yaml to expose on LAN +SERVICEPORT=8080 From e444fecc5ba2bdcd535cac0034a98470223e2d3a Mon Sep 17 00:00:00 2001 From: wedge22 <34723349+wedge22@users.noreply.github.com> Date: Fri, 3 Apr 2026 19:42:02 -0700 Subject: [PATCH 3/9] Create README.md --- services/open-webui/README.md | 46 +++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 services/open-webui/README.md diff --git a/services/open-webui/README.md b/services/open-webui/README.md new file mode 100644 index 0000000..d53a07c --- /dev/null +++ b/services/open-webui/README.md @@ -0,0 +1,46 @@ +# Open WebUI with Tailscale Sidecar Configuration + +This Docker Compose configuration sets up [Open WebUI](https://openwebui.com/) with Tailscale as a sidecar container to keep the app reachable over your Tailnet. + +## Open WebUI + +[Open WebUI](https://openwebui.com/) is a feature-rich, self-hosted AI platform that provides a ChatGPT-style interface for interacting with local and cloud-based AI models. It supports Ollama and any OpenAI-compatible API. 
Pairing it with Tailscale means your private AI interface is securely accessible from any of your devices — phone, laptop, or otherwise — without exposing it to the public internet. + +## Configuration Overview + +In this setup, the `tailscale-open-webui` service runs Tailscale, which manages secure networking for Open WebUI. The `app-open-webui` service utilizes the Tailscale network stack via Docker's `network_mode: service:` configuration. This keeps the app Tailnet-only unless you intentionally expose ports. + +## Prerequisites + +- A Tailscale account with an auth key ([generate one here](https://login.tailscale.com/admin/settings/keys)) +- MagicDNS and HTTPS enabled in your [Tailscale admin console](https://login.tailscale.com/admin/dns) +- Docker and Docker Compose installed +- An AI backend — Ollama running locally, on another machine, or an OpenAI-compatible API + +## Setup + +1. Copy `.env.example` to `.env` and fill in your values +2. Set `OLLAMA_BASE_URL` to point at your Ollama instance (see `.env.example` for examples), or leave it blank and configure a different API provider in the Open WebUI settings after first launch +3. Copy `serve.json` into `ts/config/serve.json` — it is mounted into the Tailscale container +4. Pre-create the data directory to avoid Docker creating it as root-owned: `mkdir -p ./data` +5. Run `docker compose config` to validate before deploying +6. Start the stack: `docker compose up -d` +7. 
On first launch, navigate to `https://..ts.net` and create your admin account — the server is open until the first user registers + +## Gotchas + +- **First-run security**: Create your admin account immediately after deployment +- **WebSocket support**: Open WebUI requires WebSocket connections — ensure nothing in your network path blocks them +- **Ollama on the same host**: Use `host.docker.internal:11434` as the `OLLAMA_BASE_URL` to reach Ollama running on the Docker host +- **Ollama over Tailnet**: If Ollama runs on a different machine, use its Tailscale IP (e.g. `http://100.x.x.x:11434`) +- **No Ollama**: Leave `OLLAMA_BASE_URL` blank and configure OpenAI or another provider in the UI after first launch +- **Health check**: The compose uses `tailscale status` for the health check. The `41234/healthz` endpoint is not available in userspace mode (`TS_USERSPACE=true`) +- **MagicDNS**: `TS_CERT_DOMAIN` in `serve.json` is populated automatically by Tailscale at runtime — you do not set it manually +- **LAN access**: Ports are commented out by default. 
Uncomment `SERVICEPORT` in `compose.yaml` if you also want LAN access alongside Tailnet access + +## Resources + +- [Open WebUI Documentation](https://docs.openwebui.com/) +- [Open WebUI GitHub](https://github.com/open-webui/open-webui) +- [Tailscale Serve docs](https://tailscale.com/kb/1242/tailscale-serve) +- [Tailscale Docker guide](https://tailscale.com/blog/docker-tailscale-guide) From c796fa26ff87f805b04ebe693d2046a0cf51579d Mon Sep 17 00:00:00 2001 From: wedge22 <34723349+wedge22@users.noreply.github.com> Date: Fri, 3 Apr 2026 19:42:42 -0700 Subject: [PATCH 4/9] Create serve.json --- services/open-webui/ts/config/serve.json | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 services/open-webui/ts/config/serve.json diff --git a/services/open-webui/ts/config/serve.json b/services/open-webui/ts/config/serve.json new file mode 100644 index 0000000..7fc99c8 --- /dev/null +++ b/services/open-webui/ts/config/serve.json @@ -0,0 +1,19 @@ +{ + "TCP": { + "443": { + "HTTPS": true + } + }, + "Web": { + "${TS_CERT_DOMAIN}:443": { + "Handlers": { + "/": { + "Proxy": "http://127.0.0.1:8080" + } + } + } + }, + "AllowFunnel": { + "${TS_CERT_DOMAIN}:443": false + } +} From 56ccfe8a7aabfc1b59afe028c5850e8c1474a85c Mon Sep 17 00:00:00 2001 From: wedge22 <34723349+wedge22@users.noreply.github.com> Date: Sun, 5 Apr 2026 11:01:50 -0700 Subject: [PATCH 5/9] Update README.md Updated as per guidelines --- services/open-webui/README.md | 44 +++++++++++++++-------------------- 1 file changed, 19 insertions(+), 25 deletions(-) diff --git a/services/open-webui/README.md b/services/open-webui/README.md index d53a07c..9c02ff1 100644 --- a/services/open-webui/README.md +++ b/services/open-webui/README.md @@ -4,39 +4,33 @@ This Docker Compose configuration sets up [Open WebUI](https://openwebui.com/) w ## Open WebUI -[Open WebUI](https://openwebui.com/) is a feature-rich, self-hosted AI platform that provides a ChatGPT-style interface for interacting with local and 
cloud-based AI models. It supports Ollama and any OpenAI-compatible API. Pairing it with Tailscale means your private AI interface is securely accessible from any of your devices — phone, laptop, or otherwise — without exposing it to the public internet. +[Open WebUI](https://openwebui.com/) is a feature-rich, self-hosted AI platform that provides a ChatGPT-style interface for local and cloud-based AI models. It supports Ollama and any OpenAI-compatible API. Pairing it with Tailscale means your private AI interface is securely accessible from any of your devices without exposing it to the public internet. ## Configuration Overview -In this setup, the `tailscale-open-webui` service runs Tailscale, which manages secure networking for Open WebUI. The `app-open-webui` service utilizes the Tailscale network stack via Docker's `network_mode: service:` configuration. This keeps the app Tailnet-only unless you intentionally expose ports. +In this setup, the `tailscale-open-webui` service runs Tailscale, which manages secure networking for Open WebUI. The `open-webui` service utilizes the Tailscale network stack via Docker's `network_mode: service:` configuration. This keeps the app Tailnet-only unless you intentionally expose ports. -## Prerequisites +## What to document for users -- A Tailscale account with an auth key ([generate one here](https://login.tailscale.com/admin/settings/keys)) -- MagicDNS and HTTPS enabled in your [Tailscale admin console](https://login.tailscale.com/admin/dns) -- Docker and Docker Compose installed -- An AI backend — Ollama running locally, on another machine, or an OpenAI-compatible API +- **Prerequisites**: Docker and Docker Compose installed. No special group membership, GPU, or devices required for CPU-only inference. A Tailscale account with an auth key from https://tailscale.com/admin/authkeys. 
+- **Volumes**: Pre-create `./open-webui-data` before deploying to avoid Docker creating a root-owned directory: `mkdir -p ./open-webui-data ./config ./ts/state` +- **MagicDNS/Serve**: Enable MagicDNS and HTTPS in your Tailscale admin console before deploying. The serve config proxies to port `8080` — this is hardcoded in the `configs` block and does not consume `.env` values. Uncomment `TS_ACCEPT_DNS=true` in `compose.yaml` if DNS resolution issues arise. +- **Ollama**: Set `OLLAMA_BASE_URL` in `.env` to point at your Ollama instance. Options: + - Same Docker host: `http://host.docker.internal:11434` + - LAN machine: `http://<lan-ip>:11434` (use the private IP of the machine running Ollama) + - Another Tailnet device: `http://100.x.x.x:11434` + - Leave blank to configure a different provider (e.g. OpenAI) via the UI after first launch. +- **Ports**: The `0.0.0.0:${SERVICEPORT}:${SERVICEPORT}` mapping is commented out by default. Uncomment only if LAN access is required alongside Tailnet access. +- **Gotchas**: + - Create your admin account immediately after first launch — Open WebUI is open to registration until the first user is created. + - Open WebUI requires WebSocket support — ensure nothing in your network path blocks WebSocket connections. + - After adding new models to Ollama, refresh the model list in Open WebUI via **Settings → Connections**. -## Setup +## Files to check -1. Copy `.env.example` to `.env` and fill in your values -2. Set `OLLAMA_BASE_URL` to point at your Ollama instance (see `.env.example` for examples), or leave it blank and configure a different API provider in the Open WebUI settings after first launch -3. Copy `serve.json` into `ts/config/serve.json` — it is mounted into the Tailscale container -4. Pre-create the data directory to avoid Docker creating it as root-owned: `mkdir -p ./data` -5. Run `docker compose config` to validate before deploying -6. Start the stack: `docker compose up -d` -7. 
On first launch, navigate to `https://..ts.net` and create your admin account — the server is open until the first user registers +Please check the following contents for validity as some variables need to be defined upfront. -## Gotchas - -- **First-run security**: Create your admin account immediately after deployment -- **WebSocket support**: Open WebUI requires WebSocket connections — ensure nothing in your network path blocks them -- **Ollama on the same host**: Use `host.docker.internal:11434` as the `OLLAMA_BASE_URL` to reach Ollama running on the Docker host -- **Ollama over Tailnet**: If Ollama runs on a different machine, use its Tailscale IP (e.g. `http://100.x.x.x:11434`) -- **No Ollama**: Leave `OLLAMA_BASE_URL` blank and configure OpenAI or another provider in the UI after first launch -- **Health check**: The compose uses `tailscale status` for the health check. The `41234/healthz` endpoint is not available in userspace mode (`TS_USERSPACE=true`) -- **MagicDNS**: `TS_CERT_DOMAIN` in `serve.json` is populated automatically by Tailscale at runtime — you do not set it manually -- **LAN access**: Ports are commented out by default. 
Uncomment `SERVICEPORT` in `compose.yaml` if you also want LAN access alongside Tailnet access +- `.env` // Main variables: `TS_AUTHKEY`, `SERVICE`, `IMAGE_URL`, `OLLAMA_BASE_URL`, `WEBUI_SECRET_KEY` ## Resources From 6c7e9df78fc1e48b7fc8da8d9432c0d352ad762c Mon Sep 17 00:00:00 2001 From: wedge22 <34723349+wedge22@users.noreply.github.com> Date: Sun, 5 Apr 2026 11:06:31 -0700 Subject: [PATCH 6/9] Update .env.example --- services/open-webui/.env.example | 36 +++++++++++++++++++------------- 1 file changed, 22 insertions(+), 14 deletions(-) diff --git a/services/open-webui/.env.example b/services/open-webui/.env.example index cba9502..cf4fb58 100644 --- a/services/open-webui/.env.example +++ b/services/open-webui/.env.example @@ -1,17 +1,25 @@ -# Tailscale -TS_AUTHKEY=tskey-auth-your-key-here -TS_HOSTNAME=open-webui +#version=1.1 +#URL=https://github.com/tailscale-dev/ScaleTail +#COMPOSE_PROJECT_NAME= # Optional: only use when running multiple deployments on the same infrastructure. -# Open WebUI -# Point to your Ollama instance - can be local or remote +# Service Configuration +SERVICE=open-webui # Service name. Used as hostname in Tailscale and for container naming (app-${SERVICE}). +IMAGE_URL=ghcr.io/open-webui/open-webui:main # Docker image URL from container registry. + +# Network Configuration +SERVICEPORT=8080 # Port to expose to local network. Uncomment the "ports:" section in compose.yaml to enable. +DNS_SERVER=9.9.9.9 # Preferred DNS server for Tailscale. Uncomment the "dns:" section in compose.yaml to enable. + +# Tailscale Configuration +TS_AUTHKEY= # Auth key from https://tailscale.com/admin/authkeys. See: https://tailscale.com/kb/1085/auth-keys#generate-an-auth-key for instructions. + +# Open WebUI Configuration +# Point to your Ollama instance - can be local or remote. 
# Examples: -# Local Ollama on same host: http://host.docker.internal:11434 -# Remote Ollama on LAN: http://192.168.1.x:11434 -# Remote Ollama over Tailnet: http://100.x.x.x:11434 -# Use OpenAI API instead: leave blank and configure in the UI +# Ollama on same Docker host: http://host.docker.internal:11434 +# Ollama on LAN: http://192.168.1.x:11434 +# Ollama over Tailnet: http://100.x.x.x:11434 +# Leave blank to configure a different provider (e.g. OpenAI) via the UI. OLLAMA_BASE_URL=http://host.docker.internal:11434 -WEBUI_SECRET_KEY=change-me-to-a-random-secret -TZ=America/New_York - -# Optional: uncomment port in compose.yaml to expose on LAN -SERVICEPORT=8080 +WEBUI_SECRET_KEY= # Random secret key for session security. Generate with: openssl rand -hex 32 +TZ=Europe/Amsterdam # Timezone for the container. From 25161855f48258c03a3f812009a6c9b4c54b53ce Mon Sep 17 00:00:00 2001 From: wedge22 <34723349+wedge22@users.noreply.github.com> Date: Sun, 5 Apr 2026 11:07:54 -0700 Subject: [PATCH 7/9] Delete services/open-webui/ts directory --- services/open-webui/ts/config/serve.json | 19 ------------------- 1 file changed, 19 deletions(-) delete mode 100644 services/open-webui/ts/config/serve.json diff --git a/services/open-webui/ts/config/serve.json b/services/open-webui/ts/config/serve.json deleted file mode 100644 index 7fc99c8..0000000 --- a/services/open-webui/ts/config/serve.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "TCP": { - "443": { - "HTTPS": true - } - }, - "Web": { - "${TS_CERT_DOMAIN}:443": { - "Handlers": { - "/": { - "Proxy": "http://127.0.0.1:8080" - } - } - } - }, - "AllowFunnel": { - "${TS_CERT_DOMAIN}:443": false - } -} From 5c76be51a982ee5fe7f0dc5194be2a9442e36424 Mon Sep 17 00:00:00 2001 From: wedge22 <34723349+wedge22@users.noreply.github.com> Date: Sun, 5 Apr 2026 11:08:41 -0700 Subject: [PATCH 8/9] Update compose.yaml updating as per feedback --- services/open-webui/compose.yaml | 86 +++++++++++++++++++++----------- 1 file changed, 57 insertions(+), 
29 deletions(-) diff --git a/services/open-webui/compose.yaml b/services/open-webui/compose.yaml index a2e4f5c..291386f 100644 --- a/services/open-webui/compose.yaml +++ b/services/open-webui/compose.yaml @@ -1,42 +1,70 @@ +configs: + ts-serve: + content: | + {"TCP":{"443":{"HTTPS":true}}, + "Web":{"$${TS_CERT_DOMAIN}:443": + {"Handlers":{"/": + {"Proxy":"http://127.0.0.1:8080"}}}}, + "AllowFunnel":{"$${TS_CERT_DOMAIN}:443":false}} + services: - tailscale-open-webui: - image: tailscale/tailscale:latest - container_name: tailscale-open-webui - hostname: ${TS_HOSTNAME} - restart: unless-stopped +# Make sure you have updated/checked the .env file with the correct variables. +# All the ${ xx } need to be defined there. + # Tailscale Sidecar Configuration + tailscale: + image: tailscale/tailscale:latest # Image to be used + container_name: tailscale-${SERVICE} # Name for local container management + hostname: ${SERVICE} # Name used within your Tailscale environment environment: - TS_AUTHKEY=${TS_AUTHKEY} - TS_STATE_DIR=/var/lib/tailscale - - TS_SERVE_CONFIG=/config/serve.json - - TS_USERSPACE=true - - TS_EXTRA_ARGS=--advertise-tags=tag:container + - TS_SERVE_CONFIG=/config/serve.json # Tailscale Serve configuration to expose the web interface on your local Tailnet + - TS_USERSPACE=false + - TS_ENABLE_HEALTH_CHECK=true # Enable healthcheck endpoint: "/healthz" + - TS_LOCAL_ADDR_PORT=127.0.0.1:41234 # The : for the healthz endpoint + #- TS_ACCEPT_DNS=true # Uncomment when using MagicDNS + - TS_AUTH_ONCE=true + configs: + - source: ts-serve + target: /config/serve.json volumes: - - ./ts/state:/var/lib/tailscale - - ./ts/config:/config - - /dev/net/tun:/dev/net/tun + - ./config:/config # Config folder used to store Tailscale files - you may need to change the path + - ./ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path + devices: + - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work cap_add: - - NET_ADMIN - - 
SYS_MODULE + - net_admin # Tailscale requirement + #ports: + # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICEPORT} to the local network - may be removed if only exposure to your Tailnet is required + # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below + #dns: + # - ${DNS_SERVER} healthcheck: - test: ["CMD", "tailscale", "status"] - interval: 1m - timeout: 10s - retries: 3 - start_period: 10s + test: ["CMD", "wget", "--spider", "-q", "http://127.0.0.1:41234/healthz"] # Check Tailscale has a Tailnet IP and is operational + interval: 1m # How often to perform the check + timeout: 10s # Time to wait for the check to succeed + retries: 3 # Number of retries before marking as unhealthy + start_period: 10s # Time to wait before starting health checks + restart: always - app-open-webui: - image: ghcr.io/open-webui/open-webui:main - container_name: app-open-webui - restart: unless-stopped - depends_on: - tailscale-open-webui: - condition: service_healthy - network_mode: service:tailscale-open-webui + # Open WebUI + application: + image: ${IMAGE_URL} # Image to be used + network_mode: service:tailscale # Sidecar configuration to route Open WebUI through Tailscale + container_name: app-${SERVICE} # Name for local container management environment: - OLLAMA_BASE_URL=${OLLAMA_BASE_URL} - WEBUI_SECRET_KEY=${WEBUI_SECRET_KEY} - TZ=${TZ} volumes: - - ./data:/app/backend/data - # ports: - # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Uncomment to expose on LAN in addition to Tailnet + - ./open-webui-data:/app/backend/data + depends_on: + tailscale: + condition: service_healthy + healthcheck: + test: ["CMD", "pgrep", "-f", "open-webui"] # Check if open-webui process is running + interval: 1m # How often to perform the check + timeout: 10s # Time to wait for the check to succeed + retries: 3 # Number of retries before marking as unhealthy + start_period: 30s # Time to wait before starting health checks + restart: always From 
67eb8cb4626344c2978dd11b7cb2cf66569690c0 Mon Sep 17 00:00:00 2001 From: Bart <57799908+crypt0rr@users.noreply.github.com> Date: Mon, 6 Apr 2026 08:56:09 +0200 Subject: [PATCH 9/9] Fix link formatting in README.md prerequisites section Updated the link format for Tailscale auth key in prerequisites. --- services/open-webui/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/open-webui/README.md b/services/open-webui/README.md index 9c02ff1..3ee50cd 100644 --- a/services/open-webui/README.md +++ b/services/open-webui/README.md @@ -12,7 +12,7 @@ In this setup, the `tailscale-open-webui` service runs Tailscale, which manages ## What to document for users -- **Prerequisites**: Docker and Docker Compose installed. No special group membership, GPU, or devices required for CPU-only inference. A Tailscale account with an auth key from https://tailscale.com/admin/authkeys. +- **Prerequisites**: Docker and Docker Compose installed. No special group membership, GPU, or devices required for CPU-only inference. A Tailscale account with an auth key from <https://tailscale.com/admin/authkeys>. - **Volumes**: Pre-create `./open-webui-data` before deploying to avoid Docker creating a root-owned directory: `mkdir -p ./open-webui-data ./config ./ts/state` - **MagicDNS/Serve**: Enable MagicDNS and HTTPS in your Tailscale admin console before deploying. The serve config proxies to port `8080` — this is hardcoded in the `configs` block and does not consume `.env` values. Uncomment `TS_ACCEPT_DNS=true` in `compose.yaml` if DNS resolution issues arise. - **Ollama**: Set `OLLAMA_BASE_URL` in `.env` to point at your Ollama instance. Options: