This guide provides production-ready Docker deployment configurations for Uptime Kuma, including security hardening, volume management, and operational best practices.
Latest Docker Image:
louislam/uptime-kuma:2 or ghcr.io/louislam/uptime-kuma:2
Latest Stable Version: v2.1.1 (February 13, 2026)
Supported Architectures: amd64, arm64, arm/v7
| Registry | Image | Tags | Description |
|---|---|---|---|
| Docker Hub | louislam/uptime-kuma | :2, :2.1.1, :latest | Primary image |
| GitHub Container Registry | ghcr.io/louislam/uptime-kuma | :2, :2.1.1, :latest | Mirror image |
| Slim Variant | louislam/uptime-kuma | :2-slim, :2-slim-rootless | Smaller footprint |
# Production (recommended - tracks v2.x stable)
louislam/uptime-kuma:2
# Pinned version (maximum reproducibility)
louislam/uptime-kuma:2.1.1
# Development/testing
louislam/uptime-kuma:latest
# Minimal footprint (no extra tools)
louislam/uptime-kuma:2-slim
# Rootless container (enhanced security)
louislam/uptime-kuma:2-slim-rootless
⚠️ Important: Avoid using `:latest` in production. Pin to a specific version for reproducibility.
docker run -d --restart=always \
--name uptime-kuma \
-p 3001:3001 \
-v uptime-kuma:/app/data \
-e TZ=Europe/Berlin \
louislam/uptime-kuma:2
# Check container status
docker ps | grep uptime-kuma
# View logs
docker logs -f uptime-kuma
# Access application
curl http://localhost:3001
Create docker-compose.yml:
services:
uptime-kuma:
image: louislam/uptime-kuma:2
container_name: uptime-kuma
restart: unless-stopped
ports:
- "3001:3001"
volumes:
- uptime-kuma-data:/app/data
environment:
- TZ=Europe/Berlin
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:3001"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
volumes:
uptime-kuma-data:
driver: local
Deploy:
docker compose up -d
docker compose ps
docker compose logs -f
services:
uptime-kuma:
image: louislam/uptime-kuma:2.1.1
container_name: uptime-kuma
restart: unless-stopped
ports:
- "127.0.0.1:3001:3001" # Bind to localhost only
volumes:
- uptime-kuma-data:/app/data
- /etc/localtime:/etc/localtime:ro # Sync host timezone
environment:
- TZ=Europe/Berlin
- UPTIME_KUMA_DISABLE_FRAME_SAMEORIGIN=0
- NODE_ENV=production
networks:
- monitoring-network
security_opt:
- no-new-privileges:true
read_only: false # Must be false for data persistence
cap_drop:
- ALL
cap_add:
- NET_BIND_SERVICE
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:3001"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
logging:
driver: "json-file"
options:
max-size: "10m"
max-file: "3"
labels:
- "com.uptime-kuma.app=monitoring"
- "com.uptime-kuma.environment=production"
volumes:
uptime-kuma-data:
driver: local
driver_opts:
type: none
o: bind
device: /opt/uptime-kuma/data # Explicit host path
networks:
monitoring-network:
driver: bridge
ipam:
config:
- subnet: 172.28.0.0/16
# Create data directory with proper permissions
sudo mkdir -p /opt/uptime-kuma/data
sudo chown 1000:1000 /opt/uptime-kuma/data
sudo chmod 750 /opt/uptime-kuma/data
# Deploy
docker compose -f docker-compose.prod.yml up -d
# Verify
docker compose ps
docker compose logs -f uptime-kuma
Uptime Kuma v2.0+ supports external databases for better scalability.
services:
uptime-kuma:
image: louislam/uptime-kuma:2
depends_on:
postgres:
condition: service_healthy
environment:
- DATABASE_TYPE=postgres
- DATABASE_HOST=postgres
- DATABASE_PORT=5432
- DATABASE_NAME=uptime_kuma
- DATABASE_USER=uptime_kuma
- DATABASE_PASSWORD=your-secure-password
- DATABASE_SSL=true
postgres:
image: postgres:16-alpine
restart: unless-stopped
volumes:
- postgres-data:/var/lib/postgresql/data
environment:
- POSTGRES_DB=uptime_kuma
- POSTGRES_USER=uptime_kuma
- POSTGRES_PASSWORD=your-secure-password
healthcheck:
test: ["CMD-SHELL", "pg_isready -U uptime_kuma"]
interval: 10s
timeout: 5s
retries: 5
volumes:
postgres-data:
services:
uptime-kuma:
image: louislam/uptime-kuma:2
depends_on:
mariadb:
condition: service_healthy
environment:
- DATABASE_TYPE=mysql
- DATABASE_HOST=mariadb
- DATABASE_PORT=3306
- DATABASE_NAME=uptime_kuma
- DATABASE_USER=uptime_kuma
- DATABASE_PASSWORD=your-secure-password
mariadb:
image: mariadb:11
restart: unless-stopped
volumes:
- mariadb-data:/var/lib/mysql
environment:
- MYSQL_ROOT_PASSWORD=root-secure-password
- MYSQL_DATABASE=uptime_kuma
- MYSQL_USER=uptime_kuma
- MYSQL_PASSWORD=your-secure-password
healthcheck:
test: ["CMD", "healthcheck.sh", "--connect", "--innodb_initialized"]
interval: 10s
timeout: 5s
retries: 5
volumes:
mariadb-data:
services:
uptime-kuma:
image: louislam/uptime-kuma:2
expose:
- "3001"
networks:
- proxy-network
nginx:
image: nginx:alpine
restart: unless-stopped
ports:
- "80:80"
- "443:443"
volumes:
- ./nginx.conf:/etc/nginx/nginx.conf:ro
- ./ssl:/etc/nginx/ssl:ro
- certbot-www:/var/www/certbot
depends_on:
- uptime-kuma
networks:
- proxy-network
volumes:
certbot-www:
networks:
proxy-network:
driver: bridge
nginx.conf:
events {
worker_connections 1024;
}
http {
upstream uptime-kuma {
server uptime-kuma:3001;
}
server {
listen 80;
server_name uptime.example.com;
location /.well-known/acme-challenge/ {
root /var/www/certbot;
}
location / {
return 301 https://$server_name$request_uri;
}
}
server {
listen 443 ssl http2;
server_name uptime.example.com;
ssl_certificate /etc/nginx/ssl/fullchain.pem;
ssl_certificate_key /etc/nginx/ssl/privkey.pem;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256;
ssl_prefer_server_ciphers off;
ssl_session_cache shared:SSL:10m;
ssl_session_timeout 1d;
# Security headers
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-Content-Type-Options "nosniff" always;
add_header X-XSS-Protection "1; mode=block" always;
add_header Referrer-Policy "strict-origin-when-cross-origin" always;
location / {
proxy_pass http://uptime-kuma;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_read_timeout 86400;
}
}
}
services:
uptime-kuma:
image: louislam/uptime-kuma:2
labels:
- "traefik.enable=true"
- "traefik.http.routers.uptime-kuma.rule=Host(`uptime.example.com`)"
- "traefik.http.routers.uptime-kuma.entrypoints=websecure"
- "traefik.http.routers.uptime-kuma.tls.certresolver=letsencrypt"
- "traefik.http.services.uptime-kuma.loadbalancer.server.port=3001"
networks:
- traefik-network
networks:
traefik-network:
external: true
services:
uptime-kuma:
# ... other config
security_opt:
- no-new-privileges:true
cap_drop:
- ALL
cap_add:
- NET_BIND_SERVICE
read_only: false # Required for data persistence
tmpfs:
- /tmp:noexec,nosuid,size=100m
user: "1000:1000" # Non-root user
services:
uptime-kuma:
image: louislam/uptime-kuma:2
secrets:
- db_password
- db_username
environment:
- DATABASE_PASSWORD_FILE=/run/secrets/db_password
- DATABASE_USERNAME_FILE=/run/secrets/db_username
secrets:
db_password:
file: ./secrets/db_password.txt
db_username:
file: ./secrets/db_username.txt
networks:
monitoring-network:
driver: bridge
ipam:
config:
- subnet: 172.28.0.0/16
driver_opts:
com.docker.network.bridge.enable_ip_masquerade: "true"
com.docker.network.bridge.enable_icc: "false"
Create backup-uptime-kuma.sh:
#!/bin/bash
set -e
BACKUP_DIR="/backup/uptime-kuma"
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_FILE="${BACKUP_DIR}/uptime-kuma-${DATE}.tar.gz"
# Create backup directory
mkdir -p "${BACKUP_DIR}"
# Stop container for a consistent snapshot (remove the stop/start lines to back up live;
# note: with 'set -e', a failed backup step below will leave the container stopped)
docker stop uptime-kuma
# Backup volume
docker run --rm \
-v uptime-kuma-data:/source:ro \
-v "${BACKUP_DIR}":/backup \
alpine tar czf /backup/uptime-kuma-${DATE}.tar.gz -C /source .
# Restart container
docker start uptime-kuma
# Verify backup
tar tzf "${BACKUP_FILE}" > /dev/null && echo "Backup verified successfully"
# Cleanup old backups (keep 30 days)
find "${BACKUP_DIR}" -name "uptime-kuma-*.tar.gz" -mtime +30 -delete
echo "Backup completed: ${BACKUP_FILE}"
Make executable and schedule:
chmod +x backup-uptime-kuma.sh
sudo crontab -e
# Add: 0 2 * * * /path/to/backup-uptime-kuma.sh
# Stop container
docker compose down
# Remove existing volume
docker volume rm uptime-kuma-data
# Create new volume
docker volume create uptime-kuma-data
# Restore backup
docker run --rm \
-v uptime-kuma-data:/target \
-v $(pwd):/backup \
alpine tar xzf /backup/uptime-kuma-YYYYMMDD_HHMMSS.tar.gz -C /target
# Restart container
docker compose up -d
services:
uptime-kuma:
logging:
driver: "json-file"
options:
max-size: "10m"
max-file: "3"
compress: "true"
# Real-time logs
docker compose logs -f uptime-kuma
# Last 100 lines
docker compose logs --tail=100 uptime-kuma
# Export logs
docker compose logs uptime-kuma > uptime-kuma.log
Uptime Kuma v2.1.0+ supports Prometheus metrics export. Configure in application settings and scrape:
scrape_configs:
- job_name: 'uptime-kuma'
static_configs:
- targets: ['uptime-kuma:3001']
metrics_path: '/metrics'
Container Won’t Start:
# Check logs
docker logs uptime-kuma
# Check port conflicts
sudo ss -tlnp | grep 3001
# Verify volume permissions
ls -la /opt/uptime-kuma/data
Database Connection Failed:
# Test database connectivity
docker exec uptime-kuma nc -zv postgres 5432
# Check environment variables
docker exec uptime-kuma env | grep DATABASE
High Memory Usage:
# Check container stats
docker stats uptime-kuma
# Limit memory in compose
services:
uptime-kuma:
deploy:
resources:
limits:
memory: 512M
reservations:
memory: 256M
Volume Permission Issues:
# Fix permissions
sudo chown -R 1000:1000 /opt/uptime-kuma/data
sudo chmod -R 750 /opt/uptime-kuma/data
Any questions?
Feel free to contact us. Find all contact information on our contact page.