Comprehensive security hardening guide for Proxmox VE covering management plane security, cluster hardening, VM isolation, and compliance considerations.
Proxmox VE security requires a layered approach covering the management plane, cluster communication, guest isolation, storage encryption, and monitoring, as detailed in the sections below.
Firewall Rules:
# Restrict the PVE web UI (TCP 8006) to the management subnet only.
# Allow only from management network
iptables -A INPUT -p tcp -s 192.168.1.0/24 --dport 8006 -j ACCEPT
iptables -A INPUT -p tcp --dport 8006 -j DROP
# Persist rules across reboots — plain iptables rules are lost on reboot,
# so install iptables-persistent before saving
apt install -y iptables-persistent
iptables-save > /etc/iptables/rules.v4
Proxmox Firewall:
# The Proxmox firewall is configured through /etc/pve/firewall/cluster.fw;
# "pve-firewall zones add" / "pve-firewall ruleset add" are not real
# subcommands (pve-firewall only supports start|stop|restart|status|compile|localnet).
cat > /etc/pve/firewall/cluster.fw <<'EOF'
[OPTIONS]
enable: 1

[RULES]
# Allow the management network to reach the web UI and SSH
IN ACCEPT -source 192.168.1.0/24 -p tcp -dport 8006
IN ACCEPT -source 192.168.1.0/24 -p tcp -dport 22
EOF
# Verify the compiled ruleset and the firewall status
pve-firewall compile
pve-firewall status
Two-Factor Authentication (TFA):
# "pveum user modify <user> --enable tfa" is not valid pveum syntax.
# Enforce TOTP (OATH) two-factor for an entire authentication realm,
# which covers root@pam and every other user in that realm:
pveum realm modify pam --tfa type=oath
# Individual users then enroll their TOTP secret via the web UI:
# Datacenter → Permissions → Two Factor (or the per-user TFA dialog)
Password Policy:
# Proxmox VE has no "password-policy" section in /etc/pve/user.cfg —
# that file stores users/groups/ACLs only. For @pam realm users,
# password quality is enforced through PAM:
apt install -y libpam-pwquality
# Edit /etc/security/pwquality.conf:
#   minlen  = 12
#   ucredit = -1   # require at least one uppercase letter
#   lcredit = -1   # require at least one lowercase letter
#   dcredit = -1   # require at least one digit
#   ocredit = -1   # require at least one special character
# Enforce password aging (90 days) on local accounts:
chage --maxdays 90 root
# Order matters: install your public key FIRST, while password
# authentication still works — otherwise you lock yourself out.
# Generate SSH key (ed25519, hardened KDF rounds)
ssh-keygen -t ed25519 -a 100
# Copy to Proxmox host
ssh-copy-id root@proxmox-host
# Test key authentication BEFORE disabling passwords
ssh root@proxmox-host
# Then harden /etc/ssh/sshd_config:
# PermitRootLogin prohibit-password
# PubkeyAuthentication yes
# PasswordAuthentication no
# Restart SSH to apply
systemctl restart sshd
# Create an API token WITH privilege separation (--privsep 1) so the
# token can hold FEWER privileges than its owning user.
# (--privsep 0 makes the token inherit ALL of the user's privileges,
# which contradicts the goal of a limited monitoring token.)
pveum user token add admin@pam monitoring-token --privsep 1
# Grant the token read-only auditing rights only
pveum acl modify / --tokens 'admin@pam!monitoring-token' --roles PVEAuditor
# Rotate tokens regularly (remove and re-create)
pveum user token remove admin@pam monitoring-token
pveum user token add admin@pam monitoring-token --privsep 1
# Enable security updates — the no-subscription repo is free but less
# tested than the enterprise repo; use pve-enterprise if licensed
echo "deb http://download.proxmox.com/debian/pve bookworm pve-no-subscription" > /etc/apt/sources.list.d/pve-no-subscription.list
# Update regularly — use dist-upgrade on PVE (plain "upgrade" can leave
# interdependent Proxmox packages in a broken half-updated state)
apt update
apt dist-upgrade -y
# Reboot if kernel updated
# NOTE(review): /var/run/reboot-required is created by
# update-notifier-common, which is not installed on PVE by default —
# confirm, or compare "uname -r" against the newest installed kernel.
[ -f /var/run/reboot-required ] && reboot
Network Separation:
# /etc/network/interfaces
# Three physically separate networks so congestion or compromise on one
# plane (guest traffic, cluster heartbeat, storage) cannot affect the
# others. Corosync in particular is latency-sensitive and should never
# share a link with bulk storage traffic.
# NOTE(review): stanza options (address/bridge-ports) are conventionally
# indented under their iface line — confirm formatting before deploying.
# Management network
auto vmbr0
iface vmbr0 inet static
address 192.168.1.100/24
bridge-ports eno1
# Cluster/corosync network (isolated)
auto vmbr1
iface vmbr1 inet static
address 10.0.0.100/24
bridge-ports eno2
# Storage network (isolated)
auto vmbr2
iface vmbr2 inet static
address 10.0.1.100/24
bridge-ports eno3
# Cluster traffic encryption — edit /etc/pve/corosync.conf (always bump
# config_version in the same edit; pmxcfs then syncs it cluster-wide):
# totem {
#   cluster_name: PRODUCTION
#   crypto_cipher: aes256
#   crypto_hash: sha256
# }
# Generate a new corosync auth key
corosync-keygen
# Distribute it to every node
scp /etc/corosync/authkey root@pve2:/etc/corosync/
scp /etc/corosync/authkey root@pve3:/etc/corosync/
# Live-migration encryption and the dedicated migration network are
# BOTH set in /etc/pve/datacenter.cfg — NOT via pvesm (pvesm manages
# storage, and has no --migration-network option):
# migration: secure,network=10.0.0.0/24
# Edit /etc/sysctl.d/99-proxmox-hardening.conf
# Block unprivileged user-namespace creation (common local-privesc
# surface). NOTE(review): containers are started by root so this is
# normally safe, but confirm against any workload using user namespaces.
kernel.unprivileged_userns_clone = 0
# Forbid BPF program loading by unprivileged users
kernel.unprivileged_bpf_disabled = 1
# NOTE(review): disabling forwarding breaks NAT/routed guest networking;
# plain bridged setups do not need forwarding — confirm before applying.
net.ipv4.ip_forward = 0
net.ipv6.conf.all.forwarding = 0
# Hide kernel pointers and dmesg output from unprivileged users
kernel.kptr_restrict = 2
kernel.dmesg_restrict = 1
# Apply
sysctl -p /etc/sysctl.d/99-proxmox-hardening.conf
# Remove default credentials from templates
# Windows: remove the local administrator password
# Linux: remove the root password, enforce SSH keys
# Configure cloud-init through qm — do NOT hand-edit
# /etc/pve/qemu-server/VMID.conf: the sshkeys value is stored
# URL-encoded there and pasting a raw key corrupts the config.
qm set VMID --ciuser ubuntu
qm set VMID --cipassword "$(openssl rand -base64 18)"
qm set VMID --sshkeys /root/.ssh/id_ed25519.pub
# Enable QEMU seccomp sandboxing via extra args in
# /etc/pve/qemu-server/VMID.conf:
# args: -sandbox on
# Expose CPU mitigation flags to guests (MDS/Spectre related):
# cpu: host,hidden=1,flags=+md-clear;+pcid;+spec-ctrl;+ssbd;+pdpe1gb
# Add a vTPM (required for Windows 11). The tpmstate volume must live
# on a PVE storage — a raw file path is not valid tpmstate0 syntax:
qm set VMID --tpmstate0 local-lvm:1,version=v2.0
# Use unprivileged containers (the default; container UIDs are mapped
# to unprivileged host UIDs):
# unprivileged: 1
# WARNING: "features: mount=nfs,cifs" GRANTS the container extra mount
# capability — it loosens isolation rather than restricting it.
# Omit mount features entirely unless a workload genuinely needs them.
# Keep nesting disabled unless required (0 is already the default):
pct set VMID --features nesting=0
# The firewall flag is set per virtual NIC — "qm set --firewall" is not
# a valid option:
qm set VMID --net0 virtio,bridge=vmbr0,firewall=1
# Per-VM rules live in /etc/pve/firewall/VMID.fw
# ("pve-firewall ruleset add" is not a real subcommand):
cat > /etc/pve/firewall/VMID.fw <<'EOF'
[OPTIONS]
enable: 1
policy_in: DROP

[RULES]
IN ACCEPT -p tcp -dport 22
IN ACCEPT -p tcp -dport 80
EOF
# vzdump has no --encrypt flag. Client-side backup encryption is a
# Proxmox Backup Server feature: set an encryption key on the PBS
# storage entry (or via Datacenter → Storage → <PBS> → Encryption):
pvesm set pbs-storage --encryption-key autogen
# NOTE(review): keep a copy of the generated key — losing it makes the
# backups unrecoverable.
# Restrict which nodes may use the backup storage —
# /etc/pve/storage.cfg:
# dir: backup-storage
#         path /backup
#         content backup
#         nodes pve1,pve2
# ZFS encryption is a DATASET property: it must be set with -O (capital
# O, filesystem property), not -o (pool property), together with
# keyformat + keylocation. "keysource" is obsolete Solaris syntax.
zpool create -O encryption=aes-256-gcm -O keyformat=passphrase -O keylocation=prompt rpool /dev/sda
# Or with a raw keyfile
zpool create -O encryption=aes-256-gcm -O keyformat=raw -O keylocation=file:///etc/zfs/keys/rpool.key rpool /dev/sda
# Encrypt Ceph on-wire traffic (messenger v2 secure mode)
ceph config set global ms_cluster_mode secure
ceph config set global ms_service_mode secure
# CephX authentication is enabled by default; verify:
ceph auth list
# Edit /etc/network/interfaces
# A VLAN on top of a bridge is declared as vmbr0.100 — the original
# stanza mixed "bridge-ports vmbr0" with vlan-raw-device in a single
# vmbr0v100 bridge, which is not a valid configuration.
auto vmbr0.100
iface vmbr0.100 inet static
        address 192.168.100.100/24
# Alternatively, mark vmbr0 itself VLAN-aware
# (bridge-vlan-aware yes / bridge-vids 2-4094) and tag each guest NIC.
# Rate-limit guest traffic with the per-NIC "rate" option (MB/s) —
# the sysfs path used before does not exist: brport/ lives on bridge
# PORTS (e.g. tap100i0), not on the bridge device, and bcast_flood
# is a 0/1 flag rather than a rate value.
qm set VMID --net0 virtio,bridge=vmbr0,rate=100
# Optionally stop broadcast flooding toward a specific bridge port:
echo 0 > /sys/class/net/tap100i0/brport/broadcast_flood
# Disable AND stop services that have no place on a hypervisor
# (--now stops them immediately; ignore units that are not installed)
systemctl disable --now cups avahi-daemon bluetooth 2>/dev/null || true
# Review what is still running
systemctl list-units --type=service --state=running
# Install auditd
apt install -y auditd
# Persist audit rules in /etc/audit/rules.d/ — rules added with
# auditctl alone are lost on reboot.
cat > /etc/audit/rules.d/proxmox.rules <<'EOF'
-w /etc/pve -p wa -k proxmox-config
-w /var/log -p wa -k proxmox-logs
EOF
augenrules --load
# View audit events for PVE config changes
ausearch -k proxmox-config
# Monitor failed logins
grep "authentication failure" /var/log/auth.log
# Monitor successful root logins
grep "session opened for user root" /var/log/auth.log
# Follow the PVE API/UI daemons live
journalctl -f -u pveproxy
journalctl -f -u pvedaemon
# Notification address — /etc/pve/datacenter.cfg
# (or via web UI: Datacenter → Options):
# mailto: security@example.com
Proxmox VE can be hardened to align with CIS benchmarks: since PVE is Debian-based, apply the CIS Debian Linux benchmark's OS-level controls (filesystem permissions, auditing, SSH, and service minimization) on top of the Proxmox-specific measures above.
For GDPR compliance, combine storage and backup encryption, role-based access control with least-privilege API tokens, and audit logging so you can demonstrate appropriate technical and organizational measures.
Questions about hardening your deployment?
You can find all contact information on our contact page.