Comprehensive configuration guide for Proxmox VE including node settings, cluster management, storage, networking, and VM optimization.
| File | Purpose | Location |
|---|---|---|
| /etc/pve/datacenter.cfg | Datacenter-wide Proxmox configuration | All nodes |
| /etc/network/interfaces | Network interface configuration | All nodes |
| /etc/pve/storage.cfg | Storage configuration | All nodes |
| /etc/pve/user.cfg | User and permission configuration | All nodes |
| /etc/hosts | Hostname resolution | All nodes |
| /etc/hostname | System hostname | All nodes |
Proxmox VE uses Linux networking with bridges for VM/container connectivity.
Default Configuration (/etc/network/interfaces):
auto lo
iface lo inet loopback
auto eno1
iface eno1 inet manual
auto vmbr0
iface vmbr0 inet static
address 192.168.1.100/24
gateway 192.168.1.1
bridge-ports eno1
bridge-stp off
bridge-fd 0
dns-nameservers 8.8.8.8 8.8.4.4
Multi-NIC Configuration (Recommended for Production):
auto lo
iface lo inet loopback
# Management network
auto eno1
iface eno1 inet manual
auto vmbr0
iface vmbr0 inet static
address 192.168.1.100/24
gateway 192.168.1.1
bridge-ports eno1
bridge-stp off
bridge-fd 0
dns-nameservers 8.8.8.8 8.8.4.4
# VM traffic network
auto eno2
iface eno2 inet manual
auto vmbr1
iface vmbr1 inet static
address 192.168.2.100/24
bridge-ports eno2
bridge-stp off
bridge-fd 0
# Storage network (iSCSI/Ceph)
auto eno3
iface eno3 inet static
address 10.0.0.100/24
CPU Power Management:
# Check current governor
cat /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor
# Set to performance mode
echo performance | tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor
Huge Pages (for VM performance):
# Enable huge pages
echo "vm.nr_hugepages = 1024" >> /etc/sysctl.conf
sysctl -p
On the first node:
# Create cluster
pvecm create CLUSTER_NAME
# Example
pvecm create PRODUCTION
On additional nodes:
# Join existing cluster
pvecm add IP_OF_FIRST_NODE
# Example
pvecm add 192.168.1.100
# Check cluster status
pvecm status
# View cluster nodes
pvecm nodes
# Check quorum (quorum information is part of the 'pvecm status' output)
pvecm status | grep -i quorum
Legacy cluster configuration example (/etc/pve/cluster.conf; current Proxmox VE versions use /etc/pve/corosync.conf instead):
<?xml version="1.0"?>
<cluster name="PRODUCTION" config_version="3">
<fence_daemon post_fail_delay="0" post_join_delay="3"/>
<clusternodes>
<clusternode name="pve1" nodeid="1">
<fence/>
</clusternode>
<clusternode name="pve2" nodeid="2">
<fence/>
</clusternode>
<clusternode name="pve3" nodeid="3">
<fence/>
</clusternode>
</clusternodes>
</cluster>
Default local storage for ISO images and container templates:
# Configuration in /etc/pve/storage.cfg
dir: local
path /var/lib/vz
content iso,vztmpl,backup
maxfiles 0
shared 0
# Via command line (storage ID is a positional argument right after the type)
pvesm add nfs nfs-storage --server nfs.example.com --export /export/proxmox --content iso,vztmpl,backup
# Via web UI: Datacenter → Storage → Add → NFS
# Discover targets
iscsiadm -m discovery -t sendtargets -p 192.168.1.50
# Login to target
iscsiadm -m node -T TARGET_IQN -p 192.168.1.50 --login
# Add to Proxmox (storage ID follows the storage type)
pvesm add iscsi iscsi-storage --portal 192.168.1.50 --target TARGET_IQN
# Create ZFS pool
zpool create -f rpool mirror /dev/sda /dev/sdb
# Add to Proxmox (storage ID follows the storage type)
pvesm add zfspool zfs-storage --pool rpool --content rootdir,images
# Initialize Ceph on cluster
pveceph init
# Create CephFS
pveceph fs create
# Add OSDs
pveceph osd create /dev/sdc
pveceph osd create /dev/sdd
# Monitor status
pveceph status
# Start HA manager
systemctl start pve-ha-crm
systemctl enable pve-ha-crm
systemctl start pve-ha-lrm
systemctl enable pve-ha-lrm
# Add VM to HA
ha-manager add vm:100
# Set VM as highly available
ha-manager set vm:100 --state started
# Create HA group with its member nodes
ha-manager groupadd group1 --nodes "pve1,pve2"
# Set per-node priorities (higher value = preferred node), using node:priority pairs
ha-manager groupset group1 --nodes "pve1:10,pve2:5"
# Edit VM config: /etc/pve/qemu-server/VMID.conf
# CPU type (host-passthrough for best performance)
cpu: host
# CPU cores
cores: 4
# CPU sockets
sockets: 1
# NUMA (for large VMs)
numa: 1
# Fixed memory
memory: 8192
# Or with ballooning
balloon: 0
memory: 8192
# VirtIO SCSI with SSD emulation
scsihw: virtio-scsi-single
scsi0: local-lvm:vm-100-disk-0,size=100G,ssd=1,discard=on
# Enable IO thread
iothread: 1
# VirtIO network (best performance)
net0: virtio=XX:XX:XX:XX:XX:XX,bridge=vmbr0,firewall=1
# Enable MTU for jumbo frames
net0: virtio=XX:XX:XX:XX:XX:XX,bridge=vmbr0,mtu=9000
# Via command line (one-off backup of all guests; recurring jobs are scheduled
# through the web UI, which writes them to /etc/pve/jobs.cfg)
vzdump --all --mode snapshot --storage backup-storage
# Or via web UI: Datacenter → Backup → Add
# Keep 7 daily, 4 weekly, 6 monthly
vzdump --all --mode snapshot --storage backup-storage --prune-backups keep-daily=7,keep-weekly=4,keep-monthly=6
# Add PBS storage (storage type is 'pbs'; ID follows the type)
pvesm add pbs pbs-storage --server pbs.example.com --datastore backup --username proxmox@pbs --password PASSWORD
# For the root user, TFA is configured via the web UI:
# Datacenter → Permissions → Users → root@pam → TFA
# (the 'pveum user tfa' CLI subcommands can list or remove existing TFA entries)
# Enable the firewall datacenter-wide in /etc/pve/firewall/cluster.fw:
# [OPTIONS]
# enable: 1
# Add rules in the [RULES] section of the same file, e.g.:
# IN ACCEPT -p tcp -dport 22
# IN ACCEPT -p tcp -dport 8006
# Check firewall status
pve-firewall status
# Create API token
pveum user token add root@pam automation-token --privsep 0
# Use token for API calls
curl -H "Authorization: PVEAPIToken=root@pam!automation-token=SECRET" https://pve1:8006/api2/json/nodes
# Install smartmontools
apt install smartmontools
# Configure
smartctl -a /dev/sda
# Edit /etc/pve/datacenter.cfg
email_from: admin@example.com
# Or via web UI: Datacenter → Options → Email from
# View resource usage
pvesh get /nodes
# View specific node
pvesh get /nodes/pve1/status
# View VM status
pvesh get /nodes/pve1/qemu
Any questions?
Feel free to contact us. Find all contact information on our contact page.