This page provides a PostgreSQL configuration guide for production deployments on Debian and RHEL systems, updated for PostgreSQL 18 with current best practices.
On Debian and Ubuntu, the package layout stores configuration in:
/etc/postgresql/18/main/postgresql.conf
/etc/postgresql/18/main/pg_hba.conf
On RHEL, CentOS, and Fedora, common locations are:
/var/lib/pgsql/data/postgresql.conf
/var/lib/pgsql/data/pg_hba.conf
Or for versioned installs:
/var/lib/pgsql/18/data/postgresql.conf
/var/lib/pgsql/18/data/pg_hba.conf
# Network
listen_addresses = 'localhost,10.0.0.10' # Comma-separated list of addresses to bind; keep as narrow as possible
port = 5432
max_connections = 100 # Adjust based on your workload; each connection consumes memory
superuser_reserved_connections = 3 # Connection slots reserved for superusers
# Transactions and sockets
max_prepared_transactions = 0 # Leave at 0 unless you use two-phase commit (PREPARE TRANSACTION)
unix_socket_directories = '/var/run/postgresql' # Platform-specific socket directory
# Memory (adjust based on your server's RAM)
shared_buffers = 256MB # Start at 25% of total RAM (up to 8GB)
huge_pages = try # Use huge pages if available
temp_buffers = 8MB # Per-session temporary-table buffers
# NOTE: max_prepared_transactions is already set in the Network section above.
# Do not set a parameter twice in postgresql.conf -- the last occurrence
# silently wins, which makes earlier entries misleading.
# Work memory settings
work_mem = 4MB # Per-operation memory (sorts, hashes); multiplied across concurrent operations
maintenance_work_mem = 256MB # Maintenance operations (VACUUM, CREATE INDEX)
autovacuum_work_mem = -1 # -1 means fall back to maintenance_work_mem (not unlimited)
max_stack_depth = 7MB # Keep at least 1MB below the kernel stack limit (ulimit -s)
# Kernel resource usage
dynamic_shared_memory_type = posix # posix is correct on Linux; other values: sysv, windows, mmap
# WAL configuration (optimized for PostgreSQL 18)
wal_level = replica # minimal, replica, or logical
fsync = on # Ensure WAL records are flushed to disk; never disable in production
synchronous_commit = on # Wait for WAL flush before reporting commit success
# WAL size and retention
wal_sync_method = fsync # Method to use for WAL synchronization
full_page_writes = on # Write full pages to WAL after checkpoints (torn-page protection)
wal_compression = on # 'on' means pglz; since PostgreSQL 15 'lz4' and 'zstd' are also valid
wal_buffers = -1 # Auto-tune (usually 1/32 of shared_buffers, min 64kB)
wal_writer_delay = 200ms # Time between WAL writer flushes
commit_delay = 0 # Delay in microseconds before committing (group-commit tuning)
commit_siblings = 5 # Minimum concurrent active transactions before commit_delay applies
# Checkpoint configuration
checkpoint_timeout = 15min # Maximum time between checkpoints
max_wal_size = 4GB # Soft limit; a checkpoint is triggered when WAL reaches this size
min_wal_size = 1GB # WAL kept recycled rather than removed below this size
checkpoint_completion_target = 0.9 # Spread checkpoint I/O over this fraction of the interval
checkpoint_warning = 30s # Warn if checkpoints happen too frequently
# Planner cost constants
seq_page_cost = 1.0 # Cost of a sequential page fetch
random_page_cost = 4.0 # Cost of a nonsequential page fetch; lower to ~1.1 on SSD/NVMe storage
cpu_tuple_cost = 0.01 # Cost of processing each tuple
cpu_index_tuple_cost = 0.005 # Cost of processing each index entry
cpu_operator_cost = 0.0025 # Cost of processing each operator
effective_cache_size = 2GB # Planner hint only (allocates nothing); set to ~75% of RAM
# Genetic query optimizer
geqo = on # Enable genetic query optimization for large join problems
geqo_threshold = 12 # Use GEQO for queries with at least this many FROM items
geqo_effort = 5 # GEQO effort level (1-10)
geqo_pool_size = 0 # Size of GEQO pool (0 = auto)
geqo_generations = 0 # Number of generations in GEQO (0 = auto)
geqo_selection_bias = 2.0 # GEQO selection bias (1.5-2.0)
geqo_seed = 0.0 # Seed for GEQO's random path selection (0-1)
# Other planner options
default_statistics_target = 100 # Default ANALYZE statistics target
constraint_exclusion = partition # 'partition' is the default (inheritance/UNION ALL children only)
cursor_tuple_fraction = 0.1 # Fraction of a cursor's rows expected to be fetched
from_collapse_limit = 8 # Number of FROM items before collapsing
join_collapse_limit = 8 # Number of JOIN items before flattening
# What to log
log_destination = 'stderr' # Valid values: stderr, csvlog, jsonlog, syslog (comma-separated)
logging_collector = on # Capture stderr/stdout into rotated log files
log_directory = 'log' # Log directory (relative paths are under the data directory)
log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # Log filename pattern (strftime escapes)
log_file_mode = 0600 # Creation mode for log files
log_truncate_on_rotation = off # Truncate (overwrite) rather than append on rotation
log_rotation_age = 1d # Automatic log file rotation age
log_rotation_size = 100MB # Automatic log file rotation size
# These are relevant only when log_destination includes 'syslog':
syslog_facility = 'LOCAL0' # Syslog "facility" to use
syslog_ident = 'postgres' # Syslog "ident" to use
syslog_sequence_numbers = on # Add sequence numbers to syslog messages
syslog_split_messages = on # Split long messages across multiple syslog lines
# Log lines produced by the server itself
log_min_messages = warning # Values: debug5, debug4, debug3, debug2, debug1,
# info, notice, warning, error, log, fatal, panic
log_min_error_statement = error # Values: debug5, debug4, debug3, debug2, debug1,
# info, notice, warning, error, log, fatal, panic
log_min_duration_statement = 1000 # Log statements slower than 1000 ms (-1 disables, 0 logs all)
log_checkpoints = on # Write checkpoint-related log messages
log_connections = on # Write connection-related log messages
log_disconnections = on # Write disconnection-related log messages
log_lock_waits = on # Log lock waits longer than deadlock_timeout
log_temp_files = 0 # 0 logs every temp file; -1 disables; >0 is a size threshold in kB
log_autovacuum_min_duration = 0 # 0 logs every autovacuum action; >0 is a threshold in ms
# Log-line configuration
log_line_prefix = '%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h ' # Special values:
# %a = application name
# %u = user name
# %d = database name
# %r = remote host and port
# %h = remote host
# %p = process ID
# %t = timestamp without milliseconds
# %m = timestamp with milliseconds
# %n = timestamp with milliseconds (as a Unix epoch)
# %i = command tag
# %e = SQLSTATE error code
# %c = session ID
# %l = session line number
# %s = session start timestamp
# %v = virtual transaction ID
# %x = transaction ID (0 if none)
# %q = stop here in non-session
# processes
# %% = '%'
log_timezone = 'UTC' # Sets log timezone
# Query/Index Statistics Collector
track_activities = on # Enable tracking of currently active queries
track_counts = on # Enable tracking of table and index hit/miss counts
track_io_timing = on # Enable timing of database I/O calls
track_functions = pl # Track user-defined function usage (none, pl, all)
# stats_temp_directory was removed in PostgreSQL 15 (cumulative statistics now
# live in shared memory). Setting it on PostgreSQL 18 is an unrecognized
# parameter and prevents the server from starting -- do not set it.
# Autovacuum settings
autovacuum = on # Enable autovacuum subprocess
# NOTE: log_autovacuum_min_duration is already configured in the logging
# section above; setting it twice is misleading (the last occurrence wins).
autovacuum_max_workers = 3 # Maximum number of autovacuum workers
autovacuum_naptime = 1min # Time between autovacuum runs per database
autovacuum_vacuum_threshold = 50 # Minimum row changes before vacuum
autovacuum_analyze_threshold = 50 # Minimum row changes before analyze
autovacuum_vacuum_scale_factor = 0.2 # Fraction of table size added to vacuum threshold
autovacuum_analyze_scale_factor = 0.1 # Fraction of table size added to analyze threshold
autovacuum_freeze_max_age = 200000000 # XID age at which to force an anti-wraparound vacuum
autovacuum_multixact_freeze_max_age = 400000000 # Multixact age at which to force a vacuum
autovacuum_vacuum_cost_delay = 20ms # Throttling delay (default is 2ms since PostgreSQL 12; 20ms is conservative)
autovacuum_vacuum_cost_limit = -1 # -1 = use vacuum_cost_limit
# Statement behavior
search_path = '"$user", public' # Schema search order
default_tablespace = '' # Default tablespace to use
temp_tablespaces = '' # Tablespaces for temporary objects
check_function_bodies = on # Check function bodies during CREATE FUNCTION
default_transaction_isolation = 'read committed' # Quotes required: the value contains a space
default_transaction_read_only = off # Default read-only status
default_transaction_deferrable = off # Default deferrable status
session_replication_role = origin # Session's role in replicated transactions
statement_timeout = 0 # Statement timeout in ms (0 = disabled)
lock_timeout = 0 # Lock-acquisition timeout in ms (0 = disabled)
idle_in_transaction_session_timeout = 0 # Idle-in-transaction timeout in ms (0 = disabled)
vacuum_freeze_table_age = 150000000 # XID age at which VACUUM scans the whole table
vacuum_multixact_freeze_table_age = 150000000 # Multixact equivalent of the setting above
bytea_output = hex # Output format for bytea
xmlbinary = base64 # Binary encoding in XML
xmloption = content # How implicit XML parsing treats values
gin_fuzzy_search_limit = 0 # Soft limit on rows returned by GIN searches (0 = none); not a memory limit
# Locale and Formatting
datestyle = 'iso, mdy' # Date/time formatting
intervalstyle = postgres # Interval formatting style
timezone = 'UTC' # Timezone for timestamps
# log_timezone is already set in the logging section above; do not set it
# twice -- the last occurrence in the file silently wins.
# Planner / JIT (not locale-related)
enable_parallel_hash = on # Enable parallel hash joins
jit_provider = 'llvmjit' # JIT provider (requires a server built with LLVM support)
pg_hba.conf (Debian/RHEL): use strict authentication methods and limit client address ranges.
# PostgreSQL Client Authentication Configuration File
# Rules are evaluated top to bottom; the FIRST matching line decides the method.
# TYPE DATABASE USER ADDRESS METHOD
# "local" is for Unix domain socket connections only; peer maps the OS user to the DB user
local all postgres peer
# Type, database, user, address, authentication method
# IPv4 local connections:
host all all 127.0.0.1/32 scram-sha-256
# IPv6 local connections:
host all all ::1/128 scram-sha-256
# Allow replication connections from localhost
local replication all peer
host replication all 127.0.0.1/32 scram-sha-256
host replication all ::1/128 scram-sha-256
# Private LAN application subnet (adjust to your network)
host all appuser 10.0.0.0/24 scram-sha-256
# NOTE(review): the 'all'-databases rule above already matches appuser on appdb,
# so the next, more specific line is never reached; tighten or reorder if the
# intent was to restrict appuser to appdb only.
host appdb appuser 10.0.0.0/24 scram-sha-256
# Optional replication user from private network
host replication repluser 10.0.0.0/24 scram-sha-256
# For production, never use the 'trust' method on network connections;
# the example below is intentionally left commented out.
# host all all 127.0.0.1/32 trust
Tuning notes:
- shared_buffers: Main PostgreSQL cache. Start with 25% of system RAM (up to 8GB). For systems with >64GB RAM, 8GB is often sufficient.
- effective_cache_size: Tells the query planner how much memory is available for caching. Set to ~75% of system RAM.
- work_mem: Memory allocated per operation (sorts, hashes). Higher values improve performance but consume more memory per connection.
- maintenance_work_mem: Memory for maintenance operations like VACUUM and CREATE INDEX. Can be set higher during maintenance windows.
- wal_level: Set to replica for streaming replication, logical if you need logical replication.
- max_wal_size: Larger values mean less frequent checkpoints but longer recovery times.
- checkpoint_completion_target: Spreads checkpoint I/O over time to reduce I/O spikes.
- max_connections: Higher values increase memory usage. Plan accordingly and monitor connection usage.
- listen_addresses: Limits the interfaces PostgreSQL binds to. Keep as narrow as possible for security.
- log_min_duration_statement: Set to identify slow queries without overwhelming logs.
- log_line_prefix: Include useful information for troubleshooting.
Reload the configuration after edits:
# Reload configuration without restarting
sudo systemctl reload postgresql
# Or reload via SQL (requires superuser)
sudo -u postgres psql -c "SELECT pg_reload_conf();"
If a reload is not sufficient (some settings, such as shared_buffers and listen_addresses, only take effect on restart — and on Debian the service unit may be versioned, e.g. postgresql@18-main), restart instead:
sudo systemctl restart postgresql
Validate server status and active settings:
# Check PostgreSQL version
sudo -u postgres psql -c "SELECT version();"
# Check current configuration values
sudo -u postgres psql -c "SHOW listen_addresses;"
sudo -u postgres psql -c "SHOW max_connections;"
sudo -u postgres psql -c "SHOW shared_buffers;"
sudo -u postgres psql -c "SHOW effective_cache_size;"
sudo -u postgres psql -c "SHOW wal_level;"
# Check all current settings
sudo -u postgres psql -c "SELECT name, setting, source FROM pg_settings WHERE source != 'default';"
-- Check connection usage.
-- pg_stat_database has one row per database, so count(*) counts databases,
-- NOT connected clients; the number of connected backends is sum(numbackends).
SELECT count(*) AS database_count,
       sum(numbackends) AS connected_clients
FROM pg_stat_database;
-- Check for queries that have been running for more than five minutes
SELECT
    pid,
    now() - query_start AS duration,
    usename,
    datname,
    query
FROM pg_stat_activity
WHERE state = 'active'
    AND now() - query_start > interval '5 minutes';
-- Check table bloat (approximate).
-- The original hand-rolled estimation query was internally inconsistent
-- (self-referencing derived columns, references to columns pg_stats does not
-- expose), so it is replaced with a reliable approximation: dead tuples that
-- VACUUM has not yet reclaimed, from the cumulative statistics system.
-- For an exact measurement, install the pgstattuple extension.
SELECT schemaname,
       relname AS tablename,
       n_live_tup,
       n_dead_tup,
       -- NULLIF guards against division by zero on empty tables
       round(100.0 * n_dead_tup / NULLIF(n_live_tup + n_dead_tup, 0), 1) AS dead_pct,
       pg_size_pretty(pg_total_relation_size(relid)) AS total_size
FROM pg_stat_user_tables
WHERE n_dead_tup > 0
ORDER BY n_dead_tup DESC
LIMIT 25;
Expose port 5432 only to trusted private networks.
# Use pgtune to generate optimized configurations based on your hardware
# sudo apt install pgtune # or equivalent for your distro
# Online configuration calculators
# https://pgtune.leopard.in.ua/ - Web-based PostgreSQL configuration generator
# Check configuration with pg_config
pg_config --version
pg_config --sharedir
pg_config --pkglibdir