The ELK Stack (Elasticsearch, Logstash, Kibana) is a powerful log aggregation and analytics platform. Security must cover all three components plus the Beats data shippers. This guide covers security measures for production ELK Stack deployments.
ELK Stack architecture includes multiple security boundaries across its components and the network paths between them.
Key security concerns include authentication, authorization, data encryption, API security, and index-level access control.
Configure firewall rules for ELK components:
# Elasticsearch
ufw allow from 10.0.0.0/8 to any port 9200 proto tcp # HTTP API
ufw allow from 10.0.0.0/8 to any port 9300 proto tcp # Transport (cluster)
# Kibana
ufw allow from 10.0.0.0/8 to any port 5601 proto tcp
# Logstash
ufw allow from 10.0.0.0/8 to any port 5044 proto tcp # Beats input
ufw allow from 10.0.0.0/8 to any port 5000 proto tcp # TCP input
ufw allow from 10.0.0.0/8 to any port 5000 proto udp # UDP input
# Block external access
ufw deny from any to any port 9200 proto tcp
ufw deny from any to any port 9300 proto tcp
ufw deny from any to any port 5601 proto tcp
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: elasticsearch-network-policy
spec:
podSelector:
matchLabels:
app: elasticsearch
ingress:
- from:
- podSelector:
matchLabels:
app: kibana
- podSelector:
matchLabels:
app: logstash
ports:
- protocol: TCP
port: 9200
- protocol: TCP
port: 9300
Configure Elasticsearch binding:
# /etc/elasticsearch/elasticsearch.yml
network.host: 10.0.1.100
http.port: 9200
transport.port: 9300
# Discovery settings
discovery.seed_hosts: ["10.0.1.100", "10.0.1.101", "10.0.1.102"]
cluster.initial_master_nodes: ["node-1", "node-2", "node-3"]
Configure Kibana binding:
# /etc/kibana/kibana.yml
server.host: "127.0.0.1"
server.port: 5601
server.name: "kibana"
elasticsearch.hosts: ["https://10.0.1.100:9200"]
Configure native Elasticsearch users:
# Enable native realm
# /etc/elasticsearch/elasticsearch.yml
xpack.security.enabled: true
xpack.security.authc.realms.native.native1:
order: 0
# Create users with roles
bin/elasticsearch-users useradd admin -p ${ADMIN_PASSWORD} -r superuser
bin/elasticsearch-users useradd kibana_system -p ${KIBANA_PASSWORD} -r kibana_system
bin/elasticsearch-users useradd logstash_system -p ${LOGSTASH_PASSWORD} -r logstash_system
bin/elasticsearch-users useradd monitoring_user -p ${MONITORING_PASSWORD} -r monitoring_user
Configure LDAP authentication:
# /etc/elasticsearch/elasticsearch.yml
xpack.security.authc.realms.ldap.ldap1:
order: 1
url: "ldaps://ldap.company.com:636"
bind_dn: "cn=elasticsearch,ou=services,dc=company,dc=com"
bind_password: ${LDAP_BIND_PASSWORD}
user_search:
base_dn: "ou=users,dc=company,dc=com"
filter: "(objectClass=inetOrgPerson)"
attribute: "uid"
group_search:
base_dn: "ou=groups,dc=company,dc=com"
filter: "(objectClass=groupOfNames)"
attribute: "cn"
Define custom roles:
# /etc/elasticsearch/roles.yml
monitoring_role:
cluster: ["monitor"]
indices:
- names: [ ".monitoring-*" ]
privileges: ["read", "view_index_metadata"]
log_writer:
cluster: ["monitor", "manage_index_templates"]
indices:
- names: [ "logs-*", "metrics-*" ]
privileges: ["create_index", "write", "read"]
log_reader:
cluster: ["monitor"]
indices:
- names: [ "logs-*", "metrics-*" ]
privileges: ["read", "view_index_metadata"]
kibana_user:
cluster: ["monitor"]
indices:
- names: [ ".kibana*", "logs-*", "metrics-*" ]
privileges: ["read", "write", "manage"]
Generate API keys for services:
# Create API key for Logstash
curl -X POST -u elastic:${ELASTIC_PASSWORD} \
-H "Content-Type: application/json" \
https://localhost:9200/_security/api_key \
-d '{
"name": "logstash-api-key",
"role_descriptors": {
"logstash_writer": {
"cluster": ["monitor", "manage_index_templates"],
"index": [
{
"names": ["logs-*"],
"privileges": ["write", "create_index"]
}
]
}
}
}'
Configure SAML authentication:
# /etc/elasticsearch/elasticsearch.yml
xpack.security.authc.realms.saml.saml1:
order: 2
idp.metadata.path: /etc/elasticsearch/saml-metadata.xml
idp.entity_id: "https://sso.company.com/saml"
sp.entity_id: "https://elasticsearch.company.com"
sp.acs: "https://kibana.company.com/api/security/saml/callback"
sp.logout: "https://kibana.company.com/logout"
groups.attribute: "groups"
Configure Kibana SAML:
# /etc/kibana/kibana.yml
xpack.security.authc.providers:
saml.saml1:
order: 0
realm: saml1
basic.basic1:
order: 1
Configure TLS for Elasticsearch:
# /etc/elasticsearch/elasticsearch.yml
xpack.security.http.ssl.enabled: true
xpack.security.http.ssl.certificate: /etc/elasticsearch/certs/http.crt
xpack.security.http.ssl.key: /etc/elasticsearch/certs/http.key
xpack.security.http.ssl.certificate_authorities: [ "/etc/elasticsearch/certs/ca.crt" ]
xpack.security.http.ssl.verification_mode: certificate
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.certificate: /etc/elasticsearch/certs/transport.crt
xpack.security.transport.ssl.key: /etc/elasticsearch/certs/transport.key
xpack.security.transport.ssl.certificate_authorities: [ "/etc/elasticsearch/certs/ca.crt" ]
Configure TLS for Kibana:
# /etc/kibana/kibana.yml
server.ssl.enabled: true
server.ssl.certificate: /etc/kibana/certs/kibana.crt
server.ssl.key: /etc/kibana/certs/kibana.key
server.ssl.certificateAuthorities: [ "/etc/kibana/certs/ca.crt" ]
elasticsearch.ssl.certificateAuthorities: [ "/etc/kibana/certs/ca.crt" ]
elasticsearch.ssl.verificationMode: certificate
Configure TLS for Logstash inputs:
# /etc/logstash/conf.d/01-beats-input.conf
input {
beats {
port => 5044
ssl => true
ssl_certificate => "/etc/logstash/certs/logstash.crt"
ssl_key => "/etc/logstash/certs/logstash.key"
ssl_certificate_authorities => ["/etc/logstash/certs/ca.crt"]
ssl_verify_mode => "force_peer"
}
}
Generate certificates using Elasticsearch tools:
# Generate CA
bin/elasticsearch-certutil ca --pem --out /etc/elasticsearch/certs/ca.zip
# Generate HTTP certificate
bin/elasticsearch-certutil cert --pem \
--ca-cert /etc/elasticsearch/certs/ca.crt \
--ca-key /etc/elasticsearch/certs/ca.key \
--name node-1 \
--dns node-1.company.com \
--ip 10.0.1.100 \
--out /etc/elasticsearch/certs/node-1.zip
Restrict Elasticsearch API access:
| Endpoint | Risk Level | Access Control |
|---|---|---|
| GET /_cluster/health | Low | Monitoring role |
| GET /_cat/indices | Low | Read roles |
| POST /{index}/_doc | Medium | Write roles |
| DELETE /{index} | High | Admin only |
| PUT /_cluster/settings | Critical | Superuser only |
| POST /_security/user | Critical | Security admin only |
| GET /.kibana/_doc | Medium | Kibana users |
Configure index-level permissions:
# Role with index patterns
logs_analyst:
cluster: ["monitor"]
indices:
- names: [ "logs-app-*", "logs-web-*" ]
privileges: ["read", "view_index_metadata"]
field_security:
grant: ["@timestamp", "message", "level", "service"]
query: '{"term": {"environment": "production"}}'
Restrict access to sensitive fields:
# Role with field security
limited_viewer:
cluster: ["monitor"]
indices:
- names: [ "logs-*" ]
privileges: ["read"]
field_security:
grant: ["@timestamp", "message", "host.name"]
except: ["user.password", "api.key", "secret.*"]
Implement document-level access control:
# Role with document query
tenant_a_viewer:
cluster: ["monitor"]
indices:
- names: [ "logs-*" ]
privileges: ["read"]
query: '{"term": {"tenant": "tenant_a"}}'
Configure Kibana Spaces for multi-tenancy:
# Create space via API
curl -X POST -u admin:${PASSWORD} \
-H "kbn-xsrf: true" \
-H "Content-Type: application/json" \
https://kibana.company.com/api/spaces/space \
-d '{
"id": "tenant-a",
"name": "Tenant A",
"description": "Space for Tenant A",
"disabledFeatures": ["management", "dev_tools"]
}'
Configure Kibana encryption keys (these protect saved objects, sessions, and reports; they are not data-at-rest encryption — use filesystem encryption below for that):
# /etc/kibana/kibana.yml
xpack.security.encryptionKey: ${ENCRYPTION_KEY_32_CHARS}
xpack.encryptedSavedObjects.encryptionKey: ${SAVED_OBJECTS_KEY_32_CHARS}
xpack.reporting.encryptionKey: ${REPORTING_KEY_32_CHARS}
Configure filesystem encryption:
# Use encrypted filesystem for data directory
# /etc/elasticsearch/elasticsearch.yml
path.data: /encrypted/elasticsearch/data
path.logs: /encrypted/elasticsearch/logs
Secure sensitive configuration:
# Use Elasticsearch keystore
bin/elasticsearch-keystore create
# Add secrets
bin/elasticsearch-keystore add xpack.notification.slack.account.monitoring.secure_url
bin/elasticsearch-keystore add xpack.security.authc.realms.ldap.ldap1.bind_password
For Kubernetes:
apiVersion: v1
kind: Secret
metadata:
name: elasticsearch-secrets
type: Opaque
data:
elastic-password: <base64-encoded>
encryption-key: <base64-encoded>
Configure index lifecycle management:
PUT _ilm/policy/logs-policy
{
"policy": {
"phases": {
"hot": {
"min_age": "0ms",
"actions": {
"rollover": {
"max_size": "50gb",
"max_age": "7d"
}
}
},
"warm": {
"min_age": "7d",
"actions": {
"shrink": { "number_of_shards": 1 },
"forcemerge": { "max_num_segments": 1 }
}
},
"cold": {
"min_age": "30d",
"actions": {
"freeze": {}
}
},
"delete": {
"min_age": "90d",
"actions": {
"delete": {}
}
}
}
}
}
Enable Elasticsearch audit logging:
# /etc/elasticsearch/elasticsearch.yml
xpack.security.audit.enabled: true
xpack.security.audit.logfile.events.include:
- access_denied
- access_granted
- anonymous_access_denied
- authentication_failed
- authentication_success
- connection_denied
- tampered_request
- run_as_denied
- run_as_granted
Configure audit log settings:
# /etc/elasticsearch/elasticsearch.yml
xpack.security.audit.logfile.events.exclude:
- authentication_success
xpack.security.audit.logfile.deprecation:
enabled: true
Enable Kibana audit logging:
# /etc/kibana/kibana.yml
xpack.security.audit.enabled: true
xpack.security.audit.appender.file.path: /var/log/kibana/audit.log
xpack.security.audit.appender.file.layout.pattern: "%d{ISO8601} [%c{1}] [%level] %m%n"
Create monitoring alerts:
PUT .monitoring-alert-7/_doc/security-alerts
{
"alerts": [
{
"name": "Failed Login Attempts",
"condition": {
"script": {
"source": "ctx.payload.hits.total.value > 10"
}
},
"query": {
"bool": {
"filter": [
{ "term": { "event.action": "authentication_failed" } }
]
}
}
},
{
"name": "Privilege Escalation Attempt",
"condition": {
"script": {
"source": "ctx.payload.hits.total.value > 0"
}
},
"query": {
"bool": {
"filter": [
{ "term": { "event.action": "run_as_denied" } }
]
}
}
}
]
}
Forward audit logs to external SIEM:
# Logstash configuration for audit forwarding
output {
syslog {
host => "siem.company.com"
port => 514
protocol => "tcp"
facility => "local0"
}
}