This guide provides a full Ansible playbook to deploy Local Deep Research with Docker Compose on Debian 10+, Ubuntu LTS, and RHEL 9+ compatible hosts.
---
# Play: install Docker, write a Compose stack (Local Deep Research + Ollama +
# SearXNG), pull the LLM model, start everything, and wait until the web UI
# answers. Target hosts belong to the "local-deep-research" inventory group.
- name: Deploy Local Deep Research
  hosts: local-deep-research
  become: true
  vars:
    app_root: /opt/local-deep-research      # where docker-compose.yml is written
    app_port: 5000                          # host port exposed for the web UI
    data_dir: /srv/local-deep-research      # host-side data directory
    ollama_model: "gemma3:12b"              # model the healthcheck probes for
  tasks:
    - name: Install Docker on Debian/Ubuntu
      # NOTE(review): docker-compose-plugin ships in Docker's own apt
      # repository, not the stock Debian/Ubuntu archives — confirm the
      # Docker repo is configured on these hosts (or use docker-compose-v2).
      ansible.builtin.apt:
        name:
          - docker.io
          - docker-compose-plugin
        state: present
        update_cache: true
      when: ansible_os_family == "Debian"

    - name: Install Docker on RHEL family
      # NOTE(review): RHEL 9 does not provide a "docker" package by default;
      # this assumes the Docker CE (or equivalent) repo is already enabled.
      ansible.builtin.dnf:
        name:
          - docker
          - docker-compose-plugin
        state: present
      when: ansible_os_family == "RedHat"

    - name: Enable and start Docker
      ansible.builtin.service:
        name: docker
        state: started
        enabled: true

    - name: Create application directory
      ansible.builtin.file:
        path: "{{ app_root }}"
        state: directory
        mode: "0755"

    - name: Create data directory
      # Group-readable only; "docker" group is created by the Docker packages.
      ansible.builtin.file:
        path: "{{ data_dir }}"
        state: directory
        mode: "0750"
        owner: root
        group: docker

    - name: Write Docker Compose file
      ansible.builtin.copy:
        dest: "{{ app_root }}/docker-compose.yml"
        mode: "0644"
        content: |
          services:
            local-deep-research:
              image: localdeepresearch/local-deep-research:latest
              restart: unless-stopped
              ports:
                - "{{ app_port }}:5000"
              environment:
                - LDR_DATA_DIR=/data
                - LDR_LLM_OLLAMA_URL=http://ollama:11434
                - LDR_SEARCH_ENGINE_WEB_SEARXNG_DEFAULT_PARAMS_INSTANCE_URL=http://searxng:8080
                - LDR_APP_ALLOW_REGISTRATIONS=true
              volumes:
                - ldr_data:/data
              depends_on:
                ollama:
                  condition: service_healthy
                searxng:
                  condition: service_started
            ollama:
              image: ollama/ollama:latest
              restart: unless-stopped
              environment:
                - OLLAMA_KEEP_ALIVE=30m
              volumes:
                - ollama_data:/root/.ollama
              healthcheck:
                # Healthy only once the configured model exists locally —
                # hence the explicit "ollama pull" task below.
                test: ["CMD", "ollama", "show", "{{ ollama_model }}"]
                interval: 10s
                timeout: 5s
                retries: 2
                start_period: 10m
            searxng:
              image: searxng/searxng:latest
              restart: unless-stopped
              volumes:
                - searxng_data:/etc/searxng
          volumes:
            ldr_data:
            ollama_data:
            searxng_data:

    - name: Start Ollama first
      # Bring up only the ollama service so the model can be pulled before
      # local-deep-research (which waits on service_healthy) is started.
      ansible.builtin.command: docker compose up -d ollama
      args:
        chdir: "{{ app_root }}"

    - name: Pull the Ollama model
      # Without this pull the healthcheck ("ollama show <model>") can never
      # succeed and the app container would never start. "pull" is a no-op
      # when the model is already present.
      ansible.builtin.command: docker compose exec ollama ollama pull {{ ollama_model }}
      args:
        chdir: "{{ app_root }}"

    - name: Start application stack
      # Always reports "changed"; docker compose itself is idempotent.
      ansible.builtin.command: docker compose up -d
      args:
        chdir: "{{ app_root }}"

    - name: Wait for service to be ready
      # Retry the UI endpoint for up to 30 × 5 s = 150 s until HTTP 200.
      ansible.builtin.uri:
        url: "http://localhost:{{ app_port }}"
        status_code: 200
      register: result
      retries: 30
      delay: 5
      until: result.status == 200
For GPU-accelerated inference on NVIDIA hosts (with the NVIDIA Container Toolkit installed), add the following task to write a GPU override Compose file:
# Optional task: write a Compose override that reserves all NVIDIA GPUs for
# the ollama service. Requires the NVIDIA Container Toolkit on the host.
- name: Write GPU override Compose file
  ansible.builtin.copy:
    dest: "{{ app_root }}/docker-compose.gpu.override.yml"
    mode: "0644"
    content: |
      services:
        ollama:
          deploy:
            resources:
              reservations:
                devices:
                  - driver: nvidia
                    count: all
                    capabilities: [gpu]
Then start the stack with both Compose files:

```bash
docker compose -f docker-compose.yml -f docker-compose.gpu.override.yml up -d
```

Run the playbook:

```bash
ansible-playbook -i inventory.ini deploy-local-deep-research.yml
```

Access the application at `http://your-server:5000` (or whatever port you set via `app_port`).
| Variable | Description | Default |
|---|---|---|
| `app_root` | Installation directory | `/opt/local-deep-research` |
| `app_port` | External port of the web UI | `5000` |
| `data_dir` | Host data directory | `/srv/local-deep-research` |
| `ollama_model` | Ollama model to use | `gemma3:12b` |

Change the external port via the `app_port` variable and the model via the `ollama_model` variable as needed. Application data persists in the named Docker volumes (`ldr_data`, `ollama_data`, `searxng_data`).
Any questions?
Feel free to contact us. Find all contact information on our contact page.