Automated deployment of LlamaIndex using Ansible.
This playbook installs Python, creates a virtual environment, and deploys a LlamaIndex application.
Create inventory.ini:
# Hosts targeted by the llama-index.yml playbook.
[llama_index_servers]
llama-server-01 ansible_host=192.168.1.100
# Connection variables applied to every host in the group above.
[llama_index_servers:vars]
ansible_user=ubuntu
ansible_python_interpreter=/usr/bin/python3
Create llama-index.yml:
---
# Installs system packages, builds a venv, deploys the LlamaIndex app,
# and manages it as a systemd service. Requires vault_openai_api_key
# from an ansible-vault encrypted vars file.
- name: Deploy LlamaIndex Application
  hosts: llama_index_servers
  become: true

  vars:
    app_name: llama-index
    app_dir: /opt/llama-index
    # User the systemd service runs as; app directories must be writable by it.
    app_user: www-data
    # Quoted so YAML does not parse the version as a float.
    python_version: "3.11"
    openai_api_key: "{{ vault_openai_api_key }}"

  tasks:
    - name: Install system dependencies
      ansible.builtin.apt:
        name:
          - python3
          - python3-pip
          - python3-venv
          - git
        state: present
        update_cache: true  # canonical boolean instead of truthy "yes"

    # One looped task replaces three copy-pasted directory tasks.
    # Ownership matters: the service runs as app_user and must be able to
    # write ./storage when index.storage_context.persist() is called.
    - name: Create application directories
      ansible.builtin.file:
        path: "{{ item }}"
        state: directory
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0755"
      loop:
        - "{{ app_dir }}"
        - "{{ app_dir }}/data"
        - "{{ app_dir }}/storage"

    - name: Create Python virtual environment
      ansible.builtin.command: "python3 -m venv {{ app_dir }}/venv"
      args:
        # Quoted: an unquoted value starting with "{{" is read by YAML as a
        # flow mapping and fails to parse before Jinja ever sees it.
        creates: "{{ app_dir }}/venv/bin/activate"

    - name: Install LlamaIndex
      ansible.builtin.pip:
        name:
          - llama-index
          - llama-index-llms-openai
          - llama-index-embeddings-openai
        virtualenv: "{{ app_dir }}/venv"

    - name: Create application script
      ansible.builtin.copy:
        dest: "{{ app_dir }}/app.py"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        # The script embeds the API key; keep it unreadable to other users.
        mode: "0640"
        content: |
          import os
          from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
          os.environ["OPENAI_API_KEY"] = "{{ openai_api_key }}"
          documents = SimpleDirectoryReader("./data").load_data()
          index = VectorStoreIndex.from_documents(documents)
          index.storage_context.persist(persist_dir="./storage")
          query_engine = index.as_query_engine()
          response = query_engine.query("What is this document about?")
          print(response)
      # Keep the rendered content (which contains the secret) out of logs.
      no_log: true
      # Restart the service whenever the application code changes.
      notify:
        - Restart LlamaIndex

    - name: Create systemd service
      ansible.builtin.copy:
        dest: /etc/systemd/system/llama-index.service
        mode: "0644"
        content: |
          [Unit]
          Description=LlamaIndex Application
          After=network.target
          [Service]
          Type=simple
          User={{ app_user }}
          WorkingDirectory={{ app_dir }}
          ExecStart={{ app_dir }}/venv/bin/python {{ app_dir }}/app.py
          Restart=always
          [Install]
          WantedBy=multi-user.target
      notify:
        - Restart LlamaIndex

    - name: Enable and start service
      ansible.builtin.systemd:
        name: llama-index
        enabled: true
        state: started
        daemon_reload: true

  handlers:
    - name: Restart LlamaIndex
      ansible.builtin.systemd:
        name: llama-index
        state: restarted
        daemon_reload: true
# Basic run
ansible-playbook -i inventory.ini llama-index.yml
# With vault secrets
ansible-playbook -i inventory.ini llama-index.yml --ask-vault-pass
Create the encrypted secrets file first — the playbook references vault_openai_api_key, so this file must exist before any run:
ansible-vault create group_vars/all/vault.yml
Add your API key:
vault_openai_api_key: "sk-your-openai-api-key"
# Check service status
ansible -i inventory.ini llama_index_servers -m systemd -a "name=llama-index state=started"
# View logs
ansible -i inventory.ini llama_index_servers -m shell -a "journalctl -u llama-index -n 20"
See the main LlamaIndex Setup guide for more details.