Automated deployment of LangChain applications using Ansible.
This playbook installs Python, creates a virtual environment, and deploys a LangChain application.
Create inventory.ini:
# Hosts that will receive the LangChain deployment.
[langchain_servers]
langchain-prod-01 ansible_host=192.168.1.100
langchain-prod-02 ansible_host=192.168.1.101
# Connection settings shared by every host in the group.
[langchain_servers:vars]
ansible_user=ubuntu
ansible_python_interpreter=/usr/bin/python3
Create langchain.yml:
---
# Installs system packages, builds a virtualenv, deploys a demo LangChain
# app plus a FastAPI wrapper, and wires the demo app up as a systemd unit.
- name: Deploy LangChain Application
  hosts: langchain_servers
  become: true

  vars:
    app_name: langchain
    app_dir: /opt/langchain
    python_version: "3.11"
    # Secrets are pulled from ansible-vault (see group_vars/all/vault.yml).
    openai_api_key: "{{ vault_openai_api_key }}"
    langchain_api_key: "{{ vault_langchain_api_key }}"

  tasks:
    - name: Install system dependencies
      apt:
        name:
          - python3
          - python3-pip
          - python3-venv
          - python3-dev
          - git
          - build-essential
        state: present
        update_cache: true

    - name: Create application directory
      file:
        path: "{{ app_dir }}"
        state: directory
        # Owned by the service account so the app can write to its own tree.
        owner: www-data
        group: www-data
        mode: "0755"

    - name: Create subdirectories
      file:
        path: "{{ item }}"
        state: directory
        owner: www-data
        group: www-data
        mode: "0755"
      loop:
        - "{{ app_dir }}/src"
        - "{{ app_dir }}/data"
        - "{{ app_dir }}/output"
        - "{{ app_dir }}/logs"

    - name: Create Python virtual environment
      command: python3 -m venv {{ app_dir }}/venv
      args:
        # Quoted: a value starting with "{{" is otherwise parsed by YAML
        # as a flow mapping before Jinja ever sees it.
        creates: "{{ app_dir }}/venv/bin/activate"

    - name: Upgrade pip
      pip:
        name: pip
        state: latest
        virtualenv: "{{ app_dir }}/venv"

    - name: Install LangChain
      pip:
        name:
          - langchain
          - langchain-core
          - langchain-community
          - langchain-openai
          - python-dotenv
          - chromadb
          - fastapi
          - uvicorn
        virtualenv: "{{ app_dir }}/venv"

    - name: Create .env file
      copy:
        dest: "{{ app_dir }}/.env"
        content: |
          OPENAI_API_KEY={{ openai_api_key }}
          LANGCHAIN_API_KEY={{ langchain_api_key }}
          LANGCHAIN_TRACING_V2=true
          LANGCHAIN_PROJECT=production
        # The service runs as www-data and app.py calls load_dotenv(),
        # so the file must be readable by that user — but by nobody else.
        owner: www-data
        group: www-data
        mode: "0600"

    - name: Create application script
      copy:
        dest: "{{ app_dir }}/src/app.py"
        content: |
          #!/usr/bin/env python3
          import os
          from dotenv import load_dotenv
          from langchain_openai import ChatOpenAI
          from langchain_core.prompts import ChatPromptTemplate
          from langchain_core.output_parsers import StrOutputParser
          load_dotenv()
          llm = ChatOpenAI(model="gpt-4o")
          prompt = ChatPromptTemplate.from_messages([
              ("system", "You are a helpful assistant"),
              ("human", "{question}")
          ])
          chain = prompt | llm | StrOutputParser()
          def main():
              question = "What is LangChain?"
              response = chain.invoke({"question": question})
              print(f"Q: {question}")
              print(f"A: {response}")
          if __name__ == "__main__":
              main()
        mode: "0755"

    - name: Create FastAPI service
      copy:
        dest: "{{ app_dir }}/src/api.py"
        content: |
          from fastapi import FastAPI, HTTPException
          from pydantic import BaseModel
          from langchain_openai import ChatOpenAI
          from langchain_core.prompts import ChatPromptTemplate
          from langchain_core.output_parsers import StrOutputParser
          app = FastAPI()
          llm = ChatOpenAI(model="gpt-4o")
          class QueryRequest(BaseModel):
              question: str
          class QueryResponse(BaseModel):
              answer: str
          @app.post("/query", response_model=QueryResponse)
          async def query(request: QueryRequest) -> QueryResponse:
              try:
                  prompt = ChatPromptTemplate.from_messages([
                      ("system", "You are a helpful assistant"),
                      ("human", "{question}")
                  ])
                  chain = prompt | llm | StrOutputParser()
                  response = chain.invoke({"question": request.question})
                  # StrOutputParser already yields a plain str; calling
                  # .content on it would raise AttributeError.
                  return QueryResponse(answer=response)
              except Exception as e:
                  raise HTTPException(status_code=500, detail=str(e))
          @app.get("/health")
          async def health():
              return {"status": "healthy"}
        mode: "0644"

    - name: Create systemd service
      copy:
        dest: /etc/systemd/system/langchain.service
        content: |
          [Unit]
          Description=LangChain Application
          After=network.target
          [Service]
          Type=simple
          User=www-data
          Group=www-data
          WorkingDirectory={{ app_dir }}
          Environment="PATH={{ app_dir }}/venv/bin"
          EnvironmentFile={{ app_dir }}/.env
          ExecStart={{ app_dir }}/venv/bin/python {{ app_dir }}/src/app.py
          # app.py is a one-shot script: Restart=always would re-run it
          # every 10 seconds forever after a clean exit.
          Restart=on-failure
          RestartSec=10
          [Install]
          WantedBy=multi-user.target
      notify:
        - Restart LangChain

    - name: Enable and start service
      systemd:
        name: langchain
        enabled: true
        state: started
        daemon_reload: true

  handlers:
    - name: Restart LangChain
      systemd:
        name: langchain
        state: restarted
        daemon_reload: true
# Basic run
ansible-playbook -i inventory.ini langchain.yml
# With vault secrets
ansible-playbook -i inventory.ini langchain.yml --ask-vault-pass
# Limit to specific host
ansible-playbook -i inventory.ini langchain.yml --limit langchain-prod-01
# Dry run (check mode)
ansible-playbook -i inventory.ini langchain.yml --check
Create secrets file:
ansible-vault create group_vars/all/vault.yml
Add your secrets:
vault_openai_api_key: "sk-your-openai-api-key"
vault_langchain_api_key: "your-langsmith-key"
# Check service status
ansible -i inventory.ini langchain_servers -m systemd -a "name=langchain state=started"
# View logs
ansible -i inventory.ini langchain_servers -m shell -a "journalctl -u langchain -n 20"
# Test execution (playbook vars are not available to ad-hoc commands,
# so use the literal application path)
ansible -i inventory.ini langchain_servers -m shell -a "/opt/langchain/venv/bin/python /opt/langchain/src/app.py"
# Test API
ansible -i inventory.ini langchain_servers -m uri -a "url=http://localhost:8000/health method=GET"
# Updates LangChain packages one host at a time so the fleet never goes
# fully dark during an upgrade.
- name: Rolling update LangChain
  hosts: langchain_servers
  become: true
  serial: 1  # update one server at a time
  vars:
    # This play runs standalone, so it must define app_dir itself —
    # it does not inherit vars from the deploy play.
    app_dir: /opt/langchain

  tasks:
    - name: Stop service
      systemd:
        name: langchain
        state: stopped

    - name: Update pip packages
      pip:
        name:
          - langchain
          - langchain-core
          - langchain-openai
        state: latest
        virtualenv: "{{ app_dir }}/venv"

    - name: Start service
      systemd:
        name: langchain
        state: started

    - name: Verify service
      systemd:
        name: langchain
        state: started
      register: result
      retries: 3
      delay: 10
      until: result.status.ActiveState == "active"
# Archives the secrets file and source tree into a dated tarball on each host.
- name: Backup LangChain configuration
  hosts: langchain_servers
  become: true
  vars:
    # Standalone play: app_dir must be defined here, not inherited.
    app_dir: /opt/langchain

  tasks:
    - name: Create backup directory
      file:
        path: "{{ app_dir }}/backups"
        state: directory
        # Backups include the .env secrets — keep them root-only.
        mode: "0700"

    - name: Backup config files
      archive:
        path:
          - "{{ app_dir }}/.env"
          - "{{ app_dir }}/src"
        dest: "{{ app_dir }}/backups/config-{{ ansible_date_time.date }}.tar.gz"
        mode: "0600"
# Installs and starts a second systemd unit that serves src/api.py
# through uvicorn on api_port.
- name: Deploy LangChain FastAPI service
  hosts: langchain_servers
  become: true
  vars:
    api_port: 8000
    # Standalone play: app_dir must be defined here, not inherited.
    app_dir: /opt/langchain

  tasks:
    - name: Create API systemd service
      copy:
        dest: /etc/systemd/system/langchain-api.service
        content: |
          [Unit]
          Description=LangChain FastAPI Service
          After=network.target
          [Service]
          Type=simple
          User=www-data
          WorkingDirectory={{ app_dir }}
          Environment="PATH={{ app_dir }}/venv/bin"
          EnvironmentFile={{ app_dir }}/.env
          ExecStart={{ app_dir }}/venv/bin/uvicorn src.api:app --host 0.0.0.0 --port {{ api_port }}
          Restart=always
          RestartSec=10
          [Install]
          WantedBy=multi-user.target
      notify:
        - Restart LangChain API

    - name: Enable and start API service
      systemd:
        name: langchain-api
        enabled: true
        state: started
        daemon_reload: true

  handlers:
    - name: Restart LangChain API
      systemd:
        name: langchain-api
        state: restarted
        daemon_reload: true
ansible -i inventory.ini langchain_servers \
-m systemd -a "name=langchain"
ansible -i inventory.ini langchain_servers \
-m shell -a "journalctl -u langchain --since '1 hour ago'"
ansible -i inventory.ini langchain_servers \
-m systemd -a "name=langchain state=restarted"
# Playbook vars are not templated in ad-hoc commands — use the literal path
ansible -i inventory.ini langchain_servers \
-m shell -a "/opt/langchain/venv/bin/pip list | grep langchain"
ansible -i inventory.ini langchain_servers \
-m uri -a "url=http://localhost:8000/health method=GET return_content=yes"
See the main LangChain Setup guide for more details.