Deploy LangChain applications using Docker.
Note: LangChain does not provide an official Docker image. You can create a custom Dockerfile for your application.
FROM python:3.11-slim

WORKDIR /app

# Install system dependencies (git is needed when requirements.txt pulls
# packages from VCS URLs); clean apt lists in the same layer to keep it small
RUN apt-get update && apt-get install -y --no-install-recommends \
    git \
    && rm -rf /var/lib/apt/lists/*

# Copy requirements first so the dependency layer stays cached
# until requirements.txt actually changes
COPY requirements.txt .

# Install Python dependencies
RUN pip install --no-cache-dir -r requirements.txt

# Copy application code (use a .dockerignore to keep .git, .env, etc. out)
COPY . .

# Unbuffered stdout/stderr so logs show up immediately in `docker logs`
ENV PYTHONUNBUFFERED=1

# Run as a non-root user for defense in depth
RUN useradd -m appuser
USER appuser

# Default command (exec form: the app runs as PID 1 and receives signals)
CMD ["python", "app.py"]
langchain==1.2.10
langchain-core==0.3.0
langchain-community==0.3.0
langchain-openai==0.3.0
python-dotenv>=1.0.0
chromadb>=0.5.0
# Build the image from the Dockerfile in the current directory
docker build -t langchain-app .
# Run once and remove the container on exit; API keys are passed at run time
# (never baked into the image) and ./data is mounted so results persist
docker run -it --rm \
-e OPENAI_API_KEY="sk-your-key" \
-e LANGCHAIN_API_KEY="your-langsmith-key" \
-v $(pwd)/data:/app/data \
langchain-app
# NOTE: the top-level "version" key is obsolete in Compose v2 and may be removed
version: "3.9"

services:
  langchain:
    build: .
    environment:
      - OPENAI_API_KEY=${OPENAI_API_KEY}        # read from the host env or a .env file
      - LANGCHAIN_API_KEY=${LANGCHAIN_API_KEY}
      - LANGCHAIN_TRACING_V2=true               # enable LangSmith tracing
    volumes:
      - ./data:/app/data
      - ./output:/app/output
    # Keep stdin/tty open so the container can be attached to interactively
    stdin_open: true
    tty: true
# Create a .env file (never commit real keys — add .env to .gitignore and .dockerignore)
cat > .env << EOF
OPENAI_API_KEY=sk-your-key
LANGCHAIN_API_KEY=your-langsmith-key
EOF
# Start the container in detached mode; compose reads .env automatically
docker compose up -d
# Follow the container logs
docker compose logs -f
# Run the app in a one-off interactive container (removed on exit)
docker compose run --rm langchain python app.py
# Build stage: compilers and VCS tools live only here, never in the final image
FROM python:3.11-slim AS builder

WORKDIR /app

RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    git \
    && rm -rf /var/lib/apt/lists/*

COPY requirements.txt .
# --user installs under /root/.local so the runtime stage can copy just the packages
RUN pip install --user --no-cache-dir -r requirements.txt

# Runtime stage: minimal image, non-root user
FROM python:3.11-slim

WORKDIR /app

# Create non-root user with a stable UID (useful for runAsNonRoot checks)
RUN useradd -m -u 1000 langchain

# Copy installed packages from the builder; --chown so the non-root user owns them
COPY --from=builder --chown=langchain:langchain /root/.local /home/langchain/.local
COPY --chown=langchain:langchain . .

# Put user-installed console scripts on PATH; unbuffered output for logging
ENV PATH=/home/langchain/.local/bin:$PATH \
    PYTHONUNBUFFERED=1

USER langchain

# Cheap liveness probe — replace with a real app-level check in production
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD python -c "print('healthy')" || exit 1

CMD ["python", "app.py"]
version: "3.9"  # obsolete in Compose v2; safe to remove

services:
  langchain:
    build: .
    environment:
      - OPENAI_API_KEY=${OPENAI_API_KEY}
      - CHROMA_DB_HOST=chroma   # service name resolves on the compose network
      - CHROMA_DB_PORT=8000
    depends_on:
      - chroma
    volumes:
      - ./output:/app/output

  chroma:
    image: chromadb/chroma:latest   # pin a specific tag in production
    ports:
      - "8000:8000"
    volumes:
      - chroma-data:/chroma/chroma
    command: --persist-directory /chroma/chroma

volumes:
  chroma-data:
import chromadb  # required for chromadb.Settings below (was missing)

from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings

# Embeddings are computed via the OpenAI API (OPENAI_API_KEY must be set).
embeddings = OpenAIEmbeddings()

# Connect to the Chroma server started by docker compose; "chroma" is the
# compose service name, reachable on the shared compose network.
vectorstore = Chroma(
    client_settings=chromadb.Settings(
        chroma_api_impl="rest",
        chroma_server_host="chroma",
        chroma_server_http_port="8000",
    ),
    collection_name="my_collection",
    embedding_function=embeddings,
)
version: "3.9"  # obsolete in Compose v2; safe to remove

services:
  ollama:
    image: ollama/ollama:latest
    ports:
      - "11434:11434"
    volumes:
      - ollama-data:/root/.ollama   # persist downloaded models
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]

  langchain:
    build: .
    environment:
      - OPENAI_API_KEY=ollama                     # dummy value; Ollama ignores the key
      - OPENAI_API_BASE=http://ollama:11434/v1    # Ollama's OpenAI-compatible endpoint
      - MODEL_NAME=llama3.1
    depends_on:
      - ollama
    volumes:
      - ./output:/app/output

volumes:
  ollama-data:
from langchain_ollama import ChatOllama

# "ollama" is the compose service name, resolvable on the compose network;
# from the host machine use http://localhost:11434 instead.
llm = ChatOllama(
model="llama3.1",
base_url="http://ollama:11434"
)
version: "3.9"  # obsolete in Compose v2; safe to remove

services:
  langchain:
    build: .
    environment:
      - OPENAI_API_KEY=${OPENAI_API_KEY}
      - LANGCHAIN_API_KEY=${LANGCHAIN_API_KEY}
      # NOTE: hardcoded credentials are for local development only —
      # use secrets or environment variables in production
      - DATABASE_URL=postgresql://langchain:password@postgres:5432/langchain
    depends_on:
      - postgres

  postgres:
    image: postgres:15-alpine
    environment:
      POSTGRES_USER: langchain
      POSTGRES_PASSWORD: password
      POSTGRES_DB: langchain
    volumes:
      - postgres-data:/var/lib/postgresql/data
    ports:
      - "5432:5432"   # expose to the host only if needed for local inspection

volumes:
  postgres-data:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: langchain-deployment
spec:
  replicas: 2
  selector:
    matchLabels:
      app: langchain
  template:
    metadata:
      labels:
        app: langchain
    spec:
      containers:
        - name: langchain
          image: your-registry/langchain-app:latest  # pin a tag/digest in production
          env:
            # API keys come from a Secret, never from the manifest itself
            - name: OPENAI_API_KEY
              valueFrom:
                secretKeyRef:
                  name: langchain-secrets
                  key: openai-api-key
            - name: LANGCHAIN_API_KEY
              valueFrom:
                secretKeyRef:
                  name: langchain-secrets
                  key: langsmith-key
          volumeMounts:
            - name: data-volume
              mountPath: /app/data
      volumes:
        - name: data-volume
          persistentVolumeClaim:
            claimName: langchain-data-pvc
apiVersion: batch/v1
kind: Job
metadata:
  name: langchain-job
spec:
  backoffLimit: 1   # belongs under the Job spec (sibling of template): retry the pod once
  template:
    spec:
      containers:
        - name: langchain
          image: your-registry/langchain-app:latest
          env:
            - name: OPENAI_API_KEY
              valueFrom:
                secretKeyRef:
                  name: langchain-secrets
                  key: openai-api-key
      restartPolicy: Never   # required for Jobs: Never or OnFailure
# api.py
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate

app = FastAPI()

# Create the model client once at startup and reuse it across requests.
llm = ChatOpenAI(model="gpt-4o")

# Build the prompt and chain once; only the inputs vary per request.
_prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful assistant"),
    ("human", "{question}"),
])
_chain = _prompt | llm


class QueryRequest(BaseModel):
    question: str


class QueryResponse(BaseModel):
    answer: str


@app.post("/query", response_model=QueryResponse)
async def query(request: QueryRequest) -> QueryResponse:
    """Answer a single question with the LLM.

    Returns HTTP 500 with the error message if the model call fails.
    NOTE(review): `invoke` is synchronous and blocks the event loop;
    consider `await _chain.ainvoke(...)` under real load.
    """
    try:
        response = _chain.invoke({"question": request.question})
        return QueryResponse(answer=response.content)
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))


@app.get("/health")
async def health():
    """Cheap liveness endpoint for container/orchestrator health checks."""
    return {"status": "healthy"}
FROM python:3.11-slim

WORKDIR /app

# git may be needed for VCS-sourced pip requirements; clean apt lists in the
# same layer (the original left them in the image)
RUN apt-get update && apt-get install -y --no-install-recommends \
    git \
    && rm -rf /var/lib/apt/lists/*

# Dependency layer is cached until requirements.txt changes
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

COPY . .

ENV PYTHONUNBUFFERED=1

# Run as a non-root user
RUN useradd -m appuser
USER appuser

# Documentation only — publish with `docker run -p 8000:8000`
EXPOSE 8000

CMD ["uvicorn", "api:app", "--host", "0.0.0.0", "--port", "8000"]
langchain==1.2.10
langchain-openai==0.3.0
fastapi==0.115.0
uvicorn==0.32.0
python-dotenv>=1.0.0
# Build and start the API detached, publishing port 8000;
# pass the real key via -e or --env-file — never hardcode it in scripts
docker build -t langchain-api .
docker run -d -p 8000:8000 -e OPENAI_API_KEY=sk-... langchain-api
# Check logs
docker logs langchain-app
# Run interactively for debugging (drops into a shell instead of the app)
docker run -it --rm langchain-app /bin/bash
# In docker-compose.yml
services:
  langchain:
    deploy:
      resources:
        limits:
          memory: 4G        # hard cap — the container is OOM-killed above this
        reservations:
          memory: 2G        # soft guarantee / scheduler hint
# Verify environment variable is set
docker run --rm langchain-app env | grep OPENAI