Deploy Microsoft AutoGen applications using Docker.
Note: Microsoft AutoGen does not provide an official Docker image. You can create a custom Dockerfile for your application.
# syntax=docker/dockerfile:1
FROM python:3.11-slim

WORKDIR /app

# Install system dependencies (git is needed for pip installs from VCS URLs).
# --no-install-recommends keeps the image small; clean the apt lists in the
# same layer so the package cache is never baked into the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
        git \
    && rm -rf /var/lib/apt/lists/*

# Copy the dependency manifest first so the install layer stays cached
# while application source changes
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy application code
COPY . .

# Create coding directory for code execution and drop root privileges
RUN useradd -m -u 1000 autogen \
    && mkdir -p /app/coding \
    && chown -R autogen:autogen /app
USER autogen

ENV PYTHONUNBUFFERED=1

# Default command
CMD ["python", "src/main.py"]
# AutoGen core agent framework + OpenAI model-client extension
autogen-agentchat>=0.7.0
autogen-ext[openai]>=0.7.0
# Loads OPENAI_API_KEY from a .env file at startup
python-dotenv>=1.0.0
# Build the image
docker build -t autogen-app .

# Run interactively; mount output/coding dirs so results persist on the host.
# The volume arguments are quoted so paths containing spaces do not split.
docker run -it --rm \
  -e OPENAI_API_KEY="sk-your-key" \
  -v "$(pwd)/output:/app/output" \
  -v "$(pwd)/coding:/app/coding" \
  autogen-app
# docker-compose.yml — the top-level `version` key is obsolete in Compose v2
# and has been removed.
services:
  autogen:
    build: .
    environment:
      - OPENAI_API_KEY=${OPENAI_API_KEY}
    volumes:
      - ./output:/app/output
      - ./coding:/app/coding
    # Keep stdin/tty open so the agent can be used interactively
    stdin_open: true
    tty: true
# Create .env file
cat > .env << EOF
OPENAI_API_KEY=sk-your-key
EOF
# Restrict permissions — the file holds a live API key
chmod 600 .env

# Start container
docker compose up -d

# View logs
docker compose logs -f

# Run interactively
docker compose run --rm autogen python src/main.py
# syntax=docker/dockerfile:1

# Build stage — compilers and VCS tools stay here and never reach the runtime image
FROM python:3.11-slim AS builder

WORKDIR /app

RUN apt-get update && apt-get install -y --no-install-recommends \
        build-essential \
        git \
    && rm -rf /var/lib/apt/lists/*

COPY requirements.txt .
# --user installs into /root/.local, which is what gets copied below
RUN pip install --user --no-cache-dir -r requirements.txt

# Runtime stage
FROM python:3.11-slim

WORKDIR /app

# Create non-root user
RUN useradd -m -u 1000 autogen

# Copy installed packages from the builder. --chown is required: without it
# the files stay root-owned inside the non-root user's home.
COPY --from=builder --chown=autogen:autogen /root/.local /home/autogen/.local
COPY --chown=autogen:autogen . .

# Create coding directory for code-execution output
RUN mkdir -p /app/coding && chown autogen:autogen /app/coding

# Put user-installed console scripts on PATH
ENV PATH=/home/autogen/.local/bin:$PATH \
    PYTHONUNBUFFERED=1

USER autogen

# Cheap liveness probe: confirms the interpreter still responds
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD python -c "print('healthy')" || exit 1

CMD ["python", "src/main.py"]
# Docker-in-Docker setup for AutoGen's containerized code execution.
# The autogen service talks to the dind sidecar over TCP (DOCKER_HOST);
# the original mounted the host's /var/run/docker.sock at the same time,
# which bypassed dind entirely and made the sidecar dead weight.
services:
  autogen:
    build: .
    environment:
      - OPENAI_API_KEY=${OPENAI_API_KEY}
      - AUTOGEN_USE_DOCKER=true
      # Route the Docker client to the dind daemon (TLS disabled below)
      - DOCKER_HOST=tcp://dind:2375
    volumes:
      - ./output:/app/output
      - ./coding:/app/coding
    depends_on:
      - dind

  dind:
    image: docker:dind
    # dind requires privileged mode to run a nested Docker daemon
    privileged: true
    environment:
      # Empty value disables TLS so the daemon listens on plain 2375
      - DOCKER_TLS_CERTDIR=
    volumes:
      - dind-data:/var/lib/docker

volumes:
  dind-data:
# syntax=docker/dockerfile:1
FROM python:3.11-slim

WORKDIR /app

# git for pip VCS installs; docker.io provides the docker CLI used by
# AutoGen's Docker-based code executor (AUTOGEN_USE_DOCKER=true).
RUN apt-get update && apt-get install -y --no-install-recommends \
        docker.io \
        git \
    && rm -rf /var/lib/apt/lists/*

COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

COPY . .

RUN mkdir -p /app/coding

# NOTE: stays root because the container needs access to the Docker
# socket/daemon for code execution; drop privileges if your socket
# permissions allow it.
ENV PYTHONUNBUFFERED=1 \
    AUTOGEN_USE_DOCKER=true

CMD ["python", "src/main.py"]
# Local-model setup: Ollama serves the model, AutoGen connects over the
# Compose network. The `version` key is obsolete in Compose v2 and removed.
services:
  ollama:
    image: ollama/ollama:latest
    ports:
      - "11434:11434"
    volumes:
      # Persist downloaded models across container restarts
      - ollama-data:/root/.ollama
    deploy:
      resources:
        reservations:
          devices:
            # Requires the NVIDIA container toolkit on the host
            - driver: nvidia
              count: all
              capabilities: [gpu]

  autogen:
    build: .
    environment:
      - OLLAMA_BASE_URL=http://ollama:11434
      - MODEL_NAME=llama3.1
    depends_on:
      - ollama
    volumes:
      - ./output:/app/output
      - ./coding:/app/coding

volumes:
  ollama-data:
# Model client pointed at the Ollama service from the compose file above;
# the hostname "ollama" resolves on the Compose network, 11434 is Ollama's
# default port.
from autogen_ext.models.ollama import OllamaChatCompletionClient

model_client = OllamaChatCompletionClient(
model="llama3.1",
base_url="http://ollama:11434"
)
# src/api.py
"""Minimal FastAPI wrapper that runs one AutoGen task per request."""
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from autogen_agentchat.agents import AssistantAgent
from autogen_ext.models.openai import OpenAIChatCompletionClient

app = FastAPI()


class TaskRequest(BaseModel):
    # Natural-language task for the agent to perform
    task: str
    # OpenAI model name used to build the client
    model: str = "gpt-4o"


class TaskResponse(BaseModel):
    # Stringified result of the agent run
    result: str


@app.post("/run", response_model=TaskResponse)
async def run_task(request: TaskRequest) -> TaskResponse:
    """Run a single AutoGen task and return its stringified result.

    Raises HTTP 500 with the error message on any agent/model failure.
    The model client is closed in a ``finally`` block so its connections
    are released even when the run raises (the original closed it only
    on the success path, leaking connections on errors).
    """
    model_client = OpenAIChatCompletionClient(model=request.model)
    try:
        agent = AssistantAgent(
            "assistant",
            model_client=model_client
        )
        result = await agent.run(task=request.task)
        return TaskResponse(result=str(result))
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    finally:
        await model_client.close()


@app.get("/health")
async def health():
    """Liveness endpoint for container health checks."""
    return {"status": "healthy"}
# syntax=docker/dockerfile:1
FROM python:3.11-slim

WORKDIR /app

# git for pip VCS installs; clean the apt lists in the same layer — the
# original left the package cache baked into the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
        git \
    && rm -rf /var/lib/apt/lists/*

COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

COPY . .

# Run the API as a non-root user
RUN useradd -m -u 1000 autogen && chown -R autogen:autogen /app
USER autogen

EXPOSE 8000

CMD ["uvicorn", "src.api:app", "--host", "0.0.0.0", "--port", "8000"]
# AutoGen core agent framework + OpenAI model-client extension
autogen-agentchat>=0.7.0
autogen-ext[openai]>=0.7.0
# HTTP API server (pinned exactly for reproducible builds)
fastapi==0.115.0
uvicorn==0.32.0
# Loads OPENAI_API_KEY from a .env file
python-dotenv>=1.0.0
# Build the API image
docker build -t autogen-api .
# Run detached, publishing port 8000; pass the API key via environment
docker run -d -p 8000:8000 -e OPENAI_API_KEY=sk-... autogen-api
# AutoGen Studio UI — the obsolete `version` key has been dropped (Compose v2).
services:
  autogen-studio:
    image: ghcr.io/microsoft/autogen-studio:latest
    ports:
      - "8080:8080"
    environment:
      - OPENAI_API_KEY=${OPENAI_API_KEY}
    volumes:
      # Persist Studio's database and config across container restarts
      - autogen-studio-data:/home/autogen/.autogenstudio

volumes:
  autogen-studio-data:
# Start AutoGen Studio in the background
docker compose up -d
Access at: http://localhost:8080
apiVersion: apps/v1
kind: Deployment
metadata:
  name: autogen-deployment
spec:
  replicas: 2
  selector:
    matchLabels:
      app: autogen
  template:
    metadata:
      labels:
        app: autogen
    spec:
      containers:
        - name: autogen
          image: your-registry/autogen-app:latest
          env:
            # API key sourced from a Secret, never hardcoded in the manifest
            - name: OPENAI_API_KEY
              valueFrom:
                secretKeyRef:
                  name: autogen-secrets
                  key: openai-api-key
          volumeMounts:
            - name: coding-volume
              mountPath: /app/coding
      volumes:
        # Scratch space for code execution; wiped when the pod is deleted
        - name: coding-volume
          emptyDir: {}
apiVersion: batch/v1
kind: Job
metadata:
  name: autogen-job
spec:
  # Retry the whole pod at most once on failure
  backoffLimit: 1
  template:
    spec:
      containers:
        - name: autogen
          image: your-registry/autogen-app:latest
          env:
            - name: OPENAI_API_KEY
              valueFrom:
                secretKeyRef:
                  name: autogen-secrets
                  key: openai-api-key
      # One-shot task: never restart the container in place
      restartPolicy: Never
# Check container logs for startup errors
docker logs autogen-app
# Run interactively for debugging (shell instead of the default CMD)
docker run -it --rm autogen-app /bin/bash
# Verify coding directory permissions (code executor must be able to write)
docker exec autogen-app ls -la /app/coding
# Check Docker socket mount — should list host/dind containers, not error
docker exec autogen-app docker ps
# Verify environment variable is set inside the container
docker run --rm autogen-app env | grep OPENAI