Deploy Chainlit as a standalone Python application.
mkdir chainlit-app
cd chainlit-app
mkdir -p public
python -m venv venv
source venv/bin/activate # Linux/macOS
# or
venv\Scripts\activate # Windows
Create requirements.txt:
chainlit==2.10.0
python-dotenv>=1.0.0
langchain>=0.3.0
langchain-openai>=0.3.0
Install:
pip install -r requirements.txt
Create app.py:
import chainlit as cl
import os
from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
load_dotenv()
@cl.on_chat_start
async def on_chat_start():
    """Set up a new chat session: build the LCEL chain and greet the user."""
    # Assemble the prompt -> model -> string-parser pipeline.
    model = ChatOpenAI(model="gpt-4o", temperature=0.7)
    chat_prompt = ChatPromptTemplate.from_messages(
        [
            ("system", "You are a helpful assistant."),
            ("human", "{input}"),
        ]
    )
    pipeline = chat_prompt | model | StrOutputParser()

    # Keep the chain available to later message handlers in this session.
    cl.user_session.set("chain", pipeline)

    # Greet the user.
    welcome = cl.Message(
        content="Hello! I'm your AI assistant. How can I help you today?"
    )
    await welcome.send()
@cl.on_message
async def on_message(message: cl.Message):
    """Stream the LLM's reply to an incoming user message, token by token."""
    # Retrieve the chain stored for this session by on_chat_start.
    pipeline = cl.user_session.get("chain")

    # Send an empty message first so tokens can be streamed into it.
    reply = cl.Message(content="")
    await reply.send()

    # Forward each streamed chunk to the UI as it arrives.
    async for token in pipeline.astream({"input": message.content}):
        await reply.stream_token(token)

    # Finalize the streamed message.
    await reply.update()
@cl.on_chat_end
async def on_chat_end():
    """Hook invoked by Chainlit when the chat session terminates."""
    # Minimal server-side trace; no session resources need explicit release here.
    print("Chat ended")
Create .env file:
OPENAI_API_KEY=sk-your-openai-api-key-here
CHAINLIT_AUTH_SECRET=your-secret-key
chainlit run app.py
Access at: http://localhost:8000
# Run on all interfaces
chainlit run app.py --host 0.0.0.0 --port 8000
# Run with authentication (set the secret in the environment; the chainlit
# CLI does not take env vars as flags)
CHAINLIT_AUTH_SECRET=secret chainlit run app.py --host 0.0.0.0 --port 8000
Install gunicorn:
pip install gunicorn
Run with gunicorn. Note: `chainlit.cli:cli` is Chainlit's Click command-line
entry point, not an ASGI application, so gunicorn cannot serve it directly.
Mount Chainlit into a FastAPI app first — create main.py:
from fastapi import FastAPI
from chainlit.utils import mount_chainlit
app = FastAPI()
mount_chainlit(app=app, target="app.py", path="/")
Then run:
gunicorn main:app \
--bind 0.0.0.0:8000 \
--workers 4 \
--worker-class uvicorn.workers.UvicornWorker \
--timeout 120
(With multiple workers, put sticky sessions in front so each websocket
client stays pinned to one worker.)
Create /etc/systemd/system/chainlit.service:
[Unit]
Description=Chainlit Application
After=network.target
[Service]
Type=simple
User=www-data
Group=www-data
WorkingDirectory=/opt/chainlit
Environment="PATH=/opt/chainlit/venv/bin"
Environment="OPENAI_API_KEY=your-api-key"
Environment="CHAINLIT_AUTH_SECRET=your-secret"
ExecStart=/opt/chainlit/venv/bin/chainlit run app.py --host 0.0.0.0 --port 8000
Restart=always
RestartSec=10
[Install]
WantedBy=multi-user.target
Enable and start:
sudo systemctl daemon-reload
sudo systemctl enable chainlit
sudo systemctl start chainlit
sudo systemctl status chainlit
Create /etc/nginx/sites-available/chainlit:
server {
listen 80;
server_name your-domain.com;
location / {
proxy_pass http://localhost:8000;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}
Enable and restart:
sudo ln -s /etc/nginx/sites-available/chainlit /etc/nginx/sites-enabled/
sudo nginx -t
sudo systemctl restart nginx
Edit crontab:
crontab -e
Add entry. Note: `chainlit run` is a long-running server, so a recurring
schedule (e.g. daily) would spawn duplicate instances. Use cron only as a
boot-time fallback when systemd is unavailable:
@reboot cd /opt/chainlit && /opt/chainlit/venv/bin/chainlit run app.py >> /var/log/chainlit.log 2>&1
Create logging_config.py:
import logging
import sys
def setup_logging():
    """Configure root logging to send INFO+ records to both a file and stdout.

    Uses ``force=True`` because ``basicConfig`` is a silent no-op when the
    root logger already has handlers — which it will once chainlit/uvicorn
    has started and installed its own. ``force=True`` removes the existing
    handlers so this configuration actually takes effect.
    """
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        handlers=[
            # Explicit encoding avoids platform-dependent log-file encodings
            # (e.g. cp1252 on Windows).
            logging.FileHandler('chainlit.log', encoding='utf-8'),
            logging.StreamHandler(sys.stdout),
        ],
        force=True,
    )
Use in app.py:
from logging_config import setup_logging
setup_logging()
logger = logging.getLogger(__name__)
@cl.on_message
async def on_message(message: cl.Message):
# Log a truncated preview (first 50 chars) of the incoming message before handling it.
# NOTE(review): the module-level `logger` comes from logging_config via setup_logging().
logger.info(f"Received message: {message.content[:50]}...")
# ... rest of code