Quick start guide to get Chainlit running in under 5 minutes.
Using uv (recommended for speed):
uv pip install chainlit
Using pip:
pip install chainlit
chainlit hello
This creates and runs a demo app to verify installation.
Create app.py:
import chainlit as cl
@cl.on_message
async def main(message: cl.Message):
    """Echo each incoming user message back into the chat."""
    reply = cl.Message(content=f"Received: {message.content}")
    await reply.send()
chainlit run app.py
Access at: http://localhost:8000
Create rag_app.py:
import chainlit as cl
from langchain_text_splitters import CharacterTextSplitter
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser
@cl.on_chat_start
async def on_chat_start():
    """Build the RAG chain for this session and stash it in the user session."""
    # Toy corpus used as the retrieval source.
    corpus = [
        "Chainlit is a Python framework for building conversational AI apps.",
        "It provides a beautiful chat interface with minimal code.",
        "Chainlit supports LangChain, LlamaIndex, and other integrations."
    ]

    # Chunk the corpus (documents here are tiny, so no overlap is needed).
    splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
    chunks = splitter.create_documents(corpus)

    # Embed the chunks and index them in a Chroma vector store.
    store = Chroma.from_documents(chunks, OpenAIEmbeddings())
    retriever = store.as_retriever()

    # Prompt -> LLM -> plain-text parser, with the retriever supplying context.
    rag_prompt = ChatPromptTemplate.from_messages([
        ("system", "Answer based on context: {context}"),
        ("human", "{question}")
    ])
    model = ChatOpenAI(model="gpt-4o")
    rag_chain = (
        {"context": retriever, "question": RunnablePassthrough()}
        | rag_prompt
        | model
        | StrOutputParser()
    )

    # Make the assembled chain available to the message handler.
    cl.user_session.set("chain", rag_chain)
@cl.on_message
async def on_message(message: cl.Message):
    """Answer a user message by streaming the RAG chain's output.

    Retrieves the chain stored by ``on_chat_start``, streams the model
    output token-by-token into a Chainlit message, then finalizes it.
    """
    # Get the chain built for this session.
    chain = cl.user_session.get("chain")

    # Empty placeholder message that tokens are streamed into.
    msg = cl.Message(content="")
    await msg.send()

    # Use the async streaming API: the original's synchronous
    # `for chunk in chain.stream(...)` blocked the event loop for the
    # entire generation, freezing other sessions.
    async for chunk in chain.astream(message.content):
        await msg.stream_token(chunk)

    await msg.update()
Run it:
chainlit run rag_app.py
# Using pip
pip install chainlit
# Using uv (faster)
uv pip install chainlit
# Using Poetry
poetry add chainlit
# LangChain
pip install chainlit langchain langchain-openai
# LlamaIndex
pip install chainlit llama-index
# OpenAI
pip install chainlit openai
# All common integrations
pip install chainlit langchain langchain-openai llama-index
pip install chainlit
chainlit init   # Scaffold the Chainlit project config (NOTE: `create-chainlit` is not a Chainlit CLI subcommand — verify against the CLI reference)
A typical Chainlit project layout:
my-chainlit-app/
├── app.py # Main application
├── chainlit.md # Chat welcome message
├── chainlit_config.toml # Configuration
└── public/ # Static assets
└── avatar.webp
Create basic.py:
import chainlit as cl
@cl.on_chat_start
async def on_chat_start():
    """Greet the user at the start of every new chat session."""
    greeting = cl.Message(content="Welcome! How can I help you today?")
    await greeting.send()
@cl.on_message
async def on_message(message: cl.Message):
    """Echo the user's text back as the bot's response."""
    reply = cl.Message(content=f"You said: {message.content}")
    await reply.send()
@cl.on_chat_end
async def on_chat_end():
    """Log session teardown to stdout when a chat session ends."""
    print("Chat ended")
Run:
chainlit run basic.py
Create streaming.py:
import chainlit as cl
import asyncio
@cl.on_message
async def on_message(message: cl.Message):
    """Demonstrate token-by-token streaming with a canned response."""
    # Placeholder message that streamed tokens are appended to.
    reply = cl.Message(content="")
    await reply.send()

    # Canned text, identical to what a real model reply might look like.
    full_text = (
        "This is a streamed response. "
        "Chainlit makes it easy to show streaming output. "
        "Each token appears one at a time."
    )

    # Emit one whitespace-delimited token at a time, pausing briefly to
    # simulate generation latency.
    for word in full_text.split():
        await reply.stream_token(word + " ")
        await asyncio.sleep(0.1)

    await reply.update()
Run:
chainlit run streaming.py
Create openai_app.py:
import chainlit as cl
from openai import AsyncOpenAI
@cl.on_chat_start
async def on_chat_start():
    """Create one OpenAI client per chat session.

    The original passed ``api_key=cl.config.get("openai_api_key")``, but
    ``cl.config`` is Chainlit's configuration module, not a mapping, so
    that call fails at runtime. ``AsyncOpenAI()`` reads the
    ``OPENAI_API_KEY`` environment variable by default, so no explicit
    key plumbing is needed.
    """
    cl.user_session.set("client", AsyncOpenAI())
@cl.on_message
async def on_message(message: cl.Message):
    """Stream a GPT-4o completion for the user's message into the chat."""
    # Session-scoped client created in on_chat_start.
    client = cl.user_session.get("client")

    # Placeholder message that tokens are streamed into.
    reply = cl.Message(content="")
    await reply.send()

    # Request a streaming completion so tokens can be relayed as they arrive.
    stream = await client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": message.content}],
        stream=True
    )

    async for part in stream:
        delta = part.choices[0].delta.content
        if delta:
            await reply.stream_token(delta)

    await reply.update()
Run:
OPENAI_API_KEY=sk-your-key chainlit run openai_app.py   # pass the key via environment variable; `chainlit run` has no `-e` flag