Configuration guide for Microsoft AutoGen: model clients, agents, teams, tools, code execution, and logging.
# OpenAI model client
from autogen_ext.models.openai import OpenAIChatCompletionClient

model_client = OpenAIChatCompletionClient(
    model="gpt-4o",
    api_key="sk-your-key",  # Or use the OPENAI_API_KEY env var
    temperature=0.7,
    max_tokens=2000,
    timeout=30,
)
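Model clients can also be called directly, outside of any agent; UserMessage comes from autogen_core.models. A minimal sketch (run inside an async context):

from autogen_core.models import UserMessage

# One-shot call to the model client
result = await model_client.create([UserMessage(content="Hello!", source="user")])
print(result.content)
await model_client.close()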
# Azure OpenAI model client with Entra ID (AAD) authentication
from autogen_ext.models.openai import AzureOpenAIChatCompletionClient
from azure.identity import DefaultAzureCredential, get_bearer_token_provider

# Uses Azure CLI / managed identity credentials
token_provider = get_bearer_token_provider(
    DefaultAzureCredential(),
    "https://cognitiveservices.azure.com/.default",
)

model_client = AzureOpenAIChatCompletionClient(
    azure_deployment="your-deployment-name",
    model="gpt-4o",
    api_version="2025-01-01-preview",
    azure_endpoint="https://your-endpoint.openai.azure.com/",
    azure_ad_token_provider=token_provider,
)
# Anthropic model client
from autogen_ext.models.anthropic import AnthropicChatCompletionClient

model_client = AnthropicChatCompletionClient(
    model="claude-sonnet-4-20250514",
    api_key="sk-ant-your-key",  # Or use the ANTHROPIC_API_KEY env var
    temperature=0.7,
    max_tokens=2000,
)
# Ollama model client (local models)
from autogen_ext.models.ollama import OllamaChatCompletionClient

model_client = OllamaChatCompletionClient(
    model="llama3.1",
    host="http://localhost:11434",    # Ollama server address
    options={"temperature": 0.7},     # Sampling options passed through to Ollama
)
# Assistant agent
from autogen_agentchat.agents import AssistantAgent

agent = AssistantAgent(
    name="assistant",
    model_client=model_client,
    description="A helpful assistant",
    system_message="You are a helpful AI assistant.",
    tools=[],                   # List of tools
    model_client_stream=True,   # Enable streaming
    max_tool_iterations=10,     # Max tool-call iterations per turn
)
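A single agent is run with run() (or run_stream() for streaming); the task string below is just a placeholder. A minimal usage sketch:

# Run the agent on a one-off task and print its final response
result = await agent.run(task="What is the capital of France?")
print(result.messages[-1].content)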
# User proxy with code execution and auto-reply settings (legacy AutoGen 0.2 API)
from autogen import UserProxyAgent

user_proxy = UserProxyAgent(
    name="user_proxy",
    description="A human user proxy",
    code_execution_config={
        "work_dir": "coding",   # Directory for code execution
        "use_docker": False,    # Run code locally instead of in Docker
    },
    human_input_mode="TERMINATE",  # ALWAYS, TERMINATE, or NEVER
    max_consecutive_auto_reply=10,
)
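In the newer autogen_agentchat API (v0.4+), UserProxyAgent is only a thin human-in-the-loop proxy: it takes an input_func and leaves code execution to dedicated executor agents. A minimal sketch:

from autogen_agentchat.agents import UserProxyAgent

user_proxy = UserProxyAgent(
    name="user_proxy",
    description="A human user proxy",
    input_func=input,  # Called whenever the team needs human input
)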
# Custom agent: subclass BaseChatAgent and implement its abstract methods
from typing import Sequence

from autogen_agentchat.agents import BaseChatAgent
from autogen_agentchat.base import Response
from autogen_agentchat.messages import BaseChatMessage, TextMessage
from autogen_core import CancellationToken

class CustomAgent(BaseChatAgent):
    def __init__(self, name: str, description: str):
        super().__init__(name, description)

    @property
    def produced_message_types(self) -> Sequence[type[BaseChatMessage]]:
        return (TextMessage,)

    async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response:
        # Custom logic here
        return Response(chat_message=TextMessage(source=self.name, content="Custom response"))

    async def on_reset(self, cancellation_token: CancellationToken) -> None:
        pass  # Clear any internal state between runs
# Round-robin team: agents take turns in a fixed order
from autogen_agentchat.teams import RoundRobinGroupChat

team = RoundRobinGroupChat(
    participants=[agent1, agent2, agent3],
    max_turns=10,  # Maximum conversation turns
)
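Teams are run like single agents; a termination condition from autogen_agentchat.conditions can stop the run before max_turns is reached. A sketch, with the task string as a placeholder:

from autogen_agentchat.conditions import TextMentionTermination

# Stop as soon as any agent says "TERMINATE"
termination = TextMentionTermination("TERMINATE")

team = RoundRobinGroupChat(
    participants=[agent1, agent2, agent3],
    termination_condition=termination,
    max_turns=10,
)

result = await team.run(task="Summarize the design options")  # Returns a TaskResult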
# Selector team: a model picks the next speaker each turn
from autogen_agentchat.teams import SelectorGroupChat

team = SelectorGroupChat(
    participants=[agent1, agent2, agent3],
    model_client=model_client,  # Used to select the next speaker
    max_turns=10,
)
# Magentic-One team: an orchestrator plans, tracks progress, and handles stalls
from autogen_agentchat.teams import MagenticOneGroupChat

team = MagenticOneGroupChat(
    participants=[agent1, agent2, agent3],
    model_client=model_client,
    max_turns=10,
    max_stalls=3,  # Max consecutive stalls tolerated by the orchestrator
)
# Wrap a plain Python function as a tool
from autogen_core.tools import FunctionTool

def get_weather(city: str) -> str:
    """Get weather for a city."""
    return f"It's sunny in {city}"

tool = FunctionTool(
    get_weather,
    description="Get the weather for a city",
    name="get_weather",
)
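Tools are attached to an agent through the tools parameter; the agent name below is just illustrative. Bare Python functions are also accepted and wrapped into a FunctionTool automatically:

# Give an assistant access to the weather tool
weather_agent = AssistantAgent(
    name="weather_assistant",
    model_client=model_client,
    tools=[tool],            # FunctionTool instances ...
    # tools=[get_weather],   # ... or bare functions, wrapped automatically
)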
# MCP (Model Context Protocol) tools via a workbench
from autogen_ext.tools.mcp import McpWorkbench, StdioServerParams

server_params = StdioServerParams(
    command="npx",
    args=["@playwright/mcp@latest", "--headless"],
)

async with McpWorkbench(server_params) as mcp:
    agent = AssistantAgent(
        "web_assistant",
        model_client=model_client,
        workbench=mcp,
    )
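While the async with block is open the MCP server stays connected, so the agent is run inside it as usual; the task string below is only an example.

    # Still inside the async with block: run the agent while the MCP tools are available
    result = await agent.run(task="Open https://example.com and summarize the page")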
# FunctionTool builds the tool's JSON schema from the function's type hints and docstring;
# use Annotated metadata to attach per-parameter descriptions
from typing import Annotated

from autogen_core.tools import FunctionTool

def get_weather_impl(
    city: Annotated[str, "The city name"],
    unit: Annotated[str, "Temperature unit"] = "celsius",
) -> str:
    """Get weather information for a city."""
    return f"Weather in {city}: 20°{unit[0].upper()}"

tool = FunctionTool(
    get_weather_impl,
    description="Get weather information",
    name="get_weather",
)
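For quick testing a tool can also be invoked directly; run_json takes the arguments as a dict plus a CancellationToken. A sketch:

from autogen_core import CancellationToken

# Call the tool directly, bypassing any agent
result = await tool.run_json({"city": "Paris"}, CancellationToken())
print(result)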
# Code execution via the user proxy (legacy AutoGen 0.2 API)
from autogen import UserProxyAgent

# Local execution in a working directory
user_proxy = UserProxyAgent(
    name="user_proxy",
    code_execution_config={
        "work_dir": "coding",
        "use_docker": False,
        "timeout": 60,
        "last_n_messages": 1,
    },
)
# Docker-based execution with a specific image
user_proxy = UserProxyAgent(
    name="user_proxy",
    code_execution_config={
        "work_dir": "coding",
        "use_docker": "python:3.11",  # Docker image to run code in
        "timeout": 60,
    },
)
# Human input modes
user_proxy = UserProxyAgent(
    name="user_proxy",
    human_input_mode="ALWAYS",  # Always ask the human before replying
    max_consecutive_auto_reply=10,
)

user_proxy = UserProxyAgent(
    name="user_proxy",
    human_input_mode="TERMINATE",  # Ask the human only when the agent wants to terminate
    max_consecutive_auto_reply=10,
)

user_proxy = UserProxyAgent(
    name="user_proxy",
    human_input_mode="NEVER",  # Fully autonomous, never ask the human
    max_consecutive_auto_reply=10,
)
# OpenAI
export OPENAI_API_KEY="sk-your-key"
# Azure OpenAI
export AZURE_OPENAI_API_KEY="your-key"
export AZURE_OPENAI_ENDPOINT="https://your-endpoint.openai.azure.com/"
# Anthropic
export ANTHROPIC_API_KEY="sk-ant-your-key"
# Google
export GOOGLE_API_KEY="your-key"
# Ollama
export OLLAMA_BASE_URL="http://localhost:11434"
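When api_key is omitted, the OpenAI and Anthropic clients fall back to these environment variables. Loading them from a .env file with python-dotenv (an optional dependency) keeps keys out of source code; a sketch:

import os

from dotenv import load_dotenv  # Optional: pip install python-dotenv

load_dotenv()  # Loads variables from a local .env file into the environment

model_client = OpenAIChatCompletionClient(
    model="gpt-4o",
    api_key=os.environ["OPENAI_API_KEY"],  # Explicit, instead of the default env lookup
)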
# Streaming: model_client_stream is an AssistantAgent option, not a model client option
model_client = OpenAIChatCompletionClient(
    model="gpt-4o",
)

agent = AssistantAgent(
    "assistant",
    model_client=model_client,
    model_client_stream=True,  # Stream tokens from the model as they are generated
)
from autogen_agentchat.ui import Console
await Console(agent.run_stream(task="Write a poem"))
import logging
# Set logging level
logging.basicConfig(level=logging.INFO)
# Or for more verbose logging
logging.basicConfig(level=logging.DEBUG)
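AutoGen also exposes named loggers for structured events and message traces; the logger names below come from autogen_core (EVENT_LOGGER_NAME, TRACE_LOGGER_NAME), so this is a sketch of enabling them individually rather than raising the root level:

import logging

from autogen_core import EVENT_LOGGER_NAME, TRACE_LOGGER_NAME

logging.basicConfig(level=logging.WARNING)

# Structured agent/runtime events
logging.getLogger(EVENT_LOGGER_NAME).setLevel(logging.INFO)
# Low-level message traces (very verbose)
logging.getLogger(TRACE_LOGGER_NAME).setLevel(logging.DEBUG)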