This example is available on GitHub: examples/01_standalone_sdk/11_async.py
"""
This example demonstrates usage of a Conversation in an async context
(e.g.: From a fastapi server). The conversation is run in a background
thread and a callback with results is executed in the main runloop
"""
import asyncio
import os
from pydantic import SecretStr
from openhands.sdk import (
LLM,
Agent,
Conversation,
Event,
LLMConvertibleEvent,
get_logger,
)
from openhands.sdk.conversation.types import ConversationCallbackType
from openhands.sdk.tool import Tool, register_tool
from openhands.sdk.utils.async_utils import AsyncCallbackWrapper
from openhands.tools.execute_bash import BashTool
from openhands.tools.file_editor import FileEditorTool
from openhands.tools.task_tracker import TaskTrackerTool
logger = get_logger(__name__)
# Configure LLM
api_key = os.getenv("LLM_API_KEY")
assert api_key is not None, "LLM_API_KEY environment variable is not set."
model = os.getenv("LLM_MODEL", "openhands/claude-sonnet-4-5-20250929")
base_url = os.getenv("LLM_BASE_URL")
llm = LLM(
usage_id="agent",
model=model,
base_url=base_url,
api_key=SecretStr(api_key),
)
# Tools
cwd = os.getcwd()
register_tool("BashTool", BashTool)
register_tool("FileEditorTool", FileEditorTool)
register_tool("TaskTrackerTool", TaskTrackerTool)
tools = [
Tool(
name="BashTool",
),
Tool(name="FileEditorTool"),
Tool(name="TaskTrackerTool"),
]
# Agent
agent = Agent(llm=llm, tools=tools)
llm_messages = [] # collect raw LLM messages
# Callback coroutine
async def callback_coro(event: Event):
if isinstance(event, LLMConvertibleEvent):
llm_messages.append(event.to_llm_message())
# Synchronous run conversation
def run_conversation(callback: ConversationCallbackType):
conversation = Conversation(agent=agent, callbacks=[callback])
conversation.send_message(
"Hello! Can you create a new Python file named hello.py that prints "
"'Hello, World!'? Use task tracker to plan your steps."
)
conversation.run()
conversation.send_message("Great! Now delete that file.")
conversation.run()
async def main():
loop = asyncio.get_running_loop()
# Create the callback
callback = AsyncCallbackWrapper(callback_coro, loop)
# Run the conversation in a background thread and wait for it to finish...
await loop.run_in_executor(None, run_conversation, callback)
print("=" * 100)
print("Conversation finished. Got the following LLM messages:")
for i, message in enumerate(llm_messages):
print(f"Message {i}: {str(message)[:200]}")
if __name__ == "__main__":
asyncio.run(main())
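The bridge between the worker thread and the event loop is AsyncCallbackWrapper: the conversation runs synchronously in an executor thread, so each event has to be handed back to the loop rather than awaited directly. If you wanted to build a similar bridge yourself, the standard-library primitive for this is asyncio.run_coroutine_threadsafe. A minimal sketch, not the SDK's actual implementation (EventBridge is a hypothetical name):

import asyncio


class EventBridge:
    """Hypothetical stand-in for AsyncCallbackWrapper: schedules an async
    callback on a target event loop from any thread."""

    def __init__(self, coro_fn, loop: asyncio.AbstractEventLoop):
        self._coro_fn = coro_fn  # async def callback(event) -> None
        self._loop = loop

    def __call__(self, event) -> None:
        # Invoked from the worker thread; run_coroutine_threadsafe is the
        # thread-safe way to schedule a coroutine on another loop.
        asyncio.run_coroutine_threadsafe(self._coro_fn(event), self._loop)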
Running the Example
export LLM_API_KEY="your-api-key"
cd agent-sdk
uv run python examples/01_standalone_sdk/11_async.py
Async Streaming
Use astream() to process events as they occur without blocking:
async def run_agent():
    conversation = Conversation(agent=agent, workspace=cwd)
    conversation.send_message("Write 3 facts about Python to FACTS.txt")

    async for event in conversation.astream():
        print(f"Event: {event}")
Concurrent Agents
Run multiple agent tasks in parallel using asyncio.gather():
async def main():
    # Create multiple conversation tasks
    tasks = [
        run_task("task 1"),
        run_task("task 2"),
        run_task("task 3"),
    ]
    results = await asyncio.gather(*tasks)
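The snippet leaves run_task to you. One way to write it, sketched from the astream() pattern above (run_task is a hypothetical helper), is to give each task its own Conversation so parallel runs stay isolated:

async def run_task(message: str) -> list:
    # A fresh Conversation per task so concurrent runs don't share state.
    conversation = Conversation(agent=agent, workspace=cwd)
    conversation.send_message(message)

    events = []
    async for event in conversation.astream():
        events.append(event)
    return events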
Next Steps
- Persistence - Save and restore conversation state
- Send Message While Processing - Interrupt running agents

