Restore a conversation using the same ID and persistence directory:
# Later, in a different session
del conversation

# Deserialize the conversation: passing the same persistence_dir and
# conversation_id makes Conversation reload the saved state from disk.
print("Deserializing conversation...")
conversation = Conversation(
    agent=agent,
    callbacks=[conversation_callback],
    workspace=cwd,
    persistence_dir=persistence_dir,
    conversation_id=conversation_id,
)
conversation.send_message("Continue task")
conversation.run()  # Continues from saved state
When you set a persistence_dir, your conversation will be persisted to a directory structure where each
conversation has its own subdirectory. By default, the persistence directory is workspace/conversations/
(unless you specify a custom path).

Directory structure:
workspace/conversations/
  <conversation-id-1>/
    base_state.json
    events/
      event-00000-<event-id>.json
      event-00001-<event-id>.json
      ...
  <conversation-id-2>/
    ...
Each conversation directory contains:
base_state.json: The core conversation state including agent configuration, execution status, statistics, and metadata
events/: A subdirectory containing individual event files, each named with a sequential index and event ID (e.g., event-00000-abc123.json)
The collection of event files in the events/ directory represents the same trajectory data you would find in the trajectory.json file from OpenHands V0, but split into individual files for better performance and granular access.
import os
import uuid

from pydantic import SecretStr

from openhands.sdk import (
    LLM,
    Agent,
    Conversation,
    Event,
    LLMConvertibleEvent,
    get_logger,
)
from openhands.sdk.tool import Tool
from openhands.tools.file_editor import FileEditorTool
from openhands.tools.terminal import TerminalTool

logger = get_logger(__name__)

# Configure LLM
api_key = os.getenv("LLM_API_KEY")
assert api_key is not None, "LLM_API_KEY environment variable is not set."
model = os.getenv("LLM_MODEL", "anthropic/claude-sonnet-4-5-20250929")
base_url = os.getenv("LLM_BASE_URL")
llm = LLM(
    usage_id="agent",
    model=model,
    base_url=base_url,
    api_key=SecretStr(api_key),
)

# Tools
cwd = os.getcwd()
tools = [
    Tool(name=TerminalTool.name),
    Tool(name=FileEditorTool.name),
]

# Add MCP Tools
mcp_config = {
    "mcpServers": {
        "fetch": {"command": "uvx", "args": ["mcp-server-fetch"]},
    }
}

# Agent
agent = Agent(llm=llm, tools=tools, mcp_config=mcp_config)

llm_messages = []  # collect raw LLM messages


def conversation_callback(event: Event):
    # Collect every LLM-convertible event so we can inspect the raw
    # messages after the run finishes.
    if isinstance(event, LLMConvertibleEvent):
        llm_messages.append(event.to_llm_message())


conversation_id = uuid.uuid4()
persistence_dir = "./.conversations"

conversation = Conversation(
    agent=agent,
    callbacks=[conversation_callback],
    workspace=cwd,
    persistence_dir=persistence_dir,
    conversation_id=conversation_id,
)
conversation.send_message(
    "Read https://github.com/OpenHands/OpenHands. Then write 3 facts "
    "about the project into FACTS.txt."
)
conversation.run()

conversation.send_message("Great! Now delete that file.")
conversation.run()

print("=" * 100)
print("Conversation finished. Got the following LLM messages:")
for i, message in enumerate(llm_messages):
    print(f"Message {i}: {str(message)[:200]}")

# Conversation persistence: drop the in-memory object, then rebuild it
# from disk using the same persistence_dir and conversation_id.
print("Serializing conversation...")
del conversation

# Deserialize the conversation
print("Deserializing conversation...")
conversation = Conversation(
    agent=agent,
    callbacks=[conversation_callback],
    workspace=cwd,
    persistence_dir=persistence_dir,
    conversation_id=conversation_id,
)
print("Sending message to deserialized conversation...")
conversation.send_message("Hey what did you create? Return an agent finish action")
conversation.run()

# Report cost
cost = llm.metrics.accumulated_cost
print(f"EXAMPLE_COST: {cost}")
You can run the example code as-is.
The model name should follow the LiteLLM convention: provider/model_name (e.g., anthropic/claude-sonnet-4-5-20250929, openai/gpt-4o).
The LLM_API_KEY should be the API key for your chosen provider.
ChatGPT Plus/Pro subscribers: You can use LLM.subscription_login() to authenticate with your ChatGPT account and access Codex models without consuming API credits. See the LLM Subscriptions guide for details.
"""Load persisted events and convert them into LLM-ready messages."""import jsonimport osimport uuidfrom pathlib import Pathfrom pydantic import SecretStrconversation_id = uuid.uuid4()persistence_root = Path(".conversations")log_dir = ( persistence_root / "logs" / "event-json-to-openai-messages" / conversation_id.hex)os.environ.setdefault("LOG_JSON", "true")os.environ.setdefault("LOG_TO_FILE", "true")os.environ.setdefault("LOG_DIR", str(log_dir))os.environ.setdefault("LOG_LEVEL", "INFO")from openhands.sdk import ( # noqa: E402 LLM, Agent, Conversation, Event, LLMConvertibleEvent, Tool,)from openhands.sdk.logger import get_logger, setup_logging # noqa: E402from openhands.tools.terminal import TerminalTool # noqa: E402setup_logging(log_to_file=True, log_dir=str(log_dir))logger = get_logger(__name__)api_key = os.getenv("LLM_API_KEY")if not api_key: raise RuntimeError("LLM_API_KEY environment variable is not set.")llm = LLM( usage_id="agent", model=os.getenv("LLM_MODEL", "anthropic/claude-sonnet-4-5-20250929"), base_url=os.getenv("LLM_BASE_URL"), api_key=SecretStr(api_key),)agent = Agent( llm=llm, tools=[Tool(name=TerminalTool.name)],)####### Create a conversation that persists its events######conversation = Conversation( agent=agent, workspace=os.getcwd(), persistence_dir=str(persistence_root), conversation_id=conversation_id,)conversation.send_message( "Use the terminal tool to run `pwd` and write the output to tool_output.txt. " "Reply with a short confirmation once done.")conversation.run()conversation.send_message( "Without using any tools, summarize in one sentence what you did.")conversation.run()assert conversation.state.persistence_dir is not Nonepersistence_dir = Path(conversation.state.persistence_dir)event_dir = persistence_dir / "events"event_paths = sorted(event_dir.glob("event-*.json"))if not event_paths: raise RuntimeError("No event files found. 
Was persistence enabled?")####### Read from serialized events######events = [Event.model_validate_json(path.read_text()) for path in event_paths]convertible_events = [ event for event in events if isinstance(event, LLMConvertibleEvent)]llm_messages = LLMConvertibleEvent.events_to_messages(convertible_events)if llm.uses_responses_api(): logger.info("Formatting messages for the OpenAI Responses API.") instructions, input_items = llm.format_messages_for_responses(llm_messages) logger.info("Responses instructions:\n%s", instructions) logger.info("Responses input:\n%s", json.dumps(input_items, indent=2))else: logger.info("Formatting messages for the OpenAI Chat Completions API.") chat_messages = llm.format_messages_for_llm(llm_messages) logger.info("Chat Completions messages:\n%s", json.dumps(chat_messages, indent=2))# Report costcost = llm.metrics.accumulated_costprint(f"EXAMPLE_COST: {cost}")
You can run the example code as-is.
The model name should follow the LiteLLM convention: provider/model_name (e.g., anthropic/claude-sonnet-4-5-20250929, openai/gpt-4o).
The LLM_API_KEY should be the API key for your chosen provider.
ChatGPT Plus/Pro subscribers: You can use LLM.subscription_login() to authenticate with your ChatGPT account and access Codex models without consuming API credits. See the LLM Subscriptions guide for details.
The SDK uses an automatic persistence system that saves state changes immediately when they occur. This ensures that conversation state is always recoverable, even if the process crashes unexpectedly.
When you modify any public field on ConversationState, the SDK automatically:
Detects the field change via a custom __setattr__ implementation
Serializes the entire base state to base_state.json
Triggers any registered state change callbacks
This happens transparently—you don’t need to call any save methods manually.
# These changes are automatically persisted: assigning to a public field
# on ConversationState triggers its __setattr__ hook, which writes
# base_state.json immediately — no explicit save call is needed.
conversation.state.execution_status = ConversationExecutionStatus.RUNNING
conversation.state.max_iterations = 100