# Install the SDK first: pip install xpander-sdk
import os

from openai import OpenAI
from xpander_sdk import LLMProvider, XpanderClient

# Both API keys are read from the environment — set them before running.
xpander_client = XpanderClient(api_key=os.getenv("XPANDER_API_KEY"))
openai_client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

# Fetch the agent you created in the xpander backend (https://app.xpander.ai).
agent = xpander_client.agents.get(agent_id=os.getenv("XPANDER_AGENT_ID"))

# Tell the agent which provider's message/tool formats to use.
agent.select_llm_provider(LLMProvider.OPEN_AI)

# Expose the "end" tool so the model itself can signal completion.
agent.enable_agent_end_tool()

# Queue the task the agent should work on.
agent.add_task("Hello world! Tell me what you can do.")

# Drive the loop: ask the model, store its reply, execute requested tools.
while not agent.is_finished():
    completion = openai_client.chat.completions.create(
        model="gpt-4.1",
        messages=agent.messages,
        tools=agent.get_tools(),
        tool_choice=agent.tool_choice,
        temperature=0,
    )

    # Persist the model's reply into the agent's memory.
    completion_payload = completion.model_dump()
    agent.add_messages(completion_payload)

    # Run any tool invocations the model asked for.
    requested_calls = agent.extract_tool_calls(llm_response=completion_payload)
    if requested_calls:
        agent.run_tools(tool_calls=requested_calls)

# Once the agent declares itself finished, read back the final answer.
execution = agent.retrieve_execution_result()
print(execution.result)

# Keep the thread id to resume this conversation later (memory persistence).
thread_id = execution.memory_thread_id
print(f"Thread ID: {thread_id}")
hello_world_agent.ts
Copy
Ask AI
import { XpanderClient, LLMProvider } from '@xpander-ai/sdk';
import OpenAI from 'openai';

// API keys are read from the environment — set them before running.
const xpanderClient = new XpanderClient({ apiKey: process.env.XPANDER_API_KEY });
const openaiClient = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

// Load the agent configured in the xpander.ai backend.
const agent = await xpanderClient.agents.get({ agentId: process.env.XPANDER_AGENT_ID });

// Tell the agent which provider's message/tool formats to use.
agent.selectLLMProvider(LLMProvider.OPEN_AI);

// Allow the model to finish when done
agent.enableAgentEndTool();

await agent.addTask({ input: 'Hello world! Tell me what you can do.' });

// Drive the loop: query the model, store the reply, run requested tools.
while (!(await agent.isFinished())) {
  const completion = await openaiClient.chat.completions.create({
    model: 'gpt-4.1',
    messages: agent.messages,
    tools: agent.getTools(),
    tool_choice: agent.toolChoice,
    temperature: 0,
  });

  // Persist the model's reply into the agent's memory.
  await agent.addMessages(completion);

  // Run any tool invocations the model asked for.
  const requestedCalls = agent.extractToolCalls({ llmResponse: completion });
  if (requestedCalls.length > 0) {
    await agent.runTools({ toolCalls: requestedCalls });
  }
}

// Print the final answer and keep the thread id for future conversations.
const executionResult = await agent.retrieveExecutionResult();
console.log(executionResult.result);

const threadId = executionResult.memoryThreadId;
console.log(`Thread ID: ${threadId}`);
# Deploys the Docker container to the cloud and runs it via xpander_handler.py
xpander deploy

# Streams logs locally from your configured agent
xpander logs
The xpander.ai backend is model-agnostic. You can use any LLM provider:
Copy
Ask AI
import os

from openai import OpenAI

# Read the key from the environment rather than hard-coding it: a literal
# API key in source code leaks the credential, and the other snippets on
# this page already use os.getenv for the same value.
openai_client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

# First, select the provider so the agent emits OpenAI-shaped messages/tools.
agent.select_llm_provider(LLMProvider.OPEN_AI)

# Then use the provider's client with the agent-supplied context.
response = openai_client.chat.completions.create(
    model="gpt-4.1",
    messages=agent.messages,
    tools=agent.get_tools(),  # Tools automatically formatted for OpenAI
    tool_choice=agent.tool_choice,
)

# Process the response: serialize once, then record it and run any tools
# the model requested.
response_payload = response.model_dump()
agent.add_messages(response_payload)
tool_calls = agent.extract_tool_calls(llm_response=response_payload)
agent.run_tools(tool_calls=tool_calls)