# Quick Start: Add a Backend to Your Agent
This guide provides a simple Hello World example to get you started with xpander’s backend services.
You’ll learn:
- How to connect to xpander’s backend services
- Basic agent execution flow
- How to process tool calls
- How to manage conversation state
- Core SDK architecture and components
This guide shows how to enhance any agent with xpander.ai’s backend for memory, tools, and more.
## 📦 Installation
```bash
# Python
pip install xpander-sdk

# Node.js
npm install @xpander-ai/sdk

# CLI (for agent creation)
npm install -g xpander-cli
```
Use the xpander CLI to scaffold a new agent template:
```bash
xpander login
xpander agent new
python xpander_handler.py  # Entry point for your agent's event handling
```
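The template's `xpander_handler.py` is where incoming execution events reach your code. Its exact contents come from the scaffold, but conceptually it wires each event into the same agent loop shown below. A rough, hypothetical sketch (the `handle_task` wrapper is illustrative, not the real template API; only the `xpander_sdk` calls are taken from this guide):

```python
# Hypothetical sketch of what xpander_handler.py does -- the scaffolded file
# defines the real event wiring; the handle_task wrapper below is illustrative.
import os
from xpander_sdk import XpanderClient

xpander = XpanderClient(api_key=os.getenv("XPANDER_API_KEY"))
agent = xpander.agents.get(agent_id=os.getenv("XPANDER_AGENT_ID"))

def handle_task(task_input: str) -> str:
    """Illustrative wrapper: run one task through the agent and return its result."""
    agent.add_task(task_input)
    # ... run the LLM loop from the Hello World example below ...
    return agent.retrieve_execution_result().result
```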
## Create a Hello World Agent
```python
# First, install the SDK if you haven't already
# pip install xpander-sdk
from xpander_sdk import XpanderClient, LLMProvider
from openai import OpenAI
import os

# Initialize clients with your API keys
# Set these environment variables before running
xpander = XpanderClient(api_key=os.getenv("XPANDER_API_KEY"))
openai = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

# Get your agent from xpander's backend
# Create your agent first at https://app.xpander.ai
agent = xpander.agents.get(agent_id=os.getenv("XPANDER_AGENT_ID"))

# Select the LLM provider you want to use
agent.select_llm_provider(LLMProvider.OPEN_AI)

# Let the LLM decide when it has finished
agent.enable_agent_end_tool()

# Add a task for the agent to process
agent.add_task("Hello world! Tell me what you can do.")

# Run the agent with OpenAI
while not agent.is_finished():
    # Get LLM response from OpenAI
    response = openai.chat.completions.create(
        model="gpt-4.1",
        messages=agent.messages,
        tools=agent.get_tools(),
        tool_choice=agent.tool_choice,
        temperature=0
    )

    # Add response to agent memory
    agent.add_messages(response.model_dump())

    # Extract and run any tools the agent wants to use
    tool_calls = agent.extract_tool_calls(
        llm_response=response.model_dump()
    )
    if tool_calls:
        agent.run_tools(tool_calls=tool_calls)

# Get the final result
result = agent.retrieve_execution_result()
print(result.result)

# Store the thread_id for future conversations (memory persistence)
thread_id = result.memory_thread_id
print(f"Thread ID: {thread_id}")
```
```typescript
import { XpanderClient, LLMProvider } from '@xpander-ai/sdk';
import OpenAI from 'openai';

const xpander = new XpanderClient({ apiKey: process.env.XPANDER_API_KEY });
const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

const agent = await xpander.agents.get({ agentId: process.env.XPANDER_AGENT_ID });

// Select the LLM provider you want to use
agent.selectLLMProvider(LLMProvider.OPEN_AI);

// Allow the model to finish when done
agent.enableAgentEndTool();

// Add a task for the agent to process
await agent.addTask({ input: 'Hello world! Tell me what you can do.' });

// Run the agent with OpenAI
while (!(await agent.isFinished())) {
  const response = await openai.chat.completions.create({
    model: 'gpt-4.1',
    messages: agent.messages,
    tools: agent.getTools(),
    tool_choice: agent.toolChoice,
    temperature: 0
  });

  // Add response to agent memory
  await agent.addMessages(response);

  // Extract and run any tools the agent wants to use
  const toolCalls = agent.extractToolCalls({ llmResponse: response });
  if (toolCalls.length > 0) {
    await agent.runTools({ toolCalls });
  }
}

// Get the final result and store the thread ID for future conversations
const result = await agent.retrieveExecutionResult();
console.log(result.result);

const threadId = result.memoryThreadId;
console.log(`Thread ID: ${threadId}`);
```
## Deploy the agent to the cloud
```bash
xpander deploy  # Deploys the Docker container to the cloud and runs it via xpander_handler.py
xpander logs    # Streams logs locally from your configured agent
```
## What This Does
The example above:
- Connects to xpander.ai’s backend services
- Loads an agent you’ve created in the Builder
- Adds a task for the agent to process
- Runs the agent with the OpenAI API
- Executes any tools the agent needs
- Returns the result with a thread ID you can reuse for continuity (see the sketch below)
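Because the backend persists conversation state, a later run can pick up where the last one left off by reusing the stored `thread_id`. A minimal sketch, reusing the clients from the Hello World example and assuming `add_task` accepts a `thread_id` keyword (check the SDK reference for the exact signature):

```python
# Continue an earlier conversation using the stored thread ID.
# Assumption: add_task takes a thread_id keyword; verify against the SDK docs.
agent.add_task("What did I ask you before?", thread_id=thread_id)

# Same run loop as the Hello World example above
while not agent.is_finished():
    response = openai.chat.completions.create(
        model="gpt-4.1",
        messages=agent.messages,
        tools=agent.get_tools(),
        tool_choice=agent.tool_choice
    )
    agent.add_messages(response.model_dump())
    tool_calls = agent.extract_tool_calls(llm_response=response.model_dump())
    if tool_calls:
        agent.run_tools(tool_calls=tool_calls)

print(agent.retrieve_execution_result().result)
```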
See the full Hello World application here.
## Using Different LLM Providers
The xpander.ai backend is model-agnostic. You can use any LLM provider:
### OpenAI

```python
from openai import OpenAI
import os

openai_client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

# First, select the provider
agent.select_llm_provider(LLMProvider.OPEN_AI)

# Then use the provider's client
response = openai_client.chat.completions.create(
    model="gpt-4.1",
    messages=agent.messages,
    tools=agent.get_tools(),  # Tools automatically formatted for OpenAI
    tool_choice=agent.tool_choice
)

# Process the response
agent.add_messages(response.model_dump())
tool_calls = agent.extract_tool_calls(
    llm_response=response.model_dump()
)
agent.run_tools(tool_calls=tool_calls)
```
### Amazon Bedrock

```python
import boto3
import json

# Select the Bedrock provider
agent.select_llm_provider(LLMProvider.AMAZON_BEDROCK)

# Use the Bedrock client
bedrock = boto3.client(service_name="bedrock-runtime")
response = bedrock.invoke_model(
    modelId="anthropic.claude-3-sonnet-20240229-v1:0",
    body=json.dumps({
        "anthropic_version": "bedrock-2023-05-31",  # required by the Anthropic Bedrock API
        "max_tokens": 1024,                         # required by the Anthropic Bedrock API
        "messages": agent.messages,
        "tools": agent.get_tools()  # Tools automatically formatted for Bedrock
    })
)

# Process the response
response_body = json.loads(response["body"].read().decode())
agent.add_messages(response_body)
tool_calls = agent.extract_tool_calls(
    llm_response=response_body
)
agent.run_tools(tool_calls=tool_calls)
```
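The Bedrock snippet above makes a single model call. To let the agent run multi-step tasks, wrap it in the same `is_finished()` loop used in the Hello World example; a sketch reusing only the calls shown above:

```python
# Sketch: the same run loop as the OpenAI example, driven by Bedrock.
agent.add_task("Hello world! Tell me what you can do.")

while not agent.is_finished():
    response = bedrock.invoke_model(
        modelId="anthropic.claude-3-sonnet-20240229-v1:0",
        body=json.dumps({
            "anthropic_version": "bedrock-2023-05-31",
            "max_tokens": 1024,
            "messages": agent.messages,
            "tools": agent.get_tools()
        })
    )
    response_body = json.loads(response["body"].read().decode())
    agent.add_messages(response_body)
    tool_calls = agent.extract_tool_calls(llm_response=response_body)
    if tool_calls:
        agent.run_tools(tool_calls=tool_calls)

result = agent.retrieve_execution_result()
print(result.result)
```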