The xpander.ai SDK uses several data models for working with tools and function calls. This documentation covers the key models used when working with tools.
An enumeration defining the different types of tool calls.
from xpander_sdk import ToolCallType
# Tool call types
print(ToolCallType.XPANDER) # Tools provided by the xpander.ai platform
print(ToolCallType.LOCAL) # Tools implemented locally in your code
print(ToolCallType.UNKNOWN) # Unrecognized tool type
| Enum Value | Description |
|---|---|
| `ToolCallType.XPANDER` | Tool calls executed on the xpander.ai platform |
| `ToolCallType.LOCAL` | Tool calls executed locally in your application |
| `ToolCallType.UNKNOWN` | Unrecognized tool calls (typically a fallback value) |
Represents a function call from an LLM.
from xpander_sdk import ToolCall, ToolCallType
# Create a tool call for a web search
web_search = ToolCall(
name="web_search",
type=ToolCallType.XPANDER,
payload={
"bodyParams": {
"query": "latest AI research papers"
}
},
tool_call_id="call_123456789"
)
# Access tool call properties
print(f"Tool name: {web_search.name}")
print(f"Tool type: {web_search.type}")
print(f"Tool payload: {web_search.payload}")
print(f"Tool call ID: {web_search.tool_call_id}")
Properties
| Property | Type | Description |
|---|---|---|
| `name` | string | The name of the tool being called |
| `type` | `ToolCallType` | The type of the tool (XPANDER, LOCAL, etc.) |
| `payload` | Object | The parameters passed to the tool |
| `tool_call_id` | string | A unique identifier for the tool call |
Usage
Tool calls are typically extracted from LLM responses using the extract_tool_calls()
method:
from xpander_sdk import XpanderClient, LLMProvider
from openai import OpenAI
# Initialize OpenAI client
openai_client = OpenAI(api_key="your-openai-key")
# Get LLM response with tool calls
response = openai_client.chat.completions.create(
model="gpt-4.1",
messages=[{"role": "user", "content": "What's the weather in London?"}],
tools=[{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather in a location",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city and state or country"
}
},
"required": ["location"]
}
}
}]
)
# Initialize the xpander.ai client and fetch the agent used for extraction
xpander_client = XpanderClient(api_key="your-api-key")
agent = xpander_client.agents.get(agent_id="agent-1234")
# Extract tool calls in xpander.ai format
tool_calls = agent.extract_tool_calls(
    llm_response=response.model_dump(),
)
# Process the tool calls
for tool_call in tool_calls:
print(f"Tool: {tool_call.name}")
print(f"Type: {tool_call.type}")
print(f"Payload: {tool_call.payload}")
Represents the result of executing a tool call.
from xpander_sdk import ToolCallResult
# Create a successful tool call result
success_result = ToolCallResult(
function_name="web_search",
tool_call_id="call_123456789",
is_success=True,
result="Found the following information: OpenAI released GPT-4 Turbo on...",
payload={"query": "latest AI research"}
)
# Create a failed tool call result
error_result = ToolCallResult(
function_name="get_stock_price",
tool_call_id="call_987654321",
is_success=False,
error="API rate limit exceeded",
payload={"symbol": "AAPL"}
)
# Access properties
print(f"Tool name: {success_result.function_name}")
print(f"Success: {success_result.is_success}")
print(f"Result: {success_result.result}")
Properties
| Property | Type | Description |
|---|---|---|
| `function_name` | string | The name of the function that was called |
| `tool_call_id` | string | The ID of the original tool call |
| `is_success` | boolean | Whether the tool call was successful |
| `result` | string | The result of the tool call (if successful) |
| `error` | string | Error message (if the tool call failed) |
| `payload` | Object | The original parameters passed to the tool |
Usage
Tool call results are typically returned from the run_tool()
or run_tools()
methods:
from xpander_sdk import XpanderClient, ToolCall, ToolCallType
# Initialize client and get agent
client = XpanderClient(api_key="your-api-key")
agent = client.agents.get(agent_id="agent-1234")
# Create a tool call
tool_call = ToolCall(
name="web_search",
type=ToolCallType.XPANDER,
payload={
"bodyParams": {
"query": "latest advances in quantum computing"
}
},
tool_call_id="call_1234"
)
# Execute the tool
result = agent.run_tool(tool=tool_call)
# Process the result
if result.is_success:
print(f"Tool succeeded with result: {result.result[:100]}...")
else:
print(f"Tool failed with error: {result.error}")
LLMProvider
An enumeration representing different LLM providers for formatting tools and extracting tool calls.
from xpander_sdk import LLMProvider
# Available LLM providers
print(LLMProvider.OPEN_AI) # OpenAI (GPT models)
print(LLMProvider.FRIENDLI_AI) # Claude (via FriendliAI)
print(LLMProvider.GEMINI_OPEN_AI) # Google Gemini (OpenAI-compatible)
print(LLMProvider.OLLAMA) # Ollama (local models)
| Enum Value | Description |
|---|---|
| `LLMProvider.OPEN_AI` | OpenAI's format for GPT models |
| `LLMProvider.FRIENDLI_AI` | Claude format (via FriendliAI) |
| `LLMProvider.GEMINI_OPEN_AI` | Google Gemini with OpenAI-compatible format |
| `LLMProvider.OLLAMA` | Ollama format for local models |
Usage
The LLM provider is used when:
- Initializing memory for an agent
- Getting tools formatted for a specific LLM provider
- Extracting tool calls from an LLM response
from xpander_sdk import XpanderClient, LLMProvider
from openai import OpenAI
# Initialize clients
xpander_client = XpanderClient(api_key="your-api-key")
openai_client = OpenAI(api_key="your-openai-key")
agent = xpander_client.agents.get(agent_id="agent-1234")
# Add task - this initializes memory and supports loading previous threads with thread_id
agent.add_task(
input="What's the weather in Paris?",
thread_id="optional-previous-thread-id" # Optional: continue from previous conversation
)
# Get tools formatted for OpenAI
tools = agent.get_tools(llm_provider=LLMProvider.OPEN_AI)
# Use the tools with OpenAI
response = openai_client.chat.completions.create(
model="gpt-4.1",
messages=agent.messages,
tools=tools,
tool_choice="auto"
)
# Extract tool calls from the response
tool_calls = agent.extract_tool_calls(
llm_response=response.model_dump(),
)