Data models for working with tools in the xpander.ai SDK
The xpander.ai SDK uses several data models for working with tools and function calls. This documentation covers the key models used when working with tools.
An enumeration defining the different types of tool calls.
```python
from xpander_sdk import ToolCallType

# Tool call types
print(ToolCallType.XPANDER)  # Tools provided by the xpander.ai platform
print(ToolCallType.LOCAL)    # Tools implemented locally in your code
print(ToolCallType.UNKNOWN)  # Unrecognized tool type
```
| Enum Value | Description |
|------------|-------------|
| `ToolCallType.XPANDER` | Tool calls executed on the xpander.ai platform |
| `ToolCallType.LOCAL` | Tool calls executed locally in your application |
| `ToolCallType.UNKNOWN` | Unrecognized tool calls (typically a fallback value) |
```python
from xpander_sdk import ToolCall, ToolCallType

# Create a tool call for a web search
web_search = ToolCall(
    name="web_search",
    type=ToolCallType.XPANDER,
    payload={"bodyParams": {"query": "latest AI research papers"}},
    tool_call_id="call_123456789"
)

# Access tool call properties
print(f"Tool name: {web_search.name}")
print(f"Tool type: {web_search.type}")
print(f"Tool payload: {web_search.payload}")
print(f"Tool call ID: {web_search.tool_call_id}")
```
Tool calls are typically extracted from LLM responses using the extract_tool_calls() method:
```python
from xpander_sdk import XpanderClient, LLMProvider
from openai import OpenAI

# Initialize OpenAI client
openai_client = OpenAI(api_key="your-openai-key")

# Get LLM response with tool calls
response = openai_client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "What's the weather in London?"}],
    tools=[{
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the current weather in a location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state or country"
                    }
                },
                "required": ["location"]
            }
        }
    }]
)

# Extract tool calls in xpander.ai format
tool_calls = XpanderClient.extract_tool_calls(
    llm_response=response.model_dump(),
    llm_provider=LLMProvider.OPEN_AI
)

# Process the tool calls
for tool_call in tool_calls:
    print(f"Tool: {tool_call.name}")
    print(f"Type: {tool_call.type}")
    print(f"Payload: {tool_call.payload}")
```
Tool call results are typically returned from the run_tool() or run_tools() methods:
```python
from xpander_sdk import XpanderClient, ToolCall, ToolCallType

# Initialize client and get agent
client = XpanderClient(api_key="your-api-key")
agent = client.agents.get(agent_id="agent-1234")

# Create a tool call
tool_call = ToolCall(
    name="web_search",
    type=ToolCallType.XPANDER,
    payload={"bodyParams": {"query": "latest advances in quantum computing"}},
    tool_call_id="call_1234"
)

# Execute the tool
result = agent.run_tool(tool=tool_call)

# Process the result
if result.is_success:
    print(f"Tool succeeded with result: {result.result[:100]}...")
else:
    print(f"Tool failed with error: {result.error}")
```
An enumeration representing different LLM providers for formatting tools and extracting tool calls.
```python
from xpander_sdk import LLMProvider

# Available LLM providers
print(LLMProvider.OPEN_AI)         # OpenAI (GPT models)
print(LLMProvider.FRIENDLI_AI)     # Claude (via FriendliAI)
print(LLMProvider.GEMINI_OPEN_AI)  # Google Gemini (OpenAI-compatible)
print(LLMProvider.OLLAMA)          # Ollama (local models)
```
Getting tools formatted for a specific LLM provider
Extracting tool calls from an LLM response
```python
from xpander_sdk import XpanderClient, LLMProvider
from openai import OpenAI

# Initialize clients
xpander_client = XpanderClient(api_key="your-api-key")
openai_client = OpenAI(api_key="your-openai-key")
agent = xpander_client.agents.get(agent_id="agent-1234")

# Initialize memory with OpenAI format
agent.memory.init_messages(
    input="What's the weather in Paris?",
    instructions=agent.instructions,
    llm_provider=LLMProvider.OPEN_AI
)

# Get tools formatted for OpenAI
tools = agent.get_tools(llm_provider=LLMProvider.OPEN_AI)

# Use the tools with OpenAI
response = openai_client.chat.completions.create(
    model="gpt-4o",
    messages=agent.messages,
    tools=tools,
    tool_choice="auto"
)

# Extract tool calls from the response
tool_calls = XpanderClient.extract_tool_calls(
    llm_response=response.model_dump(),
    llm_provider=LLMProvider.OPEN_AI
)
```