The xpander.ai SDK uses several data models for working with tools and function calls. This documentation covers the key models used when working with tools.

ToolCallType

An enumeration defining the different types of tool calls.

from xpander_sdk import ToolCallType

# Tool call types
print(ToolCallType.XPANDER)  # Tools provided by the xpander.ai platform
print(ToolCallType.LOCAL)    # Tools implemented locally in your code
print(ToolCallType.UNKNOWN)  # Unrecognized tool type
| Enum Value | Description |
|---|---|
| `ToolCallType.XPANDER` | Tool calls executed on the xpander.ai platform |
| `ToolCallType.LOCAL` | Tool calls executed locally in your application |
| `ToolCallType.UNKNOWN` | Unrecognized tool calls (typically a fallback value) |

ToolCall

Represents a function call from an LLM.

from xpander_sdk import ToolCall, ToolCallType

# Create a tool call for a web search
web_search = ToolCall(
    name="web_search",
    type=ToolCallType.XPANDER,
    payload={
        "bodyParams": {
            "query": "latest AI research papers"
        }
    },
    tool_call_id="call_123456789"
)

# Access tool call properties
print(f"Tool name: {web_search.name}")
print(f"Tool type: {web_search.type}")
print(f"Tool payload: {web_search.payload}")
print(f"Tool call ID: {web_search.tool_call_id}")

Properties

| Property | Type | Description |
|---|---|---|
| `name` | string | The name of the tool being called |
| `type` | ToolCallType | The type of the tool (XPANDER, LOCAL, etc.) |
| `payload` | Object | The parameters passed to the tool |
| `tool_call_id` | string | A unique identifier for the tool call |

Usage

Tool calls are typically extracted from LLM responses using the extract_tool_calls() method:

from xpander_sdk import XpanderClient, LLMProvider
from openai import OpenAI

# Initialize OpenAI client
openai_client = OpenAI(api_key="your-openai-key")

# Get LLM response with tool calls
response = openai_client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "What's the weather in London?"}],
    tools=[{
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the current weather in a location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state or country"
                    }
                },
                "required": ["location"]
            }
        }
    }]
)

# Extract tool calls in xpander.ai format
tool_calls = XpanderClient.extract_tool_calls(
    llm_response=response.model_dump(),
    llm_provider=LLMProvider.OPEN_AI
)

# Process the tool calls
for tool_call in tool_calls:
    print(f"Tool: {tool_call.name}")
    print(f"Type: {tool_call.type}")
    print(f"Payload: {tool_call.payload}")

ToolCallResult

Represents the result of executing a tool call.

from xpander_sdk import ToolCallResult

# Create a successful tool call result
success_result = ToolCallResult(
    function_name="web_search",
    tool_call_id="call_123456789",
    is_success=True,
    result="Found the following information: OpenAI released GPT-4 Turbo on...",
    payload={"query": "latest AI research"}
)

# Create a failed tool call result
error_result = ToolCallResult(
    function_name="get_stock_price",
    tool_call_id="call_987654321",
    is_success=False,
    error="API rate limit exceeded",
    payload={"symbol": "AAPL"}
)

# Access properties
print(f"Tool name: {success_result.function_name}")
print(f"Success: {success_result.is_success}")
print(f"Result: {success_result.result}")

Properties

| Property | Type | Description |
|---|---|---|
| `function_name` | string | The name of the function that was called |
| `tool_call_id` | string | The ID of the original tool call |
| `is_success` | boolean | Whether the tool call was successful |
| `result` | string | The result of the tool call (if successful) |
| `error` | string | Error message (if the tool call failed) |
| `payload` | Object | The original parameters passed to the tool |

Usage

Tool call results are typically returned from the run_tool() or run_tools() methods:

from xpander_sdk import XpanderClient, ToolCall, ToolCallType

# Initialize client and get agent
client = XpanderClient(api_key="your-api-key")
agent = client.agents.get(agent_id="agent-1234")

# Create a tool call
tool_call = ToolCall(
    name="web_search",
    type=ToolCallType.XPANDER,
    payload={
        "bodyParams": {
            "query": "latest advances in quantum computing"
        }
    },
    tool_call_id="call_1234"
)

# Execute the tool
result = agent.run_tool(tool=tool_call)

# Process the result
if result.is_success:
    print(f"Tool succeeded with result: {result.result[:100]}...")
else:
    print(f"Tool failed with error: {result.error}")

LLMProvider

An enumeration representing different LLM providers for formatting tools and extracting tool calls.

from xpander_sdk import LLMProvider

# Available LLM providers
print(LLMProvider.OPEN_AI)        # OpenAI (GPT models)
print(LLMProvider.FRIENDLI_AI)    # Claude (via FriendliAI)
print(LLMProvider.GEMINI_OPEN_AI) # Google Gemini (OpenAI-compatible)
print(LLMProvider.OLLAMA)         # Ollama (local models)
| Enum Value | Description |
|---|---|
| `LLMProvider.OPEN_AI` | OpenAI's format for GPT models |
| `LLMProvider.FRIENDLI_AI` | Claude format (via FriendliAI) |
| `LLMProvider.GEMINI_OPEN_AI` | Google Gemini with OpenAI-compatible format |
| `LLMProvider.OLLAMA` | Ollama format for local models |

Usage

The LLM provider is used when:

  1. Initializing memory for an agent
  2. Getting tools formatted for a specific LLM provider
  3. Extracting tool calls from an LLM response
from xpander_sdk import XpanderClient, LLMProvider
from openai import OpenAI

# Initialize clients
xpander_client = XpanderClient(api_key="your-api-key")
openai_client = OpenAI(api_key="your-openai-key")
agent = xpander_client.agents.get(agent_id="agent-1234")

# Initialize memory with OpenAI format
agent.memory.init_messages(
    input="What's the weather in Paris?",
    instructions=agent.instructions,
    llm_provider=LLMProvider.OPEN_AI
)

# Get tools formatted for OpenAI
tools = agent.get_tools(llm_provider=LLMProvider.OPEN_AI)

# Use the tools with OpenAI
response = openai_client.chat.completions.create(
    model="gpt-4o",
    messages=agent.messages,
    tools=tools,
    tool_choice="auto"
)

# Extract tool calls from the response
tool_calls = XpanderClient.extract_tool_calls(
    llm_response=response.model_dump(),
    llm_provider=LLMProvider.OPEN_AI
)