Overview

This is a quick guide to creating and running a simple agent with the xpander.ai SDK. The SDK is the foundation of the platform (the xpander.ai UI itself is built on it). While the UI provides a visual interface for building agent graphs and visualizing tool relationships, this guide shows developers how to work with the SDK directly.

Step 1: Environment Setup

First, let’s set up our development environment and create our base agent.

Dependencies

Create a new directory and install the required packages:

mkdir hello-agent
cd hello-agent
pip install xpander-sdk openai python-dotenv

Configuration

Create a .env file with your API keys:

XPANDER_API_KEY=your_xpander_api_key
OPENAI_API_KEY=your_openai_api_key
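
Before moving on, you can optionally confirm that both keys are visible to Python once load_dotenv() runs. This is a minimal sanity-check sketch, not part of the tutorial code itself:

import os
from dotenv import load_dotenv

# Fail fast if either key is missing from the environment / .env file
load_dotenv()
for key in ("XPANDER_API_KEY", "OPENAI_API_KEY"):
    if not os.getenv(key):
        raise RuntimeError(f"{key} is missing from your .env file")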

Base Agent Implementation

Create a file named agent.py with our initial implementation:

from xpander_sdk import Agent, XpanderClient, LLMProvider, Tokens, LLMTokens
from openai import OpenAI
import os
import time
from dotenv import load_dotenv

# Load environment variables
load_dotenv()

# Initialize clients
xpander_client = XpanderClient(api_key=os.environ['XPANDER_API_KEY'])
openai_client = OpenAI(api_key=os.environ['OPENAI_API_KEY'])

# Simple agent execution loop
def agent_loop(agent: Agent, local_tools_by_name=None):
    print("🪄 Starting Agent Loop")
    # Initialize token tracking and timing
    execution_tokens = Tokens(worker=LLMTokens(completion_tokens=0, prompt_tokens=0, total_tokens=0))
    execution_start_time = time.perf_counter()
    
    while not agent.is_finished():
        # Get response from OpenAI
        start_time = time.perf_counter()
        response = openai_client.chat.completions.create(
            model="gpt-4o",
            messages=agent.messages,
            tools=agent.get_tools(llm_provider=LLMProvider.OPEN_AI),
            tool_choice=agent.tool_choice,
            temperature=0
        )
        
        # Track token usage
        execution_tokens.worker.completion_tokens += response.usage.completion_tokens
        execution_tokens.worker.prompt_tokens += response.usage.prompt_tokens
        execution_tokens.worker.total_tokens += response.usage.total_tokens
        
        # Report LLM usage to Xpander
        agent.report_llm_usage(
            llm_response=response.model_dump(),
            llm_inference_duration=time.perf_counter() - start_time,
            llm_provider=LLMProvider.OPEN_AI
        )
        
        agent.add_messages(response.model_dump())
        
        tool_calls = XpanderClient.extract_tool_calls(
            llm_response=response.model_dump(),
            llm_provider=LLMProvider.OPEN_AI
        )
        
        if tool_calls:
            agent.run_tools(tool_calls=tool_calls)
    
    # Report execution metrics to Xpander
    agent.report_execution_metrics(
        llm_tokens=execution_tokens,
        ai_model="gpt-4o"
    )
    
    print(f"✨ Execution duration: {time.perf_counter() - execution_start_time:.2f} seconds")
    print(f"🔢 Total tokens used: {execution_tokens.worker.total_tokens}")

# Create a new agent
agent = xpander_client.agents.create(name="Hello World Agent")
agent.instructions.goal = "Answer questions helpfully and accurately."
# Save the agent ID for reuse
agent_id = agent.id
print(f"🎉 Created agent ID: {agent_id}")
print(f"📝 Add this to your .env: XPANDER_AGENT_ID={agent_id}")

# Ask the agent a question
agent.add_task(input="Hello! What can you do?")

# Run the agent execution loop
agent_loop(agent)

# Get the result
result = agent.retrieve_execution_result()
print(f"\n🤖 Agent response: {result.result}")
print(f"🧵 Thread ID: {result.memory_thread_id}")
print(f"\n🔍 View this agent in the Xpander platform with ID: {agent_id}")

If you encounter a sync issue, simply re-run the script; the agent has already been created on the platform. Note that this initial version calls agents.create on every run, so add the printed XPANDER_AGENT_ID to your .env if you want to keep reusing the same agent (the complete code at the end of this guide loads it automatically).
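
Once XPANDER_AGENT_ID is in your .env, you can load the existing agent instead of creating a new one each time. Here is a minimal sketch of that pattern; the complete code at the end of this guide wraps the same logic in a setup_agent helper:

# Reuse an existing agent if its ID is set, otherwise create a fresh one
agent_id = os.environ.get("XPANDER_AGENT_ID")
if agent_id:
    agent = xpander_client.agents.get(agent_id=agent_id)
    print(f"🔄 Loaded agent: {agent.name}")
else:
    agent = xpander_client.agents.create(name="Hello World Agent")
    agent.instructions.goal = "Answer questions helpfully and accurately."
    print(f"🎉 Created agent ID: {agent.id}")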

Step 2: Add Thread Management

Now let’s enhance our agent to handle multi-turn conversations. We’ll add thread management capabilities.

Changes to Implement

Add the following function to your existing code:

# Function to chat with the agent
def chat(agent: Agent, message, thread_id=None, local_tools_by_name=None):
    """Send a message to the agent and get a response"""
    print(f"\n👤 User: {message}")
    
    # Add task to agent (using thread_id for conversation continuity)
    agent.add_task(input=message, thread_id=thread_id)
    
    # Run the agent loop
    agent_loop(agent, local_tools_by_name)
    
    # Get and return result
    result = agent.retrieve_execution_result()
    print(f"🤖 Agent: {result.result}")
    print(f"🧵 Thread ID: {result.memory_thread_id}")
    return result.memory_thread_id

Replace the main execution code with:

# Have a multi-turn conversation
thread_id = chat(agent, "What is machine learning?")
chat(agent, "Give me an example of a machine learning algorithm.", thread_id)

Thread IDs allow you to maintain context across multiple messages. The agent remembers the conversation history, creating a more natural dialogue experience.
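
If you want to experiment interactively, a small loop like the one below keeps passing the same thread ID back into chat, so the agent retains context across turns. This is just a sketch built on the chat helper defined above:

# Minimal interactive loop: reuse one thread so context carries over
thread_id = None
while True:
    user_input = input("\nYou (type 'quit' to exit): ")
    if user_input.strip().lower() == "quit":
        break
    thread_id = chat(agent, user_input, thread_id)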

Step 3: Add Local Tools

Let’s add the ability to run local Python functions as tools.

New Local Tool Definition

Add this code near the top of your file, after the existing imports (it needs one extra import, datetime):

from datetime import datetime

# Define local function
def get_current_time(timezone=None):
    """Get the current date and time with millisecond precision."""
    now = datetime.now()
    return f"Current time{' in ' + timezone if timezone else ''}: {now}"

# Set up local tools
local_tools = [
    {
        "declaration": {
            "type": "function",
            "function": {
                "name": "get_current_time",
                "description": "Get the precise current date and time.",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "timezone": {
                            "type": "string",
                            "description": "Timezone (e.g., 'UTC', 'EST')"
                        }
                    },
                    "required": []
                }
            }
        },
        "fn": get_current_time
    }
]

local_tools_list = [tool['declaration'] for tool in local_tools]
local_tools_by_name = {tool['declaration']['function']['name']: tool['fn'] 
                      for tool in local_tools}
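
You can register additional local tools by adding entries that follow the same declaration/fn pattern. The add_numbers tool below is purely illustrative and is not used elsewhere in this tutorial; if you append it after the derived structures above, rebuild them as shown so the new tool is picked up:

# Hypothetical second local tool, shown only to illustrate the pattern
def add_numbers(a, b):
    """Add two numbers and return the result as text."""
    return f"The sum of {a} and {b} is {a + b}"

local_tools.append({
    "declaration": {
        "type": "function",
        "function": {
            "name": "add_numbers",
            "description": "Add two numbers together.",
            "parameters": {
                "type": "object",
                "properties": {
                    "a": {"type": "number", "description": "First number"},
                    "b": {"type": "number", "description": "Second number"}
                },
                "required": ["a", "b"]
            }
        }
    },
    "fn": add_numbers
})

# Rebuild the derived structures so the new tool is included
local_tools_list = [tool['declaration'] for tool in local_tools]
local_tools_by_name = {tool['declaration']['function']['name']: tool['fn']
                       for tool in local_tools}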

Updates to Agent Loop

Add local tool execution to the agent_loop function (you will also need to add ToolCallResult to your xpander_sdk imports):

# Inside agent_loop, after running cloud tools:
# Run local tools if provided
if local_tools_by_name:
    pending_local_tool_execution = XpanderClient.retrieve_pending_local_tool_calls(tool_calls=tool_calls)
    local_tools_results = []
    
    for tc in pending_local_tool_execution:
        print(f"🛠️ Running local tool: {tc.name}")
        tool_call_result = ToolCallResult(function_name=tc.name, tool_call_id=tc.tool_call_id, payload=tc.payload)
        try:
            if tc.name in local_tools_by_name:
                tool_call_result.is_success = True
                tool_call_result.result = local_tools_by_name[tc.name](**tc.payload)
            else:
                raise Exception(f"Local tool {tc.name} not found")
        except Exception as e:
            tool_call_result.is_success = False
            tool_call_result.is_error = True
            tool_call_result.result = str(e)
        finally:
            local_tools_results.append(tool_call_result)

    if local_tools_results:
        print(f"📝 Registering {len(local_tools_results)} local tool results...")
        agent.memory.add_tool_call_results(tool_call_results=local_tools_results)

Register Local Tools

Add this before running the agent:

# Add local tools to the agent
agent.add_local_tools(local_tools_list)
print("🧰 Local tools added to agent")

Local Tools

Tools that run directly in your Python code, allowing access to local resources and functionality.

Cloud Tools

Tools that run on the Xpander platform, providing access to services like search or external APIs.
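
To see which tools your agent currently exposes to the model, cloud operations attached on the platform as well as local tools you have registered, you can inspect the schemas returned by get_tools. A minimal sketch:

# Print the name of every tool the agent will offer to the LLM
tools = agent.get_tools(llm_provider=LLMProvider.OPEN_AI)
for tool in tools:
    if 'function' in tool:
        print(f"🔧 Available tool: {tool['function']['name']}")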

Step 4: Add Search Capability

Finally, let’s add Tavily search capability to make our agent more powerful.

Search Setup Function

Add this function to your code (it uses GraphItem and AgentGraphItemType, so add both to your xpander_sdk imports):

def add_tavily_search(agent: Agent):
    """Add Tavily search capability to the agent"""
    # Check if Tavily search is already available
    print("🔍 Checking for existing search tools...")
    tools = agent.get_tools(llm_provider=LLMProvider.OPEN_AI)
    for tool in tools:
        if 'function' in tool and tool['function']['name'] == 'XpanderNewsInsightsFetchTavilyAIInsights':
            print(f"✅ Tavily search already available")
            return True
    
    print("🔄 Adding Tavily search capability...")
    
    try:
        # Find the xpanderAI Tools interface
        available_interfaces = agent.retrieve_agentic_interfaces()
        xpander_tools = next((interface for interface in available_interfaces 
                     if "xpanderai tools" in interface.name.lower()), None)
        
        if not xpander_tools:
            print("❌ xpanderAI Tools interface not found")
            return False
        
        # Find the Tavily operation
        operations = agent.retrieve_agentic_operations(agentic_interface=xpander_tools)
        tavily_op = next((op for op in operations 
                     if "tavily" in op.name.lower()), None)
        
        if not tavily_op:
            print("❌ Tavily search operation not found")
            return False
        
        print(f"✅ Found Tavily operation: {tavily_op.name}")
        
        # Add the operation to the agent
        print("🔄 Attaching search operation to agent...")
        agent.attach_operations(operations=[tavily_op])
        
        # Create a graph node for the operation
        print("🔄 Creating graph node...")
        agent.graph.add_node(
            GraphItem(
                agent=agent,
                item_id=tavily_op.id_to_use_on_graph,
                name=tavily_op.name,
                type=AgentGraphItemType.TOOL,
                is_local_tool=False
            )
        )
        agent.sync()
        
        print("✅ Tavily search capability added successfully!")
        # Wait briefly for search capability to register
        time.sleep(2)
        return True
    except Exception as e:
        print(f"❌ Error setting up search: {e}")
        return False

Update your main execution code:

# Add Tavily search capability
add_tavily_search(agent)

# Add local tools to the agent
agent.add_local_tools(local_tools_list)
print("🧰 Local tools added to agent")

print("\n🔍 Testing agent with time function and search capability...")
# First message - asking about time
thread_id = chat(agent, "What time is it right now? Use the get_current_time tool.", 
                local_tools_by_name=local_tools_by_name)

# Second message in same thread - asking about search
chat(agent, "What are the latest developments in AI?", thread_id, 
    local_tools_by_name=local_tools_by_name)

Best Practices

Always save your agent ID in your .env file after creating a new agent. This lets you reuse the agent in future sessions.
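
If you prefer not to copy the ID by hand, a small helper like this can append it to your .env the first time the agent is created. This is plain Python, not an SDK feature, and purely a convenience sketch:

# Hypothetical helper: persist the agent ID so future runs reuse the same agent
def save_agent_id_to_env(agent_id, env_path=".env"):
    with open(env_path, "a") as f:
        f.write(f"\nXPANDER_AGENT_ID={agent_id}\n")
    print(f"💾 Saved XPANDER_AGENT_ID to {env_path}")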

Development Guidelines

  • Use the agent_loop function to standardize your agent execution flow
  • Monitor execution metrics through the Xpander platform using the agent ID
  • Manage thread IDs to maintain conversation context
  • Be aware of tool name conflicts between local and built-in tools (a quick check is sketched after this list)
  • Consider the UI for visual graph creation and visualizing complex tool relationships
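
As a quick guard against tool-name conflicts, before calling add_local_tools you can compare your local tool names against the names the agent already exposes. A minimal sketch that reuses objects defined earlier in this guide:

# Run before agent.add_local_tools(...): warn if a local name collides with an existing tool
existing_names = {
    tool['function']['name']
    for tool in agent.get_tools(llm_provider=LLMProvider.OPEN_AI)
    if 'function' in tool
}
for name in local_tools_by_name:
    if name in existing_names:
        print(f"⚠️ Local tool '{name}' conflicts with an existing tool")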

SDK Usage

Remember that the Xpander SDK is a powerful foundation giving you direct programmatic control of your agents. The UI provides helpful visualization for agent graphs, but direct SDK access gives you maximum flexibility for custom integrations.

Summary

In this tutorial, you’ve learned how to:

  1. Set up your environment with the necessary packages
  2. Create a basic agent with a streamlined execution loop
  3. Add thread management for multi-turn conversations
  4. Create local tools that run directly in your Python environment
  5. Add search capability using Tavily integration

These building blocks allow you to create sophisticated agents that can perform complex tasks, maintain context, and leverage both local and cloud-based tools.

Complete Code

Below is the complete code that combines all the features we’ve covered in this tutorial:

from xpander_sdk import Agent, XpanderClient, LLMProvider, ToolCallResult, GraphItem, Tokens, LLMTokens, AgentGraphItemType
from openai import OpenAI
import os
import time
from datetime import datetime
from dotenv import load_dotenv

# Load environment variables
load_dotenv()

# Initialize clients
xpander_client = XpanderClient(api_key=os.environ['XPANDER_API_KEY'])
openai_client = OpenAI(api_key=os.environ['OPENAI_API_KEY'])

# Define local function
def get_current_time(timezone=None):
    """Get the current date and time with millisecond precision."""
    now = datetime.now()
    return f"Current time{' in '+timezone if timezone else ''}: {now}"

# Set up local tools
local_tools = [
    {
        "declaration": {
            "type": "function",
            "function": {
                "name": "get_current_time",
                "description": "Get the precise current date and time.",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "timezone": {
                            "type": "string",
                            "description": "Timezone (e.g., 'UTC', 'EST')"
                        }
                    },
                    "required": []
                }
            }
        },
        "fn": get_current_time
    }
]

local_tools_list = [tool['declaration'] for tool in local_tools]
local_tools_by_name = {tool['declaration']['function']['name']: tool['fn'] 
                       for tool in local_tools}

# Agent execution loop with local tools support
def agent_loop(agent: Agent, local_tools_by_name=None):
    print("🪄 Starting Agent Loop")
    # Initialize token tracking and timing
    execution_tokens = Tokens(worker=LLMTokens(completion_tokens=0, prompt_tokens=0, total_tokens=0))
    execution_start_time = time.perf_counter()
    
    while not agent.is_finished():
        # Get response from OpenAI
        start_time = time.perf_counter()
        response = openai_client.chat.completions.create(
            model="gpt-4o",
            messages=agent.messages,
            tools=agent.get_tools(llm_provider=LLMProvider.OPEN_AI),
            tool_choice=agent.tool_choice,
            temperature=0
        )
        
        # Track token usage
        execution_tokens.worker.completion_tokens += response.usage.completion_tokens
        execution_tokens.worker.prompt_tokens += response.usage.prompt_tokens
        execution_tokens.worker.total_tokens += response.usage.total_tokens
        
        # Report LLM usage to Xpander
        agent.report_llm_usage(
            llm_response=response.model_dump(),
            llm_inference_duration=time.perf_counter() - start_time,
            llm_provider=LLMProvider.OPEN_AI
        )
        
        agent.add_messages(response.model_dump())
        
        tool_calls = XpanderClient.extract_tool_calls(
            llm_response=response.model_dump(),
            llm_provider=LLMProvider.OPEN_AI
        )
        
        if tool_calls:
            # Display which tools are being used
            for call in tool_calls:
                name = getattr(call, 'name', None) or getattr(getattr(call, 'function', {}), 'name', "unnamed")
                print(f"🔧 Using tool: {name}")
                
            # Run cloud tools
            agent.run_tools(tool_calls=tool_calls)
            
            # Run local tools if provided
            if local_tools_by_name:
                pending_local_tool_execution = XpanderClient.retrieve_pending_local_tool_calls(tool_calls=tool_calls)
                local_tools_results = []
                
                for tc in pending_local_tool_execution:
                    print(f"🛠️ Running local tool: {tc.name}")
                    tool_call_result = ToolCallResult(function_name=tc.name, tool_call_id=tc.tool_call_id, payload=tc.payload)
                    try:
                        if tc.name in local_tools_by_name:
                            tool_call_result.is_success = True
                            tool_call_result.result = local_tools_by_name[tc.name](**tc.payload)
                        else:
                            raise Exception(f"Local tool {tc.name} not found")
                    except Exception as e:
                        tool_call_result.is_success = False
                        tool_call_result.is_error = True
                        tool_call_result.result = str(e)
                    finally:
                        local_tools_results.append(tool_call_result)

                if local_tools_results:
                    print(f"📝 Registering {len(local_tools_results)} local tool results...")
                    agent.memory.add_tool_call_results(tool_call_results=local_tools_results)
    
    # Report execution metrics to Xpander
    agent.report_execution_metrics(
        llm_tokens=execution_tokens,
        ai_model="gpt-4o"
    )
    
    print(f"✨ Execution duration: {time.perf_counter() - execution_start_time:.2f} seconds")
    print(f"🔢 Total tokens used: {execution_tokens.worker.total_tokens}")

# Function to chat with the agent
def chat(agent: Agent, message, thread_id=None, local_tools_by_name=None):
    """Send a message to the agent and get a response"""
    print(f"\n👤 User: {message}")
    
    # Add task to agent (using thread_id for conversation continuity)
    agent.add_task(input=message, thread_id=thread_id)
    
    # Run the agent loop
    agent_loop(agent, local_tools_by_name)
    
    # Get and return result
    result = agent.retrieve_execution_result()
    print(f"🤖 Agent: {result.result}")
    print(f"🧵 Thread ID: {result.memory_thread_id}")
    return result.memory_thread_id

# Function to create or load an agent
def setup_agent() -> Agent:
    """Create a new agent or load an existing one"""
    if 'XPANDER_AGENT_ID' in os.environ and os.environ['XPANDER_AGENT_ID']:
        # Load existing agent
        agent = xpander_client.agents.get(agent_id=os.environ['XPANDER_AGENT_ID'])
        print(f"🔄 Loaded agent: {agent.name}")
    else:
        # Create a new agent
        agent = xpander_client.agents.create(name="Hello World Agent")
        agent.instructions.goal = "Answer questions helpfully and accurately."
        
        # Save the agent ID
        agent_id = agent.id
        print(f"🎉 Created agent ID: {agent_id}")
        print(f"📝 Add this to your .env: XPANDER_AGENT_ID={agent_id}")
    
    print(f"🔍 View this agent in the Xpander platform with ID: https://app.xpander.ai/agents/{agent.id}")
    return agent

# Function to add Tavily search capability
def add_tavily_search(agent: Agent):
    """Add Tavily search capability to the agent"""
    # Check if Tavily search is already available
    print("🔍 Checking for existing search tools...")
    tools = agent.get_tools(llm_provider=LLMProvider.OPEN_AI)
    for tool in tools:
        if 'function' in tool and tool['function']['name'] == 'XpanderNewsInsightsFetchTavilyAIInsights':
            print(f"✅ Tavily search already available")
            return True
    
    print("🔄 Adding Tavily search capability...")
    
    try:
        # Find the xpanderAI Tools interface
        available_interfaces = agent.retrieve_agentic_interfaces()
        xpander_tools = next((interface for interface in available_interfaces 
                     if "xpanderai tools" in interface.name.lower()), None)
        
        if not xpander_tools:
            print("❌ xpanderAI Tools interface not found")
            return False
        
        # Find the Tavily operation
        operations = agent.retrieve_agentic_operations(agentic_interface=xpander_tools)
        tavily_op = next((op for op in operations 
                     if "tavily" in op.name.lower()), None)
        
        if not tavily_op:
            print("❌ Tavily search operation not found")
            return False
        
        print(f"✅ Found Tavily operation: {tavily_op.name}")
        
        # Add the operation to the agent
        print("🔄 Attaching search operation to agent...")
        agent.attach_operations(operations=[tavily_op])
        
        # Create a graph node for the operation
        print("🔄 Creating graph node...")
        agent.graph.add_node(
            GraphItem(
                agent=agent,
                item_id=tavily_op.id_to_use_on_graph,
                name=tavily_op.name,
                type=AgentGraphItemType.TOOL,
                is_local_tool=False
            )
        )
        agent.sync()
        
        print("✅ Tavily search capability added successfully!")
        # Wait briefly for search capability to register
        time.sleep(2)
        return True
    except Exception as e:
        print(f"❌ Error setting up search: {e}")
        return False

# Main execution - demonstrates different features
def main():
    # Set up or load the agent
    agent = setup_agent()
        
    # Add Tavily search capability
    add_tavily_search(agent)
    # Add local tools to the agent
    agent.add_local_tools(local_tools_list)
    print("🧰 Local tools added to agent")
    
    print("\n🔍 Testing agent with time function and search capability...")
    # First message - asking about time
    thread_id = chat(agent, "What time is it right now? Use the get_current_time tool.", 
                    local_tools_by_name=local_tools_by_name)
    
    # Second message in same thread - asking about search
    chat(agent, "What are the latest developments in AI?", thread_id, 
        local_tools_by_name=local_tools_by_name)

# Run the example when the script is executed directly
if __name__ == "__main__":
    main()

Save this complete code as hello_world_agent.py and run it with:

python hello_world_agent.py

Next Steps

Now that you have the basics of agent creation, you can:

  • Add more local tools to extend your agent’s capabilities
  • Explore the Xpander UI to create more complex agent graphs
  • Connect additional cloud services and APIs
  • Create specialized agents for specific domains or tasks

Happy agent building!