Skip to main content
POST
/
v1
/
agents
Create AI Agent
curl --request POST \
  --url https://api.xpander.ai/v1/agents \
  --header 'Content-Type: application/json' \
  --header 'x-api-key: <api-key>' \
  --data '
{
  "name": "<string>",
  "description": "",
  "icon": "🚀",
  "avatar": "male-avatar",
  "type": "manager",
  "created_by": "<string>",
  "model_provider": "anthropic",
  "model_name": "claude-sonnet-4-6",
  "llm_reasoning_effort": "medium",
  "llm_api_base": "<string>",
  "llm_credentials_key": "<string>",
  "llm_credentials_key_type": "xpander",
  "llm_credentials": {
    "name": "<string>",
    "value": "<string>",
    "description": "<string>"
  },
  "llm_extra_headers": {},
  "instructions": {
    "role": [],
    "goal": [],
    "general": ""
  },
  "expected_output": "",
  "output_format": "markdown",
  "output_schema": {},
  "framework": "agno",
  "delegation_type": "router",
  "delegation_memory_strategy": "summarization",
  "delegation_end_strategy": "return-to-start",
  "sub_agents_continuous_thread": true,
  "deep_planning": false,
  "enforce_deep_planning": false,
  "attached_tools": [],
  "graph": [],
  "llm_settings": [],
  "knowledge_bases": [],
  "source_nodes": [],
  "deployment_type": "serverless",
  "access_scope": "personal",
  "environment_id": "<string>",
  "connectivity_details": {},
  "agno_settings": {
    "session_storage": true,
    "learning": false,
    "agent_memories": false,
    "agentic_culture": false,
    "user_memories": false,
    "agentic_memory": false,
    "session_summaries": false,
    "num_history_runs": 10,
    "max_tool_calls_from_history": 0,
    "tool_call_limit": 0,
    "coordinate_mode": true,
    "pii_detection_enabled": false,
    "pii_detection_mask": true,
    "prompt_injection_detection_enabled": false,
    "openai_moderation_enabled": false,
    "openai_moderation_categories": [
      "<string>"
    ],
    "reasoning_tools_enabled": false,
    "tool_calls_compression": {
      "enabled": false,
      "threshold": 3,
      "instructions": ""
    },
    "max_plan_retries": 15,
    "plan_retry_strategy": "tiered",
    "memory_strategy": "disabled"
  },
  "task_level_strategies": {
    "retry_strategy": {
      "enabled": false,
      "max_retries": 3
    },
    "iterative_strategy": {
      "enabled": false,
      "max_iterations": 3,
      "end_condition": {
        "type": "regex",
        "term": "<string>",
        "group_id": "<string>",
        "path": "<string>",
        "value": null
      }
    },
    "stop_strategy": {
      "enabled": false,
      "stop_on_failure": true,
      "stop_on_condition": {
        "type": "regex",
        "term": "<string>",
        "group_id": "<string>",
        "path": "<string>",
        "value": null
      }
    },
    "max_runs_per_day": 123,
    "agentic_context_enabled": false,
    "duplication_prevention": {
      "selectors": [
        "<string>"
      ],
      "enabled": false,
      "ttl_minutes": 10
    }
  },
  "notification_settings": {
    "on_success": {},
    "on_error": {}
  },
  "voice_id": "<string>",
  "using_nemo": false,
  "prompts_caching_enabled": false,
  "is_supervised": false,
  "on_prem_event_streaming": true,
  "use_oidc_pre_auth": false,
  "pre_auth_audiences": [],
  "use_oidc_pre_auth_token_for_llm": false,
  "oidc_pre_auth_token_llm_audience": "<string>",
  "oidc_pre_auth_token_mcp_audience": "<string>"
}
'
{
  "name": "<string>",
  "organization_id": "<string>",
  "webhook_url": "<string>",
  "id": "<string>",
  "unique_name": "<string>",
  "origin_template": "<string>",
  "delegation_end_strategy": "return-to-start",
  "environment_id": "<string>",
  "sub_agents_continuous_thread": true,
  "deployment_type": "serverless",
  "created_by_prompt": "<string>",
  "prompts": [],
  "is_latest": false,
  "has_pending_changes": false,
  "deep_planning": false,
  "enforce_deep_planning": false,
  "connectivity_details": {},
  "framework": "agno",
  "description": "",
  "tools": [],
  "icon": "🚀",
  "avatar": "male-avatar",
  "source_nodes": [],
  "attached_tools": [],
  "access_scope": "personal",
  "instructions": {
    "role": [],
    "goal": [],
    "general": ""
  },
  "oas": {},
  "graph": [],
  "llm_settings": [],
  "status": "ACTIVE",
  "knowledge_bases": [],
  "version": 1,
  "created_by": "<string>",
  "created_at": "2023-11-07T05:31:56Z",
  "type": "manager",
  "delegation_type": "router",
  "delegation_memory_strategy": "summarization",
  "is_ai_employee": false,
  "using_nemo": false,
  "deletable": true,
  "model_provider": "anthropic",
  "model_name": "claude-sonnet-4-6",
  "llm_reasoning_effort": "medium",
  "llm_api_base": "<string>",
  "output_format": "markdown",
  "voice_id": "<string>",
  "output_schema": {},
  "llm_credentials_key": "<string>",
  "llm_credentials_key_type": "xpander",
  "llm_credentials": {
    "name": "<string>",
    "value": "<string>",
    "description": "<string>"
  },
  "llm_extra_headers": {},
  "expected_output": "",
  "agno_settings": {
    "session_storage": true,
    "learning": false,
    "agent_memories": false,
    "agentic_culture": false,
    "user_memories": false,
    "agentic_memory": false,
    "session_summaries": false,
    "num_history_runs": 10,
    "max_tool_calls_from_history": 0,
    "tool_call_limit": 0,
    "coordinate_mode": true,
    "pii_detection_enabled": false,
    "pii_detection_mask": true,
    "prompt_injection_detection_enabled": false,
    "openai_moderation_enabled": false,
    "reasoning_tools_enabled": false,
    "tool_calls_compression": {
      "enabled": false,
      "instructions": "",
      "threshold": 3
    },
    "max_plan_retries": 15,
    "plan_retry_strategy": "tiered",
    "memory_strategy": "disabled"
  },
  "on_prem_event_streaming": true,
  "prompts_caching_enabled": false,
  "is_supervised": false,
  "orchestration_nodes": [],
  "notification_settings": {},
  "task_level_strategies": {
    "retry_strategy": {
      "enabled": false,
      "max_retries": 3
    },
    "iterative_strategy": {
      "enabled": false,
      "max_iterations": 3,
      "end_condition": {
        "type": "regex",
        "term": "<string>",
        "group_id": "<string>",
        "path": "<string>",
        "value": null
      }
    },
    "stop_strategy": {
      "enabled": false,
      "stop_on_failure": true,
      "stop_on_condition": {
        "type": "regex",
        "term": "<string>",
        "group_id": "<string>",
        "path": "<string>",
        "value": null
      }
    },
    "max_runs_per_day": 123,
    "agentic_context_enabled": false,
    "duplication_prevention": {
      "selectors": [
        "<string>"
      ],
      "enabled": false,
      "ttl_minutes": 10
    }
  },
  "use_oidc_pre_auth": false,
  "pre_auth_audiences": [],
  "use_oidc_pre_auth_token_for_llm": false,
  "oidc_pre_auth_token_llm_audience": "<string>",
  "oidc_pre_auth_token_mcp_audience": "<string>"
}
Create a new AI agent with custom configuration. The agent will be created and ready for configuration and deployment.

Request Body

name
string
required
Display name for the agent
description
string
Description of the agent’s purpose and capabilities
deployment_type
string
Deployment infrastructure: serverless or container. Defaults to serverless if not specified.
container deployment type is in beta and creates agents using container-based framework execution.
model_provider
string
LLM provider: anthropic (default), openai, etc.
model_name
string
Specific model version (e.g., gpt-4o, gpt-4.1)
icon
string
Emoji icon for the agent (e.g., "🚀", "📊")
instructions
object
System instructions configuration
access_scope
string
Access control scope for the agent. Determines who can see and use the agent. Possible values: personal (visible only to the creator) or organizational (visible to the entire organization). Defaults to personal if not specified.
environment_id
string
Target environment ID (optional)
using_nemo
boolean
Enable NVIDIA NeMo guardrails (default: false)
llm_api_base
string
Custom API base URL for LLM provider (for AI Gateway configurations)
llm_extra_headers
object
Custom HTTP headers to include in LLM requests for gateway integration
llm_reasoning_effort
string
Reasoning effort level for the LLM (e.g., low, medium, high)
type
string
Agent type: manager (default), regular, a2a, curl
output_format
string
Output format: markdown (default), text, json, or voice
output_schema
object
JSON schema for structured output when output_format is json
expected_output
string
Natural-language description of the desired output
framework
string
Agent framework (e.g., agno)
attached_tools
array
Array of connector tool attachments with connection IDs and selected operation IDs
graph
array
Agent workflow graph configuration defining tool execution order
knowledge_bases
array
Array of knowledge base IDs to attach to the agent
agno_settings
object
Advanced agent settings including memory, session storage, tool limits, and safety features
task_level_strategies
object
Task-level strategies for retry, stop conditions, and iteration
notification_settings
object
Notification configuration (Slack, email, webhook) for agent events
delegation_type
string
How the agent delegates to sub-agents
delegation_memory_strategy
string
Memory strategy for agent delegation
deep_planning
boolean
Enable deep planning mode for complex multi-step tasks
voice_id
string
Voice ID for text-to-speech output
source_nodes
array
Source node configurations (e.g., Slack, web UI triggers)
is_supervised
boolean
Whether the agent requires human approval for tool calls

Response

Returns a complete AIAgent object:
id
string
Unique identifier for the created agent (UUID)
name
string
Display name of the agent
description
string
Agent description
icon
string
Emoji icon representing the agent
status
string
Current deployment status: ACTIVE or INACTIVE
organization_id
string
UUID of the organization that owns this agent
deployment_type
string
Deployment infrastructure: serverless, container, or null
instructions
object
System instructions configuration
access_scope
string
Access control scope: personal or organizational
model_provider
string
AI model provider (e.g., openai)
model_name
string
Specific model version (e.g., gpt-4o, gpt-4.1)
framework
string
Agent framework used (e.g., agno)
unique_name
string
Auto-generated human-friendly slug identifier (e.g., “beige-swallow”)
version
integer
Agent version number
tools
array
Array of tools available to the agent
knowledge_bases
array
Array of knowledge bases attached to the agent
graph
array
Agent workflow graph configuration
webhook_url
string
Auto-generated webhook URL for agent invocations

Example Request

Minimal create request (required fields only):
curl -X POST "https://api.xpander.ai/v1/agents" \
  -H "Content-Type: application/json" \
  -H "x-api-key: <your-api-key>" \
  -d '{
    "name": "Customer Support Agent",
    "model_provider": "openai",
    "model_name": "gpt-5.2"
  }'
With full configuration:
curl -X POST "https://api.xpander.ai/v1/agents" \
  -H "Content-Type: application/json" \
  -H "x-api-key: <your-api-key>" \
  -d '{
    "name": "Customer Support Agent",
    "model_provider": "openai",
    "model_name": "gpt-5.2",
    "instructions": {
      "role": ["Customer support specialist"],
      "goal": ["Resolve customer issues efficiently"],
      "general": "Be helpful, professional, and empathetic"
    },
    "access_scope": "organizational"
  }'

Example Response

{
  "id": "<agent-id>",
  "unique_name": "jade-bass",
  "name": "Customer Support Agent",
  "description": "",
  "deployment_type": "serverless",
  "framework": "agno",
  "status": "ACTIVE",
  "model_provider": "openai",
  "model_name": "gpt-5.2",
  "organization_id": "<org-id>",
  "version": 2,
  "has_pending_changes": false,
  "is_latest": false,
  "tools": [],
  "instructions": {
    "role": [],
    "goal": [],
    "general": ""
  },
  "access_scope": "organizational",
  "knowledge_bases": [],
  "icon": "🚀",
  "llm_settings": [
    {
      "provider": "openai",
      "model": "gpt-5.2",
      "temperature": 0.0
    }
  ],
  "agno_settings": {
    "session_storage": true,
    "agent_memories": false,
    "agentic_culture": false,
    "user_memories": false,
    "agentic_memory": false,
    "session_summaries": false,
    "num_history_runs": 10,
    "max_tool_calls_from_history": 0,
    "tool_call_limit": 0,
    "coordinate_mode": true,
    "pii_detection_enabled": false,
    "pii_detection_mask": true,
    "prompt_injection_detection_enabled": false,
    "openai_moderation_enabled": false,
    "openai_moderation_categories": null,
    "reasoning_tools_enabled": true,
    "tool_calls_compression": {
      "enabled": false,
      "threshold": 3,
      "instructions": ""
    }
  },
  "webhook_url": "https://webhook.xpander.ai/?agent_id=<agent-id>&asynchronous=false"
}

Notes

  • Only name is required. All other fields are optional and will use platform defaults.
  • The unique_name is auto-generated as a human-friendly slug (e.g., “jade-bass”)
  • The webhook_url is auto-generated for agent invocations
  • Agent starts in ACTIVE status immediately upon creation and is ready for invocation
  • version starts at 2 and increments with each deployment
  • has_pending_changes indicates whether there are unpublished configuration changes
  • The API may auto-upgrade model_name (e.g., requested gpt-4.1 returns gpt-5.2)

Next Steps

After creating an agent:
  1. Update instructions using Update Agent
  2. Add tools and knowledge bases to the agent
  3. Deploy the agent using Deploy Agent
  4. Invoke the agent using the task execution endpoints

Authorizations

x-api-key
string
header
required

API Key for authentication

Body

application/json

Request model for creating a new AI agent on the xpander.ai platform.

Only name is required. All other fields have sensible defaults matching the platform's standard agent configuration. The agent will be created with type 'manager' by default, which supports tool use, sub-agent delegation, and multi-step task execution.

Fields like organization_id, id, status, version, and other system-managed properties are set automatically by the platform and should not be provided.

name
string
required

Human-readable name for the agent. Must be unique within the organization. Examples: 'Customer Support Agent', 'Research Assistant'.

description
string | null
default:""

A brief description of what the agent does. Shown in agent listings and used by other agents when deciding delegation. Keep it concise and action-oriented.

icon
string | null
default:🚀

Emoji icon displayed next to the agent name in the UI. Single emoji character.

avatar
string | null
default:male-avatar

Avatar identifier for the agent's visual representation in chat interfaces.

type
enum<string> | null
default:manager

The agent type. 'manager' is the standard type that supports tools, sub-agents, and multi-step execution. 'regular' is a simpler agent without orchestration capabilities. 'a2a' and 'curl' are for external agent integrations. Do NOT set to 'orchestration' — use the Workflows API instead.

Available options:
manager,
regular,
a2a,
curl,
orchestration
created_by
string | null

User ID of the creator. Auto-populated from the API key if not provided.

model_provider
enum<string> | null
default:anthropic

The LLM provider to use for this agent's reasoning. Must match an available provider from GET /llm_providers. Common values: 'anthropic', 'openai', 'gemini'.

Available options:
openai,
nim,
amazon_bedrock,
azure_ai_foundary,
huggingFace,
friendlyAI,
anthropic,
gemini,
fireworks,
google_ai_studio,
helicone,
bytedance,
tzafon_lightcone,
open_router,
nebius,
cloudflare_ai_gw
model_name
string | null
default:claude-sonnet-4-6

The specific model identifier within the chosen provider. Must match a model from GET /llm_providers/{provider}/models. Examples: 'claude-sonnet-4-6', 'gpt-4o', 'gemini-2.0-flash'.

llm_reasoning_effort
enum<string> | null
default:medium

Controls the depth of reasoning the LLM applies. 'low' for simple tasks, 'medium' for balanced performance, 'high' for complex reasoning, 'xhigh' for maximum reasoning depth (slower, more expensive).

Available options:
low,
medium,
high,
xhigh
llm_api_base
string | null

Custom API base URL for the LLM provider. Use this when connecting to a self-hosted or proxied LLM endpoint instead of the provider's default URL. Leave None to use the provider's standard endpoint.

llm_credentials_key
string | null

Reference key to stored LLM API credentials in the xpander.ai vault. When set, the agent uses these credentials instead of the organization's default. Create credentials via the platform settings.

llm_credentials_key_type
enum<string> | null
default:xpander

Type of credential storage. 'xpander' uses xpander.ai's built-in credential vault. 'custom' indicates externally managed credentials.

Available options:
xpander,
custom
llm_credentials
LLMCredentials · object

Direct LLM credentials object. Prefer using llm_credentials_key for secure credential management. Only use this for testing or when vault access is unavailable.

llm_extra_headers
Llm Extra Headers · object

Additional HTTP headers to include in every LLM API request. Useful for custom authentication, routing through gateways (e.g., Helicone, Cloudflare AI Gateway), or passing metadata.

instructions
AIAgentInstructions · object

Structured instructions that define the agent's behavior. Contains 'role' (list of role descriptions), 'goal' (list of objectives), and 'general' (free-form instructions text). These are injected into the agent's system prompt.

expected_output
string | null
default:""

Description of the expected output format and content. Guides the agent on what the final response should look like. Used in the system prompt to set output expectations.

output_format
enum<string> | null
default:markdown

The format for the agent's final response. 'markdown' for rich text, 'text' for plain text, 'json' for structured JSON output (pair with output_schema), 'voice' for speech synthesis.

Available options:
text,
markdown,
json,
voice
output_schema
Output Schema · object

JSON Schema defining the structure of the agent's output when output_format is 'json'. The agent will conform its response to match this schema. Must be a valid JSON Schema object.

framework
string | null
default:agno

The execution framework for this agent. 'agno' is the default xpander.ai framework with full feature support. Other options: 'langchain', 'open-ai-agents', 'google-adk', 'strands-agents'.

delegation_type
enum<string> | null
default:router

How this agent delegates work to sub-agents. 'router' means the LLM dynamically decides which sub-agent to invoke. 'sequence' means sub-agents execute in a predefined order, each passing results to the next.

Available options:
router,
sequence
delegation_memory_strategy
enum<string> | null
default:summarization

How conversation context is shared between sub-agents during delegation. 'full' passes the complete memory. 'summarization' creates a summary between each delegation (reduces token usage). 'original-input' gives each sub-agent only the original task input.

Available options:
full,
summarization,
original-input
delegation_end_strategy
enum<string> | null
default:return-to-start

What happens when the last sub-agent in a delegation chain finishes. 'return-to-start' summarizes and returns to the first agent for final response. 'finish-with-last' ends execution at the last agent.

Available options:
return-to-start,
finish-with-last
sub_agents_continuous_thread
boolean | null
default:true

When True, sub-agents share a continuous conversation thread. When False, each sub-agent starts with a fresh thread. Only relevant for multi-agent setups.

deep_planning
boolean | null
default:false

Enable deep planning mode where the agent creates a detailed execution plan before taking actions. Useful for complex multi-step tasks. Increases latency but improves accuracy on complex workflows.

enforce_deep_planning
boolean | null
default:false

When True, forces the agent to always use deep planning regardless of task complexity. When False (default), the agent decides when to plan based on the task.

attached_tools
Connector · object[] | null

List of connector connections with their operation IDs that this agent can use as tools. Each entry binds a connection (connector_organization) to specific operations the agent is allowed to invoke.

Structure: [{"id": "<connection_id>", "operation_ids": ["<catalog_operation_id_1>", "<catalog_operation_id_2>"]}]

  • 'id' is the connection ID (connector_organization.id) from POST /connectors/{connector_id}/connect
  • 'operation_ids' are the catalog operation _id values from GET /connectors/{connector_id}/{connection_id}/operations

If you provide attached_tools without corresponding graph entries, the API will automatically create graph items of type 'tool' for each operation, resolving catalog _id to operationId.

Special cases:

  • Custom functions: use id="xpander-custom-functions" and operation_ids=["<custom_function_id>", ...] Custom function IDs are the function UUIDs, not catalog operation IDs.
  • Sub-agents: do NOT use attached_tools. Add sub-agents directly to the 'graph' field with type='agent'.
  • MCP servers: do NOT use attached_tools. Add MCP servers directly to the 'graph' field with type='mcp'.

Example (connector operations): [{"id": "770832b8-c32b-4e4c-9ca2-232fec8099a7", "operation_ids": ["694bddfcd72d937c39875cf7"]}]

Example (custom functions): [{"id": "xpander-custom-functions", "operation_ids": ["my-function-uuid-1", "my-function-uuid-2"]}]

graph
AIAgentGraphItem · object[] | null

The agent's tool graph — defines which tools are available to the LLM and their execution flow. Each graph item represents a tool the agent can call during task execution.

For connector operations (type='tool'):

  • item_id: the operationId string (e.g., 'XpanderEmailServiceSendEmailWithHtmlOrTextContent')
  • name: human-readable name shown to the LLM (e.g., 'Send Email')
  • type: 'tool'
  • targets: list of graph item IDs that should execute after this tool (empty [] for no chaining)
  • NOTE: auto-created from attached_tools if not provided

For sub-agents (type='agent'):

  • item_id: the agent ID (UUID) to delegate work to
  • name: display name of the sub-agent
  • type: 'agent'
  • NO attached_tools entry needed — sub-agents are graph-only

For custom functions (type='tool', sub_type='custom_function'):

  • item_id: the custom function UUID
  • name: function name
  • type: 'tool'
  • Requires attached_tools entry with id='xpander-custom-functions'

For MCP servers (type='mcp'):

  • item_id: a unique ID for this MCP server instance
  • name: display name of the MCP server
  • type: 'mcp'
  • settings.mcp_settings: {url, transport, auth_type, name, allowed_tools, ...}
  • NO attached_tools entry needed — MCP servers are graph-only

NOTE: If you provide attached_tools with operation_ids but no corresponding graph items, the API will auto-create graph items with resolved operationId and pretty_name from catalog.

Examples: Connector tool: {"item_id": "SlackPostMessage", "name": "Send Slack Message", "type": "tool", "targets": []} Sub-agent: {"item_id": "agent-uuid-here", "name": "Research Agent", "type": "agent", "targets": []} MCP server: {"item_id": "mcp-uuid", "name": "Notion", "type": "mcp", "targets": [], "settings": {"mcp_settings": {"url": "https://mcp.notion.com/sse", "transport": "sse", "name": "Notion"}}}

llm_settings
AIAgentGraphItemLLMSettings · object[] | null

Per-tool LLM override settings. Allows configuring a different LLM for pre-processing ('before') or post-processing ('after') specific tool calls. Each entry specifies type, provider, model, and temperature.

knowledge_bases
AgentKnowledgeBase · object[] | null

Knowledge bases attached to this agent for RAG (Retrieval-Augmented Generation). Each entry references a knowledge base by ID and specifies the retrieval strategy ('vanilla' for simple retrieval, 'agentic_rag' for agent-driven retrieval).

source_nodes
AIAgentSourceNode · object[] | null

Entry points that can trigger this agent. Defines how the agent can be invoked — via SDK, scheduled tasks, webhooks, assistant UI, MCP, A2A protocol, Telegram, or Slack.

deployment_type
enum<string> | null
default:serverless

Where the agent runs. 'serverless' runs on xpander.ai's managed infrastructure (recommended). 'container' runs on your own infrastructure as a Docker container.

Available options:
serverless,
container
access_scope
enum<string> | null
default:personal

Who can access this agent. 'personal' restricts access to the creating user. 'organizational' makes it available to all organization members.

Available options:
personal,
organizational
environment_id
string | null

Target deployment environment ID. Leave None for the organization's default environment. Use this to deploy agents to specific on-premise or regional environments.

connectivity_details
AIAgentConnectivityDetailsA2A · object

Connection details for external agent integrations (A2A protocol or CURL-based). Only relevant when type is 'a2a' or 'curl'. For standard agents, leave as empty dict.

agno_settings
AgnoSettings · object

Configuration specific to the Agno framework. Controls session storage, coordinate mode (multi-agent), learning, memory strategies, guardrails (PII detection, prompt injection), tool call limits, and plan retry strategies. Only applies when framework is 'agno'.

task_level_strategies
TaskLevelStrategies · object

Execution strategies applied at the task level. Configure retry behavior (max retries), iterative execution (max iterations with stop conditions), stop strategies, daily run limits, and agentic context (persistent memory across runs).

notification_settings
NotificationSettings · object

Notification configuration for task completion events. Define notifications to send on success or error via email, Slack, or webhook. Each channel supports custom subject, body, and branding.

voice_id
string | null

Voice ID for text-to-speech output when output_format is 'voice'. References a voice profile in the platform's TTS service.

using_nemo
boolean | null
default:false

Enable NVIDIA NeMo guardrails integration for this agent. Provides additional safety and content filtering capabilities.

prompts_caching_enabled
boolean | null
default:false

Enable LLM prompt caching to reduce latency and cost for repeated similar requests. The platform caches prompt prefixes and reuses them across invocations.

is_supervised
boolean | null
default:false

Enable supervised mode where the agent requires human approval before executing mutating operations (write/update/delete). Non-mutating operations (read/search) execute automatically.

on_prem_event_streaming
boolean | null
default:true

Enable real-time event streaming for on-premise deployments. When True, task progress events are streamed to connected clients.

use_oidc_pre_auth
boolean | null
default:false

Enable OIDC pre-authentication for this agent. When enabled, the agent requires a valid OIDC token from the invoking user before execution. Used for user-context-aware operations.

pre_auth_audiences
string[] | null

List of allowed OIDC token audiences for pre-authentication validation. Only tokens with matching audience claims are accepted.

use_oidc_pre_auth_token_for_llm
boolean | null
default:false

When True, the user's OIDC token is forwarded to the LLM provider for authenticated LLM calls. Requires use_oidc_pre_auth to be enabled.

oidc_pre_auth_token_llm_audience
string | null

The audience claim to request when exchanging the user's OIDC token for LLM provider access. Only used when use_oidc_pre_auth_token_for_llm is True.

oidc_pre_auth_token_mcp_audience
string | null

The audience claim to request when exchanging the user's OIDC token for MCP server access. Enables user-context-aware MCP tool execution.

Response

Successful Response

name
string
required
organization_id
string
required
webhook_url
string
required
id
string | null
unique_name
string | null
origin_template
string | null
delegation_end_strategy
enum<string> | null
default:return-to-start

Enumeration of the agent delegation end strategies.

Attributes: ReturnToStart: when last agent is finished and about to announce "finish" it will summarize and return to the first agent. FinishWithLast: finish at the last agent.

Available options:
return-to-start,
finish-with-last
environment_id
string | null
sub_agents_continuous_thread
boolean | null
default:true
deployment_type
enum<string> | null
default:serverless
Available options:
serverless,
container
created_by_prompt
string | null
prompts
string[] | null
is_latest
boolean | null
default:false
has_pending_changes
boolean | null
default:false
deep_planning
boolean | null
default:false
enforce_deep_planning
boolean | null
default:false
connectivity_details
AIAgentConnectivityDetailsA2A · object
framework
string | null
default:agno
description
string | null
default:""
tools
any[] | null
icon
string | null
default:🚀
avatar
string | null
default:male-avatar
source_nodes
AIAgentSourceNode · object[] | null
attached_tools
Connector · object[] | null
access_scope
enum<string> | null
default:personal
Available options:
personal,
organizational
instructions
AIAgentInstructions · object
oas
Oas · object
graph
AIAgentGraphItem · object[] | null
llm_settings
AIAgentGraphItemLLMSettings · object[] | null
status
enum<string> | null
default:ACTIVE

Enumeration of possible agent statuses.

Attributes: DRAFT: Agent is in a draft state. ACTIVE: Agent is active and operational. INACTIVE: Agent is inactive and not operational.

Available options:
DRAFT,
ACTIVE,
INACTIVE
knowledge_bases
AgentKnowledgeBase · object[] | null
version
integer | null
default:1
created_by
string | null
created_at
string<date-time> | null
type
enum<string> | null

Enumeration of the agent types.

Attributes: Manager: marks the agent as a Managing agent. Regular: marks the agent as a regular agent. A2A: marks the agent as an external agent used via A2A protocol. Curl: marks the agent as an external agent used via a CURL. Orchestration: marks the agent as an Orchestration object.

Available options:
manager,
regular,
a2a,
curl,
orchestration
delegation_type
enum<string> | null
default:router

Enumeration of the agent delegation types.

Attributes: Router: Marks the agent as a router agent - xpanderAI's LLM will decide which sub-agent to trigger. Sequence: Marks the agent as a sequence agent - sub-agents will delegate to other sub-agents.

Available options:
router,
sequence
delegation_memory_strategy
enum<string> | null
default:summarization

Enumeration of the agent delegation memory strategies.

Attributes: Full: The memory object will be passed completely between agents. Summarization: Between each sub-agent delegation, a summarization will occur, and a new thread will be created for each agent. OriginalInput: The sub-agent will get the initial task with a fresh memory thread.

Available options:
full,
summarization,
original-input
is_ai_employee
boolean | null
default:false
using_nemo
boolean | null
default:false
deletable
boolean | null
default:true
model_provider
enum<string> | null
default:anthropic
Available options:
openai,
nim,
amazon_bedrock,
azure_ai_foundary,
huggingFace,
friendlyAI,
anthropic,
gemini,
fireworks,
google_ai_studio,
helicone,
bytedance,
tzafon_lightcone,
open_router,
nebius,
cloudflare_ai_gw
model_name
string | null
default:claude-sonnet-4-6
llm_reasoning_effort
enum<string> | null
default:medium
Available options:
low,
medium,
high,
xhigh
llm_api_base
string | null
output_format
enum<string> | null
default:markdown
Available options:
text,
markdown,
json,
voice
voice_id
string | null
output_schema
Output Schema · object
llm_credentials_key
string | null
llm_credentials_key_type
enum<string> | null
default:xpander
Available options:
xpander,
custom
llm_credentials
LLMCredentials · object
llm_extra_headers
Llm Extra Headers · object
expected_output
string | null
default:""
agno_settings
AgnoSettings · object
on_prem_event_streaming
boolean | null
default:true
prompts_caching_enabled
boolean | null
default:false
is_supervised
boolean | null
default:false
orchestration_nodes
OrchestrationNode · object[] | null
notification_settings
NotificationSettings · object

Configuration for event-based notifications.

Attributes: on_success: Notifications to send when an operation succeeds. Maps notification types to a list of notification configurations. on_error: Notifications to send when an operation fails. Maps notification types to a list of notification configurations.

task_level_strategies
TaskLevelStrategies · object

Configuration object for task-level execution strategies.

This model groups optional strategy configurations that control how a task is executed and managed over time, including retries, iterative execution, stopping conditions, and daily run limits.

Attributes: retry_strategy: Optional retry policy configuration that defines how the task should behave when execution fails (e.g., max attempts, backoff rules).

iterative_strategy:
    Optional iterative execution configuration for tasks that may run in
    repeated cycles/steps until completion or a stop condition is met.

stop_strategy:
    Optional stopping policy configuration that defines when the task
    should stop running (e.g., timeout, max iterations, success criteria).

max_runs_per_day:
    Optional limit on how many times the task is allowed to run within a
    24-hour period. If not set, no explicit daily limit is enforced.

agentic_context_enabled:
    Whether agentic memory is enabled and accessible to the executor.
use_oidc_pre_auth
boolean | null
default:false
pre_auth_audiences
string[] | null
use_oidc_pre_auth_token_for_llm
boolean | null
default:false
oidc_pre_auth_token_llm_audience
string | null
oidc_pre_auth_token_mcp_audience
string | null