Skip to main content
POST
/
v1
/
workflows
Create Workflow
curl --request POST \
  --url https://api.example.com/v1/workflows \
  --header 'Content-Type: application/json' \
  --header 'x-api-key: <api-key>' \
  --data '
{
  "name": "<string>",
  "description": "",
  "icon": "🚀",
  "avatar": "male-avatar",
  "model_provider": "anthropic",
  "model_name": "claude-sonnet-4-6",
  "llm_reasoning_effort": "medium",
  "llm_api_base": "<string>",
  "llm_credentials_key": "<string>",
  "llm_credentials_key_type": "xpander",
  "llm_credentials": {
    "name": "<string>",
    "value": "<string>",
    "description": "<string>"
  },
  "llm_extra_headers": {},
  "instructions": {
    "role": [],
    "goal": [],
    "general": ""
  },
  "expected_output": "",
  "output_format": "markdown",
  "output_schema": {},
  "orchestration_nodes": [],
  "task_level_strategies": {
    "retry_strategy": {
      "enabled": false,
      "max_retries": 3
    },
    "iterative_strategy": {
      "enabled": false,
      "max_iterations": 3,
      "end_condition": {
        "type": "regex",
        "term": "<string>",
        "group_id": "<string>",
        "path": "<string>",
        "value": null
      }
    },
    "stop_strategy": {
      "enabled": false,
      "stop_on_failure": true,
      "stop_on_condition": {
        "type": "regex",
        "term": "<string>",
        "group_id": "<string>",
        "path": "<string>",
        "value": null
      }
    },
    "max_runs_per_day": 123,
    "agentic_context_enabled": false,
    "duplication_prevention": {
      "selectors": [
        "<string>"
      ],
      "enabled": false,
      "ttl_minutes": 10
    }
  },
  "notification_settings": {
    "on_success": {},
    "on_error": {}
  },
  "source_nodes": [],
  "deployment_type": "serverless",
  "access_scope": "personal",
  "environment_id": "<string>",
  "using_nemo": false,
  "prompts_caching_enabled": false,
  "on_prem_event_streaming": true,
  "use_oidc_pre_auth": false,
  "pre_auth_audiences": [],
  "use_oidc_pre_auth_token_for_llm": false,
  "oidc_pre_auth_token_llm_audience": "<string>",
  "oidc_pre_auth_token_mcp_audience": "<string>"
}
'
{
  "name": "<string>",
  "organization_id": "<string>",
  "webhook_url": "<string>",
  "id": "<string>",
  "unique_name": "<string>",
  "origin_template": "<string>",
  "delegation_end_strategy": "return-to-start",
  "environment_id": "<string>",
  "sub_agents_continuous_thread": true,
  "deployment_type": "serverless",
  "created_by_prompt": "<string>",
  "prompts": [],
  "is_latest": false,
  "has_pending_changes": false,
  "deep_planning": false,
  "enforce_deep_planning": false,
  "connectivity_details": {},
  "framework": "agno",
  "description": "",
  "tools": [],
  "icon": "🚀",
  "avatar": "male-avatar",
  "source_nodes": [],
  "attached_tools": [],
  "access_scope": "personal",
  "instructions": {
    "role": [],
    "goal": [],
    "general": ""
  },
  "oas": {},
  "graph": [],
  "llm_settings": [],
  "status": "ACTIVE",
  "knowledge_bases": [],
  "version": 1,
  "created_by": "<string>",
  "created_at": "2023-11-07T05:31:56Z",
  "type": "manager",
  "delegation_type": "router",
  "delegation_memory_strategy": "summarization",
  "is_ai_employee": false,
  "using_nemo": false,
  "deletable": true,
  "model_provider": "anthropic",
  "model_name": "claude-sonnet-4-6",
  "llm_reasoning_effort": "medium",
  "llm_api_base": "<string>",
  "output_format": "markdown",
  "voice_id": "<string>",
  "output_schema": {},
  "llm_credentials_key": "<string>",
  "llm_credentials_key_type": "xpander",
  "llm_credentials": {
    "name": "<string>",
    "value": "<string>",
    "description": "<string>"
  },
  "llm_extra_headers": {},
  "expected_output": "",
  "agno_settings": {
    "session_storage": true,
    "learning": false,
    "agent_memories": false,
    "agentic_culture": false,
    "user_memories": false,
    "agentic_memory": false,
    "session_summaries": false,
    "num_history_runs": 10,
    "max_tool_calls_from_history": 0,
    "tool_call_limit": 0,
    "coordinate_mode": true,
    "pii_detection_enabled": false,
    "pii_detection_mask": true,
    "prompt_injection_detection_enabled": false,
    "openai_moderation_enabled": false,
    "reasoning_tools_enabled": false,
    "tool_calls_compression": {
      "enabled": false,
      "instructions": "",
      "threshold": 3
    },
    "max_plan_retries": 15,
    "plan_retry_strategy": "tiered",
    "memory_strategy": "disabled"
  },
  "on_prem_event_streaming": true,
  "prompts_caching_enabled": false,
  "is_supervised": false,
  "orchestration_nodes": [],
  "notification_settings": {},
  "task_level_strategies": {
    "retry_strategy": {
      "enabled": false,
      "max_retries": 3
    },
    "iterative_strategy": {
      "enabled": false,
      "max_iterations": 3,
      "end_condition": {
        "type": "regex",
        "term": "<string>",
        "group_id": "<string>",
        "path": "<string>",
        "value": null
      }
    },
    "stop_strategy": {
      "enabled": false,
      "stop_on_failure": true,
      "stop_on_condition": {
        "type": "regex",
        "term": "<string>",
        "group_id": "<string>",
        "path": "<string>",
        "value": null
      }
    },
    "max_runs_per_day": 123,
    "agentic_context_enabled": false,
    "duplication_prevention": {
      "selectors": [
        "<string>"
      ],
      "enabled": false,
      "ttl_minutes": 10
    }
  },
  "use_oidc_pre_auth": false,
  "pre_auth_audiences": [],
  "use_oidc_pre_auth_token_for_llm": false,
  "oidc_pre_auth_token_llm_audience": "<string>",
  "oidc_pre_auth_token_mcp_audience": "<string>"
}
Create a new workflow — a deterministic, multi-step pipeline where AI agents, tools, and logic nodes are wired together on a canvas. Only name is required — all other fields are optional. Data flows left-to-right through the pipeline: from a START trigger, through your processing nodes, to an END output.

Request Body

name
string
required
Display name for the workflow
description
string
Description of the workflow’s purpose
icon
string
Emoji icon for the workflow
model_provider
string
LLM provider: anthropic (default), openai, etc.
model_name
string
Specific model version
llm_reasoning_effort
string
Reasoning effort level for the orchestrator LLM
instructions
object
System instructions for the orchestrator
orchestration_nodes
array
Array of node definitions that make up the workflow canvas. Each node is powered by an AI agent (except Code nodes). Node types include:
  • pointer — Invokes one of your xpander AI agents with its full tool set and memory
  • classifier — An LLM that classifies, labels, or routes data based on natural language instructions
  • parallel — Runs multiple branches simultaneously
  • code — Executes custom code for deterministic logic (the only node that doesn’t use an LLM)
  • guardrail — An AI judge that evaluates a natural language condition and returns Pass/Fail
  • summarizer — An agent that answers specific questions from large payloads
  • wait — Pauses execution until a condition is met (webhook callback or human approval)
  • send_to_end — Skips remaining nodes and routes directly to the END block
task_level_strategies
object
Task-level strategies for retry, stop conditions, and iteration
notification_settings
object
Notification configuration (Slack, email, webhook) for workflow events
output_format
string
Output format: text, markdown (default), json, or voice
output_schema
object
JSON schema for structured output when output_format is json
expected_output
string
Natural-language description of the desired output
deployment_type
string
Deployment infrastructure: serverless (default) or container
access_scope
string
personal (default) or organizational
source_nodes
array
Source node configurations (e.g., Slack, web UI triggers)

Response

Returns the created WorkflowResponse object with generated ID and webhook URL.

Example Request

curl -X POST "https://api.xpander.ai/v1/workflows" \
  -H "Content-Type: application/json" \
  -H "x-api-key: <your-api-key>" \
  -d '{
    "name": "Customer Onboarding Workflow",
    "description": "Orchestrates the customer onboarding process",
    "model_provider": "openai",
    "model_name": "gpt-4o",
    "instructions": {
      "role": ["Workflow orchestrator"],
      "goal": ["Coordinate onboarding steps efficiently"],
      "general": "Route tasks to the appropriate sub-agents"
    }
  }'

Notes

  • The workflow is created with type: orchestration automatically
  • Every node (except Code) is powered by an AI agent — you write natural language instructions, not field mappings
  • Nodes auto-connect: data flows from each node’s output to the next node’s input as context
  • Deploy the workflow using Deploy Workflow after configuration
  • See the Workflows user guide for details on the visual canvas and node types

Authorizations

x-api-key
string
header
required

API Key for authentication

Body

application/json

Request model for creating a new workflow on the xpander.ai platform.

A workflow is a directed acyclic graph (DAG) of execution nodes. Each node can be an agent invocation, a classifier (LLM-based routing), a code block, a guardrail, a summarizer, a wait/HITL node, or an action (API call).

Only name is required. The workflow's type is automatically set to 'orchestration' — do not use the Agents API to create workflows.

Agent-specific fields (graph, attached_tools, delegation_*, deep_planning, framework, agno_settings, connectivity_details) are not applicable to workflows and are excluded from this model.

name
string
required

Human-readable name for the workflow. Examples: 'Customer Onboarding Pipeline', 'Daily Report Generator'.

description
string | null
default:""

Description of what this workflow does, its trigger conditions, and expected outcomes.

icon
string | null
default:🚀

Emoji icon for the workflow.

avatar
string | null
default:male-avatar

Avatar identifier for visual representation.

model_provider
enum<string> | null
default:anthropic

Default LLM provider for LLM-powered nodes (classifiers, guardrails, summarizers) in this workflow. Individual nodes can override this in their own settings.

Available options:
openai,
nim,
amazon_bedrock,
azure_ai_foundary,
huggingFace,
friendlyAI,
anthropic,
gemini,
fireworks,
google_ai_studio,
helicone,
bytedance,
tzafon_lightcone,
open_router,
nebius,
cloudflare_ai_gw
model_name
string | null
default:claude-sonnet-4-6

Default model for LLM-powered nodes. Individual nodes can override.

llm_reasoning_effort
enum<string> | null
default:medium

Default reasoning depth for agent nodes in this workflow.

Available options:
low,
medium,
high,
xhigh
llm_api_base
string | null

Custom API base URL for the LLM provider.

llm_credentials_key
string | null

Reference key to stored LLM credentials.

llm_credentials_key_type
enum<string> | null
default:xpander

Credential storage type.

Available options:
xpander,
custom
llm_credentials
LLMCredentials · object

Direct LLM credentials (prefer llm_credentials_key).

llm_extra_headers
Llm Extra Headers · object

Extra headers for LLM API requests.

instructions
AIAgentInstructions · object

High-level instructions for the workflow. These provide context to agent nodes within the workflow about the overall workflow purpose.

expected_output
string | null
default:""

Description of the workflow's expected final output.

output_format
enum<string> | null
default:markdown

Format for the workflow's final output.

Available options:
text,
markdown,
json,
voice
output_schema
Output Schema · object

JSON Schema for structured workflow output (when output_format is 'json').

orchestration_nodes
OrchestrationNode · object[] | null

The workflow's execution DAG — an ordered list of nodes that define the execution flow. Nodes execute sequentially following next_node_ids edges. The first non-special node in the list is the entry point.

Node types:

  • action: Invokes a connector operation or xpander built-in tool.

    • definition.asset_id: "{connection_id}_{operation_catalog_id}" (e.g., "da84aaaa-525d-4be4-ae60-091ea8bb3ae8_6822eb5f961d9263189b4017")
    • definition.type: "action"
    • definition.instructions: Instructions for the LLM on how to use this action's output
    • definition.schema_override: Optional fixed values for tool input properties
  • agent: Invokes an AI agent as a step in the workflow.

    • definition.asset_id: the agent UUID
    • definition.type: "agent"
    • definition.instructions: Task instructions for the agent
    • definition.persist_thread_id: True to maintain conversation context across runs
  • classifier: LLM-based routing — classifies input into groups and routes to different downstream nodes.

    • definition.groups: list of {id, name, evaluation_criteria} for each classification category
    • Downstream nodes use condition.group_id to match classifier output
  • guardrail: LLM-based pass/fail check with "pass" and "fail" groups.

    • Routes to pass/fail downstream nodes based on evaluation
  • summarizer: LLM-based text processing — summarizes, extracts, or transforms the previous node's output.

    • definition.instructions: What to summarize/extract
    • The special "end-summarizer" (id="end-summarizer") runs at the end of the workflow to produce the final output
  • code: Executes arbitrary Python code.

    • definition.code: Python source code string
  • wait: Pauses execution for an external event (webhook or human-in-the-loop approval).

    • definition.type: "webhook" or "hitl"
  • send_to_end: Routes execution to end nodes (end-summarizer, end-classifier).

  • parallel: Executes multiple child nodes simultaneously.

    • definition.nodes: list of child OrchestrationNode objects

Flow control:

  • next_node_ids: list of node IDs to execute after this node completes
  • condition: optional condition on a node for conditional routing (used with classifier/guardrail)
  • The workflow ends when a node has empty next_node_ids, or routes to end-summarizer

Strategies (per-node):

  • retry_strategy: {enabled, max_retries} — retry on failure
  • iterative_strategy: {enabled, max_iterations, end_condition} — loop until condition met
  • stop_strategy: {enabled, stop_on_failure, stop_on_condition} — halt workflow

Example (Extract → Email → Summarize): [ {"type": "action", "id": "step-1", "name": "Extract Content", "next_node_ids": ["step-2"], "definition": {"asset_id": "<connection_id>_<operation_id>", "type": "action", "instructions": "Extract content from the provided URL"}}, {"type": "action", "id": "step-2", "name": "Send Email", "next_node_ids": [], "definition": {"asset_id": "<connection_id>_<operation_id>", "type": "action", "instructions": "Send summary to recipient"}}, {"type": "summarizer", "id": "end-summarizer", "name": "Output Summarizer", "next_node_ids": [], "definition": {"instructions": "Summarize the workflow execution results", "settings": {"model_provider": "anthropic", "model_name": "claude-sonnet-4-6"}}} ]

task_level_strategies
TaskLevelStrategies · object

Execution strategies: retry (max retries on failure), iterative (repeated execution with stop conditions), stop (halt conditions), max_runs_per_day (daily execution limit), and agentic_context (persistent memory across workflow runs).

notification_settings
NotificationSettings · object

Notifications on workflow completion — email, Slack, or webhook on success/error.

source_nodes
AIAgentSourceNode · object[] | null

How this workflow can be triggered: SDK, scheduled task, webhook, assistant UI, MCP, A2A, Telegram, Slack.

deployment_type
enum<string> | null
default:serverless

Where the workflow runs. 'serverless' (recommended) or 'container'.

Available options:
serverless,
container
access_scope
enum<string> | null
default:personal

Who can access this workflow. 'personal' or 'organizational'.

Available options:
personal,
organizational
environment_id
string | null

Target deployment environment.

using_nemo
boolean | null
default:false

Enable NeMo guardrails.

prompts_caching_enabled
boolean | null
default:false

Enable prompt caching for LLM-powered nodes.

on_prem_event_streaming
boolean | null
default:true

Enable real-time event streaming.

use_oidc_pre_auth
boolean | null
default:false

Require OIDC pre-authentication before workflow execution.

pre_auth_audiences
string[] | null

Allowed OIDC token audiences.

use_oidc_pre_auth_token_for_llm
boolean | null
default:false

Forward OIDC token to LLM provider.

oidc_pre_auth_token_llm_audience
string | null

OIDC audience for LLM access.

oidc_pre_auth_token_mcp_audience
string | null

OIDC audience for MCP server access.

Response

Successful Response

Response model for workflow endpoints.

Inherits from AIAgent but excludes agent-specific fields that are not relevant to workflows (orchestrations). This provides a clean API surface for workflow consumers without exposing confusing agent-only concepts.

The workflow's execution logic is defined in orchestration_nodes — a DAG of typed nodes. Agent-specific fields like graph, attached_tools, delegation_*, framework, and agno_settings are hidden.

name
string
required
organization_id
string
required
webhook_url
string
required
id
string | null
unique_name
string | null
origin_template
string | null
delegation_end_strategy
enum<string> | null
default:return-to-start

Enumeration of the agent delegation end strategies.

Attributes: ReturnToStart: when last agent is finished and about to announce "finish" it will summarize and return to the first agent. FinishWithLast: finish at the last agent.

Available options:
return-to-start,
finish-with-last
environment_id
string | null
sub_agents_continuous_thread
boolean | null
default:true
deployment_type
enum<string> | null
default:serverless
Available options:
serverless,
container
created_by_prompt
string | null
prompts
string[] | null
is_latest
boolean | null
default:false
has_pending_changes
boolean | null
default:false
deep_planning
boolean | null
default:false
enforce_deep_planning
boolean | null
default:false
connectivity_details
AIAgentConnectivityDetailsA2A · object
framework
string | null
default:agno
description
string | null
default:""
tools
any[] | null
icon
string | null
default:🚀
avatar
string | null
default:male-avatar
source_nodes
AIAgentSourceNode · object[] | null
attached_tools
Connector · object[] | null
access_scope
enum<string> | null
default:personal
Available options:
personal,
organizational
instructions
AIAgentInstructions · object
oas
Oas · object
graph
AIAgentGraphItem · object[] | null
llm_settings
AIAgentGraphItemLLMSettings · object[] | null
status
enum<string> | null
default:ACTIVE

Enumeration of possible agent statuses.

Attributes: DRAFT: Agent is in a draft state. ACTIVE: Agent is active and operational. INACTIVE: Agent is inactive and not operational.

Available options:
DRAFT,
ACTIVE,
INACTIVE
knowledge_bases
AgentKnowledgeBase · object[] | null
version
integer | null
default:1
created_by
string | null
created_at
string<date-time> | null
type
enum<string> | null

Enumeration of the agent types.

Attributes: Manager: marks the agent as a Managing agent. Regular: marks the agent as a regular agent. A2A: marks the agent as an external agent used via A2A protocol. Curl: marks the agent as an external agent used via a CURL. Orchestration: marks the agent as an Orchestration object.

Available options:
manager,
regular,
a2a,
curl,
orchestration
delegation_type
enum<string> | null
default:router

Enumeration of the agent delegation types.

Attributes: Router: Marks the agent as a router agent - xpanderAI's LLM will decide which sub-agent to trigger. Sequence: Marks the agent as a sequence agent - sub-agents will delegate to other sub-agents.

Available options:
router,
sequence
delegation_memory_strategy
enum<string> | null
default:summarization

Enumeration of the agent delegation memory strategies.

Attributes: Full: The memory object will be passed completely between agents. Summarization: Between each sub-agent delegation, a summarization will occur, and a new thread will be created for each agent. OriginalInput: the sub agent will get the initial task with a fresh memory thread

Available options:
full,
summarization,
original-input
is_ai_employee
boolean | null
default:false
using_nemo
boolean | null
default:false
deletable
boolean | null
default:true
model_provider
enum<string> | null
default:anthropic
Available options:
openai,
nim,
amazon_bedrock,
azure_ai_foundary,
huggingFace,
friendlyAI,
anthropic,
gemini,
fireworks,
google_ai_studio,
helicone,
bytedance,
tzafon_lightcone,
open_router,
nebius,
cloudflare_ai_gw
model_name
string | null
default:claude-sonnet-4-6
llm_reasoning_effort
enum<string> | null
default:medium
Available options:
low,
medium,
high,
xhigh
llm_api_base
string | null
output_format
enum<string> | null
default:markdown
Available options:
text,
markdown,
json,
voice
voice_id
string | null
output_schema
Output Schema · object
llm_credentials_key
string | null
llm_credentials_key_type
enum<string> | null
default:xpander
Available options:
xpander,
custom
llm_credentials
LLMCredentials · object
llm_extra_headers
Llm Extra Headers · object
expected_output
string | null
default:""
agno_settings
AgnoSettings · object
on_prem_event_streaming
boolean | null
default:true
prompts_caching_enabled
boolean | null
default:false
is_supervised
boolean | null
default:false
orchestration_nodes
OrchestrationNode · object[] | null
notification_settings
NotificationSettings · object

Configuration for event-based notifications.

Attributes: on_success: Notifications to send when an operation succeeds. Maps notification types to a list of notification configurations. on_error: Notifications to send when an operation fails. Maps notification types to a list of notification configurations.

task_level_strategies
TaskLevelStrategies · object

Configuration object for task-level execution strategies.

This model groups optional strategy configurations that control how a task is executed and managed over time, including retries, iterative execution, stopping conditions, and daily run limits.

Attributes: retry_strategy: Optional retry policy configuration that defines how the task should behave when execution fails (e.g., max attempts, backoff rules).

iterative_strategy:
Optional iterative execution configuration for tasks that may run in
repeated cycles/steps until completion or a stop condition is met.

stop_strategy:
Optional stopping policy configuration that defines when the task
should stop running (e.g., timeout, max iterations, success criteria).

max_runs_per_day:
Optional limit on how many times the task is allowed to run within a
24-hour period. If not set, no explicit daily limit is enforced.

agentic_context_enabled:
Whether agentic memory is enabled and accessible to the executor.
use_oidc_pre_auth
boolean | null
default:false
pre_auth_audiences
string[] | null
use_oidc_pre_auth_token_for_llm
boolean | null
default:false
oidc_pre_auth_token_llm_audience
string | null
oidc_pre_auth_token_mcp_audience
string | null