Update an existing conversation flow

import Retell from 'retell-sdk';
const client = new Retell({
apiKey: 'YOUR_RETELL_API_KEY',
});
const conversationFlowResponse = await client.conversationFlow.update('conversation_flow_id');
console.log(conversationFlowResponse.conversation_flow_id);

{
"conversation_flow_id": "<string>",
"version": 123,
"model_choice": {
"type": "cascading",
"model": "gpt-4.1",
"high_priority": true
},
"model_temperature": 0.7,
"tool_call_strict_mode": true,
"knowledge_base_ids": [
"kb_001",
"kb_002"
],
"kb_config": {
"top_k": 3,
"filter_score": 0.6
},
"start_speaker": "agent",
"begin_after_user_silence_ms": 2000,
"global_prompt": "You are a helpful customer service agent.",
"tools": [
{
"type": "custom",
"name": "get_customer_info",
"description": "Get customer information from database",
"tool_id": "tool_001",
"url": "https://api.example.com/customer",
"method": "GET"
}
],
"components": [
{
"name": "Customer Information Collector",
"nodes": [
{
"id": "collect_info",
"type": "conversation",
"instruction": {
"type": "prompt",
"text": "Ask the customer for their name and contact information."
}
}
],
"tools": [
{
"type": "custom",
"name": "get_customer_info",
"description": "Get customer information from database",
"tool_id": "tool_001",
"url": "https://api.example.com/customer",
"method": "GET"
}
],
"start_node_id": "collect_info",
"begin_tag_display_position": {
"x": 100,
"y": 200
}
}
],
"start_node_id": "start",
"default_dynamic_variables": {
"company_name": "Retell Inc",
"support_hours": "9 AM - 5 PM"
},
"begin_tag_display_position": {
"x": 100,
"y": 200
},
"mcps": [
{
"name": "<string>",
"url": "<string>",
"headers": {
"Authorization": "Bearer 1234567890"
},
"query_params": {
"index": "1",
"key": "value"
},
"timeout_ms": 123
}
],
"is_transfer_llm": false,
"nodes": [
{
"id": "start",
"type": "conversation",
"instruction": {
"type": "prompt",
"text": "Greet the customer and ask how you can help them."
},
"edges": [
{
"id": "edge_1",
"transition_condition": {
"type": "prompt",
"prompt": "Customer wants to book appointment"
},
"destination_node_id": "book_appointment"
}
]
}
]
}
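The generated snippet above calls update with only the conversation flow ID. To actually change something, you normally pass the fields you want to modify as well. The sketch below is illustrative only: it assumes the SDK's update method accepts a partial body as its second argument, and the field values are examples rather than defaults.

import Retell from 'retell-sdk';

const client = new Retell({
  apiKey: 'YOUR_RETELL_API_KEY',
});

// Assumed signature: update(conversationFlowId, partialBody).
// Only the fields you include are changed; all fields are documented below.
const updated = await client.conversationFlow.update('conversation_flow_id', {
  global_prompt: 'You are a helpful customer service agent.',
  model_temperature: 0.7,
});

console.log(updated.conversation_flow_id, updated.version);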
Authentication header containing the API key (find it in the dashboard). The format is "Bearer YOUR_API_KEY".
Unique id of the conversation flow to be updated.
Optional version of the conversation flow to update. Defaults to the latest version.
1
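For reference, the same update can be made over plain HTTP. The sketch below is hedged: the Authorization header format and the version query parameter come from this page, but the endpoint path and the PATCH method are assumptions, so confirm them against the official API reference before relying on them.

// Assumed endpoint path and HTTP method; verify against the API reference.
const response = await fetch(
  'https://api.retellai.com/update-conversation-flow/conversation_flow_id?version=1',
  {
    method: 'PATCH',
    headers: {
      Authorization: 'Bearer YOUR_RETELL_API_KEY', // documented header format
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({
      global_prompt: 'You are a helpful customer service agent.',
    }),
  },
);

console.log(await response.json());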
Override properties for conversation flow configuration in agent override requests.
The model choice for the conversation flow.
Type of model choice
cascading
The LLM model to use
gpt-4.1, gpt-4.1-mini, gpt-4.1-nano, gpt-5, gpt-5-mini, gpt-5-nano, claude-4.5-sonnet, claude-4.5-haiku, gemini-2.5-flash, gemini-2.5-flash-lite
Whether to use the high-priority pool with more dedicated resources; defaults to false.
Controls the randomness of the model's responses. Lower values make responses more deterministic.
0 <= x <= 1
0.7
Whether to use strict mode for tool calls. Only applicable when using certain supported models.
true
Knowledge base IDs for RAG (Retrieval-Augmented Generation).
["kb_001", "kb_002"]
Who starts the conversation - user or agent.
user, agent
"agent"
If set, the AI will begin the conversation after waiting for the user for the duration (in milliseconds) specified by this attribute. This only applies if the agent is configured to wait for the user to speak first. If not set, the agent will wait indefinitely for the user to speak.
2000
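Putting the model and conversation-start settings above together, a partial update payload might look like the sketch below. This is illustrative only: the field names match the parameters documented on this page, while the values (and the kb_config keys, which are taken from the example at the top of the page) are just examples.

// Illustrative partial payload for conversationFlow.update().
const payload = {
  model_choice: {
    type: 'cascading',
    model: 'gpt-4.1',
    high_priority: false,
  },
  model_temperature: 0.7,          // 0 <= x <= 1
  tool_call_strict_mode: true,     // only applies to supported models
  knowledge_base_ids: ['kb_001', 'kb_002'],
  kb_config: { top_k: 3, filter_score: 0.6 },
  start_speaker: 'agent',          // 'user' or 'agent'
  begin_after_user_silence_ms: 2000,
};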
Global prompt used in every node of the conversation flow.
"You are a helpful customer service agent."
Tools available in the conversation flow.
Type of the tool
custom
Name of the tool
Server URL to call the tool. Dynamic variables can be used in the URL.
Unique identifier for the tool
Description of the tool
1024
Tool parameters schema
Type must be "object" for a JSON Schema object.
object
List of names of required properties when generating this parameter. The LLM will do its best to generate the required properties in its function arguments. Each property must exist in properties.
HTTP method to use for the request, defaults to POST
GET, POST, PUT, PATCH, DELETE
Timeout in milliseconds for the function call, defaults to 2 minutes
1000 <= x <= 600000
[
{
"type": "custom",
"name": "get_customer_info",
"description": "Get customer information from database",
"tool_id": "tool_001",
"url": "https://api.example.com/customer",
"method": "GET"
}
]
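The tool example above omits the parameters schema. A fuller custom tool definition might look like the sketch below; it is a hedged illustration that assumes the schema field is named parameters (as suggested by "Tool parameters schema"), and the specific property names are made up for the example.

// Hypothetical custom tool with a JSON Schema parameters object.
const getCustomerInfoTool = {
  type: 'custom',
  name: 'get_customer_info',
  description: 'Get customer information from database',
  tool_id: 'tool_001',
  url: 'https://api.example.com/customer',
  method: 'GET',                 // defaults to POST if omitted
  timeout_ms: 10000,             // 1000 <= x <= 600000, defaults to 2 minutes
  parameters: {
    type: 'object',              // must be "object"
    properties: {
      customer_id: { type: 'string', description: 'ID of the customer to look up' },
    },
    required: ['customer_id'],   // each entry must exist in properties
  },
};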
Local components embedded within the conversation flow.
Name of the component
"Customer Information Collector"
Nodes that make up the component
Unique identifier for the node
Type of the node
conversation
Optional name for display purposes
Condition for global node activation, cannot be empty
Transition to this node
Fine-tune the transition condition to this global node
Don't transition to this node
Fine-tune the transition condition to this global node
Unique identifier for the edge
ID of the destination node
Unique identifier for the edge
ID of the destination node
Unique identifier for the example
The example transcript to fine-tune how the conversation should be.
Unique identifier for the example
The example transcript to fine-tune how the node should transition.
Optional destination node ID
Type of model choice
cascading
The LLM model to use
gpt-4.1, gpt-4.1-mini, gpt-4.1-nano, gpt-5, gpt-5-mini, gpt-5-nano, claude-4.5-sonnet, claude-4.5-haiku, gemini-2.5-flash, gemini-2.5-flash-lite
Whether to use the high-priority pool with more dedicated resources; defaults to false.
0 <= x <= 1
Knowledge base IDs for RAG (Retrieval-Augmented Generation).
["kb_001", "kb_002"]
[
{
"id": "collect_info",
"type": "conversation",
"instruction": {
"type": "prompt",
"text": "Ask the customer for their name and contact information."
}
}
]
Tools available within the component
Type of the tool
custom
Name of the tool
Server URL to call the tool. Dynamic variables can be used in the URL.
Unique identifier for the tool
Description of the tool
1024
Tool parameters schema
Type must be "object" for a JSON Schema object.
object
List of names of required properties when generating this parameter. The LLM will do its best to generate the required properties in its function arguments. Each property must exist in properties.
HTTP method to use for the request, defaults to POST
GET, POST, PUT, PATCH, DELETE
Timeout in milliseconds for the function call, defaults to 2 minutes
1000 <= x <= 600000
[
{
"type": "custom",
"name": "get_customer_info",
"description": "Get customer information from database",
"tool_id": "tool_001",
"url": "https://api.example.com/customer",
"method": "GET"
}
]
ID of the starting node
"collect_info"
ID of the start node in the conversation flow.
"start"
A list of MCP server configurations to use for this conversation flow.
The URL of the MCP server.
Maximum time to wait for a connection to be established (in milliseconds). Defaults to 120,000 ms (2 minutes).
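The mcps entry in the example at the top of the page uses placeholder strings; a concrete entry might look like the sketch below. The keys come from that example, while the specific name, URL, and values are illustrative assumptions.

// Illustrative MCP server configuration entry.
const mcpServer = {
  name: 'crm-mcp',                                  // example name
  url: 'https://mcp.example.com/sse',               // URL of the MCP server
  headers: { Authorization: 'Bearer 1234567890' },  // sent with each request
  query_params: { index: '1', key: 'value' },
  timeout_ms: 120000,                               // connection timeout, defaults to 120,000 ms
};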
Whether this conversation flow is used for transfer LLM.
false
Array of nodes in the conversation flow.
Unique identifier for the node
Type of the node
conversation
Optional name for display purposes
Condition for global node activation, cannot be empty
Transition to this node
Fine-tune the transition condition to this global node
Don't transition to this node
Fine-tune the transition condition to this global node
Unique identifier for the edge
ID of the destination node
Unique identifier for the edge
ID of the destination node
Unique identifier for the example
Unique identifier for the example
Optional destination node ID
Type of model choice
cascading
The LLM model to use
gpt-4.1, gpt-4.1-mini, gpt-4.1-nano, gpt-5, gpt-5-mini, gpt-5-nano, claude-4.5-sonnet, claude-4.5-haiku, gemini-2.5-flash, gemini-2.5-flash-lite
Whether to use the high-priority pool with more dedicated resources; defaults to false.
0 <= x <= 1
Knowledge base IDs for RAG (Retrieval-Augmented Generation).
["kb_001", "kb_002"]
[
{
"id": "start",
"type": "conversation",
"instruction": {
"type": "prompt",
"text": "Greet the customer and ask how you can help them."
},
"edges": [
{
"id": "edge_1",
"transition_condition": {
"type": "prompt",
"prompt": "Customer wants to book appointment"
},
"destination_node_id": "book_appointment"
}
]
}
]
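As a closing illustration for the request body, the sketch below assembles several of the documented fields (global_prompt, nodes, start_node_id) into a single update call, reusing the client from the first snippet. It is hedged: it assumes update accepts a partial body as its second argument, and the node and edge contents simply mirror the example shown above.

const flow = await client.conversationFlow.update('conversation_flow_id', {
  global_prompt: 'You are a helpful customer service agent.',
  start_node_id: 'start',
  nodes: [
    {
      id: 'start',
      type: 'conversation',
      instruction: {
        type: 'prompt',
        text: 'Greet the customer and ask how you can help them.',
      },
      edges: [
        {
          id: 'edge_1',
          transition_condition: {
            type: 'prompt',
            prompt: 'Customer wants to book appointment',
          },
          destination_node_id: 'book_appointment',
        },
      ],
    },
  ],
});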
Successfully updated the conversation flow
Override properties for conversation flow configuration in agent override requests.
Unique identifier for the conversation flow
Version number of the conversation flow
The model choice for the conversation flow.
Type of model choice
cascading
The LLM model to use
gpt-4.1, gpt-4.1-mini, gpt-4.1-nano, gpt-5, gpt-5-mini, gpt-5-nano, claude-4.5-sonnet, claude-4.5-haiku, gemini-2.5-flash, gemini-2.5-flash-lite
Whether to use the high-priority pool with more dedicated resources; defaults to false.
Controls the randomness of the model's responses. Lower values make responses more deterministic.
0 <= x <= 1
0.7
Whether to use strict mode for tool calls. Only applicable when using certain supported models.
true
Knowledge base IDs for RAG (Retrieval-Augmented Generation).
["kb_001", "kb_002"]
Who starts the conversation - user or agent.
user, agent
"agent"
If set, the AI will begin the conversation after waiting for the user for the duration (in milliseconds) specified by this attribute. This only applies if the agent is configured to wait for the user to speak first. If not set, the agent will wait indefinitely for the user to speak.
2000
Global prompt used in every node of the conversation flow.
"You are a helpful customer service agent."
Tools available in the conversation flow.
Type of the tool
custom
Name of the tool
Server URL to call the tool. Dynamic variables can be used in the URL.
Unique identifier for the tool
Description of the tool
1024
Tool parameters schema
Type must be "object" for a JSON Schema object.
object
List of names of required properties when generating this parameter. The LLM will do its best to generate the required properties in its function arguments. Each property must exist in properties.
HTTP method to use for the request, defaults to POST
GET, POST, PUT, PATCH, DELETE
Timeout in milliseconds for the function call, defaults to 2 minutes
1000 <= x <= 600000
[
{
"type": "custom",
"name": "get_customer_info",
"description": "Get customer information from database",
"tool_id": "tool_001",
"url": "https://api.example.com/customer",
"method": "GET"
}
]
Local components embedded within the conversation flow.
Name of the component
"Customer Information Collector"
Nodes that make up the component
Unique identifier for the node
Type of the node
conversation
Optional name for display purposes
Condition for global node activation, cannot be empty
Transition to this node
Fine-tune the transition condition to this global node
Don't transition to this node
Fine-tune the transition condition to this global node
Unique identifier for the edge
ID of the destination node
Unique identifier for the edge
ID of the destination node
Unique identifier for the example
The example transcript to fine-tune how the conversation should be.
Unique identifier for the example
The example transcript to fine-tune how the node should transition.
Optional destination node ID
Type of model choice
cascading
The LLM model to use
gpt-4.1, gpt-4.1-mini, gpt-4.1-nano, gpt-5, gpt-5-mini, gpt-5-nano, claude-4.5-sonnet, claude-4.5-haiku, gemini-2.5-flash, gemini-2.5-flash-lite
Whether to use the high-priority pool with more dedicated resources; defaults to false.
0 <= x <= 1
Knowledge base IDs for RAG (Retrieval-Augmented Generation).
["kb_001", "kb_002"]
[
{
"id": "collect_info",
"type": "conversation",
"instruction": {
"type": "prompt",
"text": "Ask the customer for their name and contact information."
}
}
]
Tools available within the component
Type of the tool
custom
Name of the tool
Server URL to call the tool. Dynamic variables can be used in the URL.
Unique identifier for the tool
Description of the tool
1024
Tool parameters schema
Type must be "object" for a JSON Schema object.
object
List of names of required properties when generating this parameter. The LLM will do its best to generate the required properties in its function arguments. Each property must exist in properties.
HTTP method to use for the request, defaults to POST
GET, POST, PUT, PATCH, DELETE
Timeout in milliseconds for the function call, defaults to 2 minutes
1000 <= x <= 600000
[
{
"type": "custom",
"name": "get_customer_info",
"description": "Get customer information from database",
"tool_id": "tool_001",
"url": "https://api.example.com/customer",
"method": "GET"
}
]
ID of the starting node
"collect_info"
ID of the start node in the conversation flow.
"start"
A list of MCP server configurations to use for this conversation flow.
The URL of the MCP server.
Maximum time to wait for a connection to be established (in milliseconds). Defaults to 120,000 ms (2 minutes).
Whether this conversation flow is used for transfer LLM.
false
Array of nodes in the conversation flow.
Unique identifier for the node
Type of the node
conversation
Optional name for display purposes
Condition for global node activation, cannot be empty
Transition to this node
Fine-tune the transition condition to this global node
Don't transition to this node
Fine-tune the transition condition to this global node
Unique identifier for the edge
ID of the destination node
Unique identifier for the edge
ID of the destination node
Unique identifier for the example
Unique identifier for the example
Optional destination node ID
Type of model choice
cascading
The LLM model to use
gpt-4.1, gpt-4.1-mini, gpt-4.1-nano, gpt-5, gpt-5-mini, gpt-5-nano, claude-4.5-sonnet, claude-4.5-haiku, gemini-2.5-flash, gemini-2.5-flash-lite
Whether to use the high-priority pool with more dedicated resources; defaults to false.
0 <= x <= 1
Knowledge base IDs for RAG (Retrieval-Augmented Generation).
["kb_001", "kb_002"]
[
{
"id": "start",
"type": "conversation",
"instruction": {
"type": "prompt",
"text": "Greet the customer and ask how you can help them."
},
"edges": [
{
"id": "edge_1",
"transition_condition": {
"type": "prompt",
"prompt": "Customer wants to book appointment"
},
"destination_node_id": "book_appointment"
}
]
}
]
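Once the call returns, the response object exposes the fields documented above. A small hedged sketch, reusing the updated variable from the first snippet:

// Read a few documented response fields (names follow the schema above).
console.log(updated.conversation_flow_id);   // unique identifier for the flow
console.log(updated.version);                // version number of the flow
console.log(updated.start_node_id);          // ID of the start node
for (const node of updated.nodes ?? []) {
  console.log(node.id, node.type);           // e.g. 'start', 'conversation'
}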