curl --request GET \
--url https://api.cozmox.ai/campaign \
--header 'Authorization: Bearer <token>'

{
"results": [
{
"status": "scheduled",
"name": "Q2 Sales Campaign",
"phoneNumberId": "<string>",
"customers": [
{
"numberE164CheckEnabled": true,
"extension": null,
"assistantOverrides": {
"transcriber": {
"provider": "assembly-ai",
"language": "en",
"confidenceThreshold": 0.4,
"enableUniversalStreamingApi": false,
"formatTurns": false,
"endOfTurnConfidenceThreshold": 0.7,
"minEndOfTurnSilenceWhenConfident": 160,
"wordFinalizationMaxWaitTime": 160,
"maxTurnSilence": 400,
"realtimeUrl": "<string>",
"wordBoost": [
"<string>"
],
"endUtteranceSilenceThreshold": 123,
"disablePartialTranscripts": true,
"fallbackPlan": {
"transcribers": [
{
"provider": "assembly-ai",
"language": "en",
"confidenceThreshold": 0.4,
"enableUniversalStreamingApi": false,
"formatTurns": false,
"endOfTurnConfidenceThreshold": 0.7,
"minEndOfTurnSilenceWhenConfident": 160,
"wordFinalizationMaxWaitTime": 160,
"maxTurnSilence": 400,
"realtimeUrl": "<string>",
"wordBoost": [
"<string>"
],
"endUtteranceSilenceThreshold": 123,
"disablePartialTranscripts": true
}
]
}
},
"model": {
"model": "claude-3-opus-20240229",
"provider": "anthropic",
"messages": [
{
"content": "<string>",
"role": "assistant"
}
],
"tools": [
{
"type": "apiRequest",
"method": "POST",
"url": "<string>",
"messages": [
{
"type": "request-start",
"contents": [
"<unknown>"
],
"blocking": false,
"content": "<string>",
"conditions": [
{
"operator": "eq",
"param": "<string>",
"value": "<string>"
}
]
}
],
"timeoutSeconds": 20,
"name": "<string>",
"description": "<string>",
"body": {
"type": "string",
"items": {},
"properties": {},
"description": "<string>",
"pattern": "<string>",
"format": "date-time",
"required": [
"<string>"
],
"enum": [
"<string>"
],
"title": "<string>"
},
"headers": {
"type": "string",
"items": {},
"properties": {},
"description": "<string>",
"pattern": "<string>",
"format": "date-time",
"required": [
"<string>"
],
"enum": [
"<string>"
],
"title": "<string>"
},
"backoffPlan": {
"type": "fixed",
"maxRetries": 0,
"baseDelaySeconds": 1
},
"variableExtractionPlan": {
"schema": {
"type": "string",
"items": {},
"properties": {},
"description": "<string>",
"pattern": "<string>",
"format": "date-time",
"required": [
"<string>"
],
"enum": [
"<string>"
],
"title": "<string>"
},
"aliases": [
{
"key": "<string>",
"value": "<string>"
}
]
},
"function": {
"name": "<string>",
"strict": false,
"description": "<string>",
"parameters": {
"type": "object",
"properties": {},
"required": [
"<string>"
]
}
}
}
],
"toolIds": [
"<string>"
],
"knowledgeBase": {
"provider": "custom-knowledge-base",
"server": {
"timeoutSeconds": 20,
"url": "<string>",
"headers": {},
"backoffPlan": {
"type": "fixed",
"maxRetries": 0,
"baseDelaySeconds": 1
}
}
},
"knowledgeBaseId": "<string>",
"thinking": {
"type": "enabled",
"budgetTokens": 50512
},
"temperature": 1,
"maxTokens": 5025,
"emotionRecognitionEnabled": true,
"numFastTurns": 1
},
"voice": {
"provider": "azure",
"voiceId": "andrew",
"cachingEnabled": true,
"chunkPlan": {
"enabled": true,
"minCharacters": 30,
"punctuationBoundaries": [
"。",
",",
".",
"!",
"?",
";",
"،",
"۔",
"।",
"॥",
"|",
"||",
",",
":"
],
"formatPlan": {
"enabled": true,
"numberToDigitsCutoff": 2025,
"replacements": [
{
"type": "exact",
"key": "<string>",
"value": "<string>",
"replaceAllEnabled": false
}
],
"formattersEnabled": "markdown"
}
},
"speed": 1.25,
"fallbackPlan": {
"voices": [
{
"provider": "azure",
"voiceId": "andrew",
"cachingEnabled": true,
"speed": 1.25,
"chunkPlan": {
"enabled": true,
"minCharacters": 30,
"punctuationBoundaries": [
"。",
",",
".",
"!",
"?",
";",
"،",
"۔",
"।",
"॥",
"|",
"||",
",",
":"
],
"formatPlan": {
"enabled": true,
"numberToDigitsCutoff": 2025,
"replacements": [
{
"type": "exact",
"key": "<string>",
"value": "<string>",
"replaceAllEnabled": false
}
],
"formattersEnabled": "markdown"
}
}
}
]
}
},
"firstMessage": "Hello! How can I help you today?",
"firstMessageInterruptionsEnabled": false,
"firstMessageMode": "assistant-speaks-first",
"voicemailDetection": {
"provider": "google",
"beepMaxAwaitSeconds": 30,
"backoffPlan": {
"startAtSeconds": 5,
"frequencySeconds": 5,
"maxRetries": 6
}
},
"clientMessages": [
"conversation-update",
"function-call",
"hang",
"model-output",
"speech-update",
"status-update",
"transfer-update",
"transcript",
"tool-calls",
"user-interrupted",
"voice-input",
"workflow.node.started"
],
"serverMessages": [
"conversation-update",
"end-of-call-report",
"function-call",
"hang",
"speech-update",
"status-update",
"tool-calls",
"transfer-destination-request",
"user-interrupted"
],
"silenceTimeoutSeconds": 30,
"maxDurationSeconds": 600,
"backgroundSound": "office",
"backgroundDenoisingEnabled": false,
"modelOutputInMessagesEnabled": false,
"transportConfigurations": [
{
"provider": "twilio",
"timeout": 60,
"record": false,
"recordingChannels": "mono"
}
],
"observabilityPlan": {
"provider": "langfuse",
"tags": [
"<string>"
],
"metadata": {}
},
"credentials": [
{
"provider": "anthropic",
"apiKey": "<string>",
"name": "<string>"
}
],
"hooks": [
{
"on": "call.ending",
"do": [
{
"type": "transfer",
"destination": {
"type": "number",
"number": "<string>",
"message": "<string>",
"numberE164CheckEnabled": true,
"extension": "<string>",
"callerId": "<string>",
"transferPlan": {
"mode": "blind-transfer",
"message": "<string>",
"timeout": 60,
"sipVerb": "refer",
"holdAudioUrl": "<string>",
"transferCompleteAudioUrl": "<string>",
"twiml": "<string>",
"summaryPlan": {
"messages": [
{}
],
"enabled": true,
"timeoutSeconds": 30.5
},
"sipHeadersInReferToEnabled": true,
"fallbackPlan": {
"message": "<string>",
"endCallEnabled": true
}
},
"description": "<string>"
}
}
],
"filters": [
{
"type": "oneOf",
"key": "<string>",
"oneOf": [
"<string>"
]
}
]
}
],
"variableValues": {},
"name": "<string>",
"voicemailMessage": "<string>",
"endCallMessage": "<string>",
"endCallPhrases": [
"<string>"
],
"compliancePlan": {
"hipaaEnabled": {
"hipaaEnabled": false
},
"pciEnabled": {
"pciEnabled": false
}
},
"metadata": {},
"backgroundSpeechDenoisingPlan": {
"smartDenoisingPlan": {
"enabled": false
},
"fourierDenoisingPlan": {
"enabled": false,
"mediaDetectionEnabled": true,
"staticThreshold": -35,
"baselineOffsetDb": -15,
"windowSizeMs": 3000,
"baselinePercentile": 85
}
},
"analysisPlan": {
"minMessagesThreshold": 1,
"summaryPlan": {
"messages": [
{}
],
"enabled": true,
"timeoutSeconds": 30.5
},
"structuredDataPlan": {
"messages": [
{}
],
"enabled": true,
"schema": {
"type": "string",
"items": {},
"properties": {},
"description": "<string>",
"pattern": "<string>",
"format": "date-time",
"required": [
"<string>"
],
"enum": [
"<string>"
],
"title": "<string>"
},
"timeoutSeconds": 30.5
},
"structuredDataMultiPlan": [
{
"key": "<string>",
"plan": {
"messages": [
{}
],
"enabled": true,
"schema": {
"type": "string",
"items": {},
"properties": {},
"description": "<string>",
"pattern": "<string>",
"format": "date-time",
"required": [
"<string>"
],
"enum": [
"<string>"
],
"title": "<string>"
},
"timeoutSeconds": 30.5
}
}
],
"successEvaluationPlan": {
"rubric": "NumericScale",
"messages": [
{}
],
"enabled": true,
"timeoutSeconds": 30.5
}
},
"artifactPlan": {
"recordingEnabled": true,
"recordingFormat": "wav;l16",
"videoRecordingEnabled": false,
"pcapEnabled": true,
"pcapS3PathPrefix": "/pcaps",
"transcriptPlan": {
"enabled": true,
"assistantName": "<string>",
"userName": "<string>"
},
"recordingPath": "<string>"
},
"messagePlan": {
"idleMessages": [
"<string>"
],
"idleMessageMaxSpokenCount": 5.5,
"idleMessageResetCountOnUserSpeechEnabled": true,
"idleTimeoutSeconds": 32.5,
"silenceTimeoutMessage": "<string>"
},
"startSpeakingPlan": {
"waitSeconds": 0.4,
"smartEndpointingEnabled": false,
"smartEndpointingPlan": {
"provider": "cozmox"
},
"customEndpointingRules": [
{
"type": "assistant",
"regex": "<string>",
"timeoutSeconds": 7.5,
"regexOptions": [
{
"type": "ignore-case",
"enabled": true
}
]
}
],
"transcriptionEndpointingPlan": {
"onPunctuationSeconds": 0.1,
"onNoPunctuationSeconds": 1.5,
"onNumberSeconds": 0.5
}
},
"stopSpeakingPlan": {
"numWords": 0,
"voiceSeconds": 0.2,
"backoffSeconds": 1,
"acknowledgementPhrases": [
"i understand",
"i see",
"i got it",
"i hear you",
"im listening",
"im with you",
"right",
"okay",
"ok",
"sure",
"alright",
"got it",
"understood",
"yeah",
"yes",
"uh-huh",
"mm-hmm",
"gotcha",
"mhmm",
"ah",
"yeah okay",
"yeah sure"
],
"interruptionPhrases": [
"stop",
"shut",
"up",
"enough",
"quiet",
"silence",
"but",
"dont",
"not",
"no",
"hold",
"wait",
"cut",
"pause",
"nope",
"nah",
"nevermind",
"never",
"bad",
"actually"
]
},
"monitorPlan": {
"listenEnabled": false,
"listenAuthenticationEnabled": false,
"controlEnabled": false,
"controlAuthenticationEnabled": false
},
"credentialIds": [
"<string>"
],
"server": {
"timeoutSeconds": 20,
"url": "<string>",
"headers": {},
"backoffPlan": {
"type": "fixed",
"maxRetries": 0,
"baseDelaySeconds": 1
}
},
"keypadInputPlan": {
"enabled": true,
"timeoutSeconds": 5,
"delimiters": "#"
}
},
"number": "<string>",
"sipUri": "<string>",
"name": "<string>",
"email": "<string>",
"externalId": "<string>"
}
],
"id": "<string>",
"orgId": "<string>",
"createdAt": "2023-11-07T05:31:56Z",
"updatedAt": "2023-11-07T05:31:56Z",
"calls": {},
"callsCounterScheduled": 123,
"callsCounterQueued": 123,
"callsCounterInProgress": 123,
"callsCounterEndedVoicemail": 123,
"callsCounterEnded": 123,
"endedReason": "campaign.scheduled.ended-by-user",
"assistantId": "<string>",
"workflowId": "<string>",
"schedulePlan": {
"earliestAt": "2023-11-07T05:31:56Z",
"latestAt": "2023-11-07T05:31:56Z"
}
}
],
"metadata": {
"itemsPerPage": 123,
"totalItems": 123,
"currentPage": 123
}
}
Available options: scheduled, in-progress, ended

This is the page number to return. Defaults to 1.
Required range: x >= 1

This is the sort order for pagination. Defaults to 'DESC'.
Available options: ASC, DESC

This is the maximum number of items to return. Defaults to 100.
Required range: 0 <= x <= 1000

This will return items where the createdAt is greater than the specified value.
This will return items where the createdAt is less than the specified value.
This will return items where the createdAt is greater than or equal to the specified value.
This will return items where the createdAt is less than or equal to the specified value.
This will return items where the updatedAt is greater than the specified value.
This will return items where the updatedAt is less than the specified value.
This will return items where the updatedAt is greater than or equal to the specified value.
This will return items where the updatedAt is less than or equal to the specified value.
This is the status of the campaign.
Available options: scheduled, in-progress, ended

This is the name of the campaign. This is just for your own reference.
"Q2 Sales Campaign"
This is the phone number ID that will be used for the campaign calls.
These are the customers that will be called in the campaign.
This is the flag to toggle the E164 check for the number field. This is an advanced property which should be used if you know your use case requires it.
Use cases:
false: To allow non-E164 numbers like +001234567890, 1234, or abc. This is useful for dialing out to non-E164 numbers on your SIP trunks.
true (default): To allow only E164 numbers like +14155551234. This is standard for PSTN calls.
If false, the number is still required to only contain alphanumeric characters (regex: /^\+?[a-zA-Z0-9]+$/).
@default true (E164 check is enabled)
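For illustration only, a customers[] entry that relies on this flag to dial a short, non-E164 number might look like the following sketch; the field names come from the response example above and the values are placeholders:

{
  "numberE164CheckEnabled": false,
  "number": "1234",
  "extension": "101",
  "name": "Front Desk",
  "externalId": "crm-0042"
}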
This is the extension that will be dialed after the call is answered.
Maximum length: 10
Example: null
These are the overrides for the assistant's settings and template variables specific to this customer. This allows customization of the assistant's behavior for individual customers in batch calls.
These are the options for the assistant's transcriber.
This is the transcription provider that will be used.
Available options: assembly-ai

This is the language that will be set for the transcription.
Available options: en

Transcripts below this confidence threshold will be discarded.
@default 0.4
Required range: 0 <= x <= 1. Example: 0.4
Uses Assembly AI's new Universal Streaming API. See: https://www.assemblyai.com/docs/speech-to-text/universal-streaming
@default false
false
This enables formatting of transcripts. Only used when enableUniversalStreamingApi is true.
@default false
false
The confidence threshold to use when determining if the end of a turn has been reached. Only used when enableUniversalStreamingApi is true.
@default 0.7
Required range: 0 <= x <= 1. Example: 0.7
The minimum amount of silence in milliseconds required to detect end of turn when confident. Only used when enableUniversalStreamingApi is true.
@default 160
Required range: x >= 0. Example: 160
The maximum wait time for word finalization. Only used when enableUniversalStreamingApi is true.
@default 160
Required range: x >= 0. Example: 160
The maximum amount of silence in milliseconds allowed in a turn before end of turn is triggered. Only used when enableUniversalStreamingApi is true.
@default 400
Required range: x >= 0. Example: 400
The WebSocket URL that the transcriber connects to.
Add up to 2500 characters of custom vocabulary.
Maximum length: 2500

The duration of the end utterance silence threshold in milliseconds.
Disable partial transcripts.
Set to true to not receive partial transcripts. Defaults to false.
This is the plan for transcriber fallbacks in the event that the primary transcriber fails.
This is the transcription provider that will be used.
Available options: assembly-ai

This is the language that will be set for the transcription.
Available options: en

Transcripts below this confidence threshold will be discarded.
@default 0.4
Required range: 0 <= x <= 1. Example: 0.4
Uses Assembly AI's new Universal Streaming API. See: https://www.assemblyai.com/docs/speech-to-text/universal-streaming
@default false
false
This enables formatting of transcripts. Only used when enableUniversalStreamingApi is true.
@default false
false
The confidence threshold to use when determining if the end of a turn has been reached. Only used when enableUniversalStreamingApi is true.
@default 0.7
Required range: 0 <= x <= 1. Example: 0.7
The minimum amount of silence in milliseconds required to detect end of turn when confident. Only used when enableUniversalStreamingApi is true.
@default 160
Required range: x >= 0. Example: 160
The maximum wait time for word finalization. Only used when enableUniversalStreamingApi is true.
@default 160
Required range: x >= 0. Example: 160
The maximum amount of silence in milliseconds allowed in a turn before end of turn is triggered. Only used when enableUniversalStreamingApi is true.
@default 400
Required range: x >= 0. Example: 400
The WebSocket URL that the transcriber connects to.
Add up to 2500 characters of custom vocabulary.
Maximum length: 2500

The duration of the end utterance silence threshold in milliseconds.
Disable partial transcripts.
Set to true to not receive partial transcripts. Defaults to false.
These are the options for the assistant's LLM.
The specific Anthropic/Claude model that will be used.
Available options: claude-3-opus-20240229, claude-3-sonnet-20240229, claude-3-haiku-20240307, claude-3-5-sonnet-20240620, claude-3-5-sonnet-20241022, claude-3-5-haiku-20241022, claude-3-7-sonnet-20250219, claude-opus-4-20250514, claude-sonnet-4-20250514

The provider identifier for Anthropic.
Available options: anthropic

This is the starting state for the conversation.
These are the tools that the assistant can use during the call. To use existing tools, use toolIds.
Both tools and toolIds can be used together.
The type of tool. "apiRequest" for API request tool.
Available options: apiRequest

Available options: POST, GET

This is where the request will be sent.
These are the messages that will be spoken to the user as the tool is running.
For some tools, this is auto-filled based on special fields like tool.destinations. For others like the function tool, these can be custom configured.
This message is triggered when the tool call starts.
This message is never triggered for async tools.
If this message is not provided, one of the default filler messages "Hold on a sec", "One moment", "Just a sec", "Give me a moment" or "This'll just take a sec" will be used.
Available options: request-start

This is an alternative to the content property. It allows you to specify variants of the same content, one per language.
Usage:
This will override the content property.
This is an optional boolean that if true, the tool call will only trigger after the message is spoken. Default is false.
@default false
false
This is the content that the assistant says when this message is triggered.
Maximum length: 1000

This is an optional array of conditions that the tool call arguments must meet in order for this message to be triggered.
This is the operator you want to use to compare the parameter and value.
Available options: eq, neq, gt, gte, lt, lte

This is the name of the parameter that you want to check.
Maximum length: 1000

This is the value you want to compare against the parameter.
Maximum length: 1000

This is the timeout in seconds for the request. Defaults to 20 seconds.
@default 20
Required range: 1 <= x <= 300. Example: 20
This is the name of the tool. This will be passed to the model.
Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 40.
Maximum length: 40

This is the description of the tool. This will be passed to the model.
Maximum length: 1000

This is the body of the request.
This is the type of output you'd like.
string, number, integer, boolean are the primitive types and should be obvious.
array and object are more interesting and quite powerful. They allow you to define nested structures.
For array, you can define the schema of the items in the array using the items property.
For object, you can define the properties of the object using the properties property.
Available options: string, number, integer, boolean, array, object

This is required if the type is "array". This is the schema of the items in the array.
This is of type JsonSchema. However, Swagger doesn't support circular references.
This is required if the type is "object". This specifies the properties of the object.
This is a map of string to JsonSchema. However, Swagger doesn't support circular references.
This is the description to help the model understand what it needs to output.
This is the pattern of the string. This is a regex that will be used to validate the data in question. To use a common format, use the format property instead.
OpenAI documentation: https://platform.openai.com/docs/guides/structured-outputs#supported-properties
This is the format of the string. To pass a regex, use the pattern property instead.
OpenAI documentation: https://platform.openai.com/docs/guides/structured-outputs?api-mode=chat&type-restrictions=string-restrictions
Available options: date-time, time, date, duration, email, hostname, ipv4, ipv6, uuid

This is a list of properties that are required.
This only makes sense if the type is "object".
This array specifies the allowed values that can be used to restrict the output of the model.
This is the title of the schema.
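Purely as a sketch of how these schema fields compose (the property names below are invented for the example, not taken from the API), a request body definition could look like:

{
  "type": "object",
  "title": "CreateOrderBody",
  "properties": {
    "orderId": { "type": "string", "pattern": "^[A-Z0-9-]+$" },
    "requestedAt": { "type": "string", "format": "date-time" },
    "priority": { "type": "string", "enum": ["low", "normal", "high"] }
  },
  "required": ["orderId"]
}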
These are the headers to send in the request.
This is the type of output you'd like.
string, number, integer, boolean are the primitive types and should be obvious.
array and object are more interesting and quite powerful. They allow you to define nested structures.
For array, you can define the schema of the items in the array using the items property.
For object, you can define the properties of the object using the properties property.
Available options: string, number, integer, boolean, array, object

This is required if the type is "array". This is the schema of the items in the array.
This is of type JsonSchema. However, Swagger doesn't support circular references.
This is required if the type is "object". This specifies the properties of the object.
This is a map of string to JsonSchema. However, Swagger doesn't support circular references.
This is the description to help the model understand what it needs to output.
This is the pattern of the string. This is a regex that will be used to validate the data in question. To use a common format, use the format property instead.
OpenAI documentation: https://platform.openai.com/docs/guides/structured-outputs#supported-properties
This is the format of the string. To pass a regex, use the pattern property instead.
OpenAI documentation: https://platform.openai.com/docs/guides/structured-outputs?api-mode=chat&type-restrictions=string-restrictions
Available options: date-time, time, date, duration, email, hostname, ipv4, ipv6, uuid

This is a list of properties that are required.
This only makes sense if the type is "object".
This array specifies the allowed values that can be used to restrict the output of the model.
This is the title of the schema.
This is the backoff plan if the request fails. Defaults to undefined (the request will not be retried).
@default undefined (the request will not be retried)
This is the type of backoff plan to use. Defaults to fixed.
@default fixed
"fixed"
This is the maximum number of retries to attempt if the request fails. Defaults to 0 (no retries).
@default 0
Required range: 0 <= x <= 10. Example: 0
This is the base delay in seconds. For linear backoff, this is the delay between each retry. For exponential backoff, this is the initial delay.
Required range: 0 <= x <= 10. Example: 1
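As a minimal sketch (the values are illustrative, not defaults), a backoff plan that retries a failed request a few times might look like:

{
  "backoffPlan": {
    "type": "fixed",
    "maxRetries": 3,
    "baseDelaySeconds": 2
  }
}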
This is the plan to extract variables from the tool's response. These will be accessible during the call and stored in call.artifact.variableValues after the call.
Usage:
1. aliases to extract variables from the tool's response body. (Most common case)

{
"aliases": [
{
"key": "customerName",
"value": "{{customer.name}}"
},
{
"key": "customerAge",
"value": "{{customer.age}}"
}
]
}

The tool response body is made available to the liquid template.
2. aliases to extract variables from the tool's response body if the response is an array.

{
"aliases": [
{
"key": "customerName",
"value": "{{$[0].name}}"
},
{
"key": "customerAge",
"value": "{{$[0].age}}"
}
]
}

$ is a shorthand for the tool's response body. $[0] is the first item in the array. $[n] is the nth item in the array. Note, $ is available regardless of the response body type (both object and array).
3. aliases to extract variables from the tool's response headers.

{
"aliases": [
{
"key": "customerName",
"value": "{{tool.response.headers.customer-name}}"
},
{
"key": "customerAge",
"value": "{{tool.response.headers.customer-age}}"
}
]
}

tool.response is made available to the liquid template. Particularly, both tool.response.headers and tool.response.body are available. Note, tool.response is available regardless of the response body type (both object and array).
4. schema to extract a large portion of the tool's response body.

4.1. If you hit example.com and it returns {"name": "John", "age": 30}, then you can specify the schema as:
{
"schema": {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"age": {
"type": "number"
}
}
}
}

4.2. If you hit example.com and it returns {"name": {"first": "John", "last": "Doe"}}, then you can specify the schema as:
{
"schema": {
"type": "object",
"properties": {
"name": {
"type": "object",
"properties": {
"first": {
"type": "string"
},
"last": {
"type": "string"
}
}
}
}
}
}

These will be extracted as {{ name }} and {{ age }} respectively. To emphasize, object properties are extracted as direct global variables.
4.3. If you hit example.com and it returns {"name": {"first": "John", "last": "Doe"}}, then you can specify the schema as:
{
"schema": {
"type": "object",
"properties": {
"name": {
"type": "object",
"properties": {
"first": {
"type": "string"
},
"last": {
"type": "string"
}
}
}
}
}
}

These will be extracted as {{ name }}. And, {{ name.first }} and {{ name.last }} will be accessible.
4.4. If you hit example.com and it returns ["94123", "94124"], then you can specify the schema as:
{
"schema": {
"type": "array",
"title": "zipCodes",
"items": {
"type": "string"
}
}
}

This will be extracted as {{ zipCodes }}. To access the array items, you can use {{ zipCodes[0] }} and {{ zipCodes[1] }}.
4.5. If you hit example.com and it returns [{"name": "John", "age": 30, "zipCodes": ["94123", "94124"]}, {"name": "Jane", "age": 25, "zipCodes": ["94125", "94126"]}], then you can specify the schema as:
{
"schema": {
"type": "array",
"title": "people",
"items": {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"age": {
"type": "number"
},
"zipCodes": {
"type": "array",
"items": {
"type": "string"
}
}
}
}
}
}

This will be extracted as {{ people }}. To access the array items, you can use {{ people[n].name }}, {{ people[n].age }}, {{ people[n].zipCodes }}, {{ people[n].zipCodes[0] }} and {{ people[n].zipCodes[1] }}.
Note: Both aliases and schema can be used together.
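For illustration, a plan that combines both, reusing the schema and alias shapes from the examples above with placeholder values, might look like:

{
  "schema": {
    "type": "object",
    "properties": {
      "name": { "type": "string" },
      "age": { "type": "number" }
    }
  },
  "aliases": [
    {
      "key": "customerName",
      "value": "{{name}}"
    }
  ]
}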
This is the schema to extract.
Examples:
{
"type": "object",
"properties": {
"name": {
"type": "string"
},
"age": {
"type": "number"
}
}
}

These will be extracted as {{ name }} and {{ age }} respectively. To emphasize, object properties are extracted as direct global variables.
{
"type": "object",
"properties": {
"name": {
"type": "object",
"properties": {
"first": {
"type": "string"
},
"last": {
"type": "string"
}
}
}
}
}

These will be extracted as {{ name }}. And, {{ name.first }} and {{ name.last }} will be accessible.
{
"type": "array",
"title": "zipCodes",
"items": {
"type": "string"
}
}

This will be extracted as {{ zipCodes }}. To access the array items, you can use {{ zipCodes[0] }} and {{ zipCodes[1] }}.
{
"type": "array",
"name": "people",
"items": {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"age": {
"type": "number"
},
"zipCodes": {
"type": "array",
"items": {
"type": "string"
}
}
}
}
}

This will be extracted as {{ people }}. To access the array items, you can use {{ people[n].name }}, {{ people[n].age }}, {{ people[n].zipCodes }}, {{ people[n].zipCodes[0] }} and {{ people[n].zipCodes[1] }}.
This is the type of output you'd like.
string, number, integer, boolean are the primitive types and should be obvious.
array and object are more interesting and quite powerful. They allow you to define nested structures.
For array, you can define the schema of the items in the array using the items property.
For object, you can define the properties of the object using the properties property.
Available options: string, number, integer, boolean, array, object

This is required if the type is "array". This is the schema of the items in the array.
This is of type JsonSchema. However, Swagger doesn't support circular references.
This is required if the type is "object". This specifies the properties of the object.
This is a map of string to JsonSchema. However, Swagger doesn't support circular references.
This is the description to help the model understand what it needs to output.
This is the pattern of the string. This is a regex that will be used to validate the data in question. To use a common format, use the format property instead.
OpenAI documentation: https://platform.openai.com/docs/guides/structured-outputs#supported-properties
This is the format of the string. To pass a regex, use the pattern property instead.
OpenAI documentation: https://platform.openai.com/docs/guides/structured-outputs?api-mode=chat&type-restrictions=string-restrictions
Available options: date-time, time, date, duration, email, hostname, ipv4, ipv6, uuid

This is a list of properties that are required.
This only makes sense if the type is "object".
This array specifies the allowed values that can be used to restrict the output of the model.
This is the title of the schema.
These are additional variables to create.
These will be accessible during the call as {{key}} and stored in call.artifact.variableValues after the call.
Example:
{
"aliases": [
{
"key": "customerName",
"value": "{{name}}"
},
{
"key": "fullName",
"value": "{{firstName}} {{lastName}}"
},
{
"key": "greeting",
"value": "Hello {{name}}, welcome to {{company}}!"
},
{
"key": "customerEmail",
"value": "{{addresses[0].city}}"
},
{
"key": "something",
"value": "{{any liquid}}"
}
]
}

This will create variables customerName, fullName, customerEmail, greeting, and something. To access these variables, you can reference them as {{customerName}}, {{fullName}}, {{customerEmail}}, {{greeting}}, and {{something}}.
This is the key of the variable.
This variable will be accessible during the call as {{key}} and stored in call.artifact.variableValues after the call.
Rules:
Length: 1 - 40

This is the value of the variable.
This can reference existing variables, use filters, and perform transformations.
Examples: "{{name}}", "{{customer.email}}", "Hello {{name | upcase}}"
Maximum length: 10000

This is the function definition of the tool.
For endCall, transferCall, and dtmf tools, this is auto-filled based on tool-specific fields like tool.destinations. But, even in those cases, you can provide a custom function definition for advanced use cases.
An example of an advanced use case is if you want to customize the message that's spoken for endCall tool. You can specify a function where it returns an argument "reason". Then, in messages array, you can have many "request-complete" messages. One of these messages will be triggered if the messages[].conditions matches the "reason" argument.
This is the name of the function to be called.
Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
Maximum length: 64

This is a boolean that controls whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the parameters field. Only a subset of JSON Schema is supported when strict is true. Learn more about Structured Outputs in the OpenAI guide.
@default false
This is the description of what the function does, used by the AI to choose when and how to call the function.
Maximum length: 1000

These are the parameters the function accepts, described as a JSON Schema object.
See the OpenAI guide for examples, and the JSON Schema reference for documentation about the format.
Omitting parameters defines a function with an empty parameter list.
This must be set to 'object'. It instructs the model to return a JSON object containing the function call properties.
Available options: object

This provides a description of the properties required by the function. JSON Schema can be used to specify expectations for each property. Refer to this doc for a comprehensive guide on JSON Schema.
This is the type of output you'd like.
string, number, integer, boolean are the primitive types and should be obvious.
array and object are more interesting and quite powerful. They allow you to define nested structures.
For array, you can define the schema of the items in the array using the items property.
For object, you can define the properties of the object using the properties property.
Available options: string, number, integer, boolean, array, object

This is required if the type is "array". This is the schema of the items in the array.
This is of type JsonSchema. However, Swagger doesn't support circular references.
This is required if the type is "object". This specifies the properties of the object.
This is a map of string to JsonSchema. However, Swagger doesn't support circular references.
This is the description to help the model understand what it needs to output.
This is the pattern of the string. This is a regex that will be used to validate the data in question. To use a common format, use the format property instead.
OpenAI documentation: https://platform.openai.com/docs/guides/structured-outputs#supported-properties
This is the format of the string. To pass a regex, use the pattern property instead.
OpenAI documentation: https://platform.openai.com/docs/guides/structured-outputs?api-mode=chat&type-restrictions=string-restrictions
Available options: date-time, time, date, duration, email, hostname, ipv4, ipv6, uuid

This is a list of properties that are required.
This only makes sense if the type is "object".
This array specifies the allowed values that can be used to restrict the output of the model.
This is the title of the schema.
This specifies the properties that are required by the function.
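As a rough sketch only (the function name and parameter below are invented for the example), a custom function definition using these fields might look like:

{
  "function": {
    "name": "lookup_order_status",
    "strict": false,
    "description": "Looks up the current status of a customer's order.",
    "parameters": {
      "type": "object",
      "properties": {
        "orderId": { "type": "string" }
      },
      "required": ["orderId"]
    }
  }
}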
These are the tools that the assistant can use during the call. To use transient tools, use tools.
Both tools and toolIds can be used together.
These are the options for the knowledge base.
This knowledge base is a bring-your-own knowledge base implementation.
Available options: custom-knowledge-base

This is where the knowledge base request will be sent.
Request Example:
POST https://{server.url} Content-Type: application/json
{ "messsage": { "type": "knowledge-base-request", "messages": [ { "role": "user", "content": "Why is ocean blue?" } ], ...other metadata about the call... } }
Response Expected:
{
"message": {
"role": "assistant",
"content": "The ocean is blue because water absorbs everything but blue.",
}, // YOU CAN RETURN THE EXACT RESPONSE TO SPEAK
"documents": [
{
"content": "The ocean is blue primarily because water absorbs colors in the red part of the light spectrum and scatters the blue light, making it more visible to our eyes.",
"similarity": 1
},
{
"content": "Blue light is scattered more by the water molecules than other colors, enhancing the blue appearance of the ocean.",
"similarity": .5
}
] // OR, YOU CAN RETURN AN ARRAY OF DOCUMENTS THAT WILL BE SENT TO THE MODEL
}
This is the timeout in seconds for the request. Defaults to 20 seconds.
@default 20
Required range: 1 <= x <= 300. Example: 20
This is where the request will be sent.
These are the headers to include in the request.
Each key-value pair represents a header name and its value.
This is the backoff plan if the request fails. Defaults to undefined (the request will not be retried).
@default undefined (the request will not be retried)
This is the type of backoff plan to use. Defaults to fixed.
@default fixed
"fixed"
This is the maximum number of retries to attempt if the request fails. Defaults to 0 (no retries).
@default 0
Required range: 0 <= x <= 10. Example: 0
This is the base delay in seconds. For linear backoff, this is the delay between each retry. For exponential backoff, this is the initial delay.
Required range: 0 <= x <= 10. Example: 1
This is the ID of the knowledge base the model will use.
Optional configuration for Anthropic's thinking feature. Only applicable for claude-3-7-sonnet-20250219 model. If provided, maxTokens must be greater than thinking.budgetTokens.
Available options: enabled

The maximum number of tokens to allocate for thinking. Must be between 1024 and 100000 tokens.
Required range: 1024 <= x <= 100000

This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency.
Required range: 0 <= x <= 2

This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250.
Required range: 50 <= x <= 10000

This determines whether we detect the user's emotion while they speak and send it as additional info to the model.
Default is false because the model is usually good at understanding the user's emotion from text.
@default false
This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. For example, gpt-3.5-turbo if the provider is openai.
Default is 0.
@default 0
Required range: x >= 0

These are the options for the assistant's voice.
This is the voice provider that will be used.
Available options: azure

This is the provider-specific ID that will be used.
Available options: andrew, brian, emma

This is the flag to toggle voice caching for the assistant.
true
This is the plan for chunking the model output before it is sent to the voice provider.
This determines whether the model output is chunked before being sent to the voice provider. Default true.
Usage:
To disable chunking, set this to false. To keep chunking enabled, set this to true.
If disabled, cozmox-provided audio control tokens will not work.
@default true
true
This is the minimum number of characters in a chunk.
Usage:
@default 30
Required range: 1 <= x <= 80. Example: 30
These are the punctuations that are considered valid boundaries for a chunk to be created.
Usage:
Default is automatically set to balance the trade-off between quality and latency based on the provider.
Available options: 。, ,, ., !, ?, ;, ), ،, ۔, ।, ॥, |, ||, ,, :

[
"。",
",",
".",
"!",
"?",
";",
"،",
"۔",
"।",
"॥",
"|",
"||",
",",
":"
]

This is the plan for formatting the chunk before it is sent to the voice provider.
This determines whether the chunk is formatted before being sent to the voice provider. This helps with enunciation. This includes phone numbers, emails and addresses. Default true.
Usage:
To disable formatting, set this to false.
If voice.chunkPlan.enabled is false, this is automatically false since there's no chunk to format.
@default true
true
This is the cutoff after which a number is converted to individual digits instead of being spoken as words.
Example:
Usage:
@default 2025
Required range: x >= 0. Example: 2025
These are the custom replacements you can make to the chunk before it is sent to the voice provider.
Usage:
ExactReplacement type. Eg. { type: 'exact', key: 'hello', value: 'hi' }
RegexReplacement type. Eg. { type: 'regex', regex: '\\b[a-zA-Z]{5}\\b', value: 'hi' }
@default []
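For illustration, a replacements array that combines both types (keys and values are placeholders, following the inline examples above) might look like:

"replacements": [
  { "type": "exact", "key": "hello", "value": "hi", "replaceAllEnabled": true },
  { "type": "regex", "regex": "\\b[a-zA-Z]{5}\\b", "value": "hi" }
]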
This is the exact replacement type. You can use this to replace a specific word or phrase with a different word or phrase.
Usage:
Available options: exact

This is the key to replace.
This is the value that will replace the match.
Maximum length: 1000

This option lets you control whether to replace all instances of the key or only the first one. By default, it only replaces the first instance. Examples:
List of formatters to apply. If not provided, all default formatters will be applied. If provided, only the specified formatters will be applied. Note: Some essential formatters like angle bracket removal will always be applied.
@default undefined
Available options: markdown, asterisk, quote, dash, newline, colon, acronym, dollarAmount, email, date, time, distance, unit, percentage, phoneNumber, number, stripAsterisk

This is the speed multiplier that will be used.
Required range: 0.5 <= x <= 2

This is the plan for voice provider fallbacks in the event that the primary voice provider fails.
This is the list of voices to fallback to in the event that the primary voice provider fails.
This is the voice provider that will be used.
Available options: azure

This is the provider-specific ID that will be used.
Available options: andrew, brian, emma

This is the flag to toggle voice caching for the assistant.
true
This is the speed multiplier that will be used.
Required range: 0.5 <= x <= 2

This is the plan for chunking the model output before it is sent to the voice provider.
This determines whether the model output is chunked before being sent to the voice provider. Default true.
Usage:
To disable chunking, set this to false. To keep chunking enabled, set this to true.
If disabled, cozmox-provided audio control tokens will not work.
@default true
true
This is the minimum number of characters in a chunk.
Usage:
@default 30
Required range: 1 <= x <= 80. Example: 30
These are the punctuations that are considered valid boundaries for a chunk to be created.
Usage:
Default is automatically set to balance the trade-off between quality and latency based on the provider.
Available options: 。, ,, ., !, ?, ;, ), ،, ۔, ।, ॥, |, ||, ,, :

[
"。",
",",
".",
"!",
"?",
";",
"،",
"۔",
"।",
"॥",
"|",
"||",
",",
":"
]

This is the plan for formatting the chunk before it is sent to the voice provider.
This determines whether the chunk is formatted before being sent to the voice provider. This helps with enunciation. This includes phone numbers, emails and addresses. Default true.
Usage:
To disable formatting, set this to false.
If voice.chunkPlan.enabled is false, this is automatically false since there's no chunk to format.
@default true
true
This is the cutoff after which a number is converted to individual digits instead of being spoken as words.
Example:
Usage:
@default 2025
Required range: x >= 0. Example: 2025
These are the custom replacements you can make to the chunk before it is sent to the voice provider.
Usage:
ExactReplacement type. Eg. { type: 'exact', key: 'hello', value: 'hi' }
RegexReplacement type. Eg. { type: 'regex', regex: '\\b[a-zA-Z]{5}\\b', value: 'hi' }
@default []
This is the exact replacement type. You can use this to replace a specific word or phrase with a different word or phrase.
Usage:
Available options: exact

This is the key to replace.
This is the value that will replace the match.
Maximum length: 1000

This option lets you control whether to replace all instances of the key or only the first one. By default, it only replaces the first instance. Examples:
List of formatters to apply. If not provided, all default formatters will be applied. If provided, only the specified formatters will be applied. Note: Some essential formatters like angle bracket removal will always be applied.
@default undefined
Available options: markdown, asterisk, quote, dash, newline, colon, acronym, dollarAmount, email, date, time, distance, unit, percentage, phoneNumber, number, stripAsterisk

This is the first message that the assistant will say. This can also be a URL to a containerized audio file (mp3, wav, etc.).
If unspecified, assistant will wait for user to speak and use the model to respond once they speak.
"Hello! How can I help you today?"
This is the mode for the first message. Default is 'assistant-speaks-first'.
Use:
assistant.model.messages at call start, call.messages at squad transfer points).
@default 'assistant-speaks-first'
Available options: assistant-speaks-first, assistant-speaks-first-with-model-generated-message, assistant-waits-for-user
Example: "assistant-speaks-first"
These are the settings to configure or disable voicemail detection. Alternatively, voicemail detection can be configured using the model.tools=[VoicemailTool]. This uses Twilio's built-in detection while the VoicemailTool relies on the model to detect if a voicemail was reached. You can use neither of them, one of them, or both of them. By default, Twilio built-in detection is enabled while VoicemailTool is not.
This is the provider to use for voicemail detection.
Available options: google

This is the maximum duration from the start of the call that we will wait for a voicemail beep, before speaking our message.
If we detect a voicemail beep before this, we will speak the message at that point.
Setting too low a value means that the bot will start speaking its voicemail message too early. If it does so before the actual beep, it will get cut off. You should definitely tune this to your use case.
@default 30 @min 0 @max 60
Required range: 0 <= x <= 30

This is the backoff plan for the voicemail detection.
This is the number of seconds to wait before starting the first retry attempt.
Required range: x >= 0

This is the interval in seconds between retry attempts.
Required range: x >= 2.5

This is the maximum number of retry attempts before giving up.
Required range: 1 <= x <= 10

These are the messages that will be sent to your Client SDKs. Default is conversation-update,function-call,hang,model-output,speech-update,status-update,transfer-update,transcript,tool-calls,user-interrupted,voice-input,workflow.node.started. You can check the shape of the messages in ClientMessage schema.
Available options: conversation-update, function-call, function-call-result, hang, language-changed, metadata, model-output, speech-update, status-update, transcript, tool-calls, tool-calls-result, tool.completed, transfer-update, user-interrupted, voice-input, workflow.node.started

[
"conversation-update",
"function-call",
"hang",
"model-output",
"speech-update",
"status-update",
"transfer-update",
"transcript",
"tool-calls",
"user-interrupted",
"voice-input",
"workflow.node.started"
]

These are the messages that will be sent to your Server URL. Default is conversation-update,end-of-call-report,function-call,hang,speech-update,status-update,tool-calls,transfer-destination-request,user-interrupted. You can check the shape of the messages in ServerMessage schema.
Available options: conversation-update, end-of-call-report, function-call, hang, language-changed, language-change-detected, model-output, phone-call-control, speech-update, status-update, transcript, transcript[transcriptType="final"], tool-calls, transfer-destination-request, transfer-update, user-interrupted, voice-input

[
"conversation-update",
"end-of-call-report",
"function-call",
"hang",
"speech-update",
"status-update",
"tool-calls",
"transfer-destination-request",
"user-interrupted"
]

How many seconds of silence to wait before ending the call. Defaults to 30.
@default 30
Required range: 10 <= x <= 3600. Example: 30
This is the maximum number of seconds that the call will last. When the call reaches this duration, it will be ended.
@default 600 (10 minutes)
Required range: 10 <= x <= 43200. Example: 600
This is the background sound in the call. Default for phone calls is 'office' and default for web calls is 'off'. You can also provide a custom sound by providing a URL to an audio file.
Available options: off, office
Example: "office"
This enables filtering of noise and background speech while the user is talking.
Default false while in beta.
@default false
false
This determines whether the model's output is used in conversation history rather than the transcription of assistant's speech.
Default false while in beta.
@default false
false
These are the configurations to be passed to the transport providers of assistant's calls, like Twilio. You can store multiple configurations for different transport providers. For a call, only the configuration matching the call transport provider is used.
Available options: twilio

The integer number of seconds that we should allow the phone to ring before assuming there is no answer.
The default is 60 seconds and the maximum is 600 seconds.
For some call flows, we will add a 5-second buffer to the timeout value you provide.
For this reason, a timeout value of 10 seconds could result in an actual timeout closer to 15 seconds.
You can set this to a short time, such as 15 seconds, to hang up before reaching an answering machine or voicemail.
@default 60
Required range: 1 <= x <= 600. Example: 60
Whether to record the call.
Can be true to record the phone call, or false to not.
The default is false.
@default false
false
The number of channels in the final recording.
Can be: mono or dual.
The default is mono.
mono records both legs of the call in a single channel of the recording file.
dual records each leg to a separate channel of the recording file.
The first channel of a dual-channel recording contains the parent call and the second channel contains the child call.
@default 'mono'
Available options: mono, dual
Example: "mono"
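As a sketch only (the values are illustrative), a Twilio transport configuration that records both legs in separate channels and shortens the ring timeout might look like:

{
  "transportConfigurations": [
    {
      "provider": "twilio",
      "timeout": 15,
      "record": true,
      "recordingChannels": "dual"
    }
  ]
}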
This is the plan for observability of assistant's calls.
Currently, only Langfuse is supported.
Available options: langfuse

This is an array of tags to be added to the Langfuse trace. Tags allow you to categorize and filter traces. https://langfuse.com/docs/tracing-features/tags

This is a JSON object that will be added to the Langfuse trace. Traces can be enriched with metadata to better understand your users, application, and experiments. https://langfuse.com/docs/tracing-features/metadata By default it includes the call metadata, assistant metadata, and assistant overrides.
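Purely as an illustration (the tag and metadata values are placeholders), an observability plan pointed at Langfuse might look like:

{
  "observabilityPlan": {
    "provider": "langfuse",
    "tags": ["q2-sales-campaign"],
    "metadata": {
      "experiment": "voicemail-detection-v2"
    }
  }
}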
These are dynamic credentials that will be used for the assistant calls. By default, all the credentials are available for use in the call but you can supplement additional credentials using this. Dynamic credentials override existing credentials.
Show child attributes
anthropic
This is not returned in the API.
10000
This is the name of credential. This is just for your reference.
1 - 40
This is a set of actions that will be performed on certain events.
Show child attributes
This is the event that triggers this hook
call.ending
1000
This is the set of actions to perform when the hook triggers
Show child attributes
This is the type of action - must be "transfer"
transfer
This is the destination details for the transfer - can be a phone number or SIP URI
Show child attributes
number
This is the phone number to transfer the call to.
3 - 40
This is spoken to the customer before connecting them to the destination.
Usage:
Set assistant.firstMessageMode=assistant-speaks-first-with-model-generated-message for the destination assistant.
This accepts a string or a ToolMessageStart class. The latter is useful if you want to specify multiple messages for different languages through the contents field.
This is the flag to toggle the E164 check for the number field. This is an advanced property which should be used if you know your use case requires it.
Use cases:
- false: To allow non-E164 numbers like +001234567890, 1234, or abc. This is useful for dialing out to non-E164 numbers on your SIP trunks.
- true (default): To allow only E164 numbers like +14155551234. This is standard for PSTN calls.
If false, the number is still required to only contain alphanumeric characters (regex: /^\+?[a-zA-Z0-9]+$/).
@default true (E164 check is enabled)
This is the extension to dial after transferring the call to the number.
1 - 10
This is the caller ID to use when transferring the call to the number.
Usage:
Provide callerId. For Twilio, you can read up more here: https://www.twilio.com/docs/voice/twiml/dial#callerid
40
This configures how transfer is executed and the experience of the destination party receiving the call. Defaults to blind-transfer.
@default transferPlan.mode='blind-transfer'
Show child attributes
This configures how transfer is executed and the experience of the destination party receiving the call.
Usage:
- blind-transfer: The assistant forwards the call to the destination without any message or summary.
- blind-transfer-add-summary-to-sip-header: The assistant forwards the call to the destination and adds a SIP header X-Transfer-Summary to the call to include the summary.
- warm-transfer-say-message: The assistant dials the destination, delivers the message to the destination party, connects the customer, and leaves the call.
- warm-transfer-say-summary: The assistant dials the destination, provides a summary of the call to the destination party, connects the customer, and leaves the call.
- warm-transfer-wait-for-operator-to-speak-first-and-then-say-message: The assistant dials the destination, waits for the operator to speak, delivers the message to the destination party, and then connects the customer.
- warm-transfer-wait-for-operator-to-speak-first-and-then-say-summary: The assistant dials the destination, waits for the operator to speak, provides a summary of the call to the destination party, and then connects the customer.
- warm-transfer-twiml: The assistant dials the destination, executes the twiml instructions on the destination call leg, connects the customer, and leaves the call.
- warm-transfer-experimental: The assistant puts the customer on hold, dials the destination, and if the destination answers (and is human), delivers a message or summary before connecting the customer. If the destination is unreachable or not human (e.g., with voicemail detection), the assistant delivers the fallbackMessage to the customer and optionally ends the call.
@default 'blind-transfer'
Available options: blind-transfer, blind-transfer-add-summary-to-sip-header, warm-transfer-say-message, warm-transfer-say-summary, warm-transfer-twiml, warm-transfer-wait-for-operator-to-speak-first-and-then-say-message, warm-transfer-wait-for-operator-to-speak-first-and-then-say-summary, warm-transfer-experimental
This is the message the assistant will deliver to the destination party before connecting the customer.
Usage:
Used only when mode is blind-transfer-add-summary-to-sip-header, warm-transfer-say-message, warm-transfer-wait-for-operator-to-speak-first-and-then-say-message, or warm-transfer-experimental.
This is the timeout in seconds for the warm-transfer-wait-for-operator-to-speak-first-and-then-say-message/summary modes.
@default 60
1 <= x <= 600
This specifies the SIP verb to use while transferring the call.
This is the URL to an audio file played while the customer is on hold during transfer.
Usage:
Used only when mode is warm-transfer-experimental.
This is the URL to an audio file played after the warm transfer message or summary is delivered to the destination party. It can be used to play a custom sound like 'beep' to notify that the transfer is complete.
Usage:
Used only when mode is warm-transfer-experimental.
These are the TwiML instructions to execute on the destination call leg before connecting the customer.
Usage:
Used only when mode is warm-transfer-twiml. Supports only Play, Say, Gather, Hangup and Pause verbs.
Example:
<Say voice="alice" language="en-US">Hello, transferring a customer to you.</Say>
<Pause length="2"/>
<Say>They called about billing questions.</Say>
4096
This is the plan for generating a summary of the call to present to the destination party.
Usage:
Used only when mode is blind-transfer-add-summary-to-sip-header, warm-transfer-say-summary, warm-transfer-wait-for-operator-to-speak-first-and-then-say-summary, or warm-transfer-experimental.
Show child attributes
These are the messages used to generate the summary.
@default: [ { "role": "system", "content": "You are an expert note-taker. You will be given a transcript of a call. Summarize the call in 2-3 sentences. DO NOT return anything except the summary." }, { "role": "user", "content": "Here is the transcript:\n\n{{transcript}}\n\n. Here is the ended reason of the call:\n\n{{endedReason}}\n\n" } ]
You can customize by providing any messages you want.
Here are the template variables available:
- {{transcript}}: the transcript of the call from call.artifact.transcript
- {{systemPrompt}}: the system prompt of the call from assistant.model.messages[type=system].content
- {{endedReason}}: the ended reason of the call from call.endedReason
This determines whether a summary is generated and stored in call.analysis.summary. Defaults to true.
Usage:
@default true
This is how long the request is tried before giving up. When request times out, call.analysis.summary will be empty.
Usage:
@default 5 seconds
1 <= x <= 60
This flag includes the sipHeaders from above in the Refer-To SIP URI as URL-encoded query params.
@default false
This configures the fallback plan when the transfer fails (destination unreachable, busy, or not human).
Usage:
Used only when mode is warm-transfer-experimental. If no message is provided for warm-transfer-experimental, a default message will be used.
Show child attributes
This is the message the assistant will deliver to the customer if the transfer fails.
This controls what happens after delivering the failure message to the customer.
@default true
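Putting the hook pieces together, a call.ending hook that transfers to a phone number with a warm transfer could be sketched roughly as follows (the key names hooks, on, do, type, destination, and the nested structure are assumed from the field descriptions above, not an exact schema):
{
"hooks": [
{
"on": "call.ending",
"do": [
{
"type": "transfer",
"destination": {
"type": "number",
"number": "+14155551234",
"message": "Connecting you to our support team now.",
"transferPlan": {
"mode": "warm-transfer-say-summary",
"fallbackPlan": {
"message": "The transfer could not be completed.",
"endCallEnabled": true
}
}
}
}
]
}
]
}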
This is the description of the destination, used by the AI to choose when and how to transfer the call.
This is the set of filters that must match for the hook to trigger
Show child attributes
This is the type of filter - currently only "oneOf" is supported
oneOf
1000
This is the key to filter on (e.g. "call.endedReason")
1000
This is the array of possible values to match against
1000
These are values that will be used to replace the template variables in the assistant messages and other text-based fields. This uses LiquidJS syntax. https://liquidjs.com/tutorials/intro-to-liquid.html
So for example, {{ name }} will be replaced with the value of name in variableValues.
{{"now" | date: "%b %d, %Y, %I:%M %p", "America/New_York"}} will be replaced with the current date and time in New York.
Some cozmox reserved defaults:
This is the name of the assistant.
This is required when you want to transfer between assistants in a call.
40
This is the message that the assistant will say if the call is forwarded to voicemail.
If unspecified, it will hang up.
1000
This is the message that the assistant will say if it ends the call.
If unspecified, it will hang up without saying anything.
1000
This list contains phrases that, if spoken by the assistant, will trigger the call to be hung up. Case insensitive.
2 - 140
Show child attributes
When this is enabled, no logs, recordings, or transcriptions will be stored. At the end of the call, you will still receive an end-of-call-report message to store on your server. Defaults to false.
{ "hipaaEnabled": false }When this is enabled, the user will be restricted to use PCI-compliant providers, and no logs or transcripts are stored. At the end of the call, you will receive an end-of-call-report message to store on your server. Defaults to false.
{ "pciEnabled": false }This is for metadata you want to store on the assistant.
This enables filtering of noise and background speech while the user is talking.
Features:
Smart denoising can be combined with or used independently of Fourier denoising.
Order of precedence:
Show child attributes
Whether smart denoising using Krisp is enabled.
Show child attributes
Whether smart denoising using Krisp is enabled.
Whether Fourier denoising is enabled. Note that this is experimental and may not work as expected.
This can be combined with smart denoising, and will be run afterwards.
Show child attributes
Whether Fourier denoising is enabled. Note that this is experimental and may not work as expected.
Whether automatic media detection is enabled. When enabled, the filter will automatically detect consistent background TV/music/radio and switch to more aggressive filtering settings. Only applies when enabled is true.
true
Static threshold in dB used as fallback when no baseline is established.
-80 <= x <= 0 (default: -35)
How far below the rolling baseline to filter audio, in dB. Lower values (e.g., -10) are more aggressive, higher values (e.g., -20) are more conservative.
-30 <= x <= -5 (default: -15)
Rolling window size in milliseconds for calculating the audio baseline. Larger windows adapt more slowly but are more stable.
1000 <= x <= 30000 (default: 3000)
Percentile to use for baseline calculation (1-99). Higher percentiles (e.g., 85) focus on louder speech, lower percentiles (e.g., 50) include quieter speech.
1 <= x <= 99 (default: 85)
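A rough sketch combining smart and Fourier denoising. The key names here (backgroundSpeechDenoisingPlan, smartDenoisingPlan, fourierDenoisingPlan, mediaDetectionEnabled, staticThreshold, baselineOffsetDb, windowSizeMs, baselinePercentile) are assumed from the descriptions above and may differ from the actual schema:
{
"backgroundSpeechDenoisingPlan": {
"smartDenoisingPlan": { "enabled": true },
"fourierDenoisingPlan": {
"enabled": true,
"mediaDetectionEnabled": true,
"staticThreshold": -35,
"baselineOffsetDb": -15,
"windowSizeMs": 3000,
"baselinePercentile": 85
}
}
}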
This is the plan for analysis of assistant's calls. Stored in call.analysis.
Show child attributes
The minimum number of messages required to run the analysis plan. If the number of messages is less than this, analysis will be skipped. @default 2
x >= 0
This is the plan for generating the summary of the call. This outputs to call.analysis.summary.
Show child attributes
These are the messages used to generate the summary.
@default: [ { "role": "system", "content": "You are an expert note-taker. You will be given a transcript of a call. Summarize the call in 2-3 sentences. DO NOT return anything except the summary." }, { "role": "user", "content": "Here is the transcript:\n\n{{transcript}}\n\n. Here is the ended reason of the call:\n\n{{endedReason}}\n\n" } ]
You can customize by providing any messages you want.
Here are the template variables available:
- {{transcript}}: the transcript of the call from call.artifact.transcript
- {{systemPrompt}}: the system prompt of the call from assistant.model.messages[type=system].content
- {{endedReason}}: the ended reason of the call from call.endedReason
This determines whether a summary is generated and stored in call.analysis.summary. Defaults to true.
Usage:
@default true
This is how long the request is tried before giving up. When request times out, call.analysis.summary will be empty.
Usage:
@default 5 seconds
1 <= x <= 60
This is the plan for generating the structured data from the call. This outputs to call.analysis.structuredData.
Show child attributes
These are the messages used to generate the structured data.
@default: [ { "role": "system", "content": "You are an expert data extractor. You will be given a transcript of a call. Extract structured data per the JSON Schema. DO NOT return anything except the structured data.\n\nJson Schema:\\n{{schema}}\n\nOnly respond with the JSON." }, { "role": "user", "content": "Here is the transcript:\n\n{{transcript}}\n\n. Here is the ended reason of the call:\n\n{{endedReason}}\n\n" } ]
You can customize by providing any messages you want.
Here are the template variables available:
- {{transcript}}: the transcript of the call from call.artifact.transcript
- {{systemPrompt}}: the system prompt of the call from assistant.model.messages[type=system].content
- {{schema}}: the schema of the structured data from structuredDataPlan.schema
- {{endedReason}}: the ended reason of the call from call.endedReason
This determines whether structured data is generated and stored in call.analysis.structuredData. Defaults to false.
Usage:
Requires a schema to be provided.
@default false
This is the schema of the structured data. The output is stored in call.analysis.structuredData.
Complete guide on JSON Schema can be found here.
Show child attributes
This is the type of output you'd like.
string, number, integer, boolean are the primitive types and should be obvious.
array and object are more interesting and quite powerful. They allow you to define nested structures.
For array, you can define the schema of the items in the array using the items property.
For object, you can define the properties of the object using the properties property.
Available options: string, number, integer, boolean, array, object
This is required if the type is "array". This is the schema of the items in the array.
This is of type JsonSchema. However, Swagger doesn't support circular references.
This is required if the type is "object". This specifies the properties of the object.
This is a map of string to JsonSchema. However, Swagger doesn't support circular references.
This is the description to help the model understand what it needs to output.
This is the pattern of the string. This is a regex that will be used to validate the data in question. To use a common format, use the format property instead.
OpenAI documentation: https://platform.openai.com/docs/guides/structured-outputs#supported-properties
This is the format of the string. To pass a regex, use the pattern property instead.
OpenAI documentation: https://platform.openai.com/docs/guides/structured-outputs?api-mode=chat&type-restrictions=string-restrictions
Available options: date-time, time, date, duration, email, hostname, ipv4, ipv6, uuid
This is a list of properties that are required.
This only makes sense if the type is "object".
This array specifies the allowed values that can be used to restrict the output of the model.
This is the title of the schema.
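For instance, a structuredDataPlan schema asking the model to extract a caller's intent and a callback date could look like this (an illustrative sketch; only the schema keywords documented above are used, and the enabled flag is assumed):
{
"structuredDataPlan": {
"enabled": true,
"schema": {
"type": "object",
"title": "CallOutcome",
"description": "Key facts extracted from the call.",
"properties": {
"intent": { "type": "string", "enum": ["purchase", "support", "other"] },
"callbackDate": { "type": "string", "format": "date" }
},
"required": ["intent"]
}
}
}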
This is how long the request is tried before giving up. When request times out, call.analysis.structuredData will be empty.
Usage:
@default 5 seconds
1 <= x <= 60
This is an array of structured data plan catalogs. Each entry includes a key and a plan for generating the structured data from the call. This outputs to call.analysis.structuredDataMulti.
Show child attributes
This is the key of the structured data plan in the catalog.
This is an individual structured data plan in the catalog.
Show child attributes
These are the messages used to generate the structured data.
@default: [ { "role": "system", "content": "You are an expert data extractor. You will be given a transcript of a call. Extract structured data per the JSON Schema. DO NOT return anything except the structured data.\n\nJson Schema:\\n{{schema}}\n\nOnly respond with the JSON." }, { "role": "user", "content": "Here is the transcript:\n\n{{transcript}}\n\n. Here is the ended reason of the call:\n\n{{endedReason}}\n\n" } ]
You can customize by providing any messages you want.
Here are the template variables available:
- {{transcript}}: the transcript of the call from call.artifact.transcript
- {{systemPrompt}}: the system prompt of the call from assistant.model.messages[type=system].content
- {{schema}}: the schema of the structured data from structuredDataPlan.schema
- {{endedReason}}: the ended reason of the call from call.endedReason
This determines whether structured data is generated and stored in call.analysis.structuredData. Defaults to false.
Usage:
Requires a schema to be provided.
@default false
This is the schema of the structured data. The output is stored in call.analysis.structuredData.
Complete guide on JSON Schema can be found here.
Show child attributes
This is the type of output you'd like.
string, number, integer, boolean are the primitive types and should be obvious.
array and object are more interesting and quite powerful. They allow you to define nested structures.
For array, you can define the schema of the items in the array using the items property.
For object, you can define the properties of the object using the properties property.
Available options: string, number, integer, boolean, array, object
This is required if the type is "array". This is the schema of the items in the array.
This is of type JsonSchema. However, Swagger doesn't support circular references.
This is required if the type is "object". This specifies the properties of the object.
This is a map of string to JsonSchema. However, Swagger doesn't support circular references.
This is the description to help the model understand what it needs to output.
This is the pattern of the string. This is a regex that will be used to validate the data in question. To use a common format, use the format property instead.
OpenAI documentation: https://platform.openai.com/docs/guides/structured-outputs#supported-properties
This is the format of the string. To pass a regex, use the pattern property instead.
OpenAI documentation: https://platform.openai.com/docs/guides/structured-outputs?api-mode=chat&type-restrictions=string-restrictions
Available options: date-time, time, date, duration, email, hostname, ipv4, ipv6, uuid
This is a list of properties that are required.
This only makes sense if the type is "object".
This array specifies the allowed values that can be used to restrict the output of the model.
This is the title of the schema.
This is how long the request is tried before giving up. When request times out, call.analysis.structuredData will be empty.
Usage:
@default 5 seconds
1 <= x <= 60
This is the plan for generating the success evaluation of the call. This outputs to call.analysis.successEvaluation.
Show child attributes
This enforces the rubric of the evaluation. The output is stored in call.analysis.successEvaluation.
Options include:
Default is 'PassFail'.
Available options: NumericScale, DescriptiveScale, Checklist, Matrix, PercentageScale, LikertScale, AutomaticRubric, PassFail
These are the messages used to generate the success evaluation.
@default: [ { "role": "system", "content": "You are an expert call evaluator. You will be given a transcript of a call and the system prompt of the AI participant. Determine if the call was successful based on the objectives inferred from the system prompt. DO NOT return anything except the result.\n\nRubric:\\n{{rubric}}\n\nOnly respond with the result." }, { "role": "user", "content": "Here is the transcript:\n\n{{transcript}}\n\n" }, { "role": "user", "content": "Here was the system prompt of the call:\n\n{{systemPrompt}}\n\n. Here is the ended reason of the call:\n\n{{endedReason}}\n\n" } ]
You can customize by providing any messages you want.
Here are the template variables available:
- {{transcript}}: the transcript of the call from call.artifact.transcript
- {{systemPrompt}}: the system prompt of the call from assistant.model.messages[type=system].content
- {{rubric}}: the rubric of the success evaluation from successEvaluationPlan.rubric
- {{endedReason}}: the ended reason of the call from call.endedReason
This determines whether a success evaluation is generated and stored in call.analysis.successEvaluation. Defaults to true.
Usage:
@default true
This is how long the request is tried before giving up. When request times out, call.analysis.successEvaluation will be empty.
Usage:
@default 5 seconds
1 <= x <= 60
This is the plan for artifacts generated during assistant's calls. Stored in call.artifact.
Show child attributes
This determines whether assistant's calls are recorded. Defaults to true.
Usage:
If assistant.hipaaEnabled (deprecated) or assistant.compliancePlan.hipaaEnabled is set, explicitly set this to true and make sure to provide S3 or GCP credentials on the Provider Credentials page in the Dashboard.
You can find the recording at call.artifact.recordingUrl and call.artifact.stereoRecordingUrl after the call is ended.
@default true
true
This determines the format of the recording. Defaults to wav;l16.
@default 'wav;l16'
Available options: wav;l16, mp3
This determines whether the video is recorded during the call. Defaults to false. Only relevant for webCall type.
You can find the video recording at call.artifact.videoRecordingUrl after the call is ended.
@default false
false
This determines whether the SIP packet capture is enabled. Defaults to true. Only relevant for phone type calls where phone number's provider is cozmox or byo-phone-number.
You can find the packet capture at call.artifact.pcapUrl after the call is ended.
@default true
true
This is the path where the SIP packet capture will be uploaded. This is only used if you have provided S3 or GCP credentials on the Provider Credentials page in the Dashboard.
If credential.s3PathPrefix or credential.bucketPlan.path is set, this will append to it.
Usage:
For example, /my-assistant-captures, or / to upload to the root of the bucket.
@default '/'
"/pcaps"
This is the plan for call.artifact.transcript. To disable, set transcriptPlan.enabled to false.
Show child attributes
This determines whether the transcript is stored in call.artifact.transcript. Defaults to true.
@default true
true
This is the name of the assistant in the transcript. Defaults to 'AI'.
Usage:
With assistantName set to 'Buyer':
User: Hello, how are you?
Buyer: I'm fine.
User: Do you want to buy a car?
Buyer: No.
@default 'AI'
This is the name of the user in the transcript. Defaults to 'User'.
Usage:
With userName set to 'Seller':
Seller: Hello, how are you?
AI: I'm fine.
Seller: Do you want to buy a car?
AI: No.
@default 'User'
This is the path where the recording will be uploaded. This is only used if you have provided S3 or GCP credentials on the Provider Credentials page in the Dashboard.
If credential.s3PathPrefix or credential.bucketPlan.path is set, this will append to it.
Usage:
For example, /my-assistant-recordings, or / to upload to the root of the bucket.
@default '/'
This is the plan for static predefined messages that can be spoken by the assistant during the call, like idleMessages.
Note: firstMessage, voicemailMessage, and endCallMessage are currently at the root level. They will be moved to messagePlan in the future, but will remain backwards compatible.
Show child attributes
These are the messages that the assistant will speak when the user hasn't responded for idleTimeoutSeconds. Each time the timeout is triggered, a random message will be chosen from this array.
Usage:
@default null (no idle message is spoken)
1000
This determines the maximum number of times idleMessages can be spoken during the call.
@default 3
1 <= x <= 10
This determines whether the idle message count is reset whenever the user speaks.
@default false
This is the timeout in seconds before a message from idleMessages is spoken. The clock starts when the assistant finishes speaking and remains active until the user speaks.
@default 10
5 <= x <= 60
This is the message that the assistant will say if the call ends due to silence.
If unspecified, it will hang up without saying anything.
1000
This is the plan for when the assistant should start talking.
You should configure this if you're running into these issues:
Show child attributes
This is how long assistant waits before speaking. Defaults to 0.4.
This is the minimum it will wait, but if there is latency in the pipeline, this minimum can be exceeded. This is intended as a stopgap in case the pipeline is moving too fast.
Example:
Usage:
@default 0.4
0 <= x <= 5 (default: 0.4)
false
This is the plan for smart endpointing. Pick between cozmox smart endpointing or LiveKit smart endpointing (or nothing). We strongly recommend using livekit endpointing when working in English. LiveKit endpointing is not supported in other languages, yet.
If this is set, it will override and take precedence over transcriptionEndpointingPlan.
This plan will still be overridden by any matching customEndpointingRules.
Show child attributes
This is the provider for the smart endpointing plan.
Available options: cozmox, livekit, custom-endpointing-model. Default: "cozmox"
These are the custom endpointing rules to set an endpointing timeout based on a regex on the customer's speech or the assistant's last message.
Usage:
These rules have the highest precedence and will override both smartEndpointingPlan and transcriptionEndpointingPlan when a rule is matched.
The rules are evaluated in order and the first one that matches will be used.
Order of precedence for endpointing:
@default []
Show child attributes
This endpointing rule is based on the last assistant message before customer started speaking.
Flow:
If the last assistant message matches regex, the endpointing timeout is set to timeoutSeconds.
Usage:
assistant
This is the regex pattern to match.
Note:
This is evaluated with the RegExp.test method in Node.JS. Eg. /hello/.test("hello there") will return true.
Hot tip:
Remember to escape \ when sending the regex pattern. Eg. "hello\sthere" will be sent over the wire as "hellosthere". Send "hello\\sthere" instead.
RegExp.test does substring matching, so /cat/.test("I love cats") will return true. To do full string matching, send "^cat$".
This is the endpointing timeout in seconds, if the rule is matched.
0 <= x <= 15
These are the options for the regex match. Defaults to all disabled.
@default []
Show child attributes
This is the type of the regex option. Options are:
- ignore-case: Ignores the case of the text being matched.
- whole-word: Matches whole words only.
- multi-line: Matches across multiple lines.
Available options: ignore-case, whole-word, multi-line
This is whether to enable the option.
@default false
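As an illustration, a start speaking plan that uses LiveKit smart endpointing plus one custom rule that waits longer after the assistant asks for a phone number might look like this (the key names startSpeakingPlan, type, and regexOptions are assumed from the descriptions above; note the doubled backslash from the hot tip):
{
"startSpeakingPlan": {
"smartEndpointingPlan": { "provider": "livekit" },
"customEndpointingRules": [
{
"type": "assistant",
"regex": "phone\\s?number",
"regexOptions": [{ "type": "ignore-case", "enabled": true }],
"timeoutSeconds": 3
}
]
}
}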
This determines how customer speech is considered done (endpointing) using the transcription of the customer's speech.
Once an endpoint is triggered, the request is sent to assistant.model.
Note: This plan is only used if smartEndpointingPlan is not set. If both are provided, smartEndpointingPlan takes precedence.
This plan will also be overridden by any matching customEndpointingRules.
Show child attributes
The minimum number of seconds to wait after transcription ending with punctuation before sending a request to the model. Defaults to 0.1.
This setting exists because the transcriber punctuates the transcription when it's more confident that customer has completed a thought.
@default 0.1
0 <= x <= 3 (default: 0.1)
The minimum number of seconds to wait after transcription ending without punctuation before sending a request to the model. Defaults to 1.5.
This setting exists to catch the cases where the transcriber was not confident enough to punctuate the transcription, but the customer is done and has been silent for a long time.
@default 1.5
0 <= x <= 3 (default: 1.5)
The minimum number of seconds to wait after transcription ending with a number before sending a request to the model. Defaults to 0.4.
This setting exists because the transcriber will sometimes punctuate the transcription ending with a number, even though the customer hasn't uttered the full number. This happens commonly for long numbers when the customer reads the number in chunks.
@default 0.5
0 <= x <= 3 (default: 0.5)
This is the plan for when assistant should stop talking on customer interruption.
You should configure this if you're running into these issues:
Show child attributes
This is the number of words that the customer has to say before the assistant will stop talking.
Words like "stop", "actually", "no", etc. will always interrupt immediately regardless of this value.
Words like "okay", "yeah", "right" will never interrupt.
When set to 0, voiceSeconds is used in addition to the transcriptions to determine whether the customer has started speaking.
Defaults to 0.
@default 0
0 <= x <= 10 (default: 0)
This is the number of seconds the customer has to speak before the assistant stops talking. This uses the VAD (Voice Activity Detection) spike to determine if the customer has started speaking.
Considerations:
This is only used if numWords is set to 0.
Defaults to 0.2
@default 0.2
0 <= x <= 0.5 (default: 0.2)
This is the number of seconds to wait before the assistant will start talking again after being interrupted.
Defaults to 1.
@default 1
0 <= x <= 10 (default: 1)
These are the phrases that will never interrupt the assistant, even if numWords threshold is met. These are typically acknowledgement or backchanneling phrases.
240
[
"i understand",
"i see",
"i got it",
"i hear you",
"im listening",
"im with you",
"right",
"okay",
"ok",
"sure",
"alright",
"got it",
"understood",
"yeah",
"yes",
"uh-huh",
"mm-hmm",
"gotcha",
"mhmm",
"ah",
"yeah okay",
"yeah sure"
]
These are the phrases that will always interrupt the assistant immediately, regardless of numWords. These are typically phrases indicating disagreement or desire to stop.
240
[
"stop",
"shut",
"up",
"enough",
"quiet",
"silence",
"but",
"dont",
"not",
"no",
"hold",
"wait",
"cut",
"pause",
"nope",
"nah",
"nevermind",
"never",
"bad",
"actually"
]
This is the plan for real-time monitoring of the assistant's calls.
Usage:
To enable live listening of the assistant's calls, set monitorPlan.listenEnabled to true. To enable live control of the assistant's calls, set monitorPlan.controlEnabled to true.
Show child attributes
This determines whether the assistant's calls allow live listening. Defaults to true.
Fetch call.monitor.listenUrl to get the live listening URL.
@default true
false
This enables authentication on the call.monitor.listenUrl.
If listenAuthenticationEnabled is true, the call.monitor.listenUrl will require an Authorization: Bearer <cozmox-public-api-key> header.
@default false
false
This determines whether the assistant's calls allow live control. Defaults to true.
Fetch call.monitor.controlUrl to get the live control URL.
To use, send any control message via a POST request to call.monitor.controlUrl. Here are the types of controls supported: https://docs.cozmox.ai/api-reference/messages/client-inbound-message
@default true
false
This enables authentication on the call.monitor.controlUrl.
If controlAuthenticationEnabled is true, the call.monitor.controlUrl will require an Authorization: Bearer <cozmox-public-api-key> header.
@default false
false
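A minimal monitor plan sketch, using the listen/control field names referenced above (treat this as illustrative, not an exact schema):
{
"monitorPlan": {
"listenEnabled": true,
"listenAuthenticationEnabled": true,
"controlEnabled": false,
"controlAuthenticationEnabled": false
}
}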
These are the credentials that will be used for the assistant calls. By default, all the credentials are available for use in the call but you can provide a subset using this.
This is where cozmox will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.
The order of precedence is:
Show child attributes
This is the timeout in seconds for the request. Defaults to 20 seconds.
@default 20
1 <= x <= 300 (default: 20)
This is where the request will be sent.
These are the headers to include in the request.
Each key-value pair represents a header name and its value.
This is the backoff plan if the request fails. Defaults to undefined (the request will not be retried).
@default undefined (the request will not be retried)
Show child attributes
This is the type of backoff plan to use. Defaults to fixed.
@default fixed
"fixed"
This is the maximum number of retries to attempt if the request fails. Defaults to 0 (no retries).
@default 0
0 <= x <= 10 (default: 0)
This is the base delay in seconds. For linear backoff, this is the delay between each retry. For exponential backoff, this is the initial delay.
0 <= x <= 10 (default: 1)
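A hedged example of a server (webhook) configuration with retries, using the url, timeoutSeconds, headers, and backoffPlan fields described above (the URL, header, and backoff type values are placeholders):
{
"server": {
"url": "https://example.com/cozmox/webhooks",
"timeoutSeconds": 20,
"headers": { "x-api-key": "<your-secret>" },
"backoffPlan": { "type": "exponential", "maxRetries": 3, "baseDelaySeconds": 1 }
}
}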
Show child attributes
This keeps track of whether the user has enabled keypad input. By default, it is off.
@default false
This is the time in seconds to wait before processing the input. If the input is not received within this time, the input will be ignored. If set to "off", the input will be processed when the user enters a delimiter or immediately if no delimiter is used.
@default 2
0 <= x <= 10
This is the delimiter(s) that will be used to process the input. Can be '#', '*', or an empty array.
Available options: #, *
This is the number of the customer.
3 - 40
This is the SIP URI of the customer.
This is the name of the customer. This is just for your own reference.
For SIP inbound calls, this is extracted from the From SIP header with format "Display Name" <sip:username@domain>.
40
This is the email of the customer.
40
This is the external ID of the customer.
40
This is the unique identifier for the campaign.
This is the unique identifier for the org that this campaign belongs to.
This is the ISO 8601 date-time string of when the campaign was created.
This is the ISO 8601 date-time string of when the campaign was last updated.
This is a map of call IDs to campaign call details.
This is the number of calls that have been scheduled.
This is the number of calls that have been queued.
This is the number of calls that are currently in progress.
This is the number of calls whose ended reason is 'voicemail'.
This is the number of calls that have ended.
This is the explanation for how the campaign ended.
Available options: campaign.scheduled.ended-by-user, campaign.in-progress.ended-by-user, campaign.ended.success
This is the assistant ID that will be used for the campaign calls. Note: Either assistantId or workflowId can be used, but not both.
This is the workflow ID that will be used for the campaign calls. Note: Either assistantId or workflowId can be used, but not both.
This is the schedule plan for the campaign.