{ "asyncapi": "3.0.0", "info": { "title": "DeepL Voice API - WebSocket Streaming", "version": "1.0.0", "description": "WebSocket streaming API for real-time voice transcription and translation. After obtaining a streaming URL and token via the REST API, establish a WebSocket connection to stream audio data and receive real-time transcriptions and translations.\nThe API supports two message encoding formats: JSON (default) and MessagePack. The encoding format is configured via the message_format parameter when requesting a session. JSON format uses TEXT WebSocket frames. Fields with binary data (such as audio chunks) are base64-encoded strings, while MessagePack format uses BINARY WebSocket frames where fields with binary data contain raw binary data. Compared to JSON, MessagePack typically reduces bandwidth usage by 25-30% and improves message encoding/decoding speed by 2x-4x.\nWhen using MessagePack encoding, messages must be encoded as maps with string keys, not as arrays. The message structure must match the JSON schema exactly, with all field names preserved as string keys. Array-based or integer-keyed encoding is not supported.\n", "contact": { "name": "DeepL - Contact us", "url": "https://www.deepl.com/contact-us" } }, "servers": { "production": { "host": "api.deepl.com", "pathname": "/v3/voice/realtime/connect", "protocol": "wss", "description": "DeepL Voice API WebSocket endpoint.", "variables": { "token": { "description": "This is the ephemeral authentication token obtained from the REST API", "examples": [ "VGhpcyBpcyBhIGZha2UgdG9rZW4K" ] } } } }, "channels": { "voiceStream": { "address": "/v3/voice/realtime/connect?token={token}", "description": "WebSocket channel for streaming audio and receiving transcriptions and translations. Messages are exchanged in JSON or MessagePack format. WebSocket messages are exchanged in TEXT frames when using JSON format and in BINARY frames when using MessagePack format. 
Sending the wrong frame type will result in connection errors.\n", "parameters": { "token": { "description": "This is the ephemeral authentication token obtained from the [Request Session](/api-reference/voice/request-session) endpoint. The token is valid for one-time use only and must be passed as a query parameter when establishing the WebSocket connection.\n", "examples": [ "VGhpcyBpcyBhIGZha2UgdG9rZW4K" ] } }, "messages": { "SourceMediaChunk": { "$ref": "#/components/messages/SourceMediaChunk" }, "EndOfSourceMedia": { "$ref": "#/components/messages/EndOfSourceMedia" }, "SourceTranscriptUpdate": { "$ref": "#/components/messages/SourceTranscriptUpdate" }, "TargetTranscriptUpdate": { "$ref": "#/components/messages/TargetTranscriptUpdate" }, "TargetMediaChunk": { "$ref": "#/components/messages/TargetMediaChunk" }, "EndOfSourceTranscript": { "$ref": "#/components/messages/EndOfSourceTranscript" }, "EndOfTargetTranscript": { "$ref": "#/components/messages/EndOfTargetTranscript" }, "EndOfTargetMedia": { "$ref": "#/components/messages/EndOfTargetMedia" }, "EndOfStream": { "$ref": "#/components/messages/EndOfStream" }, "Error": { "$ref": "#/components/messages/Error" } } } }, "operations": { "sendAudioData": { "action": "send", "channel": { "$ref": "#/channels/voiceStream" }, "summary": "Send audio data to the server", "description": "Send audio chunks and control messages to the server.\nAudio data must be base64-encoded when using the JSON message format (raw binary when using MessagePack) and must match the audio format configured for the session.", "messages": [ { "$ref": "#/channels/voiceStream/messages/SourceMediaChunk" }, { "$ref": "#/channels/voiceStream/messages/EndOfSourceMedia" } ] }, "receiveTranscriptions": { "action": "receive", "channel": { "$ref": "#/channels/voiceStream" }, "summary": "Receive transcriptions and translations", "description": "Receive real-time transcription and translation messages from the server.\nThe server sends transcript updates, translated audio chunks, end of transcript notifications and errors.", "messages": [ { "$ref": "#/channels/voiceStream/messages/SourceTranscriptUpdate" }, { "$ref": "#/channels/voiceStream/messages/TargetTranscriptUpdate" }, { "$ref": "#/channels/voiceStream/messages/TargetMediaChunk" }, { "$ref": "#/channels/voiceStream/messages/EndOfSourceTranscript" }, { "$ref": "#/channels/voiceStream/messages/EndOfTargetTranscript" }, { "$ref": "#/channels/voiceStream/messages/EndOfTargetMedia" }, { "$ref": "#/channels/voiceStream/messages/EndOfStream" }, { "$ref": 
"#/channels/voiceStream/messages/Error" } ] } }, "components": { "messages": { "SourceMediaChunk": { "name": "SourceMediaChunk", "title": "Source Media Chunk", "description": " The message contains a chunk of audio data. The audio encoding must be the same that was specified in the [Request Session](/api-reference/voice/request-session) request. \n\n When using JSON format, the audio data is base64-encoded. When using MessagePack format, the audio data is raw binary data. \n\n The chunk size must not be more than 100 kilobytes or one second in duration. The recommended duration is 50 - 250 milliseconds to achieve the best tradeoff between latency and quality. The interval between chunks must not be less than half of the duration of the preceding chunk and not exceed 30 seconds. Otherwise you will run into rate limits or the session will terminate due to timing out and the connection will be closed. \n\n For PCM data the chunk size must be a multiple of the frame size aka encoding unit.", "contentType": "application/json", "payload": { "$ref": "#/components/schemas/SourceMediaChunkPayload" }, "examples": [ { "name": "SourceMediaChunk", "summary": "Source media chunk", "payload": { "source_media_chunk": { "data": "VGhpcyBpcyBhIGZha2UgYXVkaW8gY2h1bmsK" } } } ] }, "EndOfSourceMedia": { "name": "EndOfSourceMedia", "title": "End of Source Media", "description": "The message indicates the end of source media data. 
It causes the finalization of tentative transcript segments and triggers the emission of final transcript updates, end of transcript messages and the end of stream message. No more data chunks can be sent afterwards. It marks the end of the stream input.\n", "contentType": "application/json", "payload": { "$ref": "#/components/schemas/EndOfSourceMediaPayload" }, "examples": [ { "name": "EndStream", "summary": "End of audio stream", "payload": { "end_of_source_media": {} } } ] }, "SourceTranscriptUpdate": { "name": "SourceTranscriptUpdate", "title": "Source Transcript Update", "description": " The message contains an update to the transcription of the supplied media in the *source* language. \n\n Each message is an incremental addition to the already received updates of the *source* transcript with concluded and tentative text segments. Concluded segments are fixed and will only appear once, while tentative segments may be updated in subsequent messages as more audio is processed. \n\n Clients should merge the concluded segments into a final transcript and update the tentative segments as new updates arrive.", "contentType": "application/json", "payload": { "$ref": "#/components/schemas/SourceTranscriptUpdatePayload" }, "examples": [ { "name": "TranscriptUpdate", "summary": "Source language transcript update", "payload": { "source_transcript_update": { "concluded": [ { "language": "en", "text": "Hello, how are you", "start_time": 0, "end_time": 1500 } ], "tentative": [ { "language": "en", "text": " today?", "start_time": 1500, "end_time": 2000 } ] } } } ] }, "TargetTranscriptUpdate": { "name": "TargetTranscriptUpdate", "title": "Target Transcript Update", "description": " The message contains an update to the transcription of the supplied media in the *target* language. \n\n Each message is an incremental addition to the already received updates of the *target* transcript with concluded and tentative text segments. 
Concluded segments are fixed and will only appear once, while tentative segments may be updated in subsequent messages as more audio is processed. \n\n Clients should merge the concluded segments into a final transcript and update the tentative segments as new updates arrive.", "contentType": "application/json", "payload": { "$ref": "#/components/schemas/TargetTranscriptUpdatePayload" }, "examples": [ { "name": "TranslationUpdate", "summary": "Target language translation update", "payload": { "target_transcript_update": { "language": "es", "concluded": [ { "text": "Hola, ¿cómo estás", "start_time": 0, "end_time": 1500 } ], "tentative": [ { "text": " hoy?", "start_time": 1500, "end_time": 2000 } ] } } } ] }, "TargetMediaChunk": { "name": "TargetMediaChunk", "title": "Target Media Chunk", "description": " (closed beta) The message contains translated audio data in the target language. \n\n The audio data is provided as an array of base64-encoded indivisible chunks (e.g., codec packets or container pages/clusters). The first message of this type includes the content type and optional headers field. The `headers` field (when present) indicates how many packets at the start of the `data` array contain initialization/header data required by the decoder. For containerized formats, all packets can be passed directly to the demuxer. For raw codec formats with headers, the header packets must be used to initialize the decoder before processing subsequent audio packets. When `headers` is `null` or absent, all packets in the `data` array are audio data. \n\n The audio stream contains only synthesized speech segments, without silence or padding. \n\n Clients should decode and play back the audio chunks in the order received and sequence given in `data`. 
For subtitle synchronization, use the `text` field to identify subtitle segments and accumulate `duration` values to calculate total playback time for each subtitle.", "contentType": "application/json", "payload": { "$ref": "#/components/schemas/TargetMediaChunkPayload" }, "examples": [ { "name": "TargetMediaChunkFirst", "summary": "First target media chunk with headers", "payload": { "target_media_chunk": { "language": "de", "content_type": "audio/webm;codecs=opus;", "headers": 1, "data": [ "GkXfo59ChoEBQveBAULygQRC84EIQoKEd0VFSUgBU0WIQo17hEgBc0WjgQBBxYWIAvLhEKBjYEfA", "H0O2dBUMRQyBElkIBE9nZ1MAAgAAAAAAAAAAtJhTXAAAAAAAoyC5AQAAAAA=", "H0O2dBUMRQyBElkIBE9nZ1MAAAAAAAAAAAAAtJhTXAIAAAAAamZ0BwE=" ], "duration": 2400, "text": "Hallo, wie geht es dir heute?" } } }, { "name": "TargetMediaChunkSubsequent", "summary": "Subsequent target media chunk", "payload": { "target_media_chunk": { "language": "de", "data": [ "H0O2dBUMRQyBElkIBE9nZ1MAAQAAAAAAAAAAtJhTXAMAAAAAcGh1CAE=", "H0O2dBUMRQyBElkIBE9nZ1MAAgAAAAAAAAAAtJhTXAQAAAAAdGltZQkB" ], "duration": 1800 } } }, { "name": "TargetMediaChunkRawOpus", "summary": "Raw Opus with RFC 7845 header", "payload": { "target_media_chunk": { "language": "de", "content_type": "audio/opus", "headers": 1, "data": [ "T3B1c0hlYWQBAgA8AABAC0AAAAAA", "z/nldGFnAAAAAAAAAAAAAAAAAAAA", "KP9J1XN0YXJ0IG9mIGF1ZGlvIGRhdGE=" ], "duration": 3200, "text": "Guten Tag" } } }, { "name": "TargetMediaChunkRawPCM", "summary": "Raw PCM without headers", "payload": { "target_media_chunk": { "language": "de", "content_type": "audio/pcm;encoding=s16le;rate=24000", "data": [ "AAEAAgADAAQABQAGAAcACAAJAAoACwAMAA0ADgAPABAAEQASABMAFAAVABYAFw==" ], "duration": 1500, "text": "Willkommen" } } } ] }, "EndOfSourceTranscript": { "name": "EndOfSourceTranscript", "title": "End of Source Transcript", "description": "The message indicates that the *source* transcript is complete and no further updates will be sent. 
It gets emitted after client sends End of Source Media.\n", "contentType": "application/json", "payload": { "$ref": "#/components/schemas/EndOfSourceTranscriptPayload" }, "examples": [ { "name": "EndSource", "summary": "Source transcript complete", "payload": { "end_of_source_transcript": {} } } ] }, "EndOfTargetTranscript": { "name": "EndOfTargetTranscript", "title": "End of Target Transcript", "description": "This message indicates that the *target* transcript is complete and no further updates will be sent. It gets emitted after client sends End of Source Media.\n", "contentType": "application/json", "payload": { "$ref": "#/components/schemas/EndOfTargetTranscriptPayload" }, "examples": [ { "name": "EndTarget", "summary": "Target transcript complete", "payload": { "end_of_target_transcript": { "language": "fr" } } } ] }, "EndOfTargetMedia": { "name": "EndOfTargetMedia", "title": "End of Target Media", "description": "(closed beta) This message indicates that the *target* media stream is complete and no further audio chunks will be sent for this target language. It gets emitted after client sends End of Source Media and all target audio has been sent.\n", "contentType": "application/json", "payload": { "$ref": "#/components/schemas/EndOfTargetMediaPayload" }, "examples": [ { "name": "EndTargetMedia", "summary": "Target media complete", "payload": { "end_of_target_media": { "language": "es" } } } ] }, "EndOfStream": { "name": "EndOfStream", "title": "End of Stream", "description": "This message indicates that all outputs are complete and the stream ended. It is the very last message the client will receive after it sends End of Source Media. 
You can safely close the connection after you received this message.\n", "contentType": "application/json", "payload": { "$ref": "#/components/schemas/EndOfStreamPayload" }, "examples": [ { "name": "EndStream", "summary": "Stream ended", "payload": { "end_of_stream": {} } } ] }, "Error": { "name": "Error", "title": "Error", "description": "This message reports errors encountered during audio processing or streaming. It includes an error code, reason code, and a human-readable message. After an error, the session is terminated and reconnection is not possible. You need to request a new session.\n", "contentType": "application/json", "payload": { "$ref": "#/components/schemas/ErrorPayload" }, "examples": [ { "name": "AudioFormatError", "summary": "Audio format error", "payload": { "error": { "request_type": "source_media_chunk", "error_code": 400, "reason_code": 4000403, "error_message": "Audio format not supported" } } } ] } }, "schemas": { "SourceMediaChunkPayload": { "type": "object", "required": [ "source_media_chunk" ], "properties": { "source_media_chunk": { "type": "object", "required": [ "data" ], "properties": { "data": { "type": "string", "format": "binary", "description": "Audio data in the audio format specified during session initialization. Encoded as base64 string when using JSON. 
Raw binary data when using MessagePack.\n" } } } } }, "EndOfSourceMediaPayload": { "type": "object", "required": [ "end_of_source_media" ], "properties": { "end_of_source_media": { "type": "object", "description": "Empty object signaling end of media stream" } } }, "TranscriptSegment": { "type": "object", "required": [ "text", "start_time", "end_time" ], "properties": { "text": { "type": "string", "description": "Source or target transcript text" }, "start_time": { "type": "integer", "description": "Estimated start time of the segment in the input stream in milliseconds", "examples": [ 1250 ] }, "end_time": { "type": "integer", "description": "Estimated end time of the segment in the input stream in milliseconds", "examples": [ 1570 ] } } }, "SourceTranscriptSegment": { "allOf": [ { "$ref": "#/components/schemas/TranscriptSegment" }, { "type": "object", "required": [ "language" ], "properties": { "language": { "type": "string", "description": "IETF BCP 47 language tag of the detected source language", "examples": [ "en" ] } } } ] }, "SourceTranscriptUpdatePayload": { "type": "object", "required": [ "source_transcript_update" ], "properties": { "source_transcript_update": { "type": "object", "required": [ "concluded", "tentative" ], "properties": { "concluded": { "type": "array", "description": "Array of fixed transcript segments that will not change anymore. Array objects contain `language` property of type `string` (IETF BCP 47 language tag of the detected source language), `text` property of type `string` (Source or target transcript text), `start_time` property of type `integer` (Estimated start time of the segment in the input stream in milliseconds) and `end_time` property of type `integer` (Estimated end time of the segment in the input stream in milliseconds).\n", "items": { "$ref": "#/components/schemas/SourceTranscriptSegment" } }, "tentative": { "type": "array", "description": "Array of preliminary transcript segments that are subject to change. 
Array objects contain `language` property of type `string` (IETF BCP 47 language tag of the detected source language), `text` property of type `string` (Source or target transcript text), `start_time` property of type `integer` (Estimated start time of the segment in the input stream in milliseconds) and `end_time` property of type `integer` (Estimated end time of the segment in the input stream in milliseconds).\n", "items": { "$ref": "#/components/schemas/SourceTranscriptSegment" } } } } } }, "TargetTranscriptUpdatePayload": { "type": "object", "required": [ "target_transcript_update" ], "properties": { "target_transcript_update": { "type": "object", "required": [ "language", "concluded", "tentative" ], "properties": { "language": { "type": "string", "description": "IETF BCP 47 language tag of the target language", "examples": [ "es" ] }, "concluded": { "type": "array", "description": "Array of fixed transcript segments that will not change anymore. Array objects contain `text` property of type `string` (Source or target transcript text), `start_time` property of type `integer` (Estimated start time of the segment in the input stream in milliseconds) and `end_time` property of type `integer` (Estimated end time of the segment in the input stream in milliseconds).\n", "items": { "$ref": "#/components/schemas/TranscriptSegment" } }, "tentative": { "type": "array", "description": "Array of preliminary transcript segments that are subject to change. 
Array objects contain `text` property of type `string` (Source or target transcript text), `start_time` property of type `integer` (Estimated start time of the segment in the input stream in milliseconds) and `end_time` property of type `integer` (Estimated end time of the segment in the input stream in milliseconds).\n", "items": { "$ref": "#/components/schemas/TranscriptSegment" } } } } } }, "TargetMediaChunkPayload": { "type": "object", "required": [ "target_media_chunk" ], "properties": { "target_media_chunk": { "type": "object", "required": [ "language", "data", "duration" ], "properties": { "language": { "type": "string", "description": "IETF BCP 47 language tag of the target media language", "examples": [ "de" ] }, "content_type": { "type": "string", "description": "(Optional) MIME type of the audio stream. Only present in the first message of this type.", "examples": [ "audio/webm;codecs=opus;" ] }, "headers": { "type": "integer", "description": "(Optional) Number of packets at the start of the data array that contain initialization/header data. Only present in the first message of this type. When present, the first N elements in the data array (where N equals the headers value) contain header/initialization data required by the decoder, and subsequent elements contain audio packets. For containerized formats, all packets can be passed directly to the demuxer. For raw codec formats, header packets must be used to initialize the decoder before processing audio packets. When null or absent, all packets are audio data.", "examples": [ 1 ] }, "data": { "type": "array", "description": "Array of indivisible chunks of audio data (e.g., codec packets or container pages/clusters). Each element is encoded as base64 string when using JSON message format, or raw binary data when using MessagePack message format. 
When the headers field is present, the first N elements contain header data, and subsequent elements contain audio packets.", "items": { "type": "string", "format": "byte" } }, "duration": { "type": "integer", "description": "The total playback duration of all audio data in this chunk, measured in milliseconds. Accumulate duration values across chunks belonging to the same text segment to determine the total playback time for that subtitle. Also useful for synchronization, buffering calculations, and determining the timing of subsequent chunks.", "examples": [ 2400 ] }, "text": { "type": "string", "description": "(Optional) The target transcript segment from which this audio was synthesized. Present only in the first audio chunk belonging to a new transcript segment. Subsequent audio chunks for the same transcript segment will have this field set to null. Multiple audio chunks can belong to the same text segment. The cumulative content of this field across all chunks matches the cumulative target transcript received via target transcript updates. This allows clients to associate audio chunks with their corresponding transcript segments and display synchronized captions or subtitles during playback." 
} } } } }, "EndOfSourceTranscriptPayload": { "type": "object", "required": [ "end_of_source_transcript" ], "properties": { "end_of_source_transcript": { "type": "object", "description": "Empty object indicating source transcript is complete" } } }, "EndOfTargetTranscriptPayload": { "type": "object", "required": [ "end_of_target_transcript" ], "properties": { "end_of_target_transcript": { "type": "object", "required": [ "language" ], "properties": { "language": { "type": "string", "description": "IETF BCP 47 language tag indicating which target transcript has ended", "examples": [ "fr" ] } } } } }, "EndOfTargetMediaPayload": { "type": "object", "required": [ "end_of_target_media" ], "properties": { "end_of_target_media": { "type": "object", "required": [ "language" ], "properties": { "language": { "type": "string", "description": "IETF BCP 47 language tag indicating which target media stream has ended", "examples": [ "es" ] } } } } }, "EndOfStreamPayload": { "type": "object", "required": [ "end_of_stream" ], "properties": { "end_of_stream": { "type": "object", "description": "Empty object indicating all outputs are complete" } } }, "ErrorPayload": { "type": "object", "required": [ "error" ], "properties": { "error": { "type": "object", "required": [ "request_type", "error_code", "reason_code", "error_message" ], "properties": { "request_type": { "type": "string", "description": "The type of request that caused the error", "examples": [ "source_media_chunk" ] }, "error_code": { "type": "integer", "description": "HTTP-style error code", "examples": [ 400 ] }, "reason_code": { "type": "integer", "description": "Detailed reason code for debugging", "examples": [ 4000403 ] }, "error_message": { "type": "string", "description": "Human-readable error description", "examples": [ "Audio format not supported" ] } } } } } } } }