/**
 * Voximplant + OpenAI Realtime API + Cartesia TTS demo
 * Scenario: OpenAI handles STT/LLM, Cartesia handles TTS (half-cascade).
 */

require(Modules.OpenAI);
require(Modules.Cartesia);
require(Modules.ApplicationStorage);

const SYSTEM_PROMPT = `
You are Voxi, a helpful phone assistant.
Keep responses short and telephony-friendly.
Reply in English.
`;

const CARTESIA_MODEL_ID = "sonic-2";
const CARTESIA_VOICE_ID = "a0e99841-438c-4a64-b679-ae501e7d6091";
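// Half-cascade session config: OpenAI is asked for text output only
// (output_modalities: ["text"]) because Cartesia produces the audio.
// Server-side VAD with interrupt_response enables barge-in (handled further below).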
const SESSION_CONFIG = {
  session: {
    type: "realtime",
    instructions: SYSTEM_PROMPT,
    output_modalities: ["text"],
    turn_detection: {type: "server_vad", interrupt_response: true},
  },
};
VoxEngine.addEventListener(AppEvents.CallAlerting, async ({call}) => {
  let voiceAIClient;
  let ttsPlayer;

  call.addEventListener(CallEvents.Disconnected, () => VoxEngine.terminate());
  call.addEventListener(CallEvents.Failed, () => VoxEngine.terminate());

  try {
    call.answer();
    // call.record({hd_audio: true, stereo: true}); // Optional: record the call
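    // The OpenAI key is read from the application's key-value storage; it must be
    // saved under "OPENAI_API_KEY" beforehand (e.g., via ApplicationStorage.put).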
    const openAiKey = (await ApplicationStorage.get("OPENAI_API_KEY"))?.value;
    if (!openAiKey) throw new Error("OPENAI_API_KEY is not set in ApplicationStorage");

    voiceAIClient = await OpenAI.createRealtimeAPIClient({
      apiKey: openAiKey,
      model: "gpt-realtime",
      onWebSocketClose: (event) => {
        Logger.write("===OpenAI.WebSocket.Close===");
        if (event) Logger.write(JSON.stringify(event));
        VoxEngine.terminate();
      },
    });
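    // Handshake: once the WebSocket session exists, push our config; after OpenAI
    // confirms it, start streaming the caller's audio and request a greeting.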
    voiceAIClient.addEventListener(OpenAI.RealtimeAPIEvents.SessionCreated, () => {
      voiceAIClient.sessionUpdate(SESSION_CONFIG);
    });

    voiceAIClient.addEventListener(OpenAI.RealtimeAPIEvents.SessionUpdated, () => {
      call.sendMediaTo(voiceAIClient);
      voiceAIClient.responseCreate({instructions: "Hello! How can I help today?"});
    });
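    // Every completed text response from OpenAI is voiced by Cartesia as an independent
    // generation request: a fresh context_id each time, with continue set to false.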
    voiceAIClient.addEventListener(OpenAI.RealtimeAPIEvents.ResponseOutputTextDone, (event) => {
      const payload = event?.data?.payload || event?.data || {};
      const text = payload.text || payload.delta;
      if (!text) return;
      Logger.write(`===AGENT_TEXT=== ${text}`);

      const ttsRequest = {
        model_id: CARTESIA_MODEL_ID,
        transcript: text,
        language: "en",
        voice: {mode: "id", id: CARTESIA_VOICE_ID},
        context_id: `openai-cartesia-${Date.now()}`,
        continue: false,
      };

      // Cartesia's realtime TTS player needs a transcript at creation time,
      // so it is created lazily on the first agent response
      if (!ttsPlayer) {
        ttsPlayer = Cartesia.createRealtimeTTSPlayer(text, {
          // apikey: (await ApplicationStorage.get("CARTESIA_API_KEY")).value, // optional; requires an async listener
          generationRequestParameters: ttsRequest,
        });
        ttsPlayer.sendMediaTo(call);
        return;
      }

      ttsPlayer.generationRequest(ttsRequest);
    });
    // Barge-in: the caller started speaking, so clear both the OpenAI and Cartesia buffers
    voiceAIClient.addEventListener(OpenAI.RealtimeAPIEvents.InputAudioBufferSpeechStarted, () => {
      Logger.write("===BARGE-IN: OpenAI.InputAudioBufferSpeechStarted===");
      voiceAIClient.clearMediaBuffer();
      ttsPlayer?.clearBuffer();
    });
    // ---------------------- Log all other events for debugging -----------------------
    [
      OpenAI.RealtimeAPIEvents.ResponseCreated,
      OpenAI.RealtimeAPIEvents.ResponseDone,
      OpenAI.RealtimeAPIEvents.ResponseOutputTextDelta,
      OpenAI.RealtimeAPIEvents.ConnectorInformation,
      OpenAI.RealtimeAPIEvents.HTTPResponse,
      OpenAI.RealtimeAPIEvents.WebSocketError,
      OpenAI.RealtimeAPIEvents.Unknown,
      OpenAI.Events.WebSocketMediaStarted,
      OpenAI.Events.WebSocketMediaEnded,
    ].forEach((eventName) => {
      voiceAIClient.addEventListener(eventName, (event) => {
        Logger.write(`===${event.name}===`);
        if (event?.data) Logger.write(JSON.stringify(event.data));
      });
    });
  } catch (error) {
    Logger.write("===UNHANDLED_ERROR===");
    Logger.write(error instanceof Error ? error.message : String(error));
    voiceAIClient?.close();
    VoxEngine.terminate();
  }
});