> For a complete documentation index, fetch https://docs.voximplant.ai/llms.txt

# Example: Answering an incoming call

> This example answers an inbound Voximplant call and bridges audio to OpenAI Realtime for speech‑to‑speech conversations.

<blockquote>
  For the complete documentation index, see <a href="/llms.txt">llms.txt</a>.
</blockquote>

The scenario below answers an inbound call, configures an OpenAI Realtime session, and then streams audio in both directions between the caller and the model.

**Jump to the [Full VoxEngine scenario](#full-voxengine-scenario).**

## Prerequisites

* Set up an inbound entrypoint for the caller:
  * Phone number: [https://voximplant.com/docs/getting-started/basic-concepts/phone-numbers](https://voximplant.com/docs/getting-started/basic-concepts/phone-numbers)
  * WhatsApp: [https://voximplant.com/docs/guides/integrations/whatsapp](https://voximplant.com/docs/guides/integrations/whatsapp)
  * SIP user / SIP registration: [https://voximplant.com/docs/guides/calls/sip](https://voximplant.com/docs/guides/calls/sip)
  * App user: [https://voximplant.com/docs/getting-started/basic-concepts/users](https://voximplant.com/docs/getting-started/basic-concepts/users) (see also [https://voximplant.com/docs/guides/calls/scenarios#how-to-call-a-voximplant-user](https://voximplant.com/docs/guides/calls/scenarios#how-to-call-a-voximplant-user))
* Create a routing rule that points the destination (phone number / WhatsApp / SIP username / app user alias) to this scenario: [https://voximplant.com/docs/getting-started/basic-concepts/routing-rules](https://voximplant.com/docs/getting-started/basic-concepts/routing-rules)
* Store your OpenAI API key in Voximplant [Secrets](/platform/voxengine/secrets) under `OPENAI_API_KEY`.

## Session setup

The example configures the OpenAI Realtime session with a short system prompt, a voice, and server VAD:

```js title="Session setup"
voiceAIClient.sessionUpdate({
  session: {
    type: "realtime",
    instructions: SYSTEM_PROMPT,
    voice: "alloy",
    output_modalities: ["audio"],
    turn_detection: { type: "server_vad", interrupt_response: true },
  },
});
```

## Connect call audio

Once the session is ready, bridge audio both ways between the call and OpenAI:

```js title="Connect call audio"
VoxEngine.sendMediaBetween(call, voiceAIClient);
```

## Barge-in

When the caller starts speaking while the agent's reply is still playing, clear the buffered agent audio so playback stops immediately and the model can listen to the new utterance:
```js title="Barge-in"
voiceAIClient.addEventListener(OpenAI.RealtimeAPIEvents.InputAudioBufferSpeechStarted, () => {
  voiceAIClient.clearMediaBuffer();
});
```

## Notes

[See the VoxEngine API Reference for more details](https://voximplant.com/docs/references/voxengine/openai).

## Full VoxEngine scenario

```javascript title={"voxengine-openai-answer-incoming-call.js"} maxLines={0}
/**
 * Voximplant + OpenAI Realtime API connector demo
 * Scenario: answer an incoming call and bridge it to OpenAI Realtime.
 *
 * Flow: CallAlerting -> answer -> create Realtime client -> SessionCreated
 * (push session config) -> SessionUpdated (bridge media, greet caller).
 * The scenario terminates when either leg disconnects/fails or the
 * OpenAI WebSocket closes.
 */

require(Modules.OpenAI);

// System prompt applied via session.update once the Realtime session exists.
const SYSTEM_PROMPT = `
You are Voxi, a helpful voice assistant for phone callers.
Keep responses short and telephony-friendly (usually 1-2 sentences).
`;

// Realtime session configuration: audio-only output, server-side VAD with
// interruption enabled so the caller can barge in over the agent.
const SESSION_CONFIG = {
    session: {
        type: "realtime",
        instructions: SYSTEM_PROMPT,
        voice: "alloy",
        output_modalities: ["audio"],
        turn_detection: {type: "server_vad", interrupt_response: true},
    },
};

VoxEngine.addEventListener(AppEvents.CallAlerting, async ({call}) => {
    let voiceAIClient;

    // End the scenario as soon as the telephony leg goes away.
    call.addEventListener(CallEvents.Disconnected, () => VoxEngine.terminate());
    call.addEventListener(CallEvents.Failed, () => VoxEngine.terminate());

    try {
        call.answer();
        // call.record({hd_audio: true, stereo: true}); // Optional: record the call

        voiceAIClient = await OpenAI.createRealtimeAPIClient({
            apiKey: VoxEngine.getSecretValue('OPENAI_API_KEY'),
            model: "gpt-realtime-1.5",
            onWebSocketClose: (event) => {
                Logger.write("===OpenAI.WebSocket.Close===");
                if (event) Logger.write(JSON.stringify(event));
                VoxEngine.terminate();
            },
        });

        //---------------------- Event handlers -----------------------
        // Push our configuration as soon as OpenAI reports the session exists.
        voiceAIClient.addEventListener(OpenAI.RealtimeAPIEvents.SessionCreated, () => {
            voiceAIClient.sessionUpdate(SESSION_CONFIG);
        });

        // Only after the config is acknowledged: bridge audio both ways and
        // have the agent speak first.
        voiceAIClient.addEventListener(OpenAI.RealtimeAPIEvents.SessionUpdated, () => {
            VoxEngine.sendMediaBetween(call, voiceAIClient);
            voiceAIClient.responseCreate({instructions: "Hello! How can I help today?"});
        });

        // Barge-in: clear buffered audio when the caller starts speaking
        voiceAIClient.addEventListener(OpenAI.RealtimeAPIEvents.InputAudioBufferSpeechStarted, () => {
            Logger.write("===BARGE-IN: OpenAI.InputAudioBufferSpeechStarted===");
            voiceAIClient.clearMediaBuffer();
        });

        // Caller-side transcript (payload shape varies by event version, so
        // fall back across transcript/text/delta fields).
        voiceAIClient.addEventListener(
            OpenAI.RealtimeAPIEvents.ConversationItemInputAudioTranscriptionCompleted,
            (event) => {
                const payload = event?.data?.payload || event?.data || {};
                const transcript = payload.transcript || payload.text || payload.delta;
                if (transcript) Logger.write(`===USER=== ${transcript}`);
            }
        );

        // Agent-side transcript: streaming deltas ...
        voiceAIClient.addEventListener(
            OpenAI.RealtimeAPIEvents.ResponseOutputAudioTranscriptDelta,
            (event) => {
                const payload = event?.data?.payload || event?.data || {};
                if (payload.delta) Logger.write(`===AGENT_DELTA=== ${payload.delta}`);
            }
        );

        // ... and the final consolidated transcript.
        voiceAIClient.addEventListener(
            OpenAI.RealtimeAPIEvents.ResponseOutputAudioTranscriptDone,
            (event) => {
                const payload = event?.data?.payload || event?.data || {};
                const transcript = payload.transcript || payload.text;
                if (transcript) Logger.write(`===AGENT=== ${transcript}`);
            }
        );

        // Consolidated "log-only" handlers - key OpenAI/VoxEngine debugging events
        [
            OpenAI.RealtimeAPIEvents.ResponseCreated,
            OpenAI.RealtimeAPIEvents.ResponseDone,
            OpenAI.RealtimeAPIEvents.ResponseOutputAudioDone,
            OpenAI.RealtimeAPIEvents.ConnectorInformation,
            OpenAI.RealtimeAPIEvents.HTTPResponse,
            OpenAI.RealtimeAPIEvents.WebSocketError,
            OpenAI.RealtimeAPIEvents.Unknown,
            OpenAI.Events.WebSocketMediaStarted,
            OpenAI.Events.WebSocketMediaEnded,
        ].forEach((eventName) => {
            voiceAIClient.addEventListener(eventName, (event) => {
                Logger.write(`===${event.name}===`);
                if (event?.data) Logger.write(JSON.stringify(event.data));
            });
        });
    } catch (error) {
        Logger.write("===UNHANDLED_ERROR===");
        // Logger.write takes a string: stringify the error (preferring the
        // stack, when present) so the log shows the actual failure instead
        // of "[object Object]".
        Logger.write(String(error?.stack ?? error));
        voiceAIClient?.close();
        VoxEngine.terminate();
    }
});
```