Raw Audio Data

Update time: 2025/06/11 16:45:39

The audio module of the NERTC SDK gives you strict control over the capture and playback logic of audio devices, and supports custom pre-processing and post-processing of the captured raw audio data to achieve the desired playback effect. It is suitable for scenarios such as non-standard device access, custom audio effects, voice processing, and voice recognition.

  • Pre-processing: obtaining the raw audio data before it is sent to the encoder so that you can modify it. It is mainly used for audio data captured by the local microphone or supplied by a custom external audio stream.
  • Post-processing: obtaining the raw audio data after it has been decoded so that you can modify it before playback. It is mainly used for audio data received from remote users.

The NERTC SDK implements raw audio data collection and modification through the NERtcAudioFrameObserver class.
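
A minimal sketch of such an observer is shown below, based on the callback set used in the sample code at the end of this page. The comments map each callback to the processing stages described above; the description of onMixedAudioFrame is an assumption, so check it against the API reference before relying on it.

NERtcAudioFrameObserver observer = new NERtcAudioFrameObserver() {
    @Override
    public void onRecordFrame(NERtcAudioFrame audioFrame) {
        // Pre-processing: audio captured by the local microphone or a custom
        // external source, delivered before it is sent to the encoder.
    }

    @Override
    public void onPlaybackFrame(NERtcAudioFrame audioFrame) {
        // Post-processing: decoded audio of the remote users, delivered
        // before it is played back.
    }

    @Override
    public void onPlaybackAudioFrameBeforeMixingWithUserID(long userID, NERtcAudioFrame audioFrame) {
        // Post-processing: decoded audio of a single remote user, delivered
        // before the remote streams are mixed for playback.
    }

    @Override
    public void onMixedAudioFrame(NERtcAudioFrame audioFrame) {
        // Audio after mixing (assumption: the mix of local and remote audio).
    }
};
NERtcEx.getInstance().setAudioFrameObserver(observer);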

Considerations

Before processing any raw audio data, make sure that you have implemented the basic real-time audio and video functions in your project.

Method

You can take the following steps to implement raw audio data processing in your project:

  1. Call setRecordingAudioFrameParameters, setPlaybackAudioFrameParameters, and setMixedAudioFrameParameters to set the sample rate, number of channels, and read/write mode of the audio data returned by the callbacks.
  2. Call the setAudioFrameObserver method to register an audio frame observer that implements the NERtcAudioFrameObserver class.
  3. The SDK returns the raw audio data through the onRecordFrame, onPlaybackFrame, onPlaybackAudioFrameBeforeMixingWithUserID, and onMixedAudioFrame callbacks.
  4. After obtaining the audio data, process it according to the needs of your scenario.
  5. After the audio data is processed, you can play it directly, or return it to the SDK through the onRecordFrame and onPlaybackFrame callbacks, according to the needs of your scenario (see the sketch after this list).
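
For steps 4 and 5, the sketch below modifies the recorded audio in place inside onRecordFrame. It assumes 16-bit PCM samples in the byte order delivered by the SDK, and that the operation mode was set to kNERtcAudioFrameOpModeReadWrite so that the SDK uses the modified buffer; treat it as an illustration rather than a drop-in implementation.

// requires: import java.nio.ByteBuffer;
@Override
public void onRecordFrame(NERtcAudioFrame audioFrame) {
    // Halve the volume of the locally captured audio before it is encoded.
    // Assumes 16-bit PCM samples and read-write operation mode.
    ByteBuffer data = audioFrame.getData();
    for (int i = data.position(); i + 1 < data.limit(); i += 2) {
        short sample = data.getShort(i);        // read one 16-bit sample
        data.putShort(i, (short) (sample / 2)); // write it back, attenuated
    }
}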

Sample code

// Set the audio callback parameters
NERtcAudioFrameRequestFormat formatMix = new NERtcAudioFrameRequestFormat();
formatMix.setChannels(channel);
formatMix.setSampleRate(sampleRate);
formatMix.setOpMode(readOnly.isChecked() ? NERtcAudioFrameOpMode.kNERtcAudioFrameOpModeReadOnly : NERtcAudioFrameOpMode.kNERtcAudioFrameOpModeReadWrite);
Log.i(TAG, "AudioCallback, channels: " + formatMix.getChannels() + " sampleRate: " + formatMix.getSampleRate() + " opMode: " + formatMix.getOpMode());
NERtcEx.getInstance().setMixedAudioFrameParameters(formatMix);
NERtcEx.getInstance().setPlaybackAudioFrameParameters(formatMix);
NERtcEx.getInstance().setRecordingAudioFrameParameters(formatMix);
NERtcEx.getInstance().setAudioFrameObserver(observer);

// Audio data callback processing: in this sample, each callback dumps the received PCM data to a local file
observer = new NERtcAudioFrameObserver() {
            @Override
            public void onRecordFrame(NERtcAudioFrame audioFrame) {
                try {
                    if(!isAudioCallbackDump){
                        return;
                    }
                    if (pcmCallbackRecordDump == null) {
                        pcmCallbackRecordDump = createPCMDump("Record_" + audioFrame.getFormat().getChannels()
                                + "_" + audioFrame.getFormat().getSampleRate());
                        // Check the dump file that was just created
                        if (pcmCallbackRecordDump == null) {
                            Log.e(TAG, "create dump file failed!");
                            return;
                        }
                    }

                    byte[] remaining = new byte[audioFrame.getData().remaining()];
                    audioFrame.getData().get(remaining);
                    pcmCallbackRecordDump.write(remaining);

                } catch (Exception e) {
                    e.printStackTrace();
                }
            }

            @Override
            public void onPlaybackFrame(NERtcAudioFrame audioFrame) {
                if(!isAudioCallbackDump){
                    return;
                }
                try {

                    if (pcmCallbackPlaybackDump == null) {
                        pcmCallbackPlaybackDump = createPCMDump("PlayBack_" + audioFrame.getFormat().getChannels()
                                + "_" + audioFrame.getFormat().getSampleRate());
                        // Check the dump file that was just created
                        if (pcmCallbackPlaybackDump == null) {
                            Log.e(TAG, "create dump file failed!");
                            return;
                        }
                    }

                    byte[] remaining = new byte[audioFrame.getData().remaining()];
                    audioFrame.getData().get(remaining);
                    pcmCallbackPlaybackDump.write(remaining);

                } catch (Exception e) {
                    e.printStackTrace();
                }
            }

            @Override
            public void onPlaybackAudioFrameBeforeMixingWithUserID(long userID, NERtcAudioFrame audioFrame) {
                if(!isAudioCallbackDump){
                    return;
                }
                try {
                    if(mRemoteUserMap.get(userID) != null){

                        if(mRemoteUserMap.get(userID).audioPCMDump == null){

                            mRemoteUserMap.get(userID).audioPCMDump = createPCMDump("PlayBackUid_"+ userID +"_"+ audioFrame.getFormat().getChannels()
                                    +"_"+ audioFrame.getFormat().getSampleRate());
                            if(mRemoteUserMap.get(userID).audioPCMDump == null){
                                Log.e(TAG, "create dump file failed!");
                                return;
                            }
                        }

                        byte[] remaining = new byte[audioFrame.getData().remaining()];
                        audioFrame.getData().get(remaining);
                        mRemoteUserMap.get(userID).audioPCMDump.write(remaining);

                    }
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }

            @Override
            public void onMixedAudioFrame(NERtcAudioFrame audioFrame) {
                if(!isAudioCallbackDump){
                    return;
                }
                try {
                    if (pcmCallbackMixDump == null) {
                        pcmCallbackMixDump = createPCMDump("Mix_" +audioFrame.getFormat().getChannels()
                                +"_"+ audioFrame.getFormat().getSampleRate());
                        if(pcmCallbackMixDump == null) {
                            Log.e(TAG, "create dump file failed!");
                            return;
                        }
                    }

                    byte[] remaining = new byte[audioFrame.getData().remaining()];
                    audioFrame.getData().get(remaining);
                    pcmCallbackMixDump.write(remaining);

                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        };