
I am developing a VoIP application with Opus for iOS (Objective-C and C++).
It works fine with 8000, 12000, 24000 and 48000 Hz sampling rates, but with 16000 Hz the application crashes in the opus_encode method.


Here is what I am doing:

m_oAudioSession = [AVAudioSession sharedInstance];
[m_oAudioSession setCategory:AVAudioSessionCategoryPlayAndRecord error:&m_oError];
[m_oAudioSession setMode:AVAudioSessionModeVoiceChat error:&m_oError];

[m_oAudioSession setPreferredSampleRate:VOIP_AUDIO_DRIVER_DEFAULT_SAMPLE_RATE error:&m_oError];
[m_oAudioSession setPreferredInputNumberOfChannels:VOIP_AUDIO_DRIVER_DEFAULT_INPUT_CHANNELS error:&m_oError];
[m_oAudioSession setPreferredOutputNumberOfChannels:VOIP_AUDIO_DRIVER_DEFAULT_OUTPUT_CHANNELS error:&m_oError];
[m_oAudioSession setPreferredIOBufferDuration:VOIP_AUDIO_DRIVER_DEFAULT_BUFFER_DURATION error:&m_oError];

[m_oAudioSession setActive:YES error:&m_oError];


Constants:

VOIP_AUDIO_DRIVER_DEFAULT_SAMPLE_RATE is 16000
VOIP_AUDIO_DRIVER_DEFAULT_INPUT_CHANNELS is 1
VOIP_AUDIO_DRIVER_DEFAULT_OUTPUT_CHANNELS is 1
VOIP_AUDIO_DRIVER_DEFAULT_BUFFER_DURATION is 0.02
VOIP_AUDIO_DRIVER_FRAMES_PER_PACKET is 1
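
With these preferred values, one IO buffer would be roughly 0.02 s * 16000 Hz = 320 mono samples, i.e. 640 bytes of 16-bit PCM, although AVAudioSession is free to adjust the actual sample rate and buffer duration.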


After that I use the real sampling rate and buffer duration reported by m_oAudioSession.sampleRate and m_oAudioSession.IOBufferDuration; they are stored in the m_fSampleRate and m_fBufferDuration variables.
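
For clarity, the readback is simply (same member names as above):

m_fSampleRate     = m_oAudioSession.sampleRate;
m_fBufferDuration = m_oAudioSession.IOBufferDuration;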

The configurations are:

//Describes audio component:
m_sAudioDescription.componentType         = kAudioUnitType_Output;
m_sAudioDescription.componentSubType      = kAudioUnitSubType_VoiceProcessingIO/*kAudioUnitSubType_RemoteIO*/;
m_sAudioDescription.componentFlags        = 0;
m_sAudioDescription.componentFlagsMask    = 0;
m_sAudioDescription.componentManufacturer = kAudioUnitManufacturer_Apple;

m_sAudioFormat.mSampleRate          = m_fSampleRate;
m_sAudioFormat.mFormatID            = kAudioFormatLinearPCM;
m_sAudioFormat.mFormatFlags         = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
m_sAudioFormat.mFramesPerPacket     = VOIP_AUDIO_DRIVER_FRAMES_PER_PACKET;
m_sAudioFormat.mChannelsPerFrame    = VOIP_AUDIO_DRIVER_DEFAULT_INPUT_CHANNELS;
m_sAudioFormat.mBitsPerChannel      = (UInt32)(8 * m_iBytesPerSample);
m_sAudioFormat.mBytesPerFrame       = (UInt32)((m_sAudioFormat.mBitsPerChannel / 8) * m_sAudioFormat.mChannelsPerFrame); 
m_sAudioFormat.mBytesPerPacket      = m_sAudioFormat.mBytesPerFrame * m_sAudioFormat.mFramesPerPacket; 
m_sAudioFormat.mReserved            = 0;
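
With m_iBytesPerSample = sizeof(SInt16) and one channel, this describes packed, signed 16-bit mono PCM: mBitsPerChannel = 16, mBytesPerFrame = 2 and mBytesPerPacket = 2.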


The calculations I make are:

m_iBytesPerSample = sizeof(/*AudioSampleType*/SInt16);

//Calculating buffer size:
int samplesPerFrame = (int)(m_fBufferDuration * m_fSampleRate) + 1;
m_iBufferSizeBytes = samplesPerFrame * m_iBytesPerSample;

//Allocating input buffer:
UInt32 inputBufferListSize = offsetof(AudioBufferList, mBuffers[0]) + (sizeof(AudioBuffer) * m_sAudioFormat.mChannelsPerFrame);
m_sInputBuffer = (AudioBufferList *)VoipAlloc(inputBufferListSize);
m_sInputBuffer->mNumberBuffers = m_sAudioFormat.mChannelsPerFrame;

//Pre-mallocating buffers for AudioBufferLists
for(VoipUInt32 tmp_int1 = 0; tmp_int1 < m_sInputBuffer->mNumberBuffers; tmp_int1++)
{
    m_sInputBuffer->mBuffers[tmp_int1].mNumberChannels = VOIP_AUDIO_DRIVER_DEFAULT_INPUT_CHANNELS;
    m_sInputBuffer->mBuffers[tmp_int1].mDataByteSize = (UInt32)m_iBufferSizeBytes;
    m_sInputBuffer->mBuffers[tmp_int1].mData = VoipAlloc(m_iBufferSizeBytes);
    memset(m_sInputBuffer->mBuffers[tmp_int1].mData, 0, m_iBufferSizeBytes);
}


Reading from and writing to the audio unit are done using m_sInputBuffer.
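
The input callback itself is not shown above; it follows the standard Core Audio pattern. A simplified sketch (VoipAudioDriver and the audio unit member m_oAudioUnit are placeholder names; pushItem is used the same way as for outputAudioQueue below):

static OSStatus Voip_InputCallback(void* inRefCon,
                                   AudioUnitRenderActionFlags* ioActionFlags,
                                   const AudioTimeStamp* inTimeStamp,
                                   UInt32 inBusNumber,
                                   UInt32 inNumberFrames,
                                   AudioBufferList* ioData)
{
    VoipAudioDriver* driver = (VoipAudioDriver*)inRefCon; //placeholder owner type

    //Pull the recorded frames from the voice-processing IO unit into m_sInputBuffer
    OSStatus status = AudioUnitRender(driver->m_oAudioUnit, ioActionFlags, inTimeStamp,
                                      inBusNumber, inNumberFrames, driver->m_sInputBuffer);
    if (status == noErr)
    {
        //Hand the raw PCM to the encoder side via the input queue
        driver->inputAudioQueue->pushItem(driver->m_sInputBuffer->mBuffers[0].mData,
                                          driver->m_sInputBuffer->mBuffers[0].mDataByteSize);
    }

    return status;
}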


Here is the Opus creation:

m_oEncoder = opus_encoder_create(m_iSampleRate, m_iNumberOfChannels, VOIP_AUDIO_CODECS_OPUS_APPLICATION_TYPE, &_error);
if (_error < 0)
{
    fprintf(stderr, "VoipAudioCodecs error: failed to create an encoder: %s\n", opus_strerror(_error));

    return;
}

_error = opus_encoder_ctl(m_oEncoder, OPUS_SET_BITRATE(VOIP_AUDIO_CODECS_OPUS_BITRATE));
if (_error < 0)
{
    fprintf(stderr, "VoipAudioCodecs error: failed to set the bitrate: %s\n", opus_strerror(_error));

    return;
}

m_oDecoder = opus_decoder_create(m_iSampleRate, m_iNumberOfChannels, &_error);
if (_error < 0)
{
    fprintf(stderr, "VoipAudioCodecs error: failed to create a decoder: %s\n", opus_strerror(_error));

    return;
}


The Opus configuration constants are:

VOIP_AUDIO_CODECS_OPUS_BITRATE is OPUS_BITRATE_MAX 
//64000 //70400 //84800 //112000
VOIP_AUDIO_CODECS_OPUS_APPLICATION_TYPE is OPUS_APPLICATION_VOIP
//OPUS_APPLICATION_AUDIO
VOIP_AUDIO_CODECS_OPUS_MAX_FRAME_SIZE is 5760 
//enough for the largest frame: 120 ms at 48 kHz = 5760 samples
VOIP_AUDIO_CODECS_OPUS_BYTES_SIZE is 960 
//120, 240, 480, 960, 1920, 2880
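
(As used in Encode_Opus below, VOIP_AUDIO_CODECS_OPUS_BYTES_SIZE is a byte count: with 16-bit mono samples, 960 bytes correspond to a frame size of 480 samples per channel.)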


When I encode and decode I use these methods:

sVoipAudioCodecOpusEncoded* Encode_Opus(VoipInt16* rawSamples, int rawSamplesSize)
{
    unsigned char encodedData[m_iMaxPacketSize];
    VoipInt32 bytesEncoded;

    //frame size in samples per channel, as expected by opus_encode
    int frameSize = rawSamplesSize / m_iBytesPerSample;

    bytesEncoded = opus_encode(m_oEncoder, rawSamples, frameSize, encodedData, m_iMaxPacketSize);
    if (bytesEncoded < 0)
    {
        fprintf(stderr, "VoipAudioCodecs error: encode failed: %s\n", opus_strerror(bytesEncoded));

        return nullptr;
    }

    sVoipAudioCodecOpusEncoded* resultStruct = (sVoipAudioCodecOpusEncoded* )VoipAlloc(sizeof(sVoipAudioCodecOpusEncoded));

    resultStruct->m_data = (unsigned char*)VoipAlloc(bytesEncoded);
    memcpy(resultStruct->m_data, encodedData, bytesEncoded);
    resultStruct->m_dataSize = bytesEncoded;

    return resultStruct;
}

sVoipAudioCodecOpusDecoded* Decode_Opus(void* encodedSamples, VoipInt32 encodedSamplesSize)
{
    VoipInt16 decodedPacket[VOIP_AUDIO_CODECS_OPUS_MAX_FRAME_SIZE];

    //opus_decode returns the number of decoded samples per channel, or a negative error code
    int _frameSize = opus_decode(m_oDecoder, (const unsigned char*)encodedSamples, encodedSamplesSize, decodedPacket, VOIP_AUDIO_CODECS_OPUS_MAX_FRAME_SIZE, 0);
    if (_frameSize < 0)
    {
        fprintf(stderr, "VoipAudioCodecs error: decoder failed: %s\n", opus_strerror(_frameSize));

        return nullptr;
    }

    size_t frameSize = (size_t)_frameSize;

    sVoipAudioCodecOpusDecoded* resultStruct = (sVoipAudioCodecOpusDecoded* )VoipAlloc(sizeof(sVoipAudioCodecOpusDecoded));

    resultStruct->m_data = (VoipInt16*)VoipAlloc(frameSize * m_iBytesPerSample);
    memcpy(resultStruct->m_data, decodedPacket, (frameSize * m_iBytesPerSample));
    resultStruct->m_dataSize = frameSize * m_iBytesPerSample;

    return resultStruct;
}


When the app needs to send data:

VoipUInt32 itemsForProcess = inputAudioQueue->getItemCount();
for (int tmp_queueItems = 0; tmp_queueItems < itemsForProcess; tmp_queueItems++)
{
    sVoipQueue* tmp_samples = inputAudioQueue->popItem();

    m_oCircularTempInputBuffer->writeDataToBuffer(tmp_samples->m_pData, tmp_samples->m_iDataSize);
    while (void* tmp_buffer = m_oCircularTempInputBuffer->readDataFromBuffer(VOIP_AUDIO_CODECS_OPUS_BYTES_SIZE))
    {
        sVoipAudioCodecOpusEncoded* encodedSamples = Encode_Opus((VoipInt16*)tmp_buffer, VOIP_AUDIO_CODECS_OPUS_BYTES_SIZE);

        //Then packeting and the real sending using tcp socket…
    }
    //Rest of the code…
}


Here is the reading (receiving/decoding) side:

sVoipAudioCodecOpusDecoded* decodedSamples = Decode_Opus(inputPacket->m_pPacketData, (VoipInt32)inputPacket->m_iPacketSize);
if (decodedSamples != nullptr)
{
    m_oCircularTempOutputBuffer->writeDataToBuffer(decodedSamples->m_data, decodedSamples->m_dataSize);

    VoipFree((void**)&decodedSamples->m_data);
    VoipFree((void**)&decodedSamples);
}

while (void* tmp_buffer = m_oCircularTempOutputBuffer->readDataFromBuffer(m_iBufferSizeBytes))
{
    outputAudioQueue->pushItem(tmp_buffer, m_iBufferSizeBytes);
}

inputAudioQueue is a queue holding the data recorded in my audio unit's callback.
outputAudioQueue is a queue that my audio unit's callback reads from to play the sound.
m_iMaxPacketSize is the same as m_iBufferSizeBytes.
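
For reference, the playback callback is the usual mirror of the recording side; a simplified sketch (VoipAudioDriver again being a placeholder name):

static OSStatus Voip_OutputCallback(void* inRefCon,
                                    AudioUnitRenderActionFlags* ioActionFlags,
                                    const AudioTimeStamp* inTimeStamp,
                                    UInt32 inBusNumber,
                                    UInt32 inNumberFrames,
                                    AudioBufferList* ioData)
{
    VoipAudioDriver* driver = (VoipAudioDriver*)inRefCon; //placeholder owner type

    if (driver->outputAudioQueue->getItemCount() > 0)
    {
        //Play the next decoded chunk
        sVoipQueue* item = driver->outputAudioQueue->popItem();
        UInt32 bytesToCopy = MIN((UInt32)item->m_iDataSize, ioData->mBuffers[0].mDataByteSize);
        memcpy(ioData->mBuffers[0].mData, item->m_pData, bytesToCopy);
    }
    else
    {
        //Nothing decoded yet: output silence
        memset(ioData->mBuffers[0].mData, 0, ioData->mBuffers[0].mDataByteSize);
    }

    return noErr;
}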


My questions are:
Are my calculations correct?
If not, how can I improve them?
Do you see any mistake in the code?
Do you have a suggestion for fixing the crash in opus_encode when the sampling rate is set to 16000?

Thank you in advance.

PS. I made some tests with the sampling rate at 16000 and found this:
If I use the formula frame_duration = frame_size / sample_rate and set frame_duration as the preferredIOBufferDuration:
120 / 16000 = 0.0075 //AVAudioSession sets 0.008000 —— Crashes
240 / 16000 = 0.015 //AVAudioSession sets 0.016000 —— Crashes
480 / 16000 = 0.03 //AVAudioSession sets 0.032000 —— Crashes
960 / 16000 = 0.06 //AVAudioSession sets 0.064000 —— Crashes
1920 / 16000 = 0.12 //AVAudioSession sets 0.128000 —— Works
2880 / 16000 = 0.18 //AVAudioSession sets 0.128000 —— Crashes
So there is no encoder crash at a sampling rate of 16000 only when preferredIOBufferDuration is 0.12 (1920 samples), where AVAudioSession actually sets 0.128000. It works only in that case.
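
For reference, the durations above come from this small standalone check:

#include <cstdio>

int main()
{
    const int sampleRate = 16000;
    const int frameSizes[] = { 120, 240, 480, 960, 1920, 2880 };

    for (int frameSize : frameSizes)
    {
        //frame_duration = frame_size / sample_rate
        printf("%d / %d = %f s\n", frameSize, sampleRate, (double)frameSize / sampleRate);
    }

    return 0;
}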

Any ideas?
