
I'm looking to pan a mono signal using MTAudioProcessingTap and a Multichannel Mixer audio unit, but I'm getting mono output instead of a panned stereo output. The documentation states:

"The Multichannel Mixer unit (subtype kAudioUnitSubType_MultiChannelMixer) takes any number of mono or stereo streams and combines them into a single stereo output."

So the mono output was unexpected. Is there any way around this? I ran a stereo signal through exactly the same code and everything worked great: stereo output, panned as expected. Here's the code from my tap's prepare callback:

static void tap_PrepareCallback(MTAudioProcessingTapRef tap,
                                CMItemCount maxFrames,
                                const AudioStreamBasicDescription *processingFormat) {

    AVAudioTapProcessorContext *context = (AVAudioTapProcessorContext *)MTAudioProcessingTapGetStorage(tap);

    // Store sample rate for -setCenterFrequency:.
    context->sampleRate = processingFormat->mSampleRate;

    /* Verify the processing format (not required for the audio unit, but needed for the RMS calculation). */
    context->supportedTapProcessingFormat = true;

    if (processingFormat->mFormatID != kAudioFormatLinearPCM) {
        NSLog(@"Unsupported audio format ID for audioProcessingTap. LinearPCM only.");
        context->supportedTapProcessingFormat = false;
    }

    if (!(processingFormat->mFormatFlags & kAudioFormatFlagIsFloat)) {
        NSLog(@"Unsupported audio format flag for audioProcessingTap. Float only.");
        context->supportedTapProcessingFormat = false;
    }

    if (processingFormat->mFormatFlags & kAudioFormatFlagIsNonInterleaved) {
        context->isNonInterleaved = true;
    }


    AudioUnit audioUnit;

    AudioComponentDescription audioComponentDescription;
    audioComponentDescription.componentType = kAudioUnitType_Mixer;
    audioComponentDescription.componentSubType = kAudioUnitSubType_MultiChannelMixer;
    audioComponentDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
    audioComponentDescription.componentFlags = 0;
    audioComponentDescription.componentFlagsMask = 0;

    AudioComponent audioComponent = AudioComponentFindNext(NULL, &audioComponentDescription);
    if (audioComponent) {
        if (noErr == AudioComponentInstanceNew(audioComponent, &audioUnit)) {
            OSStatus status = noErr;

            // Set audio unit input/output stream format to processing format.
            if (noErr == status) {
                status = AudioUnitSetProperty(audioUnit,
                                              kAudioUnitProperty_StreamFormat,
                                              kAudioUnitScope_Input,
                                              0,
                                              processingFormat,
                                              sizeof(AudioStreamBasicDescription));
            }

            if (noErr == status) {
                status = AudioUnitSetProperty(audioUnit,
                                              kAudioUnitProperty_StreamFormat,
                                              kAudioUnitScope_Output,
                                              0,
                                              processingFormat,
                                              sizeof(AudioStreamBasicDescription));
            }

            // Set audio unit render callback.
            if (noErr == status) {
                AURenderCallbackStruct renderCallbackStruct;
                renderCallbackStruct.inputProc = AU_RenderCallback;
                renderCallbackStruct.inputProcRefCon = (void *)tap;
                status = AudioUnitSetProperty(audioUnit,
                                              kAudioUnitProperty_SetRenderCallback,
                                              kAudioUnitScope_Input,
                                              0,
                                              &renderCallbackStruct,
                                              sizeof(AURenderCallbackStruct));
            }

            // Set audio unit maximum frames per slice to max frames.
            if (noErr == status) {
                UInt32 maximumFramesPerSlice = (UInt32)maxFrames;
                status = AudioUnitSetProperty(audioUnit,
                                              kAudioUnitProperty_MaximumFramesPerSlice,
                                              kAudioUnitScope_Global,
                                              0,
                                              &maximumFramesPerSlice,
                                              (UInt32)sizeof(UInt32));
            }

            // Initialize audio unit.
            if (noErr == status) {
                status = AudioUnitInitialize(audioUnit);
            }

            if (noErr != status) {
                AudioComponentInstanceDispose(audioUnit);
                audioUnit = NULL;
            }
            context->audioUnit = audioUnit;
        }
    }
    NSLog(@"Tap channels: %d",processingFormat->mChannelsPerFrame); // = 1 for mono source file
}
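
For reference, AU_RenderCallback (not shown) follows the usual tap pattern of pulling the source audio from the tap into the audio unit's input; a minimal sketch of the idea:

static OSStatus AU_RenderCallback(void *inRefCon,
                                  AudioUnitRenderActionFlags *ioActionFlags,
                                  const AudioTimeStamp *inTimeStamp,
                                  UInt32 inBusNumber,
                                  UInt32 inNumberFrames,
                                  AudioBufferList *ioData) {
    // Pull the source audio from the tap into the mixer's input buffers.
    return MTAudioProcessingTapGetSourceAudio((MTAudioProcessingTapRef)inRefCon,
                                              inNumberFrames,
                                              ioData,
                                              NULL,   // flagsOut (unused)
                                              NULL,   // timeRangeOut (unused)
                                              NULL);  // numberFramesOut (unused)
}

The pan itself is applied per input bus via kMultiChannelMixerParam_Pan (input scope, range -1 to 1; per the header comment, it's only valid when the output is not mono), along these lines:

AudioUnitSetParameter(context->audioUnit,
                      kMultiChannelMixerParam_Pan,
                      kAudioUnitScope_Input,
                      0,       // input bus
                      -1.0f,   // e.g., hard left
                      0);      // buffer offset in frames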

I've tried a few different options for the output stream format, e.g. AVAudioFormat *outFormat = [[AVAudioFormat alloc] initStandardFormatWithSampleRate:processingFormat->mSampleRate channels:2];, but I get this error each time: "Client did not see 20 I/O cycles; giving up." The code below creates an ASBD identical to the input format except with two channels instead of one, and it produces the same "20 I/O cycles" error:

AudioStreamBasicDescription asbd;
asbd.mFormatID         = kAudioFormatLinearPCM;
// 0x29 = kAudioFormatFlagIsFloat | kAudioFormatFlagIsPacked | kAudioFormatFlagIsNonInterleaved
asbd.mFormatFlags      = 0x29;
asbd.mSampleRate       = 44100;
asbd.mBitsPerChannel   = 32;
asbd.mChannelsPerFrame = 2;
// Non-interleaved, so bytes per frame/packet are per channel: one 32-bit float.
asbd.mBytesPerFrame    = 4;
asbd.mFramesPerPacket  = 1;
asbd.mBytesPerPacket   = 4;
asbd.mReserved         = 0;
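
For what it's worth, the same stereo ASBD can be derived from the processing format rather than hard-coded; a sketch (since the format is non-interleaved, the per-channel byte counts don't change):

AudioStreamBasicDescription stereoFormat = *processingFormat; // copy the tap's mono processing format
stereoFormat.mChannelsPerFrame = 2;                           // only the channel count differs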