
I can't seem to find what I'm looking for in the documentation. This code works great for mono, but I want stereo output.

- (void)createToneUnit
{
    // Configure the search parameters to find the default playback output unit
    // (called the kAudioUnitSubType_RemoteIO on iOS but
    // kAudioUnitSubType_DefaultOutput on Mac OS X)
    AudioComponentDescription defaultOutputDescription;
    defaultOutputDescription.componentType = kAudioUnitType_Output;
    defaultOutputDescription.componentSubType = kAudioUnitSubType_RemoteIO;
    defaultOutputDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
    defaultOutputDescription.componentFlags = 0;
    defaultOutputDescription.componentFlagsMask = 0;

    // Get the default playback output unit
    AudioComponent defaultOutput = AudioComponentFindNext(NULL, &defaultOutputDescription);
    NSAssert(defaultOutput, @"Can't find default output");

    // Create a new unit based on this that we'll use for output
    OSStatus err = AudioComponentInstanceNew(defaultOutput, &_toneUnit);
    NSAssert1(_toneUnit, @"Error creating unit: %d", err);

    // Set our tone rendering function on the unit
    AURenderCallbackStruct input;
    input.inputProc = RenderTone;
    input.inputProcRefCon = (__bridge void*)self;
    err = AudioUnitSetProperty(_toneUnit,
                               kAudioUnitProperty_SetRenderCallback,
                               kAudioUnitScope_Input,
                               0,
                               &input,
                               sizeof(input));
    NSAssert1(err == noErr, @"Error setting callback: %d", err);

    // Set the format to 32 bit, single channel, floating point, linear PCM
    const int four_bytes_per_float = 4;
    const int eight_bits_per_byte = 8;
    AudioStreamBasicDescription streamFormat;
    streamFormat.mSampleRate = kSampleRate;
    streamFormat.mFormatID = kAudioFormatLinearPCM;
    streamFormat.mFormatFlags =
        kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved;
    streamFormat.mBytesPerPacket = four_bytes_per_float;
    streamFormat.mFramesPerPacket = 1;
    streamFormat.mBytesPerFrame = four_bytes_per_float;
    streamFormat.mChannelsPerFrame = 1;
    streamFormat.mBitsPerChannel = four_bytes_per_float * eight_bits_per_byte;
    err = AudioUnitSetProperty (_toneUnit,
                                kAudioUnitProperty_StreamFormat,
                                kAudioUnitScope_Input,
                                0,
                                &streamFormat,
                                sizeof(AudioStreamBasicDescription));
    NSAssert1(err == noErr, @"Error setting stream format: %dd", err);
}
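
For stereo, the change I've tried so far (see the comments below) is bumping the channel count; since the format is non-interleaved, I believe the per-packet and per-frame sizes still describe a single channel and stay the same. A sketch of that variant:

    // Hypothetical stereo variant of the format block above:
    streamFormat.mChannelsPerFrame = 2;  // two non-interleaved channels
    // With kAudioFormatFlagIsNonInterleaved set, each AudioBuffer holds
    // one channel, so these sizes still describe one Float32 sample:
    streamFormat.mBytesPerPacket = four_bytes_per_float;
    streamFormat.mFramesPerPacket = 1;
    streamFormat.mBytesPerFrame = four_bytes_per_float;
    streamFormat.mBitsPerChannel = four_bytes_per_float * eight_bits_per_byte;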

And here is the callback:

OSStatus RenderTone( void* inRefCon,
                       AudioUnitRenderActionFlags  *ioActionFlags,
                       const AudioTimeStamp        *inTimeStamp,
                       UInt32                      inBusNumber,
                       UInt32                      inNumberFrames,
                     AudioBufferList             *ioData){

    // Get the tone parameters out of the view controller
    VWWSynthesizerC *synth = (__bridge VWWSynthesizerC *)inRefCon;
    double theta = synth.theta;
    double theta_increment = 2.0 * M_PI * synth.frequency / kSampleRate;

    // This is a mono tone generator so we only need the first buffer
    const int channel = 0;
    Float32 *buffer = (Float32 *)ioData->mBuffers[channel].mData;

    // Generate the samples
    for (UInt32 frame = 0; frame < inNumberFrames; frame++)
    {
        if(synth.muted){
            buffer[frame] = 0;
        }
        else{
            switch(synth.waveType){
                case VWWWaveTypeSine:{
                    buffer[frame] = sin(theta) * synth.amplitude;
                    break;
                }
                case VWWWaveTypeSquare:{
                    buffer[frame] = square(theta) * synth.amplitude;
                    break;
                }
                case VWWWaveTypeSawtooth:{
                    buffer[frame] = sawtooth(theta) * synth.amplitude;
                    break;
                }
                case VWWWaveTypeTriangle:{
                    buffer[frame] = triangle(theta) * synth.amplitude;
                    break;
                }
                default:
                    break;
            }
        }
        theta += theta_increment;
        if (theta > 2.0 * M_PI)
        {
            theta -= 2.0 * M_PI;
        }
    }

    synth.theta = theta;

    return noErr;
}

If there is a different or better way to render this data, I'm open to suggestions. I'm rendering sine, square, triangle, and sawtooth waves.
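
For reference, the square, sawtooth, and triangle helpers called in RenderTone aren't shown above; hypothetical reconstructions (my best guess, taking theta in [0, 2π) and returning a sample in [-1, 1]) might look like this:

static inline double square(double theta) {
    // +1 for the first half of the cycle, -1 for the second half
    return theta < M_PI ? 1.0 : -1.0;
}

static inline double sawtooth(double theta) {
    // Linear ramp from -1 up to +1 over one full cycle
    return (theta / M_PI) - 1.0;
}

static inline double triangle(double theta) {
    // Ramp up over the first half of the cycle, back down over the second
    double t = theta / (2.0 * M_PI);  // normalize to [0, 1)
    return t < 0.5 ? (4.0 * t - 1.0) : (3.0 - 4.0 * t);
}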

VaporwareWolf
  • I have made the following change but I can't figure out how to correctly populate the buffers in the render callback (see the sketch after these comments): streamFormat.mChannelsPerFrame = 2; – VaporwareWolf Jan 17 '14 at 03:36
  • By "stereo" do you mean the same signal output through two channels of the remoteIO? Or two different signals, each going through different remoteIO output channels? – Nick Jan 17 '14 at 15:02
  • I want one signal to be left ear and one right ear. Completely separated. My idea is to actually use this to drive some electronics with a serial interface, if I can separate the channels to left/right I can use one as a clock and the other as data. At least that's the idea. – VaporwareWolf Jan 21 '14 at 19:27
  • did you get any solution for stereo? – Bhavesh Lathigara Aug 08 '14 at 07:15
  • Yes I did, but haven't done much with it. I think I had issues panning completely left or right. I decided to just use mono in my project. I have a branch with stereo implementation though. You can find the relevant file here: https://github.com/zakkhoyt/Synthesizer/blob/stereo/Synthesizer/Synthesizer/Classes/VWW/VWWSynthesizerC.mm. If you get the project and run unit tests, I believe it's in a working state. Best of luck. Let me know if you get anywhere with it. – VaporwareWolf Aug 08 '14 at 20:07
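
Referenced from the first comment above, here is a minimal sketch of populating both channels in the render callback, assuming the non-interleaved two-channel format sketched in the question (RenderToneStereo is a hypothetical name, and the per-channel waveforms are just illustrative). With kAudioFormatFlagIsNonInterleaved and mChannelsPerFrame = 2, ioData carries one buffer per channel, so each channel can be written independently:

OSStatus RenderToneStereo( void* inRefCon,
                           AudioUnitRenderActionFlags  *ioActionFlags,
                           const AudioTimeStamp        *inTimeStamp,
                           UInt32                      inBusNumber,
                           UInt32                      inNumberFrames,
                           AudioBufferList             *ioData){

    VWWSynthesizerC *synth = (__bridge VWWSynthesizerC *)inRefCon;
    double theta = synth.theta;
    double theta_increment = 2.0 * M_PI * synth.frequency / kSampleRate;

    // Non-interleaved stereo: mBuffers[0] is the left channel and
    // mBuffers[1] is the right, each holding inNumberFrames Float32s.
    Float32 *left  = (Float32 *)ioData->mBuffers[0].mData;
    Float32 *right = (Float32 *)ioData->mBuffers[1].mData;

    for (UInt32 frame = 0; frame < inNumberFrames; frame++)
    {
        // Completely independent signals per channel, e.g. a clock on
        // the left and data on the right, as described in the comments.
        left[frame]  = sin(theta) * synth.amplitude;
        right[frame] = square(theta) * synth.amplitude;

        theta += theta_increment;
        if (theta > 2.0 * M_PI)
        {
            theta -= 2.0 * M_PI;
        }
    }

    synth.theta = theta;
    return noErr;
}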
