I am using the code below to initialize my audio unit. AudioUnitInitialize throws an error when initializing the AudioComponentInstance.

-(void) startListeningWithCoreAudio 
{ 
    NSError *error = nil; 

    [[AVAudioSession sharedInstance] setCategory: AVAudioSessionCategoryPlayAndRecord error:&error]; 
    if (error) 
      NSLog(@"error setting up audio session: %@", [error localizedDescription]); 

    [[AVAudioSession sharedInstance] setDelegate:self]; 

    OSStatus status = AudioSessionSetActive(YES); 
    checkStatus(status); 

      // Find Apple's voice-processing I/O audio unit 
    AudioComponentDescription desc; 
    desc.componentType = kAudioUnitType_Output; 
    desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO; 
    desc.componentFlags = 0; 
    desc.componentFlagsMask = 0; 
    desc.componentManufacturer = kAudioUnitManufacturer_Apple; 

    AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc); 
    status = AudioComponentInstanceNew(inputComponent, &kAudioUnit); 
    checkStatus(status); 

      // Enable input on the input (mic) bus 
    UInt32 flag = 1; 
    status = AudioUnitSetProperty(kAudioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, kInputBus, &flag, sizeof(flag)); 
    checkStatus(status); 


      // Define mic output audio format 
    AudioStreamBasicDescription audioFormat; 
    audioFormat.mSampleRate   = 16000.0; 
    audioFormat.mFormatID   = kAudioFormatLinearPCM; 
    audioFormat.mFormatFlags  = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked; 
    audioFormat.mFramesPerPacket = 1; 
    audioFormat.mChannelsPerFrame = 1; 
    audioFormat.mBitsPerChannel  = 16; 
    audioFormat.mBytesPerPacket  = 2; 
    audioFormat.mBytesPerFrame  = 2; 

    status = AudioUnitSetProperty(kAudioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kInputBus, &audioFormat, sizeof(audioFormat)); 
    checkStatus(status); 

      // Define our callback methods 
    AURenderCallbackStruct callbackStruct; 
    callbackStruct.inputProc = recordingCallback; 
    callbackStruct.inputProcRefCon = self; 
    status = AudioUnitSetProperty(kAudioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, kInputBus, &callbackStruct, sizeof(callbackStruct)); 
    checkStatus(status); 

      // Bypass voice processing 
    UInt32 audiobypassProcessing = [[NSUserDefaults standardUserDefaults] boolForKey:VOICE_BY_PASS_PROCESSING]; 
    status = AudioUnitSetProperty(kAudioUnit, kAUVoiceIOProperty_BypassVoiceProcessing, 
            kAudioUnitScope_Global, kInputBus, &audiobypassProcessing, sizeof(audiobypassProcessing)); 
    checkStatus(status); 

      // Automatic Gain Control 
    UInt32 audioAGC = [[NSUserDefaults standardUserDefaults]boolForKey:VOICE_AGC]; 
    status = AudioUnitSetProperty(kAudioUnit, kAUVoiceIOProperty_VoiceProcessingEnableAGC, 
            kAudioUnitScope_Global, kInputBus, &audioAGC, sizeof(audioAGC)); 
    checkStatus(status); 

      // Duck non-voice audio 
    UInt32 audioDucking = [[NSUserDefaults standardUserDefaults]boolForKey:VOICE_DUCKING]; 
    status = AudioUnitSetProperty(kAudioUnit, kAUVoiceIOProperty_DuckNonVoiceAudio, 
            kAudioUnitScope_Global, kInputBus, &audioDucking, sizeof(audioDucking)); 
    checkStatus(status); 

      // Voice processing quality 
    UInt32 quality = [[NSUserDefaults standardUserDefaults]integerForKey:VOICE_QUALITY]; 
    status = AudioUnitSetProperty(kAudioUnit, kAUVoiceIOProperty_VoiceProcessingQuality, 
            kAudioUnitScope_Global, kInputBus, &quality, sizeof(quality)); 
    checkStatus(status); 

    status = AudioUnitInitialize(kAudioUnit); 
    checkStatus(status); 

    status = AudioOutputUnitStart(kAudioUnit); 
    checkStatus(status); 

    UInt32 audioRoute = (UInt32)kAudioSessionOverrideAudioRoute_Speaker; 
    status = AudioSessionSetProperty(kAudioSessionProperty_OverrideAudioRoute, sizeof (audioRoute), &audioRoute); 
    checkStatus(status);  
} 


-(void) stopListeningWithCoreAudio 
{ 
    OSStatus  status = AudioUnitUninitialize(kAudioUnit); 
    checkStatus(status); 

    status = AudioOutputUnitStop(kAudioUnit); 
    checkStatus(status); 

//  if(kAudioUnit) 
//  { 
//   status = AudioComponentInstanceDispose(kAudioUnit); 
//   checkStatus(status); 
//   kAudioUnit = nil; 
//  } 

    status = AudioSessionSetActive(NO); 
    checkStatus(status); 

    NSError *error = nil; 
    [[AVAudioSession sharedInstance] setCategory: AVAudioSessionCategorySoloAmbient error:&error]; 
    if (error) 
      NSLog(@"error setting up audio session: %@", [error localizedDescription]); 
} 
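
(checkStatus and kInputBus are not defined in the snippet; a minimal sketch consistent with how they are used above, following the usual Core Audio conventions, would be:)

    // The input element (bus 1) of an I/O unit is the microphone side 
    static const AudioUnitElement kInputBus = 1; 

    // Log any non-zero OSStatus returned by the Core Audio calls above 
    static void checkStatus(OSStatus status) 
    { 
        if (status != noErr) 
            NSLog(@"Core Audio error: OSStatus = %d", (int)status); 
    } 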

It works fine the first time: startListeningWithCoreAudio is called from a button-press event, and it records and processes audio correctly. On another event I call stopListeningWithCoreAudio to stop recording and processing.

The problem appears when I call startListeningWithCoreAudio again: two of the functions it calls, AudioUnitInitialize and AudioOutputUnitStart, now return errors.

Can anyone help me figure out what the problem is?

Thanks in advance.

Answer


I found the solution. The problem occurs when the following two functions are called back to back:

extern OSStatus AudioUnitUninitialize(AudioUnit inUnit); 
extern OSStatus AudioComponentInstanceDispose(AudioComponentInstance inInstance); 
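
That is, the failing pattern was roughly this (a reconstruction; the exact call site is not shown):

    // Uninitializing and disposing back to back on the same thread 
    // caused the next AudioUnitInitialize/AudioOutputUnitStart to fail: 
    status = AudioUnitUninitialize(kAudioUnit); 
    status = AudioComponentInstanceDispose(kAudioUnit); 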

So instead I called the dispose method on the main thread, as follows:

[self performSelectorOnMainThread:@selector(disposeCoreAudio) withObject:nil waitUntilDone:NO]; 

-(void) disposeCoreAudio 
{ 
    OSStatus status = AudioComponentInstanceDispose(kAudioUnit); 
    checkStatus(status); 
    kAudioUnit = nil; 
} 

That solved the problem. So the correct sequence is: stop recording, uninitialize the recorder, and then dispose of the recorder on the main thread.
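
Put together, a teardown in that order might look like this (a sketch reusing kAudioUnit and checkStatus from the question's code):

    -(void) stopListeningWithCoreAudio 
    { 
        // Stop the unit first, while it is still initialized 
        OSStatus status = AudioOutputUnitStop(kAudioUnit); 
        checkStatus(status); 

        // Then uninitialize the stopped unit 
        status = AudioUnitUninitialize(kAudioUnit); 
        checkStatus(status); 

        // Finally, dispose of it on the main thread rather than back to back here 
        [self performSelectorOnMainThread:@selector(disposeCoreAudio) 
                               withObject:nil 
                            waitUntilDone:NO]; 
    } 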


One possible problem is that your code tries to uninitialize a running audio unit before stopping it.
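
In the stopListeningWithCoreAudio shown in the question, that would mean calling AudioOutputUnitStop before AudioUnitUninitialize:

    OSStatus status = AudioOutputUnitStop(kAudioUnit);  // stop first 
    checkStatus(status); 
    status = AudioUnitUninitialize(kAudioUnit);         // then uninitialize 
    checkStatus(status); 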


Thanks for your answer. +1. The actual cause was the session: I was setting up the session when the app became active and tearing it down when the app went to the background, but I was not initializing the audio unit again afterwards. – Apurv