2011-11-19 127 views
2

我正在使用ffmpeg爲iOS設計一個rtsp流媒體(AAC格式)客戶端。現在我只能說我的應用程序是可行的,但流式聲音非常嘈雜,甚至有點失真,遠遠低於vlc或mplayer播放時的聲音。爲ffmpeg rtsp流播放設置音頻隊列

流由av_read_frame()讀取,由avcodec_decode_audio3()解碼。然後,我只是將解碼後的原始音頻發送到音頻隊列。

使用我的應用程序解碼本地aac文件時,聲音聽起來似乎沒有那麼吵。我知道初始編碼會顯着影響結果。但至少我應該嘗試讓它聽起來像其他流客戶端...

我的實施/修改中的許多部分實際上來自嘗試和錯誤。我相信我在設置音頻隊列方面做了一些錯誤,並且在填充音頻緩衝區時使用了回調函數。

任何提示,建議或幫助,非常感謝。

// -- 由 av_dump_format() 輸出的測試素材資訊 --

Metadata: 
    title   : /demo/test.3gp 
    Duration: 00:00:30.11, start: 0.000000, bitrate: N/A 
    Stream #0:0: Audio: aac, 32000 Hz, stereo, s16 
aac Advanced Audio Coding 

// - 音頻隊列設置過程 -

// Configure an AudioQueue output for interleaved canonical linear PCM that
// matches the decoder's sample rate / channel count, allocate and prime
// NUM_BUFFERS buffers via fillAudioBuffer(), then start playback.
// No-op if playback is already running.
- (void) startPlayback
{
    OSStatus err = 0;
    if (playState.playing) return;

    playState.started = false;

    if (!playState.queue)
    {
        UInt32 bufferSize;

        // Describe the decoded stream: canonical (16-bit signed integer)
        // interleaved linear PCM, one frame per packet (constant bit rate).
        playState.format.mSampleRate       = _av->audio.sample_rate;
        playState.format.mFormatID         = kAudioFormatLinearPCM;
        playState.format.mFormatFlags      = kAudioFormatFlagsCanonical;
        playState.format.mChannelsPerFrame = _av->audio.channels_per_frame;
        playState.format.mBytesPerPacket   = sizeof(AudioSampleType) * _av->audio.channels_per_frame;
        playState.format.mBytesPerFrame    = sizeof(AudioSampleType) * _av->audio.channels_per_frame;
        playState.format.mBitsPerChannel   = 8 * sizeof(AudioSampleType);
        playState.format.mFramesPerPacket  = 1;
        playState.format.mReserved         = 0;

        pauseStart = 0;
        DeriveBufferSize(playState.format, playState.format.mBytesPerPacket,
                         BUFFER_DURATION, &bufferSize, &numPacketsToRead);

        err = AudioQueueNewOutput(&playState.format, aqCallback, &playState,
                                  NULL, kCFRunLoopCommonModes, 0, &playState.queue);
        if (err != 0)
        {
            // FIX: previously fell through and dereferenced a NULL queue in
            // the allocation loop below; bail out instead.
            printf("AQHandler.m startPlayback: Error creating new AudioQueue: %d \n", (int)err);
            return;
        }

        // Allocate the playback buffers and prime each one with audio data
        // before the queue is started.
        for (int i = 0; i < NUM_BUFFERS; i++)
        {
            err = AudioQueueAllocateBufferWithPacketDescriptions(playState.queue, bufferSize,
                                                                 numPacketsToRead, &playState.buffers[i]);
            if (err != 0)
                printf("AQHandler.m startPlayback: Error allocating buffer %d", i);
            fillAudioBuffer(&playState, playState.queue, playState.buffers[i]);
        }
    }

    startTime = mu_currentTimeInMicros();

    err = AudioQueueStart(playState.queue, NULL);

    if (err)
    {
        // FIX: sErr[4] could not hold FormatError()'s quoted four-char code
        // plus the NUL terminator (overflow), and %ld mismatched OSStatus
        // (SInt32 on iOS). Use a roomy buffer and an explicit (int) cast.
        char sErr[20];
        printf("AQHandler.m startPlayback: Could not start queue %d %s.", (int)err, FormatError(sErr, err));

        playState.playing = NO;
    }
    else
    {
        AudioSessionSetActive(true);
        playState.playing = YES;
    }
}

// - 回調填充音頻緩衝區 -

static int ct = 0;  /* debug: running total of bytes handed to the queue */

/*
 * AudioQueue output callback: pulls decoded PCM out of the decoder's ring
 * buffer (via getNextAudio), packs it into `buffer` with one packet
 * description per chunk, timestamps the buffer from the first chunk's DTS
 * (relative to aqStartDts), and re-enqueues it.  Marks the PlayState
 * finished when the source reports EOF.
 *
 * NOTE(review): the queue format is CBR linear PCM (mFramesPerPacket = 1),
 * yet variable-sized "packets" are described here with
 * mVariableFramesInPacket = 0.  For CBR formats packet descriptions are
 * normally not needed at all; confirm this mismatch is not the cause of
 * the reported noise/distortion.
 */
static void fillAudioBuffer(void *info, AudioQueueRef queue, AudioQueueBufferRef buffer)
{
    int lengthCopied = INT32_MAX;
    int dts = 0;
    int isDone = 0;

    buffer->mAudioDataByteSize = 0;
    buffer->mPacketDescriptionCount = 0;

    OSStatus err = 0;
    AudioTimeStamp bufferStartTime;

    AudioQueueGetCurrentTime(queue, NULL, &bufferStartTime, NULL);

    PlayState *ps = (PlayState *)info;

    if (!ps->started)
        ps->started = true;

    /* Copy source packets until the buffer's packet-description table is
     * full or the source has nothing more to give (lengthCopied <= 0). */
    while (buffer->mPacketDescriptionCount < numPacketsToRead && lengthCopied > 0)
    {
        lengthCopied = getNextAudio(_av,
                                    buffer->mAudioDataBytesCapacity - buffer->mAudioDataByteSize,
                                    (uint8_t *)buffer->mAudioData + buffer->mAudioDataByteSize,
                                    &dts, &isDone);

        ct += lengthCopied;

        if (lengthCopied < 0 || isDone)
        {
            printf("nothing to read....\n\n");
            /* FIX: reuse the outer `ps` instead of re-declaring a shadowing
             * local with the same value. */
            ps->finished = true;
            ps->started = false;
            break;
        }

        if (aqStartDts < 0) aqStartDts = dts;

        if (buffer->mPacketDescriptionCount == 0)
        {
            /* Timestamp the whole buffer from its first packet's DTS,
             * relative to the DTS of the very first packet enqueued. */
            bufferStartTime.mFlags = kAudioTimeStampSampleTimeValid;
            bufferStartTime.mSampleTime = (Float64)(dts - aqStartDts);

            if (bufferStartTime.mSampleTime < 0)
                bufferStartTime.mSampleTime = 0;

            /* FIX: printing the buffer pointer with %x is a format-string
             * mismatch (UB, truncates on 64-bit); use %p. */
            printf("AQHandler.m fillAudioBuffer: DTS for %p: %lf time base: %lf StartDTS: %d\n",
                   (void *)buffer,
                   bufferStartTime.mSampleTime,
                   _av->audio.time_base,
                   aqStartDts);
        }

        buffer->mPacketDescriptions[buffer->mPacketDescriptionCount].mStartOffset = buffer->mAudioDataByteSize;
        buffer->mPacketDescriptions[buffer->mPacketDescriptionCount].mDataByteSize = lengthCopied;
        buffer->mPacketDescriptions[buffer->mPacketDescriptionCount].mVariableFramesInPacket = 0;

        buffer->mPacketDescriptionCount++;
        buffer->mAudioDataByteSize += lengthCopied;
    }

    int audioBufferCount, audioBufferTotal, videoBufferCount, videoBufferTotal;
    bufferCheck(_av, &videoBufferCount, &videoBufferTotal, &audioBufferCount, &audioBufferTotal);

    if (buffer->mAudioDataByteSize)
    {
        err = AudioQueueEnqueueBufferWithParameters(queue, buffer, 0, NULL, 0, 0, 0, NULL,
                                                    &bufferStartTime, NULL);
        if (err)
        {
            /* FIX: pass the pointer with %p (was raw pointer through %x),
             * cast OSStatus to int for %d, and widen sErr so FormatError()
             * cannot overflow it. */
            char sErr[20];
            printf("AQHandler.m fillAudioBuffer: Could not enqueue buffer %p: %d %s.",
                   (void *)buffer, (int)err, FormatError(sErr, err));
        }
    }
}




/*
 * Copies the next decoded audio packet from the ring buffer into `buf`
 * (at most `maxlength` bytes), stores its timestamp in *pts, and sets
 * *isDone when EOF has been reached and the ring is drained.
 *
 * Returns the number of bytes copied (0 if the next packet does not fit
 * or the ring is empty), or -1 on EOF while waiting.
 *
 * NOTE(review): `audio_ring.lock` is a plain flag polled in a busy-wait
 * loop — this is not a real lock and races with the producer thread;
 * consider a mutex/condition variable.  Also runs on the AudioQueue
 * callback thread, where usleep()-based blocking risks audio dropouts.
 */
int getNextAudio(video_data_t* vInst, int maxlength, uint8_t* buf, int* pts, int* isDone)
{
    struct video_context_t *ctx = vInst->context;
    int datalength = 0;

    /* Wait until the ring is unlocked and has data (or the player dies). */
    while (ctx->audio_ring.lock || (ctx->audio_ring.count <= 0 && ((ctx->play_state & STATE_DIE) != STATE_DIE)))
    {
        if (ctx->play_state & STATE_EOF) return -1;
        usleep(100);
    }

    *pts = 0;
    ctx->audio_ring.lock = kLocked;

    /* FIX: was `maxlength > size`, which rejected a packet that exactly
     * filled the remaining space — `>=` lets it fit. */
    if (ctx->audio_ring.count > 0 && maxlength >= ctx->audio_buffer[ctx->audio_ring.read].size)
    {
        memcpy(buf, ctx->audio_buffer[ctx->audio_ring.read].data,
               ctx->audio_buffer[ctx->audio_ring.read].size);

        *pts = ctx->audio_buffer[ctx->audio_ring.read].pts;
        datalength = ctx->audio_buffer[ctx->audio_ring.read].size;

        /* Advance the circular read index and shrink the count. */
        ctx->audio_ring.read++;
        ctx->audio_ring.read %= ABUF_SIZE;
        ctx->audio_ring.count--;
    }
    ctx->audio_ring.lock = kUnlocked;

    /* Signal completion once EOF is flagged and the ring is fully drained. */
    if ((ctx->play_state & STATE_EOF) == STATE_EOF && ctx->audio_ring.count == 0) *isDone = 1;

    return datalength;
}

回答

1

聲音失真最可能的原因是簡單的數據包丟失,RTSP可能容易發生,特別是無線連接。

我建議你考慮配置ffmpeg,以便在可能的時候使用基於TCP的連接,而不是默認的RTP/UDP。

1

如果您正在您的音頻隊列回調中下載或解碼音頻，NUM_BUFFERS 和/或 bufferSize 可能需要更大，以覆蓋更糟的網絡延遲和抖動。或者您可以在音頻回調之外預先解碼音頻，並在回調之前排隊足夠的數據，以應對下載和解碼的耗時及其波動。

+0

嗨,謝謝你的提示。 下載和解碼在另一個線程中處理。這裏所做的只是在需要時填充音頻緩衝區。很可能等到我們有東西要填寫時(在getNextAudio()中)。我只是不明白爲什麼輸出音頻失真並且充滿了噪音。 – illew