Corrupt video when capturing audio and video with AVAssetWriter

I am using an AVCaptureSession with video and audio inputs, and an AVAssetWriter to encode H.264 video.

If I don't write the audio, the video is encoded as expected. But if I write the audio, I get a corrupt video.

If I inspect the CMSampleBuffer that feeds the audio to the AVAssetWriter, it shows this information:

invalid = NO 
dataReady = YES 
makeDataReadyCallback = 0x0 
makeDataReadyRefcon = 0x0 
formatDescription = <CMAudioFormatDescription 0x17410ba30 [0x1b3a70bb8]> { 
mediaType:'soun' 
mediaSubType:'lpcm' 
mediaSpecific: { 
    ASBD: { 
     mSampleRate: 44100.000000 
     mFormatID: 'lpcm' 
     mFormatFlags: 0xc 
     mBytesPerPacket: 2 
     mFramesPerPacket: 1 
     mBytesPerFrame: 2 
     mChannelsPerFrame: 1 
     mBitsPerChannel: 16  } 
    cookie: {(null)} 
    ACL: {(null)} 
    FormatList Array: {(null)} 
} 
extensions: {(null)} 

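For reference, this dump can be produced by printing the format description in the audio callback; a minimal sketch (assuming the delegate method shown further below) is:

if let formatDescription = CMSampleBufferGetFormatDescription(sampleBuffer) {
    print(formatDescription) // prints the ASBD details shown above
}
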
Since it supplies LPCM audio, I have configured the AVAssetWriterInput for sound with these settings (I have tried both one and two channels):

var channelLayout = AudioChannelLayout() 
memset(&channelLayout, 0, MemoryLayout<AudioChannelLayout>.size); 
channelLayout.mChannelLayoutTag = kAudioChannelLayoutTag_Mono 

let audioOutputSettings:[String: Any] = [AVFormatIDKey as String:UInt(kAudioFormatLinearPCM), 
              AVNumberOfChannelsKey as String:1, 
              AVSampleRateKey as String:44100.0, 
              AVLinearPCMIsBigEndianKey as String:false, 
              AVLinearPCMIsFloatKey as String:false, 
              AVLinearPCMBitDepthKey as String:16, 
              AVLinearPCMIsNonInterleaved as String:false, 
              AVChannelLayoutKey: NSData(bytes:&channelLayout, length:MemoryLayout<AudioChannelLayout>.size)] 

self.assetWriterAudioInput = AVAssetWriterInput(mediaType: AVMediaTypeAudio, outputSettings: audioOutputSettings) 
self.assetWriter.add(self.assetWriterAudioInput) 

When I use the LPCM settings above, I cannot open the video with any application. I have tried using kAudioFormatMPEG4AAC and kAudioFormatAppleLossless instead, but I still get a corrupt video; with those I can at least view it with QuickTime Player 8 (not QuickTime Player 7), but it is confused about the video's duration, and no sound plays.

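For comparison, a compressed format avoids the LPCM layout keys entirely. A minimal AAC configuration would look like the sketch below (the 64 kbps bit rate is an assumption, not something from the original settings):

let aacOutputSettings: [String: Any] = [
    AVFormatIDKey: Int(kAudioFormatMPEG4AAC),
    AVNumberOfChannelsKey: 1,
    AVSampleRateKey: 44100.0,
    AVEncoderBitRateKey: 64000 // assumed bit rate; tune as needed
]
self.assetWriterAudioInput = AVAssetWriterInput(mediaType: AVMediaTypeAudio, outputSettings: aacOutputSettings)
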
When recording finishes, I call:

func endRecording(_ completionHandler: @escaping() ->()) { 
    isRecording = false 
    assetWriterVideoInput.markAsFinished() 
    assetWriterAudioInput.markAsFinished() 
    assetWriter.finishWriting(completionHandler: completionHandler) 
} 

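The completion handler is not guaranteed to run on the main queue, so a caller that touches UI or files should dispatch accordingly; a usage sketch (the logging is illustrative, not from the original code):

endRecording {
    DispatchQueue.main.async {
        print("Finished writing to \(self.assetWriter.outputURL)")
    }
}
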
This is how the AVCaptureSession is configured:

func setupCapture() { 

    captureSession = AVCaptureSession() 

    if (captureSession == nil) { 
     fatalError("ERROR: Couldnt create a capture session") 
    } 

    captureSession?.beginConfiguration() 
    captureSession?.sessionPreset = AVCaptureSessionPreset1280x720 

    let frontDevices = AVCaptureDevice.devices().filter{ ($0 as AnyObject).hasMediaType(AVMediaTypeVideo) && ($0 as AnyObject).position == AVCaptureDevicePosition.front } 

    if let captureDevice = frontDevices.first as? AVCaptureDevice { 
     do { 
      let videoDeviceInput: AVCaptureDeviceInput 
      do { 
       videoDeviceInput = try AVCaptureDeviceInput(device: captureDevice) 
      } 
      catch { 
       fatalError("Could not create AVCaptureDeviceInput instance with error: \(error).") 
      } 
      guard (captureSession?.canAddInput(videoDeviceInput))! else { 
       fatalError() 
      } 
      captureSession?.addInput(videoDeviceInput) 
     } 
    } 

    do { 
     let audioDevice = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeAudio) 
     let audioDeviceInput: AVCaptureDeviceInput 
     do { 
      audioDeviceInput = try AVCaptureDeviceInput(device: audioDevice) 
     } 
     catch { 
      fatalError("Could not create AVCaptureDeviceInput instance with error: \(error).") 
     } 
     guard (captureSession?.canAddInput(audioDeviceInput))! else { 
      fatalError() 
     } 
     captureSession?.addInput(audioDeviceInput) 
    } 

    do { 
     let dataOutput = AVCaptureVideoDataOutput() 
     dataOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String : kCVPixelFormatType_32BGRA] 
     dataOutput.alwaysDiscardsLateVideoFrames = true 
     let queue = DispatchQueue(label: "com.3DTOPO.videosamplequeue") 
     dataOutput.setSampleBufferDelegate(self, queue: queue) 
     guard (captureSession?.canAddOutput(dataOutput))! else { 
      fatalError() 
     } 
     captureSession?.addOutput(dataOutput) 

     videoConnection = dataOutput.connection(withMediaType: AVMediaTypeVideo) 
    } 

    do { 
     let audioDataOutput = AVCaptureAudioDataOutput() 
     let queue = DispatchQueue(label: "com.3DTOPO.audiosamplequeue") 
     audioDataOutput.setSampleBufferDelegate(self, queue: queue) 
     guard (captureSession?.canAddOutput(audioDataOutput))! else { 
      fatalError() 
     } 
     captureSession?.addOutput(audioDataOutput) 

     audioConnection = audioDataOutput.connection(withMediaType: AVMediaTypeAudio) 
    } 

    captureSession?.commitConfiguration() 

    // this will trigger capture on its own queue 
    captureSession?.startRunning() 
} 

The AVCaptureVideoDataOutput delegate method:

func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) { 

    if (connection == audioConnection) { 
     delegate?.audioSampleUpdated(sampleBuffer: sampleBuffer) 
     return 
    } 

    // ... Write video buffer ...// 
} 

which calls:

func audioSampleUpdated(sampleBuffer: CMSampleBuffer) { 
    if (isRecording) { 
     while !assetWriterAudioInput.isReadyForMoreMediaData {} 
     if (!assetWriterAudioInput.append(sampleBuffer)) { 
      print("Unable to write to audio input"); 
     } 
    } 
} 

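As an aside, spinning on isReadyForMoreMediaData blocks the capture queue. A sketch of an alternative that simply drops samples the input is not ready for (assuming occasional dropped audio samples are acceptable):

func audioSampleUpdated(sampleBuffer: CMSampleBuffer) {
    if isRecording {
        // Dropping the sample instead of busy-waiting keeps the capture queue responsive.
        guard assetWriterAudioInput.isReadyForMoreMediaData else { return }
        if !assetWriterAudioInput.append(sampleBuffer) {
            print("Unable to write to audio input")
        }
    }
}
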
If I disable the assetWriterAudioInput.append() call above, the video is not corrupt, but then of course I get no audio. How can I get both the video and audio encoding to work?

Answer


I figured it out. I was setting the assetWriter.startSession source time to 0, and then subtracting the start time from the current CACurrentMediaTime() when writing the pixel data.

I changed the assetWriter.startSession source time to CACurrentMediaTime(), and I no longer subtract the start time when writing the video frames.

Old start-session code:

assetWriter.startWriting() 
assetWriter.startSession(atSourceTime: kCMTimeZero) 

New code that works:

let presentationStartTime = CMTimeMakeWithSeconds(CACurrentMediaTime(), 240) 

assetWriter.startWriting() 
assetWriter.startSession(atSourceTime: presentationStartTime)
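
With the session started at the host-clock time, each capture buffer can then be appended with its own presentation timestamp, since AVCaptureSession stamps buffers on the same clock as CACurrentMediaTime(). A sketch of the corresponding video append (the pixelBufferAdaptor name is an assumption, since the original post elides the video-writing code):

let pts = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
if let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) {
    // No start-time subtraction: the writer session already began at CACurrentMediaTime().
    pixelBufferAdaptor.append(pixelBuffer, withPresentationTime: pts)
}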