
Rotating a CMSampleBuffer by an arbitrary angle and appending it to an AVAssetWriterInput, Swift 3

I convert the sample buffer into a CGContext. Then I apply a transform to the context, create a CIImage from it, and display that in a UIImageView.

At the same time, I want to append this to an AVAssetWriterInput so that I get a movie of these transformed frames.

So far, the transform I apply to the context has no effect at all. When I display the supposedly transformed image in the image view, it looks exactly the same.

UPDATE: I managed to record the sample buffers to a video file (it is still stretched because the orientation is wrong). I used this code as a base:

http://geek-is-stupid.github.io/blog/2017/04/13/how-to-record-detect-face-overlay-video-at-real-time-using-swift/

But I am still struggling with applying the rotation to the CGContext. Basically, everything I do to the context is completely ignored.

func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {

    let writable = canWrite()
    if writable, sessionAtSourceTime == nil {
        print("starting session")
        sessionAtSourceTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
        assetWriter!.startSession(atSourceTime: sessionAtSourceTime!)
    }

    let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)!
    if writable {
        autoreleasepool {
            CVPixelBufferLockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: 0))

            // Create an empty destination pixel buffer with the same dimensions as the source.
            var renderedOutputPixelBuffer: CVPixelBuffer? = nil
            let options = [
                kCVPixelBufferCGImageCompatibilityKey as String: true,
                kCVPixelBufferCGBitmapContextCompatibilityKey as String: true] as CFDictionary
            let status = CVPixelBufferCreate(kCFAllocatorDefault,
                                             CVPixelBufferGetWidth(pixelBuffer),
                                             CVPixelBufferGetHeight(pixelBuffer),
                                             kCVPixelFormatType_32BGRA, options,
                                             &renderedOutputPixelBuffer)
            guard status == kCVReturnSuccess else { return }

            CVPixelBufferLockBaseAddress(renderedOutputPixelBuffer!, CVPixelBufferLockFlags(rawValue: 0))
            let renderedOutputPixelBufferBaseAddress = CVPixelBufferGetBaseAddress(renderedOutputPixelBuffer!)

            // Copy the source pixels into the destination buffer.
            memcpy(renderedOutputPixelBufferBaseAddress,
                   CVPixelBufferGetBaseAddress(pixelBuffer),
                   CVPixelBufferGetHeight(pixelBuffer) * CVPixelBufferGetBytesPerRow(pixelBuffer))

            // Wrap the destination buffer in a CGContext.
            let context = CGContext(data: renderedOutputPixelBufferBaseAddress,
                                    width: CVPixelBufferGetWidth(renderedOutputPixelBuffer!),
                                    height: CVPixelBufferGetHeight(renderedOutputPixelBuffer!),
                                    bitsPerComponent: 8,
                                    bytesPerRow: CVPixelBufferGetBytesPerRow(renderedOutputPixelBuffer!),
                                    space: CGColorSpaceCreateDeviceRGB(),
                                    bitmapInfo: bitmapInfo!)

            // Rotate the context by the same angle as the on-screen boxView.
            let radians: Float = atan2f(Float(boxView!.transform.b), Float(boxView!.transform.a))
            context!.translateBy(x: self.view.frame.size.width / 2, y: self.view.frame.size.height / 2)
            context!.rotate(by: CGFloat(radians))

            let image: CGImage = context!.makeImage()!
            self.imageView!.image = UIImage(cgImage: image)

            if (bufferAdaptor?.assetWriterInput.isReadyForMoreMediaData)!, canWrite() {
                bufferAdaptor?.append(renderedOutputPixelBuffer!,
                                      withPresentationTime: CMSampleBufferGetPresentationTimeStamp(sampleBuffer))
            }

            CVPixelBufferUnlockBaseAddress(renderedOutputPixelBuffer!, CVPixelBufferLockFlags(rawValue: 0))
            CVPixelBufferUnlockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: 0))
        }
    }
}
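One likely reason the transform appears to be ignored: a CGContext's current transformation matrix only affects drawing calls made after it is set; it does not move pixels that are already in the backing buffer (here, the pixels copied in with memcpy). Below is a minimal, hypothetical sketch (the helper name and the source image are my assumptions) showing how a draw call through the transformed context is what actually applies the rotation:

import CoreGraphics

// Minimal sketch, assuming `source` is a CGImage made from the original pixel
// buffer and `context` wraps the destination buffer as above. The CTM changes
// only affect the subsequent draw(_:in:) call, which is what writes the
// rotated pixels into the buffer.
func drawRotated(_ source: CGImage, into context: CGContext, radians: CGFloat) {
    let width = CGFloat(context.width)
    let height = CGFloat(context.height)
    context.saveGState()
    // Rotate around the center of the destination buffer.
    context.translateBy(x: width / 2, y: height / 2)
    context.rotate(by: radians)
    context.translateBy(x: -width / 2, y: -height / 2)
    // Without a draw call the transform never touches the copied pixels.
    context.draw(source, in: CGRect(x: 0, y: 0, width: width, height: height))
    context.restoreGState()
}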

Answer


Found a solution. The important parts of the code are below.

// Create a pixel buffer from the sample buffer passed to the delegate method.
let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)!
CVPixelBufferLockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: 0))

// Create a CIImage from the buffer.
let ci = CIImage(cvPixelBuffer: pixelBuffer, options: options)

// Create a filter that applies an affine transform.
let filter = CIFilter(name: "CIAffineTransform")

// Build the transform: move the rotation point to the center...
var transform = CGAffineTransform(translationX: self.view.frame.midX, y: self.view.frame.midY)
// ...rotate it...
transform = transform.rotated(by: CGFloat(radians))
// ...then move the anchor point back to the original position.
transform = transform.translatedBy(x: -self.view.frame.midX, y: -self.view.frame.midY)

filter!.setValue(transform, forKey: kCIInputTransformKey)
filter!.setValue(ci, forKey: kCIInputImageKey)

// Take the output image from the filter.
let output = filter?.outputImage

// Create an empty pixel buffer to render into.
var newPixelBuffer: CVPixelBuffer? = nil
CVPixelBufferCreate(kCFAllocatorDefault,
                    Int(self.view.frame.width),
                    Int(self.view.frame.height),
                    kCVPixelFormatType_32BGRA,
                    nil,
                    &newPixelBuffer)

// Render the filter output into the new pixel buffer. `context` is a shared
// CIContext; creating a new one each frame is too CPU intensive.
context.render(output!, to: newPixelBuffer!)

// Finally, append the rotated buffer to the pixel buffer adaptor.
if (bufferAdaptor?.assetWriterInput.isReadyForMoreMediaData)!, canWrite() {
    bufferAdaptor?.append(newPixelBuffer!,
                          withPresentationTime: CMSampleBufferGetPresentationTimeStamp(sampleBuffer))
}

CVPixelBufferUnlockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: 0))
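The render call above relies on a shared CIContext named context that is created once and reused for every frame. A minimal sketch of how such a property might be declared (the Metal backing is my assumption, not something the answer specifies):

import CoreImage
import Metal

// Created once and reused for every frame; building a new CIContext per frame
// is too CPU intensive. A Metal-backed context keeps rendering on the GPU
// where a device is available.
let context: CIContext = {
    if let device = MTLCreateSystemDefaultDevice() {
        return CIContext(mtlDevice: device)
    }
    return CIContext()
}()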