MediaCodec: decoding a video and re-encoding it produces a corrupted file

I want to implement https://android.googlesource.com/platform/cts/+/jb-mr2-release/tests/tests/media/src/android/media/cts/DecodeEditEncodeTest.java, but with the source modified to use an .mp4 video file. The MIME type is video/avc, the bitrate is 288 kbps, the I-frame interval is 100, and the frame size is 176x144. The file is 6 MB. When I decode the video and render the frames to the output surface, I can save a frame as a bitmap, and the frames look fine. But in the end, after encoding (with the same parameters as the original video), I get a 700 KB file that I cannot play (probably a corrupted file).

    extractor = new MediaExtractor();
    extractor.SetDataSource(filePath);
    for (int i = 0; i < extractor.TrackCount; i++)
    {
        inputFormat = extractor.GetTrackFormat(i);
        string mime = inputFormat.GetString(MediaFormat.KeyMime);
        if (mime.StartsWith("video/"))
        {
            extractor.SelectTrack(i);
            mimeType = mime;
            break;
        }
    }
    mWidth = inputFormat.GetInteger(MediaFormat.KeyWidth);
    mHeight = inputFormat.GetInteger(MediaFormat.KeyHeight);
    // Create an encoder format that matches the input format. (Might be able to just
    // re-use the format used to generate the video, since we want it to be the same.)
    MediaFormat outputFormat = MediaFormat.CreateVideoFormat(mimeType, mWidth, mHeight);
    outputFormat.SetInteger(MediaFormat.KeyColorFormat,
            (int)MediaCodecCapabilities.Formatsurface);
    outputFormat.SetInteger(MediaFormat.KeyBitRate, 288000);
    outputFormat.SetInteger(MediaFormat.KeyFrameRate,
            inputFormat.GetInteger(MediaFormat.KeyFrameRate));
    outputFormat.SetInteger(MediaFormat.KeyIFrameInterval, 100);
    outputData.setMediaFormat(outputFormat);
    encoder = MediaCodec.CreateEncoderByType(mimeType);
    encoder.Configure(outputFormat, null, null, MediaCodecConfigFlags.Encode);
    inputSurface = new InputSurface(encoder.CreateInputSurface());
    inputSurface.makeCurrent();
    encoder.Start();
    // OutputSurface uses the EGL context created by InputSurface.
    decoder = MediaCodec.CreateDecoderByType(mimeType);
    outputSurface = new OutputSurface();
    outputSurface.changeFragmentShader(FRAGMENT_SHADER);
    decoder.Configure(inputFormat, outputSurface.getSurface(), null, 0);
    decoder.Start();
    editVideoData2(extractor, decoder, outputSurface, inputSurface, encoder, outputData);

And the decode/encode part:

    while (!outputDone)
    {
        if (VERBOSE) Log.Debug(TAG, "edit loop");
        // Feed more data to the decoder.
        if (!inputDone)
        {
            int inputBufIndex = decoder.DequeueInputBuffer(TIMEOUT_USEC);
            if (inputBufIndex >= 0)
            {
                ByteBuffer buffer = decoderInputBuffers[inputBufIndex];
                int sampleSize = extractor.ReadSampleData(buffer, 0);
                if (sampleSize < 0)
                {
                    inputChunk++;
                    // End of stream -- send empty frame with EOS flag set.
                    decoder.QueueInputBuffer(inputBufIndex, 0, 0, 0L,
                            MediaCodecBufferFlags.EndOfStream);
                    inputDone = true;
                    if (VERBOSE) Log.Debug(TAG, "sent input EOS (with zero-length frame)");
                }
                else
                {
                    // Copy a chunk of input to the decoder. The first chunk should have
                    // the BUFFER_FLAG_CODEC_CONFIG flag set.
                    buffer.Clear();
                    decoder.QueueInputBuffer(inputBufIndex, 0, sampleSize, extractor.SampleTime, 0);
                    extractor.Advance();
                    inputChunk++;
                }
            }
            else
            {
                if (VERBOSE) Log.Debug(TAG, "input buffer not available");
            }
        }
        // Assume output is available. Loop until both assumptions are false.
        bool decoderOutputAvailable = !decoderDone;
        bool encoderOutputAvailable = true;
        while (decoderOutputAvailable || encoderOutputAvailable)
        {
            // Start by draining any pending output from the encoder. It's important to
            // do this before we try to stuff any more data in.
            int encoderStatus = encoder.DequeueOutputBuffer(info, TIMEOUT_USEC);
            if (encoderStatus == (int)MediaCodecInfoState.TryAgainLater)
            {
                // no output available yet
                if (VERBOSE) Log.Debug(TAG, "no output from encoder available");
                encoderOutputAvailable = false;
            }
            else if (encoderStatus == (int)MediaCodecInfoState.OutputBuffersChanged)
            {
                encoderOutputBuffers = encoder.GetOutputBuffers();
                if (VERBOSE) Log.Debug(TAG, "encoder output buffers changed");
            }
            else if (encoderStatus == (int)MediaCodecInfoState.OutputFormatChanged)
            {
                MediaFormat newFormat = encoder.OutputFormat;
                if (VERBOSE) Log.Debug(TAG, "encoder output format changed: " + newFormat);
            }
            else if (encoderStatus < 0)
            {
                Log.Error(TAG, "unexpected result from encoder.dequeueOutputBuffer: " + encoderStatus);
            }
            else // encoderStatus >= 0
            {
                ByteBuffer encodedData = encoderOutputBuffers[encoderStatus];
                if (encodedData == null)
                {
                    Log.Error(TAG, "encoderOutputBuffer " + encoderStatus + " was null");
                }
                // Write the data to the output "file".
                if (info.Size != 0)
                {
                    encodedData.Position(info.Offset);
                    encodedData.Limit(info.Offset + info.Size);
                    byte[] data = new byte[encodedData.Remaining()];
                    encodedData.Get(data);
                    fStream.Write(data, 0, data.Length);
                    // outputData.addChunk(encodedData, (int)info.Flags, info.PresentationTimeUs);
                    outputCount++;
                    if (VERBOSE) Log.Debug(TAG, "encoder output " + info.Size + " bytes");
                }
                outputDone = (info.Flags & MediaCodecBufferFlags.EndOfStream) != 0;
                encoder.ReleaseOutputBuffer(encoderStatus, false);
            }
            if (encoderStatus != (int)MediaCodecInfoState.TryAgainLater)
            {
                // Continue attempts to drain output.
                continue;
            }
            // Encoder is drained, check to see if we've got a new frame of output from
            // the decoder. (The output is going to a Surface, rather than a ByteBuffer,
            // but we still get information through BufferInfo.)
            if (!decoderDone)
            {
                int decoderStatus = decoder.DequeueOutputBuffer(info, TIMEOUT_USEC);
                if (decoderStatus == (int)MediaCodecInfoState.TryAgainLater)
                {
                    // no output available yet
                    if (VERBOSE) Log.Debug(TAG, "no output from decoder available");
                    decoderOutputAvailable = false;
                }
                else if (decoderStatus == (int)MediaCodecInfoState.OutputBuffersChanged)
                {
                    //decoderOutputBuffers = decoder.GetOutputBuffers();
                    if (VERBOSE) Log.Debug(TAG, "decoder output buffers changed (we don't care)");
                }
                else if (decoderStatus == (int)MediaCodecInfoState.OutputFormatChanged)
                {
                    // expected before first buffer of data
                    MediaFormat newFormat = decoder.OutputFormat;
                    if (VERBOSE) Log.Debug(TAG, "decoder output format changed: " + newFormat);
                }
                else if (decoderStatus < 0)
                {
                    Log.Error(TAG, "unexpected result from decoder.dequeueOutputBuffer: " + decoderStatus);
                }
                else // decoderStatus >= 0
                {
                    if (VERBOSE) Log.Debug(TAG, "surface decoder given buffer "
                            + decoderStatus + " (size=" + info.Size + ")");
                    // The ByteBuffers are null references, but we still get a nonzero
                    // size for the decoded data.
                    bool doRender = (info.Size != 0);
                    // As soon as we call releaseOutputBuffer, the buffer will be forwarded
                    // to SurfaceTexture to convert to a texture. The API doesn't
                    // guarantee that the texture will be available before the call
                    // returns, so we need to wait for the onFrameAvailable callback to
                    // fire. If we don't wait, we risk rendering from the previous frame.
                    decoder.ReleaseOutputBuffer(decoderStatus, doRender);
                    if (doRender)
                    {
                        // This waits for the image and renders it after it arrives.
                        if (VERBOSE) Log.Debug(TAG, "awaiting frame");
                        outputSurface.awaitNewImage();
                        outputSurface.drawImage();
                        outputSurface.saveFrame(Android.OS.Environment.ExternalStorageDirectory + "/test.jpg", mWidth, mHeight);
                        // Send it to the encoder.
                        inputSurface.setPresentationTime(info.PresentationTimeUs * 1000);
                        if (VERBOSE) Log.Debug(TAG, "swapBuffers");
                        inputSurface.swapBuffers();
                    }
                    if ((info.Flags & MediaCodecBufferFlags.EndOfStream) != 0)
                    {
                        // forward decoder EOS to encoder
                        if (VERBOSE) Log.Debug(TAG, "signaling input EOS");
                        if (WORK_AROUND_BUGS)
                        {
                            // Bail early, possibly dropping a frame.
                            return;
                        }
                        else
                        {
                            encoder.SignalEndOfInputStream();
                        }
                    }
                }
            }
        }
    }
    if (inputChunk != outputCount)
    {
        throw new RuntimeException("frame lost: " + inputChunk + " in, " +
                outputCount + " out");
    }
    fStream.Close();

If I save a frame as an image I can see it fine, so I assume the frames arrive at the OutputSurface correctly, and I don't see anything strange in the encoder configuration. Can you help me, or at least suggest what I could check? Thanks.

Quick question: are you expecting raw H.264 output, or .mp4 output? I don't see a MediaMuxer in the code. – fadden

Answer

I forgot to add a MediaMuxer, as fadden said. If you change the fStream part to MediaMuxer.WriteSampleData and add the Start(), Stop(), and AddTrack() calls, it works fine. Anyone can use this code as a decode/encode example. Thanks.
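For anyone looking for the concrete changes, here is a minimal sketch of that muxer plumbing in Xamarin C#. The MuxerSink class, its outputPath parameter, and the method names are my own illustration rather than code from the question; the MediaMuxer calls themselves follow the usual pattern of adding the track when the encoder reports OutputFormatChanged, then writing each encoded buffer, and skipping codec-config buffers because the muxer already receives the SPS/PPS through the track format:

    using Android.Media;
    using Java.Nio;

    // Minimal sketch (assumed names, not from the original code): collects
    // encoder output into an .mp4 container via MediaMuxer instead of
    // writing a raw H.264 stream to a FileStream.
    class MuxerSink
    {
        MediaMuxer muxer;
        int trackIndex = -1;
        bool started;

        public MuxerSink(string outputPath)
        {
            // Write an MPEG-4 container rather than raw H.264.
            muxer = new MediaMuxer(outputPath, MuxerOutputType.Mpeg4);
        }

        // Call from the encoder's OutputFormatChanged branch; the format
        // reported there carries the csd-0/csd-1 (SPS/PPS) the muxer needs.
        public void OnFormatChanged(MediaFormat format)
        {
            trackIndex = muxer.AddTrack(format);
            muxer.Start();
            started = true;
        }

        // Call where fStream.Write was, with the ByteBuffer and BufferInfo
        // from encoder.DequeueOutputBuffer.
        public void WriteSample(ByteBuffer encodedData, MediaCodec.BufferInfo info)
        {
            if ((info.Flags & MediaCodecBufferFlags.CodecConfig) != 0)
            {
                // Config data already reached the muxer via AddTrack;
                // writing it again as a sample corrupts the file.
                return;
            }
            if (!started || info.Size == 0)
            {
                return;
            }
            encodedData.Position(info.Offset);
            encodedData.Limit(info.Offset + info.Size);
            muxer.WriteSampleData(trackIndex, encodedData, info);
        }

        // Call once after the loop finishes, in place of fStream.Close().
        public void Finish()
        {
            if (started)
            {
                muxer.Stop();
            }
            muxer.Release();
        }
    }

Hooked into the loop above, that means calling OnFormatChanged(encoder.OutputFormat) in the OutputFormatChanged branch, WriteSample(encodedData, info) where the fStream.Write block was, and Finish() after the loop ends.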

Sir, I have a similar requirement. Could you tell me where to modify your code? I am new to this, so please guide me. – user3269550