ffmpeg/libx264 C API: frames dropped from the end of a short MP4

In my C++ application I take a sequence of JPEG images, manipulate their data using FreeImage, and then encode the bitmaps as H.264 using the ffmpeg/libx264 C API. The output is an MP4 that shows the sequence of 22 images at 12 fps. My code is adapted from the "muxing" example that ships with the ffmpeg C source code.
My problem: no matter how I tweak the codec parameters, a certain number of frames from the end of the sequence passed to the encoder never appear in the final output. The AVCodecContext parameters I set are these:
//set context params
ctx->codec_id = AV_CODEC_ID_H264;
ctx->bit_rate = 4000 * 1000;
ctx->width = _width;
ctx->height = _height;
ost->st->time_base = AVRational{ 1, 12 };
ctx->time_base = ost->st->time_base;
ctx->gop_size = 1;
ctx->pix_fmt = AV_PIX_FMT_YUV420P;
I have found that a higher gop_size causes more frames to be dropped from the end of the video. I can also see from the output that, with this gop size (which essentially directs every output frame to be an I-frame), only 9 frames are written.
I do not understand why this happens. I tried encoding duplicate frames to make a longer video, and in that case no frames were dropped. I know the ffmpeg command-line tool has a concat command that accomplishes what I am trying to do, but I do not know how to achieve the same goal with the C API.
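One thing I am unsure about is whether libx264 keeps some frames buffered internally and needs to be drained before the trailer is written. My reading of the avcodec_encode_video2 documentation is that delayed packets are retrieved by passing a NULL frame until no more packets come out, roughly like the sketch below. This is not in my current code and I have not verified it; ctx, stream and formatCtx stand in for _ost.enc, _ost.st and _formatCtx, and error handling is omitted:
//sketch only: drain any packets still buffered in the encoder before av_write_trailer
int gotPacket = 0;
do {
    AVPacket pkt = { 0 };
    av_init_packet(&pkt);
    if (avcodec_encode_video2(ctx, &pkt, NULL, &gotPacket) < 0) {
        break;
    }
    if (gotPacket) {
        av_packet_rescale_ts(&pkt, ctx->time_base, stream->time_base);
        pkt.stream_index = stream->index;
        av_interleaved_write_frame(formatCtx, &pkt);
    }
} while (gotPacket);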
Here is the output I get on the console:
[libx264 @ 026d81c0] using cpu capabilities: MMX2 SSE2Fast SSSE3 SSE4.2 AVX FMA3 BMI2 AVX2
[libx264 @ 026d81c0] profile High, level 3.1
[libx264 @ 026d81c0] 264 - core 152 r2851 ba24899 - H.264/MPEG-4 AVC codec - Copyleft 2003-2017 - http://www.videolan.org/x264.html - options: cabac=1 ref=1 deblock=1:0:0 analyse=0x3:0x113 me=hex subme=7 psy=1 psy_rd=1.00:0.00 mixed_ref=0 me_range=16 chroma_me=1 trellis=1 8x8dct=1 cqm=0 deadzone=21,11 fast_pskip=1 chroma_qp_offset=-2 threads=12 lookahead_threads=2 sliced_threads=0 nr=0 decimate=1 interlaced=0 bluray_compat=0 constrained_intra=0 bframes=0 weightp=0 keyint=1 keyint_min=1 scenecut=40 intra_refresh=0 rc=abr mbtree=0 bitrate=4000 ratetol=1.0 qcomp=0.60 qpmin=0 qpmax=69 qpstep=4 ip_ratio=1.40 aq=1:1.00
Output #0, mp4, to '....\images\c411a991-46f6-400C-8bb0-77af3738559a.mp4':
    Stream #0:0: Video: h264, yuv420p, 700x700, q=2-31, 4000 kb/s, 12 tbn
[libx264 @ 026d81c0] frame I:9     Avg QP:17.83  size:111058
[libx264 @ 026d81c0] mb I  I16..4:  1.9% 47.7% 50.5%
[libx264 @ 026d81c0] final ratefactor: 19.14
[libx264 @ 026d81c0] 8x8 transform intra:47.7%
[libx264 @ 026d81c0] coded y,uvDC,uvAC intra: 98.4% 96.9% 89.5%
[libx264 @ 026d81c0] i16 v,h,dc,p: 64%  6%  2% 28%
[libx264 @ 026d81c0] i8 v,h,dc,ddl,ddr,vr,hd,vl,hu: 32% 15%  9%  5%  5%  6%  8% 10% 10%
[libx264 @ 026d81c0] i4 v,h,dc,ddl,ddr,vr,hd,vl,hu: 28% 18% 43% 22% 25% 10%
[libx264 @ 026d81c0] kb/s:10661.53
The code is included below:
MP4Writer.h
#ifndef MPEG_WRITER
#define MPEG_WRITER
#include <iostream>
#include <string>
#include <vector>
#include <ImgData.h>
extern "C" {
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
#include <libswscale/swscale.h>
}
struct OutputStream
{
AVStream *st;
AVCodecContext *enc;
//pts of the next frame that will be generated
int64_t next_pts;
int samples_count;
AVFrame *frame;
AVFrame *tmp_frame;
float t, tincr, tincr2;
struct SwsContext *sws_ctx;
struct SwrContext *swr_ctx;
};
class MP4Writer {
public:
MP4Writer();
void Init();
int16_t SetOutput(const std::string & path);
int16_t AddFrame(uint8_t * imgData);
int16_t Write(std::vector<ImgData> & imgData);
int16_t Finalize();
void SetHeight(const int height) { _height = _width = height; } //assuming 1:1 aspect ratio
private:
int16_t AddStream(OutputStream * ost, AVFormatContext * formatCtx, AVCodec ** codec, enum AVCodecID codecId);
int16_t OpenVideo(AVFormatContext * formatCtx, AVCodec *codec, OutputStream * ost, AVDictionary * optArg);
static AVFrame * AllocPicture(enum AVPixelFormat pixFmt, int width, int height);
static AVFrame * GetVideoFrame(uint8_t * imgData, OutputStream * ost, const int width, const int height);
static int WriteFrame(AVFormatContext * formatCtx, const AVRational * timeBase, AVStream * stream, AVPacket * packet);
int _width;
int _height;
OutputStream _ost;
AVFormatContext * _formatCtx;
AVDictionary * _dict;
};
#endif //MPEG_WRITER
MP4Writer.cpp
#include <MP4Writer.h>
#include <algorithm>
MP4Writer::MP4Writer()
{
_width = 0;
_height = 0;
_formatCtx = NULL; //make sure these start out null; they are used in SetOutput
_dict = NULL;
}
void MP4Writer::Init()
{
av_register_all();
}
/**
sets up output stream for the specified path.
note that the output format is deduced automatically from the file extension passed
@param path: output file path
@returns: -1 = output could not be deduced, -2 = invalid codec, -3 = error opening output file,
-4 = error writing header
*/
int16_t MP4Writer::SetOutput(const std::string & path)
{
int error;
AVCodec * codec;
AVOutputFormat * format;
_ost = OutputStream{}; //TODO reset state in a more focused way?
//allocate output media context
avformat_alloc_output_context2(&_formatCtx, NULL, NULL, path.c_str());
if (!_formatCtx) {
std::cout << "could not deduce output format from file extension. aborting" << std::endl;
return -1;
}
//set format
format = _formatCtx->oformat;
if (format->video_codec != AV_CODEC_ID_NONE) {
AddStream(&_ost, _formatCtx, &codec, format->video_codec);
}
else {
std::cout << "there is no video codec set. aborting" << std::endl;
return -2;
}
OpenVideo(_formatCtx, codec, &_ost, _dict);
av_dump_format(_formatCtx, 0, path.c_str(), 1);
//open output file
if (!(format->flags & AVFMT_NOFILE)) {
error = avio_open(&_formatCtx->pb, path.c_str(), AVIO_FLAG_WRITE);
if (error < 0) {
std::cout << "there was an error opening output file " << path << ". aborting" << std::endl;
return -3;
}
}
//write header
error = avformat_write_header(_formatCtx, &_dict);
if (error < 0) {
std::cout << "an error occurred writing header. aborting" << std::endl;
return -4;
}
return 0;
}
/**
initialize the output stream
@param ost: the output stream
@param formatCtx: the context format
@param codec: the output codec
@param codecId: the ffmpeg enumerated id of the codec
@returns: -1 = encoder not found, -2 = stream could not be allocated, -3 = encoding context could not be allocated
*/
int16_t MP4Writer::AddStream(OutputStream * ost, AVFormatContext * formatCtx, AVCodec ** codec, enum AVCodecID codecId)
{
AVCodecContext * ctx; //TODO not sure why this is here, could just set ost->enc directly
int i;
//detect the encoder
*codec = avcodec_find_encoder(codecId);
if ((*codec) == NULL) {
std::cout << "could not find encoder. aborting" << std::endl;
return -1;
}
//allocate stream
ost->st = avformat_new_stream(formatCtx, NULL);
if (ost->st == NULL) {
std::cout << "could not allocate stream. aborting" << std::endl;
return -2;
}
//allocate encoding context
ost->st->id = formatCtx->nb_streams - 1;
ctx = avcodec_alloc_context3(*codec);
if (ctx == NULL) {
std::cout << "could not allocate encoding context. aborting" << std::endl;
return -3;
}
ost->enc = ctx;
//set context params
ctx->codec_id = AV_CODEC_ID_H264;
ctx->bit_rate = 4000 * 1000;
ctx->width = _width;
ctx->height = _height;
ost->st->time_base = AVRational{ 1, 12 };
ctx->time_base = ost->st->time_base;
ctx->gop_size = 1;
ctx->pix_fmt = AV_PIX_FMT_YUV420P;
//if necessary, set stream headers and formats separately
if (formatCtx->oformat->flags & AVFMT_GLOBALHEADER) {
std::cout << "setting stream and headers to be separate" << std::endl;
ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
return 0;
}
/**
open the video for writing
@param formatCtx: the format context
@param codec: output codec
@param ost: output stream
@param optArg: dictionary
@return: -1 = error opening codec, -2 = allocate new frame, -3 = copy stream params
*/
int16_t MP4Writer::OpenVideo(AVFormatContext * formatCtx, AVCodec *codec, OutputStream * ost, AVDictionary * optArg)
{
int error;
AVCodecContext * ctx = ost->enc;
AVDictionary * dict = NULL;
av_dict_copy(&dict, optArg, 0);
//open codec
error = avcodec_open2(ctx, codec, &dict);
av_dict_free(&dict);
if (error < 0) {
std::cout << "there was an error opening the codec. aborting" << std::endl;
return -1;
}
//allocate new frame
ost->frame = AllocPicture(ctx->pix_fmt, ctx->width, ctx->height);
if (ost->frame == NULL) {
std::cout << "there was an error allocating a new frame. aborting" << std::endl;
return -2;
}
//copy stream params
error = avcodec_parameters_from_context(ost->st->codecpar, ctx);
if (error < 0) {
std::cout << "could not copy stream parameters. aborting" << std::endl;
return -3;
}
return 0;
}
/**
allocate a new frame
@param pixFmt: ffmpeg enumerated pixel format
@param width: output width
@param height: output height
@returns: an initialized frame
*/
AVFrame * MP4Writer::AllocPicture(enum AVPixelFormat pixFmt, int width, int height)
{
AVFrame * picture;
int error;
//allocate the frame
picture = av_frame_alloc();
if (picture == NULL) {
std::cout << "there was an error allocating the picture" << std::endl;
return NULL;
}
picture->format = pixFmt;
picture->width = width;
picture->height = height;
//allocate the frame's data buffer
error = av_frame_get_buffer(picture, 32);
if (error < 0) {
std::cout << "could not allocate frame data" << std::endl;
return NULL;
}
picture->pts = 0;
return picture;
}
/**
convert raw RGB buffer to YUV frame
@return: frame that contains image data
*/
AVFrame * MP4Writer::GetVideoFrame(uint8_t * imgData, OutputStream * ost, const int width, const int height)
{
int error;
AVCodecContext * ctx = ost->enc;
//prepare the frame
error = av_frame_make_writable(ost->frame);
if (error < 0) {
std::cout << "could not make frame writeable" << std::endl;
return NULL;
}
//TODO set this context one time per run, or even better, one time at init
//convert RGB to YUV
struct SwsContext* fooContext = sws_getContext(width, height, AV_PIX_FMT_BGR24,
width, height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
int inLinesize[1] = { 3 * width }; // RGB stride
uint8_t * inData[1] = { imgData };
int sliceHeight = sws_scale(fooContext, inData, inLinesize, 0, height, ost->frame->data, ost->frame->linesize);
sws_freeContext(fooContext);
ost->frame->pts = ost->next_pts++;
//TODO does the frame need to be returned here as it is available at the class level?
return ost->frame;
}
/**
write frame to file
@param formatCtx: the output format context
@param timeBase: the encoder time base
@param stream: output stream
@param packet: data packet
@returns: see return values for av_interleaved_write_frame
*/
int MP4Writer::WriteFrame(AVFormatContext * formatCtx, const AVRational * timeBase, AVStream * stream, AVPacket * packet)
{
av_packet_rescale_ts(packet, *timeBase, stream->time_base);
packet->stream_index = stream->index;
//write the compressed frame to the media file
return av_interleaved_write_frame(formatCtx, packet);
}
int16_t MP4Writer::Write(std::vector<ImgData> & imgData)
{
int16_t errorCount = 0;
int16_t retVal = 0;
bool countingUp = true;
size_t i = 0;
while (true) {
//don't show first frame again when counting back down
if (!countingUp && i == 0) {
break;
}
uint8_t * pixels = imgData[i].GetBits(imgData[i].mp4Input);
AddFrame(pixels);
//handle inc/dec without repeating last frame
if (countingUp) {
if (i == imgData.size() -1) {
countingUp = false;
i--;
}
else {
i++;
}
}
else {
i--;
}
}
Finalize();
return 0; //TODO return error code
}
/**
add another frame to output video
@param imgData: the raw image data
@returns -1 = error encoding video frame, -2 = error writing frame
*/
int16_t MP4Writer::AddFrame(uint8_t * imgData)
{
int error;
AVCodecContext * ctx;
AVFrame * frame;
int gotPacket = 0;
AVPacket pkt = { 0 };
ctx = _ost.enc;
av_init_packet(&pkt);
frame = GetVideoFrame(imgData, &_ost, _width, _height);
//encode the image
error = avcodec_encode_video2(ctx, &pkt, frame, &gotPacket);
if (error < 0) {
std::cout << "there was an error encoding the video frame" << std::endl;
return -1;
}
//write the frame. NOTE: this doesn't kick in until the encoder has received a certain number of frames
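//(with libx264 this seems expected: the encoder may buffer frames for lookahead, so gotPacket can stay 0 for the first few calls)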
if (gotPacket) {
error = WriteFrame(_formatCtx, &ctx->time_base, _ost.st, &pkt);
if (error < 0) {
std::cout << "the video frame could not be written" << std::endl;
return -2;
}
}
return 0;
}
/**
finalize output video and cleanup
*/
int16_t MP4Writer::Finalize()
{
av_write_trailer(_formatCtx);
avcodec_free_context(&_ost.enc);
av_frame_free(&_ost.frame);
av_frame_free(&_ost.tmp_frame);
avio_closep(&_formatCtx->pb);
avformat_free_context(_formatCtx);
sws_freeContext(_ost.sws_ctx);
swr_free(&_ost.swr_ctx);
return 0;
}
Usage
#include <FreeImage.h>
#include <MP4Writer.h>
#include <vector>
struct ImgData
{
unsigned int width;
unsigned int height;
std::string path;
FIBITMAP * mp4Input;
uint8_t * GetBits(FIBITMAP * bmp) { return FreeImage_GetBits(bmp); }
};
int main()
{
std::vector<ImgData> imgDataVec;
//load images and push to imgDataVec
MP4Writer mp4Writer;
mp4Writer.SetHeight(1200); //assumes 1:1 aspect ratio
mp4Writer.Init();
mp4Writer.SetOutput("test.mp4");
mp4Writer.Write(imgDataVec);
}
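The image loading step is elided above ("load images and push to imgDataVec"); for illustration, something along these lines would populate imgDataVec with FreeImage, assuming 24-bit JPEG input (the file names are placeholders and error handling is omitted):
//illustration only: load each JPEG and keep a 24-bit bitmap for the encoder
FreeImage_Initialise();
std::vector<std::string> paths = { "img_000.jpg", "img_001.jpg" }; //placeholder paths
for (const std::string & path : paths) {
    FIBITMAP * raw = FreeImage_Load(FIF_JPEG, path.c_str());
    ImgData d;
    d.path = path;
    d.mp4Input = FreeImage_ConvertTo24Bits(raw); //GetBits() later hands the encoder packed BGR data
    FreeImage_Unload(raw);
    d.width = FreeImage_GetWidth(d.mp4Input);
    d.height = FreeImage_GetHeight(d.mp4Input);
    imgDataVec.push_back(d);
}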
I suspect my C compiler would reject this code. –
You are right: this code is C++ –