2017-06-20 51 views
0

有一個來自 http://www.delphiffmpeg.com 的視頻編碼示例。我們需要把一組 TBitmap(RGB)轉換爲 YCbCr(YUV)再交給編碼器,應該怎麼做?該示例填充的是虛擬(dummy)顏色。(問題:Delphi 中將 TBitmap(RGB)轉換爲 YCbCr 顏色格式)

(* encode 1 second of video *) 
    idx := 1; 
    for i := 0 to 25 - 1 do 
    begin 
    av_init_packet(@pkt); 
    pkt.data := nil; // packet data will be allocated by the encoder 
    pkt.size := 0; 

    //fflush(stdout); 
    (* prepare a dummy image *) 
    (* Y *) 
    for y := 0 to c.height - 1 do 
     for x := 0 to c.width - 1 do 
     PByte(@PAnsiChar(frame.data[0])[y * frame.linesize[0] + x])^ := x + y + i * 3; 

    (* Cb and Cr *) 
    for y := 0 to c.height div 2 - 1 do 
     for x := 0 to c.width div 2 - 1 do 
     begin 
     PByte(@PAnsiChar(frame.data[1])[y * frame.linesize[1] + x])^ := 128 + y + i * 2; 
     PByte(@PAnsiChar(frame.data[2])[y * frame.linesize[2] + x])^ := 64 + x + i * 5; 
     end; 

    frame.pts := i; 

    (* encode the image *) 
    ret := avcodec_encode_video2(c, @pkt, frame, @got_output); 
    if ret < 0 then 
    begin 
     Writeln(ErrOutput, 'Error encoding frame'); 
     ExitCode := 1; 
     Exit; 
    end; 

    if got_output <> 0 then 
    begin 
     Writeln(Format('Write frame %d (size=%d)', [idx, pkt.size])); 
     FileWrite(f, pkt.data^, pkt.size); 
     av_packet_unref(@pkt); 
     Inc(idx); 
    end; 
    end; 

但是,我們需要把位圖轉換爲 YCbCr,而不是用虛擬圖像來填充像素。下面是完整的源代碼:

(* 
* Video encoding example 
*) 
procedure video_encode_example(const filename: string; codec_id: TAVCodecID); 
const 
    endcode: array[0..3] of Byte = (0, 0, 1, $b7); 
var 
    codec: PAVCodec; 
    c: PAVCodecContext; 
    idx, i, ret, x, y, got_output: Integer; 
    f: THandle; 
    frame: PAVFrame; 
    pkt: TAVPacket; 
begin 
    Writeln(Format('Encode video file %s', [filename])); 

    (* find the mpeg1 video encoder *) 
    codec := avcodec_find_encoder(codec_id); 
    if not Assigned(codec) then 
    begin 
    Writeln(ErrOutput, 'Codec not found'); 
    ExitCode := 1; 
    Exit; 
    end; 

    c := avcodec_alloc_context3(codec); 
    if not Assigned(c) then 
    begin 
    Writeln(ErrOutput, 'Could not allocate video codec context'); 
    ExitCode := 1; 
    Exit; 
    end; 

    (* put sample parameters *) 
    c.bit_rate := 400000; 
    (* resolution must be a multiple of two *) 
    c.width := 352; 
    c.height := 288; 
    (* frames per second *) 
    c.time_base.num := 1; 
    c.time_base.den := 25; 
    (* emit one intra frame every ten frames 
    * check frame pict_type before passing frame 
    * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I 
    * then gop_size is ignored and the output of encoder 
    * will always be I frame irrespective to gop_size 
    *) 
    c.gop_size := 10; 
    c.max_b_frames := 1; 
    c.pix_fmt := AV_PIX_FMT_YUV420P; 

    if codec_id = AV_CODEC_ID_H264 then 
    av_opt_set(c.priv_data, 'preset', 'slow', 0); 

    (* open it *) 
    if avcodec_open2(c, codec, nil) < 0 then 
    begin 
    Writeln(ErrOutput, 'Could not open codec'); 
    ExitCode := 1; 
    Exit; 
    end; 

    f := FileCreate(filename); 
    if f = INVALID_HANDLE_VALUE then 
    begin 
    Writeln(ErrOutput, Format('Could not open %s', [filename])); 
    ExitCode := 1; 
    Exit; 
    end; 

    frame := av_frame_alloc(); 
    if not Assigned(frame) then 
    begin 
    Writeln(ErrOutput, 'Could not allocate video frame'); 
    ExitCode := 1; 
    Exit; 
    end; 
    frame.format := Ord(c.pix_fmt); 
    frame.width := c.width; 
    frame.height := c.height; 

    (* the image can be allocated by any means and av_image_alloc() is 
    * just the most convenient way if av_malloc() is to be used *) 
    ret := av_image_alloc(@frame.data[0], @frame.linesize[0], c.width, c.height, 
         c.pix_fmt, 32); 
    if ret < 0 then 
    begin 
    Writeln(ErrOutput, 'Could not allocate raw picture buffer'); 
    ExitCode := 1; 
    Exit; 
    end; 

    (* encode 1 second of video *) 
    idx := 1; 
    for i := 0 to 25 - 1 do 
    begin 
    av_init_packet(@pkt); 
    pkt.data := nil; // packet data will be allocated by the encoder 
    pkt.size := 0; 

    //fflush(stdout); 
    (* prepare a dummy image *) 
    (* Y *) 
    for y := 0 to c.height - 1 do 
     for x := 0 to c.width - 1 do 
     PByte(@PAnsiChar(frame.data[0])[y * frame.linesize[0] + x])^ := x + y + i * 3; 

    (* Cb and Cr *) 
    for y := 0 to c.height div 2 - 1 do 
     for x := 0 to c.width div 2 - 1 do 
     begin 
     PByte(@PAnsiChar(frame.data[1])[y * frame.linesize[1] + x])^ := 128 + y + i * 2; 
     PByte(@PAnsiChar(frame.data[2])[y * frame.linesize[2] + x])^ := 64 + x + i * 5; 
     end; 

    frame.pts := i; 

    (* encode the image *) 
    ret := avcodec_encode_video2(c, @pkt, frame, @got_output); 
    if ret < 0 then 
    begin 
     Writeln(ErrOutput, 'Error encoding frame'); 
     ExitCode := 1; 
     Exit; 
    end; 

    if got_output <> 0 then 
    begin 
     Writeln(Format('Write frame %d (size=%d)', [idx, pkt.size])); 
     FileWrite(f, pkt.data^, pkt.size); 
     av_packet_unref(@pkt); 
     Inc(idx); 
    end; 
    end; 

    (* get the delayed frames *) 
    repeat 
    //fflush(stdout); 

    ret := avcodec_encode_video2(c, @pkt, nil, @got_output); 
    if ret < 0 then 
    begin 
     Writeln(ErrOutput, 'Error encoding frame'); 
     ExitCode := 1; 
     Exit; 
    end; 

    if got_output <> 0 then 
    begin 
     Writeln(Format('Write frame %d (size=%d)', [idx, pkt.size])); 
     FileWrite(f, pkt.data^, pkt.size); 
     av_packet_unref(@pkt); 
     Inc(idx); 
    end; 
    until got_output = 0; 

    (* add sequence end code to have a real mpeg file *) 
    FileWrite(f, endcode[0], SizeOf(endcode)); 
    FileClose(f); 

    avcodec_close(c); 
    av_free(c); 
    av_freep(@frame.data[0]); 
    av_frame_free(@frame); 
    Writeln(''); 
end; 

是的,我們知道這個公式,但 (* Cb and Cr *) 那兩層循環只遍歷到 c.height div 2 - 1 和 c.width div 2 - 1,我們應該怎樣寫?我們所有的嘗試都能得到正確的圖像幾何形狀,但是顏色不正確……以下是我們目前的代碼:

(Y) 
    for y := 0 to c.height - 1 do 
    begin 
     Line := image.ScanLine[y]; 
     for x := 0 to c.width - 1 do 
     begin 
     Yy := Round(Line[x].R*0.29900 + Line[x].G*0.58700 + Line[x].B*0.11400); 
     PByte(@PAnsiChar(frame.data[0])[y * frame.linesize[0] + x])^ := Yy; 
     end; 
    end; 
    (Cb and Cr) 
    for y := 0 to c.height div 2 - 1 do 
    begin 
     Pixels := image.ScanLine[y]; 
     for x := 0 to c.width div 2 - 1 do 
     begin 
     Cb := Round(Line[x].R -0.16874 - Line[x].G 0.33126 + Line[x].B * 0.50000) + 128; 
     Cr := Round(Line[x].R 0.50000 - Line[x].G 0.41869 - Line[x].B * 0.08131) + 64; 
     PByte(@PAnsiChar(frame.data[1])[y * frame.linesize[1] + x])^ := Cr; 
     PByte(@PAnsiChar(frame.data[2])[y * frame.linesize[2] + x])^ := Cb; 
     //PByte(@PAnsiChar(frame.data[1])[y frame.linesize[1] + x])^ := 128 + y + i 2; 
     //PByte(@PAnsiChar(frame.data[2])[y frame.linesize[2] + x])^ := 64 + x + i 5; 
     end; 
    end; 

這段代碼應該怎麼修正?

+0

什麼問題? –

+0

對不起,查看主要問題更新 –

回答

2

不難找到 RGB 轉 YCbCr 的轉換公式:

Y = R * 0.29900 + G * 0.58700 + B * 0.11400 
Cb = R * -0.16874 + G * -0.33126 + B * 0.50000 + 128 
Cr = R * 0.50000 + G * -0.41869 + B * -0.08131 + 128 

這是準備在代碼實現。使用Scanline屬性可快速訪問每個像素的R,G,B分量

+0

好的,謝謝,看主要問題更新了 –

+0

你必須考慮所需的FourCC格式,它定義了YCbCr組件的打包。例如,4:2:2格式(YUV2)包含'Y0 Cb Y1 Cr'塊 – MBo