
I am using Dranger's tutorial01 (ffmpeg) to decode a video and obtain its frames, and I want to use OpenGL to display the video by drawing the decoded ffmpeg frames onto an OpenGL texture.

http://dranger.com/ffmpeg/tutorial01.html

The main function looks like this:

#include <stdio.h>

// Headers are an assumption: the post does not show them, but the code
// only compiles against the GLEW/GLUT and FFmpeg development headers.
#include <GL/glew.h>
#include <GL/glut.h>

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>

int main(int argc, char** argv) {
    // opengl stuff
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGBA);
    glutInitWindowSize(800, 600);
    glutCreateWindow("Hello GL");

    glutReshapeFunc(changeViewport);
    glutDisplayFunc(render);

    GLenum err = glewInit();
    if(GLEW_OK != err) {
        fprintf(stderr, "GLEW error");
        return 1;
    }

    glClear(GL_COLOR_BUFFER_BIT);

    glEnable(GL_TEXTURE_2D);
    GLuint texture;
    glGenTextures(1, &texture); // Make room for our texture
    glBindTexture(GL_TEXTURE_2D, texture);

    // ffmpeg stuff
    AVFormatContext *pFormatCtx = NULL;
    int             i, videoStream;
    AVCodecContext  *pCodecCtx = NULL;
    AVCodec         *pCodec = NULL;
    AVFrame         *pFrame = NULL;
    AVFrame         *pFrameRGB = NULL;
    AVPacket        packet;
    int             frameFinished;
    int             numBytes;
    uint8_t         *buffer = NULL;

    AVDictionary    *optionsDict = NULL;


    if(argc < 2) {
        printf("Please provide a movie file\n");
        return -1;
    }

    // Register all formats and codecs
    av_register_all();

    // Open video file
    if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL) != 0)
        return -1; // Couldn't open file

    // Retrieve stream information
    if(avformat_find_stream_info(pFormatCtx, NULL) < 0)
        return -1; // Couldn't find stream information

    // Dump information about file onto standard error
    av_dump_format(pFormatCtx, 0, argv[1], 0);

    // Find the first video stream
    videoStream = -1;
    for(i = 0; i < pFormatCtx->nb_streams; i++)
        if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoStream = i;
            break;
        }
    if(videoStream == -1)
        return -1; // Didn't find a video stream

    // Get a pointer to the codec context for the video stream
    pCodecCtx = pFormatCtx->streams[videoStream]->codec;

    // Find the decoder for the video stream
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if(pCodec == NULL) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }

    // Open codec
    if(avcodec_open2(pCodecCtx, pCodec, &optionsDict) < 0)
        return -1; // Could not open codec

    // Allocate video frame
    pFrame = av_frame_alloc();

    // Allocate an AVFrame structure
    pFrameRGB = av_frame_alloc();
    if(pFrameRGB == NULL)
        return -1;

    // Determine required buffer size and allocate buffer
    numBytes = avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
                                  pCodecCtx->height);
    buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));

    struct SwsContext *sws_ctx = sws_getContext(pCodecCtx->width,
        pCodecCtx->height, pCodecCtx->pix_fmt, 800,
        600, PIX_FMT_RGB24, SWS_BICUBIC, NULL,
        NULL, NULL);

    // Assign appropriate parts of buffer to image planes in pFrameRGB
    // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
    // of AVPicture
    avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
                   pCodecCtx->width, pCodecCtx->height);

    // Read frames and save first five frames to disk
    i = 0;
    while(av_read_frame(pFormatCtx, &packet) >= 0) {

        // Is this a packet from the video stream?
        if(packet.stream_index == videoStream) {
            // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished,
                                  &packet);

            // Did we get a video frame?
            if(frameFinished) {
                // Convert the image from its native format to RGB
                sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data,
                          pFrame->linesize, 0, pCodecCtx->height,
                          pFrameRGB->data, pFrameRGB->linesize);

                // additional opengl
                glBindTexture(GL_TEXTURE_2D, texture);

                //gluBuild2DMipmaps(GL_TEXTURE_2D, 3, pCodecCtx->width, pCodecCtx->height, GL_RGB, GL_UNSIGNED_INT, pFrameRGB->data[0]);
                //glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 840, 460, GL_RGB, GL_UNSIGNED_BYTE, pFrameRGB->data[0]);

                glTexImage2D(GL_TEXTURE_2D,      // Always GL_TEXTURE_2D
                             0,                  // 0 for now
                             GL_RGB,             // Format OpenGL uses for image
                             pCodecCtx->width,   // Width and height
                             pCodecCtx->height,
                             0,                  // The border of the image
                             GL_RGB,             // GL_RGB, because pixels are stored in RGB format
                             GL_UNSIGNED_BYTE,   // GL_UNSIGNED_BYTE, because pixels are stored as unsigned numbers
                             pFrameRGB->data[0]); // The actual pixel data
                // additional opengl end

                // Save the frame to disk
                if(++i <= 5)
                    SaveFrame(pFrameRGB, pCodecCtx->width,
                              pCodecCtx->height, i);
            }
        }

        glColor3f(1, 1, 1);
        glBindTexture(GL_TEXTURE_2D, texture);
        glBegin(GL_QUADS);
            glTexCoord2f(0, 1); glVertex3f(0, 0, 0);
            glTexCoord2f(1, 1); glVertex3f(pCodecCtx->width, 0, 0);
            glTexCoord2f(1, 0); glVertex3f(pCodecCtx->width, pCodecCtx->height, 0);
            glTexCoord2f(0, 0); glVertex3f(0, pCodecCtx->height, 0);
        glEnd();

        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
    }


    // Free the RGB image
    av_free(buffer);
    av_free(pFrameRGB);

    // Free the YUV frame
    av_free(pFrame);

    // Close the codec
    avcodec_close(pCodecCtx);

    // Close the video file
    avformat_close_input(&pFormatCtx);

    return 0;
}

Unfortunately I could not find a solution to my problem in this related question:

ffmpeg video to opengl texture

The program compiles, but it does not display any video texture. Only an empty OpenGL window is created.

Answer

One problem is that you are using a single-buffered pixel format. Most modern operating systems use window composition, which relies on double-buffered pixel formats. This is easy enough to change:

--- glutInitDisplayMode(GLUT_RGBA); 
+++ glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE); 

Then end your render function with a call to glutSwapBuffers().
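For illustration only, a minimal render skeleton under those two changes (none of this is verbatim from the post):

void render(void) {
    glClear(GL_COLOR_BUFFER_BIT);
    /* ... draw the textured quad ... */
    glutSwapBuffers(); // present the finished back buffer
}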

Another problem is that you never enter glutMainLoop, so no events (such as drawing requests from the operating system) are ever processed. Parts of your code must also move into the render function.
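As a sketch, the tail of main could then look like this; passing glutPostRedisplay directly as the idle callback is just one option, but it matches the void(void) signature GLUT expects:

    glutDisplayFunc(render);
    glutIdleFunc(glutPostRedisplay); // schedule a redraw whenever GLUT is idle
    glutMainLoop();                  // hands control to GLUT and does not return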

The frame decoding and texture upload must be placed either in an idle handler (you did not create one) that then calls glutPostRedisplay(), or directly in the render function, like this:

void render(void) {
    /* ... */

--- while(av_read_frame(pFormatCtx, &packet)>=0) {
+++ if(av_read_frame(pFormatCtx, &packet)>=0) {

        // Is this a packet from the video stream?
        if(packet.stream_index == videoStream) {
            // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

            // Did we get a video frame?
            if(frameFinished) {
                // Convert the image from its native format to RGB
                sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data,
                          pFrame->linesize, 0, pCodecCtx->height,
                          pFrameRGB->data, pFrameRGB->linesize);

                // additional opengl
                glBindTexture(GL_TEXTURE_2D, texture);

At this point you should use glTexSubImage2D instead of glTexImage2D, because it is much faster. The texture must have been created with glTexImage2D first, though; do that once, before calling glutMainLoop().
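A minimal sketch of that one-time setup, assuming it runs in main() once pCodecCtx is open (lowering the minification filter is an addition of mine, but without it the default mipmapping filter leaves the texture incomplete and nothing is drawn):

    glBindTexture(GL_TEXTURE_2D, texture);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB,
                 pCodecCtx->width, pCodecCtx->height, 0,
                 GL_RGB, GL_UNSIGNED_BYTE, NULL); // NULL: allocate storage only

With the storage allocated up front, render() merely updates it each frame: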

                glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0,
                                pCodecCtx->width, pCodecCtx->height,
                                GL_RGB, GL_UNSIGNED_BYTE, pFrameRGB->data[0]);

                /*
                glTexImage2D(GL_TEXTURE_2D,      // Always GL_TEXTURE_2D
                             0,                  // 0 for now
                             GL_RGB,             // Format OpenGL uses for image
                             pCodecCtx->width,   // Width and height
                             pCodecCtx->height,
                             0,                  // The border of the image
                             GL_RGB,             // GL_RGB, because pixels are stored in RGB format
                             GL_UNSIGNED_BYTE,   // GL_UNSIGNED_BYTE, because pixels are stored as unsigned numbers
                             pFrameRGB->data[0]); // The actual pixel data
                */
                // additional opengl end
            }
        }

        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
    }

    glColor3f(1, 1, 1);
    glBindTexture(GL_TEXTURE_2D, texture);
    glBegin(GL_QUADS);
        glTexCoord2f(0, 1); glVertex3f(0, 0, 0);
        glTexCoord2f(1, 1); glVertex3f(pCodecCtx->width, 0, 0);
        glTexCoord2f(1, 0); glVertex3f(pCodecCtx->width, pCodecCtx->height, 0);
        glTexCoord2f(0, 0); glVertex3f(0, pCodecCtx->height, 0);
    glEnd();

    /* ... */

    glutSwapBuffers();
}