2012-06-25 65 views
0

我是gstreamer的新手,我想從3gp文件中獲取音頻和視頻兩種緩衝區,並在回調中執行一些處理。運行gstreamer管道(無法獲取回調中的視頻和音頻數據)

（我在一個單獨的線程中啓動管道，管道通過 AudioCallback 回調提供音頻緩衝區，通過 VideoCallback 回調提供視頻緩衝區。）

這是我管線的樣子:

GstElement* audioQueue;//global variable , needed in on_pad_added (cant pass both while connecting demuxer to callback) 
GstElement* videoQueue;//global variable , needed in on_pad_added (cant pass both while connecting demuxer to callback) 


//static functions 
/* Bus watch callback installed via gst_bus_add_watch().
 * Quits the main loop on end-of-stream or on an error message.
 * @param bus  the pipeline bus (unused here)
 * @param msg  the message being delivered
 * @param data the GMainLoop* to stop
 * @return TRUE so the watch stays installed for further messages. */
static gboolean 
bus_call (GstBus* bus, GstMessage* msg, gpointer data) 
{ 
    GMainLoop* loop = (GMainLoop*) data; 

    switch (GST_MESSAGE_TYPE (msg)) 
    { 
    case GST_MESSAGE_EOS: 
     /* End of stream: stop pumping the pipeline. */
     g_main_loop_quit (loop); 
     break; 

    case GST_MESSAGE_ERROR: { 
     /* Initialize out-params defensively before parsing. */
     gchar *debug = NULL; 
     GError *error = NULL; 

     gst_message_parse_error (msg, &error, &debug); 
     g_free (debug); 

     g_printerr ("Error: %s\n", error->message); 
     g_error_free (error); 

     g_main_loop_quit (loop); 
     break; 
    } 
    default: 
     break; 
    } 
    /* Use the GLib TRUE macro (gboolean), not C++ `true`. */
    return TRUE; 
} 

/* Links src_element's src pad to sink_element's sink pad.
 * On failure, logs WHICH link failed (the original printed a bare
 * "Linking Error" with no element names and no trailing newline). */
static void link_two_elements(GstElement* src_element, GstElement* sink_element) 
{ 
    if(!gst_element_link(src_element, sink_element)) 
     g_printerr ("Linking Error: %s -> %s\n", 
        GST_ELEMENT_NAME (src_element), 
        GST_ELEMENT_NAME (sink_element)); 

} 

/* "pad-added" handler for the qtdemux element.
 * Inspects the new pad's caps and links it to the matching global queue
 * (audioQueue for "audio/...", videoQueue for "video/...").
 * Fixes vs. original: the caps ref returned by gst_pad_get_caps() is now
 * released, the video branch gets the same NULL sinkpad guard as the audio
 * branch, and failed links are logged instead of silently ignored. */
static void 
on_pad_added (GstElement *element, 
       GstPad  *pad, 
       gpointer data) 
{ 
    GstCaps *caps; 
    GstStructure *str; 
    const gchar *tex; 
    GstPad* sinkpad; 

    /* check media type */ 
    caps = gst_pad_get_caps (pad); 
    str = gst_caps_get_structure (caps, 0); 
    tex = gst_structure_get_name (str); 

    if(g_strrstr(tex,"audio")) 
    { 
    sinkpad = gst_element_get_static_pad (audioQueue, "sink"); 

    if(sinkpad) 
    { 
     GstPadLinkReturn linkReturn = gst_pad_link (pad, sinkpad); 
     if (GST_PAD_LINK_FAILED (linkReturn)) 
      g_printerr ("audio pad link failed: %d\n", linkReturn); 
     gst_object_unref (sinkpad); 
    } 
    } 

    if(g_strrstr(tex,"video")) 
    { 
    sinkpad = gst_element_get_static_pad (videoQueue, "sink"); 

    if(sinkpad)  /* original dereferenced this without a NULL check */ 
    { 
     GstPadLinkReturn linkReturn = gst_pad_link (pad, sinkpad); 
     if (GST_PAD_LINK_FAILED (linkReturn)) 
      g_printerr ("video pad link failed: %d\n", linkReturn); 
     gst_object_unref (sinkpad); 
    } 
    } 

    /* gst_pad_get_caps() returns a new reference; release it. */ 
    gst_caps_unref (caps); 
} 

void runPipeline() 
    { 
    GMainLoop *loop; 
    GstElement *__pPipeline, *source, *demuxer, *audioDecoder, *audioConverter, *audioresample, /**audioQueue,*/ *audioSink, *videoDecoder, *videoSink, /**videoQueue,*/ *ffmpegcolorspace, *videoscale; 
    GstBus* bus; 

    //Initialisation 
    gst_init (null,null); 

    loop = g_main_loop_new (NULL, FALSE); 

    // Create gstreamer elements 
    __pPipeline = gst_pipeline_new("test_appsink"); 
    source = gst_element_factory_make ("filesrc", "file-source"); 
    demuxer = gst_element_factory_make("qtdemux", "demuxer"); 
    //audioDecoder = gst_element_factory_make("ffdec_mp3", "audioDecoder"); 
    audioDecoder = gst_element_factory_make("decodebin", "audioDecoder"); 
    audioConverter = gst_element_factory_make("audioconvert", "audioConverter"); 
    audioresample = gst_element_factory_make("audioresample", "audioresample"); 
    audioSink = gst_element_factory_make("appsink", "audioSink"); 
    audioQueue = gst_element_factory_make("queue2", "audioQueue"); 
    //videoDecoder = gst_element_factory_make("ffdec_h264", "videoDecoder"); 
    videoQueue = gst_element_factory_make("queue2", "videoQueue"); 
    videoDecoder = gst_element_factory_make("decodebin ", "videoDecoder"); 
    ffmpegcolorspace = gst_element_factory_make("ffmpegcolorspace", "ffmpegcolorspace"); 
    videoscale = gst_element_factory_make("videoscale", "videoscale"); 
    videoSink = gst_element_factory_make("appsink", "videoSink"); 
    //appsink = gst_element_factory_make("appsink", "sink-buffer"); 

    if (!__pPipeline || !source || !demuxer || !audioDecoder || !audioConverter ||!audioresample || !audioSink || !videoSink || !audioQueue || !videoQueue || !videoDecoder || !ffmpegcolorspace || !videoscale) 
    { 
     //return -1; 
    } 

    //we set the input filename to the source element 
    g_object_set (G_OBJECT (source), "location", "/etc/20000101-161404.3gp", NULL); 

    //Make appsink emit the "new-preroll" and "new-buffer" signals. 
    gst_app_sink_set_emit_signals ((GstAppSink*) audioSink, TRUE); 
    gst_app_sink_set_emit_signals ((GstAppSink*) videoSink, TRUE); 

    //we add a message handler 
    bus = gst_pipeline_get_bus (GST_PIPELINE (__pPipeline)); 
    gst_bus_add_watch (bus, bus_call, loop); 
    gst_object_unref (bus); 

    //we add all elements into the pipeline 
    gst_bin_add_many (GST_BIN (__pPipeline), 
        source, demuxer, videoDecoder, audioDecoder, audioConverter, audioresample, audioSink, videoSink, 
        audioQueue, videoQueue, ffmpegcolorspace, videoscale, NULL); 


    //link source and demuxer seperately 
    link_two_elements(source, demuxer); 

    //link rest of the elements 
    int retValVideoLinking = (int)gst_element_link_many (videoQueue, videoDecoder, ffmpegcolorspace, videoscale, videoSink, NULL); 
    int retValAudioLinking = (int)gst_element_link_many (audioQueue, audioDecoder, audioConverter, audioresample, audioSink, NULL); 

    gulong sigConRet = g_signal_connect (demuxer, "pad-added", G_CALLBACK (on_pad_added), null); 

    _ArAudioIn audioInstance = _ArAudioIn::GetArAudioInstance(); 

    g_signal_connect (videoSink, "new-buffer", G_CALLBACK (AudioCallback), null);//AudioCallback static API 
    g_signal_connect (audioSink, "new-buffer", G_CALLBACK (VideoCallback), null);//VideoCallback static API 

    //Set the pipeline to "playing" state 
    GstStateChangeReturn state = gst_element_set_state (__pPipeline, GST_STATE_PLAYING); 

    g_main_loop_run (loop); 
    return null; 
    } 


I'm just getting a single video buffer in my Videocallback and also in the on_pad_addded : I'm getting a linking err for audio pad linking. 
GST_PAD_LINK_NOFORMAT   = -4, 

I'm trying to link the queue's sink pad to the pad received in on_pad_added; the same approach is working for video but not for audio. 

If anybody has any idea about this, then please give me some pointers to get rid of this error and make this pipeline work. 
+0

這將是很好,如果你清理你的代碼之前要求我們調試它。作爲一般建議,請檢查返回值並記錄警告或退出(1)以確保管道設置正常工作。我也開始使用普通的視頻和音頻連接來檢查它是否播放。 – ensonic

回答

0

它如果你在要求我們調試之前清理你的代碼會很好。作爲一般建議,檢查返回值並記錄警告或簡單地退出(1)以確保管道設置工作(例如,在pad_added處理程序中)。我也開始使用普通的視頻和音頻連接來檢查它是否播放。

最後,從pipleine中提取數據通常是一個壞主意。也許你可以在回調中告訴你想要做什麼,這樣我們可以給出更好的建議。

相關問題