GStreamer code for playing an AVI file is hanging

I am new to GStreamer. I have written code for playing an AVI file using GStreamer, but on executing it, the program just hangs after a while. I am unable to work out what the problem is; can someone help me, please?
The code and the output is as below:
Code:
#include<stdio.h>
#include<gst/gst.h>
#include<glib.h>
//Function to process message on bus of pipeline
gboolean process_message(GstBus *bus, GstMessage *msg,gpointer data);
//Function to add pad dynamically for ogg demux
void dynamic_addpad(GstElement *element, GstPad *pad, gpointer data);
void dynamic_decodepad (GstElement* object, GstPad* arg0, gboolean arg1,gpointer user_data);
GstElement *source, *demuxer, *audio_decoder, *video_decoder, *audio_convertor,*video_convertor, *audio_sink,*video_sink,*audioqueue,*videoqueue;//*audio_demuxer, *video_demuxer,
int main(int argc,char* argv[])
{
GstPipeline *pipeline;
GstBin *Bin;
GstBus *bus;
GMainLoop *Mainloop;
gst_init (&argc,&argv);
Mainloop = g_main_loop_new(NULL,FALSE);//NULL to use the current context and False to tell its not in running state
pipeline = gst_pipeline_new("PIPELINE");
Bin = GST_BIN(pipeline);
bus = gst_pipeline_get_bus(pipeline);
source = gst_element_factory_make("filesrc","file-source");
g_object_set(G_OBJECT(source),"location",argv[1],NULL);
demuxer = gst_element_factory_make("avidemux","avi-demuxer");
audioqueue = gst_element_factory_make("queue","Queue for audio");
videoqueue = gst_element_factory_make("queue","Queue for video");
video_decoder = gst_element_factory_make("decodebin","decoderbin");//"Vorbis audio decoder","vorbis");
audio_convertor = gst_element_factory_make("audioconvert","audio convertor");//"Audio converter","audioconvert");
video_convertor = gst_element_factory_make("videoscale","video convertor");//"Audio converter","audioconvert");
audio_sink = gst_element_factory_make("autoaudiosink","Auto audio sink");
video_sink = gst_element_factory_make("xvimagesink","XV video sink ");
if(!source || !demuxer || !audioqueue || !videoqueue || !video_decoder ||!audio_convertor || !video_convertor || !audio_sink || !video_sink )
{ g_print("Could not not create element\n");
return 0;
}
gst_bin_add(Bin,source);
gst_bin_add_many(Bin,demuxer,audioqueue,videoqueue,audio_convertor,video_decoder,video_convertor,audio_sink,video_sink,NULL);
gst_element_link(source,demuxer);
gst_element_link_many(audioqueue,video_decoder,audio_convertor,audio_sink,NULL);
gst_element_link_many(videoqueue,video_decoder,video_convertor,video_sink,NULL);
g_signal_connect(demuxer,"pad-added",G_CALLBACK(dynamic_addpad),NULL);//demuxer and decoder are passed as instance and data as pads of both the elements are linked in dynamic_addpad
g_signal_connect(video_decoder,"new-decoded-pad",G_CALLBACK(dynamic_decodepad),NULL);//demuxer and decoder are passed as instance and data as pads of both the elements are linked in dynamic_addpad
gst_bus_add_watch(bus,process_message,Mainloop); //Mainloop is passed as user data as in the process_message actions are taken on the loop
g_object_unref(bus);
g_print("In playing state\n");
gst_element_set_state(pipeline,GST_STATE_PLAYING);//Pipeline is also a bin and bin is also an element at abstract level and hence gst_element_set_state call is used to set state of pipeline.
g_main_loop_run(Mainloop);
g_print("In playing state2\n");
gst_element_set_state(pipeline,GST_STATE_NULL);
g_object_unref(G_OBJECT(pipeline));
}
//Function to process message on bus of pipeline
gboolean process_message(GstBus *bus, GstMessage *msg,gpointer data)
{
GError *error;
gchar *debug;
GMainLoop *loop = (GMainLoop *)data;
g_print(" In process message msg->type : %d\n",GST_MESSAGE_TYPE(msg));
switch(GST_MESSAGE_TYPE(msg))
{
case GST_MESSAGE_UNKNOWN :
g_print("GST_MESSAGE_UNKNOWN \n");
break;
case GST_MESSAGE_EOS :
g_print("GST_MESSAGE_EOS \n");
g_main_loop_quit(loop);
break;
case GST_MESSAGE_ERROR :
g_print("GST_MESSAGE_ERROR \n");
gst_message_parse_error (msg, &error, &debug);
g_free(debug);
//if(!error)
{
g_print("GST_MESSAGE_ERROR message : %s \n",error->message);
}
g_main_loop_quit(loop);
break;
case GST_MESSAGE_WARNING :
g_print("GST_MESSAGE_WARNING \n");
break;
case GST_MESSAGE_INFO :
g_print("GST_MESSAGE_INFO \n");
break;
case GST_MESSAGE_TAG :
g_print("GST_MESSAGE_TAG \n");
break;
case GST_MESSAGE_BUFFERING:
g_print("GST_MESSAGE_BUFFERING \n");
break;
case GST_MESSAGE_STATE_CHANGED:
g_print("GST_MESSAGE_STATE_CHANGED \n");
break;
default :
g_print("default \n");
break;
}
return TRUE; //always return TRUE so the watch stays registered; returning FALSE would remove it
}
//Function to add pad dynamically for ogg demux
void dynamic_addpad(GstElement *element, GstPad *pad, gpointer data)
{
GstPad *audiodemuxsink;
GstPad *videodemuxsink;
GstElement *decoder = (GstElement *)data;
g_print(" In dynamic ADDING PAD\n");
audiodemuxsink = gst_element_get_static_pad(audioqueue,"sink");
gst_pad_link(pad,audiodemuxsink );
videodemuxsink = gst_element_get_static_pad(videoqueue,"sink");
gst_pad_link(pad,videodemuxsink );
g_print(" In dynamic ADDING PAD2\n");
}
void dynamic_decodepad (GstElement* object, GstPad* pad, gboolean arg1,gpointer user_data)
{
GstPad *videoconvertsink;
GstPad *audioconvertsink ;
g_print(" In dynamic_decodepad ADDING PAD\n");
videoconvertsink = gst_element_get_static_pad(video_convertor,"sink");
gst_pad_link(pad,videoconvertsink);
audioconvertsink = gst_element_get_static_pad(audio_convertor,"sink");
gst_pad_link(pad,audioconvertsink );
g_print(" In dynamic_decodepad ADDING PAD2\n");
}
Output:
In playing state
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 8192
default
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 8192
default
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 8192
default
In process message msg->type : 8192
default
In dynamic ADDING PAD
In dynamic ADDING PAD2
In dynamic ADDING PAD
In dynamic ADDING PAD2
In process message msg->type : 16
GST_MESSAGE_TAG
In process message msg->type : 16
GST_MESSAGE_TAG
In process message msg->type : 16
GST_MESSAGE_TAG
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In dynamic_decodepad ADDING PAD
In dynamic_decodepad ADDING PAD2
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
In process message msg->type : 64
GST_MESSAGE_STATE_CHANGED
It hangs at this point.
Any help is appreciated.
Thanks in advance.

Your code is wrong in several ways; that is why my answer is so long.
First of all, gst_pipeline_new returns GstElement*, not GstPipeline*:
- pipeline = gst_pipeline_new("PIPELINE");
+ GstElement *pipeline = gst_pipeline_new("PIPELINE");
Bin = GST_BIN(pipeline);
- bus = gst_pipeline_get_bus(pipeline);
+ bus = gst_pipeline_get_bus(GST_PIPELINE(pipeline));
Then, your pipeline is wrong: you are trying to decode both streams (audio and video) with one decodebin, but you need two. Create the second one and don't forget to add it to the bin:
videoqueue = gst_element_factory_make("queue","Queue for video");
+ audio_decoder = gst_element_factory_make("decodebin","a_decodebin");
video_decoder = gst_element_factory_make("decodebin","decoderbin");//"Vorbis audio decoder","vorbis");
- gst_bin_add_many(Bin,demuxer,audioqueue,videoqueue,audio_convertor,video_decoder,video_convertor,audio_sink,video_sink,NULL);
+ gst_bin_add_many(
+ Bin,
+ demuxer,
+ audioqueue,videoqueue,
+ audio_decoder,audio_convertor,
+ video_decoder,video_convertor,
+ audio_sink,video_sink,
+ NULL);
And, by the way, it's better to use decodebin2, as decodebin is deprecated.
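A minimal sketch of that swap, assuming the same variable names as above (decodebin2 keeps the new-decoded-pad signal used later, so the rest of the code stays the same):
audio_decoder = gst_element_factory_make("decodebin2","a_decodebin");
video_decoder = gst_element_factory_make("decodebin2","v_decodebin");
if (!audio_decoder || !video_decoder)
g_print("Could not create decodebin2 elements\n");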
Then you are linking some elements dynamically: the demuxer to the queues, and the decodebins to the converters. Hence you should not create a link between the decodebins and the converters with gst_element_link_many:
gst_element_link(source,demuxer);
- gst_element_link_many(audioqueue,video_decoder,audio_convertor,audio_sink,NULL);
- gst_element_link_many(videoqueue,video_decoder,video_convertor,video_sink,NULL);
+ gst_element_link_many(audioqueue,audio_decoder,NULL);
+ gst_element_link_many(audio_convertor,audio_sink,NULL);
+ gst_element_link_many(videoqueue,video_decoder,NULL);
+ gst_element_link_many(video_convertor,video_sink,NULL);
And of course, as we added the audio_decoder decodebin, we need to handle its pad-creation signal:
+ g_signal_connect(audio_decoder,"new-decoded-pad",G_CALLBACK(dynamic_decodepad),NULL);
g_signal_connect(video_decoder,"new-decoded-pad",G_CALLBACK(dynamic_decodepad),NULL);
And now we are at the most interesting part.
void dynamic_addpad(GstElement *element, GstPad *pad, gpointer data)
{
GstPad *audiodemuxsink;
GstPad *videodemuxsink;
GstElement *decoder = (GstElement *)data;
g_print(" In dynamic ADDING PAD\n");
audiodemuxsink = gst_element_get_static_pad(audioqueue,"sink");
gst_pad_link(pad,audiodemuxsink );
videodemuxsink = gst_element_get_static_pad(videoqueue,"sink");
gst_pad_link(pad,videodemuxsink );
g_print(" In dynamic ADDING PAD2\n");
}
This is completely wrong! dynamic_addpad is called on each pad creation. avidemux commonly creates two pads (one for each data stream), "audio_00" and "video_00", so dynamic_addpad will be called twice, and we need to distinguish what to link depending on the pad name:
void dynamic_addpad(GstElement *element, GstPad *pad, gpointer data)
{
char* pad_name = gst_pad_get_name(pad);
g_print(" In dynamic ADDING PAD %s\n", pad_name);
if (g_str_has_prefix(pad_name,"audio")) {
GstPad *audiodemuxsink = gst_element_get_static_pad(audioqueue,"sink");
gst_pad_link(pad,audiodemuxsink );
}
else if (g_str_has_prefix(pad_name,"video")) {
GstPad *videodemuxsink = gst_element_get_static_pad(videoqueue,"sink");
gst_pad_link(pad,videodemuxsink );
}
g_free (pad_name);
}
Almost the same applies to dynamic_decodepad. Since only one src pad is created by each decodebin, it would be easier to create separate handlers for video_decoder and audio_decoder.
But for pedagogical reasons I will do it in one function. Now we can distinguish which element to connect to the pad by its caps.
void dynamic_decodepad (GstElement* object, GstPad* pad, gboolean arg1,gpointer user_data)
{
GstPad* videoconvertsink = gst_element_get_static_pad(video_convertor,"sink");
if (gst_pad_can_link(pad,videoconvertsink)) {
gst_pad_link(pad,videoconvertsink);
}
GstPad* audioconvertsink = gst_element_get_static_pad(audio_convertor,"sink");
if (gst_pad_can_link(pad,audioconvertsink)) {
gst_pad_link(pad,audioconvertsink);
}
}
gst_pad_can_link will not work in dynamic_addpad because it's possible to connect the queue element both to "audio_00" and to "video_00".
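For reference, the separate-handler variant mentioned above could look like this (a sketch; the handler names audio_decodepad and video_decodepad are mine):
void audio_decodepad (GstElement* object, GstPad* pad, gboolean arg1, gpointer user_data)
{
GstPad *sink = gst_element_get_static_pad(audio_convertor,"sink");
gst_pad_link(pad,sink);
gst_object_unref(sink);
}
void video_decodepad (GstElement* object, GstPad* pad, gboolean arg1, gpointer user_data)
{
GstPad *sink = gst_element_get_static_pad(video_convertor,"sink");
gst_pad_link(pad,sink);
gst_object_unref(sink);
}
// registered instead of the shared callback:
g_signal_connect(audio_decoder,"new-decoded-pad",G_CALLBACK(audio_decodepad),NULL);
g_signal_connect(video_decoder,"new-decoded-pad",G_CALLBACK(video_decodepad),NULL);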
That's it. Don't hesitate to ask if you have other questions.

Related

How can I fix "Internal data stream error" generated by the Gstreamer appsrc element?

I am trying to use the appsrc element of GStreamer in a trivial example. I am creating a buffer, filling it with dummy data and trying to send it to a fakesink. The code is a watered-down version of the tutorial given in the link below. It only has two elements, appsrc and fakesink. My code is also given below.
When I run this code I get "Error: Internal data stream error." I have searched for a solution, and as far as I can tell the issue is usually attributed to a caps mismatch between elements. I don't think that is the issue here, since I only have two elements.
I have tried setting the "caps" property of appsrc to NULL here, but I have also tried passing the proper caps, which were "audio/G729". Both failed. Also, it seems like the appsrc is fine for the first 4 chunks, but then it generates an error. It is always after 4 chunks. Not sure if that is a clue.
Also, I am running the code on an embedded system (ARM Cortex-A15), but I don't think that is related. I can successfully stream a G729-encoded audio file on this system via the following command:
gst-launch-1.0 -v filesrc location=encodedData.g729 ! 'audio/G729' ! rtpg729pay ! udpsink host=192.168.XX.XX auto-multicast=true port=5004
What could be the reason behind this error? How can I fix this?
Thanks for all the responses.
Link: Link to Tutorial
Code:
#include <gst/gst.h>
#include <glib.h>
#include "glibconfig.h"
#include <stdio.h>
#define CHUNK_SIZE 10
typedef struct gstreamstruct {
GstElement *pipeline, *app_source, *fakesink;
guint sourceid; /* To control the GSource */
GMainLoop *main_loop; /* GLib's Main Loop */
guint sample;
} gstreamstruct;
static gboolean busCall (GstBus *bus, GstMessage *msg, gpointer data)
{
GMainLoop *loop = (GMainLoop *) data;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_EOS:
g_print ("End of stream\n");
g_main_loop_quit (loop);
break;
case GST_MESSAGE_ERROR: {
gchar *debug;
GError *error;
gst_message_parse_error (msg, &error, &debug);
g_free (debug);
g_printerr ("Error: %s\n", error->message);
g_error_free (error);
g_main_loop_quit (loop);
break;
}
default:
break;
}
return TRUE;
}
static gboolean pushData (gstreamstruct *streamer)
{
printf("--->PushData!\n");
GstMapInfo map;
GstBuffer *buffer;
GstFlowReturn ret;
guint8 *raw;
int i;
/* Create a new empty buffer */
buffer = gst_buffer_new_and_alloc (CHUNK_SIZE);
/* Set its timestamp and duration */
GST_BUFFER_TIMESTAMP (buffer) = gst_util_uint64_scale (streamer->sample, GST_SECOND, 1000);
GST_BUFFER_DURATION (buffer) = gst_util_uint64_scale (CHUNK_SIZE, GST_SECOND, 1000);
//Put some dummy into buffer
gst_buffer_map (buffer, &map, GST_MAP_WRITE);
raw = (guint8 *)map.data;
for(i = 0; i<CHUNK_SIZE; i++)
{
raw[0] = 0;
}
//update sample value
streamer->sample += CHUNK_SIZE;
printf("currentDuration: %d ms\n", streamer->sample);
gst_buffer_unmap (buffer, &map);
/* Push the buffer into the appsrc */
g_signal_emit_by_name (streamer->app_source, "push-buffer", buffer, &ret);
/* Free the buffer now that we are done with it */
gst_buffer_unref (buffer);
if (ret != GST_FLOW_OK)
{
/* We got some error, stop sending data */
printf("Data sending Failed!\n");
return FALSE;
}
return TRUE;
}
/* This signal callback triggers when appsrc needs data.
* Here, we add an idle handler to the mainloop to start pushing data into the appsrc
*
* Whenever Gstreamer goes idle, it will call this function. Maybe we can utilize this for
* G729 etc!
*
* */
static void startFeed (GstElement *source, guint size, gstreamstruct *streamer)
{
if (streamer->sourceid == 0)
{
g_print ("Start feeding\n");
streamer->sourceid = g_idle_add ((GSourceFunc) pushData, streamer);
}
}
/* This callback triggers when appsrc has enough data and we can stop sending.
* We remove the idle handler from the mainloop */
static void stopFeed (GstElement *source, gstreamstruct *streamer)
{
if (streamer->sourceid != 0)
{
g_print ("Stop feeding\n");
g_source_remove (streamer->sourceid);
streamer->sourceid = 0;
}
}
void appSrcTest (void* args)
{
printf("---> appSrcTest\n");
gstreamstruct my_streamer;
GstCaps *caps;
GstBus *bus;
//GstPad *pad;
guint bus_watch_id;
memset (&my_streamer, 0, sizeof (gstreamstruct));
gst_init (NULL, NULL);
my_streamer.main_loop = g_main_loop_new (NULL, FALSE);
printf("Gst Initialized!\n");
my_streamer.sample = 0;
my_streamer.app_source = gst_element_factory_make("appsrc", "appSrc");
my_streamer.fakesink = gst_element_factory_make("fakesink", "fakeSink");
my_streamer.pipeline = gst_pipeline_new ("g729-pipeline");
if(!my_streamer.app_source || !my_streamer.fakesink || !my_streamer.pipeline)
{
g_printerr ("Not all elements could be created.\n");
return;
}
printf("Elements Created!\n");
caps=NULL;
/*
caps = gst_caps_new_simple ("audio/G729",
"channels", G_TYPE_INT, 1,
"rate", G_TYPE_INT, 8000,
NULL);
*/
//g_object_set (G_OBJECT(my_streamer.app_source), "caps", caps, "format", GST_FORMAT_TIME, NULL);
g_signal_connect (my_streamer.app_source, "need-data", G_CALLBACK (startFeed), &my_streamer);
g_signal_connect (my_streamer.app_source, "enough-data", G_CALLBACK (stopFeed), &my_streamer);
printf("Properties Set!\n");
/* we add a message handler */
bus = gst_pipeline_get_bus (GST_PIPELINE (my_streamer.pipeline));
bus_watch_id = gst_bus_add_watch (bus, busCall, my_streamer.main_loop);
gst_object_unref (bus);
gst_bin_add_many (GST_BIN (my_streamer.pipeline), my_streamer.app_source, my_streamer.fakesink, NULL);
printf("Elements Added!\n");
printf("Pipeline Starting!\n");
gst_element_set_state (my_streamer.pipeline, GST_STATE_PLAYING);
g_main_loop_run (my_streamer.main_loop);
gst_element_set_state (my_streamer.pipeline, GST_STATE_NULL);
gst_object_unref (my_streamer.pipeline);
g_source_remove (bus_watch_id);
g_main_loop_unref (my_streamer.main_loop);
}
The output from this code is generated as:
Gst Initialized!
Elements Created!
Properties Set!
Elements Added!
Pipeline Starting!
Start feeding
--->PushData!
currentDuration: 10 ms
--->PushData!
currentDuration: 20 ms
--->PushData!
currentDuration: 30 ms
--->PushData!
currentDuration: 40 ms
Error: Internal data stream error.
Edit: After more trials I have realized that the error is not generated after 4 chunks consistently. When I reboot the system and call the function, the error is generated after 156 chunks, for instance. After a couple more tries, the error starts to occur much sooner (like 4 chunks). Also, I have tried running the code with GST_DEBUG=2 but could not really find anything useful. Below you can find the DEBUG output.
DEBUG:
---> appSrcTest
Gst Initialized!
Elements Created!
Properties Set!
Elements Added!
Pipeline Starting!
Start feeding
--->PushData!
currentDuration: 10 ms
--->PushData!
currentDuration: 20 ms
--->PushData!
currentDuration: 30 ms
--->PushData!
currentDuration: 40 ms
--->PushData!
0:00:00.084280528 1344 0x18fa00 WARN basesrc gstbasesrc.c:3055:gst_base_src_loop:<appSrc> error: Internal data stream error.
currentDuration: 50 ms
--->PushData!
0:00:00.084342504 1344 0x18fa00 WARN basesrc gstbasesrc.c:3055:gst_base_src_loop:<appSrc> error: streaming stopped, reason not-linked (-1)
currentDuration: 60 ms
--->PushData!
currentDuration: 70 ms
Error: Internal data stream error.
Edit 2: After further debugging I realized that the fakesink element was not linked to the appsrc. So I manually linked them via the following line:
gst_element_link_pads (my_streamer.app_source, "src", my_streamer.fakesink, "sink");
I think it works fine now; I will come back again after I verify it completely.
Edit 3: Yeah, I can confirm that was the issue. I forgot to link the elements.
The following line solves the issue:
gst_element_link_pads (my_streamer.app_source, "src", my_streamer.fakesink, "sink");
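For context, a minimal sketch of where that call fits in appSrcTest above, with the return value checked:
gst_bin_add_many (GST_BIN (my_streamer.pipeline), my_streamer.app_source, my_streamer.fakesink, NULL);
/* link appsrc's src pad to fakesink's sink pad; without this link the
source loops with "reason not-linked (-1)", as seen in the debug log */
if (!gst_element_link_pads (my_streamer.app_source, "src", my_streamer.fakesink, "sink"))
g_printerr ("Failed to link appsrc to fakesink.\n");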
Don't know how, but this thing worked for me:
sudo apt remove gstreamer1.0-vaapi

pad creation and some mistakes on gst_parse_launch

I am a newbie to GStreamer and I would like to know if we have to create source and sink pads for converters like videoconvert in a pipeline. I have a pipeline like this:
gst-launch-1.0 v4l2src ! video/x-raw,format=YUY2 ! videoconvert ! xvimagesink
I am trying to create a simple C application to understand the creation of pads, and I would like to know if videoconvert has a source pad and a sink pad too. I am creating a source and a sink pad for the filter.
EDIT:
Yeah, well, I tried following the dynamic pipelines example and wrote the code below.
#include <gst/gst.h>
// easier to pass them as callbacks
typedef struct _CustomData{
GstElement *pipeline;
GstElement *source;
GstElement *convert;
GstElement *sink;
}CustomData;
// callback function
// here src is the v4l2src; newpad is the GstPad that has just been added to the src element. This is usually the pad to which we want to link
// data is the pointer we provided when attaching to the signal.
static void pad_added_handler(GstElement *src, GstPad *new_pad,CustomData *data)
{
g_print("In pad handler\n");
GstPad *sink_pad = gst_element_get_static_pad(data->convert, "sink");
GstPadLinkReturn ret;
GstCaps *new_pad_caps = NULL;
GstStructure *new_pad_struct = NULL;
const gchar *new_pad_type = NULL;
if(gst_pad_is_linked(sink_pad))
{
g_print("we are linked. igonring\n");
}
// check the new pad types
// we have previously created a piece of pipeline which deals with videoconvert linked with xvimagesink, and we will not be able to link it to a pad producing video.
//gst-pad_get_current_caps()- retrieves current capabilities of pad
new_pad_caps = gst_pad_get_current_caps(new_pad);
new_pad_struct = gst_caps_get_structure(new_pad_caps, 0);
new_pad_type = gst_structure_get_name(new_pad_struct);
g_print ("It has type '%s' which is not raw audio. Ignoring.\n", new_pad_type);
if(!g_str_has_prefix(new_pad_type, "video/x-raw,format=(string)YUY2"))
{
g_print("It has new pad type");
}
// gst_pad_link tries to link two pads . the link must be specified from source to sink and both pads must be owned by elements residing in same pipeline
ret = gst_pad_link(new_pad, sink_pad);
if(GST_PAD_LINK_FAILED(ret))
{
g_print("type is new_pad_type");
}
if(new_pad_caps !=NULL)
{
gst_caps_unref(new_pad_caps);
}
gst_object_unref(sink_pad);
}
int main(int argc, char *argv[])
{
GMainLoop *loop;
CustomData data;
GstBus *bus;
GstMessage *msg;
gboolean terminate = FALSE;
gst_init(&argc, &argv);
// loop = g_main_loop_new(NULL, FALSE);
// create the elements
data.source = gst_element_factory_make("v4l2src", "source");
data.convert = gst_element_factory_make("videoconvert", "convert");
data.sink = gst_element_factory_make("xvimagesink", "sink");
data.pipeline = gst_pipeline_new("new-pipeline");
if(!data.pipeline || !data.source || !data.convert || !data.sink)
{
g_printerr("Not all elements could be created\n");
return -1;
}
//we did not link source at this point of time, we will do it later
gst_bin_add_many(GST_BIN(data.pipeline), data.source, data.convert, data.sink, NULL);
// we link the convert element to sink, but not to source. we don't have the source pads here yet, so only videoconvert->sink is linked and the source is left unlinked
// gst_element_link(data.source, data.convert);
if(!gst_element_link( data.convert,data.sink))
{
g_printerr("elements could not be linked\n");
gst_object_unref(data.pipeline);
return -1;
}
// we set the device source
//g_object_set(source, "device", "/dev/video0", NULL);
//connect to pad added signal.
// we want to attach pad added signal to source element. to do so, we are using g_signal_connect and provide callback function and datapointer.
// when source element has enough information to start producing data, it will create source pads and trigger the pad added signal. at this point, our callback is called
g_print("before signal connect\n");
gint id= g_signal_connect(G_OBJECT(data.source), "pad-added", G_CALLBACK(pad_added_handler), &data );
g_print("after signal connect with id = %d\n", id);
//g_signal_connect(G_OBJECT(data.source), "pad-added", G_CALLBACK(handler), &data);
// gst_element_link(data.source, data.convert);
GstStateChangeReturn ret;
ret =gst_element_set_state (data.pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr ("Unable to set the pipeline to the playing state.\n");
gst_object_unref (data.pipeline);
return -1;
}
// g_main_loop_run(loop);
/* Listen to the bus */
bus = gst_element_get_bus (data.pipeline);
do {
msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,
GST_MESSAGE_STATE_CHANGED | GST_MESSAGE_ERROR | GST_MESSAGE_EOS);
/* Parse message */
if (msg != NULL) {
GError *err;
gchar *debug_info;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_ERROR:
gst_message_parse_error (msg, &err, &debug_info);
g_printerr ("Error received from element %s: %s\n", GST_OBJECT_NAME (msg->src), err->message);
g_printerr ("Debugging information: %s\n", debug_info ? debug_info : "none");
g_clear_error (&err);
g_free (debug_info);
terminate = TRUE;
break;
case GST_MESSAGE_EOS:
g_print ("End-Of-Stream reached.\n");
terminate = TRUE;
break;
case GST_MESSAGE_STATE_CHANGED:
/* We are only interested in state-changed messages from the pipeline */
if (GST_MESSAGE_SRC (msg) == GST_OBJECT (data.pipeline)) {
GstState old_state, new_state, pending_state;
gst_message_parse_state_changed (msg, &old_state, &new_state, &pending_state);
g_print ("Pipeline state changed from %s to %s:\n",
gst_element_state_get_name (old_state), gst_element_state_get_name (new_state));
}
break;
default:
/* We should not reach here */
g_printerr ("Unexpected message received.\n");
break;
}
gst_message_unref (msg);
}
} while (!terminate);
/* Free resources */
gst_object_unref (bus);
gst_element_set_state(data.pipeline, GST_STATE_NULL);
gst_object_unref(data.pipeline);
return 0;
}
and it gave me an error
before signal connect
after signal connect with id = 1
Pipeline state changed from NULL to READY:
Pipeline state changed from READY to PAUSED:
Error received from element source: Internal data stream error.
Debugging information: gstbasesrc.c(3055): gst_base_src_loop (): /GstPipeline:new-pipeline/GstV4l2Src:source:
streaming stopped, reason not-linked (-1)
(The above code works if I write gst_element_link(data.source, data.convert) after the element-link statement for convert and sink.)
So I tried the normal way, in which I just added and linked all the elements together, and it began to work without any use of pads.
#include <gst/gst.h>
int main(int argc, char *argv[])
{
GstElement *pipeline, *source, *convert, *sink;
GstBus *bus;
GstMessage *msg;
gst_init(&argc, &argv);
source = gst_element_factory_make("v4l2src", "source");
convert = gst_element_factory_make("nvvidconv", "convert");
sink = gst_element_factory_make("xvimagesink", "sink");
pipeline = gst_pipeline_new("pipe");
gst_bin_add_many(GST_BIN(pipeline), source,convert,sink, NULL);
gst_element_link_many(source,convert,sink,NULL);
gst_element_set_state(pipeline,GST_STATE_PLAYING);
bus = gst_element_get_bus (pipeline);
msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE, GST_MESSAGE_ERROR | GST_MESSAGE_EOS);
/* Parse message */
if (msg != NULL) {
GError *err;
gchar *debug_info;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_ERROR:
gst_message_parse_error (msg, &err, &debug_info);
g_printerr ("Error received from element %s: %s\n", GST_OBJECT_NAME (msg->src), err->message);
g_printerr ("Debugging information: %s\n", debug_info ? debug_info : "none");
g_clear_error (&err);
g_free (debug_info);
break;
case GST_MESSAGE_EOS:
g_print ("End-Of-Stream reached.\n");
break;
default:
/* We should not reach here because we only asked for ERRORs and EOS */
g_printerr ("Unexpected message received.\n");
break;
}
gst_message_unref (msg);
}
/* Free resources */
gst_object_unref(bus);
gst_element_set_state(pipeline,GST_STATE_NULL);
gst_object_unref(pipeline);
}
But, in order to fully grasp the concept of pads, I wanted to execute simpler pipelines using pads.
I just don't fully understand the usage of pads and how to link them.
EDIT2:
Ultimately I want to write an application for a pipeline like the following, which works perfectly well on the command line:
gst-launch-1.0 v4l2src device='/dev/video0' ! 'video/x-raw,format=(string)YUY2,width=(int)640,height=(int)480' ! nvvidconv ! 'video/x-raw(memory:NVMM),format=(string)NV12,width=(int)640,height=(int)480' ! nvvidconv ! 'video/x-raw,format=(string)NV12,width=(int)640,height=(int)480' ! nvvideoconvert ! 'video/x-raw(memory:NVMM),format=(string)NV12,width=(int)640,height=(int)480' ! mux.sink_0 nvstreammux live-source=1 name=mux batch-size=1 width=640 height=480 ! nvinfer config-file-path=/opt/nvidia/deepstream/deepstream-4.0/sources/apps/sample_apps/deepstream-test1/dstest1_pgie_config.txt batch-size=1 ! nvmultistreamtiler rows=1 columns=1 width=640 height=480 ! nvvideoconvert ! nvdsosd ! nvegltransform ! nveglglessink sync=false -v
But as I don't understand the usage of either pads or bins, I am unable to implement them in my above pipeline. However, I tried this:
#include <gst/gst.h>
#include <glib.h>
#include <stdio.h>
#include "gstnvdsmeta.h"
#define MAX_DISPLAY_LEN 64
#define PGIE_CLASS_ID_VEHICLE 0
#define PGIE_CLASS_ID_PERSON 2
gint frame_number = 0;
gchar pgie_classes_str[4][32] = { "Vehicle", "TwoWheeler", "Person",
"Roadsign"
};
static GstPadProbeReturn osd_sink_pad_buffer_probe(GstPad *pad, GstPadProbeInfo *info, gpointer u_data)
{
GstBuffer *buf=(GstBuffer *)info->data;
guint num_rects =0;
NvDsObjectMeta *obj_meta = NULL;
guint vehicle_count = 0;
guint person_count = 0;
NvDsMetaList * l_frame = NULL;
NvDsMetaList * l_obj = NULL;
NvDsDisplayMeta *display_meta = NULL;
NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buf);
for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
l_frame = l_frame->next) {
NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) (l_frame->data);
int offset = 0;
for (l_obj = frame_meta->obj_meta_list; l_obj != NULL;
l_obj = l_obj->next) {
obj_meta = (NvDsObjectMeta *) (l_obj->data);
if (obj_meta->class_id == PGIE_CLASS_ID_VEHICLE) {
vehicle_count++;
num_rects++;
}
if (obj_meta->class_id == PGIE_CLASS_ID_PERSON) {
person_count++;
num_rects++;
}
}
display_meta = nvds_acquire_display_meta_from_pool(batch_meta);
NvOSD_TextParams *txt_params = &display_meta->text_params[0];
display_meta->num_labels = 1;
txt_params->display_text = g_malloc0 (MAX_DISPLAY_LEN);
offset = snprintf(txt_params->display_text, MAX_DISPLAY_LEN, "Person = %d ", person_count);
offset = snprintf(txt_params->display_text + offset , MAX_DISPLAY_LEN, "Vehicle = %d ", vehicle_count);
/* Now set the offsets where the string should appear */
txt_params->x_offset = 10;
txt_params->y_offset = 12;
/* Font , font-color and font-size */
txt_params->font_params.font_name = "Serif";
txt_params->font_params.font_size = 10;
txt_params->font_params.font_color.red = 1.0;
txt_params->font_params.font_color.green = 1.0;
txt_params->font_params.font_color.blue = 1.0;
txt_params->font_params.font_color.alpha = 1.0;
/* Text background color */
txt_params->set_bg_clr = 1;
txt_params->text_bg_clr.red = 0.0;
txt_params->text_bg_clr.green = 0.0;
txt_params->text_bg_clr.blue = 0.0;
txt_params->text_bg_clr.alpha = 1.0;
nvds_add_display_meta_to_frame(frame_meta, display_meta);
}
g_print ("Frame Number = %d Number of objects = %d "
"Vehicle Count = %d Person Count = %d\n",
frame_number, num_rects, vehicle_count, person_count);
frame_number++;
return GST_PAD_PROBE_OK;
}
int main(int argc, char *argv[])
{
GstElement *pipeline, *source, *filter1, *convert,*filter2, *filter3, *vidconv, *filter4, *mux, *infer, *tiler, *osd, *transform , *sink, *bin, *convert2 , *vidconv2;
GMainLoop *loop;
GstCaps *caps1, *caps2, *caps3, *caps4;
GstPad *osd_sink_pad =NULL, *srcpad, *sinkpad;
loop = g_main_loop_new(NULL,FALSE);
gst_init(&argc, &argv);
pipeline = gst_pipeline_new("nv_pipeline");
gchar *string1 = "video/x-raw(memory:NVMM),format=(string)NV12";
source = gst_element_factory_make("v4l2src", "source");
filter1 = gst_element_factory_make("capsfilter", "filter1");
convert = gst_element_factory_make("nvvidconv", "convert");
filter2 = gst_element_factory_make("capsfilter", "filter2");
filter3 = gst_element_factory_make("capsfilter", "filter3");
filter4 = gst_element_factory_make("capsfilter", "filter4");
vidconv = gst_element_factory_make("nvvideoconvert", "vidconv");
mux = gst_element_factory_make("nvstreammux", "mux");
infer = gst_element_factory_make("nvinfer", "infer");
tiler = gst_element_factory_make("nvmultistreamtiler", "tiler");
osd = gst_element_factory_make("nvosd", "osd");
transform = gst_element_factory_make("nvegltransform", "transform");
sink = gst_element_factory_make("nveglglessink", "sink");
convert2 = gst_element_factory_make("nvvidconv", "convert2");
vidconv2 = gst_element_factory_make("nvvideoconvert", "vidconv2");
gst_bin_add_many(GST_BIN(pipeline), source,filter1,convert,filter2, convert2,filter3,vidconv, filter4,mux,infer, tiler,vidconv2, osd,transform,sink,NULL);
gst_element_link_many(source,filter1,convert,filter2, convert2,filter3, vidconv, filter4,mux,infer, tiler,vidconv2, osd,transform,sink,NULL);
osd_sink_pad = gst_element_get_static_pad(osd, "sink");
gst_pad_add_probe(osd_sink_pad, GST_PAD_PROBE_TYPE_BUFFER, osd_sink_pad_buffer_probe, NULL, NULL);
caps1 = gst_caps_new_simple("video/x-raw", "format",G_TYPE_STRING,"YUY2",NULL);
caps2 = gst_caps_from_string(string1);
caps3 = gst_caps_new_simple("video/x-raw", "format", G_TYPE_STRING,"NV12", NULL);
caps4 = gst_caps_from_string(string1);
g_object_set(G_OBJECT(filter1), "caps", caps1, NULL);
g_object_set(G_OBJECT(filter2), "caps", caps2, NULL);
g_object_set(G_OBJECT(filter3), "caps", caps3, NULL);
g_object_set(G_OBJECT(filter4), "caps", caps4, NULL);
g_object_set(G_OBJECT(mux), "live-source", 1, "name", "mux", "batch-size", 1, "width", 1280, "height", 720, NULL);
g_object_set(G_OBJECT(infer), "config-file-path","/opt/nvidia/deepstream/deepstream-4.0/sources/apps/sample_apps/deepstream-test1/dstest1_pgie_config.txt",NULL);
g_object_set(G_OBJECT(infer), "batch-size", 1, NULL);
g_object_set(G_OBJECT(tiler), "rows", 1, "columns", 1, "width", 1280, "height", 720, NULL);
gst_caps_unref(caps1);
gst_caps_unref(caps2);
gst_caps_unref(caps3);
gst_caps_unref(caps4);
gst_element_set_state(pipeline, GST_STATE_PLAYING);
g_print("Running ...\n");
g_main_loop_run(loop);
gst_element_set_state(pipeline,GST_STATE_NULL);
gst_object_unref(pipeline);
return 0;
}
which gives the exact same output as the command-line gst-launch-1.0, namely:
(deep_pads:15648): GLib-GObject-WARNING **: 11:29:18.761: cannot register existing type 'GstInterpolationMethod'
(deep_pads:15648): GLib-GObject-CRITICAL **: 11:29:18.761: g_param_spec_enum: assertion 'G_TYPE_IS_ENUM (enum_type)' failed
(deep_pads:15648): GLib-GObject-CRITICAL **: 11:29:18.761: validate_pspec_to_install: assertion 'G_IS_PARAM_SPEC (pspec)' failed
(deep_pads:15648): GStreamer-CRITICAL **: 11:29:18.814: gst_element_get_static_pad: assertion 'GST_IS_ELEMENT (element)' failed
(deep_pads:15648): GStreamer-CRITICAL **: 11:29:18.814: gst_pad_add_probe: assertion 'GST_IS_PAD (pad)' failed
0:00:00.843318172 15648 0x5591be52c0 INFO nvinfer gstnvinfer.cpp:519:gst_nvinfer_logger:<infer> NvDsInferContext[UID 1]:initialize(): Trying to create engine from model files
0:00:20.693301580 15648 0x5591be52c0 INFO nvinfer gstnvinfer.cpp:519:gst_nvinfer_logger:<infer> NvDsInferContext[UID 1]:generateTRTModel(): Storing the serialized cuda engine to file at /opt/nvidia/deepstream/deepstream-4.0/samples/models/Primary_Detector/resnet10.caffemodel_b1_int8.engine
except that it doesn't display an output window from the above C application, and doesn't display the rest.
Your first example fails because the data.source element is actually never linked to the data.convert element. Since both elements have static pads, you need to "manually" retrieve and link them before setting the pipeline to GST_STATE_PLAYING:
GstPad *source_pad = gst_element_get_static_pad(data.source, "src");
GstPad *sink_pad = gst_element_get_static_pad(data.convert, "sink");
GstPadLinkReturn ret = gst_pad_link(source_pad, sink_pad);
You probably expected that the static source pad of your data.source element would somehow be linked automatically, so you registered
g_signal_connect(G_OBJECT(data.source), "pad-added", G_CALLBACK(pad_added_handler), &data );
But, as you can see from your debug output, pad_added_handler was never called, because a pad-added callback is only invoked for elements that create dynamic pads. For example, the demultiplexer tsdemux dynamically creates its source pads during discovery of the elementary streams, so in that case registering a pad-added callback is necessary.
The first step on your learning curve is to understand the fundamental differences between "static" pads (mandatory, always exist, linked "manually"), dynamic pads (exist sometimes, created automatically by the element) and request pads (optional, requested "manually" when needed) in GStreamer. After that, everything will be much easier for you.
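A short sketch contrasting the three kinds (the element variables here are illustrative; note that the mux.sink_0 pad of nvstreammux in your EDIT2 pipeline is a request pad):
/* static pad: always exists, just retrieve and link it */
GstPad *conv_sink = gst_element_get_static_pad (convert, "sink");
/* dynamic pad: created by the element at runtime, so register a callback */
g_signal_connect (demux, "pad-added", G_CALLBACK (pad_added_handler), NULL);
/* request pad: ask the element for one when you need it (e.g. tee or nvstreammux),
and release it when done */
GstPad *tee_src = gst_element_get_request_pad (tee, "src_%u");
gst_element_release_request_pad (tee, tee_src);
gst_object_unref (tee_src);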

How to listen to oggdemux gstreamer failures?

I wrote a GStreamer app to convert Opus audio to raw audio. If I feed bad audio (just random bytes) to the pipeline, the pipeline gets stuck and I don't receive an error message on the message bus.
I'm listening for error messages flowing through the pipeline, but I am not getting an error code to indicate the failure. The GStreamer debug logs indicate that the demux failed, though; I can see the following in the logs:
0:00:00.021614679 22541 0xe5b190 WARN oggdemux gstoggdemux.c:4609:gst_ogg_demux_send_event:<oggdemux0> No chain to forward event to
0:00:00.021656681 22541 0xe5b190 WARN oggdemux gstoggdemux.c:2433:gst_ogg_demux_sink_event:<oggdemux0> EOS while trying to retrieve chain, seeking disabled
The following is an app sample that I wrote:
#include <gst/gst.h>
#include <gst/gstbin.h>
#include <gst/app/gstappsrc.h>
#include <gst/app/gstappsink.h>
#include <stdio.h>
#include <string.h>
static GMainLoop *loop;
FILE *file = NULL;
size_t bytesRead = 0;
typedef struct _CustomData
{
GstElement *pipeline;
GstAppSrc *app_source;
guint sourceid; /* To control the GSource */
} CustomData;
static gboolean push_data(CustomData *data)
{
GstBuffer *gbuffer;
GstFlowReturn ret;
char buffer[1024];
gbuffer = gst_buffer_new_and_alloc(sizeof(buffer));
GstMapInfo info;
bytesRead = fread(buffer, 1, sizeof(buffer), file);
gst_buffer_map(gbuffer, &info, GST_MAP_WRITE);
memcpy(info.data, buffer, bytesRead);
gst_buffer_unmap(gbuffer, &info);
if (bytesRead > 0)
{
//g_print("Pushing %d\n", (int)bytesRead);
/* Push the buffer into the appsrc; the signal takes its own reference,
so drop ours before returning */
g_signal_emit_by_name(data->app_source, "push-buffer", gbuffer, &ret);
gst_buffer_unref(gbuffer);
return TRUE;
}
else
{
g_print("file complete\n");
gst_app_src_end_of_stream(data->app_source);
gst_buffer_unref(gbuffer);
return FALSE;
}
}
static void stop_feed(GstElement *source, CustomData *data)
{
if (data->sourceid != 0)
{
g_print("Stop feeding\n");
g_source_remove(data->sourceid);
data->sourceid = 0;
}
}
static void start_feed(GstElement *source, guint size, CustomData *data)
{
if (data->sourceid == 0)
{
g_print("Start feeding\n");
data->sourceid = g_idle_add((GSourceFunc)push_data, data);
}
}
static gboolean bus_call(GstBus * bus, GstMessage * msg, gpointer user_data)
{
switch (GST_MESSAGE_TYPE(msg))
{
case GST_MESSAGE_EOS:
g_print("End of stream\n");
g_main_loop_quit(loop);
break;
case GST_MESSAGE_ERROR:
{
gchar *debug;
GError *error;
gst_message_parse_error(msg, &error, &debug);
g_free(debug);
g_printerr("Error: from %s %s\n", GST_OBJECT_NAME(msg->src), error->message);
g_error_free(error);
g_main_loop_quit(loop);
break;
}
default:
break;
}
return TRUE;
}
int main(int argc,
char *argv[])
{
CustomData data;
memset(&data, 0, sizeof(data));
GstBus *bus;
guint bus_watch_id;
/* Initialisation */
gst_init(&argc, &argv);
loop = g_main_loop_new(NULL, FALSE);
GError *error = NULL;
data.pipeline = gst_parse_launch("concat name=c ! filesink location=program.wav appsrc name=src_00 ! oggdemux ! opusdec ! audioconvert ! audioresample ! audio/x-raw,format=S16LE,channels=1,rate=16000 ! queue ! c.", &error);
if (!data.pipeline)
{
g_printerr("Pipeline could not be created. Exiting.\n");
return -1;
}
data.app_source = (G_TYPE_CHECK_INSTANCE_CAST((gst_bin_get_by_name(GST_BIN(data.pipeline), "src_00")), GST_TYPE_APP_SRC, GstAppSrc));
g_signal_connect(data.app_source, "need-data", G_CALLBACK(start_feed), &data);
g_signal_connect(data.app_source, "enough-data", G_CALLBACK(stop_feed), &data);
/* we add a message handler */
bus = gst_pipeline_get_bus(GST_PIPELINE(data.pipeline));
bus_watch_id = gst_bus_add_watch(bus, bus_call, NULL);
gst_object_unref(bus);
file = fopen("junk.wav", "rb");
/* Set the pipeline to "playing" state*/
g_print("Now playing");
gst_element_set_state(data.pipeline, GST_STATE_PLAYING);
/* Iterate */
g_print("Running...\n");
g_main_loop_run(loop);
/* Out of the main loop, clean up nicely */
g_print("Returned, stopping playback\n");
gst_element_set_state(data.pipeline, GST_STATE_NULL);
g_print("Deleting pipeline\n");
gst_object_unref(GST_OBJECT(data.pipeline));
g_source_remove(bus_watch_id);
g_main_loop_unref(loop);
return 0;
}
I would have expected the demux failure to propagate to the message bus, but it does not. How can I listen for such errors?
I've tried other pipelines that use decodebin, and with those I do get the error messages on the message bus. The following pipeline works as expected:
gst_parse_launch("concat name=c ! filesink location=program.wav appsrc name=src_00 ! decodebin ! audioconvert ! audioresample ! audio/x-raw,format=S16LE,channels=1,rate=16000 ! queue ! c.", &error);
GStreamer version: 1.8.3
OS: Ubuntu 16.04
The issue seems to be resolved in GStreamer 1.14. After updating, I now get an error message on the message bus:
Message: Error: from oggdemux0 Could not demultiplex stream.
Error Code: GST_STREAM_ERROR_DEMUX
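With 1.14, the bus handler can then match the demux error explicitly; a small sketch of the GST_MESSAGE_ERROR branch of the bus_call above:
case GST_MESSAGE_ERROR:
{
gchar *debug;
GError *error;
gst_message_parse_error(msg, &error, &debug);
/* oggdemux posts this as a stream error with code GST_STREAM_ERROR_DEMUX */
if (error->domain == GST_STREAM_ERROR && error->code == GST_STREAM_ERROR_DEMUX)
g_printerr("Demux failed: %s\n", error->message);
else
g_printerr("Error: from %s %s\n", GST_OBJECT_NAME(msg->src), error->message);
g_free(debug);
g_error_free(error);
g_main_loop_quit(loop);
break;
}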

gstreamer flacenc: how to add duration information to FLAC files?

I'm using GStreamer (gst-launch-1.0 version 1.8.3) to record FLAC files. The command line looks like this:
gst-launch-1.0 -v alsasrc ! flacenc ! filesink location="output.flac"
mediainfo output.flac
[screenshot of the mediainfo result: no Duration field is shown for the file]
This file plays in a media player, but seeking and the play-time display do not work. I think there is no duration information.
[screenshot of the player screen]
How do I add duration information to FLAC files?
I think .flac is a very basic stream format. It does not support random access or carry a duration; you can't know the exact duration unless you parse the complete file. Some players may take a "best effort" approach here and seek to roughly the file position where you put the slider, but that is nothing the format itself provides.
I think for seeking you are supposed to put .flac into a container like .ogg. This is actually very similar to .aac files, which should be put into .mp4.
So try gst-launch-1.0 -e -v alsasrc ! flacenc ! oggmux ! filesink location="output.ogg".
$ mediainfo output.ogg
General
Complete name : output.ogg
Format : Ogg
Format/Info : Free Lossless Audio Codec
File size : 598 KiB
Duration : 7 s 941 ms
Overall bit rate mode : Variable
Overall bit rate : 617 kb/s
Audio
ID : 256729656 (0xF4D6238)
Format : FLAC
Format/Info : Free Lossless Audio Codec
Duration : 7 s 941 ms
Bit rate mode : Variable
Channel(s) : 2 channels
Channel positions : Front: L R
Sampling rate : 44.1 kHz
Bit depth : 16 bits
Writing library : libFLAC 1.3.2 (UTC 2017-01-01)
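If you need the same thing from code rather than gst-launch, a minimal C sketch might look like this (assumes GStreamer 1.x, an ALSA capture device, and a fixed ten-second recording for brevity):
#include <gst/gst.h>
int main (int argc, char *argv[])
{
gst_init (&argc, &argv);
/* same pipeline as the gst-launch line above */
GstElement *pipeline = gst_parse_launch (
"alsasrc ! flacenc ! oggmux ! filesink location=output.ogg", NULL);
gst_element_set_state (pipeline, GST_STATE_PLAYING);
/* record for ten seconds, then send EOS so oggmux can finalize the file
(similar to what the -e flag arranges for gst-launch) */
g_usleep (10 * G_USEC_PER_SEC);
gst_element_send_event (pipeline, gst_event_new_eos ());
/* wait until the EOS (or an error) reaches the bus before shutting down */
GstBus *bus = gst_element_get_bus (pipeline);
GstMessage *msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,
GST_MESSAGE_EOS | GST_MESSAGE_ERROR);
if (msg)
gst_message_unref (msg);
gst_object_unref (bus);
gst_element_set_state (pipeline, GST_STATE_NULL);
gst_object_unref (pipeline);
return 0;
}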
I made some modifications to user199309's answer
https://stackoverflow.com/a/47569428/5564626
compile: g++ -o test test.c $(pkg-config --cflags --libs gstreamer-1.0)
#include <stdio.h>
#include <gst/gst.h>
#define GLIB_DISABLE_DEPRECATION_WARNINGS
static GstElement *pipeline;
static GstPad *queue_src_pad;
static GstElement *bins[2];
static GstPad *bin_pads[2];
static GstElement *filesink[2];
static GMainLoop *loop;
static GstElement *flacenc[2];
static size_t current_bin = 0;
static int current_file = 0;
static GstPadProbeReturn
pad_probe_cb(GstPad * pad, GstPadProbeInfo * info, gpointer user_data) {
gst_pad_remove_probe(pad, GST_PAD_PROBE_INFO_ID (info));
gst_pad_unlink(queue_src_pad, bin_pads[current_bin]);
gst_pad_send_event(bin_pads[current_bin], gst_event_new_eos());
gst_element_set_state(bins[current_bin], GST_STATE_NULL);
gst_object_ref(bins[current_bin]);
gst_bin_remove(GST_BIN(pipeline), bins[current_bin]);
current_file++;
current_bin = (current_file % 2);
{
char file_location[32];
sprintf(file_location, "recording_%d.flac", current_file); /* current_file is an int, so %d */
g_object_set(G_OBJECT(
filesink[current_bin]), "location", file_location, NULL);
printf("now writing to %s\n", file_location);
}
gst_bin_add(GST_BIN(pipeline), bins[current_bin]);
gst_pad_link(queue_src_pad, bin_pads[current_bin]);
gst_element_set_state(bins[current_bin], GST_STATE_PLAYING);
gst_element_sync_state_with_parent(bins[current_bin]);
return GST_PAD_PROBE_OK;
}
static gboolean timeout_cb(gpointer user_data) {
gst_pad_add_probe(queue_src_pad, GST_PAD_PROBE_TYPE_BLOCK_DOWNSTREAM,
pad_probe_cb, NULL, NULL);
return TRUE;
}
static gboolean
bus_cb (GstBus *bus,
GstMessage *msg,
gpointer data)
{
GMainLoop *loop = (GMainLoop *)data;
g_print("Got %s message\n", GST_MESSAGE_TYPE_NAME(msg));
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_EOS:
g_print ("End of stream\n");
//g_main_loop_quit (loop);
break;
case GST_MESSAGE_ERROR: {
gchar *debug;
GError *error;
gst_message_parse_error(msg, &error, &debug);
g_free(debug);
g_printerr("Error: %s\n", error->message);
g_error_free(error);
g_main_loop_quit(loop);
break;
}
default:
break;
}
return TRUE;
}
gint main(gint argc, gchar *argv[])
{
GstElement *audiosrc;
GstElement *queue;
GstBus *bus;
GMainLoop *loop;
guint bus_watch_id;
gst_init (&argc, &argv);
audiosrc = gst_element_factory_make("alsasrc", "audiosrc");
queue = gst_element_factory_make("queue", "queue");
bins[0] = gst_bin_new("bin0");
bins[1] = gst_bin_new("bin1");
flacenc[0] = gst_element_factory_make("flacenc", "flacenc0");
flacenc[1] = gst_element_factory_make("flacenc", "flacenc1");
filesink[0] = gst_element_factory_make("filesink", "filesink0");
filesink[1] = gst_element_factory_make("filesink", "filesink1");
pipeline = gst_pipeline_new("test-pipeline");
if (!pipeline || !audiosrc || !queue
|| !flacenc[0] || !filesink[0]
|| !flacenc[1] || !filesink[1]
) {
g_printerr ("not all elements could be created\n");
//return -1;
}
gst_bin_add_many(GST_BIN(bins[0]), flacenc[0], filesink[0], NULL);
gst_bin_add_many(GST_BIN(bins[1]), flacenc[1], filesink[1], NULL);
gst_bin_add_many(GST_BIN(pipeline), audiosrc, queue, bins[0], NULL);
g_assert(gst_element_link(audiosrc, queue));
g_assert(gst_element_link_many(flacenc[0], filesink[0], NULL));
g_assert(gst_element_link_many(flacenc[1], filesink[1], NULL));
GstPad* pad = gst_element_get_static_pad(flacenc[0], "sink");
gst_element_add_pad(bins[0], gst_ghost_pad_new("sink", pad));
gst_object_unref(pad);
GstPad* pad2 = gst_element_get_static_pad(flacenc[1], "sink");
gst_element_add_pad(bins[1], gst_ghost_pad_new("sink", pad2));
gst_object_unref(pad2);
bin_pads[0] = gst_element_get_static_pad(bins[0], "sink");
bin_pads[1] = gst_element_get_static_pad(bins[1], "sink");
current_bin = 0;
gst_element_link(queue, bins[current_bin]);
g_object_set(filesink[current_bin], "location", "recording_0.flac", NULL);
queue_src_pad = gst_element_get_static_pad(queue, "src");
loop = g_main_loop_new(NULL, FALSE); /* create the loop before handing it to the bus watch */
bus = gst_element_get_bus(pipeline);
bus_watch_id = gst_bus_add_watch(bus, bus_cb, loop);
gst_element_set_state(pipeline, GST_STATE_PLAYING);
g_timeout_add_seconds(10, timeout_cb, NULL);
g_main_loop_run (loop);
gst_object_unref(bus);
gst_element_set_state(pipeline, GST_STATE_NULL);
gst_object_unref(pipeline);
return 0;
}

Playing .AVI file with GStreamer SDK in Windows

I want to play an .AVI file using GStreamer on Windows 7. The GStreamer SDK was installed as given in this link. Then a GStreamer SDK project was created and the following code was added to a C file, as given in this link, with the suggested corrections. Project properties -> Configuration properties -> Debugging -> Working directory was changed to "$(GSTREAMER_SDK_ROOT_X86)\bin", and the same was added to the Windows PATH variable, as suggested in the installation link given above. When the code is run, it just exits without playing the video; the last few lines of the output are given below. Please note that I have installed the 32-bit GStreamer SDK on 64-bit Windows 7.
Code:
#include<stdio.h>
#include<gst/gst.h>
#include<glib.h>
//Function to process message on bus of pipeline
gboolean process_message(GstBus *bus, GstMessage *msg,gpointer data);
//Function to add pad dynamically for ogg demux
void dynamic_addpad(GstElement *element, GstPad *pad, gpointer data);
void dynamic_decodepad (GstElement* object, GstPad* arg0, gboolean arg1,gpointer user_data);
GstElement *source, *demuxer, *audio_decoder, *video_decoder, *audio_convertor,*video_convertor, *audio_sink,*video_sink,*audioqueue,*videoqueue;//*audio_demuxer, *video_demuxer,
int main(int argc,char* argv[]){
GstBin *Bin;
GstBus *bus;
GMainLoop *Mainloop;
gst_init (&argc,&argv);
Mainloop = g_main_loop_new(NULL,FALSE);//NULL to use the current context and False to tell its not in running state
GstElement *pipeline = gst_pipeline_new("PIPELINE");
Bin = GST_BIN(pipeline);
bus = gst_pipeline_get_bus(GST_PIPELINE(pipeline));
source = gst_element_factory_make("filesrc","file-source");
g_object_set(G_OBJECT(source), "location", "file:///C:/Video.avi", NULL);
demuxer = gst_element_factory_make("avidemux","avi-demuxer");
audioqueue = gst_element_factory_make("queue","Queue for audio");
videoqueue = gst_element_factory_make("queue","Queue for video");
audio_decoder = gst_element_factory_make("decodebin","a_decodebin");
video_decoder = gst_element_factory_make("decodebin","decoderbin");//"Vorbis audio decoder","vorbis");
audio_convertor = gst_element_factory_make("audioconvert","audio convertor");//"Audio converter","audioconvert");
video_convertor = gst_element_factory_make("videoscale","video convertor");//"Audio converter","audioconvert");
audio_sink = gst_element_factory_make("autoaudiosink","Auto audio sink");
video_sink = gst_element_factory_make("xvimagesink","XV video sink ");
if(!source || !demuxer || !audioqueue || !videoqueue || !video_decoder ||!audio_convertor || ! video_convertor || !audio_sink || !video_sink ){
g_print("Could not not create element\n");
return 0;
}
gst_bin_add(Bin,source);
gst_bin_add_many(
Bin,
demuxer,
audioqueue,videoqueue,
audio_decoder,audio_convertor,
video_decoder,video_convertor,
audio_sink,video_sink,
NULL);
gst_element_link(source,demuxer);
gst_element_link_many(audioqueue,audio_decoder,NULL);
gst_element_link_many(audio_convertor,audio_sink,NULL);
gst_element_link_many(videoqueue,video_decoder,NULL);
gst_element_link_many(video_convertor,video_sink,NULL);
g_signal_connect(demuxer,"pad-added",G_CALLBACK(dynamic_addpad),NULL);//demuxer and decoder are passed as instance and data as pads of both the elements are linked in dynamic_addpad
g_signal_connect(audio_decoder,"new-decoded-pad",G_CALLBACK(dynamic_decodepad),NULL);
g_signal_connect(video_decoder,"new-decoded-pad",G_CALLBACK(dynamic_decodepad),NULL);//demuxer and decoder are passed as instance and data as pads of both the elements are linked in dynamic_addpad
gst_bus_add_watch(bus,process_message,Mainloop); //Mainloop is passed as user data as in the process_message actions are taken on the loop
g_object_unref(bus);
g_print("In playing state\n");
gst_element_set_state(pipeline, GST_STATE_PLAYING);//Pipeline is also a bin and bin is also an element at abstract level and hence gst_element_set_state call is used to set state of pipeline.
g_main_loop_run(Mainloop);
g_print("In playing state2\n");
gst_element_set_state(pipeline, GST_STATE_NULL);
g_object_unref(G_OBJECT(pipeline));
}
//Function to process message on bus of pipeline
gboolean process_message(GstBus *bus, GstMessage *msg,gpointer data){
GError *error;
gchar *debug;
GMainLoop *loop = (GMainLoop *)data;
g_print(" In process message msg->type : %d\n",GST_MESSAGE_TYPE(msg));
switch(GST_MESSAGE_TYPE(msg)){
case GST_MESSAGE_UNKNOWN :
g_print("GST_MESSAGE_UNKNOWN \n");
break;
case GST_MESSAGE_EOS :
g_print("GST_MESSAGE_EOS \n");
g_main_loop_quit(loop);
break;
case GST_MESSAGE_ERROR :
g_print("GST_MESSAGE_ERROR \n");
gst_message_parse_error (msg, &error, &debug);
g_free(debug);
//if(!error)
{
g_print("GST_MESSAGE_ERROR message : %s \n",error->message);
}
g_main_loop_quit(loop);
break;
case GST_MESSAGE_WARNING :
g_print("GST_MESSAGE_WARNING \n");
break;
case GST_MESSAGE_INFO :
g_print("GST_MESSAGE_INFO \n");
break;
case GST_MESSAGE_TAG :
g_print("GST_MESSAGE_TAG \n");
break;
case GST_MESSAGE_BUFFERING:
g_print("GST_MESSAGE_BUFFERING \n");
break;
case GST_MESSAGE_STATE_CHANGED:
g_print("GST_MESSAGE_STATE_CHANGED \n");
break;
default :
g_print("default \n");
break;
}
return TRUE; //always return TRUE so the watch stays registered; returning FALSE would remove it
}
void dynamic_addpad(GstElement *element, GstPad *pad, gpointer data) {
char* pad_name = gst_pad_get_name(pad);
g_print(" In dynamic ADDING PAD %s\n", pad_name);
if (g_str_has_prefix(pad_name,"audio")) {
GstPad *audiodemuxsink = gst_element_get_static_pad(audioqueue,"sink");
gst_pad_link(pad,audiodemuxsink );
}
else if (g_str_has_prefix(pad_name,"video")) {
GstPad *videodemuxsink = gst_element_get_static_pad(videoqueue,"sink");
gst_pad_link(pad,videodemuxsink );
}
g_free (pad_name);
}
void dynamic_decodepad (GstElement* object, GstPad* pad, gboolean arg1,gpointer user_data) {
GstPad* videoconvertsink = gst_element_get_static_pad(video_convertor,"sink");
if (gst_pad_can_link(pad,videoconvertsink)) {
gst_pad_link(pad,videoconvertsink);
}
GstPad* audioconvertsink = gst_element_get_static_pad(audio_convertor,"sink");
if (gst_pad_can_link(pad,audioconvertsink)) {
gst_pad_link(pad,audioconvertsink);
}
}
Output:
The thread 'Win32 Thread' (0x19c4) has exited with code 0 (0x0).
The thread 'Win32 Thread' (0x2370) has exited with code 0 (0x0).
The thread 'Win32 Thread' (0x2040) has exited with code 0 (0x0).
The program '[5368] GstProject2.exe: Native' has exited with code 0 (0x0).
Finally I was able to play the AVI file using the following code, which is based on this example on the GStreamer SDK website.
In command prompt:
Option 1:
gst-launch-0.10 filesrc location=C:\\Video.avi ! decodebin2 name=dec ! queue ! ffmpegcolorspace ! autovideosink dec. ! queue ! audioconvert ! audioresample ! autoaudiosink
Option 2:
gst-launch-0.10 filesrc location=C:\\Video.avi ! decodebin2 name=dec ! ffmpegcolorspace ! autovideosink dec. ! audioconvert ! audioresample ! autoaudiosink
Option 3:
gst-launch-0.10 uridecodebin uri=file:///C:/Video.avi name=dec ! ffmpegcolorspace ! autovideosink dec. ! audioconvert ! autoaudiosink
In Visual Studio:
#include <gst/gst.h>
/* Structure to contain all our information, so we can pass it to callbacks */
typedef struct _CustomData {
GstElement *pipeline;
GstElement *source;
GstElement *convert;
GstElement *audio_sink;
GstElement *colorspace;
GstElement *video_sink;
} CustomData;
/* Handler for the pad-added signal */
static void pad_added_handler (GstElement *src, GstPad *pad, CustomData *data);
int main(int argc, char *argv[]) {
CustomData data;
GstBus *bus;
GstMessage *msg;
GstStateChangeReturn ret;
gboolean terminate = FALSE;
/* Initialize GStreamer */
gst_init (&argc, &argv);
/* Create the elements */
data.source = gst_element_factory_make ("uridecodebin", "source");
data.convert = gst_element_factory_make ("audioconvert", "convert");
data.audio_sink = gst_element_factory_make ("autoaudiosink", "audio_sink");
data.colorspace = gst_element_factory_make ("ffmpegcolorspace", "colorspace");
data.video_sink = gst_element_factory_make ("autovideosink", "video_sink");
/* Create the empty pipeline */
data.pipeline = gst_pipeline_new ("test-pipeline");
if (!data.pipeline || !data.source || !data.convert || !data.audio_sink || !data.colorspace || !data.video_sink) {
g_printerr ("Not all elements could be created.\n");
return -1;
}
/* Build the pipeline. Note that we are NOT linking the source at this
* point. We will do it later. */
gst_bin_add_many (GST_BIN (data.pipeline), data.source, data.convert , data.audio_sink, data.colorspace, data.video_sink, NULL);
if (!(gst_element_link (data.convert, data.audio_sink) && gst_element_link (data.colorspace, data.video_sink))) {
g_printerr ("Elements could not be linked.\n");
gst_object_unref (data.pipeline);
return -1;
}
/* Set the URI to play */
g_object_set (data.source, "uri", "file:///C:/Video.avi", NULL);
/* Connect to the pad-added signal */
g_signal_connect (data.source, "pad-added", G_CALLBACK (pad_added_handler), &data);
/* Start playing */
ret = gst_element_set_state (data.pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr ("Unable to set the pipeline to the playing state.\n");
gst_object_unref (data.pipeline);
return -1;
}
/* Listen to the bus */
bus = gst_element_get_bus (data.pipeline);
do {
msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE, GST_MESSAGE_STATE_CHANGED | GST_MESSAGE_ERROR | GST_MESSAGE_EOS);
/* Parse message */
if (msg != NULL) {
GError *err;
gchar *debug_info;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_ERROR:
gst_message_parse_error (msg, &err, &debug_info);
g_printerr ("Error received from element %s: %s\n", GST_OBJECT_NAME (msg->src), err->message);
g_printerr ("Debugging information: %s\n", debug_info ? debug_info : "none");
g_clear_error (&err);
g_free (debug_info);
terminate = TRUE;
break;
case GST_MESSAGE_EOS:
g_print ("End-Of-Stream reached.\n");
terminate = TRUE;
break;
case GST_MESSAGE_STATE_CHANGED:
/* We are only interested in state-changed messages from the pipeline */
if (GST_MESSAGE_SRC (msg) == GST_OBJECT (data.pipeline)) {
GstState old_state, new_state, pending_state;
gst_message_parse_state_changed (msg, &old_state, &new_state, &pending_state);
g_print ("Pipeline state changed from %s to %s:\n",
gst_element_state_get_name (old_state), gst_element_state_get_name (new_state));
}
break;
default:
/* We should not reach here */
g_printerr ("Unexpected message received.\n");
break;
}
gst_message_unref (msg);
}
} while (!terminate);
/* Free resources */
gst_object_unref (bus);
gst_element_set_state (data.pipeline, GST_STATE_NULL);
gst_object_unref (data.pipeline);
return 0;
}
/* This function will be called by the pad-added signal */
static void pad_added_handler (GstElement *src, GstPad *new_pad, CustomData *data) {
GstPad *sink_pad_audio = gst_element_get_static_pad (data->convert, "sink");
GstPad *sink_pad_video = gst_element_get_static_pad (data->colorspace, "sink");
GstPadLinkReturn ret;
GstCaps *new_pad_caps = NULL;
GstStructure *new_pad_struct = NULL;
const gchar *new_pad_type = NULL;
g_print ("Received new pad '%s' from '%s':\n", GST_PAD_NAME (new_pad), GST_ELEMENT_NAME (src));
///* If our converter is already linked, we have nothing to do here */
//if (gst_pad_is_linked (sink_pad)) {
// g_print (" We are already linked. Ignoring.\n");
// goto exit;
//}
/* Check the new pad's type */
new_pad_caps = gst_pad_get_caps (new_pad);
new_pad_struct = gst_caps_get_structure (new_pad_caps, 0);
new_pad_type = gst_structure_get_name (new_pad_struct);
if (!g_str_has_prefix (new_pad_type, "audio/x-raw")) {
g_print (" It has type '%s' which is raw video. Connecting.\n", new_pad_type);
/* Attempt the link */
ret = gst_pad_link (new_pad, sink_pad_video);
if (GST_PAD_LINK_FAILED (ret)) {
g_print (" Type is '%s' but link failed.\n", new_pad_type);
} else {
g_print (" Link succeeded (type '%s').\n", new_pad_type);
}
goto exit;
}
/* Attempt the link */
ret = gst_pad_link (new_pad, sink_pad_audio);
if (GST_PAD_LINK_FAILED (ret)) {
g_print (" Type is '%s' but link failed.\n", new_pad_type);
} else {
g_print (" Link succeeded (type '%s').\n", new_pad_type);
}
exit:
/* Unreference the new pad's caps, if we got them */
if (new_pad_caps != NULL)
gst_caps_unref (new_pad_caps);
/* Unreference the sink pad */
gst_object_unref (sink_pad_audio);
gst_object_unref (sink_pad_video);
}