Issue with GStreamer under Qt in C++: flow error

I'm a beginner with GStreamer. I need to get frames from a UDP camera and convert them to a cv::Mat (OpenCV).
I run my camera stream like this:
gst-launch-1.0 v4l2src device=/dev/video0 ! \
h264parse ! rtph264pay ! udpsink host=XXX.XXX.XXX.XXX port=5000
And in another terminal I can get the stream like this (and it works):
gst-launch-1.0 udpsrc port=5000 caps='application/x-rtp, media=(string)video, clock-rate=(int)90000, encoding-name=(string)H264' ! \
rtph264depay ! avdec_h264 ! autovideosink
So here is my C++ code:
GstFlowReturn
new_preroll(GstAppSink *appsink, gpointer data) {
g_print ("Got preroll!\n");
return GST_FLOW_OK;
}
GstFlowReturn
new_sample(GstAppSink *appsink, gpointer data) {
static int framecount = 0;
framecount++;
GstSample *sample = gst_app_sink_pull_sample(appsink);
GstCaps *caps = gst_sample_get_caps(sample);
GstBuffer *buffer = gst_sample_get_buffer(sample);
const GstStructure *info = gst_sample_get_info(sample);
// ---- Read frame and convert to opencv format ---------------
GstMapInfo map;
gst_buffer_map (buffer, &map, GST_MAP_READ);
// convert gstreamer data to OpenCV Mat, you could actually
// resolve height / width from caps...
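// e.g. (untested sketch) instead of hardcoding 320x240 below:
//   GstStructure *s = gst_caps_get_structure(caps, 0);
//   gint width = 0, height = 0;
//   gst_structure_get_int(s, "width", &width);
//   gst_structure_get_int(s, "height", &height);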
Mat frame(Size(320, 240), CV_8UC3, (char*)map.data, Mat::AUTO_STEP);
int frameSize = map.size;
// TODO: synchronize this....
frameQueue.push_back(frame);
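// NOTE: cv::Mat does not copy map.data, so once the buffer is unmapped below,
// the queued frame points at invalid memory; pushing frame.clone() is safer.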
gst_buffer_unmap(buffer, &map);
// ------------------------------------------------------------
// print dot every 30 frames
if (framecount%30 == 0) {
g_print (".");
}
// show caps on first frame
if (framecount == 1) {
g_print ("%s\n", gst_caps_to_string(caps));
}
gst_sample_unref (sample);
return GST_FLOW_OK;
}
static gboolean
my_bus_callback (GstBus *bus, GstMessage *message, gpointer data) {
g_print ("Got %s message\n", GST_MESSAGE_TYPE_NAME (message));
switch (GST_MESSAGE_TYPE (message)) {
case GST_MESSAGE_ERROR: {
GError *err;
gchar *debug;
gst_message_parse_error (message, &err, &debug);
g_print ("Error: %s\n", err->message);
g_error_free (err);
g_free (debug);
break;
}
case GST_MESSAGE_EOS:
/* end-of-stream */
break;
default:
/* unhandled message */
break;
}
/* we want to be notified again the next time there is a message
* on the bus, so returning TRUE (FALSE means we want to stop watching
* for messages on the bus and our callback should not be called again)
*/
return TRUE;
}
int
main (int argc, char *argv[])
{
GError *error = NULL;
gst_init (&argc, &argv);
gchar *descr = g_strdup(
"udpsrc port=5000 ! "
"caps=application/x-rtp, media=(string)video, clock-rate=(int)9000, encoding-name=(string)H264 ! "
"rtph264depay ! "
"avdec_h264 ! "
"videoconvert ! "
"appsink name=sink "
);
GstElement *pipeline = gst_parse_launch (descr, &error);
if (pipeline== NULL) {
g_print ("could not construct pipeline: %s\n", error->message);
g_error_free (error);
exit (-1);
}
/* get sink */
GstElement *sink = gst_bin_get_by_name (GST_BIN (pipeline), "sink");
gst_app_sink_set_emit_signals((GstAppSink*)sink, true);
gst_app_sink_set_drop((GstAppSink*)sink, true);
gst_app_sink_set_max_buffers((GstAppSink*)sink, 1);
GstAppSinkCallbacks callbacks = { NULL, new_preroll, new_sample };
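// field order in GstAppSinkCallbacks is { eos, new_preroll, new_sample },
// so the NULL above means no EOS callback is installed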
gst_app_sink_set_callbacks (GST_APP_SINK(sink), &callbacks, NULL, NULL);
GstBus *bus;
guint bus_watch_id;
bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
bus_watch_id = gst_bus_add_watch (bus, my_bus_callback, NULL);
gst_object_unref (bus);
GstStateChangeReturn test=gst_element_set_state (GST_ELEMENT (pipeline), GST_STATE_PLAYING);
qDebug() << test << " this is the test";
namedWindow("edges",1);
while(1) {
g_main_iteration(false);
// TODO: synchronize...
if (frameQueue.size() >10) {
// this lags pretty badly even when grabbing frames from webcam
Mat frame = frameQueue.front();
imshow("edges", frame);
cv::waitKey(30);
frameQueue.clear();
}
}
gst_element_set_state (GST_ELEMENT (pipeline), GST_STATE_NULL);
gst_object_unref (GST_OBJECT (pipeline));
return 0;
}
And I get the error:
Error: Internal data flow error.
I think the problem is in my pipeline declaration, but I can't find what's wrong with it.
Any suggestions?

You have a ! after udpsrc port=5000; that one is not present in your original pipeline, and caps is a property of udpsrc, not a separate element. I haven't checked any further; maybe print out the pipeline and double-check that it's the one you intended.
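For reference, an untested sketch of a corrected description (note also that your working command line uses clock-rate=(int)90000 while the code says 9000):
gchar *descr = g_strdup(
"udpsrc port=5000 caps=\"application/x-rtp, media=(string)video, "
"clock-rate=(int)90000, encoding-name=(string)H264\" ! "
"rtph264depay ! "
"avdec_h264 ! "
"videoconvert ! "
"appsink name=sink "
);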

Related

Converting a GStreamer audiomixer command to code

I want to use audiomixer in my application, which receives audio from different sources and should play them together through the speaker.
My final application should do something like this command:
gst-launch-1.0 audiomixer name=mix ! autoaudiosink autoaudiosrc ! \
audioconvert ! mix. udpsrc port=5001 caps="application/x-rtp" ! queue !\
rtppcmudepay ! mulawdec ! audioconvert ! audioresample ! mix.
I have already written code that uses tee and queues, and I know how to work with tee and queues in code based on this code, but I don't know how to use the mixer in my code.
So for simplicity I just want to write code that works as this command does:
gst-launch-1.0 audiotestsrc freq=100 ! audiomixer name=mix ! audioconvert ! autoaudiosink autoaudiosrc ! mix.
I didn't find any useful example for reaching this goal. How can I write C code to do this?
For the second part:
gst-launch-1.0 audiotestsrc freq=100 ! audiomixer name=mix ! audioconvert ! autoaudiosink autoaudiosrc ! mix.
This code works:
#include <gst/gst.h>
static GMainLoop *loop;
int bus_callback (GstBus *bus, GstMessage *message, gpointer data)
{
g_print ("Got %s message\n", GST_MESSAGE_TYPE_NAME (message));
switch (GST_MESSAGE_TYPE (message)) {
case GST_MESSAGE_ERROR: {
GError *err;
gchar *debug;
gst_message_parse_error (message, &err, &debug);
g_print ("Error: %s\n", err->message);
g_error_free (err);
g_free (debug);
g_main_loop_quit (loop);
break;
}
case GST_MESSAGE_EOS:
/* end-of-stream */
g_main_loop_quit (loop);
break;
default:
/* unhandled message */
break;
}
/* we want to be notified again the next time there is a message
* on the bus, so returning TRUE (FALSE means we want to stop watching
* for messages on the bus and our callback should not be called again)
*/
return TRUE;
}
int main(int argc, char *argv[])
{
/* Initialize GStreamer */
gst_init (nullptr, nullptr);
GstElement *pipeline, *src1,*src2, *sink, *convert1,*convert2,*audiomixer;
GstPad *conv_pad1, *conv_pad2, *mixer1_sinkpad,*mixer2_sinkpad;
gint i;
static GstBus *bus;
static guint bus_watch_id;
pipeline = gst_pipeline_new ("pipeline");
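/* NOTE: "adder" is used here; the "audiomixer" element from the question
exposes the same sink_%u request pads and can be dropped in instead */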
audiomixer = gst_element_factory_make ("adder", "mixer");
sink = gst_element_factory_make ("autoaudiosink", "sink");
src1 = gst_element_factory_make ("audiotestsrc", "src1");
convert1 = gst_element_factory_make ("audioconvert", "convert1");
src2 = gst_element_factory_make ("autoaudiosrc", "src2");
convert2 = gst_element_factory_make ("audioconvert", "convert2");
//g_object_set (sink, "async-handling", TRUE, NULL);
gst_bin_add_many (GST_BIN (pipeline), audiomixer ,sink, NULL);
gst_bin_add_many (GST_BIN (pipeline), src1 , convert1 , NULL);
gst_bin_add_many (GST_BIN (pipeline), src2 , convert2 , NULL);
gst_element_link (src1, convert1 );
gst_element_link (src2, convert2 );
gst_element_link(audiomixer , sink);
conv_pad1= gst_element_get_static_pad (convert1, "src");
mixer1_sinkpad = gst_element_get_request_pad (audiomixer, "sink_%u");
gst_pad_link (conv_pad1, mixer1_sinkpad);
g_object_unref(mixer1_sinkpad);
conv_pad2= gst_element_get_static_pad (convert2, "src");
mixer2_sinkpad = gst_element_get_request_pad (audiomixer, "sink_%u");
gst_pad_link (conv_pad2, mixer2_sinkpad);
g_object_unref(mixer2_sinkpad);
/* adds a watch for new message on our pipeline’s message bus to
* the default GLib main context, which is the main context that our
* GLib main loop is attached to below
*/
bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
bus_watch_id = gst_bus_add_watch (bus, bus_callback, NULL);
gst_object_unref (bus);
/* Start playing */
gst_element_set_state (pipeline, GST_STATE_PLAYING);
loop = g_main_loop_new (NULL, FALSE);
g_main_loop_run (loop);
g_object_unref(conv_pad1);
g_object_unref(conv_pad2);
gst_element_set_state (pipeline, GST_STATE_NULL);
g_source_remove (bus_watch_id);
}

Pad creation and some mistakes on gst_parse_launch

I am a newbie to GStreamer and I would like to know if we have to create source and sink pads for a converter like videoconvert in a pipeline. I have a pipeline like this:
gst-launch-1.0 v4l2src ! video/x-raw,format=YUY2 ! videoconvert ! xvimagesink
I am trying to create a simple C application to understand the creation of pads, and I would like to know if videoconvert has a source pad and a sink pad too. I am creating a source and sink pad for the filter.
EDIT:
Yeah, well, you see: I tried following the dynamic pipelines example and wrote the code below.
#include <gst/gst.h>
// easier to pass them as callbacks
typedef struct _CustomData{
GstElement *pipeline;
GstElement *source;
GstElement *convert;
GstElement *sink;
}CustomData;
// callback function
// here src is the v4l2src, new_pad is the GstPad that has just been added to the src element. This is usually the pad to which we want to link
// data is the pointer we provided when attaching to the signal.
static void pad_added_handler(GstElement *src, GstPad *new_pad,CustomData *data)
{
g_print("In pad handler\n");
GstPad *sink_pad = gst_element_get_static_pad(data->convert, "sink");
GstPadLinkReturn ret;
GstCaps *new_pad_caps = NULL;
GstStructure *new_pad_struct = NULL;
const gchar *new_pad_type = NULL;
if(gst_pad_is_linked(sink_pad))
{
g_print("we are linked. Ignoring\n");
}
// check the new pad types
// we have previously created a piece of pipeline which deals with videoconvert linked with xvimagesink and we will not be able to link it to a pad producing video.
//gst-pad_get_current_caps()- retrieves current capabilities of pad
new_pad_caps = gst_pad_get_current_caps(new_pad);
new_pad_struct = gst_caps_get_structure(new_pad_caps, 0);
new_pad_type = gst_structure_get_name(new_pad_struct);
g_print ("It has type '%s'\n", new_pad_type);
if(!g_str_has_prefix(new_pad_type, "video/x-raw,format=(string)YUY2"))
{
g_print("It has new pad type");
}
// gst_pad_link tries to link two pads . the link must be specified from source to sink and both pads must be owned by elements residing in same pipeline
ret = gst_pad_link(new_pad, sink_pad);
if(GST_PAD_LINK_FAILED(ret))
{
g_print("type is new_pad_type");
}
if(new_pad_caps !=NULL)
{
gst_caps_unref(new_pad_caps);
}
gst_object_unref(sink_pad);
}
int main(int argc, char *argv[])
{
GMainLoop *loop;
CustomData data;
GstBus *bus;
GstMessage *msg;
gboolean terminate = FALSE;
gst_init(&argc, &argv);
// loop = g_main_loop_new(NULL, FALSE);
// create the elements
data.source = gst_element_factory_make("v4l2src", "source");
data.convert = gst_element_factory_make("videoconvert", "convert");
data.sink = gst_element_factory_make("xvimagesink", "sink");
data.pipeline = gst_pipeline_new("new-pipeline");
if(!data.pipeline || !data.source || !data.convert || !data.sink)
{
g_printerr("Not all elements could be created\n");
return -1;
}
//we did not link source at this point of time, we will do it later
gst_bin_add_many(GST_BIN(data.pipeline), data.source, data.convert, data.sink, NULL);
// we link the convert element to the sink; do not link them with the source. we don't have source pads here yet, so we just have videoconvert->sink unlinked
// gst_element_link(data.source, data.convert);
if(!gst_element_link( data.convert,data.sink))
{
g_printerr("elements could not be linked\n");
gst_object_unref(data.pipeline);
return -1;
}
// we set the device source
//g_object_set(source, "device", "/dev/video0", NULL);
//connect to pad added signal.
// we want to attach pad added signal to source element. to do so, we are using g_signal_connect and provide callback function and datapointer.
// when source element has enough information to start producing data, it will create source pads and trigger the pad added signal. at this point, our callback is called
g_print("before signal connect\n");
gint id= g_signal_connect(G_OBJECT(data.source), "pad-added", G_CALLBACK(pad_added_handler), &data );
g_print("after signal connect with id = %d\n", id);
//g_signal_connect(G_OBJECT(data.source), "pad-added", G_CALLBACK(handler), &data);
// gst_element_link(data.source, data.convert);
GstStateChangeReturn ret;
ret =gst_element_set_state (data.pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr ("Unable to set the pipeline to the playing state.\n");
gst_object_unref (data.pipeline);
return -1;
}
// g_main_loop_run(loop);
/* Listen to the bus */
bus = gst_element_get_bus (data.pipeline);
do {
msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,
GST_MESSAGE_STATE_CHANGED | GST_MESSAGE_ERROR | GST_MESSAGE_EOS);
/* Parse message */
if (msg != NULL) {
GError *err;
gchar *debug_info;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_ERROR:
gst_message_parse_error (msg, &err, &debug_info);
g_printerr ("Error received from element %s: %s\n", GST_OBJECT_NAME (msg->src), err->message);
g_printerr ("Debugging information: %s\n", debug_info ? debug_info : "none");
g_clear_error (&err);
g_free (debug_info);
terminate = TRUE;
break;
case GST_MESSAGE_EOS:
g_print ("End-Of-Stream reached.\n");
terminate = TRUE;
break;
case GST_MESSAGE_STATE_CHANGED:
/* We are only interested in state-changed messages from the pipeline */
if (GST_MESSAGE_SRC (msg) == GST_OBJECT (data.pipeline)) {
GstState old_state, new_state, pending_state;
gst_message_parse_state_changed (msg, &old_state, &new_state, &pending_state);
g_print ("Pipeline state changed from %s to %s:\n",
gst_element_state_get_name (old_state), gst_element_state_get_name (new_state));
}
break;
default:
/* We should not reach here */
g_printerr ("Unexpected message received.\n");
break;
}
gst_message_unref (msg);
}
} while (!terminate);
/* Free resources */
gst_object_unref (bus);
gst_element_set_state(data.pipeline, GST_STATE_NULL);
gst_object_unref(data.pipeline);
return 0;
}
and it gave me an error
before signal connect
after signal connect with id = 1
Pipeline state changed from NULL to READY:
Pipeline state changed from READY to PAUSED:
Error received from element source: Internal data stream error.
Debugging information: gstbasesrc.c(3055): gst_base_src_loop (): /GstPipeline:new-pipeline/GstV4l2Src:source:
streaming stopped, reason not-linked (-1)
(The above code works if I write gst_element_link(data.source, data.convert) after the element-link statement for convert and sink.)
So I tried the normal way, in which I just added and linked all the elements together, and it began to work without the use of pads.
#include <gst/gst.h>
int main(int argc, char *argv[])
{
GstElement *pipeline, *source, *convert, *sink;
GstBus *bus;
GstMessage *msg;
gst_init(&argc, &argv);
source = gst_element_factory_make("v4l2src", "source");
convert = gst_element_factory_make("nvvidconv", "convert");
sink = gst_element_factory_make("xvimagesink", "sink");
pipeline = gst_pipeline_new("pipe");
gst_bin_add_many(GST_BIN(pipeline), source,convert,sink, NULL);
gst_element_link_many(source,convert,sink,NULL);
gst_element_set_state(pipeline,GST_STATE_PLAYING);
bus = gst_element_get_bus (pipeline);
msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE, GST_MESSAGE_ERROR | GST_MESSAGE_EOS);
/* Parse message */
if (msg != NULL) {
GError *err;
gchar *debug_info;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_ERROR:
gst_message_parse_error (msg, &err, &debug_info);
g_printerr ("Error received from element %s: %s\n", GST_OBJECT_NAME (msg->src), err->message);
g_printerr ("Debugging information: %s\n", debug_info ? debug_info : "none");
g_clear_error (&err);
g_free (debug_info);
break;
case GST_MESSAGE_EOS:
g_print ("End-Of-Stream reached.\n");
break;
default:
/* We should not reach here because we only asked for ERRORs and EOS */
g_printerr ("Unexpected message received.\n");
break;
}
gst_message_unref (msg);
}
/* Free resources */
gst_object_unref(bus);
gst_element_set_state(pipeline,GST_STATE_NULL);
gst_object_unref(pipeline);
}
But, in order to fully grasp the knowledge of pads, I wanted to execute simpler pipelines with pads.
I just don't fully understand the usage of pads and how to link them.
EDIT2:
Ultimately I want to write an application for a pipeline like this, which works perfectly well on the command line:
gst-launch-1.0 v4l2src device='/dev/video0' ! 'video/x-raw,format=(string)YUY2,width=(int)640,height=(int)480' ! nvvidconv ! 'video/x-raw(memory:NVMM),format=(string)NV12,width=(int)640,height=(int)480' ! nvvidconv ! 'video/x-raw,format=(string)NV12,width=(int)640,height=(int)480' ! nvvideoconvert ! 'video/x-raw(memory:NVMM),format=(string)NV12,width=(int)640,height=(int)480' ! mux.sink_0 nvstreammux live-source=1 name=mux batch-size=1 width=640 height=480 ! nvinfer config-file-path=/opt/nvidia/deepstream/deepstream-4.0/sources/apps/sample_apps/deepstream-test1/dstest1_pgie_config.txt batch-size=1 ! nvmultistreamtiler rows=1 columns=1 width=640 height=480 ! nvvideoconvert ! nvdsosd ! nvegltransform ! nveglglessink sync=false -v
But as I don't understand the usage of both pads and bins, I am unable to implement them in my above pipeline. However, I tried this:
#include <gst/gst.h>
#include <glib.h>
#include <stdio.h>
#include "gstnvdsmeta.h"
#define MAX_DISPLAY_LEN 64
#define PGIE_CLASS_ID_VEHICLE 0
#define PGIE_CLASS_ID_PERSON 2
gint frame_number = 0;
gchar pgie_classes_str[4][32] = { "Vehicle", "TwoWheeler", "Person",
"Roadsign"
};
static GstPadProbeReturn osd_sink_pad_buffer_probe(GstPad *pad, GstPadProbeInfo *info, gpointer u_data)
{
GstBuffer *buf=(GstBuffer *)info->data;
guint num_rects =0;
NvDsObjectMeta *obj_meta = NULL;
guint vehicle_count = 0;
guint person_count = 0;
NvDsMetaList * l_frame = NULL;
NvDsMetaList * l_obj = NULL;
NvDsDisplayMeta *display_meta = NULL;
NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buf);
for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
l_frame = l_frame->next) {
NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) (l_frame->data);
int offset = 0;
for (l_obj = frame_meta->obj_meta_list; l_obj != NULL;
l_obj = l_obj->next) {
obj_meta = (NvDsObjectMeta *) (l_obj->data);
if (obj_meta->class_id == PGIE_CLASS_ID_VEHICLE) {
vehicle_count++;
num_rects++;
}
if (obj_meta->class_id == PGIE_CLASS_ID_PERSON) {
person_count++;
num_rects++;
}
}
display_meta = nvds_acquire_display_meta_from_pool(batch_meta);
NvOSD_TextParams *txt_params = &display_meta->text_params[0];
display_meta->num_labels = 1;
txt_params->display_text = g_malloc0 (MAX_DISPLAY_LEN);
offset = snprintf(txt_params->display_text, MAX_DISPLAY_LEN, "Person = %d ", person_count);
offset = snprintf(txt_params->display_text + offset , MAX_DISPLAY_LEN, "Vehicle = %d ", vehicle_count);
/* Now set the offsets where the string should appear */
txt_params->x_offset = 10;
txt_params->y_offset = 12;
/* Font , font-color and font-size */
txt_params->font_params.font_name = "Serif";
txt_params->font_params.font_size = 10;
txt_params->font_params.font_color.red = 1.0;
txt_params->font_params.font_color.green = 1.0;
txt_params->font_params.font_color.blue = 1.0;
txt_params->font_params.font_color.alpha = 1.0;
/* Text background color */
txt_params->set_bg_clr = 1;
txt_params->text_bg_clr.red = 0.0;
txt_params->text_bg_clr.green = 0.0;
txt_params->text_bg_clr.blue = 0.0;
txt_params->text_bg_clr.alpha = 1.0;
nvds_add_display_meta_to_frame(frame_meta, display_meta);
}
g_print ("Frame Number = %d Number of objects = %d "
"Vehicle Count = %d Person Count = %d\n",
frame_number, num_rects, vehicle_count, person_count);
frame_number++;
return GST_PAD_PROBE_OK;
}
int main(int argc, char *argv[])
{
GstElement *pipeline, *source, *filter1, *convert,*filter2, *filter3, *vidconv, *filter4, *mux, *infer, *tiler, *osd, *transform , *sink, *bin, *convert2 , *vidconv2;
GMainLoop *loop;
GstCaps *caps1, *caps2, *caps3, *caps4;
GstPad *osd_sink_pad =NULL, *srcpad, *sinkpad;
loop = g_main_loop_new(NULL,FALSE);
gst_init(&argc, &argv);
pipeline = gst_pipeline_new("nv_pipeline");
gchar *string1 = "video/x-raw(memory:NVMM),format=(string)NV12";
source = gst_element_factory_make("v4l2src", "source");
filter1 = gst_element_factory_make("capsfilter", "filter1");
convert = gst_element_factory_make("nvvidconv", "convert");
filter2 = gst_element_factory_make("capsfilter", "filter2");
filter3 = gst_element_factory_make("capsfilter", "filter3");
filter4 = gst_element_factory_make("capsfilter", "filter4");
vidconv = gst_element_factory_make("nvvideoconvert", "vidconv");
mux = gst_element_factory_make("nvstreammux", "mux");
infer = gst_element_factory_make("nvinfer", "infer");
tiler = gst_element_factory_make("nvmultistreamtiler", "tiler");
osd = gst_element_factory_make("nvosd", "osd");
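/* NB: the launch line above uses nvdsosd; if "nvosd" is not available this
returns NULL, which would explain the GST_IS_ELEMENT / GST_IS_PAD
assertions in the output below */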
transform = gst_element_factory_make("nvegltransform", "transform");
sink = gst_element_factory_make("nveglglessink", "sink");
convert2 = gst_element_factory_make("nvvidconv", "convert2");
vidconv2 = gst_element_factory_make("nvvideoconvert", "vidconv2");
gst_bin_add_many(GST_BIN(pipeline), source,filter1,convert,filter2, convert2,filter3,vidconv, filter4,mux,infer, tiler,vidconv2, osd,transform,sink,NULL);
gst_element_link_many(source,filter1,convert,filter2, convert2,filter3, vidconv, filter4,mux,infer, tiler,vidconv2, osd,transform,sink,NULL);
osd_sink_pad = gst_element_get_static_pad(osd, "sink");
gst_pad_add_probe(osd_sink_pad, GST_PAD_PROBE_TYPE_BUFFER, osd_sink_pad_buffer_probe, NULL, NULL);
caps1 = gst_caps_new_simple("video/x-raw", "format",G_TYPE_STRING,"YUY2",NULL);
caps2 = gst_caps_from_string(string1);
caps3 = gst_caps_new_simple("video/x-raw", "format", G_TYPE_STRING,"NV12", NULL);
caps4 = gst_caps_from_string(string1);
g_object_set(G_OBJECT(filter1), "caps", caps1, NULL);
g_object_set(G_OBJECT(filter2), "caps", caps2, NULL);
g_object_set(G_OBJECT(filter3), "caps", caps3, NULL);
g_object_set(G_OBJECT(filter4), "caps", caps4, NULL);
g_object_set(G_OBJECT(mux), "live-source", 1, "name", "mux", "batch-size", 1, "width", 1280, "height", 720, NULL);
g_object_set(G_OBJECT(infer), "config-file-path","/opt/nvidia/deepstream/deepstream-4.0/sources/apps/sample_apps/deepstream-test1/dstest1_pgie_config.txt",NULL);
g_object_set(G_OBJECT(infer), "batch-size", 1, NULL);
g_object_set(G_OBJECT(tiler), "rows", 1, "columns", 1, "width", 1280, "height", 720, NULL);
gst_caps_unref(caps1);
gst_caps_unref(caps2);
gst_caps_unref(caps3);
gst_caps_unref(caps4);
gst_element_set_state(pipeline, GST_STATE_PLAYING);
g_print("Running ...\n");
g_main_loop_run(loop);
gst_element_set_state(pipeline,GST_STATE_NULL);
gst_object_unref(pipeline);
return 0;
}
which gives exactly the same output as the command-line gst-launch-1.0, like:
(deep_pads:15648): GLib-GObject-WARNING **: 11:29:18.761: cannot register existing type 'GstInterpolationMethod'
(deep_pads:15648): GLib-GObject-CRITICAL **: 11:29:18.761: g_param_spec_enum: assertion 'G_TYPE_IS_ENUM (enum_type)' failed
(deep_pads:15648): GLib-GObject-CRITICAL **: 11:29:18.761: validate_pspec_to_install: assertion 'G_IS_PARAM_SPEC (pspec)' failed
(deep_pads:15648): GStreamer-CRITICAL **: 11:29:18.814: gst_element_get_static_pad: assertion 'GST_IS_ELEMENT (element)' failed
(deep_pads:15648): GStreamer-CRITICAL **: 11:29:18.814: gst_pad_add_probe: assertion 'GST_IS_PAD (pad)' failed
0:00:00.843318172 15648 0x5591be52c0 INFO nvinfer gstnvinfer.cpp:519:gst_nvinfer_logger:<infer> NvDsInferContext[UID 1]:initialize(): Trying to create engine from model files
0:00:20.693301580 15648 0x5591be52c0 INFO nvinfer gstnvinfer.cpp:519:gst_nvinfer_logger:<infer> NvDsInferContext[UID 1]:generateTRTModel(): Storing the serialized cuda engine to file at /opt/nvidia/deepstream/deepstream-4.0/samples/models/Primary_Detector/resnet10.caffemodel_b1_int8.engine
except that the above C application doesn't display an output window and doesn't display the rest.
Your first example fails because the data.source element is actually never linked to the data.convert element. Since both elements have static pads, you need to retrieve them "manually" and link them before setting the pipeline to GST_STATE_PLAYING:
GstPad *source_pad = gst_element_get_static_pad(data.source, "src");
GstPad *sink_pad = gst_element_get_static_pad(data.convert, "sink");
ret = gst_pad_link(source_pad, sink_pad);
You probably expected that the static source pad of your data.source element would somehow be linked automatically, so you registered
g_signal_connect(G_OBJECT(data.source), "pad-added", G_CALLBACK(pad_added_handler), &data );
But, as you can see from your debug output, pad_added_handler was never called, because the pad-added signal is only emitted by elements that create pads dynamically. For example, the demultiplexer tsdemux will dynamically create its source pads during discovery of the elementary streams, so in that case registering a pad-added callback would be necessary.
The first step in your learning curve should be to understand the fundamental differences between "static" pads (mandatory, always exist, retrieved and linked "manually"), dynamic pads (exist sometimes, created automatically by the element), and request pads (optional, "manually" requested when needed). After that everything will be much easier for you.
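As a minimal sketch of the three cases (assuming all elements already live in the same pipeline; demuxer and mixer are placeholder names, not elements from your code):
/* static pad: always exists, just retrieve and link it
(or simply call gst_element_link() and let it do this for you) */
GstPad *src_pad = gst_element_get_static_pad (data.source, "src");
GstPad *sink_pad = gst_element_get_static_pad (data.convert, "sink");
gst_pad_link (src_pad, sink_pad);
gst_object_unref (src_pad);
gst_object_unref (sink_pad);
/* dynamic pad: created by the element at runtime, so connect to "pad-added" */
g_signal_connect (demuxer, "pad-added", G_CALLBACK (pad_added_handler), &data);
/* request pad: ask the element for one when you need it */
GstPad *mix_pad = gst_element_get_request_pad (mixer, "sink_%u");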

How to listen for oggdemux GStreamer failures?

I wrote a GStreamer app to convert Opus audio to raw audio. If I feed bad audio (just random bytes) to the pipeline, the pipeline gets stuck and I don't receive an error message on the message bus.
I'm listening to the error messages flowing through the pipeline, but I am not getting an error code to indicate the failure. The GStreamer debug logs indicate the demux failed, though; I can see the following in the logs:
0:00:00.021614679 22541 0xe5b190 WARN oggdemux gstoggdemux.c:4609:gst_ogg_demux_send_event:<oggdemux0> No chain to forward event to
0:00:00.021656681 22541 0xe5b190 WARN oggdemux gstoggdemux.c:2433:gst_ogg_demux_sink_event:<oggdemux0> EOS while trying to retrieve chain, seeking disabled
The following is an app sample that I wrote:
#include <gst/gst.h>
#include <gst/gstbin.h>
#include <gst/app/gstappsrc.h>
#include <gst/app/gstappsink.h>
#include <stdio.h>
#include <string.h>
static GMainLoop *loop;
FILE *file = NULL;
size_t bytesRead = 0;
typedef struct _CustomData
{
GstElement *pipeline;
GstAppSrc *app_source;
guint sourceid; /* To control the GSource */
} CustomData;
static gboolean push_data(CustomData *data)
{
GstBuffer *gbuffer;
GstFlowReturn ret;
char buffer[1024];
gbuffer = gst_buffer_new_and_alloc(sizeof(buffer));
GstMapInfo info;
bytesRead = fread(buffer, 1, sizeof(buffer), file);
gst_buffer_map(gbuffer, &info, GST_MAP_WRITE);
memcpy(info.data, buffer, bytesRead);
gst_buffer_unmap(gbuffer, &info);
if (bytesRead > 0)
{
//g_print("Pushing %d\n", (int)bytesRead);
/* Push the buffer into the appsrc; the "push-buffer" action signal
takes its own reference, so we still need to drop ours */
g_signal_emit_by_name(data->app_source, "push-buffer", gbuffer, &ret);
gst_buffer_unref(gbuffer);
return TRUE;
}
else
{
g_print("file complete\n");
gst_app_src_end_of_stream(data->app_source);
gst_buffer_unref(gbuffer); /* never pushed, so release it here */
return FALSE;
}
}
static void stop_feed(GstElement *source, CustomData *data)
{
if (data->sourceid != 0)
{
g_print("Stop feeding\n");
g_source_remove(data->sourceid);
data->sourceid = 0;
}
}
static void start_feed(GstElement *source, guint size, CustomData *data)
{
if (data->sourceid == 0)
{
g_print("Start feeding\n");
data->sourceid = g_idle_add((GSourceFunc)push_data, data);
}
}
static gboolean bus_call(GstBus * bus, GstMessage * msg, gpointer user_data)
{
switch (GST_MESSAGE_TYPE(msg))
{
case GST_MESSAGE_EOS:
g_print("End of stream\n");
g_main_loop_quit(loop);
break;
case GST_MESSAGE_ERROR:
{
gchar *debug;
GError *error;
gst_message_parse_error(msg, &error, &debug);
g_free(debug);
g_printerr("Error: from %s %s\n", GST_OBJECT_NAME(msg->src), error->message);
g_error_free(error);
g_main_loop_quit(loop);
break;
}
default:
break;
}
return TRUE;
}
int main(int argc,
char *argv[])
{
CustomData data;
memset(&data, 0, sizeof(data));
GstBus *bus;
guint bus_watch_id;
/* Initialisation */
gst_init(&argc, &argv);
loop = g_main_loop_new(NULL, FALSE);
GError *error = NULL;
data.pipeline = gst_parse_launch("concat name=c ! filesink location=program.wav appsrc name=src_00 ! oggdemux ! opusdec ! audioconvert ! audioresample ! audio/x-raw,format=S16LE,channels=1,rate=16000 ! queue ! c.", &error);
if (!data.pipeline)
{
g_printerr("Pipeline could not be created. Exiting.\n");
return -1;
}
data.app_source = (G_TYPE_CHECK_INSTANCE_CAST((gst_bin_get_by_name(GST_BIN(data.pipeline), "src_00")), GST_TYPE_APP_SRC, GstAppSrc));
g_signal_connect(data.app_source, "need-data", G_CALLBACK(start_feed), &data);
g_signal_connect(data.app_source, "enough-data", G_CALLBACK(stop_feed), &data);
/* we add a message handler */
bus = gst_pipeline_get_bus(GST_PIPELINE(data.pipeline));
bus_watch_id = gst_bus_add_watch(bus, bus_call, NULL);
gst_object_unref(bus);
file = fopen("junk.wav", "rb");
/* Set the pipeline to "playing" state*/
g_print("Now playing");
gst_element_set_state(data.pipeline, GST_STATE_PLAYING);
/* Iterate */
g_print("Running...\n");
g_main_loop_run(loop);
/* Out of the main loop, clean up nicely */
g_print("Returned, stopping playback\n");
gst_element_set_state(data.pipeline, GST_STATE_NULL);
g_print("Deleting pipeline\n");
gst_object_unref(GST_OBJECT(data.pipeline));
g_source_remove(bus_watch_id);
g_main_loop_unref(loop);
return 0;
}
I would have expected the demux failure to propagate to the message bus, but it does not. How can I listen for such errors?
I've tried other pipelines that use decodebin, and I get the error messages on the message bus. The following pipeline works as expected:
gst_parse_launch("concat name=c ! filesink location=program.wav appsrc name=src_00 ! decodebin ! audioconvert ! audioresample ! audio/x-raw,format=S16LE,channels=1,rate=16000 ! queue ! c.", &error);
GStreamer version: 1.8.3
OS: Ubuntu 16.04
The issue seems to be resolved in GStreamer 1.14. After updating, I now get an error message on the message bus:
Message: Error: from oggdemux0 Could not demultiplex stream.
Error Code: GST_STREAM_ERROR_DEMUX
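If updating is not an option, a possible workaround (my assumption, not verified on 1.8.3) is to raise the debug threshold for oggdemux so the failure is at least visible on stderr, either with GST_DEBUG=oggdemux:4 in the environment or programmatically after gst_init():
/* sketch: surface the oggdemux warnings even when the bus stays silent */
gst_debug_set_threshold_for_name ("oggdemux", GST_LEVEL_WARNING);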

GStreamer 1.0 - cannot play audio: Error: The stream is in the wrong format

I made a .pcm audio file via the following command:
gst-launch-1.0 filesrc location=/home/pi/rawaudio/can-you-keep-a-secret.wav ! wavparse ! audioresample ! audioconvert ! audio/x-raw,format=S16BE,channels=1,rate=44100,layout=interleaved ! filesink location=/home/pi/rawaudio/test.pcm
I can play it with:
gst-launch-1.0 filesrc location=/home/pi/rawaudio/test.pcm ! audio/x-raw,format=S16BE,channels=1,rate=44100,layout=interleaved ! audioconvert ! audioresample ! alsasink
This is working perfectly. But now I need to implement this in my C++ application. This is what I already have:
#include <string>
#include <stdio.h>
#include <gst/gst.h>
#include <gio/gio.h>
#include <boost/thread.hpp>
#define AUDIO_LOCATION "/home/pi/rawaudio/test.pcm"
static gboolean bus_call(GstBus *bus,
GstMessage *msg,
gpointer data) {
GMainLoop *loop = (GMainLoop*)data;
switch (GST_MESSAGE_TYPE(msg)) {
case GST_MESSAGE_EOS:
g_print ("End of stream\n");
g_main_loop_quit(loop);
break;
case GST_MESSAGE_ERROR: {
gchar *debug;
GError *error;
gst_message_parse_error(msg, &error, &debug);
g_free (debug);
g_printerr("Error: %s\n", error->message);
g_error_free(error);
g_main_loop_quit(loop);
break;
}
default:
break;
}
return true;
}
int main (int argc, char **argv) {
gst_init(&argc, &argv);
GstElement *pipeline, *source, *parser, *sink, *convert;
GMainLoop *loop;
GstBus *bus;
guint bus_watch_id;
// loop
loop = g_main_loop_new(NULL, false);
// pipeline
pipeline = gst_pipeline_new("test_pipeline");
sink = gst_element_factory_make ("alsasink", "sink");
source = gst_element_factory_make("filesrc", "source");
g_object_set(G_OBJECT(source), "location", AUDIO_LOCATION, NULL);
//convert = gst_element_factory_make("audioconvert", "convert");
//parser = gst_element_factory_make("audioresample","parse");
GstPad *sourcepad, *sinkpad;
sourcepad = gst_element_get_static_pad(source, "src");
gst_pad_set_caps(sourcepad,
gst_caps_new_simple("audio/x-raw",
"rate", G_TYPE_INT, 44100,
"channels", G_TYPE_INT, 1,
"format", G_TYPE_STRING, "S16BE",
"layout", G_TYPE_STRING, "interleaved",
NULL));
gst_object_unref(sourcepad);
// bus
bus = gst_pipeline_get_bus(GST_PIPELINE (pipeline));
bus_watch_id = gst_bus_add_watch(bus, bus_call, loop);
gst_object_unref(bus);
// add elements into pipeline
gst_bin_add_many(GST_BIN(pipeline), source, sink, NULL);
// link source directly to sink
gst_element_link_many(source, sink, NULL);
// start playing
gst_element_set_state(GST_ELEMENT(pipeline), GST_STATE_PLAYING);
// iterate
g_print("Running...\n");
g_main_loop_run(loop);
// out of the main loop, clean up nicely
g_print("Returned, stopping playback\n");
gst_element_set_state(pipeline, GST_STATE_NULL);
g_print("Deleting pipeline\n");
gst_object_unref(GST_OBJECT(pipeline));
g_source_remove (bus_watch_id);
g_main_loop_unref(loop);
return 0;
}
But it is not working. I get
Error: The stream is in the wrong format
when running.
What am I missing? Some elements like audioparse, audioconvert, audioresample ...? I also tried inserting audioconvert and audioresample after the source in the pipeline, but then I get an internal data stream error instead.
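A likely missing piece, judging from the working gst-launch line above: there the caps between filesrc and audioconvert act as a capsfilter element, whereas gst_pad_set_caps() on the filesrc pad does not make the downstream elements negotiate that raw format. An untested sketch of the equivalent C, reusing the names declared in the code above:
GstElement *filter = gst_element_factory_make("capsfilter", "filter");
convert = gst_element_factory_make("audioconvert", "convert");
parser = gst_element_factory_make("audioresample", "parser");
GstCaps *caps = gst_caps_new_simple("audio/x-raw",
"format", G_TYPE_STRING, "S16BE",
"channels", G_TYPE_INT, 1,
"rate", G_TYPE_INT, 44100,
"layout", G_TYPE_STRING, "interleaved",
NULL);
g_object_set(G_OBJECT(filter), "caps", caps, NULL);
gst_caps_unref(caps);
gst_bin_add_many(GST_BIN(pipeline), source, filter, convert, parser, sink, NULL);
gst_element_link_many(source, filter, convert, parser, sink, NULL);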

Playing .AVI file with GStreamer SDK in Windows

I want to play an .AVI file using GStreamer on Windows 7. The GStreamer SDK was installed as given in this link. Then a GStreamer SDK project was created and the following code was added to a C file, as given in this link, with the suggested corrections. Project properties -> Configuration properties -> Debugging -> Working directory was changed to "$(GSTREAMER_SDK_ROOT_X86)\bin", and the same was added to the Windows PATH variable, as suggested in the installation link given above. When I run the code, it just exits without playing the video; the last few lines of the output are given below. Please note that I have installed the 32-bit GStreamer SDK on 64-bit Windows 7.
Code:
#include<stdio.h>
#include<gst/gst.h>
#include<glib.h>
//Function to process message on bus of pipeline
gboolean process_message(GstBus *bus, GstMessage *msg,gpointer data);
//Function to add pad dynamically for ogg demux
void dynamic_addpad(GstElement *element, GstPad *pad, gpointer data);
void dynamic_decodepad (GstElement* object, GstPad* arg0, gboolean arg1,gpointer user_data);
GstElement *source, *demuxer, *audio_decoder, *video_decoder, *audio_convertor,*video_convertor, *audio_sink,*video_sink,*audioqueue,*videoqueue;//*audio_demuxer, *video_demuxer,
int main(int argc,char* argv[]){
GstBin *Bin;
GstBus *bus;
GMainLoop *Mainloop;
gst_init (&argc,&argv);
Mainloop = g_main_loop_new(NULL,FALSE); // NULL to use the default context and FALSE because it's not running yet
GstElement *pipeline = gst_pipeline_new("PIPELINE");
Bin = GST_BIN(pipeline);
bus = gst_pipeline_get_bus(GST_PIPELINE(pipeline));
source = gst_element_factory_make("filesrc","file-source");
g_object_set(G_OBJECT(source), "location", "file:///C:/Video.avi", NULL);
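/* NB: filesrc's "location" expects a plain filesystem path such as
"C:\\Video.avi", not a file:/// URI; uridecodebin is what takes URIs */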
demuxer = gst_element_factory_make("avidemux","avi-demuxer");
audioqueue = gst_element_factory_make("queue","Queue for audio");
videoqueue = gst_element_factory_make("queue","Queue for video");
audio_decoder = gst_element_factory_make("decodebin","a_decodebin");
video_decoder = gst_element_factory_make("decodebin","decoderbin");//"Vorbis audio decoder","vorbis");
audio_convertor = gst_element_factory_make("audioconvert","audio convertor");//"Audio converter","audioconvert");
video_convertor = gst_element_factory_make("videoscale","video convertor");//"Audio converter","audioconvert");
audio_sink = gst_element_factory_make("autoaudiosink","Auto audio sink");
video_sink = gst_element_factory_make("xvimagesink","XV video sink ");
if(!source || !demuxer || !audioqueue || !videoqueue || !audio_decoder || !video_decoder || !audio_convertor || !video_convertor || !audio_sink || !video_sink){
g_print("Could not not create element\n");
return 0;
}
gst_bin_add(Bin,source);
gst_bin_add_many(
Bin,
demuxer,
audioqueue,videoqueue,
audio_decoder,audio_convertor,
video_decoder,video_convertor,
audio_sink,video_sink,
NULL);
gst_element_link(source,demuxer);
gst_element_link_many(audioqueue,audio_decoder,NULL);
gst_element_link_many(audio_convertor,audio_sink,NULL);
gst_element_link_many(videoqueue,video_decoder,NULL);
gst_element_link_many(video_convertor,video_sink,NULL);
g_signal_connect(demuxer,"pad-added",G_CALLBACK(dynamic_addpad),NULL);//demuxer and decoder are passed as instance and data as pads of both the elements are linked in dynamic_addpad
g_signal_connect(audio_decoder,"new-decoded-pad",G_CALLBACK(dynamic_decodepad),NULL);
g_signal_connect(video_decoder,"new-decoded-pad",G_CALLBACK(dynamic_decodepad),NULL);//demuxer and decoder are passed as instance and data as pads of both the elements are linked in dynamic_addpad
gst_bus_add_watch(bus,process_message,Mainloop); //Mainloop is passed as user data as in the process_message actions are taken on the loop
g_object_unref(bus);
g_print("In playing state\n");
gst_element_set_state(pipeline, GST_STATE_PLAYING);//Pipeline is also a bin and bin is also an element at abstract level and hence gst_element_set_state call is used to set state of pipeline.
g_main_loop_run(Mainloop);
g_print("In playing state2\n");
gst_element_set_state(pipeline, GST_STATE_NULL);
g_object_unref(G_OBJECT(pipeline));
}
//Function to process message on bus of pipeline
gboolean process_message(GstBus *bus, GstMessage *msg,gpointer data){
GError *error;
gchar *debug;
GMainLoop *loop = (GMainLoop *)data;
g_print(" In process message msg->type : %d\n",GST_MESSAGE_TYPE(msg));
switch(GST_MESSAGE_TYPE(msg)){
case GST_MESSAGE_UNKNOWN :
g_print("GST_MESSAGE_UNKNOWN \n");
break;
case GST_MESSAGE_EOS :
g_print("GST_MESSAGE_EOS \n");
g_main_loop_quit(loop);
break;
case GST_MESSAGE_ERROR :
g_print("GST_MESSAGE_ERROR \n");
gst_message_parse_error (msg, &error, &debug);
g_free(debug);
//if(!error)
{
g_print("GST_MESSAGE_ERROR message : %s \n",error->message);
}
g_main_loop_quit(loop);
break;
case GST_MESSAGE_WARNING :
g_print("GST_MESSAGE_WARNING \n");
break;
case GST_MESSAGE_INFO :
g_print("GST_MESSAGE_INFO \n");
break;
case GST_MESSAGE_TAG :
g_print("GST_MESSAGE_TAG \n");
break;
case GST_MESSAGE_BUFFERING:
g_print("GST_MESSAGE_BUFFERING \n");
break;
case GST_MESSAGE_STATE_CHANGED:
g_print("GST_MESSAGE_STATE_CHANGED \n");
break;
default :
g_print("default \n");
break;
}
return TRUE; // always return TRUE to stay registered; returning FALSE would deregister the watch
}
void dynamic_addpad(GstElement *element, GstPad *pad, gpointer data) {
char* pad_name = gst_pad_get_name(pad);
g_print(" In dynamic ADDING PAD %s\n", pad_name);
if (g_str_has_prefix(pad_name,"audio")) {
GstPad *audiodemuxsink = gst_element_get_static_pad(audioqueue,"sink");
gst_pad_link(pad,audiodemuxsink );
}
else if (g_str_has_prefix(pad_name,"video")) {
GstPad *videodemuxsink = gst_element_get_static_pad(videoqueue,"sink");
gst_pad_link(pad,videodemuxsink );
}
g_free (pad_name);
}
void dynamic_decodepad (GstElement* object, GstPad* pad, gboolean arg1,gpointer user_data) {
GstPad* videoconvertsink = gst_element_get_static_pad(video_convertor,"sink");
if (gst_pad_can_link(pad,videoconvertsink)) {
gst_pad_link(pad,videoconvertsink);
}
GstPad* audioconvertsink = gst_element_get_static_pad(audio_convertor,"sink");
if (gst_pad_can_link(pad,audioconvertsink)) {
gst_pad_link(pad,audioconvertsink);
}
}
Output:
The thread 'Win32 Thread' (0x19c4) has exited with code 0 (0x0).
The thread 'Win32 Thread' (0x2370) has exited with code 0 (0x0).
The thread 'Win32 Thread' (0x2040) has exited with code 0 (0x0).
The program '[5368] GstProject2.exe: Native' has exited with code 0 (0x0).
Finally I was able to play the AVI file using the following code, which is based on this example on the GStreamer SDK website.
In command prompt:
Option 1:
gst-launch-0.10 filesrc location=C:\\Video.avi ! decodebin2 name=dec ! queue ! ffmpegcolorspace ! autovideosink dec. ! queue ! audioconvert ! audioresample ! autoaudiosink
Option 2:
gst-launch-0.10 filesrc location=C:\\Video.avi ! decodebin2 name=dec ! ffmpegcolorspace ! autovideosink dec. ! audioconvert ! audioresample ! autoaudiosink
Option 3:
gst-launch-0.10 uridecodebin uri=file:///C:/Video.avi name=dec ! ffmpegcolorspace ! autovideosink dec. ! audioconvert ! autoaudiosink
In Visual Studio:
#include <gst/gst.h>
/* Structure to contain all our information, so we can pass it to callbacks */
typedef struct _CustomData {
GstElement *pipeline;
GstElement *source;
GstElement *convert;
GstElement *audio_sink;
GstElement *colorspace;
GstElement *video_sink;
} CustomData;
/* Handler for the pad-added signal */
static void pad_added_handler (GstElement *src, GstPad *pad, CustomData *data);
int main(int argc, char *argv[]) {
CustomData data;
GstBus *bus;
GstMessage *msg;
GstStateChangeReturn ret;
gboolean terminate = FALSE;
/* Initialize GStreamer */
gst_init (&argc, &argv);
/* Create the elements */
data.source = gst_element_factory_make ("uridecodebin", "source");
data.convert = gst_element_factory_make ("audioconvert", "convert");
data.audio_sink = gst_element_factory_make ("autoaudiosink", "audio_sink");
data.colorspace = gst_element_factory_make ("ffmpegcolorspace", "colorspace");
data.video_sink = gst_element_factory_make ("autovideosink", "video_sink");
/* Create the empty pipeline */
data.pipeline = gst_pipeline_new ("test-pipeline");
if (!data.pipeline || !data.source || !data.convert || !data.audio_sink || !data.colorspace || !data.video_sink) {
g_printerr ("Not all elements could be created.\n");
return -1;
}
/* Build the pipeline. Note that we are NOT linking the source at this
* point. We will do it later. */
gst_bin_add_many (GST_BIN (data.pipeline), data.source, data.convert , data.audio_sink, data.colorspace, data.video_sink, NULL);
if (!(gst_element_link (data.convert, data.audio_sink) && gst_element_link (data.colorspace, data.video_sink))) {
g_printerr ("Elements could not be linked.\n");
gst_object_unref (data.pipeline);
return -1;
}
/* Set the URI to play */
g_object_set (data.source, "uri", "file:///C:/Video.avi", NULL);
/* Connect to the pad-added signal */
g_signal_connect (data.source, "pad-added", G_CALLBACK (pad_added_handler), &data);
/* Start playing */
ret = gst_element_set_state (data.pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr ("Unable to set the pipeline to the playing state.\n");
gst_object_unref (data.pipeline);
return -1;
}
/* Listen to the bus */
bus = gst_element_get_bus (data.pipeline);
do {
msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE, GST_MESSAGE_STATE_CHANGED | GST_MESSAGE_ERROR | GST_MESSAGE_EOS);
/* Parse message */
if (msg != NULL) {
GError *err;
gchar *debug_info;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_ERROR:
gst_message_parse_error (msg, &err, &debug_info);
g_printerr ("Error received from element %s: %s\n", GST_OBJECT_NAME (msg->src), err->message);
g_printerr ("Debugging information: %s\n", debug_info ? debug_info : "none");
g_clear_error (&err);
g_free (debug_info);
terminate = TRUE;
break;
case GST_MESSAGE_EOS:
g_print ("End-Of-Stream reached.\n");
terminate = TRUE;
break;
case GST_MESSAGE_STATE_CHANGED:
/* We are only interested in state-changed messages from the pipeline */
if (GST_MESSAGE_SRC (msg) == GST_OBJECT (data.pipeline)) {
GstState old_state, new_state, pending_state;
gst_message_parse_state_changed (msg, &old_state, &new_state, &pending_state);
g_print ("Pipeline state changed from %s to %s:\n",
gst_element_state_get_name (old_state), gst_element_state_get_name (new_state));
}
break;
default:
/* We should not reach here */
g_printerr ("Unexpected message received.\n");
break;
}
gst_message_unref (msg);
}
} while (!terminate);
/* Free resources */
gst_object_unref (bus);
gst_element_set_state (data.pipeline, GST_STATE_NULL);
gst_object_unref (data.pipeline);
return 0;
}
/* This function will be called by the pad-added signal */
static void pad_added_handler (GstElement *src, GstPad *new_pad, CustomData *data) {
GstPad *sink_pad_audio = gst_element_get_static_pad (data->convert, "sink");
GstPad *sink_pad_video = gst_element_get_static_pad (data->colorspace, "sink");
GstPadLinkReturn ret;
GstCaps *new_pad_caps = NULL;
GstStructure *new_pad_struct = NULL;
const gchar *new_pad_type = NULL;
g_print ("Received new pad '%s' from '%s':\n", GST_PAD_NAME (new_pad), GST_ELEMENT_NAME (src));
///* If our converter is already linked, we have nothing to do here */
//if (gst_pad_is_linked (sink_pad)) {
// g_print (" We are already linked. Ignoring.\n");
// goto exit;
//}
/* Check the new pad's type */
new_pad_caps = gst_pad_get_caps (new_pad);
new_pad_struct = gst_caps_get_structure (new_pad_caps, 0);
new_pad_type = gst_structure_get_name (new_pad_struct);
if (!g_str_has_prefix (new_pad_type, "audio/x-raw")) {
g_print (" It has type '%s' which is raw video. Connecting.\n", new_pad_type);
/* Attempt the link */
ret = gst_pad_link (new_pad, sink_pad_video);
if (GST_PAD_LINK_FAILED (ret)) {
g_print (" Type is '%s' but link failed.\n", new_pad_type);
} else {
g_print (" Link succeeded (type '%s').\n", new_pad_type);
}
goto exit;
}
/* Attempt the link */
ret = gst_pad_link (new_pad, sink_pad_audio);
if (GST_PAD_LINK_FAILED (ret)) {
g_print (" Type is '%s' but link failed.\n", new_pad_type);
} else {
g_print (" Link succeeded (type '%s').\n", new_pad_type);
}
exit:
/* Unreference the new pad's caps, if we got them */
if (new_pad_caps != NULL)
gst_caps_unref (new_pad_caps);
/* Unreference the sink pad */
gst_object_unref (sink_pad_audio);
gst_object_unref (sink_pad_video);
}