GStreamer RTSP application for audio and video

I was trying to develop an application for the pipeline:
gst-launch-1.0 rtspsrc location="rtsp://192.168.3.30:8554/rajvi" latency=0 name=demux demux. ! queue ! rtpmp4gdepay ! aacparse ! avdec_aac ! audioconvert ! audioresample ! autoaudiosink demux. ! queue ! rtph264depay ! h264parse ! omxh264dec ! videoconvert ! videoscale ! video/x-raw,width=176,height=144 ! ximagesink
Following is the code which I have implemented:
#include <gst/gst.h>
static void onPadAdded(GstElement *element, GstPad *pad, gpointer data)
{
gchar *name;
name = gst_pad_get_name(pad);
g_print("A new pad %s was created\n", name);
GstCaps *p_caps = gst_pad_get_pad_template_caps(pad);
gchar *description = gst_caps_to_string(p_caps);
g_print("Pad caps: %s\n", description);
g_free(description);
gst_caps_unref(p_caps); /* the template caps are returned with a reference that must be dropped */
GstElement *depay = GST_ELEMENT(data);
if(gst_element_link_pads(element, name, depay, "sink") == 0)
{
g_print("cb_new_rtspsrc_pad : failed to link elements \n");
}
g_free(name);
}
int main(int argc, char *argv[]) {
GstElement *source, *video, *pipeline, *audioDepay, *audioQueue, *videoQueue,
*audioParse, *audioDecode, *audioConvert, *audioResample, *audioSink, *videoDepay, *videoParser, *videoDecode, *videoConvert, *videoScale, *videoSink;
GstCaps *capsFilter;
GstBus *bus;
GstMessage *msg;
GstPad *pad;
GstPad *sinkpad,*ghost_sinkpad;
gboolean link_ok;
GstStateChangeReturn ret;
/* Initialize GStreamer */
gst_init (&argc, &argv);
/* Create Elements */
pipeline = gst_pipeline_new("rtsp-pipeline");
source = gst_element_factory_make ("rtspsrc", "source");
/*audio bin*/
audioQueue = gst_element_factory_make ("queue", "audio-queue");
audioDepay = gst_element_factory_make ("rtpmp4gdepay", "audio-depayer");
audioParse = gst_element_factory_make ("aacparse", "audio-parser");
audioDecode = gst_element_factory_make ("avdec_aac", "audio-decoder");
audioConvert = gst_element_factory_make ("audioconvert", "aconv");
audioResample = gst_element_factory_make ("audioresample", "audio-resample");
audioSink = gst_element_factory_make ("autoaudiosink", "audiosink");
if (!audioQueue || !audioDepay || !audioParse || !audioDecode || !audioConvert || !audioResample || !audioSink)
{
g_printerr("Cannot create audio elements \n");
return 0;
}
g_object_set(source, "location", "rtsp://192.168.3.30:8554/rajvi", NULL);
g_object_set(source, "latency", 0, NULL);
g_signal_connect(G_OBJECT(source), "pad-added", G_CALLBACK(onPadAdded), audioDepay);
gst_bin_add_many(GST_BIN(pipeline), source, audioQueue, audioDepay, audioParse, audioDecode,
audioConvert, audioResample, audioSink, NULL);
if (!gst_element_link_many(audioQueue, audioDepay, audioParse, audioDecode, audioConvert, audioResample, audioSink, NULL))
{
g_printerr("Error linking fields ...1 \n");
return 0;
}
video = gst_bin_new ("videobin");
videoQueue = gst_element_factory_make ("queue", "video-queue");
videoDepay= gst_element_factory_make ("rtph264depay", "video-depayer");
videoParser = gst_element_factory_make ("h264parse", "video-parser");
videoDecode = gst_element_factory_make ("omxh264dec", "video-decoder");
videoConvert = gst_element_factory_make("videoconvert", "convert");
videoScale = gst_element_factory_make("videoscale", "video-scale");
videoSink = gst_element_factory_make("ximagesink", "video-sink");
capsFilter = gst_caps_new_simple("video/x-raw",
"width", G_TYPE_INT, 176,
"height", G_TYPE_INT, 144,
NULL);
if (!videoQueue || !videoDepay || !videoParser || !videoDecode || !videoConvert || !videoScale || !videoSink || !capsFilter)
{
g_printerr("Cannot create video elements \n");
return 0;
}
gst_bin_add_many(GST_BIN(video), videoQueue, videoDepay, videoParser, videoDecode, videoConvert, videoScale,
videoSink, NULL);
/* set property value */
link_ok = gst_element_link_filtered(videoConvert, videoSink, capsFilter);
gst_caps_unref (capsFilter);
if (!link_ok) {
g_warning ("Failed to link element1 and element2!");
}
sinkpad = gst_element_get_static_pad (videoConvert, "sink");
ghost_sinkpad = gst_ghost_pad_new ("sink", sinkpad);
gst_pad_set_active (ghost_sinkpad, TRUE);
gst_element_add_pad (video, ghost_sinkpad);
if (!gst_element_link_many(videoQueue, videoDepay, videoParser, videoDecode, videoScale, NULL))
{
g_printerr("Error linking fields... 2 \n");
return 0;
}
gst_bin_add_many (GST_BIN(pipeline), video,NULL);
/* Start playing */
gst_element_set_state ( pipeline, GST_STATE_PLAYING);
/* Wait until error or EOS */
bus = gst_element_get_bus (pipeline);
msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE, GST_MESSAGE_ERROR | GST_MESSAGE_EOS);
/* Free resources */
if (msg != NULL)
gst_message_unref (msg);
gst_object_unref (bus);
gst_element_set_state (pipeline, GST_STATE_NULL);
gst_object_unref (pipeline);
return 0;
}
I am getting an error when linking the pipeline -> audio -> video bins.

If you put the video and audio elements all together in the pipeline bin, then you can do it. Figure out what your caps are for the video and audio and you should be able to link them.
#include <gst/gst.h>
#include <cstring>
#include <iostream>
// ----------------------------------
// pad-added signal
// ----------------------------------
static void onPadAdded(GstElement* element, GstPad* pad, gpointer user_data)
{
gchar *name;
GstCaps * p_caps;
GstElement* nextElement = NULL; /* stays NULL when the pad is neither audio nor video */
GstElement* pipeline = (GstElement*)user_data;
name = gst_pad_get_name(pad);
g_print("A new pad %s was created\n", name);
p_caps = gst_pad_get_pad_template_caps(pad);
if (strstr(name, "[CAPS FOR VIDEO CONTAIN]") != NULL)
{
std::cout << std::endl << "------------------------ Video -------------------------------" << std::endl;
nextElement = gst_bin_get_by_name(GST_BIN(pipeline), "video-depayer");
}
else if (strstr(name, "[CAPS FOR AUDIO CONTAIN]") != NULL)
{
std::cout << std::endl << "------------------------ Audio -------------------------------" << std::endl;
nextElement = gst_bin_get_by_name(GST_BIN(pipeline), "audio-depayer");
}
if (nextElement != NULL)
{
if (!gst_element_link_filtered(element, nextElement, p_caps))
//if (!gst_element_link_pads_filtered(element, name, nextElement, "sink", p_caps))
{
std::cout << std::endl << "Failed to link video element to src to sink" << std::endl;
}
gst_object_unref(nextElement);
}
g_free(name);
gst_caps_unref(p_caps);
}
// ----------------------------------
// main
// ----------------------------------
int main(int argc, char *argv[])
{
GstElement *source, *pipeline, *audioDepay, *audioQueue, *videoQueue,
*audioParse, *audioDecode, *audioConvert, *audioResample, *audioSink, *videoDepay, *videoParser, *videoDecode, *videoConvert, *videoScale, *videoSink;
GstCaps *capsFilter;
GstBus *bus;
GstMessage *msg;
GstPad *pad;
gboolean link_ok;
GstStateChangeReturn ret;
/* Initialize GStreamer */
gst_init(&argc, &argv);
/* Create Elements */
pipeline = gst_pipeline_new("rtsp-pipeline");
source = gst_element_factory_make("rtspsrc", "source");
/*audio bin*/
audioQueue = gst_element_factory_make("queue", "audio-queue");
audioDepay = gst_element_factory_make("rtpmp4gdepay", "audio-depayer");
audioParse = gst_element_factory_make("aacparse", "audio-parser");
audioDecode = gst_element_factory_make("avdec_aac", "audio-decoder");
audioConvert = gst_element_factory_make("audioconvert", "aconv");
audioResample = gst_element_factory_make("audioresample", "audio-resample");
audioSink = gst_element_factory_make("autoaudiosink", "audiosink");
if (!audioQueue || !audioDepay || !audioParse || !audioDecode || !audioConvert || !audioResample || !audioSink)
{
g_printerr("Cannot create audio elements \n");
return 0;
}
g_object_set(source, "location", "rtsp://192.168.3.30:8554/rajvi", NULL);
g_object_set(source, "latency", 0, NULL);
g_signal_connect(G_OBJECT(source), "pad-added", G_CALLBACK(onPadAdded), pipeline);
gst_bin_add_many(GST_BIN(pipeline), source, audioQueue, audioDepay, audioParse, audioDecode,
audioConvert, audioResample, audioSink, NULL);
if (!gst_element_link_many(audioQueue, audioDepay, audioParse, audioDecode, audioConvert, audioResample, audioSink, NULL))
{
g_printerr("Error linking fields ...1 \n");
return 0;
}
videoQueue = gst_element_factory_make("queue", "video-queue");
videoDepay = gst_element_factory_make("rtph264depay", "video-depayer");
videoParser = gst_element_factory_make("h264parse", "video-parser");
videoDecode = gst_element_factory_make("omxh264dec", "video-decoder");
videoConvert = gst_element_factory_make("videoconvert", "convert");
videoScale = gst_element_factory_make("videoscale", "video-scale");
videoSink = gst_element_factory_make("ximagesink", "video-sink");
capsFilter = gst_caps_new_simple("video/x-raw",
"width", G_TYPE_INT, 176,
"height", G_TYPE_INT, 144,
NULL);
if (!videoQueue || !videoDepay || !videoParser || !videoDecode || !videoConvert || !videoScale || !videoSink || !capsFilter)
{
g_printerr("Cannot create video elements \n");
return 0;
}
gst_bin_add_many(GST_BIN(pipeline), videoQueue, videoDepay, videoParser, videoDecode, videoConvert, videoScale,
videoSink, NULL);
/* set property value */
link_ok = gst_element_link_filtered(videoConvert, videoSink, capsFilter);
gst_caps_unref(capsFilter);
if (!link_ok) {
g_warning("Failed to link element1 and element2!");
}
if (!gst_element_link_many(videoQueue, videoDepay, videoParser, videoDecode, videoScale, videoConvert, NULL)) /* videoScale must feed videoConvert, which is linked to videoSink above */
{
g_printerr("Error linking fields... 2 \n");
return 0;
}
/* Start playing */
gst_element_set_state(pipeline, GST_STATE_PLAYING);
/* Wait until error or EOS */
bus = gst_element_get_bus(pipeline);
msg = gst_bus_timed_pop_filtered(bus, GST_CLOCK_TIME_NONE,(GstMessageType)( GST_MESSAGE_ERROR | GST_MESSAGE_EOS));
/* Free resources */
if (msg != NULL)
gst_message_unref(msg);
gst_object_unref(bus);
gst_element_set_state(pipeline, GST_STATE_NULL);
gst_object_unref(pipeline);
return 0;
}
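If you'd rather not guess at the pad-name placeholders above, you can decide audio vs. video from the new pad's caps instead of its name. A minimal sketch for the body of onPadAdded, assuming the pads come from rtspsrc (whose pads carry application/x-rtp caps with a "media" field):
GstCaps *caps = gst_pad_get_current_caps (pad);
const gchar *media = NULL;
if (caps != NULL) {
    const GstStructure *s = gst_caps_get_structure (caps, 0);
    media = gst_structure_get_string (s, "media"); /* "audio" or "video" on RTP pads */
}
if (g_strcmp0 (media, "video") == 0)
    nextElement = gst_bin_get_by_name (GST_BIN (pipeline), "video-depayer");
else if (g_strcmp0 (media, "audio") == 0)
    nextElement = gst_bin_get_by_name (GST_BIN (pipeline), "audio-depayer");
if (caps != NULL)
    gst_caps_unref (caps);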

Related

How to create gstreamer pipeline with parallel branches having different FPS using tee plugin

Hi, I want to create a GStreamer pipeline with two branches running at different FPS. The C++ code I wrote is given below:
#include <iostream>
#include <string.h>
#include <gst/gst.h>
#include <gst/app/app.h>
using namespace std;
GstElement *src, *dbin, *conv, *tee, *mux, *parse, *pipeline;
GstElement *queue1,*videorate1, *conv1, *jenc1, *sink1;
GstElement *queue2,*videorate2, *conv2, *jenc2, *sink2;
GMainLoop *loop;
static gboolean
message_cb (GstBus * bus, GstMessage * message, gpointer user_data)
{
//Cpipeline *obj_pipeline = (Cpipeline*)user_data;
switch (GST_MESSAGE_TYPE (message)) {
case GST_MESSAGE_ERROR:{
GError *err = NULL;
gchar *name, *debug = NULL;
name = gst_object_get_path_string (message->src);
gst_message_parse_error (message, &err, &debug);
g_printerr ("ERROR: from element %s: %s\n", name, err->message);
if (debug != NULL)
g_printerr ("Additional debug info:\n%s\n", debug);
g_error_free (err);
g_free (debug);
g_free (name);
g_main_loop_quit (loop);
break;
}
case GST_MESSAGE_WARNING:{
GError *err = NULL;
gchar *name, *debug = NULL;
name = gst_object_get_path_string (message->src);
gst_message_parse_warning (message, &err, &debug);
g_printerr ("ERROR: from element %s: %s\n", name, err->message);
if (debug != NULL)
g_printerr ("Additional debug info:\n%s\n", debug);
g_error_free (err);
g_free (debug);
g_free (name);
break;
}
case GST_MESSAGE_EOS:
g_print ("\nGot EOS\n");
g_main_loop_quit (loop);
break;
default:
break;
}
return TRUE;
}
static void pad_added_handler (GstElement *src, GstPad *new_pad, gpointer x)
{
GstPad *sink_pad = gst_element_get_static_pad (parse, "sink");
GstPadLinkReturn ret;
GstCaps *new_pad_caps = NULL;
GstStructure *new_pad_struct = NULL;
const gchar *new_pad_type = NULL;
/* If our converter is already linked, we have nothing to do here */
if (gst_pad_is_linked (sink_pad)) {
g_print ("We are already linked. Ignoring.\n");
goto exit;
}
new_pad_caps = gst_pad_get_current_caps (new_pad);
new_pad_struct = gst_caps_get_structure (new_pad_caps, 0);
new_pad_type = gst_structure_get_name (new_pad_struct);
if (!g_str_has_prefix (new_pad_type, "video/x-h264")) {
g_print ("It has type '%s' which is not H.264 video. Ignoring.\n", new_pad_type);
goto exit;
}
ret = gst_pad_link (new_pad, sink_pad);
if (GST_PAD_LINK_FAILED (ret)) {
g_print ("Type is '%s' but link failed.\n", new_pad_type);
goto exit;
}
exit:
/* Unreference the new pad's caps, if we got them */
if (new_pad_caps != NULL)
gst_caps_unref (new_pad_caps);
/* Unreference the sink pad */
gst_object_unref (sink_pad);
}
int main()
{
gst_init (NULL, NULL);
pipeline = gst_pipeline_new (NULL);
src = gst_element_factory_make ("filesrc", NULL);
mux = gst_element_factory_make("qtdemux",NULL);
parse = gst_element_factory_make("h264parse",NULL);
dbin = gst_element_factory_make ("nvv4l2decoder", NULL);
conv = gst_element_factory_make ("nvvideoconvert", NULL);
tee = gst_element_factory_make ("tee", NULL);
std::string url = "VD19_peoplewalking.mp4";
if (!pipeline || !src || !dbin || !conv || !tee || !mux || !parse) {
g_error ("Failed to create elements");
return -1;
}
g_object_set (src, "location", url.c_str(), NULL);
gst_bin_add_many (GST_BIN (pipeline), src, dbin, mux, parse, conv, tee, NULL);
if (!gst_element_link_many(src,mux,NULL) || !gst_element_link_many(parse,dbin,conv, tee,NULL) )//|| !gst_element_link_many (conv, tee, NULL))
{
g_error("Failed to link elements");
return -3;
}
g_signal_connect (mux, "pad-added", G_CALLBACK (pad_added_handler), NULL);
//First Branch creation
GstPadTemplate *templ;
templ =
gst_element_class_get_pad_template (GST_ELEMENT_GET_CLASS (tee),
"src_%u");
GstPad *teepad1 = gst_element_request_pad (tee, templ, NULL, NULL);
queue1 = gst_element_factory_make ("queue", NULL);
videorate1 = gst_element_factory_make("videorate",NULL);
conv1 = gst_element_factory_make ("nvvideoconvert", NULL);
//jenc = gst_element_factory_make ("jpegenc",NULL);
sink1 = gst_element_factory_make ("autovideosink", NULL);
//sink = gst_element_factory_make ("appsink", NULL);
g_object_set (G_OBJECT(videorate1), "rate", 1.0, NULL);
gst_bin_add_many (GST_BIN (pipeline), queue1, videorate1, conv1, sink1, NULL);
if (!gst_element_link_many ( queue1, conv1, videorate1, sink1, NULL))
{
g_error ("Failed to link elements");
}
GstPad *sinkpad = gst_element_get_static_pad ( queue1, "sink");
gst_pad_link ( teepad1, sinkpad);
gst_object_unref (sinkpad);
//First Branch creation ends
//Second Branc
GstPadTemplate *templ2;
templ2 =
gst_element_class_get_pad_template (GST_ELEMENT_GET_CLASS (tee),
"src_%u");
GstPad *teepad2 = gst_element_request_pad (tee, templ2, NULL, NULL);
queue2 = gst_element_factory_make ("queue", NULL);
videorate2 = gst_element_factory_make("videorate",NULL);
conv2 = gst_element_factory_make ("nvvideoconvert", NULL);
sink2 = gst_element_factory_make ("autovideosink", NULL);
g_object_set (G_OBJECT(videorate2), "rate", 0.5, NULL);
gst_bin_add_many (GST_BIN (pipeline), queue2, videorate2, conv2, sink2, NULL);
if (!gst_element_link_many ( queue2, conv2, videorate2, sink2, NULL))
{
g_error ("Failed to link elements");
}
GstPad *sinkpad2 = gst_element_get_static_pad ( queue2, "sink");
gst_pad_link ( teepad2, sinkpad2);
gst_object_unref (sinkpad2);
//Second brach creation ends
GstBus *bus;
loop = g_main_loop_new (NULL, FALSE);
bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
gst_bus_add_signal_watch (bus);
g_signal_connect (G_OBJECT (bus), "message", G_CALLBACK (message_cb), NULL);
gst_object_unref (GST_OBJECT (bus));
gst_element_set_state (pipeline, GST_STATE_PLAYING);
g_main_loop_run (loop);
gst_element_set_state (pipeline, GST_STATE_NULL);
g_main_loop_unref (loop);
gst_object_unref (pipeline);
}
From the command line I am able to run multiple branches with different FPS; please see the command below:
gst-launch-1.0 filesrc location=VD19_peoplewalking.mp4 ! qtdemux ! h264parse ! nvv4l2decoder ! tee name=t ! queue ! videorate ! "video/x-raw(ANY),framerate=1/1" ! nvvideoconvert ! autovideosink t. ! videorate ! "video/x-raw(ANY),framerate=30/1" ! nvvideoconvert ! autovideosink
I am able to run the C++ code, but the streams are not played as expected; both streams get stuck intermittently while the code is running.
Am I missing something?
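One difference worth noting: the working gst-launch line pins each branch's framerate with a caps filter after videorate, while the C++ code sets videorate's rate property instead. A sketch of the caps-based variant for the first branch (the capsfilter element and its name are additions, not from the original code):
GstElement *fpsfilter1 = gst_element_factory_make ("capsfilter", NULL);
GstCaps *fps1 = gst_caps_new_simple ("video/x-raw",
    "framerate", GST_TYPE_FRACTION, 1, 1, NULL);
g_object_set (fpsfilter1, "caps", fps1, NULL);
gst_caps_unref (fps1);
/* then link: queue1 -> videorate1 -> fpsfilter1 -> conv1 -> sink1 */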

gstreamer pipeline from cam to file C code ends up with empty output file

I have a USB camera. I have working terminal commands to record or display full-HD video and to save one 4K image. I would like to handle it all via a C++ app. Let's concentrate on the video saving:
gst-launch-1.0 v4l2src device=/dev/video0 num-buffers=900 ! image/jpeg, width=1920, height=1080, io-mode=4 ! imxvpudec ! imxvpuenc_mjpeg ! avimux ! filesink location=/mnt/ssd/test.avi
will save 900 frames (i.e. 30 s) of video. I would like to have C++ code that records indefinitely (in the future maybe in hour-long segments) until I (the app) tell it to end.
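As an aside, for hour-long segments the splitmuxsink element can rotate output files by duration; a sketch, with an illustrative location pattern and limit:
GstElement *smux = gst_element_factory_make ("splitmuxsink", "smux");
g_object_set (smux,
    "location", "/mnt/ssd/segment-%05d.avi",            /* illustrative path */
    "muxer", gst_element_factory_make ("avimux", NULL), /* avimux, as in the command line */
    "max-size-time", (guint64) 3600 * GST_SECOND,       /* one hour per file */
    NULL);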
I came up with
struct {
GstElement *pipeline_sink, *source, *appsink;
GstElement *pipeline_src, *appsrc, *decoder, *mux, *sink, *encoder;
} usbCam::mGstData;
int usbCam::gstInit(){
GstCaps *caps;
GstStateChangeReturn ret;
// Initialize GStreamer
if (!gst_is_initialized()) {
setenv("GST_DEBUG", ("*:" + std::to_string(3)).c_str(), 1);
gst_init(nullptr, nullptr);
}
// Create the elements
mGstData.source = gst_element_factory_make ("v4l2src", "source");
g_object_set (mGstData.source, "device", "/dev/video0", NULL);
mGstData.appsink = gst_element_factory_make ("appsink", "appsink"); /* was missing: appsink is used below but never created */
mGstData.pipeline_sink = gst_pipeline_new ("pipeline_sink");
caps = gst_caps_new_any();
gst_app_sink_set_caps(GST_APP_SINK(mGstData.appsink), caps);
gst_caps_unref (caps);
gst_app_sink_set_emit_signals(GST_APP_SINK(mGstData.appsink), true);
// Build the pipeline
gst_bin_add_many (GST_BIN (mGstData.pipeline_sink), mGstData.source, mGstData.appsink, NULL);
if (gst_element_link_many(mGstData.source, mGstData.appsink, NULL) != TRUE) {
g_printerr ("Elements could not be linked.\n");
gst_object_unref (mGstData.pipeline_sink);
return -1;
}
return 0;
}
int usbCam::videoStart(){
GstCaps *caps;
GstStateChangeReturn ret;
if (!mGstData.pipeline_sink || !mGstData.source) {
g_printerr ("Not all elements could be created.\n");
return -1;
}
mGstData.appsrc = gst_element_factory_make ("appsrc", "appsrc");
mGstData.decoder = gst_element_factory_make ("imxvpudec", "transform_enc");
mGstData.mux = gst_element_factory_make ("avimux", "avimux");
mGstData.sink = gst_element_factory_make ("filesink", "sink");
g_object_set (mGstData.sink, "location", "/mnt/ssd/videoTest.avi", NULL);
mGstData.pipeline_src = gst_pipeline_new ("pipeline_src");
if (!mGstData.pipeline_src || !mGstData.appsrc || !mGstData.decoder || !mGstData.mux || !mGstData.sink) {
g_printerr ("Not all elements could be created.\n");
return -1;
}
caps = gst_caps_new_simple ("image/jpeg",
"width", G_TYPE_INT, 1920,
"height", G_TYPE_INT, 1080,
"io-mode", G_TYPE_INT, 4,
NULL);
gst_app_src_set_caps(GST_APP_SRC(mGstData.appsrc), caps);
gst_caps_unref (caps);
gst_app_src_set_duration(GST_APP_SRC(mGstData.appsrc), GST_TIME_AS_MSECONDS(80));
gst_app_src_set_stream_type(GST_APP_SRC(mGstData.appsrc), GST_APP_STREAM_TYPE_STREAM);
gst_app_src_set_latency(GST_APP_SRC(mGstData.appsrc), -1, 0);
gst_bin_add_many (GST_BIN (mGstData.pipeline_src), mGstData.appsrc, mGstData.decoder, mGstData.sink, NULL);
if (gst_element_link_many(mGstData.appsrc, mGstData.decoder, mGstData.sink, NULL) != TRUE) {
g_printerr ("Elements could not be linked.\n");
gst_object_unref (mGstData.pipeline_src);
return -1;
}
ret = gst_element_set_state (mGstData.pipeline_src, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr ("Unable to set the pipeline to the playing state.\n");
gst_object_unref (mGstData.pipeline_src);
return -1;
}
return 0;
}
int usbCam::videoEnd(){
{
gst_app_src_end_of_stream(GST_APP_SRC(mGstData.appsrc));
usleep(500000);
gst_element_set_state (mGstData.pipeline_src, GST_STATE_NULL);
gst_object_unref (mGstData.pipeline_src);
return 0;
}
Now, this code runs. There is no error in the output, though there is one warning:
(GLib-GObject-WARNING **: 17:51:34.132: g_object_set_is_valid_property: object class 'GstSplitMuxSink' has no property named 'h}\x9fe h\xe6a_no_\xc1')
What actually bothers me is the output file. It is created, but it is an empty file of 0 B size. Can anyone point me in the direction of the proper fix?
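Note: nothing in the code above ever moves data from pipeline_sink's appsink into pipeline_src's appsrc, so the recording pipeline is starved and filesink writes nothing. A minimal hand-off sketch under that reading, reusing the member names from above:
static GstFlowReturn on_new_sample (GstAppSink *appsink, gpointer user_data)
{
    /* pull the captured sample and push it into the recording pipeline */
    GstElement *appsrc = GST_ELEMENT (user_data);
    GstSample *sample = gst_app_sink_pull_sample (appsink);
    GstFlowReturn ret = gst_app_src_push_sample (GST_APP_SRC (appsrc), sample);
    gst_sample_unref (sample);
    return ret;
}
/* after both pipelines are built: */
g_signal_connect (mGstData.appsink, "new-sample", G_CALLBACK (on_new_sample), mGstData.appsrc);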
Edit: Today I came up with two other attempts. The first one is not that different from the one already posted here. The second gives me a pipeline with the wrong parameters (different FPS), and I am unable to stop it correctly, so the file does not get a proper end-of-stream.
GstElement *pipeline;
GstBus *bus;
GstMessage *msg;
std::string command = "v4l2src device=/dev/video0 ! image/jpeg, width=1920, height=1080, io-mode=4 ! imxvpudec ! imxvpuenc_mjpeg ! avimux ! filesink location = /mnt/ssd/testPipeline.avi";
/* Build the pipeline */
pipeline =
gst_parse_launch
(command.c_str(),
NULL);
/* Start playing */
gst_element_set_state (pipeline, GST_STATE_PLAYING);
/* Wait until error or EOS */
bus = gst_element_get_bus (pipeline);
msg =
gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE, GstMessageType(
GST_MESSAGE_ERROR | GST_MESSAGE_EOS));
/* Free resources */
if (msg != NULL)
gst_message_unref (msg);
gst_object_unref (bus);
gst_element_set_state (pipeline, GST_STATE_NULL);
gst_object_unref (pipeline);
EDIT2:
OK now my code looks like this:
GstElement *pipeline;
GstElement *tee; //in the future I would like to save video and images AND stream or use this pipeline data internally.
void gstFail(const gchar* message){
g_printerr(message);
gst_object_unref (pipeline);
return;
}
void videoStart(std::string path){
if (!gst_is_initialized()) {
setenv("GST_DEBUG", ("*:" + std::to_string(3)).c_str(), 1);
gst_init(nullptr, nullptr);
}
GstCaps *caps;
GstStateChangeReturn ret;
GstElement *source, *muxer, *sink;
source = gst_element_factory_make ("v4l2src", "source");
g_object_set (source, "device", mVideoDevice.toStdString().c_str(), NULL);
muxer = gst_element_factory_make ("avimux", "avimux");
tee = gst_element_factory_make("tee", "tee");
sink = gst_element_factory_make ("filesink", "sink");
g_object_set (sink, "location", path.c_str(), NULL);
pipeline = gst_pipeline_new ("pipeline_src");
if (!pipeline || !source || !muxer || !sink) {
g_printerr ("Not all elements could be created.\n");
return;
}
caps = gst_caps_new_simple ("image/jpeg",
"width", G_TYPE_INT, 1920,
"height", G_TYPE_INT, 1080,
"io-mode", G_TYPE_INT, 4,
"framerate", GST_TYPE_FRACTION, 30, 1,
"pixel-aspect-ratio", GST_TYPE_FRACTION, 1,1,
"interlace-mode", G_TYPE_STRING, "progresive",
NULL);
gst_bin_add_many (GST_BIN (pipeline), source, muxer,tee, sink, NULL);
if (gst_element_link_filtered(source, muxer, caps) != TRUE) {
gst_caps_unref (caps);
gstFail("Elements could not be linked or caps set.\n");
return;
}
gst_caps_unref (caps);
if (gst_element_link_many(muxer,tee, sink, NULL) != TRUE) {
gstFail("Elements could not be linked or caps set.\n");
return;
}
ret = gst_element_set_state (pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
gstFail("Unable to set the pipeline to the playing state.\n");
return;
}
return;
}
void videoEnd(void)
{
GstMessage *message = gst_message_new_eos(&pipeline->object);
gst_bus_post(pipeline->bus, message); /* gst_bus_post() takes ownership of the message, so do not unref it here */
gst_element_change_state(pipeline, GST_STATE_CHANGE_PLAYING_TO_PAUSED);
gst_element_change_state(pipeline, GST_STATE_CHANGE_PAUSED_TO_READY);
gst_element_set_state (pipeline, GST_STATE_NULL);
gst_object_unref(pipeline);
}
void takeImage(std::string path){
GstElement *sink = gst_element_factory_make("multifilesink", "multifilesink");
g_object_set (sink, "location", path.c_str(), NULL);
gst_bin_add_many (GST_BIN (pipeline), sink, NULL);
if (gst_element_link_many(tee, sink, NULL) != TRUE) {
gstFail("Elements could not be linked or caps set.\n");
return;
}
return;
}
This saves the video ALMOST OK (VLC does not display the correct length, but when I check the file properties via Nautilus in Ubuntu the correct length is displayed and the video is playable). It does not save the pictures.
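The incorrect length VLC reports is consistent with how videoEnd stops the pipeline: posting an EOS message on the bus only notifies the application; it never travels through avimux, so the muxer cannot finalize its index. A sketch of an event-based shutdown, with the same variable names:
/* An EOS event travels downstream through the elements,
 * giving avimux a chance to write its index before teardown. */
gst_element_send_event (pipeline, gst_event_new_eos ());
GstBus *bus = gst_element_get_bus (pipeline);
GstMessage *m = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,
    (GstMessageType) (GST_MESSAGE_EOS | GST_MESSAGE_ERROR));
if (m != NULL)
    gst_message_unref (m);
gst_object_unref (bus);
gst_element_set_state (pipeline, GST_STATE_NULL);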
OK, so here's how I solved it: my initial pipeline is split with a tee element into two sinks: the original sink that saves the video, and an appsink. In the callback function for the appsink I create a new pipeline and push the frame any time I want to save an image. Basically:
...
int saveSampleFromAppsinkJpeg( GstSample *sample){
if (!shouldSaveImage) {
return -2;
}
if (capturing){
return -3;
}
std::thread([=]{
capturing = true;
GstStateChangeReturn ret;
GstElement *appsrc = gst_element_factory_make ("appsrc", "appsrc");
GstElement *sink = gst_element_factory_make ("multifilesink", "sink");
g_object_set (sink, "location", "some/path", NULL);
GstElement *pipeline_img = gst_pipeline_new ("pipeline_img");
if (!pipeline_img || !appsrc || !sink) {
g_printerr ("Not all elements could be created.\n");
capturing = false;
return -1;
}
gst_app_src_set_caps(GST_APP_SRC(appsrc), caps);
gst_app_src_set_duration(GST_APP_SRC(appsrc), GST_TIME_AS_MSECONDS(80)); // TODO 80
gst_app_src_set_stream_type(GST_APP_SRC(appsrc), GST_APP_STREAM_TYPE_STREAM);
gst_app_src_set_latency(GST_APP_SRC(appsrc), -1, 0);
gst_bin_add_many (GST_BIN (pipeline_img), appsrc, sink, NULL);
if (gst_element_link_many(appsrc, sink, NULL) != TRUE) {
g_printerr ("Elements could not be linked.\n");
gst_object_unref (pipeline_img);
capturing = false;
return -1;
}
ret = gst_element_set_state (pipeline_img, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr ("Unable to set the pipeline to the playing state.\n");
gst_object_unref (pipeline_img);
capturing = false;
return -1;
}
//push the image in the pipeline
GstFlowReturn status = GstFlowReturn::GST_FLOW_OK;
status = gst_app_src_push_sample(GST_APP_SRC(appsrc), sample);
if (status != GstFlowReturn::GST_FLOW_OK) g_printerr ("Sample for saving image not pushed.\n");
status = gst_app_src_end_of_stream(GST_APP_SRC(appsrc));
if (status != GstFlowReturn::GST_FLOW_OK) g_printerr ("EOS for saving image not pushed.\n");
//end the pipeline
usleep(500000); // Important
GstMessage *message = gst_message_new_eos(&pipeline_img->object);
gst_bus_post(pipeline_img->bus, message); /* gst_bus_post() takes ownership of the message, so do not unref it here */
gst_element_set_state (pipeline_img, GST_STATE_PAUSED);
gst_element_set_state (pipeline_img, GST_STATE_NULL);
gst_object_unref (pipeline_img);
shouldSaveImage = false;
capturing = false;
return 1;
}).detach();
return 1;
}
static GstFlowReturn new_sample_jpeg(GstElement * elt)
{
GstSample *sample;
GstBuffer *buffer;
GstMemory *memory;
GstFlowReturn ret = GST_FLOW_OK;
// get the sample from appsink
sample = gst_app_sink_pull_sample (GST_APP_SINK (elt));
buffer = gst_sample_get_buffer (sample);
if (buffer != NULL) {
memory = gst_buffer_get_memory (buffer, 0);
if (memory != NULL) {
//now all data are image data. If image wanted->image save!
if (wantToSave) saveSampleFromAppsinkJpeg(sample);
}
...
}
}
void startVideo(){
if (!gst_is_initialized()) {
setenv("GST_DEBUG", ("*:" + std::to_string(3)).c_str(), 1);
gst_init(nullptr, nullptr);
}
GstStateChangeReturn ret;
GstElement *source, *muxer, *sink, *queue_rcr, *queue_app, *appsink;
source = gst_element_factory_make ("v4l2src", "source");
g_object_set (source, "device", "/dev/video1", NULL);
muxer = gst_element_factory_make ("avimux", "avimux");
tee = gst_element_factory_make("tee", "tee");
sink = gst_element_factory_make ("filesink", "sink");
queue_rcr = gst_element_factory_make ("queue", "record_queue");
queue_app = gst_element_factory_make ("queue", "app_queue");
appsink = gst_element_factory_make("appsink", "appsink");
g_object_set (sink, "location", path.toStdString().c_str(), NULL);
pipeline = gst_pipeline_new ("pipeline_src");
if (!pipeline || !source || !muxer || !sink || !queue_rcr || !appsink) {
g_printerr ("Not all elements could be created.\n");
return;
}
caps = gst_caps_new_simple ("image/jpeg",
"width", G_TYPE_INT, 1920,
"height", G_TYPE_INT, 1080,
"io-mode", G_TYPE_INT, 4,
"framerate", GST_TYPE_FRACTION, 30, 1,
"pixel-aspect-ratio", GST_TYPE_FRACTION, 1,1,
"interlace-mode", G_TYPE_STRING, "progresive",
NULL);
gst_bin_add_many (GST_BIN (pipeline), source, muxer,tee, sink,queue_rcr, appsink, queue_app, NULL);
if (gst_element_link_filtered(source, tee, caps) != TRUE) {
//failhandling
}
if (gst_element_link_many(tee, queue_rcr, muxer, sink, NULL) != TRUE) {
//failhandling
}
if (gst_element_link_many(tee, queue_app, appsink, NULL) != TRUE) {
//failhandling
}
gst_app_sink_set_emit_signals(GST_APP_SINK(appsink), true);
g_signal_connect (appsink, "new-sample", G_CALLBACK (new_sample_jpeg));
ret = gst_element_set_state (pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
//failhandling
}
// Start playing
recording = true;
return;
}

Dynamically link audio source element into gstreamer audiomixer element

I am trying to mix internal audio and microphone audio using the GStreamer audiomixer element, and then mux the single stream with video data. So far I can do it only when the sound card is already active.
I am using wasapisrc with the loopback=true property.
What I mean is that my code works when a song is already playing on the computer before I start my program.
What I want to achieve is for the internal sound source to link with the audiomixer element dynamically, but it just gives me an error and the program crashes. What I have done so far is put the sound-card source element in a separate bin from the main pipeline and add a data probe on the wasapisrc element; when there is sound, I try to link the source to a queue and then to the audiomixer in the main pipeline.
Any help on how I can dynamically link and unlink a source element into audiomixer?
My code is below:
#include <gst/gst.h>
//#include "pch.h"
#include <windows.h>
#include <stdio.h>
GMainLoop* mainLoop;
GstElement *mainPipeline;
GstPadLinkReturn link_to_mixer(GstPad* binPad, GstElement* mix);
GstPad* retrieve_ghost_pad(GstElement* bin, GstElement* elem);
typedef struct _elemStruct
{
GstElement *micSource, *micSourceQueue, *soundCardSrc, *soundCardSrcQueue, *micSrcRate, *micRateQueue, *soundCardRate, *soundCardRateQueue, *audioMixer, *audioMixerQueue;
GstElement* audioConverter, *audioConverterQueue, *audioEncoder, *audioEncoderQueue, *avMuxer, *gdiGrabber, *videoConverter, *x264encoder;
GstElement* muxerQueue, *fileSinker, *gdiGrabberQueue, *videoConverterQueue, *x264encoderQueue;
GstCaps *caps;
GstElement* message;
GstStateChangeReturn stateRet;
GstElement *micBin, *soundCardBin, *screenBin, *audioBin;
GstPad *micMixPad, *soundCardMixPad, *audioMuxPad, *videoMuxPad;
GstBus* mainBus;
GstStateChangeReturn ret;
GstMessage* msg;
guint bus_watch_id;
GstElement* soundCardTempSink;
}elemStruct;
BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
{
switch (fdwCtrlType)
{
// Handle the CTRL-C signal.
case CTRL_C_EVENT:
printf("Ctrl-C event\n\n");
Beep(750, 300);
return TRUE;
// CTRL-CLOSE: confirm that the user wants to exit.
case CTRL_CLOSE_EVENT:
Beep(600, 200);
printf("Ctrl-Close event\n\n");
return TRUE;
// Pass other signals to the next handler.
case CTRL_BREAK_EVENT:
Beep(900, 200);
printf("Ctrl-Break event\n\n");
return FALSE;
case CTRL_LOGOFF_EVENT:
Beep(1000, 200);
printf("Ctrl-Logoff event\n\n");
return FALSE;
case CTRL_SHUTDOWN_EVENT:
Beep(750, 500);
printf("Ctrl-Shutdown event\n\n");
return FALSE;
default:
return FALSE;
}
}
void addsoundsrc_toMainline(GstPadProbeInfo* info, GstElement* bin)
{
// we got data , add pipeline to audiomixer
// add bin to audiomixer
// get bin src pad
// call retrieve ghostsrc function
//retrieve_ghost_pad()
GstElement* queue = gst_bin_get_by_name(GST_BIN(bin), "sound_card_source_queue");
GstPad* mixpad = retrieve_ghost_pad(bin, queue);
//link_to_mixer(mixpad, )
}
GstPadProbeReturn soundCardProbe(GstPad* pad, GstPadProbeInfo* info, gpointer data)
{
//GstBuffer* buffer = gst_pad_probe_info_get_buffer(info);
GstBuffer* buffer = GST_PAD_PROBE_INFO_BUFFER(info);
elemStruct* mainElem = (elemStruct*)data;
g_print("received data in the soundcard probe ");
//GstElement* bin = mainElem->soundCardBin;
//bool add = gst_bin_add(GST_BIN(mainElem->audioBin), mainElem->soundCardBin);
//gst_element_sync_state_with_parent(mainElem->soundCardBin);
//GstElement* queue = gst_bin_get_by_name((GST_BIN(bin)), "sound_card_source_queue");
//GstPad* mixpad = retrieve_ghost_pad(bin, mainElem->soundCardSrcQueue);
//GstPad* mixPad = gst_element_get_static_pad(mainElem->soundCardSrcQueue, "sink");
//link_to_mixer(mixPad, mainElem->audioMixer);
//addsoundsrc_toMainline(info, bin);
return GST_PAD_PROBE_PASS;
}
void set_queue_property(GstElement* _queue)
{
g_object_set(G_OBJECT(_queue), "max-size-buffers", 1000, "max-size-time", 1000000000000, NULL);
}
GstPadLinkReturn link_to_mixer(GstPad* binPad, GstElement* mix)
{
GstPad* mixerPad;
gchar* binPadName, *mixerPadName;
mixerPad = gst_element_get_compatible_pad(mix, binPad, NULL);
//mixerPad = gst_element_get_request_pad(mix, "sink_%u");
binPadName = gst_pad_get_name(binPad);
mixerPadName = gst_pad_get_name(mixerPad);
GstPadLinkReturn retVal = gst_pad_link(binPad, mixerPad); // check if successful
g_print(" a new link is created with %s and %s pads\n", binPadName, mixerPadName);
g_free(binPadName);
g_free(mixerPadName);
//gst_object_unref(binPad);
gst_object_unref(mixerPad);
//gst_element_release_request_pad(mix, mixerPad);
return retVal;
}
GstPadLinkReturn audio_link_to_muxer(GstPad* binPad, GstElement* mix)
{
GstPad* muxerPad;
gchar* binPadName, *muxerPadName;
//mixerPad = gst_element_get_compatible_pad(mix, binPad, NULL);
muxerPad = gst_element_get_request_pad(mix, "audio_%u");
binPadName = gst_pad_get_name(binPad);
muxerPadName = gst_pad_get_name(muxerPad);
GstPadLinkReturn retVal = gst_pad_link(binPad, muxerPad); // check if successful
g_print(" a new link is created with %s and %s pads\n", binPadName, muxerPadName);
g_free(binPadName);
g_free(muxerPadName);
//gst_object_unref(binPad);
//gst_object_unref(mixerPad);
gst_element_release_request_pad(mix, muxerPad);
return retVal;
}
GstPadLinkReturn video_link_to_muxer(GstPad* binPad, GstElement* mix)
{
GstPad* muxerPad;
gchar* binPadName, *muxerPadName;
//mixerPad = gst_element_get_compatible_pad(mix, binPad, NULL);
muxerPad = gst_element_get_request_pad(mix, "video_%u");
binPadName = gst_pad_get_name(binPad);
muxerPadName = gst_pad_get_name(muxerPad);
GstPadLinkReturn retVal = gst_pad_link(binPad, muxerPad); // check if successful
g_print(" a new link is created with %s and %s pads\n", binPadName, muxerPadName);
g_free(binPadName);
g_free(muxerPadName);
//gst_object_unref(binPad);
//gst_object_unref(mixerPad);
gst_element_release_request_pad(mix, muxerPad);
return retVal;
}
GstPadLinkReturn link_to_mpeg_muxer(GstPad* binPad, GstElement* mix)
{
GstPad* muxerPad;
gchar* binPadName, *muxerPadName;
muxerPad = gst_element_get_compatible_pad(mix, binPad, NULL);
//muxerPad = gst_element_get_request_pad(mix, "sink_%d");
binPadName = gst_pad_get_name(binPad);
muxerPadName = gst_pad_get_name(muxerPad);
GstPadLinkReturn retVal = gst_pad_link(binPad, muxerPad); // check if successful
g_print(" a new link is created with %s and %s pads\n", binPadName, muxerPadName);
g_free(binPadName);
g_free(muxerPadName);
//gst_object_unref(binPad);
gst_object_unref(muxerPad);
//gst_element_release_request_pad(mix, muxerPad);
return retVal;
}
GstPad* retrieve_ghost_pad(GstElement* bin, GstElement* elem)
{
GstPad* elemPad = gst_element_get_static_pad(elem, "src");
GstPad* ghost = gst_ghost_pad_new("ghostsrc", elemPad);
gst_element_add_pad(bin, ghost);
gst_object_unref(elemPad);
return ghost;
}
static gboolean bus_call(GstBus *bus, GstMessage *msg, gpointer data)
{
GMainLoop *loop = (GMainLoop *)data;
switch (GST_MESSAGE_TYPE(msg)) {
case GST_MESSAGE_EOS:
{
g_print("End of stream\n");
g_main_loop_quit(loop);
break;
}
case GST_MESSAGE_ERROR: {
gchar *debug;
GError *error;
gst_message_parse_error(msg, &error, &debug);
g_free(debug);
g_printerr("Error: %s\n", error->message);
g_error_free(error);
g_main_loop_quit(loop);
break;
}
case GST_MESSAGE_STATE_CHANGED:
{
GstState old_state, new_state;
gst_message_parse_state_changed(msg, &old_state, &new_state, NULL);
g_print("Element %s changed state from %s to %s.\n",
GST_OBJECT_NAME(msg->src),
gst_element_state_get_name(old_state),
gst_element_state_get_name(new_state));
//if (new_state == GST_STATE_PAUSED)
//{
// gst_element_set_state(mainPipeline, GST_STATE_NULL);
//}
break;
}
break;
default:
break;
}
return TRUE;
}
int main(int argc, char** argv)
{
//gst - launch - 1.0.exe wasapisrc loopback = true\
// ! audiorate ! queue ! mix. wasapisrc low-latency=true \
// ! audiorate ! queue ! mix. audiomixer name=mix ! queue ! audioconvert \
// ! queue ! avenc_aac ! queue ! muxer. gdiscreencapsrc ! videoconvert \
// ! x264enc ! mpegtsmux name = muxer !queue ! filesink location=muxin.mp4 sync=false
elemStruct* mainStruct = new elemStruct();
if (!gst_init_check(&argc, &argv, NULL))
{
g_printerr("couldn't initialize gstreamer\n");
return -1;
}
mainLoop = g_main_loop_new(NULL, FALSE);
if ((mainPipeline = gst_pipeline_new("main_pipeline")) == NULL)
{
}
mainStruct->micSource = gst_element_factory_make("wasapisrc", "mic_source");
mainStruct->soundCardSrc = gst_element_factory_make("wasapisrc", "sound_card_source");
mainStruct->gdiGrabber = gst_element_factory_make("dx9screencapsrc", "dx9_screen_capture_source");
mainStruct->micSourceQueue = gst_element_factory_make("queue", "mic_source_queue_elem");
mainStruct->soundCardSrcQueue = gst_element_factory_make("queue", "sound_card_source_queue");
mainStruct->micSrcRate = gst_element_factory_make("audiorate", "mic_audio_rate_elem");
mainStruct->soundCardRate = gst_element_factory_make("audiorate", "soundCard_audiorate_elem");
mainStruct->micRateQueue = gst_element_factory_make("queue", "mic_audiorate_queue");
mainStruct->soundCardRateQueue = gst_element_factory_make("queue", "soundCard_audiorate_queue");
mainStruct->audioMixer = gst_element_factory_make("audiomixer", "audio_mixer_elem");
mainStruct->audioMixerQueue = gst_element_factory_make("queue", "audio_mixer_queue_elem");
mainStruct->soundCardTempSink = gst_element_factory_make("autoaudiosink", "soundcard_temp_sink_elem");
mainStruct->audioEncoder = gst_element_factory_make("avenc_aac", "audio_encoder_elem");
mainStruct->audioEncoderQueue = gst_element_factory_make("queue", "audio_encoder_queue_elem");
mainStruct->audioConverter = gst_element_factory_make("audioconvert", "audio_convert_elem");
mainStruct->audioConverterQueue = gst_element_factory_make("queue", "audio_convert_queue_elem");
mainStruct->gdiGrabberQueue = gst_element_factory_make("queue", "gdi_grabber_queue_elem");
mainStruct->gdiGrabber = gst_element_factory_make("dx9screencapsrc", "gdi_grabber_elem");
mainStruct->videoConverterQueue = gst_element_factory_make("queue", "videoconvert_queue_elem");
mainStruct->x264encoderQueue = gst_element_factory_make("queue", "x264encoder_queue_elem");
mainStruct->videoConverter = gst_element_factory_make("videoconvert", "videoconvert_elem");
mainStruct->x264encoder = gst_element_factory_make("x264enc", "x264enc_elem");
mainStruct->avMuxer = gst_element_factory_make("mpegtsmux", "mp4_muxer_elem");
//if ((avMuxer = gst_element_factory_make("mpegtsmux", "mp4_muxer_elem")) == NULL)
mainStruct->fileSinker = gst_element_factory_make("filesink", "filesink_elem");
// set up all the sources
g_object_set(G_OBJECT(mainStruct->micSource), "do-timestamp", true, NULL);
g_object_set(G_OBJECT(mainStruct->soundCardSrc), "do-timestamp", true, "loopback", true, NULL);
g_object_set(G_OBJECT(mainStruct->gdiGrabber), "do-timestamp", true, "cursor", true, NULL);
g_object_set(G_OBJECT(mainStruct->x264encoder), "pass", 17, NULL);
g_object_set(G_OBJECT(mainStruct->fileSinker), "location", "sani_1486.mp4", "sync", false, NULL);
// set up all the queues
set_queue_property(mainStruct->micSourceQueue);
set_queue_property(mainStruct->soundCardSrcQueue);
set_queue_property(mainStruct->audioMixerQueue);
set_queue_property(mainStruct->audioEncoderQueue);
set_queue_property(mainStruct->gdiGrabberQueue);
set_queue_property(mainStruct->videoConverterQueue);
set_queue_property(mainStruct->x264encoderQueue);
// add the src elements to each src bin
gst_bin_add_many(GST_BIN(mainPipeline), mainStruct->micSource, mainStruct->micSourceQueue, NULL);
mainStruct->soundCardBin = gst_bin_new("sound_card_bin");
gst_bin_add_many(GST_BIN(mainStruct->soundCardBin), mainStruct->soundCardSrc, mainStruct->soundCardSrcQueue, NULL);
gst_element_link_many(mainStruct->soundCardSrc, mainStruct->soundCardSrcQueue,NULL);
GstPad* soundSourceprober = gst_element_get_static_pad(mainStruct->soundCardSrc, "src");
gst_pad_add_probe(soundSourceprober, GST_PAD_PROBE_TYPE_BUFFER, soundCardProbe, mainStruct, NULL); /* pass the struct pointer itself, not its address, to match the cast in soundCardProbe */
gst_element_set_state(mainStruct->soundCardBin, GST_STATE_PLAYING);
// link elements in each source bin
gst_element_link(mainStruct->micSource, mainStruct->micSourceQueue);
//gst_element_link_many(mainStruct->soundCardSrc, mainStruct->soundCardSrcQueue, NULL);
// put this two bin in audiobin, we will connect audiobin to screenBin later
gst_bin_add_many(GST_BIN(mainPipeline),mainStruct->audioMixer, mainStruct->audioMixerQueue, mainStruct->audioEncoder, mainStruct->audioEncoderQueue, NULL);
//GstStateChangeReturn ret = gst_element_set_state(mainStruct->soundCardSrc, GST_STATE_PLAYING);
//GstStateChangeReturn retu = gst_element_get_state(mainStruct->soundCardSrc);
mainStruct->micMixPad = gst_element_get_static_pad(mainStruct->micSourceQueue, "src");
link_to_mixer(mainStruct->micMixPad, mainStruct->audioMixer);
//mainStruct->soundCardMixPad = gst_element_get_static_pad(mainStruct->soundCardSrcQueue, "src");
//link_to_mixer(mainStruct->soundCardMixPad, mainStruct->audioMixer);
bool one_ = gst_element_link_many(mainStruct->audioMixer, mainStruct->audioMixerQueue, mainStruct->audioEncoder, mainStruct->audioEncoderQueue, NULL);
gst_bin_add_many(GST_BIN(mainPipeline), mainStruct->gdiGrabber, mainStruct->gdiGrabberQueue, mainStruct->videoConverterQueue, mainStruct->videoConverter, mainStruct->x264encoder, mainStruct->x264encoderQueue, NULL);
// so add this element , with main bin
gst_element_link_many(mainStruct->gdiGrabber, mainStruct->gdiGrabberQueue, mainStruct->videoConverter, mainStruct->videoConverterQueue, mainStruct->x264encoder, mainStruct->x264encoderQueue, NULL);
//link_to_mixer(videoMuxPad, avMuxer);
mainStruct->videoMuxPad = gst_element_get_static_pad(mainStruct->x264encoderQueue, "src");
mainStruct->audioMuxPad = gst_element_get_static_pad(mainStruct->audioEncoderQueue, "src");
// add all the bin and muxer and filesink to main pipeline bin
gst_bin_add_many(GST_BIN(mainPipeline), mainStruct->avMuxer, mainStruct->fileSinker, NULL);
link_to_mpeg_muxer(mainStruct->videoMuxPad, mainStruct->avMuxer);
link_to_mpeg_muxer(mainStruct->audioMuxPad, mainStruct->avMuxer);
gst_element_link(mainStruct->avMuxer, mainStruct->fileSinker);
//gst_element_link(videoMuxPad, avMuxer);
/* Start playing the pipeline */
mainStruct->ret = gst_element_set_state(mainPipeline, GST_STATE_PLAYING);
// TODO , deal with ret
mainStruct->mainBus = gst_element_get_bus(mainPipeline);
mainStruct->bus_watch_id = gst_bus_add_watch(mainStruct->mainBus, bus_call, mainLoop);
gst_object_unref(mainStruct->mainBus);
// msg = gst_bus_timed_pop_filtered(mainBus, GST_CLOCK_TIME_NONE, GstMessageType(GST_MESSAGE_ERROR | GST_MESSAGE_EOS));
g_main_loop_run(mainLoop);
gst_element_set_state(mainPipeline, GST_STATE_NULL);
gst_object_unref(GST_OBJECT(mainPipeline));
g_source_remove(mainStruct->bus_watch_id);
g_main_loop_unref(mainLoop);
//g_main_loop_quit(mainLoop);
return 0;
}
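One pattern that may help with the dynamic attach: block the branch's src pad first, request an audiomixer sink pad from inside the blocking probe, link there, and only then let data flow. A sketch with assumed names (soundCardQueueSrcPad stands for the src pad of the sound-card queue):
static GstPadProbeReturn link_when_blocked (GstPad *srcpad, GstPadProbeInfo *info, gpointer data)
{
    GstElement *mixer = GST_ELEMENT (data);
    GstPad *mixpad = gst_element_get_request_pad (mixer, "sink_%u");
    if (gst_pad_link (srcpad, mixpad) != GST_PAD_LINK_OK)
        g_printerr ("dynamic link to audiomixer failed\n");
    gst_object_unref (mixpad);
    return GST_PAD_PROBE_REMOVE; /* removing the blocking probe unblocks the pad */
}
/* usage: block the queue's src pad, then link from the callback */
gst_pad_add_probe (soundCardQueueSrcPad, GST_PAD_PROBE_TYPE_BLOCK_DOWNSTREAM,
                   link_when_blocked, mainStruct->audioMixer, NULL);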

pad creation and some mistakes on gst_parse_launch

I am a newbie to GStreamer and I would like to know whether we have to create source and sink pads for a converter, like videoconvert, in a pipeline. I have a pipeline like this:
gst-launch-1.0 v4l2src ! video/x-raw,format=YUY2 ! videoconvert ! xvimagesink
I am trying to create a simple C application to understand the creation of pads, and I would like to know whether videoconvert has a source pad and a sink pad too. I am creating a source and sink pad for the filter.
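A quick way to check is gst-inspect-1.0, which lists each element's pad templates and whether they are always (static), sometimes (dynamic), or request pads; output abridged:
gst-inspect-1.0 videoconvert
Pad Templates:
  SRC template: 'src'
    Availability: Always
  SINK template: 'sink'
    Availability: Always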
EDIT:
yeah well you see, I tried following the dynamic pipelines example and wrote the code below
#include <gst/gst.h>
// easier to pass them as callbacks
typedef struct _CustomData{
GstElement *pipeline;
GstElement *source;
GstElement *convert;
GstElement *sink;
}CustomData;
// callback function
// here src is the v4l2src, newpad is the GstPad that has just been added to the src element. This is usually the pad to which we want to link
// data is the pointer we provided when attaching to the signal.
static void pad_added_handler(GstElement *src, GstPad *new_pad,CustomData *data)
{
g_print("In pad handler\n");
GstPad *sink_pad = gst_element_get_static_pad(data->convert, "sink");
GstPadLinkReturn ret;
GstCaps *new_pad_caps = NULL;
GstStructure *new_pad_struct = NULL;
const gchar *new_pad_type = NULL;
if(gst_pad_is_linked(sink_pad))
{
g_print("we are linked. igonring\n");
}
// check the new pad types
// we have previously created a piece of pipeline which deals with videoconvert linked with xvimagesink and we will not be able to link it to a pad producing video.
//gst-pad_get_current_caps()- retrieves current capabilities of pad
new_pad_caps = gst_pad_get_current_caps(new_pad);
new_pad_struct = gst_caps_get_structure(new_pad_caps, 0);
new_pad_type = gst_structure_get_name(new_pad_struct);
g_print ("It has type '%s' which is not raw audio. Ignoring.\n", new_pad_type);
if(!g_str_has_prefix(new_pad_type, "video/x-raw,format=(string)YUY2"))
{
g_print("It has new pad type");
}
// gst_pad_link tries to link two pads . the link must be specified from source to sink and both pads must be owned by elements residing in same pipeline
ret = gst_pad_link(new_pad, sink_pad);
if(GST_PAD_LINK_FAILED(ret))
{
g_print("type is new_pad_type");
}
if(new_pad_caps !=NULL)
{
gst_caps_unref(new_pad_caps);
}
gst_object_unref(sink_pad);
}
int main(int argc, char *argv[])
{
GMainLoop *loop;
CustomData data;
GstBus *bus;
GstMessage *msg;
gboolean terminate = FALSE;
gst_init(&argc, &argv);
// loop = g_main_loop_new(NULL, FALSE);
// create the elements
data.source = gst_element_factory_make("v4l2src", "source");
data.convert = gst_element_factory_make("videoconvert", "convert");
data.sink = gst_element_factory_make("xvimagesink", "sink");
data.pipeline = gst_pipeline_new("new-pipeline");
if(!data.pipeline || !data.source || !data.convert || !data.sink)
{
g_printerr("Not all elements could be created\n");
return -1;
}
//we did not link source at this point of time, we will do it later
gst_bin_add_many(GST_BIN(data.pipeline), data.source, data.convert, data.sink, NULL);
// we link convert element to sink, do not link them with source. we dont have source pads here. so we just have videoconvert->sink unlinked
// gst_element_link(data.source, data.convert);
if(!gst_element_link( data.convert,data.sink))
{
g_printerr("elements could not be linked\n");
gst_object_unref(data.pipeline);
return -1;
}
// we set the device source
//g_object_set(source, "device", "/dev/video0", NULL);
//connect to pad added signal.
// we want to attach pad added signal to source element. to do so, we are using g_signal_connect and provide callback function and datapointer.
// when source element has enough information to start producing data, it will create source pads and trigger the pad added signal. at this point, our callback is called
g_print("before signal connect\n");
gint id= g_signal_connect(G_OBJECT(data.source), "pad-added", G_CALLBACK(pad_added_handler), &data );
g_print("after signal connect with id = %d\n", id);
//g_signal_connect(G_OBJECT(data.source), "pad-added", G_CALLBACK(handler), &data);
// gst_element_link(data.source, data.convert);
GstStateChangeReturn ret;
ret =gst_element_set_state (data.pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr ("Unable to set the pipeline to the playing state.\n");
gst_object_unref (data.pipeline);
return -1;
}
// g_main_loop_run(loop);
/* Listen to the bus */
bus = gst_element_get_bus (data.pipeline);
do {
msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,
GST_MESSAGE_STATE_CHANGED | GST_MESSAGE_ERROR | GST_MESSAGE_EOS);
/* Parse message */
if (msg != NULL) {
GError *err;
gchar *debug_info;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_ERROR:
gst_message_parse_error (msg, &err, &debug_info);
g_printerr ("Error received from element %s: %s\n", GST_OBJECT_NAME (msg->src), err->message);
g_printerr ("Debugging information: %s\n", debug_info ? debug_info : "none");
g_clear_error (&err);
g_free (debug_info);
terminate = TRUE;
break;
case GST_MESSAGE_EOS:
g_print ("End-Of-Stream reached.\n");
terminate = TRUE;
break;
case GST_MESSAGE_STATE_CHANGED:
/* We are only interested in state-changed messages from the pipeline */
if (GST_MESSAGE_SRC (msg) == GST_OBJECT (data.pipeline)) {
GstState old_state, new_state, pending_state;
gst_message_parse_state_changed (msg, &old_state, &new_state, &pending_state);
g_print ("Pipeline state changed from %s to %s:\n",
gst_element_state_get_name (old_state), gst_element_state_get_name (new_state));
}
break;
default:
/* We should not reach here */
g_printerr ("Unexpected message received.\n");
break;
}
gst_message_unref (msg);
}
} while (!terminate);
/* Free resources */
gst_object_unref (bus);
gst_element_set_state(data.pipeline, GST_STATE_NULL);
gst_object_unref(data.pipeline);
return 0;
}
and it gave me an error
before signal connect
after signal connect with id = 1
Pipeline state changed from NULL to READY:
Pipeline state changed from READY to PAUSED:
Error received from element source: Internal data stream error.
Debugging information: gstbasesrc.c(3055): gst_base_src_loop (): /GstPipeline:new-pipeline/GstV4l2Src:source:
streaming stopped, reason not-linked (-1)
(The above code works if I write gst_element_link(data.source, data.convert) after the element link statement for convert and sink.)
So I tried the normal way, where I just added and linked all the elements together, and it began to work without any manual use of pads.
#include <gst/gst.h>
int main(int argc, char *argv[])
{
GstElement *pipeline, *source, *convert, *sink;
GstBus *bus;
GstMessage *msg;
gst_init(&argc, &argv);
source = gst_element_factory_make("v4l2src", "source");
convert = gst_element_factory_make("nvvidconv", "convert");
sink = gst_element_factory_make("xvimagesink", "sink");
pipeline = gst_pipeline_new("pipe");
gst_bin_add_many(GST_BIN(pipeline), source,convert,sink, NULL);
gst_element_link_many(source,convert,sink,NULL);
gst_element_set_state(pipeline,GST_STATE_PLAYING);
bus = gst_element_get_bus (pipeline);
msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE, GST_MESSAGE_ERROR | GST_MESSAGE_EOS);
/* Parse message */
if (msg != NULL) {
GError *err;
gchar *debug_info;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_ERROR:
gst_message_parse_error (msg, &err, &debug_info);
g_printerr ("Error received from element %s: %s\n", GST_OBJECT_NAME (msg->src), err->message);
g_printerr ("Debugging information: %s\n", debug_info ? debug_info : "none");
g_clear_error (&err);
g_free (debug_info);
break;
case GST_MESSAGE_EOS:
g_print ("End-Of-Stream reached.\n");
break;
default:
/* We should not reach here because we only asked for ERRORs and EOS */
g_printerr ("Unexpected message received.\n");
break;
}
gst_message_unref (msg);
}
/* Free resources */
gst_object_unref(bus);
gst_element_set_state(pipeline,GST_STATE_NULL);
gst_object_unref(pipeline);
}
But, in order to fully grasp pads, I wanted to build simpler pipelines with them first.
I just don't fully understand how pads are used and linked.
EDIT2:
Ultimately I want to write application for pipeline like this which works on command line perfectly well,
gst-launch-1.0 v4l2src device='/dev/video0' ! 'video/x-raw,format=(string)YUY2,width=(int)640,height=(int)480' ! nvvidconv ! 'video/x-raw(memory:NVMM),format=(string)NV12,width=(int)640,height=(int)480' ! nvvidconv ! 'video/x-raw,format=(string)NV12,width=(int)640,height=(int)480' ! nvvideoconvert ! 'video/x-raw(memory:NVMM),format=(string)NV12,width=(int)640,height=(int)480' ! mux.sink_0 nvstreammux live-source=1 name=mux batch-size=1 width=640 height=480 ! nvinfer config-file-path=/opt/nvidia/deepstream/deepstream-4.0/sources/apps/sample_apps/deepstream-test1/dstest1_pgie_config.txt batch-size=1 ! nvmultistreamtiler rows=1 columns=1 width=640 height=480 ! nvvideoconvert ! nvdsosd ! nvegltransform ! nveglglessink sync=false -v
But since I don't understand the usage of pads and bins, I am unable to use them in the pipeline above. However, I tried this:
#include <gst/gst.h>
#include <glib.h>
#include <stdio.h>
#include "gstnvdsmeta.h"
#define MAX_DISPLAY_LEN 64
#define PGIE_CLASS_ID_VEHICLE 0
#define PGIE_CLASS_ID_PERSON 2
gint frame_number = 0;
gchar pgie_classes_str[4][32] = { "Vehicle", "TwoWheeler", "Person",
"Roadsign"
};
static GstPadProbeReturn osd_sink_pad_buffer_probe(GstPad *pad, GstPadProbeInfo *info, gpointer u_data)
{
GstBuffer *buf=(GstBuffer *)info->data;
guint num_rects =0;
NvDsObjectMeta *obj_meta = NULL;
guint vehicle_count = 0;
guint person_count = 0;
NvDsMetaList * l_frame = NULL;
NvDsMetaList * l_obj = NULL;
NvDsDisplayMeta *display_meta = NULL;
NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buf);
for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
l_frame = l_frame->next) {
NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) (l_frame->data);
int offset = 0;
for (l_obj = frame_meta->obj_meta_list; l_obj != NULL;
l_obj = l_obj->next) {
obj_meta = (NvDsObjectMeta *) (l_obj->data);
if (obj_meta->class_id == PGIE_CLASS_ID_VEHICLE) {
vehicle_count++;
num_rects++;
}
if (obj_meta->class_id == PGIE_CLASS_ID_PERSON) {
person_count++;
num_rects++;
}
}
display_meta = nvds_acquire_display_meta_from_pool(batch_meta);
NvOSD_TextParams *txt_params = &display_meta->text_params[0];
display_meta->num_labels = 1;
txt_params->display_text = g_malloc0 (MAX_DISPLAY_LEN);
offset = snprintf(txt_params->display_text, MAX_DISPLAY_LEN, "Person = %d ", person_count);
offset = snprintf(txt_params->display_text + offset , MAX_DISPLAY_LEN, "Vehicle = %d ", vehicle_count);
/* Now set the offsets where the string should appear */
txt_params->x_offset = 10;
txt_params->y_offset = 12;
/* Font , font-color and font-size */
txt_params->font_params.font_name = "Serif";
txt_params->font_params.font_size = 10;
txt_params->font_params.font_color.red = 1.0;
txt_params->font_params.font_color.green = 1.0;
txt_params->font_params.font_color.blue = 1.0;
txt_params->font_params.font_color.alpha = 1.0;
/* Text background color */
txt_params->set_bg_clr = 1;
txt_params->text_bg_clr.red = 0.0;
txt_params->text_bg_clr.green = 0.0;
txt_params->text_bg_clr.blue = 0.0;
txt_params->text_bg_clr.alpha = 1.0;
nvds_add_display_meta_to_frame(frame_meta, display_meta);
}
g_print ("Frame Number = %d Number of objects = %d "
"Vehicle Count = %d Person Count = %d\n",
frame_number, num_rects, vehicle_count, person_count);
frame_number++;
return GST_PAD_PROBE_OK;
}
int main(int argc, char *argv[])
{
GstElement *pipeline, *source, *filter1, *convert,*filter2, *filter3, *vidconv, *filter4, *mux, *infer, *tiler, *osd, *transform , *sink, *bin, *convert2 , *vidconv2;
GMainLoop *loop;
GstCaps *caps1, *caps2, *caps3, *caps4;
GstPad *osd_sink_pad =NULL, *srcpad, *sinkpad;
loop = g_main_loop_new(NULL,FALSE);
gst_init(&argc, &argv);
pipeline = gst_pipeline_new("nv_pipeline");
gchar *string1 = "video/x-raw(memory:NVMM),format=(string)NV12";
source = gst_element_factory_make("v4l2src", "source");
filter1 = gst_element_factory_make("capsfilter", "filter1");
convert = gst_element_factory_make("nvvidconv", "convert");
filter2 = gst_element_factory_make("capsfilter", "filter2");
filter3 = gst_element_factory_make("capsfilter", "filter3");
filter4 = gst_element_factory_make("capsfilter", "filter4");
vidconv = gst_element_factory_make("nvvideoconvert", "vidconv");
mux = gst_element_factory_make("nvstreammux", "mux");
infer = gst_element_factory_make("nvinfer", "infer");
tiler = gst_element_factory_make("nvmultistreamtiler", "tiler");
osd = gst_element_factory_make("nvosd", "osd");
transform = gst_element_factory_make("nvegltransform", "transform");
sink = gst_element_factory_make("nveglglessink", "sink");
convert2 = gst_element_factory_make("nvvidconv", "convert2");
vidconv2 = gst_element_factory_make("nvvideoconvert", "vidconv2");
gst_bin_add_many(GST_BIN(pipeline), source,filter1,convert,filter2, convert2,filter3,vidconv, filter4,mux,infer, tiler,vidconv2, osd,transform,sink,NULL);
gst_element_link_many(source,filter1,convert,filter2, convert2,filter3, vidconv, filter4,mux,infer, tiler,vidconv2, osd,transform,sink,NULL);
osd_sink_pad = gst_element_get_static_pad(osd, "sink");
gst_pad_add_probe(osd_sink_pad, GST_PAD_PROBE_TYPE_BUFFER, osd_sink_pad_buffer_probe, NULL, NULL);
caps1 = gst_caps_new_simple("video/x-raw", "format",G_TYPE_STRING,"YUY2",NULL);
caps2 = gst_caps_from_string(string1);
caps3 = gst_caps_new_simple("video/x-raw", "format", G_TYPE_STRING,"NV12", NULL);
caps4 = gst_caps_from_string(string1);
g_object_set(G_OBJECT(filter1), "caps", caps1, NULL);
g_object_set(G_OBJECT(filter2), "caps", caps2, NULL);
g_object_set(G_OBJECT(filter3), "caps", caps3, NULL);
g_object_set(G_OBJECT(filter4), "caps", caps4, NULL);
g_object_set(G_OBJECT(mux), "live-source", 1, "name", "mux", "batch-size", 1, "width", 1280, "height", 720, NULL);
g_object_set(G_OBJECT(infer), "config-file-path","/opt/nvidia/deepstream/deepstream-4.0/sources/apps/sample_apps/deepstream-test1/dstest1_pgie_config.txt",NULL);
g_object_set(G_OBJECT(infer), "batch-size", 1, NULL);
g_object_set(G_OBJECT(tiler), "rows", 1, "columns", 1, "width", 1280, "height", 720, NULL);
gst_caps_unref(caps1);
gst_caps_unref(caps2);
gst_caps_unref(caps3);
gst_caps_unref(caps4);
gst_element_set_state(pipeline, GST_STATE_PLAYING);
g_print("Running ...\n");
g_main_loop_run(loop);
gst_element_set_state(pipeline,GST_STATE_NULL);
gst_object_unref(pipeline);
return 0;
}
which gives the exact same console output as the gst-launch-1.0 command line, i.e.:
(deep_pads:15648): GLib-GObject-WARNING **: 11:29:18.761: cannot register existing type 'GstInterpolationMethod'
(deep_pads:15648): GLib-GObject-CRITICAL **: 11:29:18.761: g_param_spec_enum: assertion 'G_TYPE_IS_ENUM (enum_type)' failed
(deep_pads:15648): GLib-GObject-CRITICAL **: 11:29:18.761: validate_pspec_to_install: assertion 'G_IS_PARAM_SPEC (pspec)' failed
(deep_pads:15648): GStreamer-CRITICAL **: 11:29:18.814: gst_element_get_static_pad: assertion 'GST_IS_ELEMENT (element)' failed
(deep_pads:15648): GStreamer-CRITICAL **: 11:29:18.814: gst_pad_add_probe: assertion 'GST_IS_PAD (pad)' failed
0:00:00.843318172 15648 0x5591be52c0 INFO nvinfer gstnvinfer.cpp:519:gst_nvinfer_logger:<infer> NvDsInferContext[UID 1]:initialize(): Trying to create engine from model files
0:00:20.693301580 15648 0x5591be52c0 INFO nvinfer gstnvinfer.cpp:519:gst_nvinfer_logger:<infer> NvDsInferContext[UID 1]:generateTRTModel(): Storing the serialized cuda engine to file at /opt/nvidia/deepstream/deepstream-4.0/samples/models/Primary_Detector/resnet10.caffemodel_b1_int8.engine
except that the C application above does not open an output window and does not print the rest of the log.
Your first example fails because the data.source element is actually never linked to the data.convert element. Since both elements have static pads, you need to retrieve and link them "manually" before setting the pipeline to GST_STATE_PLAYING:
GstPad *source_pad = gst_element_get_static_pad(data.source, "src");
GstPad *sink_pad = gst_element_get_static_pad(data.convert, "sink");
ret = gst_pad_link(source_pad, sink_pad);
/* gst_element_get_static_pad returns a reference that must be released */
gst_object_unref(source_pad);
gst_object_unref(sink_pad);
You probably expected that the static source pad of your data.source element would somehow be created automatically, which is why you registered
g_signal_connect(G_OBJECT(data.source), "pad-added", G_CALLBACK(pad_added_handler), &data );
But, as you can see from your debug output, pad_added_handler was never called: the pad-added signal is only emitted by elements that create their pads dynamically. For example, the demultiplexer tsdemux dynamically creates its source pads during discovery of the elementary streams, so in that case registering a pad-added callback is necessary.
The first step on your learning curve should be to understand the fundamental differences between static pads (mandatory, always exist, linked "manually"), dynamic pads (exist only sometimes, created automatically by the element), and request pads (optional, requested "manually" when needed, e.g. with gst_element_get_request_pad on a tee). After that, everything will be much easier for you.
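To make the first two cases concrete, here is a minimal sketch, assuming an MPEG-TS file input.ts containing a single H.264 stream (fakesink stands in for a real sink): the static pads are linked immediately, while the demuxer's source pad can only be linked from a pad-added callback.
#include <gst/gst.h>
/* tsdemux creates its source pads dynamically, once it has parsed the
 * stream, so this link has to happen in a pad-added callback. */
static void on_pad_added (GstElement *demux, GstPad *new_pad, gpointer user_data)
{
    GstPad *sink_pad = gst_element_get_static_pad (GST_ELEMENT (user_data), "sink");
    if (!gst_pad_is_linked (sink_pad))
        gst_pad_link (new_pad, sink_pad);
    gst_object_unref (sink_pad);
}
int main (int argc, char *argv[])
{
    gst_init (&argc, &argv);
    GstElement *pipeline = gst_pipeline_new ("pad-demo");
    GstElement *src   = gst_element_factory_make ("filesrc",   "src");
    GstElement *demux = gst_element_factory_make ("tsdemux",   "demux");
    GstElement *parse = gst_element_factory_make ("h264parse", "parse");
    GstElement *sink  = gst_element_factory_make ("fakesink",  "sink");
    g_object_set (src, "location", "input.ts", NULL);
    gst_bin_add_many (GST_BIN (pipeline), src, demux, parse, sink, NULL);
    /* Static pads always exist, so these links succeed right away. */
    gst_element_link (src, demux);   /* filesrc:src   -> tsdemux:sink  */
    gst_element_link (parse, sink);  /* h264parse:src -> fakesink:sink */
    /* The demux -> parse link is deferred until the pad appears. */
    g_signal_connect (demux, "pad-added", G_CALLBACK (on_pad_added), parse);
    gst_element_set_state (pipeline, GST_STATE_PLAYING);
    GstBus *bus = gst_element_get_bus (pipeline);
    gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,
                                GST_MESSAGE_ERROR | GST_MESSAGE_EOS);
    gst_object_unref (bus);
    gst_element_set_state (pipeline, GST_STATE_NULL);
    gst_object_unref (pipeline);
    return 0;
}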

Timestamping error or computer too slow with gstreamer/gstbasesink in Qt

I am building a simple video player in Qt using gstreamer-1.0. When I run it from Qt, or as an .exe on my PC, everything runs and works fine. But when I try it on another PC, it plays for a few seconds, then skips some seconds/minutes, and so on. I guess the problem is with sync; I have tried setting the d3dvideosink property sync=false, but the result is the same. I have read many similar threads, but none seems to help.
A lot of buffers are being dropped.
Additional debug info:
gstbasesink.c(2846): gst_base_sink_is_too_late ():
There may be a timestamping problem, or this computer is too slow.
I have tried setting different properties, but none helped. I have seen the following threads, but still the same problem:
Thread 1
Thread 2
Thread 3
On Thread 3 there is a suggestion to set the "do-timestamp" property on appsrc to TRUE, but I use uridecodebin as source, which has no "do-timestamp" property.
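One way to confirm that an element really lacks a property like "do-timestamp" is to look it up with g_object_class_find_property before calling g_object_set, which otherwise only emits a generic GLib-GObject warning. A minimal sketch; the helper name set_bool_prop_if_present is made up for illustration:
/* Set a boolean property only if the element actually exposes it,
 * logging a clear message otherwise. */
static gboolean set_bool_prop_if_present (GstElement *element,
                                          const gchar *prop, gboolean value)
{
    GObjectClass *klass = G_OBJECT_GET_CLASS (element);
    if (g_object_class_find_property (klass, prop) == NULL) {
        g_printerr ("%s has no '%s' property\n", GST_ELEMENT_NAME (element), prop);
        return FALSE;
    }
    g_object_set (element, prop, value, NULL);
    return TRUE;
}
/* set_bool_prop_if_present (data.source, "do-timestamp", TRUE) reports the
 * missing property for uridecodebin, while it would succeed on an appsrc. */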
My pipeline is as follows (uridecodebin feeds an audio branch and a video branch):
uridecodebin name=src src. ! audioconvert ! volume ! autoaudiosink src. ! videoconvert ! gamma ! d3dvideosink
Thanks in Advance!
Here is some code from the elements creation/linking. Please comment if you need any other code.
// Create the elements
data.source = gst_element_factory_make ( "uridecodebin", "source" );
data.audio_convert = gst_element_factory_make ( "audioconvert", "audio_convert" );
data.volume = gst_element_factory_make ( "volume", "volume");
data.audio_sink = gst_element_factory_make ( "autoaudiosink", "audio_sink" );
data.video_convert = gst_element_factory_make ( "videoconvert", "video_convert" );
data.filter = gst_element_factory_make ( "gamma", "filter");
data.video_sink = gst_element_factory_make ( "d3dvideosink", "video_sink" );
// Create the empty pipeline
data.pipeline = gst_pipeline_new ("test-pipeline");
if (!data.pipeline || !data.source || !data.audio_convert || !data.volume || !data.audio_sink
|| !data.video_convert || !data.filter || !data.video_sink ) {
g_printerr ("Not all elements could be created.\n");
return ;
}
// Build the pipeline. Note that we are NOT linking the source at this point. We will do it later.
gst_bin_add_many (GST_BIN (data.pipeline), data.source, data.audio_convert , data.volume, data.audio_sink,
data.video_convert, data.filter, data.video_sink, NULL);
if (!gst_element_link (data.audio_convert, data.volume)) {
g_printerr ("Elements AUDIO_CONVERT - VOLUME could not be linked.\n");
gst_object_unref (data.pipeline);
return ;
}
if (!gst_element_link (data.volume, data.audio_sink)) {
g_printerr ("Elements VOLUME - AUDIO_SINK could not be linked.\n");
gst_object_unref (data.pipeline);
return ;
}
if (!gst_element_link(data.video_convert, data.filter)) {
g_printerr("Elements VIDEO_CONVERT - FILTER could not be linked.\n");
gst_object_unref(data.pipeline);
return ;
}
if (!gst_element_link(data.filter, data.video_sink)) {
g_printerr("Elements FILTER - VIDEO_SINK could not be linked.\n");
gst_object_unref(data.pipeline);
return ;
}
When I open video:
// Set the URI to play
QString filePath = "file:///"+filename;
QByteArray ba = filePath.toLatin1();
const char *c_filePath = ba.data();
ret = gst_element_set_state (data.pipeline, GST_STATE_NULL);
gint64 max_lateness = 2000000; // 2 ms, in nanoseconds
g_object_set (data.source, "uri", c_filePath, NULL);
// I have tried setting the following properties, but none helped
// g_object_set (data.source, "do-timestamp", true, NULL);
// g_object_set( data.video_sink, "sync", false, NULL);
// g_object_set( data.video_sink, "max-lateness", max_lateness, NULL);
qDebug() << c_filePath;
// Link video_sink with playingWidget->winId()
gst_video_overlay_set_window_handle (GST_VIDEO_OVERLAY (data.video_sink), xwinid);
// Connect to the pad-added signal
g_signal_connect (data.source, "pad-added", G_CALLBACK (pad_added_handler), &data) ;
// Start playing
ret = gst_element_set_state (data.pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
gst_element_set_state (data.pipeline, GST_STATE_NULL);
gst_object_unref (data.pipeline);
// Exit application
QTimer::singleShot(0, QApplication::activeWindow(), SLOT(quit()));
}
data.playing = TRUE;
data.rate = 1.0;
// Iterate - gets the position and length every 500 msec
g_print ("Running...\n");
emit setMsg( "Running...\n" );
currFileName = filename;
timer->start(500);
pad_added_handler:
void gst_pipeline::pad_added_handler(GstElement *src, GstPad *new_pad, CustomData *data)
{
GstPadLinkReturn ret;
GstCaps *new_pad_caps = NULL;
GstStructure *new_pad_struct = NULL;
const gchar *new_pad_type = NULL;
// Sink pads of the first element in each branch (audioconvert / videoconvert)
GstPad *sink_pad_audio = gst_element_get_static_pad (data->audio_convert, "sink");
GstPad *sink_pad_video = gst_element_get_static_pad (data->video_convert, "sink");
g_print ("Received new pad '%s' from '%s':\n", GST_PAD_NAME (new_pad), GST_ELEMENT_NAME (src));
// If our audio converter is already linked, we have nothing to do here
if (gst_pad_is_linked (sink_pad_audio))
{
g_print (" We have already linked sink_pad_audio. Ignoring.\n");
// goto exit;
}
// If our video converter is already linked, we have nothing to do here
if (gst_pad_is_linked (sink_pad_video))
{
g_print (" We have already linked sink_pad_video. Ignoring.\n");
// goto exit;
}
// Check the new pad's type
new_pad_caps = gst_pad_get_current_caps (new_pad); //gst_pad_get_caps
new_pad_struct = gst_caps_get_structure (new_pad_caps, 0);
new_pad_type = gst_structure_get_name (new_pad_struct);
if (g_str_has_prefix (new_pad_type, "audio/x-raw"))
{
// Attempt the link
ret = gst_pad_link (new_pad, sink_pad_audio);
if (GST_PAD_LINK_FAILED (ret))
{ g_print (" Type is '%s' but link failed.\n", new_pad_type); }
else
{ g_print (" Link succeeded (type '%s').\n", new_pad_type); }
}
else if (g_str_has_prefix (new_pad_type, "video/x-raw"))
{
// Attempt the link
ret = gst_pad_link (new_pad, sink_pad_video);
if (GST_PAD_LINK_FAILED (ret))
{ g_print (" Type is '%s' but link failed.\n", new_pad_type); }
else
{ g_print (" Link succeeded (type '%s').\n", new_pad_type); }
}
else
{
g_print (" It has type '%s' which is not audio/x-raw OR video/x-raw. Ignoring.\n", new_pad_type);
goto exit;
}
exit:
// Unreference the new pad's caps, if we got them
if (new_pad_caps != NULL)
{ gst_caps_unref (new_pad_caps); g_print("EXIT"); msg_STRING2 += "EXIT\n" ; }
// Unreference the sink pad
gst_object_unref (sink_pad_audio);
gst_object_unref (sink_pad_video);
}