Dynamically link audio source element into gstreamer audiomixer element - c++

I am trying to mix internal audio and microphone audio with the GStreamer audiomixer element and then mux the single stream with video data. So far I can only do it when the sound card is already active.
I am using wasapisrc with the loopback=true property.
What I mean is: my code works if some song is already being played on the computer when I start it.
What I want to achieve is for the internal sound source to be linked to the audiomixer element dynamically, but it just gives me an error and the program crashes. What I have done so far is put the sound card source element in a separate bin from the main pipeline and add a data probe to the wasapisrc element; when there is sound from it, I try to link the source to a queue and then to the audiomixer in the main pipeline.
Any help on how I can dynamically link and unlink a source element into the audiomixer?
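What I have in mind is roughly this (a sketch, not my working code; it assumes the queue and the mixer already sit in the same pipeline, and LinkCtx is just an illustrative helper):
#include <gst/gst.h>
/* Sketch: called on the first buffer from the loopback source; links the
 * already-added queue branch into a running audiomixer. "queue" and "mixer"
 * stand for soundCardSrcQueue and audioMixer from the code below. */
typedef struct { GstElement *queue; GstElement *mixer; } LinkCtx;
static GstPadProbeReturn first_buffer_cb(GstPad *pad, GstPadProbeInfo *info, gpointer data)
{
    LinkCtx *ctx = (LinkCtx *)data;
    GstPad *srcpad = gst_element_get_static_pad(ctx->queue, "src");
    GstPad *mixpad = gst_element_get_request_pad(ctx->mixer, "sink_%u");
    if (gst_pad_link(srcpad, mixpad) != GST_PAD_LINK_OK)
        g_printerr("failed to link the soundcard branch into audiomixer\n");
    gst_object_unref(srcpad);
    gst_object_unref(mixpad);
    /* link only once, then drop the probe */
    return GST_PAD_PROBE_REMOVE;
}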
My full code is below:
#include <gst/gst.h>
//#include "pch.h"
#include <windows.h>
#include <stdio.h>
GMainLoop* mainLoop;
GstElement *mainPipeline;
GstPadLinkReturn link_to_mixer(GstPad* binPad, GstElement* mix);
GstPad* retrieve_ghost_pad(GstElement* bin, GstElement* elem);
typedef struct _elemStruct
{
GstElement *micSource, *micSourceQueue, *soundCardSrc, *soundCardSrcQueue, *micSrcRate, *micRateQueue, *soundCardRate, *soundCardRateQueue, *audioMixer, *audioMixerQueue;
GstElement* audioConverter, *audioConverterQueue, *audioEncoder, *audioEncoderQueue, *avMuxer, *gdiGrabber, *videoConverter, *x264encoder;
GstElement* muxerQueue, *fileSinker, *gdiGrabberQueue, *videoConverterQueue, *x264encoderQueue;
GstCaps *caps;
GstElement* message;
GstStateChangeReturn stateRet;
GstElement *micBin, *soundCardBin, *screenBin, *audioBin;
GstPad *micMixPad, *soundCardMixPad, *audioMuxPad, *videoMuxPad;
GstBus* mainBus;
GstStateChangeReturn ret;
GstMessage* msg;
guint bus_watch_id;
GstElement* soundCardTempSink;
}elemStruct;
BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
{
switch (fdwCtrlType)
{
// Handle the CTRL-C signal.
case CTRL_C_EVENT:
printf("Ctrl-C event\n\n");
Beep(750, 300);
return TRUE;
// CTRL-CLOSE: confirm that the user wants to exit.
case CTRL_CLOSE_EVENT:
Beep(600, 200);
printf("Ctrl-Close event\n\n");
return TRUE;
// Pass other signals to the next handler.
case CTRL_BREAK_EVENT:
Beep(900, 200);
printf("Ctrl-Break event\n\n");
return FALSE;
case CTRL_LOGOFF_EVENT:
Beep(1000, 200);
printf("Ctrl-Logoff event\n\n");
return FALSE;
case CTRL_SHUTDOWN_EVENT:
Beep(750, 500);
printf("Ctrl-Shutdown event\n\n");
return FALSE;
default:
return FALSE;
}
}
void addsoundsrc_toMainline(GstPadProbeInfo* info, GstElement* bin)
{
// we got data , add pipeline to audiomixer
// add bin to audiomixer
// get bin src pad
// call retrieve ghostsrc function
//retrieve_ghost_pad()
GstElement* queue = gst_bin_get_by_name(GST_BIN(bin), "sound_card_source_queue");
GstPad* mixpad = retrieve_ghost_pad(bin, queue);
//link_to_mixer(mixpad, )
}
GstPadProbeReturn soundCardProbe(GstPad* pad, GstPadProbeInfo* info, gpointer data)
{
//GstBuffer* buffer = gst_pad_probe_info_get_buffer(info);
GstBuffer* buffer = GST_PAD_PROBE_INFO_BUFFER(info);
elemStruct* mainElem = (elemStruct*)data;
g_print("received data in the soundcard probe ");
//GstElement* bin = mainElem->soundCardBin;
//bool add = gst_bin_add(GST_BIN(mainElem->audioBin), mainElem->soundCardBin);
//gst_element_sync_state_with_parent(mainElem->soundCardBin);
//GstElement* queue = gst_bin_get_by_name((GST_BIN(bin)), "sound_card_source_queue");
//GstPad* mixpad = retrieve_ghost_pad(bin, mainElem->soundCardSrcQueue);
//GstPad* mixPad = gst_element_get_static_pad(mainElem->soundCardSrcQueue, "sink");
//link_to_mixer(mixPad, mainElem->audioMixer);
//addsoundsrc_toMainline(info, bin);
return GST_PAD_PROBE_PASS;
}
void set_queue_property(GstElement* _queue)
{
g_object_set(G_OBJECT(_queue), "max-size-buffers", 1000, "max-size-time", (guint64) 1000000000000, NULL); // max-size-time is a guint64, so the literal must be cast for varargs
}
GstPadLinkReturn link_to_mixer(GstPad* binPad, GstElement* mix)
{
GstPad* mixerPad;
gchar* binPadName, *mixerPadName;
mixerPad = gst_element_get_compatible_pad(mix, binPad, NULL);
//mixerPad = gst_element_get_request_pad(mix, "sink_%u");
binPadName = gst_pad_get_name(binPad);
mixerPadName = gst_pad_get_name(mixerPad);
GstPadLinkReturn retVal = gst_pad_link(binPad, mixerPad); // check if successful
g_print("a new link is created with %s and %s pads\n", binPadName, mixerPadName);
g_free(binPadName);
g_free(mixerPadName);
//gst_object_unref(binPad);
gst_object_unref(mixerPad);
//gst_element_release_request_pad(mix, mixerPad);
return retVal;
}
GstPadLinkReturn audio_link_to_muxer(GstPad* binPad, GstElement* mix)
{
GstPad* muxerPad;
gchar* binPadName, *muxerPadName;
//mixerPad = gst_element_get_compatible_pad(mix, binPad, NULL);
muxerPad = gst_element_get_request_pad(mix, "audio_%u");
binPadName = gst_pad_get_name(binPad);
muxerPadName = gst_pad_get_name(muxerPad);
GstPadLinkReturn retVal = gst_pad_link(binPad, muxerPad); // check if successful
g_print("a new link is created with %s and %s pads\n", binPadName, muxerPadName);
g_free(binPadName);
g_free(muxerPadName);
//gst_object_unref(binPad);
//gst_object_unref(mixerPad);
gst_element_release_request_pad(mix, muxerPad);
return retVal;
}
GstPadLinkReturn video_link_to_muxer(GstPad* binPad, GstElement* mix)
{
GstPad* muxerPad;
gchar* binPadName, *muxerPadName;
//mixerPad = gst_element_get_compatible_pad(mix, binPad, NULL);
muxerPad = gst_element_get_request_pad(mix, "video_%u");
binPadName = gst_pad_get_name(binPad);
muxerPadName = gst_pad_get_name(muxerPad);
GstPadLinkReturn retVal = gst_pad_link(binPad, muxerPad); // check if successful
g_print("a new link is created with %s and %s pads\n", binPadName, muxerPadName);
g_free(binPadName);
g_free(muxerPadName);
//gst_object_unref(binPad);
//gst_object_unref(mixerPad);
gst_element_release_request_pad(mix, muxerPad);
return retVal;
}
GstPadLinkReturn link_to_mpeg_muxer(GstPad* binPad, GstElement* mix)
{
GstPad* muxerPad;
gchar* binPadName, *muxerPadName;
muxerPad = gst_element_get_compatible_pad(mix, binPad, NULL);
//muxerPad = gst_element_get_request_pad(mix, "sink_%d");
binPadName = gst_pad_get_name(binPad);
muxerPadName = gst_pad_get_name(muxerPad);
GstPadLinkReturn retVal = gst_pad_link(binPad, muxerPad); // check if successful
g_print("a new link is created with %s and %s pads\n", binPadName, muxerPadName);
g_free(binPadName);
g_free(muxerPadName);
//gst_object_unref(binPad);
gst_object_unref(muxerPad);
//gst_element_release_request_pad(mix, muxerPad);
return retVal;
}
GstPad* retrieve_ghost_pad(GstElement* bin, GstElement* elem)
{
GstPad* elemPad = gst_element_get_static_pad(elem, "src");
GstPad* ghost = gst_ghost_pad_new("ghostsrc", elemPad);
gst_element_add_pad(bin, ghost);
gst_object_unref(elemPad);
return ghost;
}
static gboolean bus_call(GstBus *bus, GstMessage *msg, gpointer data)
{
GMainLoop *loop = (GMainLoop *)data;
switch (GST_MESSAGE_TYPE(msg)) {
case GST_MESSAGE_EOS:
{
g_print("End of stream\n");
g_main_loop_quit(loop);
break;
}
case GST_MESSAGE_ERROR: {
gchar *debug;
GError *error;
gst_message_parse_error(msg, &error, &debug);
g_free(debug);
g_printerr("Error: %s\n", error->message);
g_error_free(error);
g_main_loop_quit(loop);
break;
}
case GST_MESSAGE_STATE_CHANGED:
{
GstState old_state, new_state;
gst_message_parse_state_changed(msg, &old_state, &new_state, NULL);
g_print("Element %s changed state from %s to %s.\n",
GST_OBJECT_NAME(msg->src),
gst_element_state_get_name(old_state),
gst_element_state_get_name(new_state));
//if (new_state == GST_STATE_PAUSED)
//{
// gst_element_set_state(mainPipeline, GST_STATE_NULL);
//}
break;
}
break;
default:
break;
}
return TRUE;
}
int main(int argc, char** argv)
{
//gst-launch-1.0.exe wasapisrc loopback=true \
//  ! audiorate ! queue ! mix. wasapisrc low-latency=true \
//  ! audiorate ! queue ! mix. audiomixer name=mix ! queue ! audioconvert \
//  ! queue ! avenc_aac ! queue ! muxer. gdiscreencapsrc ! videoconvert \
//  ! x264enc ! mpegtsmux name=muxer ! queue ! filesink location=muxin.mp4 sync=false
elemStruct* mainStruct = new elemStruct();
if (!gst_init_check(&argc, &argv, NULL))
{
g_printerr("couldn't initialize gstreamer\n");
return -1;
}
mainLoop = g_main_loop_new(NULL, FALSE);
if ((mainPipeline = gst_pipeline_new("main_pipeline")) == NULL)
{
g_printerr("couldn't create the main pipeline\n");
return -1;
}
mainStruct->micSource = gst_element_factory_make("wasapisrc", "mic_source");
mainStruct->soundCardSrc = gst_element_factory_make("wasapisrc", "sound_card_source");
mainStruct->gdiGrabber = gst_element_factory_make("dx9screencapsrc", "dx9_screen_capture_source");
mainStruct->micSourceQueue = gst_element_factory_make("queue", "mic_source_queue_elem");
mainStruct->soundCardSrcQueue = gst_element_factory_make("queue", "sound_card_source_queue");
mainStruct->micSrcRate = gst_element_factory_make("audiorate", "mic_audio_rate_elem");
mainStruct->soundCardRate = gst_element_factory_make("audiorate", "soundCard_audiorate_elem");
mainStruct->micRateQueue = gst_element_factory_make("queue", "mic_audiorate_queue");
mainStruct->soundCardRateQueue = gst_element_factory_make("queue", "soundCard_audiorate_queue");
mainStruct->audioMixer = gst_element_factory_make("audiomixer", "audio_mixer_elem");
mainStruct->audioMixerQueue = gst_element_factory_make("queue", "audio_mixer_queue_elem");
mainStruct->soundCardTempSink = gst_element_factory_make("autoaudiosink", "soundcard_temp_sink_elem");
mainStruct->audioEncoder = gst_element_factory_make("avenc_aac", "audio_encoder_elem");
mainStruct->audioEncoderQueue = gst_element_factory_make("queue", "audio_encoder_queue_elem");
mainStruct->audioConverter = gst_element_factory_make("audioconvert", "audio_convert_elem");
mainStruct->audioConverterQueue = gst_element_factory_make("queue", "audio_convert_queue_elem");
mainStruct->gdiGrabberQueue = gst_element_factory_make("queue", "gdi_grabber_queue_elem");
mainStruct->gdiGrabber = gst_element_factory_make("dx9screencapsrc", "gdi_grabber_elem");
mainStruct->videoConverterQueue = gst_element_factory_make("queue", "videoconvert_queue_elem");
mainStruct->x264encoderQueue = gst_element_factory_make("queue", "x264encoder_queue_elem");
mainStruct->videoConverter = gst_element_factory_make("videoconvert", "videoconvert_elem");
mainStruct->x264encoder = gst_element_factory_make("x264enc", "x264enc_elem");
mainStruct->avMuxer = gst_element_factory_make("mpegtsmux", "mp4_muxer_elem");
//if ((avMuxer = gst_element_factory_make("mpegtsmux", "mp4_muxer_elem")) == NULL)
mainStruct->fileSinker = gst_element_factory_make("filesink", "filesink_elem");
// set up all the sources
g_object_set(G_OBJECT(mainStruct->micSource), "do-timestamp", true, NULL);
g_object_set(G_OBJECT(mainStruct->soundCardSrc), "do-timestamp", true, "loopback", true, NULL);
g_object_set(G_OBJECT(mainStruct->gdiGrabber), "do-timestamp", true, "cursor", true, NULL);
g_object_set(G_OBJECT(mainStruct->x264encoder), "pass", 17, NULL);
g_object_set(G_OBJECT(mainStruct->fileSinker), "location", "sani_1486.mp4", "sync", false, NULL);
// set up all the queues
set_queue_property(mainStruct->micSourceQueue);
set_queue_property(mainStruct->soundCardSrcQueue);
set_queue_property(mainStruct->audioMixerQueue);
set_queue_property(mainStruct->audioEncoderQueue);
set_queue_property(mainStruct->gdiGrabberQueue);
set_queue_property(mainStruct->videoConverterQueue);
set_queue_property(mainStruct->x264encoderQueue);
// add the src elements to each src bin
gst_bin_add_many(GST_BIN(mainPipeline), mainStruct->micSource, mainStruct->micSourceQueue, NULL);
mainStruct->soundCardBin = gst_bin_new("sound_card_bin");
gst_bin_add_many(GST_BIN(mainStruct->soundCardBin), mainStruct->soundCardSrc, mainStruct->soundCardSrcQueue, NULL);
gst_element_link_many(mainStruct->soundCardSrc, mainStruct->soundCardSrcQueue,NULL);
GstPad* soundSourceprober = gst_element_get_static_pad(mainStruct->soundCardSrc, "src");
gst_pad_add_probe(soundSourceprober, GST_PAD_PROBE_TYPE_BUFFER, soundCardProbe, mainStruct, NULL); // pass the struct pointer itself, not its address; the probe casts it back to elemStruct*
gst_element_set_state(mainStruct->soundCardBin, GST_STATE_PLAYING);
// link elements in each source bin
gst_element_link(mainStruct->micSource, mainStruct->micSourceQueue);
//gst_element_link_many(mainStruct->soundCardSrc, mainStruct->soundCardSrcQueue, NULL);
// put this two bin in audiobin, we will connect audiobin to screenBin later
gst_bin_add_many(GST_BIN(mainPipeline),mainStruct->audioMixer, mainStruct->audioMixerQueue, mainStruct->audioEncoder, mainStruct->audioEncoderQueue, NULL);
//GstStateChangeReturn ret = gst_element_set_state(mainStruct->soundCardSrc, GST_STATE_PLAYING);
//GstStateChangeReturn retu = gst_element_get_state(mainStruct->soundCardSrc);
mainStruct->micMixPad = gst_element_get_static_pad(mainStruct->micSourceQueue, "src");
link_to_mixer(mainStruct->micMixPad, mainStruct->audioMixer);
//mainStruct->soundCardMixPad = gst_element_get_static_pad(mainStruct->soundCardSrcQueue, "src");
//link_to_mixer(mainStruct->soundCardMixPad, mainStruct->audioMixer);
bool one_ = gst_element_link_many(mainStruct->audioMixer, mainStruct->audioMixerQueue, mainStruct->audioEncoder, mainStruct->audioEncoderQueue, NULL);
gst_bin_add_many(GST_BIN(mainPipeline), mainStruct->gdiGrabber, mainStruct->gdiGrabberQueue, mainStruct->videoConverterQueue, mainStruct->videoConverter, mainStruct->x264encoder, mainStruct->x264encoderQueue, NULL);
// so add this element , with main bin
gst_element_link_many(mainStruct->gdiGrabber, mainStruct->gdiGrabberQueue, mainStruct->videoConverter, mainStruct->videoConverterQueue, mainStruct->x264encoder, mainStruct->x264encoderQueue, NULL);
//link_to_mixer(videoMuxPad, avMuxer);
mainStruct->videoMuxPad = gst_element_get_static_pad(mainStruct->x264encoderQueue, "src");
mainStruct->audioMuxPad = gst_element_get_static_pad(mainStruct->audioEncoderQueue, "src");
// add all the bin and muxer and filesink to main pipeline bin
gst_bin_add_many(GST_BIN(mainPipeline), mainStruct->avMuxer, mainStruct->fileSinker, NULL);
link_to_mpeg_muxer(mainStruct->videoMuxPad, mainStruct->avMuxer);
link_to_mpeg_muxer(mainStruct->audioMuxPad, mainStruct->avMuxer);
gst_element_link(mainStruct->avMuxer, mainStruct->fileSinker);
//gst_element_link(videoMuxPad, avMuxer);
/* Start playing the pipeline */
mainStruct->ret = gst_element_set_state(mainPipeline, GST_STATE_PLAYING);
// TODO , deal with ret
mainStruct->mainBus = gst_element_get_bus(mainPipeline);
mainStruct->bus_watch_id = gst_bus_add_watch(mainStruct->mainBus, bus_call, mainLoop);
gst_object_unref(mainStruct->mainBus);
// msg = gst_bus_timed_pop_filtered(mainBus, GST_CLOCK_TIME_NONE, GstMessageType(GST_MESSAGE_ERROR | GST_MESSAGE_EOS));
g_main_loop_run(mainLoop);
gst_element_set_state(mainPipeline, GST_STATE_NULL);
gst_object_unref(GST_OBJECT(mainPipeline));
g_source_remove(mainStruct->bus_watch_id);
g_main_loop_unref(mainLoop);
//g_main_loop_quit(mainLoop);
return 0;
}

Related

Display no signal image if video sender is closed

If I close the sender in this example, the video displayed by the receiver freezes. Is there a way to display a static no signal image instead, for example an all blue image, and have the video return when the sender restarts?
Sender
gst-launch-1.0 videotestsrc ! video/x-raw,format=GRAY8 ! videoconvert ! x264enc pass=qual quantizer=20 tune=zerolatency ! rtph264pay ! udpsink host=127.0.0.1 port=5000
Receiver
gst-launch-1.0 udpsrc port=5000 ! application/x-rtp ! rtph264depay ! avdec_h264 ! videoconvert ! autovideosink
EDIT
This code seems to come close, but for some reason, if I add in the videotestsrc by uncommenting the commented-out lines, the udpsrc no longer calls the timeout callback:
// g++ gst_client.cpp `pkg-config --cflags gstreamer-1.0` `pkg-config --libs gstreamer-1.0`
#include <gst/gst.h>
#include <cstdlib>
#include <cstdio>
struct gstreamer_data {
GstElement* pipeline;
GstElement* no_signal_source;
GstElement* udp_source;
GstElement* rtp_decoder;
GstElement* video_decoder;
GstElement* input_selector;
GstElement* video_converter;
GstElement* video_sink;
gulong signal_handler_id;
GMainLoop* main_loop;
};
static void element_callback(GstBus* bus, GstMessage* message, gstreamer_data* data);
static GstPadProbeReturn have_data_callback(GstPad* pad, GstPadProbeInfo *info, gstreamer_data* user_data);
static GstPadProbeReturn have_data_callback(GstPad* pad, GstPadProbeInfo *info, gstreamer_data* user_data) {
GstBus* bus;
printf("have data\n");
bus = gst_element_get_bus(user_data->pipeline);
user_data->signal_handler_id = g_signal_connect(G_OBJECT(bus), "message::element", (GCallback) element_callback, user_data);
gst_object_unref(bus);
return GST_PAD_PROBE_REMOVE;
}
static void element_callback(GstBus* bus, GstMessage* message, gstreamer_data* data) {
const GstStructure* st = gst_message_get_structure(message);
GstPad* pad;
if (GST_MESSAGE_TYPE(message) == GST_MESSAGE_ELEMENT) {
if (gst_structure_has_name(st, "GstUDPSrcTimeout")) {
printf("Timeout received from udpsrc\n");
g_signal_handler_disconnect(G_OBJECT(bus), data->signal_handler_id);
pad = gst_element_get_static_pad(data->udp_source, "src");
gst_pad_add_probe(pad, GST_PAD_PROBE_TYPE_BUFFER, (GstPadProbeCallback) have_data_callback, data, NULL);
}
}
}
static void error_callback(GstBus* bus, GstMessage* message, gstreamer_data* data) {
(void) bus;
GError* err;
gchar* debug_info;
gst_message_parse_error(message, &err, &debug_info);
g_printerr("Error received from element %s: %s\n", GST_OBJECT_NAME(message->src), err->message);
g_printerr("Debugging information: %s\n", debug_info ? debug_info : "none");
g_clear_error(&err);
g_free(debug_info);
g_main_loop_quit(data->main_loop);
}
int main() {
gstreamer_data data;
GstStateChangeReturn ret;
GstBus* bus;
GstPad* pad;
gst_init(NULL, NULL);
data.no_signal_source = gst_element_factory_make("videotestsrc", "no_signal_source");
g_object_set(G_OBJECT(data.no_signal_source),
"pattern", 6,
NULL);
data.udp_source = gst_element_factory_make("udpsrc", "udp_source");
g_object_set(G_OBJECT(data.udp_source),
"port", 5000,
"caps", gst_caps_new_empty_simple("application/x-rtp"),
"timeout", 1000000000,
NULL);
data.rtp_decoder = gst_element_factory_make("rtph264depay", "rtp_decoder");
data.video_decoder = gst_element_factory_make("avdec_h264", "video_decoder");
data.input_selector = gst_element_factory_make("input-selector", "input_selector");
data.video_converter = gst_element_factory_make("videoconvert", "video_converter");
data.video_sink = gst_element_factory_make("autovideosink", "video_sink");
data.pipeline = gst_pipeline_new("pipeline");
if (
!data.pipeline ||
!data.no_signal_source ||
!data.udp_source ||
!data.rtp_decoder ||
!data.video_decoder ||
!data.input_selector ||
!data.video_converter ||
!data.video_sink
)
{
g_printerr("Not all elements could be created.\n");
exit(-1);
}
gst_bin_add_many(
GST_BIN(data.pipeline),
//data.no_signal_source,
data.udp_source,
data.rtp_decoder,
data.video_decoder,
data.input_selector,
data.video_converter,
data.video_sink,
NULL);
if (gst_element_link_many(
data.udp_source,
data.rtp_decoder,
data.video_decoder,
NULL) != TRUE)
{
g_printerr("Elements could not be linked.\n");
gst_object_unref(data.pipeline);
exit(-1);
}
GstPad* src_1 = gst_element_get_static_pad(data.video_decoder, "src");
GstPad* sink_1 = gst_element_get_request_pad(data.input_selector, "sink_%u");
gst_pad_link(src_1, sink_1);
/*
GstPad* src_2 = gst_element_get_static_pad(data.no_signal_source, "src");
GstPad* sink_2 = gst_element_get_request_pad(data.input_selector, "sink_%u");
gst_pad_link(src_2, sink_2);
*/
g_object_set(G_OBJECT(data.input_selector),
"active-pad", sink_1,
NULL);
if (gst_element_link_many(
data.input_selector,
data.video_converter,
data.video_sink,
NULL) != TRUE)
{
g_printerr("Elements could not be linked.\n");
gst_object_unref(data.pipeline);
exit(-1);
}
pad = gst_element_get_static_pad(data.udp_source, "src");
gst_pad_add_probe(pad, GST_PAD_PROBE_TYPE_BUFFER, (GstPadProbeCallback) have_data_callback, &data, NULL);
bus = gst_element_get_bus(data.pipeline);
gst_bus_add_signal_watch(bus);
data.signal_handler_id = g_signal_connect(G_OBJECT(bus), "message::error", (GCallback) error_callback, &data);
gst_object_unref(bus);
ret = gst_element_set_state(data.pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr("Unable to set the pipeline to the playing state.\n");
gst_object_unref(data.pipeline);
exit(-1);
}
data.main_loop = g_main_loop_new(NULL, FALSE);
g_main_loop_run(data.main_loop);
return 0;
}
EDIT
This code seems fine until I uncomment the selection of the active pad in the callbacks. Do I need to do something before I change the active pad, like stop the pipeline?
// g++ gst_client.cpp `pkg-config --cflags gstreamer-1.0` `pkg-config --libs gstreamer-1.0`
#include <gst/gst.h>
#include <cstdlib>
#include <cstdio>
struct gstreamer_data {
GstElement* pipeline;
GstElement* video_source;
GstElement* udp_source;
GstElement* rtp_decoder;
GstElement* video_decoder;
GstElement* video_converter;
GstElement* input_selector;
GstPad* sink_1;
GstPad* sink_2;
GstElement* video_sink;
gulong signal_handler_id;
GMainLoop* main_loop;
};
static void element_callback(GstBus* bus, GstMessage* message, gstreamer_data* data);
static GstPadProbeReturn have_data_callback(GstPad* pad, GstPadProbeInfo *info, gstreamer_data* user_data);
static GstPadProbeReturn have_data_callback(GstPad* pad, GstPadProbeInfo *info, gstreamer_data* user_data) {
GstBus* bus;
printf("have data\n");
/*
g_object_set(G_OBJECT(user_data->input_selector),
"active-pad", user_data->sink_2,
NULL);
*/
bus = gst_element_get_bus(user_data->pipeline);
user_data->signal_handler_id = g_signal_connect(G_OBJECT(bus), "message::element", (GCallback) element_callback, user_data);
gst_object_unref(bus);
return GST_PAD_PROBE_REMOVE;
}
static void element_callback(GstBus* bus, GstMessage* message, gstreamer_data* data) {
const GstStructure* st = gst_message_get_structure(message);
GstPad* pad;
if (GST_MESSAGE_TYPE(message) == GST_MESSAGE_ELEMENT) {
if (gst_structure_has_name(st, "GstUDPSrcTimeout")) {
printf("no data\n");
/*
g_object_set(G_OBJECT(data->input_selector),
"active-pad", data->sink_1,
NULL);
*/
g_signal_handler_disconnect(G_OBJECT(bus), data->signal_handler_id);
pad = gst_element_get_static_pad(data->udp_source, "src");
gst_pad_add_probe(pad, GST_PAD_PROBE_TYPE_BUFFER, (GstPadProbeCallback) have_data_callback, data, NULL);
gst_object_unref(pad);
}
}
}
static void error_callback(GstBus* bus, GstMessage* message, gstreamer_data* data) {
(void) bus;
GError* err;
gchar* debug_info;
gst_message_parse_error(message, &err, &debug_info);
g_printerr("Error received from element %s: %s\n", GST_OBJECT_NAME(message->src), err->message);
g_printerr("Debugging information: %s\n", debug_info ? debug_info : "none");
g_clear_error(&err);
g_free(debug_info);
g_main_loop_quit(data->main_loop);
}
int main() {
gstreamer_data data;
GstStateChangeReturn ret;
GstBus* bus;
GstPad* pad;
gst_init(NULL, NULL);
data.video_source = gst_element_factory_make("videotestsrc", "video_source");
g_object_set(G_OBJECT(data.video_source),
"pattern", 6,
"is-live", true,
NULL);
data.udp_source = gst_element_factory_make("udpsrc", "udp_source");
g_object_set(G_OBJECT(data.udp_source),
"port", 5000,
"caps", gst_caps_new_empty_simple("application/x-rtp"),
"timeout", 1000000000,
NULL);
data.rtp_decoder = gst_element_factory_make("rtph264depay", "rtp_decoder");
data.video_decoder = gst_element_factory_make("avdec_h264", "video_decoder");
data.video_converter = gst_element_factory_make("videoconvert", "video_converter");
data.input_selector = gst_element_factory_make("input-selector", "input_selector");
data.video_sink = gst_element_factory_make("autovideosink", "video_sink");
data.pipeline = gst_pipeline_new("pipeline");
if (
!data.pipeline ||
!data.video_source ||
!data.udp_source ||
!data.rtp_decoder ||
!data.video_decoder ||
!data.video_converter ||
!data.input_selector ||
!data.video_sink
)
{
g_printerr("Not all elements could be created.\n");
exit(-1);
}
gst_bin_add_many(
GST_BIN(data.pipeline),
data.video_source,
data.udp_source,
data.rtp_decoder,
data.video_decoder,
data.video_converter,
data.input_selector,
data.video_sink,
NULL);
if (gst_element_link_many(
data.udp_source,
data.rtp_decoder,
data.video_decoder,
data.video_converter,
NULL) != TRUE)
{
g_printerr("Elements could not be linked.\n");
gst_object_unref(data.pipeline);
exit(-1);
}
GstPad* src_1 = gst_element_get_static_pad(data.video_source, "src");
data.sink_1 = gst_element_get_request_pad(data.input_selector, "sink_%u");
gst_pad_link(src_1, data.sink_1);
gst_object_unref(src_1);
GstPad* src_2 = gst_element_get_static_pad(data.video_converter, "src");
data.sink_2 = gst_element_get_request_pad(data.input_selector, "sink_%u");
gst_pad_link(src_2, data.sink_2);
gst_object_unref(src_2);
if (gst_element_link_many(
data.input_selector,
data.video_sink,
NULL) != TRUE)
{
g_printerr("Elements could not be linked.\n");
gst_object_unref(data.pipeline);
exit(-1);
}
pad = gst_element_get_static_pad(data.udp_source, "src");
gst_pad_add_probe(pad, GST_PAD_PROBE_TYPE_BUFFER, (GstPadProbeCallback) have_data_callback, &data, NULL);
gst_object_unref(pad);
bus = gst_element_get_bus(data.pipeline);
gst_bus_add_signal_watch(bus);
data.signal_handler_id = g_signal_connect(G_OBJECT(bus), "message::error", (GCallback) error_callback, &data);
gst_object_unref(bus);
ret = gst_element_set_state(data.pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr("Unable to set the pipeline to the playing state.\n");
gst_object_unref(data.pipeline);
exit(-1);
}
data.main_loop = g_main_loop_new(NULL, FALSE);
g_main_loop_run(data.main_loop);
return 0;
}
EDIT:
It seems fine all of a sudden. I don't understand. Is this suitable code? Can it be improved?
EDIT:
Setting a width and height on the videotestsrc in the sender seems to make it work. If I remove those, it breaks. Why?

pad creation and some mistakes on gst_parse_launch

I am a newbie to GStreamer and I would like to know whether we have to create source and sink pads for converter elements like videoconvert in a pipeline. I have a pipeline like this:
gst-launch-1.0 v4l2src ! video/x-raw,format=YUY2 ! videoconvert ! xvimagesink
I am trying to create a simple C application to understand the creation of pads, and I would like to know whether videoconvert has a source pad and a sink pad too. I am creating a source and sink pad for the filter.
EDIT:
Yeah, well, you see: I tried following the dynamic pipelines example and wrote the code below.
#include <gst/gst.h>
// grouped in a struct so they are easier to pass to callbacks
typedef struct _CustomData{
GstElement *pipeline;
GstElement *source;
GstElement *convert;
GstElement *sink;
}CustomData;
// callback function
// here src is the v4l2src; new_pad is the GstPad that has just been added to the src element. This is usually the pad to which we want to link
// data is the pointer we provided when attaching to the signal.
static void pad_added_handler(GstElement *src, GstPad *new_pad,CustomData *data)
{
g_print("In pad handler\n");
GstPad *sink_pad = gst_element_get_static_pad(data->convert, "sink");
GstPadLinkReturn ret;
GstCaps *new_pad_caps = NULL;
GstStructure *new_pad_struct = NULL;
const gchar *new_pad_type = NULL;
if(gst_pad_is_linked(sink_pad))
{
g_print("we are linked. igonring\n");
}
// check the new pad types
// we have previously created a piece of pipeline which deals with videoconvert linked with xvimagesink, and we will not be able to link it to a pad producing anything other than raw video.
// gst_pad_get_current_caps() retrieves the current capabilities of the pad
new_pad_caps = gst_pad_get_current_caps(new_pad);
new_pad_struct = gst_caps_get_structure(new_pad_caps, 0);
new_pad_type = gst_structure_get_name(new_pad_struct);
g_print ("It has type '%s' which is not raw audio. Ignoring.\n", new_pad_type);
if(!g_str_has_prefix(new_pad_type, "video/x-raw,format=(string)YUY2"))
{
g_print("It has new pad type");
}
// gst_pad_link tries to link two pads . the link must be specified from source to sink and both pads must be owned by elements residing in same pipeline
ret = gst_pad_link(new_pad, sink_pad);
if(GST_PAD_LINK_FAILED(ret))
{
g_print("linking of pad type '%s' failed\n", new_pad_type);
}
if(new_pad_caps !=NULL)
{
gst_caps_unref(new_pad_caps);
}
gst_object_unref(sink_pad);
}
int main(int argc, char *argv[])
{
GMainLoop *loop;
CustomData data;
GstBus *bus;
GstMessage *msg;
gboolean terminate = FALSE;
gst_init(&argc, &argv);
// loop = g_main_loop_new(NULL, FALSE);
// create the elements
data.source = gst_element_factory_make("v4l2src", "source");
data.convert = gst_element_factory_make("videoconvert", "convert");
data.sink = gst_element_factory_make("xvimagesink", "sink");
data.pipeline = gst_pipeline_new("new-pipeline");
if(!data.pipeline || !data.source || !data.convert || !data.sink)
{
g_printerr("Not all elements could be created\n");
return -1;
}
//we did not link source at this point of time, we will do it later
gst_bin_add_many(GST_BIN(data.pipeline), data.source, data.convert, data.sink, NULL);
// we link convert element to sink, do not link them with source. we dont have source pads here. so we just have videoconvert->sink unlinked
// gst_element_link(data.source, data.convert);
if(!gst_element_link( data.convert,data.sink))
{
g_printerr("elements could not be linked\n");
gst_object_unref(data.pipeline);
return -1;
}
// we set the device source
//g_object_set(source, "device", "/dev/video0", NULL);
//connect to pad added signal.
// we want to attach pad added signal to source element. to do so, we are using g_signal_connect and provide callback function and datapointer.
// when source element has enough information to start producing data, it will create source pads and trigger the pad added signal. at this point, our callback is called
g_print("before signal connect\n");
gint id= g_signal_connect(G_OBJECT(data.source), "pad-added", G_CALLBACK(pad_added_handler), &data );
g_print("after signal connect with id = %d\n", id);
//g_signal_connect(G_OBJECT(data.source), "pad-added", G_CALLBACK(handler), &data);
// gst_element_link(data.source, data.convert);
GstStateChangeReturn ret;
ret =gst_element_set_state (data.pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr ("Unable to set the pipeline to the playing state.\n");
gst_object_unref (data.pipeline);
return -1;
}
// g_main_loop_run(loop);
/* Listen to the bus */
bus = gst_element_get_bus (data.pipeline);
do {
msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,
GST_MESSAGE_STATE_CHANGED | GST_MESSAGE_ERROR | GST_MESSAGE_EOS);
/* Parse message */
if (msg != NULL) {
GError *err;
gchar *debug_info;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_ERROR:
gst_message_parse_error (msg, &err, &debug_info);
g_printerr ("Error received from element %s: %s\n", GST_OBJECT_NAME (msg->src), err->message);
g_printerr ("Debugging information: %s\n", debug_info ? debug_info : "none");
g_clear_error (&err);
g_free (debug_info);
terminate = TRUE;
break;
case GST_MESSAGE_EOS:
g_print ("End-Of-Stream reached.\n");
terminate = TRUE;
break;
case GST_MESSAGE_STATE_CHANGED:
/* We are only interested in state-changed messages from the pipeline */
if (GST_MESSAGE_SRC (msg) == GST_OBJECT (data.pipeline)) {
GstState old_state, new_state, pending_state;
gst_message_parse_state_changed (msg, &old_state, &new_state, &pending_state);
g_print ("Pipeline state changed from %s to %s:\n",
gst_element_state_get_name (old_state), gst_element_state_get_name (new_state));
}
break;
default:
/* We should not reach here */
g_printerr ("Unexpected message received.\n");
break;
}
gst_message_unref (msg);
}
} while (!terminate);
/* Free resources */
gst_object_unref (bus);
gst_element_set_state(data.pipeline, GST_STATE_NULL);
gst_object_unref(data.pipeline);
return 0;
}
and it gave me an error
before signal connect
after signal connect with id = 1
Pipeline state changed from NULL to READY:
Pipeline state changed from READY to PAUSED:
Error received from element source: Internal data stream error.
Debugging information: gstbasesrc.c(3055): gst_base_src_loop (): /GstPipeline:new-pipeline/GstV4l2Src:source:
streaming stopped, reason not-linked (-1)
(The above code works if I write gst_element_link(data.source, data.convert) after the element-link statement for convert and sink.)
So I tried the normal way, in which I just added and linked all the elements together, and it began to work without the use of pads.
#include <gst/gst.h>
int main(int argc, char *argv[])
{
GstElement *pipeline, *source, *convert, *sink;
GstBus *bus;
GstMessage *msg;
gst_init(&argc, &argv);
source = gst_element_factory_make("v4l2src", "source");
convert = gst_element_factory_make("nvvidconv", "convert");
sink = gst_element_factory_make("xvimagesink", "sink");
pipeline = gst_pipeline_new("pipe");
gst_bin_add_many(GST_BIN(pipeline), source,convert,sink, NULL);
gst_element_link_many(source,convert,sink,NULL);
gst_element_set_state(pipeline,GST_STATE_PLAYING);
bus = gst_element_get_bus (pipeline);
msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE, GST_MESSAGE_ERROR | GST_MESSAGE_EOS);
/* Parse message */
if (msg != NULL) {
GError *err;
gchar *debug_info;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_ERROR:
gst_message_parse_error (msg, &err, &debug_info);
g_printerr ("Error received from element %s: %s\n", GST_OBJECT_NAME (msg->src), err->message);
g_printerr ("Debugging information: %s\n", debug_info ? debug_info : "none");
g_clear_error (&err);
g_free (debug_info);
break;
case GST_MESSAGE_EOS:
g_print ("End-Of-Stream reached.\n");
break;
default:
/* We should not reach here because we only asked for ERRORs and EOS */
g_printerr ("Unexpected message received.\n");
break;
}
gst_message_unref (msg);
}
/* Free resources */
gst_object_unref(bus);
gst_element_set_state(pipeline,GST_STATE_NULL);
gst_object_unref(pipeline);
}
But, in order to fully grasp the knowledge of pads, I wanted to execute simpler pipelines with pads.
I just don't fully understand the usage of pads and how to link them.
EDIT2:
Ultimately I want to write application for pipeline like this which works on command line perfectly well,
gst-launch-1.0 v4l2src device='/dev/video0' ! 'video/x-raw,format=(string)YUY2,width=(int)640,height=(int)480' ! nvvidconv ! 'video/x-raw(memory:NVMM),format=(string)NV12,width=(int)640,height=(int)480' ! nvvidconv ! 'video/x-raw,format=(string)NV12,width=(int)640,height=(int)480' ! nvvideoconvert ! 'video/x-raw(memory:NVMM),format=(string)NV12,width=(int)640,height=(int)480' ! mux.sink_0 nvstreammux live-source=1 name=mux batch-size=1 width=640 height=480 ! nvinfer config-file-path=/opt/nvidia/deepstream/deepstream-4.0/sources/apps/sample_apps/deepstream-test1/dstest1_pgie_config.txt batch-size=1 ! nvmultistreamtiler rows=1 columns=1 width=640 height=480 ! nvvideoconvert ! nvdsosd ! nvegltransform ! nveglglessink sync=false -v
But as I don't understand the usage of both pads and bins, I am unable to implement them in my above pipeline. However, I tried this,
#include <gst/gst.h>
#include <glib.h>
#include <stdio.h>
#include "gstnvdsmeta.h"
#define MAX_DISPLAY_LEN 64
#define PGIE_CLASS_ID_VEHICLE 0
#define PGIE_CLASS_ID_PERSON 2
gint frame_number = 0;
gchar pgie_classes_str[4][32] = { "Vehicle", "TwoWheeler", "Person",
"Roadsign"
};
static GstPadProbeReturn osd_sink_pad_buffer_probe(GstPad *pad, GstPadProbeInfo *info, gpointer u_data)
{
GstBuffer *buf=(GstBuffer *)info->data;
guint num_rects =0;
NvDsObjectMeta *obj_meta = NULL;
guint vehicle_count = 0;
guint person_count = 0;
NvDsMetaList * l_frame = NULL;
NvDsMetaList * l_obj = NULL;
NvDsDisplayMeta *display_meta = NULL;
NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buf);
for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
l_frame = l_frame->next) {
NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) (l_frame->data);
int offset = 0;
for (l_obj = frame_meta->obj_meta_list; l_obj != NULL;
l_obj = l_obj->next) {
obj_meta = (NvDsObjectMeta *) (l_obj->data);
if (obj_meta->class_id == PGIE_CLASS_ID_VEHICLE) {
vehicle_count++;
num_rects++;
}
if (obj_meta->class_id == PGIE_CLASS_ID_PERSON) {
person_count++;
num_rects++;
}
}
display_meta = nvds_acquire_display_meta_from_pool(batch_meta);
NvOSD_TextParams *txt_params = &display_meta->text_params[0];
display_meta->num_labels = 1;
txt_params->display_text = g_malloc0 (MAX_DISPLAY_LEN);
offset = snprintf(txt_params->display_text, MAX_DISPLAY_LEN, "Person = %d ", person_count);
offset = snprintf(txt_params->display_text + offset , MAX_DISPLAY_LEN, "Vehicle = %d ", vehicle_count);
/* Now set the offsets where the string should appear */
txt_params->x_offset = 10;
txt_params->y_offset = 12;
/* Font , font-color and font-size */
txt_params->font_params.font_name = "Serif";
txt_params->font_params.font_size = 10;
txt_params->font_params.font_color.red = 1.0;
txt_params->font_params.font_color.green = 1.0;
txt_params->font_params.font_color.blue = 1.0;
txt_params->font_params.font_color.alpha = 1.0;
/* Text background color */
txt_params->set_bg_clr = 1;
txt_params->text_bg_clr.red = 0.0;
txt_params->text_bg_clr.green = 0.0;
txt_params->text_bg_clr.blue = 0.0;
txt_params->text_bg_clr.alpha = 1.0;
nvds_add_display_meta_to_frame(frame_meta, display_meta);
}
g_print ("Frame Number = %d Number of objects = %d "
"Vehicle Count = %d Person Count = %d\n",
frame_number, num_rects, vehicle_count, person_count);
frame_number++;
return GST_PAD_PROBE_OK;
}
int main(int argc, char *argv[])
{
GstElement *pipeline, *source, *filter1, *convert,*filter2, *filter3, *vidconv, *filter4, *mux, *infer, *tiler, *osd, *transform , *sink, *bin, *convert2 , *vidconv2;
GMainLoop *loop;
GstCaps *caps1, *caps2, *caps3, *caps4;
GstPad *osd_sink_pad =NULL, *srcpad, *sinkpad;
loop = g_main_loop_new(NULL,FALSE);
gst_init(&argc, &argv);
pipeline = gst_pipeline_new("nv_pipeline");
gchar *string1 = "video/x-raw(memory:NVMM),format=(string)NV12";
source = gst_element_factory_make("v4l2src", "source");
filter1 = gst_element_factory_make("capsfilter", "filter1");
convert = gst_element_factory_make("nvvidconv", "convert");
filter2 = gst_element_factory_make("capsfilter", "filter2");
filter3 = gst_element_factory_make("capsfilter", "filter3");
filter4 = gst_element_factory_make("capsfilter", "filter4");
vidconv = gst_element_factory_make("nvvideoconvert", "vidconv");
mux = gst_element_factory_make("nvstreammux", "mux");
infer = gst_element_factory_make("nvinfer", "infer");
tiler = gst_element_factory_make("nvmultistreamtiler", "tiler");
osd = gst_element_factory_make("nvosd", "osd");
transform = gst_element_factory_make("nvegltransform", "transform");
sink = gst_element_factory_make("nveglglessink", "sink");
convert2 = gst_element_factory_make("nvvidconv", "convert2");
vidconv2 = gst_element_factory_make("nvvideoconvert", "vidconv2");
gst_bin_add_many(GST_BIN(pipeline), source,filter1,convert,filter2, convert2,filter3,vidconv, filter4,mux,infer, tiler,vidconv2, osd,transform,sink,NULL);
gst_element_link_many(source,filter1,convert,filter2, convert2,filter3, vidconv, filter4,mux,infer, tiler,vidconv2, osd,transform,sink,NULL);
osd_sink_pad = gst_element_get_static_pad(osd, "sink");
gst_pad_add_probe(osd_sink_pad, GST_PAD_PROBE_TYPE_BUFFER, osd_sink_pad_buffer_probe, NULL, NULL);
caps1 = gst_caps_new_simple("video/x-raw", "format",G_TYPE_STRING,"YUY2",NULL);
caps2 = gst_caps_from_string(string1);
caps3 = gst_caps_new_simple("video/x-raw", "format", G_TYPE_STRING,"NV12", NULL);
caps4 = gst_caps_from_string(string1);
g_object_set(G_OBJECT(filter1), "caps", caps1, NULL);
g_object_set(G_OBJECT(filter2), "caps", caps2, NULL);
g_object_set(G_OBJECT(filter3), "caps", caps3, NULL);
g_object_set(G_OBJECT(filter4), "caps", caps4, NULL);
g_object_set(G_OBJECT(mux), "live-source", 1, "name", "mux", "batch-size", 1, "width", 1280, "height", 720, NULL);
g_object_set(G_OBJECT(infer), "config-file-path","/opt/nvidia/deepstream/deepstream-4.0/sources/apps/sample_apps/deepstream-test1/dstest1_pgie_config.txt",NULL);
g_object_set(G_OBJECT(infer), "batch-size", 1, NULL);
g_object_set(G_OBJECT(tiler), "rows", 1, "columns", 1, "width", 1280, "height", 720, NULL);
gst_caps_unref(caps1);
gst_caps_unref(caps2);
gst_caps_unref(caps3);
gst_caps_unref(caps4);
gst_element_set_state(pipeline, GST_STATE_PLAYING);
g_print("Running ...\n");
g_main_loop_run(loop);
gst_element_set_state(pipeline,GST_STATE_NULL);
gst_object_unref(pipeline);
return 0;
}
which gives exactly the same console output as the gst-launch-1.0 command line, like
(deep_pads:15648): GLib-GObject-WARNING **: 11:29:18.761: cannot register existing type 'GstInterpolationMethod'
(deep_pads:15648): GLib-GObject-CRITICAL **: 11:29:18.761: g_param_spec_enum: assertion 'G_TYPE_IS_ENUM (enum_type)' failed
(deep_pads:15648): GLib-GObject-CRITICAL **: 11:29:18.761: validate_pspec_to_install: assertion 'G_IS_PARAM_SPEC (pspec)' failed
(deep_pads:15648): GStreamer-CRITICAL **: 11:29:18.814: gst_element_get_static_pad: assertion 'GST_IS_ELEMENT (element)' failed
(deep_pads:15648): GStreamer-CRITICAL **: 11:29:18.814: gst_pad_add_probe: assertion 'GST_IS_PAD (pad)' failed
0:00:00.843318172 15648 0x5591be52c0 INFO nvinfer gstnvinfer.cpp:519:gst_nvinfer_logger:<infer> NvDsInferContext[UID 1]:initialize(): Trying to create engine from model files
0:00:20.693301580 15648 0x5591be52c0 INFO nvinfer gstnvinfer.cpp:519:gst_nvinfer_logger:<infer> NvDsInferContext[UID 1]:generateTRTModel(): Storing the serialized cuda engine to file at /opt/nvidia/deepstream/deepstream-4.0/samples/models/Primary_Detector/resnet10.caffemodel_b1_int8.engine
except that the above C application doesn't display an output window, and doesn't display the rest.
Your first example fails because the data.source element is actually never linked to the data.convert element. Since both elements have static pads, you need to "manually" obtain and link them before setting the pipeline to GST_STATE_PLAYING:
GstPad *source_pad = gst_element_get_static_pad(data.source, "src");
GstPad *sink_pad = gst_element_get_static_pad(data.convert, "sink");
ret = gst_pad_link(source_pad, sink_pad);
You probably expected that the static source pad of your data.source element would somehow be linked automatically, so you registered
g_signal_connect(G_OBJECT(data.source), "pad-added", G_CALLBACK(pad_added_handler), &data );
But, as you can see from your debug output, pad_added_handler was never called, because a pad-added callback only fires for elements that create pads dynamically. For example, the demultiplexer tsdemux dynamically creates its source pads during discovery of the elementary streams, so in that case registering a pad-added callback would be necessary.
The first step in your learning curve is to understand the fundamental differences between "static" pads (mandatory, always exist, "manually" fetched), dynamic pads (exist only sometimes, created automatically by the element), and request pads (optional, "manually" requested when needed). After that, everything will be much easier for you.
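To make that concrete, here is a minimal sketch contrasting the three kinds (the element names are just examples, and error handling is omitted):
#include <gst/gst.h>
static void on_pad_added(GstElement *src, GstPad *new_pad, gpointer data)
{
    g_print("dynamic pad %s appeared\n", GST_PAD_NAME(new_pad));
}
int main(int argc, char **argv)
{
    gst_init(&argc, &argv);
    GstElement *conv  = gst_element_factory_make("videoconvert", NULL);
    GstElement *mixer = gst_element_factory_make("audiomixer", NULL);
    GstElement *demux = gst_element_factory_make("tsdemux", NULL);
    /* static pad: always exists, fetched by name */
    GstPad *conv_sink = gst_element_get_static_pad(conv, "sink");
    /* request pad: created on demand from the "sink_%u" template */
    GstPad *mix_sink = gst_element_get_request_pad(mixer, "sink_%u");
    /* dynamic pad: does not exist yet; you get notified when it appears */
    g_signal_connect(demux, "pad-added", G_CALLBACK(on_pad_added), NULL);
    g_print("static: %s, request: %s\n", GST_PAD_NAME(conv_sink), GST_PAD_NAME(mix_sink));
    gst_object_unref(conv_sink);
    gst_element_release_request_pad(mixer, mix_sink);
    gst_object_unref(mix_sink);
    return 0;
}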

Gstreamer EOS message handling in filesink to change location on the fly

Trying to switch output files on the fly, but can't handle EOS.
http://gstreamer-devel.966125.n4.nabble.com/Dynamically-updating-filesink-location-at-run-time-on-the-fly-td4660569.html
Quote:
Assuming you have a pipeline that looks like this:
audiosrc --> encoder --> mux --> filesink
then you'll need to change it to:
audiosrc --> encoder --> queue --> muxsink_bin
where muxsink_bin is a bin
ghostpad --> mux --> filesink
then the procedure is:
1 - Block the queue srcpad using gst_pad_set_blocked_async()
2 - In the blocked callback:
2a - unlink muxsink_bin with gst_pad_unlink()
2b - send an EOS event to the muxsink_bin sink pad with gst_pad_send_event()
2c - create a new muxsink_bin
2d - set the filesink location
2e - add the new bin to the pipeline with gst_bin_add()
2f - sync with parent using gst_element_sync_state_with_parent()
2g - link it to the queue srcpad with gst_pad_link()
2h - unblock the queue srcpad with gst_pad_set_blocked_async(). When the unblocked callback occurs you're recording again & no data has been lost. No action is required in the unblocked callback
3 - handle the EOS & delete the old muxsink_bin. I had a msg handler that I installed in my bin_init() function using "gstbin_class->handle_message = GST_DEBUG_FUNCPTR(msg_handler)" & in the handler:
3a - lock the bin state with gst_element_set_locked_state()
3b - set the state to NULL with gst_element_set_state()
3c - remove it from the pipeline with gst_bin_remove()
That's it. The only thing to be mindful of is that data must be flowing thru the pipeline for this to work.
Paddy
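In GStreamer 1.x terms (where gst_pad_set_blocked_async() has become gst_pad_add_probe() with GST_PAD_PROBE_TYPE_BLOCK_DOWNSTREAM), steps 1-2 boil down to roughly this sketch; old_bin, pipeline, make_muxsink_bin and next_location are placeholders, not real API:
/* Sketch of steps 2a-2h, run from the blocking probe installed in step 1. */
static GstPadProbeReturn blocked_cb(GstPad *srcpad, GstPadProbeInfo *info, gpointer user_data)
{
    GstPad *bin_sink = gst_element_get_static_pad(old_bin, "sink");
    gst_pad_unlink(srcpad, bin_sink);                        /* 2a */
    gst_pad_send_event(bin_sink, gst_event_new_eos());       /* 2b */
    gst_object_unref(bin_sink);
    GstElement *new_bin = make_muxsink_bin(next_location()); /* 2c, 2d */
    gst_bin_add(GST_BIN(pipeline), new_bin);                 /* 2e */
    gst_element_sync_state_with_parent(new_bin);             /* 2f */
    bin_sink = gst_element_get_static_pad(new_bin, "sink");
    gst_pad_link(srcpad, bin_sink);                          /* 2g */
    gst_object_unref(bin_sink);
    return GST_PAD_PROBE_REMOVE;                             /* 2h: unblock */
}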
The main sequence works except for the finalization of the old pipeline.
The difficulty is with point 3: I can send EOS to the ghostpad, and the filesink gets it. But how do I catch that EOS?
What does it mean "install msg handler using gstbin_class->handle_message = GST_DEBUG_FUNCPTR(msg_handler)"?
There is message forwarding.
It must be enabled on the bin:
g_object_set(G_OBJECT(bin), "message-forward", TRUE, 0);
Handling:
case GST_MESSAGE_ELEMENT:
{
const GstStructure *s = gst_message_get_structure (msg);
if (gst_structure_has_name (s, "GstBinForwarded"))
{
GstMessage *forward_msg = NULL;
gst_structure_get (s, "message", GST_TYPE_MESSAGE, &forward_msg, NULL);
if (GST_MESSAGE_TYPE (forward_msg) == GST_MESSAGE_EOS)
{
g_print ("EOS from element %s\n",
GST_OBJECT_NAME (GST_MESSAGE_SRC (forward_msg)));
DestroyBin();
CreateNewBin();
RemovePad();
}
gst_message_unref (forward_msg);
}
}
Full code:
#include <gst/gst.h>
#include <iostream>
#include <cstring>
#include <cstdio>
static gchar *opt_effects = NULL;
#define DEFAULT_EFFECTS "identity,exclusion,navigationtest," \
"agingtv,videoflip,vertigotv,gaussianblur,shagadelictv,edgetv"
static GstElement *pipeline;
static GstElement * muxer;
static GstElement * sink;
static GstElement * q2;
static int i=0;
GstElement * bin;
GstPad * muxerSinkPad;
gulong probeId;
static GQueue effects = G_QUEUE_INIT;
void CreateNewBin();
void DestroyBin();
void ChangeLocation();
void RemovePad();
static GstPadProbeReturn
pad_probe_cb (GstPad * pad, GstPadProbeInfo * info, gpointer user_data)
{
GstPad *sinkPad = gst_element_get_static_pad(bin, "sink");
gst_pad_unlink(pad, sinkPad);
gst_pad_send_event(sinkPad, gst_event_new_eos());
gst_object_unref(sinkPad);
return GST_PAD_PROBE_OK;
}
static gboolean
timeout_cb (gpointer user_data)
{
static int i=0;
if(i==0)
{
GstPad * q2SrcPad;
q2SrcPad = gst_element_get_static_pad(q2, "src");
std::cout << "Timeout: " << q2SrcPad << std::endl;
probeId = gst_pad_add_probe (q2SrcPad, GST_PAD_PROBE_TYPE_BLOCK_DOWNSTREAM,
pad_probe_cb, user_data, NULL);
gst_object_unref(q2SrcPad);
return TRUE;
}
return FALSE;
}
static gboolean
bus_cb (GstBus * bus, GstMessage * msg, gpointer user_data)
{
GMainLoop *loop = (GMainLoop*)user_data;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_ERROR:{
GError *err = NULL;
gchar *dbg;
gst_message_parse_error (msg, &err, &dbg);
gst_object_default_error (msg->src, err, dbg);
g_error_free (err);
g_free (dbg);
g_main_loop_quit (loop);
break;
}
case GST_MESSAGE_EOS:
std::cout << "EOS message is got" << std::endl;
break;
case GST_MESSAGE_ELEMENT:
{
const GstStructure *s = gst_message_get_structure (msg);
if (gst_structure_has_name (s, "GstBinForwarded"))
{
GstMessage *forward_msg = NULL;
gst_structure_get (s, "message", GST_TYPE_MESSAGE, &forward_msg, NULL);
if (GST_MESSAGE_TYPE (forward_msg) == GST_MESSAGE_EOS)
{
g_print ("EOS from element %s\n",
GST_OBJECT_NAME (GST_MESSAGE_SRC (forward_msg)));
DestroyBin();
CreateNewBin();
RemovePad();
}
gst_message_unref (forward_msg);
}
}
break;
default:
break;
}
return TRUE;
}
int
main (int argc, char **argv)
{
GError *err = NULL;
GMainLoop *loop;
GstElement *src, *q1, *encoder; /* q2, muxer and sink are the globals above */
gst_init(&argc, &argv);
pipeline = gst_pipeline_new ("pipeline");
src = gst_element_factory_make ("videotestsrc", NULL);
//Create a caps filter between videosource videoconvert
std::string capsString = "video/x-raw,format=YV12,width=320,height=240,framerate=30/1";
GstCaps * dataFilter = gst_caps_from_string(capsString.c_str());
q1 = gst_element_factory_make ("queue", NULL);
encoder = gst_element_factory_make ("x264enc", NULL);
q2 = gst_element_factory_make("queue", NULL);
gst_bin_add_many(GST_BIN(pipeline), src, q1, encoder, q2, NULL);
gboolean link = gst_element_link_filtered(src, q1, dataFilter);
link &= gst_element_link(q1, encoder);
link &= gst_element_link(encoder, q2);
CreateNewBin();
gst_element_set_state (pipeline, GST_STATE_PLAYING);
loop = g_main_loop_new (NULL, FALSE);
gst_bus_add_watch (GST_ELEMENT_BUS (pipeline), bus_cb, loop);
g_timeout_add_seconds (10, timeout_cb, loop);
g_main_loop_run (loop);
gst_element_set_state (pipeline, GST_STATE_NULL);
gst_object_unref (pipeline);
return 0;
}
void RemovePad()
{
GstPad * q2SrcPad;
q2SrcPad = gst_element_get_static_pad(q2, "src");
gst_pad_remove_probe(q2SrcPad, probeId);
gst_object_unref(q2SrcPad);
}
void DestroyBin()
{
gst_element_set_state(bin, GST_STATE_NULL);
gst_bin_remove(GST_BIN(pipeline), bin);
}
void CreateNewBin()
{
static std::string fileLocPattern = "deneme%d.mkv";
char buffer[32]; // "deneme10.mkv" would already overflow a 12-byte buffer
memset(buffer, 0, sizeof(buffer));
snprintf(buffer, sizeof(buffer), fileLocPattern.c_str(), i++);
//Create Muxer Element
muxer = gst_element_factory_make("matroskamux", "MatroskaMuxer");
//Create File Sink Element
sink = gst_element_factory_make("filesink", buffer);
g_object_set(G_OBJECT(sink), "location", buffer, 0);
//Create muxsinkBin
bin = gst_bin_new(buffer);
g_object_set(G_OBJECT(bin), "message-forward", TRUE, 0);
//Add a src pad to the bin
gst_bin_add_many(GST_BIN(bin), muxer, sink, NULL);
gboolean linkState = TRUE;
//Connect elements within muxsink_bin
//Link: matroskamuxer -> filesink
linkState &= gst_element_link_many(muxer, sink, 0);
//Add this bin to pipeline
gst_bin_add(GST_BIN(pipeline), bin);
//Create ghostpad and manually link muxsinkBin and remaining part of the pipeline
{
GstPadTemplate * muxerSinkPadTemplate;
if( !(muxerSinkPadTemplate = gst_element_class_get_pad_template(GST_ELEMENT_GET_CLASS(muxer), "video_%u")) )
{
std::cout << "Unable to get source pad template from muxing element" << std::endl;
}
//Obtain dynamic pad from element
muxerSinkPad = gst_element_request_pad(muxer, muxerSinkPadTemplate, 0, 0);
//Add ghostpad
GstPad * ghostPad = gst_ghost_pad_new("sink", muxerSinkPad);
gst_element_add_pad(bin, ghostPad);
gst_object_unref(GST_OBJECT(muxerSinkPad));
gst_element_sync_state_with_parent(bin);
//Get src pad from queue element
GstPad * queueBeforeBinSrcPad = gst_element_get_static_pad(q2, "src");
//Link queuebeforebin to ghostpad
if (gst_pad_link(queueBeforeBinSrcPad, ghostPad) != GST_PAD_LINK_OK )
{
std::cout << "QueueBeforeBin cannot be linked to MuxerSinkPad." << std::endl;
}
gst_object_unref(queueBeforeBinSrcPad);
}
}
http://gstreamer-devel.966125.n4.nabble.com/Listening-on-EOS-events-for-GstBin-td4669126.html
http://gstreamer-devel.966125.n4.nabble.com/file/n4669476/main.cpp
Depending on your use case, you can use the multifilesink element. It will switch files on the fly on certain events: a file for each buffer, a file for each segment... Check its properties and see if there is anything that would work for you.
It also serves as a good code base in case you want to write something similar (or maybe extend it?).
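For example, something along these lines (an untested sketch; the next-file values are listed by gst-inspect-1.0 multifilesink) starts a new file at every key frame:
gst-launch-1.0 videotestsrc is-live=true ! x264enc key-int-max=30 ! h264parse ! multifilesink location=chunk-%05d.h264 next-file=key-frame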
I'll post the code of the actual custom GstBin, aka 'muxsink_bin', that I ended up implementing to do that forwarding and EOS handling for the 'detachable sink part' of the pipeline.
plisolatedbin.h:
#pragma once
#include <gst/gst.h>
#include <gst/gstbin.h>
G_BEGIN_DECLS
#define PL_TYPE_ISOLATED_BIN (pl_isolated_bin_get_type ())
#define PL_IS_ISOLATED_BIN(obj) (G_TYPE_CHECK_INSTANCE_TYPE ((obj), PL_TYPE_ISOLATED_BIN))
#define PL_IS_ISOLATED_BIN_CLASS(klass) (G_TYPE_CHECK_CLASS_TYPE ((klass), PL_TYPE_ISOLATED_BIN))
#define PL_ISOLATED_BIN_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS ((obj), PL_TYPE_ISOLATED_BIN, PlIsolatedBinClass))
#define PL_ISOLATED_BIN(obj) (G_TYPE_CHECK_INSTANCE_CAST ((obj), PL_TYPE_ISOLATED_BIN, PlIsolatedBin))
#define PL_ISOLATED_BIN_CLASS(klass) (G_TYPE_CHECK_CLASS_CAST ((klass), PL_TYPE_ISOLATED_BIN, PlIsolatedBinClass))
#define PL_ISOLATED_BIN_CAST(obj) ((PlIsolatedBin*)(obj))
typedef struct _PlIsolatedBin PlIsolatedBin;
typedef struct _PlIsolatedBinClass PlIsolatedBinClass;
/**
* Does not forward EOS to parent by default.
*/
struct _PlIsolatedBin
{
GstBin bin;
};
struct _PlIsolatedBinClass
{
GstBinClass parent_class;
};
GType pl_isolated_bin_get_type();
GstElement* pl_isolated_bin_new();
G_END_DECLS
plisolatedbin.c:
#include "plisolatedbin.h"
#include <assert.h>
G_DEFINE_TYPE(PlIsolatedBin, pl_isolated_bin, GST_TYPE_BIN)
static void pl_isolated_bin_init(PlIsolatedBin *plisolatedbin)
{
}
static void pl_isolated_bin_handle_message_func(GstBin *bin, GstMessage *message)
{
if (GST_MESSAGE_TYPE(message) != GST_MESSAGE_EOS)
{
GST_BIN_CLASS(pl_isolated_bin_parent_class)->handle_message(bin, message);
}
else
{
GstMessage *forwarded = gst_message_new_element(GST_OBJECT_CAST(bin), gst_structure_new("PlIsolatedBinForwarded", "message", GST_TYPE_MESSAGE, message, NULL));
gst_element_post_message(GST_ELEMENT_CAST(bin), forwarded);
}
}
static void pl_isolated_bin_class_init(PlIsolatedBinClass *class)
{
class->parent_class.handle_message = GST_DEBUG_FUNCPTR(pl_isolated_bin_handle_message_func);
}
GstElement* pl_isolated_bin_new()
{
return g_object_new(PL_TYPE_ISOLATED_BIN, NULL);
}
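For completeness, a minimal sketch of how the forwarded EOS might be caught from an ordinary bus watch (mirroring the GstBinForwarded handling above, but with the PlIsolatedBinForwarded structure name this bin posts):
static gboolean bus_cb(GstBus *bus, GstMessage *msg, gpointer user_data)
{
    if (GST_MESSAGE_TYPE(msg) == GST_MESSAGE_ELEMENT) {
        const GstStructure *s = gst_message_get_structure(msg);
        if (gst_structure_has_name(s, "PlIsolatedBinForwarded")) {
            GstMessage *forwarded = NULL;
            gst_structure_get(s, "message", GST_TYPE_MESSAGE, &forwarded, NULL);
            if (GST_MESSAGE_TYPE(forwarded) == GST_MESSAGE_EOS) {
                g_print("EOS reached the detachable sink bin; it is now safe to set it to NULL and gst_bin_remove() it\n");
            }
            gst_message_unref(forwarded);
        }
    }
    return TRUE;
}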

Gstreamer rtsp application for audio and video

I was trying to develop an application for the pipeline:
gst-launch-1.0 rtspsrc location="rtsp://192.168.3.30:8554/rajvi" latency=0 name=demux demux. ! queue ! rtpmp4gdepay ! aacparse ! avdec_aac ! audioconvert ! audioresample ! autoaudiosink demux. ! queue ! rtph264depay ! h264parse ! omxh264dec ! videoconvert ! videoscale ! video/x-raw,width=176, height=144 ! ximagesink
Following is the code which I have implemented:
#include <gst/gst.h>
static void onPadAdded(GstElement *element, GstPad *pad, gpointer data)
{
gchar *name;
name = gst_pad_get_name(pad);
g_print("A new pad %s was created\n", name);
GstCaps * p_caps = gst_pad_get_pad_template_caps (pad);
gchar * description = gst_caps_to_string(p_caps);
g_free(description);
gst_caps_unref(p_caps); // the template caps are returned with a reference that must be dropped
GstElement *depay = GST_ELEMENT(data);
if(gst_element_link_pads(element, name, depay, "sink") == 0)
{
g_print("cb_new_rtspsrc_pad : failed to link elements \n");
}
g_free(name);
}
int main(int argc, char *argv[]) {
GstElement *source, *videosink, *audio, *video, *convert, *pipeline, *audioDepay, *audioQueue, *videoQueue,
*audioParse, *audioDecode, *audioConvert, *audioResample, *audioSink, *videoDepay, *videoParser, *videoDecode, *videoConvert, *videoScale, *videoSink;
GstCaps *capsFilter;
GstBus *bus;
GstMessage *msg;
GstPad *pad;
GstPad *sinkpad,*ghost_sinkpad;
gboolean link_ok;
GstStateChangeReturn ret;
/* Initialize GStreamer */
gst_init (&argc, &argv);
/* Create Elements */
pipeline = gst_pipeline_new("rtsp-pipeline");
source = gst_element_factory_make ("rtspsrc", "source");
/*audio bin*/
audioQueue = gst_element_factory_make ("queue", "audio-queue");
audioDepay = gst_element_factory_make ("rtpmp4gdepay", "audio-depayer");
audioParse = gst_element_factory_make ("aacparse", "audio-parser");
audioDecode = gst_element_factory_make ("avdec_aac", "audio-decoder");
audioConvert = gst_element_factory_make ("audioconvert", "aconv");
audioResample = gst_element_factory_make ("audioresample", "audio-resample");
audioSink = gst_element_factory_make ("autoaudiosink", "audiosink");
if (!audioQueue || !audioDepay || !audioParse || !audioConvert || !audioResample || !audioSink)
{
g_printerr("Cannot create audio elements \n");
return 0;
}
g_object_set(source, "location", "rtsp://192.168.3.30:8554/rajvi", NULL);
g_object_set(source, "latency", 0, NULL);
g_signal_connect(G_OBJECT(source), "pad-added", G_CALLBACK(onPadAdded), audioDepay);
gst_bin_add_many(GST_BIN(pipeline), source, audioQueue, audioDepay, audioParse, audioDecode,
audioConvert, audioResample, audioSink, NULL);
if (!gst_element_link_many(audioQueue, audioDepay, audioParse, audioDecode, audioConvert, audioResample, audioSink, NULL))
{
g_printerr("Error linking fields ...1 \n");
return 0;
}
video = gst_bin_new ("videobin");
videoQueue = gst_element_factory_make ("queue", "video-queue");
videoDepay= gst_element_factory_make ("rtph264depay", "video-depayer");
videoParser = gst_element_factory_make ("h264parse", "video-parser");
videoDecode = gst_element_factory_make ("omxh264dec", "video-decoder");
videoConvert = gst_element_factory_make("videoconvert", "convert");
videoScale = gst_element_factory_make("videoscale", "video-scale");
videoSink = gst_element_factory_make("ximagesink", "video-sink");
capsFilter = gst_caps_new_simple("video/x-raw",
"width", G_TYPE_INT, 176,
"height", G_TYPE_INT, 144,
NULL);
if (!videoQueue || !videoDepay || !videoParser || !videoDecode || !videoConvert || !videoScale || !videoSink || !capsFilter)
{
g_printerr("Cannot create video elements \n");
return 0;
}
gst_bin_add_many(GST_BIN(video), videoQueue, videoDepay, videoParser, videoDecode, videoConvert, videoScale,
videoSink, NULL);
/* set property value */
link_ok = gst_element_link_filtered(videoConvert, videoSink, capsFilter);
gst_caps_unref (capsFilter);
if (!link_ok) {
g_warning ("Failed to link element1 and element2!");
}
sinkpad = gst_element_get_static_pad (videoConvert, "sink");
ghost_sinkpad = gst_ghost_pad_new ("sink", sinkpad);
gst_pad_set_active (ghost_sinkpad, TRUE);
gst_element_add_pad (video, ghost_sinkpad);
if (!gst_element_link_many(videoQueue, videoDepay, videoParser, videoDecode, videoScale, NULL))
{
g_printerr("Error linking fields... 2 \n");
return 0;
}
gst_bin_add_many (GST_BIN(pipeline), video,NULL);
/* Start playing */
gst_element_set_state ( pipeline, GST_STATE_PLAYING);
/* Wait until error or EOS */
bus = gst_element_get_bus (pipeline);
msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE, GST_MESSAGE_ERROR | GST_MESSAGE_EOS);
/* Free resources */
if (msg != NULL)
gst_message_unref (msg);
gst_object_unref (bus);
gst_element_set_state (pipeline, GST_STATE_NULL);
gst_object_unref (pipeline);
return 0;
}
I am getting an error when linking the pipeline->audio->video bins.
If you put the video and audio in the pipeline bin all together then you can do it. Figure out what your caps are for the video and audio and you should be able to link them.
// ----------------------------------
// pad-added signal
// ----------------------------------
static void onPadAdded(GstElement* element, GstPad* pad, gpointer user_data)
{
gchar *name;
GstCaps * p_caps;
GstElement* nextElement = NULL; /* stays NULL if the pad is neither audio nor video */
GstElement* pipeline = (GstElement*)user_data;
name = gst_pad_get_name(pad);
g_print("A new pad %s was created\n", name);
p_caps = gst_pad_get_pad_template_caps(pad);
if (strstr(name, "[CAPS FOR VIDEO CONTAIN]") != NULL)
{
std::cout << std::endl << "------------------------ Video -------------------------------" << std::endl;
nextElement = gst_bin_get_by_name(GST_BIN(pipeline), "video-depayer");
}
else if (strstr(name, "[CAPS FOR AUDIO CONTAIN]") != NULL)
{
std::cout << std::endl << "------------------------ Audio -------------------------------" << std::endl;
nextElement = gst_bin_get_by_name(GST_BIN(pipeline), "audio-depayer");
}
if (nextElement != NULL)
{
if (!gst_element_link_filtered(element, nextElement, p_caps))
//if (!gst_element_link_pads_filtered(element, name, nextElement, "sink", p_caps))
{
std::cout << std::endl << "Failed to link video element to src to sink" << std::endl;
}
gst_object_unref(nextElement);
}
g_free(name);
gst_caps_unref(p_caps);
}
// ----------------------------------
// main
// ----------------------------------
int main(int argc, char *argv[])
{
GstElement *source, *audio, *convert, *pipeline, *audioDepay, *audioQueue, *videoQueue,
*audioParse, *audioDecode, *audioConvert, *audioResample, *audioSink, *videoDepay, *videoParser, *videoDecode, *videoConvert, *videoScale, *videoSink;
GstCaps *capsFilter;
GstBus *bus;
GstMessage *msg;
GstPad *pad;
gboolean link_ok;
GstStateChangeReturn ret;
/* Initialize GStreamer */
gst_init(&argc, &argv);
/* Create Elements */
pipeline = gst_pipeline_new("rtsp-pipeline");
source = gst_element_factory_make("rtspsrc", "source");
/*audio bin*/
audioQueue = gst_element_factory_make("queue", "audio-queue");
audioDepay = gst_element_factory_make("rtpmp4gdepay", "audio-depayer");
audioParse = gst_element_factory_make("aacparse", "audio-parser");
audioDecode = gst_element_factory_make("avdec_aac", "audio-decoder");
audioConvert = gst_element_factory_make("audioconvert", "aconv");
audioResample = gst_element_factory_make("audioresample", "audio-resample");
audioSink = gst_element_factory_make("autoaudiosink", "audiosink");
if (!audioQueue || !audioDepay || !audioParse || !audioConvert || !audioResample || !audioSink)
{
g_printerr("Cannot create audio elements \n");
return 0;
}
g_object_set(source, "location", "rtsp://192.168.3.30:8554/rajvi", NULL);
g_object_set(source, "latency", 0, NULL);
g_signal_connect(G_OBJECT(source), "pad-added", G_CALLBACK(onPadAdded), pipeline);
gst_bin_add_many(GST_BIN(pipeline), source, audioQueue, audioDepay, audioParse, audioDecode,
audioConvert, audioResample, audioSink, NULL);
if (!gst_element_link_many(audioQueue, audioDepay, audioParse, audioDecode, audioConvert, audioResample, audioSink, NULL))
{
g_printerr("Error linking fields ...1 \n");
return 0;
}
videoQueue = gst_element_factory_make("queue", "video-queue");
videoDepay = gst_element_factory_make("rtph264depay", "video-depayer");
videoParser = gst_element_factory_make("h264parse", "video-parser");
videoDecode = gst_element_factory_make("omxh264dec", "video-decoder");
videoConvert = gst_element_factory_make("videoconvert", "convert");
videoScale = gst_element_factory_make("videoscale", "video-scale");
videoSink = gst_element_factory_make("ximagesink", "video-sink");
capsFilter = gst_caps_new_simple("video/x-raw",
"width", G_TYPE_INT, 176,
"height", G_TYPE_INT, 144,
NULL);
if (!videoQueue || !videoDepay || !videoParser || !videoDecode || !videoConvert || !videoScale || !videoSink || !capsFilter)
{
g_printerr("Cannot create video elements \n");
return 0;
}
gst_bin_add_many(GST_BIN(pipeline), videoQueue, videoDepay, videoParser, videoDecode, videoConvert, videoScale,
videoSink, NULL);
/* set property value */
link_ok = gst_element_link_filtered(videoConvert, videoSink, capsFilter);
gst_caps_unref(capsFilter);
if (!link_ok) {
g_warning("Failed to link element1 and element2!");
}
if (!gst_element_link_many(videoQueue, videoDepay, videoParser, videoDecode, videoScale, videoConvert, NULL))
{
g_printerr("Error linking fields... 2 \n");
return 0;
}
/* Start playing */
gst_element_set_state(pipeline, GST_STATE_PLAYING);
/* Wait until error or EOS */
bus = gst_element_get_bus(pipeline);
msg = gst_bus_timed_pop_filtered(bus, GST_CLOCK_TIME_NONE,(GstMessageType)( GST_MESSAGE_ERROR | GST_MESSAGE_EOS));
/* Free resources */
if (msg != NULL)
gst_message_unref(msg);
gst_object_unref(bus);
gst_element_set_state(pipeline, GST_STATE_NULL);
gst_object_unref(pipeline);
return 0;
}
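A note on the two placeholder strings: the quickest way to find what to match on is to dump the caps of every pad rtspsrc creates and read off the media type; with rtspsrc the caps string normally contains media=(string)video or media=(string)audio. A minimal sketch, with a helper name of my own choosing:

static void inspect_pad(GstElement *element, GstPad *pad, gpointer user_data)
{
    GstCaps *caps = gst_pad_get_current_caps(pad);
    if (caps == NULL)
        caps = gst_pad_query_caps(pad, NULL); /* pad may not be negotiated yet */
    gchar *name = gst_pad_get_name(pad);
    gchar *desc = gst_caps_to_string(caps);
    g_print("pad '%s' caps: %s\n", name, desc);
    g_free(desc);
    g_free(name);
    gst_caps_unref(caps);
}

Connect it with g_signal_connect(source, "pad-added", G_CALLBACK(inspect_pad), NULL), run once, and copy the distinguishing substrings into the strstr() checks.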

running the gstreamer pipeline (not able to get video and audio data in the callback)

I'm a newbie to GStreamer and I want to get both the audio and video buffers from a 3gp file and do some processing on them in a callback.
(I start my pipeline in a separate thread; the pipeline delivers audio buffers in a callback AudioCallback and video buffers in VideoCallback.)
This is how my pipeline looks:
#include <gst/gst.h>
#include <gst/app/gstappsink.h>
GstElement* audioQueue; // global variable, needed in on_pad_added (can't pass both queues while connecting the demuxer callback)
GstElement* videoQueue; // global variable, needed in on_pad_added (can't pass both queues while connecting the demuxer callback)
//static functions
static gboolean
bus_call (GstBus* bus, GstMessage* msg, gpointer data)
{
GMainLoop* loop = (GMainLoop*) data;
switch (GST_MESSAGE_TYPE (msg))
{
case GST_MESSAGE_EOS:
g_main_loop_quit (loop);
break;
case GST_MESSAGE_ERROR: {
gchar *debug;
GError *error;
gst_message_parse_error (msg, &error, &debug);
g_free (debug);
g_printerr ("Error: %s\n", error->message);
g_error_free (error);
g_main_loop_quit (loop);
break;
}
default:
break;
}
return TRUE;
}
static void link_two_elements(GstElement* src_element, GstElement* sink_element)
{
if(!gst_element_link(src_element, sink_element))
g_printerr ("Linking Error");
}
static void
on_pad_added (GstElement *element,
GstPad *pad,
gpointer data)
{
GstCaps *caps;
GstStructure *str;
gchar *tex;
GstPad* sinkpad;
/* check media type */
caps = gst_pad_get_caps (pad);
str = gst_caps_get_structure (caps, 0);
tex = (gchar*)gst_structure_get_name(str);
if(g_strrstr(tex,"audio"))
{
//GstElement *audioQueue = (GstElement *) data;
sinkpad = gst_element_get_static_pad (audioQueue, "sink");
if(sinkpad)
{
GstPadLinkReturn linkReturn = gst_pad_link (pad, sinkpad);
gst_object_unref (sinkpad);
}
}
if(g_strrstr(tex,"video"))
{
//GstElement *videoQueue = (GstElement *) data;
sinkpad = gst_element_get_static_pad (videoQueue, "sink");
GstPadLinkReturn linkReturn = gst_pad_link (pad, sinkpad);
gst_object_unref (sinkpad);
}
gst_caps_unref (caps); /* drop the ref taken by gst_pad_get_caps() */
}
void runPipeline()
{
GMainLoop *loop;
GstElement *__pPipeline, *source, *demuxer, *audioDecoder, *audioConverter, *audioresample, /**audioQueue,*/ *audioSink, *videoDecoder, *videoSink, /**videoQueue,*/ *ffmpegcolorspace, *videoscale;
GstBus* bus;
//Initialisation
gst_init (NULL, NULL);
loop = g_main_loop_new (NULL, FALSE);
// Create gstreamer elements
__pPipeline = gst_pipeline_new("test_appsink");
source = gst_element_factory_make ("filesrc", "file-source");
demuxer = gst_element_factory_make("qtdemux", "demuxer");
//audioDecoder = gst_element_factory_make("ffdec_mp3", "audioDecoder");
audioDecoder = gst_element_factory_make("decodebin", "audioDecoder");
audioConverter = gst_element_factory_make("audioconvert", "audioConverter");
audioresample = gst_element_factory_make("audioresample", "audioresample");
audioSink = gst_element_factory_make("appsink", "audioSink");
audioQueue = gst_element_factory_make("queue2", "audioQueue");
//videoDecoder = gst_element_factory_make("ffdec_h264", "videoDecoder");
videoQueue = gst_element_factory_make("queue2", "videoQueue");
videoDecoder = gst_element_factory_make("decodebin ", "videoDecoder");
ffmpegcolorspace = gst_element_factory_make("ffmpegcolorspace", "ffmpegcolorspace");
videoscale = gst_element_factory_make("videoscale", "videoscale");
videoSink = gst_element_factory_make("appsink", "videoSink");
//appsink = gst_element_factory_make("appsink", "sink-buffer");
if (!__pPipeline || !source || !demuxer || !audioDecoder || !audioConverter || !audioresample || !audioSink || !videoSink || !audioQueue || !videoQueue || !videoDecoder || !ffmpegcolorspace || !videoscale)
{
g_printerr ("One or more elements could not be created\n");
return;
}
//we set the input filename to the source element
g_object_set (G_OBJECT (source), "location", "/etc/20000101-161404.3gp", NULL);
//Make appsink emit the "new-preroll" and "new-buffer" signals.
gst_app_sink_set_emit_signals ((GstAppSink*) audioSink, TRUE);
gst_app_sink_set_emit_signals ((GstAppSink*) videoSink, TRUE);
//we add a message handler
bus = gst_pipeline_get_bus (GST_PIPELINE (__pPipeline));
gst_bus_add_watch (bus, bus_call, loop);
gst_object_unref (bus);
//we add all elements into the pipeline
gst_bin_add_many (GST_BIN (__pPipeline),
source, demuxer, videoDecoder, audioDecoder, audioConverter, audioresample, audioSink, videoSink,
audioQueue, videoQueue, ffmpegcolorspace, videoscale, NULL);
//link source and demuxer seperately
link_two_elements(source, demuxer);
//link rest of the elements
int retValVideoLinking = (int)gst_element_link_many (videoQueue, videoDecoder, ffmpegcolorspace, videoscale, videoSink, NULL);
int retValAudioLinking = (int)gst_element_link_many (audioQueue, audioDecoder, audioConverter, audioresample, audioSink, NULL);
gulong sigConRet = g_signal_connect (demuxer, "pad-added", G_CALLBACK (on_pad_added), NULL);
_ArAudioIn audioInstance = _ArAudioIn::GetArAudioInstance();
g_signal_connect (audioSink, "new-buffer", G_CALLBACK (AudioCallback), NULL); // AudioCallback static API
g_signal_connect (videoSink, "new-buffer", G_CALLBACK (VideoCallback), NULL); // VideoCallback static API
//Set the pipeline to "playing" state
GstStateChangeReturn state = gst_element_set_state (__pPipeline, GST_STATE_PLAYING);
g_main_loop_run (loop);
return;
}
I'm only getting a single video buffer in my VideoCallback, and in on_pad_added I'm getting a linking error for the audio pad:
GST_PAD_LINK_NOFORMAT = -4
I'm trying to link the queue's sink pad to the pad received in on_pad_added; the same approach works for video but not for audio.
If anybody has any idea about this, please give me some pointers to get rid of this error and make the pipeline work.
It would be nice if you cleaned up your code before asking us to debug it. As general advice, check the return values and either log a warning or simply exit(1) to make sure your pipeline setup works (e.g. in the pad-added handler). I'd also start with a normal video and audio sink to check that the file plays at all.
Finally, it is usually a bad idea to pull data out of the pipeline. Perhaps you could tell us what you want to do with the data once you have it in your callback, so that we can give better advice.
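To make the first point concrete: GST_PAD_LINK_NOFORMAT means the two pads have no caps in common, so the fastest diagnosis is to print the caps at the moment the link fails. A hedged sketch of a defensive pad-added handler, written against the 0.10 API used above and the question's global audioQueue/videoQueue:

static void on_pad_added(GstElement *element, GstPad *pad, gpointer data)
{
    GstCaps *caps = gst_pad_get_caps(pad); /* 0.10; use gst_pad_get_current_caps() in 1.0 */
    const gchar *media = gst_structure_get_name(gst_caps_get_structure(caps, 0));
    GstElement *target = NULL;

    if (g_str_has_prefix(media, "audio"))
        target = audioQueue;
    else if (g_str_has_prefix(media, "video"))
        target = videoQueue;

    if (target != NULL) {
        GstPad *sinkpad = gst_element_get_static_pad(target, "sink");
        GstPadLinkReturn ret = gst_pad_link(pad, sinkpad);
        if (ret != GST_PAD_LINK_OK) {
            gchar *desc = gst_caps_to_string(caps);
            g_printerr("link failed (%d) for pad with caps %s\n", ret, desc);
            g_free(desc);
            exit(1); /* fail fast while debugging; requires <stdlib.h> */
        }
        gst_object_unref(sinkpad);
    }
    gst_caps_unref(caps);
}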