gstreamer rtsp tee appsink can't emit signal new-sample - c++

I am using GStreamer to play and process an RTSP stream:
rtspsrc location=rtspt://admin:scut123456#192.168.1.64:554/Streaming/Channels/1 ! tee name=t ! queue ! decodebin ! videoconvert ! autovideosink t. ! queue ! rtph264depay ! h264parse ! appsink name=mysink
and my C++ code looks like this:
#include <gst/gst.h>

/* Print the names of all pads in the list (for debugging). */
void printIt(GList *p) {
    if (!p) {
        g_print("p null\n");
        return;
    }
    while (p) {
        GstPad *pad = (GstPad *) p->data;
        g_print("[%s]", GST_OBJECT_NAME(pad));
        p = p->next;
    }
    g_print("\n");
}

GstFlowReturn new_sample_cb (GstElement *appsink, gpointer udata) {
    g_print("new-sample cb\n");
    return GST_FLOW_OK;
}

GstFlowReturn new_preroll_cb (GstElement *appsink, gpointer udata) {
    g_print("new_preroll_cb cb\n");
    return GST_FLOW_OK;
}
int
main (int argc, char *argv[]) {
    GstElement *pipeline;
    GstBus *bus;
    GstMessage *msg;

    /* Initialize GStreamer */
    gst_init (&argc, &argv);

    /* Build the pipeline */
    pipeline = gst_parse_launch("rtspsrc location=rtspt://admin:scut123456#192.168.1.64:554/Streaming/Channels/1 ! tee name=t ! queue ! decodebin ! videoconvert ! autovideosink t. ! queue ! rtph264depay ! h264parse ! appsink name=mysink", NULL);
    GstElement *appsink = gst_bin_get_by_name(GST_BIN(pipeline), "mysink");
    printIt(appsink->pads);
    g_signal_connect(appsink, "new-sample", G_CALLBACK(new_sample_cb), pipeline);
    g_print("sig conn new-sample\n");
    g_signal_connect(appsink, "new-preroll", G_CALLBACK(new_preroll_cb), pipeline);
    g_print("sig conn new-preroll\n");

    /* Start playing */
    gst_element_set_state (pipeline, GST_STATE_PLAYING);

    /* Wait until error or EOS */
    bus = gst_element_get_bus (pipeline);
    msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,
        GstMessageType(GST_MESSAGE_ERROR | GST_MESSAGE_EOS));

    /* Free resources */
    if (msg != NULL)
        gst_message_unref (msg);
    gst_object_unref (bus);
    gst_element_set_state (pipeline, GST_STATE_NULL);
    gst_object_unref (pipeline);
    return 0;
}
When I compile and run it, video is output through the autovideosink, but the appsink's new-sample signal is never invoked. What should I do if I want to process a frame in the appsink?
Thanks.

By default appsink favors callbacks over signals for performance reasons (though I wouldn't consider your use case a performance problem). For appsink to emit signals you will need to set its emit-signals property to true; it defaults to false.
P.S. Apart from the above, I think you will also need a GMainLoop for event processing, as demonstrated in the GStreamer examples.
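A minimal sketch of both suggestions applied to the code above (the sample handling is illustrative; "pull-sample" is the signal counterpart of gst_app_sink_pull_sample()):
/* in the callback, actually take the new sample out of the appsink */
static GstFlowReturn
new_sample_cb (GstElement *appsink, gpointer udata)
{
    GstSample *sample = NULL;

    g_signal_emit_by_name (appsink, "pull-sample", &sample);
    if (sample) {
        GstBuffer *buf = gst_sample_get_buffer (sample);
        g_print ("got sample, %" G_GSIZE_FORMAT " bytes\n",
            gst_buffer_get_size (buf));
        gst_sample_unref (sample);
    }
    return GST_FLOW_OK;
}

/* after gst_bin_get_by_name (GST_BIN (pipeline), "mysink"): */
g_object_set (appsink, "emit-signals", TRUE, NULL);
g_signal_connect (appsink, "new-sample", G_CALLBACK (new_sample_cb), NULL);

/* and run a main loop instead of only blocking on the bus */
GMainLoop *loop = g_main_loop_new (NULL, FALSE);
g_main_loop_run (loop);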

Related

GStreamer. Probe after rtph265pay never called

I have an RTSP server and I want to extend the RTP buffer header. For this purpose I added a probe to the src pad of rtph265pay, but it is never called. My pipeline:
( appsrc name=vsrc ! nvvidconv ! video/x-raw(memory:NVMM),format=NV12 !
omxh265enc MeasureEncoderLatency=true bitrate=20000000 control-rate=2 !
rtph265pay name=pay0 pt=96 )
Code where i attach probe:
static GstPadProbeReturn test_probe (GstPad *pad, GstPadProbeInfo *info,
                                     gpointer user_data)
{
    cout << "i'm here";
    return GST_PAD_PROBE_OK;  /* a probe must return a GstPadProbeReturn */
}
void mediaConfigure (GstRTSPMediaFactory *factory, GstRTSPMedia *media,
                     gpointer user_data)
{
    GstElement *element, *rtph265pay;
    GstPad *pad;

    element = gst_rtsp_media_get_element (media);
    rtph265pay = gst_bin_get_by_name_recurse_up (GST_BIN (element), "pay0");
    pad = gst_element_get_static_pad (rtph265pay, "src");
    gst_pad_add_probe (pad, GST_PAD_PROBE_TYPE_BUFFER,
        (GstPadProbeCallback) test_probe, NULL, NULL);
    gst_object_unref (pad);
    gst_object_unref (rtph265pay);  /* drop the refs taken above */
    gst_object_unref (element);
}
If I attach the probe to the "sink" pad instead of "src", it works, but I need "src" in order to change the RTP buffer header...
What is wrong here?
Maybe because the rtph265pay's src pad isn't linked to any other pad (meaning rtph265pay is the end of the pipeline), the element doesn't push any buffers through its src pad.
Try attaching a fakesink after the rtph265pay.
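A test sketch of that idea, outside the RTSP server (videotestsrc and x265enc stand in for the appsrc and Jetson omxh265enc; both are assumptions for local testing):
#include <gst/gst.h>

/* same probe as above, now with the required return value */
static GstPadProbeReturn
test_probe (GstPad *pad, GstPadProbeInfo *info, gpointer user_data)
{
    g_print ("buffer on %s\n", GST_PAD_NAME (pad));
    return GST_PAD_PROBE_OK;
}

int
main (int argc, char *argv[])
{
    gst_init (&argc, &argv);

    /* the fakesink terminates the branch, so the payloader's src pad
     * has a peer and buffers actually flow through it */
    GstElement *pipeline = gst_parse_launch (
        "videotestsrc ! x265enc ! rtph265pay name=pay0 pt=96 ! fakesink",
        NULL);
    GstElement *pay = gst_bin_get_by_name (GST_BIN (pipeline), "pay0");
    GstPad *pad = gst_element_get_static_pad (pay, "src");

    gst_pad_add_probe (pad, GST_PAD_PROBE_TYPE_BUFFER,
        (GstPadProbeCallback) test_probe, NULL, NULL);
    gst_object_unref (pad);
    gst_object_unref (pay);

    gst_element_set_state (pipeline, GST_STATE_PLAYING);
    g_main_loop_run (g_main_loop_new (NULL, FALSE));
    return 0;
}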

gstreamer audiomixer command to code converting

I want to use audiomixer in my application, which receives audio from different sources and should play it back together through the speaker.
My final application should do something like this command:
gst-launch-1.0 audiomixer name=mix ! autoaudiosink autoaudiosrc ! \
audioconvert ! mix. udpsrc port=5001 caps="application/x-rtp" ! queue !\
rtppcmudepay ! mulawdec ! audioconvert ! audioresample ! mix.
I have already written code using tee and queues, and I know how to work with tee and queues in code based on this code, but I don't know how to use a mixer in my code.
So for simplicity I just want to write code that works the way this command does:
gst-launch-1.0 audiotestsrc freq=100 ! audiomixer name=mix ! audioconvert ! autoaudiosink autoaudiosrc ! mix.
I didn't find any useful example for reaching this goal. How can I write C code to do this?
For the second part:
gst-launch-1.0 audiotestsrc freq=100 ! audiomixer name=mix ! audioconvert ! autoaudiosink autoaudiosrc ! mix.
the following code works:
#include <gst/gst.h>

static GMainLoop *loop;

gboolean bus_callback (GstBus *bus, GstMessage *message, gpointer data)
{
    g_print ("Got %s message\n", GST_MESSAGE_TYPE_NAME (message));
    switch (GST_MESSAGE_TYPE (message)) {
        case GST_MESSAGE_ERROR: {
            GError *err;
            gchar *debug;

            gst_message_parse_error (message, &err, &debug);
            g_print ("Error: %s\n", err->message);
            g_error_free (err);
            g_free (debug);
            g_main_loop_quit (loop);
            break;
        }
        case GST_MESSAGE_EOS:
            /* end-of-stream */
            g_main_loop_quit (loop);
            break;
        default:
            /* unhandled message */
            break;
    }
    /* we want to be notified again the next time there is a message
     * on the bus, so we return TRUE (FALSE means we want to stop
     * watching for messages on the bus and our callback should not
     * be called again) */
    return TRUE;
}
int main(int argc, char *argv[])
{
    GstElement *pipeline, *src1, *src2, *sink, *convert1, *convert2, *audiomixer;
    GstPad *conv_pad1, *conv_pad2, *mixer1_sinkpad, *mixer2_sinkpad;
    GstBus *bus;
    guint bus_watch_id;

    /* Initialize GStreamer */
    gst_init (nullptr, nullptr);

    pipeline = gst_pipeline_new ("pipeline");
    /* the command line uses audiomixer; the older "adder" element would also work */
    audiomixer = gst_element_factory_make ("audiomixer", "mixer");
    sink = gst_element_factory_make ("autoaudiosink", "sink");
    src1 = gst_element_factory_make ("audiotestsrc", "src1");
    convert1 = gst_element_factory_make ("audioconvert", "convert1");
    src2 = gst_element_factory_make ("autoaudiosrc", "src2");
    convert2 = gst_element_factory_make ("audioconvert", "convert2");
    //g_object_set (sink, "async-handling", TRUE, NULL);

    gst_bin_add_many (GST_BIN (pipeline), audiomixer, sink, NULL);
    gst_bin_add_many (GST_BIN (pipeline), src1, convert1, NULL);
    gst_bin_add_many (GST_BIN (pipeline), src2, convert2, NULL);
    gst_element_link (src1, convert1);
    gst_element_link (src2, convert2);
    gst_element_link (audiomixer, sink);

    /* each input branch gets its own request pad on the mixer */
    conv_pad1 = gst_element_get_static_pad (convert1, "src");
    mixer1_sinkpad = gst_element_get_request_pad (audiomixer, "sink_%u");
    gst_pad_link (conv_pad1, mixer1_sinkpad);
    gst_object_unref (mixer1_sinkpad);

    conv_pad2 = gst_element_get_static_pad (convert2, "src");
    mixer2_sinkpad = gst_element_get_request_pad (audiomixer, "sink_%u");
    gst_pad_link (conv_pad2, mixer2_sinkpad);
    gst_object_unref (mixer2_sinkpad);

    /* adds a watch for new messages on our pipeline's message bus to
     * the default GLib main context, which is the main context that our
     * GLib main loop is attached to below */
    bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
    bus_watch_id = gst_bus_add_watch (bus, bus_callback, NULL);
    gst_object_unref (bus);

    /* create the loop before PLAYING so bus_callback can safely quit it */
    loop = g_main_loop_new (NULL, FALSE);

    /* Start playing */
    gst_element_set_state (pipeline, GST_STATE_PLAYING);
    g_main_loop_run (loop);

    gst_object_unref (conv_pad1);
    gst_object_unref (conv_pad2);
    gst_element_set_state (pipeline, GST_STATE_NULL);
    g_source_remove (bus_watch_id);
    g_main_loop_unref (loop);
    gst_object_unref (pipeline);
    return 0;
}
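For the full command at the top, the udpsrc branch hooks into the same mixer with one more request pad. A sketch of just that branch, assuming the pipeline and audiomixer variables from the code above (error checking omitted):
/* RTP branch: udpsrc ! queue ! rtppcmudepay ! mulawdec ! audioconvert !
 * audioresample ! mix. */
GstElement *udpsrc   = gst_element_factory_make ("udpsrc", "udpsrc");
GstElement *queue    = gst_element_factory_make ("queue", "queue");
GstElement *depay    = gst_element_factory_make ("rtppcmudepay", "depay");
GstElement *dec      = gst_element_factory_make ("mulawdec", "dec");
GstElement *convert3 = gst_element_factory_make ("audioconvert", "convert3");
GstElement *resample = gst_element_factory_make ("audioresample", "resample");

GstCaps *caps = gst_caps_new_empty_simple ("application/x-rtp");
g_object_set (udpsrc, "port", 5001, "caps", caps, NULL);
gst_caps_unref (caps);

gst_bin_add_many (GST_BIN (pipeline), udpsrc, queue, depay, dec,
    convert3, resample, NULL);
gst_element_link_many (udpsrc, queue, depay, dec, convert3, resample, NULL);

/* one more request pad on the mixer, exactly like the branches above */
GstPad *branch_pad = gst_element_get_static_pad (resample, "src");
GstPad *mixer3_sinkpad = gst_element_get_request_pad (audiomixer, "sink_%u");
gst_pad_link (branch_pad, mixer3_sinkpad);
gst_object_unref (branch_pad);
gst_object_unref (mixer3_sinkpad);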

gst_parse_launch differs output from command line gst_launch

I have a pipeline like this, which works with gst-launch-1.0
gst-launch-1.0 v4l2src device='/dev/video0' ! 'video/x-raw,format=(string)YUY2,width=(int)640,height=(int)480' ! nvvidconv ! 'video/x-raw(memory:NVMM),format=(string)NV12,width=(int)640,height=(int)480' ! nvvidconv ! 'video/x-raw,format=(string)NV12,width=(int)640,height=(int)480' ! nvvideoconvert ! 'video/x-raw(memory:NVMM),format=(string)NV12,width=(int)640,height=(int)480' ! mux.sink_0 nvstreammux live-source=1 name=mux batch-size=1 width=640 height=480 ! nvinfer config-file-path=/opt/nvidia/deepstream/deepstream-4.0/sources/apps/sample_apps/deepstream-test1/dstest1_pgie_config.txt batch-size=1 ! nvmultistreamtiler rows=1 columns=1 width=640 height=480 ! nvvideoconvert ! nvdsosd ! nvegltransform ! nveglglessink sync=false -v
In my C application below, which executes the same pipeline, no window is displayed.
#include <gst/gst.h>

int
main (int argc, char *argv[])
{
    GstElement *pipeline;
    GstBus *bus;
    GstMessage *msg;

    /* Initialize GStreamer */
    gst_init (&argc, &argv);

    /* Build the pipeline */
    pipeline =
        gst_parse_launch
        ("v4l2src device='/dev/video0' ! 'video/x-raw,format=(string)YUY2,width=(int)640,height=(int)480' ! nvvidconv ! 'video/x-raw(memory:NVMM),format=(string)NV12,width=(int)640,height=(int)480' ! nvvidconv ! 'video/x-raw,format=(string)NV12,width=(int)640,height=(int)480' ! nvvideoconvert ! 'video/x-raw(memory:NVMM),format=(string)NV12,width=(int)640,height=(int)480' ! mux.sink_0 nvstreammux live-source=1 name=mux batch-size=1 width=640 height=480 ! nvinfer config-file-path=/opt/nvidia/deepstream/deepstream-4.0/sources/apps/sample_apps/deepstream-test1/dstest1_pgie_config.txt batch-size=1 ! nvmultistreamtiler rows=1 columns=1 width=640 height=480 ! nvvideoconvert ! nvdsosd ! nvegltransform ! nveglglessink sync=false -v",
        NULL);

    /* Start playing */
    gst_element_set_state (pipeline, GST_STATE_PLAYING);

    /* Wait until error or EOS */
    bus = gst_element_get_bus (pipeline);
    msg =
        gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,
        GST_MESSAGE_ERROR | GST_MESSAGE_EOS);

    /* Free resources */
    if (msg != NULL)
        gst_message_unref (msg);
    gst_object_unref (bus);
    gst_element_set_state (pipeline, GST_STATE_NULL);
    gst_object_unref (pipeline);
    return 0;
}
That's why you should check for errors. Your pipeline description has a -v at the end, which is an option for the gst-launch-1.0 application but is not part of a valid pipeline description.
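A short sketch of that check (the launch string is elided here); gst_parse_launch() reports parse failures through its GError argument:
GError *error = NULL;
GstElement *pipeline = gst_parse_launch ("v4l2src ! ...", &error);

if (error != NULL) {
    g_printerr ("could not construct pipeline: %s\n", error->message);
    g_clear_error (&error);
    return -1;
}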
I figured out the mistake in the above code: the caps filters must not carry the shell quotes (and the trailing -v has to go). The rest works fine.
#include <gst/gst.h>

int
main (int argc, char *argv[])
{
    GstElement *pipeline;
    GstBus *bus;
    GstMessage *msg;

    /* Initialize GStreamer */
    gst_init (&argc, &argv);

    /* Build the pipeline */
    pipeline =
        gst_parse_launch
        ("v4l2src ! video/x-raw,format=(string)YUY2 ! nvvidconv ! video/x-raw(memory:NVMM),format=(string)NV12 ! nvvidconv ! video/x-raw,format=(string)NV12 ! nvvideoconvert ! video/x-raw(memory:NVMM),format=(string)NV12 ! mux.sink_0 nvstreammux live-source=1 name=mux batch-size=1 width=640 height=480 ! nvinfer config-file-path=/opt/nvidia/deepstream/deepstream-4.0/sources/apps/sample_apps/deepstream-test1/dstest1_pgie_config.txt batch-size=1 ! nvmultistreamtiler rows=1 columns=1 width=640 height=480 ! nvvideoconvert ! nvdsosd ! nvegltransform ! nveglglessink", NULL);

    /* Start playing */
    gst_element_set_state (pipeline, GST_STATE_PLAYING);

    /* Wait until error or EOS */
    bus = gst_element_get_bus (pipeline);
    msg =
        gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,
        GST_MESSAGE_ERROR | GST_MESSAGE_EOS);

    /* Free resources */
    if (msg != NULL)
        gst_message_unref (msg);
    gst_object_unref (bus);
    gst_element_set_state (pipeline, GST_STATE_NULL);
    gst_object_unref (pipeline);
    return 0;
}
The above code works for me on an NVIDIA Xavier!

gst_element_link failed in second thread

I have a problem with linking elements in a second thread.
I create two threads, and in each one I create a pipeline to store an IP camera stream to a file.
When the second thread tries to link the stream elements (h264parse -> matroskamux), I receive a segmentation fault.
GStreamer prints some errors:
(Cam_recorder:5529): GLib-GObject-WARNING **: cannot register existing type 'GstMatroskamuxPad'
(Cam_recorder:5529): GLib-GObject-CRITICAL **: g_object_new: assertion 'G_TYPE_IS_OBJECT (object_type)' failed
When I run the same code with a delay (500 ms) added between starting the threads, both threads create their pipelines correctly and the program works.
Can anyone help me?
EDIT:
My code:
void Camera::Thread_function() {
    GstElement *pipeline = NULL;
    GstBus *bus = NULL;
    GstMessage *msg = NULL;
    GError *error = NULL;
    STRING text;

    DEBUG << "Starting camera:" << name << END;
    text = stream_uri;
    DEBUG << text << END;

    pipeline = gst_parse_launch(text.c_str(), &error);
    if (error != NULL) {
        CRITICAL << "Parse error: " << error->message << END;
        g_error_free(error);
        error = NULL;
        goto STOP;
    }
    if (pipeline == NULL) {
        CRITICAL << "Pipeline is NULL" << END;
        goto STOP;
    }

    gst_element_set_state(pipeline, GST_STATE_PLAYING);
    loop = g_main_loop_new(NULL, FALSE);
    gst_bus_add_watch(GST_ELEMENT_BUS(pipeline), bus_cb, loop);
    g_timeout_add_seconds(1, CAM_REC::timeout_cb, this);
    g_main_loop_run(loop);
    g_main_loop_unref(loop);

STOP:
    if (msg != NULL) {
        gst_message_unref(msg);
        msg = NULL;
    }
    if (bus != NULL) {
        gst_object_unref(bus);
        bus = NULL;
    }
    if (pipeline) {
        gst_element_set_state(pipeline, GST_STATE_NULL);
        gst_object_unref(pipeline);
        pipeline = NULL;
    }
    INFO << "Cam " << name << " end" << END;
}
Pipelines:
rtspsrc name=kam3_stream location=\"rtsp://192.168.0.107/stream1\" ! rtph264depay name=kam3_deplay ! h264parse name=kam3_parse ! matroskamux name=kam3_mux ! filesink location=x.mkv name=kamera3_file_sink
rtspsrc name=kam4_stream location=\"rtsp://admin:admin#192.168.0.108/\" ! rtph264depay name=kam4_deplay ! h264parse name=kam4_parse ! matroskamux name=kam4_mux ! filesink location=x2.mkv name=kamera4_file_sink
The program crashes in the gst_parse_launch() function.
Thanks
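The GLib-GObject warnings above suggest both threads race to register the same GstMatroskamuxPad type while the pipelines are being constructed, which matches the observation that a 500 ms delay avoids the crash. A workaround sketch under that assumption (the wrapper and mutex names are illustrative, not a confirmed fix): serialize pipeline construction so only one thread runs gst_parse_launch at a time.
#include <gst/gst.h>

/* Illustrative guard: GObject type registration is process-global, so
 * constructing the pipelines one at a time avoids the race. */
static GMutex parse_mutex;

static GstElement *
parse_launch_serialized (const gchar *launch, GError **error)
{
    GstElement *pipeline;

    g_mutex_lock (&parse_mutex);
    pipeline = gst_parse_launch (launch, error);
    g_mutex_unlock (&parse_mutex);
    return pipeline;
}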

Gstreamer under Sailfish OS no output sound

I am developing an audio player for Sailfish OS and trying to play a file via GStreamer, but there is a problem: no sound.
I checked GStreamer from the console:
gst-launch-0.10 filesrc location=/path/to/file.ogg ! decodebin !
autoaudiosink
And it works fine!
I also tested converting an audio file to another audio file:
int
main (int argc, char *argv[])
{
    GstElement *pipeline;
    GstBus *bus;
    GstMessage *msg;

    gst_init (&argc, &argv);
    pipeline = gst_parse_launch ("filesrc location=/home/nemo/Music/Ringtones/Myfile.mp3 ! decodebin ! audioconvert ! vorbisenc ! oggmux ! filesink location=test.ogg", NULL);
    gst_element_set_state (pipeline, GST_STATE_PLAYING);

    bus = gst_element_get_bus (pipeline);
    msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,
        (GstMessageType) (GST_MESSAGE_ERROR | GST_MESSAGE_EOS));
    if (msg != NULL)
        gst_message_unref (msg);
    gst_object_unref (bus);
    gst_element_set_state (pipeline, GST_STATE_NULL);
    gst_object_unref (pipeline);
    return 0;
}
And that works fine too!
But when I try to play the file, there is no sound:
pipeline = gst_parse_launch ("filesrc location=/home/nemo/Music/Ringtones/Myfile.mp3 ! decodebin ! audioconvert ! autoaudiosink", NULL);
Gstreamer version: 0.10
The problem was that the audio resources need to be acquired before use:
gst-launch is statically set as "player" in the resource policy configuration, so it can be run without any extra work.
However, when you are creating your own application, you will need to acquire the audio playback resources yourself.
Check https://github.com/nemomobile/libaudioresource if you are developing C-only application or https://github.com/nemomobile/libaudioresource-qt for Qt application.
Example:
#include <gst/gst.h>
#include <audioresource.h>
#include <glib.h>
#include <unistd.h>
#include <stdio.h>

/*
 * Dependencies: glib2-devel, libaudioresource-devel, gstreamer-devel.
 * Compile with:
 * gcc `pkg-config --cflags --libs gstreamer-0.10` `pkg-config --cflags --libs audioresource` `pkg-config --cflags --libs glib-2.0` gst-example.c -o gst-example
 *
 * Check https://github.com/nemomobile/libaudioresource
 */

static GstElement *pipeline;
static int got_reply = 0;

static void on_acquired(audioresource_t *audio_resource, bool acquired, void *user_data)
{
    got_reply = 1;
    printf("on_acquired: %s\n", acquired ? "true" : "false");
    if (acquired) {
        /* start playback here */
        printf("start playback\n");
        gst_element_set_state (pipeline, GST_STATE_PLAYING);
    } else {
        /* stop playback here */
    }
}

static void naive_wait()
{
    got_reply = 0;
    while (!got_reply) {
        g_main_context_iteration(NULL, false);
        usleep(1000);
    }
}

int main(int argc, char *argv[])
{
    audioresource_t *resource;
    void *user_data = NULL;
    char tmp[1024];
    GstBus *bus;
    GstMessage *msg;

    if (argc < 2) {
        printf("audio file argument needed.\n");
        return 1;
    }

    gst_init (&argc, &argv);

    printf("initialize audioresource for media player\n");
    resource = audioresource_init(AUDIO_RESOURCE_MEDIA, on_acquired, user_data);

    snprintf(tmp, 1024, "filesrc location=%s ! decodebin ! audioconvert ! autoaudiosink", argv[1]);
    printf("create pipeline: %s\n", tmp);
    pipeline = gst_parse_launch (tmp, NULL);

    printf("acquire audioresource..\n");
    /* When you want to start playback */
    audioresource_acquire(resource);
    /* Wait for the reply for acquire.. */
    naive_wait();

    bus = gst_element_get_bus (pipeline);
    msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,
        (GstMessageType)(GST_MESSAGE_ERROR | GST_MESSAGE_EOS));
    if (msg != NULL)
        gst_message_unref (msg);
    gst_object_unref (bus);
    gst_element_set_state (pipeline, GST_STATE_NULL);
    gst_object_unref (pipeline);

    printf("release audioresource..\n");
    audioresource_release(resource);
    /* Wait for release.. */
    naive_wait();

    /* When you close your application */
    audioresource_free(resource);
    return 0;
}
Thanks to Juho Hämäläinen for the answer!