I am trying to embed an IP camera stream in a QWidget using udpsrc. The following pipeline works:
gst-launch-1.0 udpsrc port=20000 ! application/x-rtp,encoding-name=JPEG,payload=26,width=640,height=460 ! rtpjpegdepay ! jpegparse ! jpegdec ! videoconvert ! videoscale ! ximagesink sync=false
When I tried to embed it in a QWidget, it showed only a plain window. My code is as follows:
#include <glib.h>
#include <gst/gst.h>
#include <gst/video/videooverlay.h>
#include <QApplication>
#include <QTimer>
#include <QWidget>
#include <QDebug>
int main(int argc, char *argv[])
{
if (!g_thread_supported ())
g_thread_init (NULL);
gst_init (&argc, &argv);
QApplication app(argc, argv);
app.connect(&app, SIGNAL(lastWindowClosed()), &app, SLOT(quit ()));
// prepare the pipeline
GstElement *pipeline = gst_pipeline_new ("pipeline");
GstElement *src = gst_element_factory_make ("udpsrc", NULL);
GstCaps *caps = gst_caps_from_string ("application/x-rtp,encoding-name=JPEG,payload=26,width=640,height=460");
g_object_set(G_OBJECT(src),
"port", 20000,
"caps", caps, NULL);
GstElement *parser = gst_element_factory_make ("rtpjpegdepay", NULL);
GstElement *mux = gst_element_factory_make ("jpegparse", NULL);
GstElement *parse2 = gst_element_factory_make ("jpegdec", NULL);
GstElement *dec = gst_element_factory_make ("videoconvert", NULL);
GstElement *conv = gst_element_factory_make ("videoscale", NULL);
GstElement *sink = gst_element_factory_make ("ximagesink", NULL);
g_object_set(G_OBJECT(sink), "sync", FALSE, NULL);
gst_bin_add_many (GST_BIN (pipeline), src, parser, mux, parse2, dec, conv, sink, NULL);
gst_element_link (src, sink);
GstState state, pending;
//this is the call to overlay the gstreamer's output to the Qt Widgets...
gst_video_overlay_set_window_handle (GST_VIDEO_OVERLAY (sink), xwinid);
GstBus *bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
gst_object_unref (bus);
GstStateChangeReturn sret = gst_element_set_state (pipeline, GST_STATE_PLAYING); //Playback can be initiated by setting the element to PLAYING state using gst_element_set_state()
qDebug()<<"####-1"<<sret;
if (sret == GST_STATE_CHANGE_FAILURE) {
gst_element_set_state (pipeline, GST_STATE_NULL);
gst_object_unref (pipeline);
// Exit application
QTimer::singleShot(0, QApplication::activeWindow(), SLOT(quit()));
}
gst_element_get_state (pipeline,
&state,
&pending,
10);
qDebug()<<state<<pending;
window->show();
app.exec();
g_main_loop_run (loop);
return 0;
}
I solved it by creating the pipeline with gst_parse_launch():
GstElement *pipeline_2= gst_parse_launch("udpsrc port=20000 ! application/x-rtp,encoding-name=JPEG,payload=26 ! rtpjpegdepay ! jpegparse ! jpegdec ! videoconvert ! videoscale ! ximagesink name=mySink", NULL);
GstElement *sink = gst_bin_get_by_name (GST_BIN (pipeline_2), "mySink");
QWidget *window = new QWidget();
window->setWindowTitle("udpsrc video stream");
window->resize(700, 700);
WId xwinid = window->winId();
gst_video_overlay_set_window_handle (GST_VIDEO_OVERLAY (sink), (guintptr)xwinid);
window->show();
GstStateChangeReturn sret = gst_element_set_state (pipeline_2, GST_STATE_PLAYING);
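For comparison, the original manually-built pipeline should behave the same once every element is linked in order (the question's code only tried to link src directly to sink, which fails) and the overlay handle comes from a real widget. A minimal, untested sketch, reusing the window and xwinid above and the element variables from the question:
// link the whole chain, not just src -> sink
if (!gst_element_link_many (src, parser, mux, parse2, dec, conv, sink, NULL))
    g_printerr ("Failed to link elements\n");
// hand the window id to the sink before the pipeline starts rendering
gst_video_overlay_set_window_handle (GST_VIDEO_OVERLAY (sink), (guintptr) xwinid);
window->show();
gst_element_set_state (pipeline, GST_STATE_PLAYING);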
Hope this helps.
I want to use audiomixer in my application, which receives audio from different sources and should play them together through the speaker.
My final application should do something like this command:
gst-launch-1.0 audiomixer name=mix ! autoaudiosink autoaudiosrc ! \
audioconvert ! mix. udpsrc port=5001 caps="application/x-rtp" ! queue !\
rtppcmudepay ! mulawdec ! audioconvert ! audioresample ! mix.
I have already written code that uses tee and queues, and I know how to work with them based on that code, but I don't know how to use a mixer in my code.
So, for simplicity, I just want to write code that works as this command does:
gst-launch-1.0 audiotestsrc freq=100 ! audiomixer name=mix ! audioconvert ! autoaudiosink autoaudiosrc ! mix.
I didn't find any useful example for reaching this goal; how can I write C code that does this?
For the second part:
gst-launch-1.0 audiotestsrc freq=100 ! audiomixer name=mix ! audioconvert ! autoaudiosink autoaudiosrc ! mix.
the following code works:
#include <gst/gst.h>
static GMainLoop *loop;
gboolean bus_callback (GstBus *bus, GstMessage *message, gpointer data)
{
g_print ("Got %s message\n", GST_MESSAGE_TYPE_NAME (message));
switch (GST_MESSAGE_TYPE (message)) {
case GST_MESSAGE_ERROR: {
GError *err;
gchar *debug;
gst_message_parse_error (message, &err, &debug);
g_print ("Error: %s\n", err->message);
g_error_free (err);
g_free (debug);
g_main_loop_quit (loop);
break;
}
case GST_MESSAGE_EOS:
/* end-of-stream */
g_main_loop_quit (loop);
break;
default:
/* unhandled message */
break;
}
/* we want to be notified again the next time there is a message
 * on the bus, so we return TRUE (returning FALSE means we want to
 * stop watching for messages on the bus and our callback should
 * not be called again)
 */
return TRUE;
}
int main(int argc, char *argv[])
{
/* Initialize GStreamer */
gst_init (nullptr, nullptr);
GstElement *pipeline, *src1,*src2, *sink, *convert1,*convert2,*audiomixer;
GstPad *conv_pad1, *conv_pad2, *mixer1_sinkpad,*mixer2_sinkpad;
static GstBus *bus;
static guint bus_watch_id;
pipeline = gst_pipeline_new ("pipeline");
audiomixer = gst_element_factory_make ("adder", "mixer"); /* the GStreamer 1.x "audiomixer" element should also work as a drop-in here */
sink = gst_element_factory_make ("autoaudiosink", "sink");
src1 = gst_element_factory_make ("audiotestsrc", "src1");
convert1 = gst_element_factory_make ("audioconvert", "convert1");
src2 = gst_element_factory_make ("autoaudiosrc", "src2");
convert2 = gst_element_factory_make ("audioconvert", "convert2");
//g_object_set (sink, "async-handling", TRUE, NULL);
gst_bin_add_many (GST_BIN (pipeline), audiomixer ,sink, NULL);
gst_bin_add_many (GST_BIN (pipeline), src1 , convert1 , NULL);
gst_bin_add_many (GST_BIN (pipeline), src2 , convert2 , NULL);
gst_element_link (src1, convert1 );
gst_element_link (src2, convert2 );
gst_element_link(audiomixer , sink);
conv_pad1= gst_element_get_static_pad (convert1, "src");
mixer1_sinkpad = gst_element_get_request_pad (audiomixer, "sink_%u");
gst_pad_link (conv_pad1, mixer1_sinkpad);
gst_object_unref(mixer1_sinkpad);
conv_pad2= gst_element_get_static_pad (convert2, "src");
mixer2_sinkpad = gst_element_get_request_pad (audiomixer, "sink_%u");
gst_pad_link (conv_pad2, mixer2_sinkpad);
gst_object_unref(mixer2_sinkpad);
/* adds a watch for new message on our pipeline’s message bus to
* the default GLib main context, which is the main context that our
* GLib main loop is attached to below
*/
bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
bus_watch_id = gst_bus_add_watch (bus, bus_callback, NULL);
gst_object_unref (bus);
/* Start playing */
gst_element_set_state (pipeline, GST_STATE_PLAYING);
loop = g_main_loop_new (NULL, FALSE);
g_main_loop_run (loop);
gst_object_unref(conv_pad1);
gst_object_unref(conv_pad2);
gst_element_set_state (pipeline, GST_STATE_NULL);
g_source_remove (bus_watch_id);
gst_object_unref (pipeline);
return 0;
}
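To extend this to the full command in the question, the UDP/RTP branch can feed the same mixer. A minimal, untested sketch of that extra branch, placed in main() before the pipeline goes to PLAYING; the element names (udpsrc, rtppcmudepay, mulawdec) and the port come from the gst-launch line above:
/* RTP/PCMU receive branch feeding the same mixer */
GstElement *udpsrc = gst_element_factory_make ("udpsrc", "udpsrc");
GstElement *queue = gst_element_factory_make ("queue", "queue");
GstElement *depay = gst_element_factory_make ("rtppcmudepay", "depay");
GstElement *mulawdec = gst_element_factory_make ("mulawdec", "mulawdec");
GstElement *convert3 = gst_element_factory_make ("audioconvert", "convert3");
GstElement *resample = gst_element_factory_make ("audioresample", "resample");
GstCaps *rtpcaps = gst_caps_from_string ("application/x-rtp");
g_object_set (udpsrc, "port", 5001, "caps", rtpcaps, NULL);
gst_caps_unref (rtpcaps);
gst_bin_add_many (GST_BIN (pipeline), udpsrc, queue, depay, mulawdec, convert3, resample, NULL);
gst_element_link_many (udpsrc, queue, depay, mulawdec, convert3, resample, NULL);
/* request a third mixer sink pad, exactly as with the first two branches */
GstPad *rtp_srcpad = gst_element_get_static_pad (resample, "src");
GstPad *mixer3_sinkpad = gst_element_get_request_pad (audiomixer, "sink_%u");
gst_pad_link (rtp_srcpad, mixer3_sinkpad);
gst_object_unref(rtp_srcpad);
gst_object_unref(mixer3_sinkpad);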
I made a .pcm audio file via the following command:
gst-launch-1.0 filesrc location=/home/pi/rawaudio/can-you-keep-a-secret.wav ! wavparse ! audioresample ! audioconvert ! audio/x-raw,format=S16BE,channels=1,rate=44100,layout=interleaved ! filesink location=/home/pi/rawaudio/test.pcm
I can play it with:
gst-launch-1.0 filesrc location=/home/pi/rawaudio/test.pcm ! audio/x-raw,format=S16BE,channels=1,rate=44100,layout=interleaved ! audioconvert ! audioresample ! alsasink
This works perfectly. But now I need to implement it in my C++ application. This is what I already have:
#include <string>
#include <stdio.h>
#include <gst/gst.h>
#include <gio/gio.h>
#include <boost/thread.hpp>
#define AUDIO_LOCATION "/home/pi/rawaudio/test.pcm"
static gboolean bus_call(GstBus *bus,
GstMessage *msg,
gpointer data) {
GMainLoop *loop = (GMainLoop*)data;
switch (GST_MESSAGE_TYPE(msg)) {
case GST_MESSAGE_EOS:
g_print ("End of stream\n");
g_main_loop_quit(loop);
break;
case GST_MESSAGE_ERROR: {
gchar *debug;
GError *error;
gst_message_parse_error(msg, &error, &debug);
g_free (debug);
g_printerr("Error: %s\n", error->message);
g_error_free(error);
g_main_loop_quit(loop);
break;
}
default:
break;
}
return TRUE;
}
int main (int argc, char **argv) {
gst_init(&argc, &argv);
GstElement *pipeline, *source, *parser, *sink, *convert;
GMainLoop *loop;
GstBus *bus;
guint bus_watch_id;
// loop
loop = g_main_loop_new(NULL, false);
// pipeline
pipeline = gst_pipeline_new("test_pipeline");
sink = gst_element_factory_make ("alsasink", "sink");
source = gst_element_factory_make("filesrc", "source");
g_object_set(G_OBJECT(source), "location", AUDIO_LOCATION, NULL);
//convert = gst_element_factory_make("audioconvert", "convert");
//parser = gst_element_factory_make("audioresample","parse");
GstPad *sourcepad, *sinkpad;
sourcepad = gst_element_get_static_pad(source, "src");
gst_pad_set_caps(sourcepad,
gst_caps_new_simple("audio/x-raw",
"rate", G_TYPE_INT, 44100,
"channels", G_TYPE_INT, 1,
"format", G_TYPE_STRING, "S16BE",
"layout", G_TYPE_STRING, "interleaved",
NULL));
gst_object_unref(sourcepad);
// bus
bus = gst_pipeline_get_bus(GST_PIPELINE (pipeline));
bus_watch_id = gst_bus_add_watch(bus, bus_call, loop);
gst_object_unref(bus);
// add elements into pipeline
gst_bin_add_many(GST_BIN(pipeline), source, sink, NULL);
// link source to decode
gst_element_link_many(source, sink, NULL);
// start playing
gst_element_set_state(GST_ELEMENT(pipeline), GST_STATE_PLAYING);
// iterate
g_print("Running...\n");
g_main_loop_run(loop);
// out of the main loop, clean up nicely
g_print("Returned, stopping playback\n");
gst_element_set_state(pipeline, GST_STATE_NULL);
g_print("Deleting pipeline\n");
gst_object_unref(GST_OBJECT(pipeline));
g_source_remove (bus_watch_id);
g_main_loop_unref(loop);
return 0;
}
But it is not working. I get
Error: The stream is in the wrong format
when running.
What am I missing? Some elements like audioparse, audioconvert, or audioresample? I also tried inserting audioconvert and audioresample after the source in the pipeline, but then I get an internal data stream error instead.
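One way to make this work in code, mirroring the working gst-launch line above, is to put the raw caps on a capsfilter element between filesrc and the converters instead of setting them on filesrc's source pad (filesrc only pushes bytes, and caps set that way are not negotiated). A minimal, untested sketch, reusing the convert and parser variables already declared above:
// filesrc ! capsfilter (raw caps) ! audioconvert ! audioresample ! alsasink
GstElement *capsfilter = gst_element_factory_make ("capsfilter", "caps");
convert = gst_element_factory_make ("audioconvert", "convert");
parser = gst_element_factory_make ("audioresample", "parse");
GstCaps *caps = gst_caps_new_simple ("audio/x-raw",
    "format", G_TYPE_STRING, "S16BE",
    "rate", G_TYPE_INT, 44100,
    "channels", G_TYPE_INT, 1,
    "layout", G_TYPE_STRING, "interleaved",
    NULL);
g_object_set (capsfilter, "caps", caps, NULL);
gst_caps_unref (caps);
gst_bin_add_many (GST_BIN (pipeline), source, capsfilter, convert, parser, sink, NULL);
gst_element_link_many (source, capsfilter, convert, parser, sink, NULL);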
I am developing an audio player for Sailfish OS and trying to play a file via GStreamer, but there is a problem: there is no sound.
I checked GStreamer from the console:
gst-launch-0.10 filesrc location=/path/to/file.ogg ! decodebin ! autoaudiosink
And it is working fine!
I tested converting an audio file to another audio file:
int
main (int argc,
char *argv[])
{
GstElement *pipeline;
GstBus *bus;
GstMessage *msg;
gst_init (&argc, &argv);
pipeline = gst_parse_launch ("filesrc location=/home/nemo/Music/Ringtones/Myfile.mp3 ! decodebin ! audioconvert ! vorbisenc ! oggmux ! filesink location=test.ogg", NULL);
gst_element_set_state (pipeline, GST_STATE_PLAYING);
bus = gst_element_get_bus (pipeline);
msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE, (GstMessageType)(GST_MESSAGE_ERROR | GST_MESSAGE_EOS));
if (msg != NULL)
gst_message_unref (msg);
gst_object_unref (bus);
gst_element_set_state (pipeline, GST_STATE_NULL);
gst_object_unref (pipeline);
return 0;
}
And it is working fine!
But when I try to play it, there is no sound:
pipeline = gst_parse_launch ("filesrc location=/home/nemo/Music/Ringtones/Myfile.mp3 ! decodebin ! audioconvert ! autoaudiosink", NULL);
Gstreamer version: 0.10
The problem was that audio resources need to be acquired before use:
gst-launch is statically set as "player" in the resource policy configuration, so it can run without any extra work.
However, when you are creating your own application, you will need to acquire the audio playback resources yourself.
Check https://github.com/nemomobile/libaudioresource if you are developing a C-only application, or https://github.com/nemomobile/libaudioresource-qt for a Qt application.
Example:
#include <gst/gst.h>
#include <audioresource.h>
#include <glib.h>
#include <unistd.h>
#include <stdio.h>
/*
* Dependencies glib2-devel, libaudioresource-devel, gstreamer-devel.
* Compile with:
* gcc `pkg-config --cflags --libs gstreamer-0.10` `pkg-config --cflags --libs audioresource` `pkg-config --cflags --libs glib-2.0` gst-example.c -o gst-example
*
* Check https://github.com/nemomobile/libaudioresource
*/
static GstElement *pipeline;
static int got_reply = 0;
static void on_acquired(audioresource_t *audio_resource, bool acquired, void *user_data)
{
got_reply = 1;
printf("on_acquired: %s\n", acquired ? "true" : "false");
if (acquired) {
// start playback here
printf("start playback\n");
gst_element_set_state (pipeline, GST_STATE_PLAYING);
} else {
// stop playback here
}
}
static void naive_wait()
{
got_reply = 0;
while (!got_reply) {
g_main_context_iteration(NULL, false);
usleep(1000);
}
}
int main(int argc, char *argv[])
{
audioresource_t *resource;
void *user_data = NULL;
char tmp[1024];
GstBus *bus;
GstMessage *msg;
if (argc < 2) {
printf("audio file argument needed.\n");
return 1;
}
gst_init (&argc, &argv);
printf("initialize audioresource for media player\n");
resource = audioresource_init(AUDIO_RESOURCE_MEDIA, on_acquired, user_data);
snprintf(tmp, 1024, "filesrc location=%s ! decodebin ! audioconvert ! autoaudiosink", argv[1]);
printf("create pipeline: %s\n", tmp);
pipeline = gst_parse_launch (tmp, NULL);
printf("acquire audioresource..\n");
// When you want to start playback
audioresource_acquire(resource);
// Wait for the reply for acquire..
naive_wait();
bus = gst_element_get_bus (pipeline);
msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE, (GstMessageType)(GST_MESSAGE_ERROR | GST_MESSAGE_EOS));
if (msg != NULL)
gst_message_unref (msg);
gst_object_unref (bus);
gst_element_set_state (pipeline, GST_STATE_NULL);
gst_object_unref (pipeline);
printf("release audioresource..\n");
audioresource_release(resource);
// Wait for release..
naive_wait();
// When you close your application
audioresource_free(resource);
return 0;
}
Thanks to Juho Hämäläinen for the answer!
I want to play two local video files using GStreamer, but I get a segmentation fault from libgstvideomixer.so. What is wrong with my code? The videomixer element is needed to play the two videos; should I use videobox for that instead?
gst-launch --no-fault filesrc location=/mnt/upan/source.264 ! queue ! typefind ! ffdec_h264 ! videomixer name=mix ! xvimagesink sync=false filesrc location=/mnt/upan/source.264 ! queue ! typefind ! ffdec_h264 ! mix.
static void p_gst_init(void)
{
App *app = &s_app;
GError *error = NULL;
GstBus *bus;
GstElement *parse, *decoder, *queue;
GstElement *parse2, *decoder2, *queue2;
gst_init (NULL, NULL);
/* create a mainloop to get messages */
app->loop = g_main_loop_new (NULL, TRUE);
app->playbin = gst_pipeline_new ("pipeline");
app->appsrc = gst_element_factory_make ("filesrc", "disk_source");
g_object_set (G_OBJECT (app->appsrc), "location", "/mnt/upan/test.264", NULL);
queue = gst_element_factory_make ("queue", "queue");
parse = gst_element_factory_make ("typefind", "parse");
decoder = gst_element_factory_make ("ffdec_h264", "decoder");
GstElement *filesrc2;
filesrc2 = gst_element_factory_make ("filesrc", "disk_source2");
g_object_set (G_OBJECT (filesrc2), "location", "/mnt/upan/source.264", NULL);
queue2 = gst_element_factory_make ("queue", "queue2");
parse2 = gst_element_factory_make ("typefind", "parse2");
decoder2 = gst_element_factory_make ("ffdec_h264", "decoder2");
/*
GstElement * videobox;
videobox = gst_element_factory_make("videobox", NULL);
g_object_set (videobox, "alpha", 0, "border-alpha", 0, "bottom", 100, "left", 100, "right", 100, "top", 100, NULL);
*/
GstElement * videomixer;
videomixer = gst_element_factory_make("videomixer","videomixer");
app->xvimagesink = gst_element_factory_make ("xvimagesink", "play_video");
g_object_set (G_OBJECT (app->xvimagesink), "synchronous", FALSE, NULL);
gst_bin_add_many (GST_BIN (app->playbin), app->appsrc, queue, parse, decoder, videomixer, app->xvimagesink, filesrc2, queue2, parse2, decoder2, NULL);
if(gst_element_link_many (app->appsrc, queue, parse, decoder, videomixer, NULL))
{
printf("---------link element success-----------------\n");
}
else
printf("---------link element failed-----------------\n");
gst_element_link_many (filesrc2, queue2, parse2, decoder2, videomixer, NULL);
gst_element_link_many(videomixer, app->xvimagesink, NULL);
bus = gst_pipeline_get_bus (GST_PIPELINE (app->playbin));
gst_bus_add_watch (bus, (GstBusFunc) bus_message, app);
gst_bus_set_sync_handler(bus, (GstBusSyncHandler)create_window, app);
g_signal_connect (app->appsrc, "need-data", G_CALLBACK (feed_data), app);
return ;
}
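When hunting a crash like this, it can also help to request the videomixer sink pads explicitly and check each link result, so a failed link shows up as a message instead of undefined behavior later. A minimal, untested sketch against the same 0.10-era API (the "sink_%d" template name and the xpos/ypos pad properties are assumptions from videomixer's 0.10 interface):
GstPad *dec_srcpad1 = gst_element_get_static_pad (decoder, "src");
GstPad *mix_sinkpad1 = gst_element_get_request_pad (videomixer, "sink_%d");
if (gst_pad_link (dec_srcpad1, mix_sinkpad1) != GST_PAD_LINK_OK)
    printf("failed to link decoder 1 to videomixer\n");
GstPad *dec_srcpad2 = gst_element_get_static_pad (decoder2, "src");
GstPad *mix_sinkpad2 = gst_element_get_request_pad (videomixer, "sink_%d");
if (gst_pad_link (dec_srcpad2, mix_sinkpad2) != GST_PAD_LINK_OK)
    printf("failed to link decoder 2 to videomixer\n");
/* position the second stream so both inputs stay visible */
g_object_set (mix_sinkpad2, "xpos", 100, "ypos", 100, NULL);
gst_object_unref (dec_srcpad1);
gst_object_unref (mix_sinkpad1);
gst_object_unref (dec_srcpad2);
gst_object_unref (mix_sinkpad2);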
The following code is written to play a .wav file, but it doesn't seem to work.
I would like to know if I am missing something in it.
Code:
#include <gst/gst.h>
#include <glib.h>
int main(int argc , char *argv[])
{
GMainLoop *loop;
GstElement *source,*audioparser,*sink,*pipeline;
GstBus *bus;
gst_init(&argc,&argv);
// create a pipeline
loop = g_main_loop_new (NULL, FALSE);
pipeline = gst_pipeline_new ("wav-player");
source = gst_element_factory_make("filesrc","file-source");
audioparser = gst_element_factory_make("wavparse","wav-parser");
sink = gst_element_factory_make("alsasink","sink1");
// set location to the current source
g_object_set (G_OBJECT (source), "location", argv[1], NULL);
gst_element_set_state (pipeline, GST_STATE_NULL);
// add elements to bin
gst_bin_add_many(GST_BIN(pipeline),source,audioparser,sink,NULL);
gst_element_link_many(source,audioparser,sink,NULL);
// create bus
bus = gst_pipeline_get_bus(GST_PIPELINE(pipeline));
gst_bus_add_watch (bus, bus_call, loop);
gst_object_unref (bus);
gst_element_set_state(pipeline, GST_STATE_PLAYING);
g_main_loop_run (loop);
return 1;
}
Please compile it using the following command:
gcc -Wall $(pkg-config --cflags --libs gstreamer-0.10) wav.c -o wavparser
Thanks in advance
Just use playbin2 instead of a hand-crafted pipeline
That is, replace everything from "pipeline = gst_pipeline_new()" through "gst_element_link_many" with:
pipeline = gst_element_factory_make("playbin2", NULL);
g_object_set(pipeline, "uri", "file:///the/file/I/want.wav", NULL);
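Since the question's code takes the file from argv[1], the path needs to become a URI first; a small sketch using GLib's g_filename_to_uri (the path must be absolute). In GStreamer 1.x the element is simply called playbin and takes the same uri property:
gchar *uri = g_filename_to_uri (argv[1], NULL, NULL); /* argv[1] must be an absolute path */
pipeline = gst_element_factory_make ("playbin2", NULL);
g_object_set (pipeline, "uri", uri, NULL);
g_free (uri);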
Maybe playbin2 is exactly what you need, but to answer the question: wavparse does not have a static src pad, so you must handle the "pad-added" signal from this element at runtime. Something like this code:
gst_bin_add_many (GST_BIN (pipeline), wavsrc, wavparse,audioconvert, audiosink, NULL);
g_object_set (G_OBJECT (wavsrc), "location", "sound.wav", NULL);
gst_element_link(wavsrc, wavparse);
gst_element_link(audioconvert, audiosink);
g_signal_connect (wavparse, "pad-added", G_CALLBACK (on_pad_added), audioconvert);
where wavsrc is a filesrc, wavparse is wavparse, audioconvert is an audioconvert, and audiosink is an alsasink (I'm not sure alsasink works for you, so you can choose another):
void on_pad_added (GstElement *src_element, GstPad *src_pad, gpointer data)
{
g_print ("Linking dynamic pad...\n");
GstElement *sink_element = (GstElement *) data; // is audioconvert
GstPad *sink_pad = gst_element_get_static_pad (sink_element, "sink");
gst_pad_link (src_pad, sink_pad);
gst_object_unref (sink_pad);
src_element = NULL; // yup, I don't want an "unused" warning here
}