Is there a simple GStreamer example for C++?

I am looking for a simple GStreamer example that does video streaming...
I would like to create a simple example (showing a video output), including compilation instructions.

Below is the source code for the GStreamer pipeline videotestsrc ! autovideosink. It shows a test-pattern video output.
Prerequisites
Make sure you have GStreamer and gcc installed.
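On Debian/Ubuntu, for example, the development files and the basic plugin sets can be installed like this (package names differ on other distributions):
sudo apt-get install libgstreamer1.0-dev gstreamer1.0-plugins-base gstreamer1.0-plugins-good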
Add the following source code to helloworld.c
#include <gst/gst.h>
#include <glib.h>

static gboolean bus_call (GstBus *bus, GstMessage *msg, gpointer data)
{
  GMainLoop *loop = (GMainLoop *) data;

  switch (GST_MESSAGE_TYPE (msg)) {
    case GST_MESSAGE_EOS:
      g_print ("End of stream\n");
      g_main_loop_quit (loop);
      break;
    case GST_MESSAGE_ERROR: {
      gchar *debug;
      GError *error;

      gst_message_parse_error (msg, &error, &debug);
      g_free (debug);

      g_printerr ("Error: %s\n", error->message);
      g_error_free (error);

      g_main_loop_quit (loop);
      break;
    }
    default:
      break;
  }

  return TRUE;
}

int main (int argc, char *argv[])
{
  GMainLoop *loop;
  GstElement *pipeline, *videotestsrcm, *autovideosinkm;
  GstBus *bus;
  guint bus_watch_id;

  /* Initialisation */
  gst_init (&argc, &argv);
  loop = g_main_loop_new (NULL, FALSE);

  /* Create gstreamer elements */
  pipeline = gst_pipeline_new ("videotest-pipeline");
  videotestsrcm = gst_element_factory_make ("videotestsrc", "testsource");
  autovideosinkm = gst_element_factory_make ("autovideosink", "videosink");

  if (!pipeline || !videotestsrcm || !autovideosinkm) {
    g_printerr ("One element could not be created. Exiting.\n");
    return -1;
  }

  /* Set up the pipeline */
  /* we add a message handler */
  bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
  bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
  gst_object_unref (bus);

  /* we add all elements into the pipeline */
  gst_bin_add_many (GST_BIN (pipeline), videotestsrcm, autovideosinkm, NULL);

  /* we link the elements together: videotestsrcm -> autovideosinkm */
  gst_element_link (videotestsrcm, autovideosinkm);

  /* Set the pipeline to "playing" state */
  g_print ("Now setting pipeline to state playing\n");
  gst_element_set_state (pipeline, GST_STATE_PLAYING);

  /* Iterate */
  g_print ("Running...\n");
  g_main_loop_run (loop);

  /* Out of the main loop, clean up nicely */
  g_print ("Returned, stopping playback\n");
  gst_element_set_state (pipeline, GST_STATE_NULL);

  g_print ("Deleting pipeline\n");
  gst_object_unref (GST_OBJECT (pipeline));
  g_source_remove (bus_watch_id);
  g_main_loop_unref (loop);

  return 0;
}
Compile it
gcc -Wall helloworld.c -o helloworld $(pkg-config --cflags --libs gstreamer-1.0)
Run it
./helloworld
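As a sanity check, the same pipeline can also be run without any C code at all, using the gst-launch-1.0 tool that ships with GStreamer:
gst-launch-1.0 videotestsrc ! autovideosink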

Related

How to feed an Opencv frame into Nvidia Deepstream pipeline?

I am struggling to find a way to input a single cv::Mat frame into an Nvidia DeepStream pipeline using C++. I tried the code below, but I received the following error message:
ERROR from element gstappsrc: Internal data stream error.
Error details: gstbasesrc.c(3055): gst_base_src_loop (): /GstPipeline:dst_opencv/GstAppSrc:source:
streaming stopped, reason not-negotiated (-4)
Returned, stopping playback
Deleting pipeline
If anyone has an idea how to do it, or can show me where I am going wrong, I will be very thankful.
#include <gst/gst.h>
#include <glib.h>
#include <math.h>
#include <string.h>
#include <sys/time.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "gstnvdsmeta.h"
#include "nvdsmeta_schema.h"
#include <gst/app/gstappsrc.h>
/* The muxer output resolution must be set if the input streams will be of
* different resolution. The muxer will scale all the input frames to this
* resolution. */
#define MUXER_OUTPUT_WIDTH 1920
#define MUXER_OUTPUT_HEIGHT 1080
#define TILED_OUTPUT_WIDTH 1920
#define TILED_OUTPUT_HEIGHT 1080
/* Muxer batch formation timeout, for e.g. 40 millisec. Should ideally be set
* based on the fastest source's framerate. */
#define MUXER_BATCH_TIMEOUT_USEC 4000000
/* NVIDIA Decoder source pad memory feature. This feature signifies that source
* pads having this capability will push GstBuffers containing cuda buffers. */
#define GST_CAPS_FEATURES_NVMM "memory:NVMM"
// detection models
#define MODEL_CONFIG "dstest3_pgie_config.txt"
//#define MODEL_CONFIG "yoloV2_pgie_config.txt"
//#define MODEL_CONFIG "fd_lpd_config.txt"
#define FPS_PRINT_INTERVAL 300
static gboolean bus_call (GstBus * bus, GstMessage * msg, gpointer data)
{
GMainLoop *loop = (GMainLoop *) data;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_EOS:
g_print ("End of stream\n");
g_main_loop_quit (loop);
break;
case GST_MESSAGE_WARNING:
{
gchar *debug;
GError *error;
gst_message_parse_warning (msg, &error, &debug);
g_printerr ("WARNING from element %s: %s\n",
GST_OBJECT_NAME (msg->src), error->message);
g_free (debug);
g_printerr ("Warning: %s\n", error->message);
g_error_free (error);
break;
}
case GST_MESSAGE_ERROR:
{
gchar *debug;
GError *error;
gst_message_parse_error (msg, &error, &debug);
g_printerr ("ERROR from element %s: %s\n",
GST_OBJECT_NAME (msg->src), error->message);
if (debug)
g_printerr ("Error details: %s\n", debug);
g_free (debug);
g_error_free (error);
g_main_loop_quit (loop);
break;
}
default:
break;
}
return TRUE;
}
//-------------------------------------------------------
static void cb_newpad (GstElement * decodebin, GstPad * decoder_src_pad, gpointer data)
{
g_print ("In cb_newpad\n");
GstCaps *caps = gst_pad_get_current_caps (decoder_src_pad);
const GstStructure *str = gst_caps_get_structure (caps, 0);
const gchar *name = gst_structure_get_name (str);
GstElement *source_bin = (GstElement *) data;
GstCapsFeatures *features = gst_caps_get_features (caps, 0);
/* Need to check if the pad created by the decodebin is for video and not
* audio. */
if (!strncmp (name, "video", 5)) {
/* Link the decodebin pad only if decodebin has picked nvidia
* decoder plugin nvdec_*. We do this by checking if the pad caps contain
* NVMM memory features. */
if (gst_caps_features_contains (features, GST_CAPS_FEATURES_NVMM)) {
/* Get the source bin ghost pad */
GstPad *bin_ghost_pad = gst_element_get_static_pad (source_bin, "src");
if (!gst_ghost_pad_set_target (GST_GHOST_PAD (bin_ghost_pad),
decoder_src_pad)) {
g_printerr ("Failed to link decoder src pad to source bin ghost pad\n");
}
gst_object_unref (bin_ghost_pad);
} else {
g_printerr ("Error: Decodebin did not pick nvidia decoder plugin.\n");
}
}
}
//-------------------------------------------------------
static void decodebin_child_added (GstChildProxy * child_proxy, GObject * object,gchar * name, gpointer user_data)
{
g_print ("Decodebin child added: %s\n", name);
if (g_strrstr (name, "decodebin") == name) {
g_signal_connect (G_OBJECT (object), "child-added",
G_CALLBACK (decodebin_child_added), user_data);
}
if (g_strstr_len (name, -1, "nvv4l2decoder") == name) {
g_print ("Seting bufapi_version\n");
g_object_set (object, "bufapi-version", TRUE, NULL);
}
}
//-------------------------------------------------------
void buffer_destroy(gpointer data) {cv::Mat* done = (cv::Mat*)data; delete done;}
//-----------------------------------------------------
static gboolean cb_need_data(GstElement* appsrc,guint unused_size,gpointer user_data)
{
g_print("cb_need_data function \n");
GstBuffer* buffer;
GstMapInfo map;
guint size,depth,height,width,step,channels;
GstFlowReturn ret;
guchar *data1;
g_print("userdata: %s \n",user_data);
cv::Mat frame=cv::imread((const char*)user_data, CV_LOAD_IMAGE_COLOR);
height = frame.size().height;
width = frame.size().width;
channels = frame.channels();
data1 = (guchar *)frame.data;
gsize sizeInBytes = height*width*channels;
g_print("frame_height: %d \n",height);
g_print("frame_width: %d \n",width);
g_print("frame_channels: %d \n",channels);
g_print("frame_size: %d \n",sizeInBytes);
buffer=gst_buffer_new_allocate(NULL,sizeInBytes,NULL);
gst_buffer_map(buffer,&map,GST_MAP_WRITE);
memcpy( (guchar *)map.data, data1, gst_buffer_get_size( buffer ) );
g_signal_emit_by_name (appsrc, "push-buffer", buffer, &ret);
if (ret != GST_FLOW_OK) {g_print("cv 2 gst got an error"); return false;}
gst_buffer_unref(buffer);
//gst_buffer_unmap (buffer, &map);
g_print("cv converted to gst \n ");
return true;
}
//-------------------------------------------------------
static GstPadProbeReturn tiler_src_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info,gpointer u_data)
{
char *msg;
g_object_get(G_OBJECT(u_data),"last-message",&msg,NULL);
if (msg!=NULL) {g_print("FPS =%s \n",msg);}
return GST_PAD_PROBE_OK;
}
//-------------------------------------------------------
//------------------MAIN---------------------------------
//-------------------------------------------------------
int main(int argc,char** argv)
{
GMainLoop *loop;
GstElement *pipeline,*sink,*tiler,*nvvidconv,*nvosd,*nvsink,*pgie; //,*streammux
GstElement* appsrc,*conv;
GstBus *bus;
guint bus_watch_id;
GstPad *tiler_src_pad;
guint num_sources;
guint tiler_rows,tiler_columns;
guint pgie_batch_size;
GstCaps *caps;
//check input args
if(argc <2) {g_printerr("Usage: %s <uri1> [uri2] ... [uriN] \n", argv[0]); return -1;}
num_sources=argc-1;
//start gstreamer
gst_init(&argc,&argv);
loop=g_main_loop_new(NULL,FALSE);
//Creating pipeline
pipeline=gst_pipeline_new("dst_opencv");
//streammux=gst_element_factory_make("nvstreammux","nvstream-muxer");
if(!pipeline){g_printerr("pipeline could not be created");}
//if(!streammux){g_printerr("Streammux could not be created");}
//gst_bin_add(GST_BIN(pipeline),streammux);
// Creating bin with all sources
appsrc=gst_element_factory_make("appsrc","gstappsrc");
conv=gst_element_factory_make("videoconvert","conv");
g_object_set (G_OBJECT (appsrc), "caps",
gst_caps_new_simple ("video/x-raw",
"format", G_TYPE_STRING, "I420",
"width", G_TYPE_INT, 1200,
"height", G_TYPE_INT, 600,
"pixel-aspect-ratio", GST_TYPE_FRACTION, 1, 1,
NULL), NULL);
g_object_set(G_OBJECT(appsrc),"stream-type",0,"format",GST_FORMAT_TIME,NULL);
/* Use nvinfer to infer on batched frame. */
pgie = gst_element_factory_make ("nvinfer", "primary-nvinference-engine");
/* Use nvtiler to composite the batched frames into a 2D tiled array based
* on the source of the frames. */
tiler = gst_element_factory_make ("nvmultistreamtiler", "nvtiler");
nvvidconv=gst_element_factory_make ("nvvideoconvert","nvvideo-converter");
/* Use convertor to convert from NV12 to RGBA as required by nvosd */
// nvvidconv = gst_element_factory_make ("nvvideoconvert", "nvvideo-converter");
nvosd=gst_element_factory_make("nvdsosd","nv-onscreendisplay");
nvsink=gst_element_factory_make ("nveglglessink", "nvvideo-renderer"); //show on display
//nvsink=gst_element_factory_make("fakesink","nvvideo-render"); //Dont show frames on screen
sink=gst_element_factory_make("fpsdisplaysink","fps_display");
//sink=gst_element_factory_make("autovideosink","videosink");
//check if all plugin were created
if(!appsrc){g_printerr("appsrc could not be created"); return -1;}
if(!conv){g_printerr("conv could not be created"); return -1;}
if(!tiler){g_printerr("tiler could not be created"); return -1;}
if(!sink){g_printerr("sink could not be created"); return -1;}
if(!nvvidconv){g_printerr("nvvidconv could not be created"); return -1;}
if(!pgie){g_printerr("pgie could not be created"); return -1;}
if(!nvosd){g_printerr("nvosd could not be created"); return -1;}
//set streammux
/* Configure the nvinfer element using the nvinfer config file. */
g_object_set (G_OBJECT (pgie),"config-file-path", MODEL_CONFIG, NULL);
/* Override the batch-size set in the config file with the number of sources. */
g_object_get (G_OBJECT (pgie), "batch-size", &pgie_batch_size, NULL);
if (pgie_batch_size != num_sources) {
g_printerr("WARNING: Overriding infer-config batch-size (%d) with number of sources (%d)\n",pgie_batch_size, num_sources);
g_object_set (G_OBJECT (pgie), "batch-size", num_sources, NULL);}
//g_print("Flag \n");
//set tiler
tiler_rows = (guint) sqrt (num_sources);
tiler_columns = (guint) ceil (1.0 * num_sources / tiler_rows);
/* we set the tiler properties here */
g_object_set (G_OBJECT (tiler), "rows", tiler_rows, "columns", tiler_columns,
"width", TILED_OUTPUT_WIDTH, "height", TILED_OUTPUT_HEIGHT, NULL);
/* we add a message handler */
bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
gst_object_unref (bus);
//set fps sink
g_object_set (G_OBJECT (sink), "text-overlay", FALSE, "video-sink", nvsink, "sync", FALSE, NULL);
//linking all elements
gst_bin_add_many(GST_BIN(pipeline),appsrc,conv,pgie,tiler,nvvidconv,nvosd,sink,NULL);
if (!gst_element_link_many(appsrc,conv,pgie,tiler,nvvidconv,nvosd,sink,NULL)){g_printerr("Elements could not be linked"); return -1;}
tiler_src_pad = gst_element_get_static_pad (pgie, "src");
if (!tiler_src_pad) {g_print ("Unable to get src pad\n");}
else{gst_pad_add_probe (tiler_src_pad, GST_PAD_PROBE_TYPE_BUFFER,tiler_src_pad_buffer_probe, (gpointer)sink, NULL);}
g_signal_connect (appsrc, "need-data", G_CALLBACK (cb_need_data),(gpointer)argv[1]);
/* Set the pipeline to "playing" state */
g_print ("Now playing:");
for (int i = 0; i < num_sources; i++) {g_print (" %s,", argv[i + 1]);}
g_print ("\n");
gst_element_set_state (pipeline, GST_STATE_PLAYING);
/* Wait till pipeline encounters an error or EOS */
g_print ("Running...\n");
g_main_loop_run (loop);
/* Out of the main loop, clean up nicely */
g_print ("Returned, stopping playback\n");
gst_element_set_state (pipeline, GST_STATE_NULL);
g_print ("Deleting pipeline\n");
gst_object_unref (GST_OBJECT (pipeline));
g_source_remove (bus_watch_id);
g_main_loop_unref (loop);
return 0;
}
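One thing worth double-checking in a pipeline like this: the caps set on appsrc must describe the buffers that are actually pushed. Here the caps declare I420 at 1200x600, while cv::imread produces packed BGR at the image's native size; that kind of mismatch is a classic cause of the not-negotiated error. As a sketch (untested, and assuming the frame dimensions are known up front), caps matching the cv::Mat could look like:
g_object_set (G_OBJECT (appsrc), "caps",
gst_caps_new_simple ("video/x-raw",
"format", G_TYPE_STRING, "BGR",
"width", G_TYPE_INT, frame_width, /* must equal frame.size().width */
"height", G_TYPE_INT, frame_height, /* must equal frame.size().height */
"framerate", GST_TYPE_FRACTION, 25, 1,
NULL), NULL);
The videoconvert element placed after appsrc can then convert BGR into whatever the downstream elements negotiate.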

Gstreamer recording video with audio

I'm trying to record video from my webcam together with audio to a file, using GStreamer on my Ubuntu 16 machine through the GLib library.
I'm able to watch the video stream from the webcam with these lines of code:
#include <gst/gst.h>
int main(int argc, char *argv[]) {
GstElement *pipeline, *source, *sink, *convert;
GstBus *bus;
GstMessage *msg;
GstStateChangeReturn ret;
/* Initialize GStreamer */
gst_init (&argc, &argv);
/* Create the elements */
source = gst_element_factory_make ("v4l2src", "source");
sink = gst_element_factory_make ("autovideosink", "sink");
convert =gst_element_factory_make("videoconvert","convert");
//convert = gst_element_factory_make ("audioconvert", "convert");
//sink = gst_element_factory_make ("autoaudiosink", "sink");
/* Create the empty pipeline */
pipeline = gst_pipeline_new ("test-pipeline");
if (!pipeline || !source || !sink || !convert) {
g_printerr ("Not all elements could be created.\n");
return -1;
}
/*set der source*/
g_object_set (source, "device", "/dev/video0", NULL);
/* Build the pipeline */
gst_bin_add_many (GST_BIN (pipeline), source, sink, convert, NULL);
if (gst_element_link (convert, sink) != TRUE) {
g_printerr ("Elements could not be linked confert sink.\n");
gst_object_unref (pipeline);
return -1;
}
if (gst_element_link (source, convert) != TRUE) {
g_printerr ("Elements could not be linked source -convert.\n");
gst_object_unref (pipeline);
return -1;
}
/* Start playing */
ret = gst_element_set_state (pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr ("Unable to set the pipeline to the playing state.\n");
gst_object_unref (pipeline);
return -1;
}
/* Wait until error or EOS */
bus = gst_element_get_bus (pipeline);
msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,(GstMessageType) (GST_MESSAGE_ERROR | GST_MESSAGE_EOS));
/* Parse message */
if (msg != NULL) {
GError *err;
gchar *debug_info;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_ERROR:
gst_message_parse_error (msg, &err, &debug_info);
g_printerr ("Error received from element %s: %s\n", GST_OBJECT_NAME (msg->src), err->message);
g_printerr ("Debugging information: %s\n", debug_info ? debug_info : "none");
g_clear_error (&err);
g_free (debug_info);
break;
case GST_MESSAGE_EOS:
g_print ("End-Of-Stream reached.\n");
break;
default:
/* We should not reach here because we only asked for ERRORs and EOS */
g_printerr ("Unexpected message received.\n");
break;
}
gst_message_unref (msg);
}
/* Free resources */
gst_object_unref (bus);
gst_element_set_state (pipeline, GST_STATE_NULL);
gst_object_unref (pipeline);
return 0;
}
and I can capture audio from the microphone and listen to it through the speakers with these lines of code:
#include <gst/gst.h>
#include <glib.h>
static gboolean
bus_call (GstBus *bus,
GstMessage *msg,
gpointer data){
GMainLoop *loop = (GMainLoop *) data;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_EOS:
g_print ("End of stream\n");
g_main_loop_quit (loop);
break;
case GST_MESSAGE_ERROR: {
gchar *debug;
GError *error;
gst_message_parse_error (msg, &error, &debug);
g_free (debug);
g_printerr ("Error: %s\n", error->message);
g_error_free (error);
g_main_loop_quit (loop);
break;
}
default:
break;
}
return TRUE;
}
/* Main function for audio pipeline initialization and looping streaming process */
gint
main (gint argc, gchar **argv) {
GMainLoop *loop;
GstElement *pipeline, *audio_source, *sink;
GstBus *bus;
guint bus_watch_id;
GstCaps *caps;
gboolean ret;
/* Initialization of gstreamer */
gst_init (&argc, &argv);
loop = g_main_loop_new (NULL, FALSE);
/* Elements creation */
pipeline = gst_pipeline_new ("audio_stream");
audio_source = gst_element_factory_make ("alsasrc", "audio_source");
sink = gst_element_factory_make ("alsasink", "audio_sink");
// video_source = gst_element_factory_make ("v4l2src", "source");
// video_sink = gst_element_factory_make ("autovideosink", "sink");
// video_convert= gst_element_factory_make("videoconvert","convert");
if (!pipeline) {
g_printerr ("Audio: Pipeline couldn't be created\n");
return -1;
}
if (!audio_source) {
g_printerr ("Audio: alsasrc couldn't be created\n");
return -1;
}
if (!sink) {
g_printerr ("Audio: Output file couldn't be created\n");
return -1;
}
g_object_set (G_OBJECT (audio_source), "device", "hw:1,0", NULL);
g_object_set (G_OBJECT (sink), "device", "hw:1,0", NULL);
bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
gst_object_unref (bus);
gst_bin_add_many (GST_BIN(pipeline), audio_source, sink, NULL);
caps = gst_caps_new_simple ("audio/x-raw", "format", G_TYPE_STRING, "S16LE", "layout", G_TYPE_STRING, "interleaved", "rate", G_TYPE_INT, (int)44100, "channels", G_TYPE_INT, (int)2, NULL);
ret = gst_element_link_filtered (audio_source, sink, caps);
if (!ret) {
g_print ("audio_source and sink couldn't be linked\n");
gst_caps_unref (caps);
return FALSE;
}
gst_element_set_state (pipeline, GST_STATE_PLAYING);
g_print ("streaming...\n");
g_main_loop_run (loop);
g_print ("Returned, stopping stream\n");
gst_element_set_state (pipeline, GST_STATE_NULL);
g_print ("Deleting pipeline\n");
gst_object_unref (GST_OBJECT (pipeline));
g_source_remove (bus_watch_id);
g_main_loop_unref (loop);
return 0;
}
What I really don't understand is how to get video from the webcam and audio from my ALSA hardware at the same time and save them into a file (such as .mp4, for example). Can anyone help me? I tried to find something useful, but there's nothing on the board. In addition, I would also really appreciate knowing how to save just the video stream or just the audio stream in separate files.
UPDATE
I looked again at the tutorials and at the git link given by @nayana, and tried to code something myself. I have two results:
#include <string.h>
#include <gst/gst.h>
#include <signal.h>
#include <unistd.h>
#include <stdlib.h>
static GMainLoop *loop;
static GstElement *pipeline;
static GstElement *muxer, *sink;
static GstElement *src_video, *encoder_video, *queue_video;
static GstElement *src_audio, *encoder_audio, *queue_audio;
static GstBus *bus;
static gboolean
message_cb (GstBus * bus, GstMessage * message, gpointer user_data)
{
switch (GST_MESSAGE_TYPE (message)) {
case GST_MESSAGE_ERROR:{
GError *err = NULL;
gchar *name, *debug = NULL;
name = gst_object_get_path_string (message->src);
gst_message_parse_error (message, &err, &debug);
g_printerr ("ERROR: from element %s: %s\n", name, err->message);
if (debug != NULL)
g_printerr ("Additional debug info:\n%s\n", debug);
g_error_free (err);
g_free (debug);
g_free (name);
g_main_loop_quit (loop);
break;
}
case GST_MESSAGE_WARNING:{
GError *err = NULL;
gchar *name, *debug = NULL;
name = gst_object_get_path_string (message->src);
gst_message_parse_warning (message, &err, &debug);
g_printerr ("ERROR: from element %s: %s\n", name, err->message);
if (debug != NULL)
g_printerr ("Additional debug info:\n%s\n", debug);
g_error_free (err);
g_free (debug);
g_free (name);
break;
}
case GST_MESSAGE_EOS:{
g_print ("Got EOS\n");
g_main_loop_quit (loop);
gst_element_set_state (pipeline, GST_STATE_NULL);
g_main_loop_unref (loop);
gst_object_unref (pipeline);
exit(0);
break;
}
default:
break;
}
return TRUE;
}
void sigintHandler(int unused) {
g_print("You ctrl-c-ed! Sending EoS");
gst_element_send_event(pipeline, gst_event_new_eos());
}
int main(int argc, char *argv[])
{
signal(SIGINT, sigintHandler);
gst_init (&argc, &argv);
pipeline = gst_pipeline_new(NULL);
src_video = gst_element_factory_make("v4l2src", NULL);
encoder_video = gst_element_factory_make("x264enc", NULL);
queue_video = gst_element_factory_make("queue", NULL);
src_audio = gst_element_factory_make ("alsasrc", NULL);
encoder_audio = gst_element_factory_make("lamemp3enc", NULL);
queue_audio = gst_element_factory_make("queue", NULL);
muxer = gst_element_factory_make("mp4mux", NULL);
sink = gst_element_factory_make("filesink", NULL);
if (!pipeline || !src_video || !encoder_video || !src_audio || !encoder_audio
|| !queue_video || !queue_audio || !muxer || !sink) {
g_error("Failed to create elements");
return -1;
}
g_object_set(src_audio, "device", "hw:1,0", NULL);
g_object_set(sink, "location", "video_audio_test.mp4", NULL);
gst_bin_add_many(GST_BIN(pipeline), src_video, encoder_video, queue_video,
src_audio, encoder_audio, queue_audio, muxer, sink, NULL);
gst_element_link_many (src_video,encoder_video,queue_video, muxer,NULL);
gst_element_link_many (src_audio,encoder_audio,queue_audio, muxer,NULL);
if (!gst_element_link_many(muxer, sink, NULL)){
g_error("Failed to link elements");
return -2;
}
loop = g_main_loop_new(NULL, FALSE);
bus = gst_pipeline_get_bus(GST_PIPELINE (pipeline));
gst_bus_add_signal_watch(bus);
g_signal_connect(G_OBJECT(bus), "message", G_CALLBACK(message_cb), NULL);
gst_object_unref(GST_OBJECT(bus));
gst_element_set_state(pipeline, GST_STATE_PLAYING);
g_print("Starting loop");
g_main_loop_run(loop);
return 0;
}
With the code above I am able to record video from the cam, but the audio is recorded for just one second at a random point during the recording, and it gives me this error:
ERROR: from element /GstPipeline:pipeline0/GstAlsaSrc:alsasrc0: Can't record audio fast enough
Additional debug info:
gstaudiobasesrc.c(869): gst_audio_base_src_create (): /GstPipeline:pipeline0/GstAlsaSrc:alsasrc0:
Dropped 206388 samples. This is most likely because downstream can't keep up and is consuming samples too slowly.
So I tried to add some settings and queues:
#include <string.h>
#include <gst/gst.h>
#include <signal.h>
#include <unistd.h>
#include <stdlib.h>
static GMainLoop *loop;
static GstElement *pipeline;
static GstElement *muxer, *sink;
static GstElement *src_video, *encoder_video, *queue_video, *rate_video, *scale_video, *capsfilter_video;
static GstElement *src_audio, *encoder_audio, *queue_audio, *queue_audio2, *capsfilter_audio, *rate_audio;
static GstBus *bus;
static GstCaps *caps;
static gboolean
message_cb (GstBus * bus, GstMessage * message, gpointer user_data)
{
switch (GST_MESSAGE_TYPE (message)) {
case GST_MESSAGE_ERROR:{
GError *err = NULL;
gchar *name, *debug = NULL;
name = gst_object_get_path_string (message->src);
gst_message_parse_error (message, &err, &debug);
g_printerr ("ERROR: from element %s: %s\n", name, err->message);
if (debug != NULL)
g_printerr ("Additional debug info:\n%s\n", debug);
g_error_free (err);
g_free (debug);
g_free (name);
g_main_loop_quit (loop);
break;
}
case GST_MESSAGE_WARNING:{
GError *err = NULL;
gchar *name, *debug = NULL;
name = gst_object_get_path_string (message->src);
gst_message_parse_warning (message, &err, &debug);
g_printerr ("ERROR: from element %s: %s\n", name, err->message);
if (debug != NULL)
g_printerr ("Additional debug info:\n%s\n", debug);
g_error_free (err);
g_free (debug);
g_free (name);
break;
}
case GST_MESSAGE_EOS:{
g_print ("Got EOS\n");
g_main_loop_quit (loop);
gst_element_set_state (pipeline, GST_STATE_NULL);
g_main_loop_unref (loop);
gst_object_unref (pipeline);
exit(0);
break;
}
default:
break;
}
return TRUE;
}
void sigintHandler(int unused) {
g_print("You ctrl-c-ed! Sending EoS");
gst_element_send_event(pipeline, gst_event_new_eos());
}
int main(int argc, char *argv[])
{
signal(SIGINT, sigintHandler);
gst_init (&argc, &argv);
pipeline = gst_pipeline_new(NULL);
src_video = gst_element_factory_make("v4l2src", NULL);
rate_video = gst_element_factory_make ("videorate", NULL);
scale_video = gst_element_factory_make ("videoscale", NULL);
capsfilter_video = gst_element_factory_make ("capsfilter", NULL);
queue_video = gst_element_factory_make("queue", NULL);
encoder_video = gst_element_factory_make("x264enc", NULL);
src_audio = gst_element_factory_make ("alsasrc", NULL);
capsfilter_audio = gst_element_factory_make ("capsfilter", NULL);
queue_audio = gst_element_factory_make("queue", NULL);
rate_audio = gst_element_factory_make ("audiorate", NULL);
queue_audio2 = gst_element_factory_make("queue", NULL);
encoder_audio = gst_element_factory_make("lamemp3enc", NULL);
muxer = gst_element_factory_make("mp4mux", NULL);
sink = gst_element_factory_make("filesink", NULL);
if (!pipeline || !src_video || !rate_video || !scale_video || !capsfilter_video
|| !queue_video || !encoder_video || !src_audio || !capsfilter_audio
|| !queue_audio || !rate_audio || !queue_audio2 || !encoder_audio
|| !muxer || !sink) {
g_error("Failed to create elements");
return -1;
}
// Set up the pipeline
g_object_set(src_video, "device", "/dev/video0", NULL);
g_object_set(src_audio, "device", "hw:1,0", NULL);
g_object_set(sink, "location", "video_audio_test.mp4", NULL);
// video settings
caps = gst_caps_from_string("video/x-raw,format=(string)I420,width=480,height=384,framerate=(fraction)25/1");
g_object_set (G_OBJECT (capsfilter_video), "caps", caps, NULL);
gst_caps_unref (caps);
// audio settings
caps = gst_caps_from_string("audio/x-raw,rate=44100,channels=1");
g_object_set (G_OBJECT (capsfilter_audio), "caps", caps, NULL);
gst_caps_unref (caps);
// add all elements into the pipeline
gst_bin_add_many(GST_BIN(pipeline), src_video, rate_video, scale_video, capsfilter_video,
queue_video, encoder_video, src_audio, capsfilter_audio, queue_audio, rate_audio,
queue_audio2, encoder_audio, muxer, sink, NULL);
if (!gst_element_link_many (src_video,rate_video,scale_video, capsfilter_video,
queue_video, encoder_video, muxer,NULL))
{
g_error("Failed to link video elements");
return -2;
}
if (!gst_element_link_many (src_audio, capsfilter_audio, queue_audio, rate_audio,
queue_audio2, encoder_audio, muxer,NULL))
{
g_error("Failed to link audio elements");
return -2;
}
if (!gst_element_link_many(muxer, sink, NULL))
{
g_error("Failed to link elements");
return -2;
}
loop = g_main_loop_new(NULL, FALSE);
bus = gst_pipeline_get_bus(GST_PIPELINE (pipeline));
gst_bus_add_signal_watch(bus);
g_signal_connect(G_OBJECT(bus), "message", G_CALLBACK(message_cb), NULL);
gst_object_unref(GST_OBJECT(bus));
gst_element_set_state(pipeline, GST_STATE_PLAYING);
g_print("Starting loop");
g_main_loop_run(loop);
return 0;
}
This time the code doesn't record anything and gives me the following error:
ERROR: from element /GstPipeline:pipeline0/GstAlsaSrc:alsasrc0: Internal data flow error.
Additional debug info:
gstbasesrc.c(2948): gst_base_src_loop (): /GstPipeline:pipeline0/GstAlsaSrc:alsasrc0:
streaming task paused, reason not-negotiated (-4)
Can you point me to a fix for this error?
Thanks in advance.
What you need is a multiplexer: a GStreamer element that can merge two streams into one.
mp4, mkv, avi, etc. are just container formats which hold multiple "data streams"; these can be audio, video, or subtitles (not all formats support all of these).
I don't know about your use case, but you don't need C code for what you are doing. You can just use the gst-launch-1.0 tool, which has its own GStreamer kind-of-scripting language.
For simplicity I will use debugging elements videotestsrc and audiotestsrc for simulating input (instead of actual camera etc).
gst-launch-1.0 -e videotestsrc ! x264enc ! mp4mux name=mux ! filesink location="bla.mp4" audiotestsrc ! lamemp3enc ! mux.
videotestsrc --> x264enc -----\
>---> mp4mux ---> filesink
audiotestsrc --> lamemp3enc --/
Explanation:
videotestsrc generates raw video, which in GStreamer terms is called "video/x-raw".
However, mp4 cannot hold raw video, so we need to encode it, for example with x264enc, which turns our data into "video/x-h264".
Then we can finally mux this into our mp4 with the mp4mux element.
If we take a look at the GStreamer docs using gst-inspect-1.0 mp4mux, we see that this element supports various formats, among which is video/x-h264.
We do the same thing with audio, using either faac for the AAC format or lamemp3enc for mp3.
With gst-launch-1.0 I used two tricks and one bonus trick:
the ability to have separate branches in one line; this is achieved by separating those branches with a space instead of !
the ability to make an alias with name=mux and refer to it later by appending a dot to the name, as in mux. (you can make up any name you like for that element)
writing EOS after hitting Ctrl+C to stop the recording; this is achieved with the -e parameter
Finally, the output goes to filesink, which just writes anything you give it to a file.
Now for homework:
Use the elements you need for your case: v4l2src, alsasrc
Add queue elements to add buffering and thread separation
A sketch applying both follows below.
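For example, applied to your case it could look something like this (untested; the device names are taken from your code, so adjust as needed):
gst-launch-1.0 -e v4l2src device=/dev/video0 ! videoconvert ! x264enc tune=zerolatency ! queue ! mp4mux name=mux ! filesink location="cam.mp4" alsasrc device=hw:1,0 ! audioconvert ! lamemp3enc ! queue ! mux.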

Gstreamer FLAC pipeline creation Error

I keep getting this error:
$ ./test recit24bit.flac
Now playing: recit24bit.flac
Running...
Error: Internal data flow error.
Returned, stopping playback
Deleting pipeline
This happens when I compile and run the following code:
#include <gst/gst.h>
#include <glib.h>
static gboolean bus_call (GstBus *bus,
GstMessage *msg,
gpointer data)
{
GMainLoop *loop = (GMainLoop *) data;
switch (GST_MESSAGE_TYPE (msg))
{
case GST_MESSAGE_EOS:
{
g_print ("End of stream\n");
g_main_loop_quit (loop);
break;
}
case GST_MESSAGE_ERROR:
{
gchar *debug;
GError *error;
gst_message_parse_error (msg, &error, &debug);
g_free (debug);
g_printerr ("Error: %s\n", error->message);
g_error_free (error);
g_main_loop_quit (loop);
break;
}
default:
{
break;
}
}
return TRUE;
}
/*
static void on_pad_added (GstElement *element,
GstPad *pad,
gpointer data)
{
GstPad *sinkpad;
GstElement *decoder = (GstElement *) data;
// We can now link this pad with the vorbis-decoder sink pad
g_print ("Dynamic pad created, linking demuxer/decoder\n");
sinkpad = gst_element_get_static_pad (decoder, "sink");
gst_pad_link (pad, sinkpad);
gst_object_unref (sinkpad);
}
*/
int main (int argc,
char *argv[])
{
GMainLoop *loop;
GstElement *pipeline,
*source,
//*demuxer,
*decoder,
*conv,
*sink;
GstBus *bus;
guint bus_watch_id;
/* Initialisation */
gst_init (&argc, &argv);
loop = g_main_loop_new (NULL, FALSE);
/* Check input arguments */
if (argc != 2)
{
g_printerr ("Usage: %s <Flac filename>\n", argv[0]);
return -1;
}
/* Create gstreamer elements */
pipeline = gst_pipeline_new ("audio-player");
source = gst_element_factory_make ("filesrc", "file-source");
//demuxer = gst_element_factory_make ("oggdemux", "ogg-demuxer");
decoder = gst_element_factory_make ("flacdec", "flac-decoder");
conv = gst_element_factory_make ("audioconvert", "converter");
sink = gst_element_factory_make ("alsasink", "audio-output");
if (!pipeline || !source ||/* !demuxer ||*/ !decoder ||/* !conv ||*/ !sink)
{
g_printerr ("One element could not be created. Exiting.\n");
return -1;
}
/* Set up the pipeline */
/* we set the input filename to the source element */
g_object_set (G_OBJECT (source), "location", argv[1], NULL);
/* we add a message handler */
bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
gst_object_unref (bus);
/* we add all elements into the pipeline */
/* file-source | ogg-demuxer | vorbis-decoder | converter | alsa-output */
gst_bin_add_many (GST_BIN (pipeline), source,/* demuxer,*/ decoder, conv, sink, NULL);
/* we link the elements together */
/* file-source -> ogg-demuxer ~> vorbis-decoder -> converter -> alsa-output */
//gst_element_link (source, demuxer);
gst_element_link_many (source, decoder, conv, sink, NULL);
// g_signal_connect (demuxer, "pad-added", G_CALLBACK (on_pad_added), decoder);
/* note that the demuxer will be linked to the decoder dynamically.
The reason is that Ogg may contain various streams (for example
audio and video). The source pad(s) will be created at run time,
by the demuxer when it detects the amount and nature of streams.
Therefore we connect a callback function which will be executed
when the "pad-added" is emitted.*/
/* Set the pipeline to "playing" state*/
g_print ("Now playing: %s\n", argv[1]);
gst_element_set_state (pipeline, GST_STATE_PLAYING);
/* Iterate */
g_print ("Running...\n");
g_main_loop_run (loop);
/* Out of the main loop, clean up nicely */
g_print ("Returned, stopping playback\n");
gst_element_set_state (pipeline, GST_STATE_NULL);
g_print ("Deleting pipeline\n");
gst_object_unref (GST_OBJECT (pipeline));
g_source_remove (bus_watch_id);
g_main_loop_unref (loop);
return 0;
}
I'm using this to compile it successfully:
g++ -Wall test-flac.cc -o test $(pkg-config --cflags --libs gstreamer-1.0)
I'm using Arch, if that means anything. Does anybody have some advice? I'm a pretty big noob, but I don't understand what I'm not doing right because it seems like it should work.
I just needed to replace the demuxer with a parser, which is (apparently) necessary. Derp. I used flacparse, of course.
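For reference, a minimal sketch of that fix against the code above (same variable names as in the question; flacparse goes between the file source and the decoder):
GstElement *parser = gst_element_factory_make ("flacparse", "flac-parser");
/* add the parser to the pipeline alongside the other elements */
gst_bin_add (GST_BIN (pipeline), parser);
/* file-source -> flac-parser -> flac-decoder -> converter -> alsa-output */
gst_element_link_many (source, parser, decoder, conv, sink, NULL);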

Internal data flow error in gstreamer c code that uses "adder" element?

I want to convert my gst-launch command into C code. I am new to GStreamer coding; can anyone help me?
command: gst-launch-0.10 uridecodebin uri=file:///media/afeb7785-7c21-45bf-b1b7-41d3263022f6/gst/bigcity.wav ! audioconvert ! volume volume='0.9' ! audioconvert ! adder name = m ! autoaudiosink uridecodebin uri=file:///media/afeb7785-7c21-45bf-b1b7-41d3263022f6/gst/tereliya.wav ! audioconvert ! volume volume='0.3' ! audioconvert ! m.
C code:
#include <gst/gst.h>
#include <glib.h>
static gboolean
bus_call (GstBus *bus,
GstMessage *msg,
gpointer data)
{
GMainLoop *loop = (GMainLoop *) data;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_EOS:
g_print ("End of stream\n");
g_main_loop_quit (loop);
break;
case GST_MESSAGE_ERROR: {
gchar *debug;
GError *error;
gst_message_parse_error (msg, &error, &debug);
g_free (debug);
g_printerr ("Error: %s\n", error->message);
g_error_free (error);
g_main_loop_quit (loop);
break;
}
default:
break;
}
return TRUE;
}
static void
on_pad_added (GstElement *element,
GstPad *pad,
gpointer data)
{
GstPad *sinkpad;
GstElement *decoder = (GstElement *) data;
/* We can now link this pad with the vorbis-decoder sink pad */
g_print ("Dynamic pad created, linking \n");
sinkpad = gst_element_get_static_pad (decoder, "sink");
gst_pad_link (pad, sinkpad);
gst_object_unref (sinkpad);
}
int
main (int argc,
char *argv[])
{
GMainLoop *loop;
GstElement *pipeline, *source1, *source2, *mixer, *conv, *conv2, *sink;
GstBus *bus;
guint bus_watch_id;
GstPad *adder_sinkpad;
GstPad *adder_sinkpad2;
GstPad *conv1_pad;
GstPad *conv2_pad;
gchar *pad1name;
gchar *pad2name;
/* Initialisation */
gst_init (&argc, &argv);
loop = g_main_loop_new (NULL, FALSE);
/* Check input arguments */
/* if (argc != 3) {
g_printerr ("Usage: %s \n", argv[0]);
return -1;
}*/
/* Create gstreamer elements */
pipeline = gst_pipeline_new ("audio-player");
source1 = gst_element_factory_make ("uridecodebin", "uri-source1");
source2 = gst_element_factory_make ("uridecodebin", "uri-source2");
mixer = gst_element_factory_make ("adder", "audio-mix");
conv = gst_element_factory_make ("audioconvert", "conv");
conv2 = gst_element_factory_make ("audioconvert", "conv2");
sink = gst_element_factory_make ("alsasink", "audio-output");
if (!pipeline || !source1 || !source2 || !mixer || !conv || !conv2 || !sink) {
g_printerr ("One element could not be created. Exiting.\n");
return -1;
}
/* Set up the pipeline */
/* we set the input filename to the source element */
g_object_set (G_OBJECT (source1), "uri", "file:///home/baibhav/gst/shadowoftheday.wav", NULL);
g_object_set (G_OBJECT (source2), "uri", "file:///home/baibhav/gst/valentinesday.wav" , NULL);
g_object_set (G_OBJECT (mixer), "name", "mix", NULL);
/* we add a message handler */
bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
gst_object_unref (bus);
/* we add all elements into the pipeline */
gst_bin_add_many (GST_BIN (pipeline),
source1, conv, mixer, sink, source2, conv2, NULL);
/* we link the elements together */
int k,n;
if((k=gst_element_link (source1, conv)) !=0 ) {
g_print ("link1 error: %d\n",k);
g_print ("cannot link source1 with conv\n");
}
if((n=gst_element_link (source2, conv2)) != 0) {
g_print ("link2 error: %d\n",n);
g_print ("cannot link source2 with conv2\n");
}
if(gst_element_link (mixer, sink) != TRUE) {
g_print ("cannot link sink with mixer\n");
}
conv1_pad= gst_element_get_static_pad (conv, "src");
conv2_pad= gst_element_get_static_pad (conv2, "src");
adder_sinkpad = gst_element_get_request_pad (mixer, "sink%d");
pad1name = gst_pad_get_name (adder_sinkpad);
g_print ("pad1name: %s\n",pad1name );
adder_sinkpad2 = gst_element_get_request_pad (mixer, "sink%d");
pad2name = gst_pad_get_name (adder_sinkpad2);
g_print ("pad2name: %s\n",pad2name );
int i,j;
if((i=gst_pad_link (conv1_pad, adder_sinkpad)) != 0) {
g_print ("pad error: %d\n",i);
g_print ("cannot link conv1 with adder1\n");
}
if((j=gst_pad_link (conv2_pad, adder_sinkpad2))!= 0) {
g_print ("pad2 error: %d\n",j);
g_print ("cannot link conv2 with adder2\n");
}
// g_signal_connect (conv, "pad-added", G_CALLBACK (on_pad_added), mixer);
// g_signal_connect (conv2, "pad-added", G_CALLBACK (on_pad_added), mixer);
/* Set the pipeline to "playing" state*/
g_print ("Now playing\n");
gst_element_set_state (pipeline, GST_STATE_PLAYING);
/* Iterate */
g_print ("Running...\n");
g_main_loop_run (loop);
/* Out of the main loop, clean up nicely */
g_print ("Returned, stopping playback\n");
gst_element_set_state (pipeline, GST_STATE_NULL);
g_print ("Deleting pipeline\n");
// gst_pad_unlink (conv2_pad, adder_sinkpad2);
// gst_pad_unlink ((conv1_pad, adder_sinkpad);
gst_object_unref (GST_OBJECT (pipeline));
g_source_remove (bus_watch_id);
g_main_loop_unref (loop);
return 0;
}
For your application there is no requirement to add a signal handler for the pad-added signal (note that this version uses audiotestsrc, which has an always source pad, in place of uridecodebin). You can simply write your application as follows:
#include <gst/gst.h>
#include <glib.h>
static gboolean
bus_call (GstBus *bus,
GstMessage *msg,
gpointer data)
{
GMainLoop *loop = (GMainLoop *) data;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_EOS:
g_print ("End of stream\n");
g_main_loop_quit (loop);
break;
case GST_MESSAGE_ERROR: {
gchar *debug;
GError *error;
gst_message_parse_error (msg, &error, &debug);
g_free (debug);
g_printerr ("Error: %s\n", error->message);
g_error_free (error);
g_main_loop_quit (loop);
break;
}
default:
break;
}
return TRUE;
}
int main (int argc,
char *argv[])
{
GMainLoop *loop;
GstElement *pipeline, *source1, *source2, *mixer, *conv, *conv2, *sink;
GstBus *bus;
guint bus_watch_id;
GstPad *adder_sinkpad;
GstPad *adder_sinkpad2;
GstPad *conv1_pad;
GstPad *conv2_pad;
gchar *pad1name;
gchar *pad2name;
/* Initialisation */
gst_init (&argc, &argv);
loop = g_main_loop_new (NULL, FALSE);
/* Check input arguments */
/* if (argc != 3) {
* g_printerr ("Usage: %s \n", argv[0]);
* return -1;
* }*/
/* Create gstreamer elements */
pipeline = gst_pipeline_new ("audio-player");
source1 = gst_element_factory_make ("audiotestsrc", "uri-source1");
source2 = gst_element_factory_make ("audiotestsrc", "uri-source2");
mixer = gst_element_factory_make ("adder", "audio-mix");
conv = gst_element_factory_make ("audioconvert", "conv");
conv2 = gst_element_factory_make ("audioconvert", "conv2");
sink = gst_element_factory_make ("alsasink", "audio-output");
if (!pipeline || !source1 || !source2 || !mixer || !conv || !conv2 || !sink) {
g_printerr ("One element could not be created. Exiting.\n");
return -1;
}
/* Set up the pipeline */
/* audiotestsrc has no "uri" property, so there is nothing to configure on the sources here */
g_object_set (G_OBJECT (mixer), "name", "mix", NULL);
/* we add a message handler */
bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
gst_object_unref (bus);
/* we add all elements into the pipeline */
gst_bin_add_many (GST_BIN (pipeline),
source1, conv, mixer, sink, source2, conv2, NULL);
/* we link the elements together */
int k,n;
if((k=gst_element_link_many (source1, conv,mixer,NULL))== 0 ) {
g_print ("link1 error: %d\n",k);
g_print ("cannot link source1 with conv\n");
}
if((n=gst_element_link_many (source2, conv2,mixer,NULL))== 0 ) {
g_print ("link2 error: %d\n",n);
g_print ("cannot link source2 with conv2\n");
}
if(gst_element_link (mixer, sink) == 0 ) {
g_print ("cannot link mixer with sink\n");
}
// /* Set the pipeline to "playing" state*/
g_print ("Now playing\n");
gst_element_set_state (pipeline, GST_STATE_PLAYING);
//
//
// /* Iterate */
g_print ("Running...\n");
g_main_loop_run (loop);
//
// /* Out of the main loop, clean up nicely */
g_print ("Returned, stopping playback\n");
gst_element_set_state (pipeline, GST_STATE_NULL);
g_print ("Deleting pipeline\n");
// gst_pad_unlink (conv2_pad, adder_sinkpad2);
// gst_pad_unlink ((conv1_pad, adder_sinkpad);
gst_object_unref (GST_OBJECT (pipeline));
g_source_remove (bus_watch_id);
g_main_loop_unref (loop);
return 0;
}
The code below works:
#include <gst/gst.h>
#include <glib.h>
static gboolean
bus_call (GstBus *bus,
GstMessage *msg,
gpointer data)
{
GMainLoop *loop = (GMainLoop *) data;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_EOS:
g_print ("End of stream\n");
g_main_loop_quit (loop);
break;
case GST_MESSAGE_ERROR: {
gchar *debug;
GError *error;
gst_message_parse_error (msg, &error, &debug);
g_free (debug);
g_printerr ("Error: %s\n", error->message);
g_error_free (error);
g_main_loop_quit (loop);
break;
}
default:
break;
}
return TRUE;
}
static void
on_pad_added (GstElement *element,
GstPad *pad,
gpointer data)
{
GstPad *sinkpad;
GstElement *decoder = (GstElement *) data;
/* We can now link this pad with the vorbis-decoder sink pad */
g_print ("Dynamic pad created, linking \n");
sinkpad = gst_element_get_static_pad (decoder, "sink");
gst_pad_link (pad, sinkpad);
gst_object_unref (sinkpad);
}
int
main (int argc,
char *argv[])
{
GMainLoop *loop;
GstElement *pipeline, *source1, *source2, *mixer, *conv1, *conv2, *sink;
GstBus *bus;
guint bus_watch_id;
GstPad *adder_sinkpad1;
GstPad *adder_sinkpad2;
GstPad *vol1_pad, *vol2_pad;
gchar *pad1name;
gchar *pad2name;
GstElement *vol1, *vol2;
int n;
/* Initialisation */
gst_init (&argc, &argv);
loop = g_main_loop_new (NULL, FALSE);
/* Check input arguments */
if (argc != 3) {
g_printerr ("Usage: %s file1 file2\n", argv[0]);
return -1;
}
/* Create gstreamer elements */
pipeline = gst_pipeline_new ("audio-player");
source1 = gst_element_factory_make ("uridecodebin", "uri-source1");
source2 = gst_element_factory_make ("uridecodebin", "uri-source2");
mixer = gst_element_factory_make ("adder", "audio-mix");
conv1 = gst_element_factory_make ("audioconvert", "conv1");
conv2 = gst_element_factory_make ("audioconvert", "conv2");
vol1 = gst_element_factory_make("volume", "vol1");
vol2 = gst_element_factory_make("volume", "vol2");
sink = gst_element_factory_make ("alsasink", "audio-output");
if (!pipeline || !source1 || !source2 || !mixer || !conv1 || !conv2 || !vol1 || !vol2 || !sink) {
g_printerr ("One element could not be created. Exiting.\n");
return -1;
}
/* Set up the pipeline */
/* we set the input filename to the source element */
g_object_set (G_OBJECT (source1), "uri", argv[1], NULL);
g_object_set (G_OBJECT (source2), "uri", argv[2], NULL);
g_object_set (G_OBJECT (mixer), "name", "mix", NULL);
g_object_set(G_OBJECT (vol1), "volume", 0.9, NULL);
g_object_set(G_OBJECT (vol2), "volume", 0.3, NULL);
/* we add a message handler */
bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
gst_object_unref (bus);
/* we add all elements into the pipeline */
gst_bin_add_many (GST_BIN (pipeline),
source1, conv1, vol1, mixer, sink, source2, conv2, vol2, NULL);
/* we link the elements together */
#if 0 //Doesnt work directly linking elements
g_print("link elements directly \n");
if((n=gst_element_link (source1, conv1)) == 0 ) {
g_print ("link1 error: %d\n",n);
g_print ("cannot link source1 with conv1\n");
}
if((n=gst_element_link (source2, conv2)) == 0) {
g_print ("link2 error: %d\n",n);
g_print ("cannot link source2 with conv2\n");
}
#else
g_print("use dynamic pads to link elements \n");
g_signal_connect (source1, "pad-added", G_CALLBACK (on_pad_added), conv1);
g_signal_connect (source2, "pad-added", G_CALLBACK (on_pad_added), conv2);
#endif
if((n=gst_element_link (conv1, vol1)) == 0) {
g_print ("link1 error: %d\n",n);
g_print ("cannot link conv1 with vol1\n");
}
if((n=gst_element_link (conv2, vol2)) == 0) {
g_print ("link2 error: %d\n",n);
g_print ("cannot link conv2 with vol2\n");
}
if((n = gst_element_link (mixer, sink)) == 0) {
g_print ("cannot link sink with mixer\n");
}
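/* adder exposes request sink pads named "sink_%u"; request one per input branch and link each volume element's src pad to it */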
vol1_pad= gst_element_get_static_pad (vol1, "src");
vol2_pad= gst_element_get_static_pad (vol2, "src");
adder_sinkpad1 = gst_element_get_request_pad (mixer, "sink_%u");
pad1name = gst_pad_get_name (adder_sinkpad1);
g_print ("pad1name: %s\n",pad1name );
adder_sinkpad2 = gst_element_get_request_pad (mixer, "sink_%u");
pad2name = gst_pad_get_name (adder_sinkpad2);
g_print ("pad2name: %s\n",pad2name );
int i,j;
if((i=gst_pad_link (vol1_pad, adder_sinkpad1)) != 0) {
g_print ("pad error: %d\n",i);
g_print ("cannot link conv1 with adder1\n");
}
if((j=gst_pad_link (vol2_pad, adder_sinkpad2))!= 0) {
g_print ("pad2 error: %d\n",j);
g_print ("cannot link conv2 with adder2\n");
}
/* Set the pipeline to "playing" state*/
g_print ("Now playing\n");
gst_element_set_state (pipeline, GST_STATE_PLAYING);
/* Iterate */
g_print ("Running...\n");
g_main_loop_run (loop);
/* Out of the main loop, clean up nicely */
g_print ("Returned, stopping playback\n");
gst_element_set_state (pipeline, GST_STATE_NULL);
g_print ("Deleting pipeline\n");
gst_object_unref (GST_OBJECT (pipeline));
g_source_remove (bus_watch_id);
g_main_loop_unref (loop);
return 0;
}
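To build and try it (the file paths below are placeholders; any two audio files that uridecodebin can decode will do):
gcc -Wall adder-test.c -o adder-test $(pkg-config --cflags --libs gstreamer-1.0)
./adder-test file:///path/to/first.wav file:///path/to/second.wav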

How to convert a Gstreamer program to stream video via udp into a Qt program?

I have a GStreamer program that streams video from a UDP source. I need to implement the program in Qt. Please let me know how I can do it.
The program I'm using is shown below.
#include <gst/gst.h>
#include <stdio.h>
#include <stdlib.h>
GstElement *pipeline,
*source,
*decoder,
*video_sink,
*text,
*audio_sink;
static gboolean
bus_call (GstBus *bus,
GstMessage *msg,
gpointer data)
{
GMainLoop *loop = (GMainLoop *) data;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_EOS:
g_print ("End-of-stream\n");
g_main_loop_quit (loop);
break;
case GST_MESSAGE_ERROR:
{
gchar *debug;
GError *err;
gst_message_parse_error (msg, &err, &debug);
g_free (debug);
g_print ("Error: %s\n", err->message);
g_error_free (err);
g_main_loop_quit (loop);
break;
}
default:
break;
}
return TRUE;
}
static void
new_pad (GstElement *element,
GstPad *pad,
gpointer data)
{
GstPad *sinkpad = NULL;
const gchar *mime;
GstCaps *caps;
// get capabilities
caps = gst_pad_get_caps (pad);
// get mime type
mime = gst_structure_get_name (gst_caps_get_structure (caps, 0));
g_print ("Dynamic pad %s:%s created with mime-type %s\n", GST_OBJECT_NAME (element), GST_OBJECT_NAME (pad), mime);
if (g_strrstr (mime, "video"))
{
g_print ("Linking video...\n");
sinkpad = gst_element_get_static_pad (text, "video_sink");
}
if (g_strrstr (mime, "audio"))
{
g_print ("Linking audio...\n");
sinkpad = gst_element_get_static_pad (audio_sink, "sink");
}
if(sinkpad!=NULL)
{
// link
gst_pad_link (pad, sinkpad);
gst_object_unref (sinkpad);
}
gst_caps_unref (caps);
}
int main (int argc, char *argv[])
{
GMainLoop *loop;
GstBus *bus;
// initialize GStreamer
gst_init (&argc, &argv);
printf("step 0\n");
loop = g_main_loop_new (NULL, FALSE);
/* check input arguments
if (argc != 2)
{
g_print ("Usage: %s <filename>\n", argv[0]);
return -1;
}*/
// argv[1]="http://192.168.0.247:1234/Documents/6.mpg";
//"udp://192.168.0.247:1234";
//"/home/quarkgluon/Documents/rajesh/gstreamer/Serenity.mp4";
printf("step 1\n");
// create elements
pipeline = gst_pipeline_new ("video-player");
source = gst_element_factory_make ("udpsrc", "source");
decoder = gst_element_factory_make ("decodebin2", "decoder");
text = gst_element_factory_make ("textoverlay", "text");
video_sink = gst_element_factory_make ("xvimagesink", "vsink");
audio_sink = gst_element_factory_make ("alsasink", "asink");
if (!pipeline || !source || !decoder || !video_sink || !text || !audio_sink)
{
g_print ("One element could not be created\n");
return -1;
}
// set filename property on the file source. Also add a message
// handler.
g_object_set (G_OBJECT (source),"port",1234, NULL);
// g_object_set (G_OBJECT (text), "text", "hello world awertyuiop!!!", NULL);
// g_object_set (G_OBJECT (text), "italic", 1, NULL);
//g_object_set (G_OBJECT (text), "bold", 1, NULL);
bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
gst_bus_add_watch (bus, bus_call, loop);
gst_object_unref (bus);
// put all elements in a bin
gst_bin_add_many (GST_BIN (pipeline), source, decoder, video_sink, text, audio_sink, NULL);
// link together - note that we cannot link the decoder and
// sink yet, because the decoder uses dynamic pads. For that,
// we set a pad-added signal handler.
gst_element_link (source, decoder);
gst_element_link (text, video_sink);
g_signal_connect (decoder, "pad-added", G_CALLBACK (new_pad), NULL);
printf("step 2\n");
// Now set to playing and iterate.
g_print ("Setting to PLAYING\n");
gst_element_set_state (pipeline, GST_STATE_PLAYING);
g_print ("Running\n");
g_main_loop_run (loop);
// clean up nicely
g_print ("Returned, stopping playback\n");
gst_element_set_state (pipeline, GST_STATE_NULL);
g_print ("Deleting pipeline\n");
gst_object_unref (GST_OBJECT (pipeline));
return 0;
}
Have a look at qt-gstreamer, the Qt bindings for GStreamer: http://cgit.freedesktop.org/gstreamer/qt-gstreamer/