GStreamer C++ pipeline: replacing filesink with ximagesink fails with "Internal error: can't allocate images"

I have the following example code, which works correctly. It uses filesink as the sink:
/* GStreamer
*
* appsink-snoop.c: example for modify data in video pipeline
* using appsink and appsrc.
*
* Based on the appsink-src.c example
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
#include <gst/gst.h>
#include <string.h>
#include <gst/app/gstappsrc.h>
#include <gst/app/gstappsink.h>
typedef struct
{
  GMainLoop *loop;
  GstElement *source;
  GstElement *sink;
} ProgramData;

/* user modifies the data here */
static void
modify_in_data (GstMapInfo * map)
{
  gsize dataLength = map->size;
  guint8 *rdata = map->data;

  g_print ("%s dataLen = %" G_GSIZE_FORMAT "\n", __func__, dataLength);

  /* Paint the first half of the frame plain white */
  for (gsize i = 0; i < dataLength / 2; i++)
    rdata[i] = 0xff;
}
/* called when the appsink notifies us that there is a new buffer ready for
 * processing */
static GstFlowReturn
on_new_sample_from_sink (GstElement * elt, ProgramData * data)
{
  GstSample *sample;
  GstBuffer *app_buffer, *buffer;
  GstElement *source;
  GstFlowReturn ret;
  GstMapInfo map;

  g_print ("%s\n", __func__);

  /* get the sample from appsink */
  sample = gst_app_sink_pull_sample (GST_APP_SINK (elt));
  buffer = gst_sample_get_buffer (sample);

  /* make a writable copy and modify it */
  app_buffer = gst_buffer_copy_deep (buffer);
  gst_buffer_map (app_buffer, &map, GST_MAP_WRITE);
  modify_in_data (&map);
  /* unmap before handing the buffer off: push_buffer takes ownership */
  gst_buffer_unmap (app_buffer, &map);

  /* we don't need the appsink sample anymore */
  gst_sample_unref (sample);

  /* get the appsrc and push the new buffer */
  source = gst_bin_get_by_name (GST_BIN (data->sink), "testsource");
  ret = gst_app_src_push_buffer (GST_APP_SRC (source), app_buffer);
  gst_object_unref (source);

  return ret;
}
/* called when we get a GstMessage from the source pipeline when we get EOS, we
 * notify the appsrc of it. */
static gboolean
on_source_message (GstBus * bus, GstMessage * message, ProgramData * data)
{
  GstElement *source;

  g_print ("%s\n", __func__);

  switch (GST_MESSAGE_TYPE (message)) {
    case GST_MESSAGE_EOS:
      g_print ("The source got dry\n");
      source = gst_bin_get_by_name (GST_BIN (data->sink), "testsource");
      gst_app_src_end_of_stream (GST_APP_SRC (source));
      gst_object_unref (source);
      break;
    case GST_MESSAGE_ERROR:
      g_print ("Received error\n");
      g_main_loop_quit (data->loop);
      break;
    default:
      break;
  }
  return TRUE;
}
/* called when we get a GstMessage from the sink pipeline when we get EOS, we
 * exit the mainloop and this testapp. */
static gboolean
on_sink_message (GstBus * bus, GstMessage * message, ProgramData * data)
{
  /* nil */
  g_print ("%s\n", __func__);

  switch (GST_MESSAGE_TYPE (message)) {
    case GST_MESSAGE_EOS:
      g_print ("Finished playback\n");
      g_main_loop_quit (data->loop);
      break;
    case GST_MESSAGE_ERROR:
      g_print ("Received error\n");
      g_main_loop_quit (data->loop);
      break;
    default:
      break;
  }
  return TRUE;
}
int
main (int argc, char *argv[])
{
  ProgramData *data = NULL;
  gchar *string = NULL;
  GstBus *bus = NULL;
  GstElement *testsink = NULL;
  GstElement *testsource = NULL;

  gst_init (&argc, &argv);

  data = g_new0 (ProgramData, 1);
  data->loop = g_main_loop_new (NULL, FALSE);

  /* setting up source pipeline, we generate test video and convert it to our
   * desired caps. */
  string =
      g_strdup_printf
      ("videotestsrc num-buffers=5 ! video/x-raw, width=640, height=480, format=RGB ! appsink name=testsink");
  data->source = gst_parse_launch (string, NULL);
  g_free (string);
  if (data->source == NULL) {
    g_print ("Bad source\n");
    g_main_loop_unref (data->loop);
    g_free (data);
    return -1;
  }
  g_print ("Capture bin launched\n");

  /* to be notified of messages from this pipeline, mostly EOS */
  bus = gst_element_get_bus (data->source);
  gst_bus_add_watch (bus, (GstBusFunc) on_source_message, data);
  gst_object_unref (bus);

  /* we use appsink in push mode, it sends us a signal when data is available
   * and we pull out the data in the signal callback. */
  testsink = gst_bin_get_by_name (GST_BIN (data->source), "testsink");
  g_object_set (G_OBJECT (testsink), "emit-signals", TRUE, "sync", FALSE, NULL);
  g_signal_connect (testsink, "new-sample",
      G_CALLBACK (on_new_sample_from_sink), data);
  gst_object_unref (testsink);

  /* setting up sink pipeline, we push video data into this pipeline that will
   * then write it to a file using the default filesink. We have no blocking
   * behaviour on the src which means that we will push the entire file into
   * memory. */
  string =
      g_strdup_printf ("appsrc name=testsource ! filesink location=tmp.yuv");
  data->sink = gst_parse_launch (string, NULL);
  g_free (string);
  if (data->sink == NULL) {
    g_print ("Bad sink\n");
    gst_object_unref (data->source);
    g_main_loop_unref (data->loop);
    g_free (data);
    return -1;
  }
  g_print ("Play bin launched\n");

  testsource = gst_bin_get_by_name (GST_BIN (data->sink), "testsource");
  /* configure for time-based format */
  g_object_set (testsource, "format", GST_FORMAT_TIME, NULL);
  /* uncomment the next line to block when appsrc has buffered enough */
  /* g_object_set (testsource, "block", TRUE, NULL); */
  gst_object_unref (testsource);

  bus = gst_element_get_bus (data->sink);
  gst_bus_add_watch (bus, (GstBusFunc) on_sink_message, data);
  gst_object_unref (bus);

  g_print ("Going to set state to play\n");

  /* launching things */
  gst_element_set_state (data->sink, GST_STATE_PLAYING);
  gst_element_set_state (data->source, GST_STATE_PLAYING);

  /* let's run! this loop will quit when the sink pipeline goes EOS or when an
   * error occurs in the source or sink pipelines. */
  g_print ("Let's run!\n");
  g_main_loop_run (data->loop);
  g_print ("Going out\n");

  gst_element_set_state (data->source, GST_STATE_NULL);
  gst_element_set_state (data->sink, GST_STATE_NULL);

  gst_object_unref (data->source);
  gst_object_unref (data->sink);
  g_main_loop_unref (data->loop);
  g_free (data);
  return 0;
}
But when I replace "filesink" with "ximagesink" or "autovideosink", I get this error:
on_sink_message
Received error on_sink_message
Error: Internal error: can't allocate images
Going out
segfault
I'm trying to take some sample video (videotestsrc, or an RTSP stream from the internet), modify it, and then show it in a window.
Any ideas?
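One thing worth checking (an assumption, not a verified fix): filesink accepts arbitrary bytes, but ximagesink has to negotiate a raw-video format before it can allocate images, and the appsrc above never advertises any caps. A sketch of how the sink-pipeline setup in main() might change, with a videoconvert in front of the window sink to cover format differences:

  /* Sketch (untested assumption): describe the data we actually push, the
   * RGB 640x480 frames from the source pipeline, on the appsrc, and let
   * videoconvert adapt them to whatever ximagesink accepts. */
  string =
      g_strdup_printf ("appsrc name=testsource ! videoconvert ! ximagesink");
  data->sink = gst_parse_launch (string, NULL);
  g_free (string);

  testsource = gst_bin_get_by_name (GST_BIN (data->sink), "testsource");
  GstCaps *caps = gst_caps_new_simple ("video/x-raw",
      "format", G_TYPE_STRING, "RGB",
      "width", G_TYPE_INT, 640,
      "height", G_TYPE_INT, 480,
      "framerate", GST_TYPE_FRACTION, 30, 1,   /* videotestsrc default (assumed) */
      NULL);
  g_object_set (testsource, "caps", caps, "format", GST_FORMAT_TIME, NULL);
  gst_caps_unref (caps);
  gst_object_unref (testsource);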

Related

Why won't the GStreamer pipeline play if I add a udpsink plugin to it?

I tried to send an RTP stream with GStreamer, but I found that the pipeline won't play at all. When I simplified my code, I found that if the udpsink plugin is added to the pipeline, the pipeline blocks and its state stays READY.
My code:
#include <gst/gst.h>
int main(int argc, char *argv[]) {
GstElement *pipeline, *source, *sink, *udp, *convert;
GstBus *bus;
GstMessage *msg;
GstStateChangeReturn ret;
gboolean terminate = FALSE;
/* Initialize GStreamer */
gst_init (&argc, &argv);
/* Create the elements */
source = gst_element_factory_make ("videotestsrc", "source");
convert = gst_element_factory_make ("videoconvert", "convert");
sink = gst_element_factory_make ("autovideosink", "sink");
udp = gst_element_factory_make ("udpsink", "udp");
/* Create the empty pipeline */
pipeline = gst_pipeline_new ("test-pipeline");
/* Build the pipeline */
gst_bin_add_many (GST_BIN (pipeline), source, sink, convert, /*udp,*/ NULL);
gst_element_link_many (source, convert, sink, NULL);
/* Modify the source's properties */
g_object_set (source, "pattern", 0, NULL);
/* Start playing */
ret = gst_element_set_state (pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr ("Unable to set the pipeline to the playing state.\n");
gst_object_unref (pipeline);
return -1;
}
bus = gst_element_get_bus (pipeline);
do {
msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE, GST_MESSAGE_ERROR | GST_MESSAGE_STATE_CHANGED | GST_MESSAGE_EOS);
/* Parse message */
if (msg != NULL) {
GError *err;
gchar *debug_info;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_ERROR:
case GST_MESSAGE_EOS:
terminate = TRUE;
break;
case GST_MESSAGE_STATE_CHANGED:
if (GST_MESSAGE_SRC (msg) == GST_OBJECT (pipeline)) {
GstState old_state, new_state, pending_state;
gst_message_parse_state_changed (msg, &old_state, &new_state, &pending_state);
g_print ("Pipeline state changed from %s to %s\n", gst_element_state_get_name (old_state), gst_element_state_get_name (new_state));
}
break;
default:
/* We should not reach here because we only asked for ERRORs and EOS */
g_printerr ("Unexpected message received.\n");
break;
}
gst_message_unref (msg);
}
} while (!terminate);
/* Free resources */
// ...
}
As you can see, the pipeline works fine if the udpsink is not added. The same thing happens on the command line:
gst-launch-1.0 -v udpsink videotestsrc ! videoconvert ! autovideosink
The command above pops up a window and the video freezes on the first frame.
I don't know what's wrong with my code. Can anyone help me? Thanks!
A pipeline branch ends with a sink element (e.g. autovideosink); you cannot add anything after it. That is why autovideosink has no src pad and you cannot link it to udpsink. You need a second branch that feeds udpsink; to create such branches you can use the tee and queue elements, as in the sketch below.
You can find more here (GStreamer Basic tutorial 7: Multithreading and Pad Availability).
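A minimal sketch of that tee/queue layout (the x264enc/rtph264pay pair on the UDP branch is an illustrative choice, and host/port are placeholders):

#include <gst/gst.h>

int
main (int argc, char *argv[])
{
  GstElement *pipeline;
  GstBus *bus;
  GstMessage *msg;

  gst_init (&argc, &argv);

  /* tee splits the stream; each branch needs its own queue */
  pipeline = gst_parse_launch (
      "videotestsrc ! videoconvert ! tee name=t "
      "t. ! queue ! autovideosink "
      "t. ! queue ! x264enc tune=zerolatency ! rtph264pay ! "
      "udpsink host=127.0.0.1 port=5000", NULL);

  gst_element_set_state (pipeline, GST_STATE_PLAYING);

  /* block until an error or EOS, then clean up */
  bus = gst_element_get_bus (pipeline);
  msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,
      GST_MESSAGE_ERROR | GST_MESSAGE_EOS);
  if (msg != NULL)
    gst_message_unref (msg);
  gst_object_unref (bus);
  gst_element_set_state (pipeline, GST_STATE_NULL);
  gst_object_unref (pipeline);
  return 0;
}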

How to feed an OpenCV frame into an Nvidia DeepStream pipeline?

I am struggling to find a way to feed a single cv::Mat frame into an Nvidia DeepStream pipeline using C++. I tried the code below, but I received the following error message:
ERROR from element gstappsrc: Internal data stream error.
Error details: gstbasesrc.c(3055): gst_base_src_loop (): /GstPipeline:dst_opencv/GstAppSrc:source:
streaming stopped, reason not-negotiated (-4)
Returned, stopping playback
Deleting pipeline
If anyone has an idea how to do this, or can show me where I am going wrong, I would be very thankful.
#include <gst/gst.h>
#include <glib.h>
#include <math.h>
#include <string.h>
#include <sys/time.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "gstnvdsmeta.h"
#include "nvdsmeta_schema.h"
#include <gst/app/gstappsrc.h>
/* The muxer output resolution must be set if the input streams will be of
* different resolution. The muxer will scale all the input frames to this
* resolution. */
#define MUXER_OUTPUT_WIDTH 1920
#define MUXER_OUTPUT_HEIGHT 1080
#define TILED_OUTPUT_WIDTH 1920
#define TILED_OUTPUT_HEIGHT 1080
/* Muxer batch formation timeout, for e.g. 40 millisec. Should ideally be set
* based on the fastest source's framerate. */
#define MUXER_BATCH_TIMEOUT_USEC 4000000
/* NVIDIA Decoder source pad memory feature. This feature signifies that source
* pads having this capability will push GstBuffers containing cuda buffers. */
#define GST_CAPS_FEATURES_NVMM "memory:NVMM"
// detection models
#define MODEL_CONFIG "dstest3_pgie_config.txt"
//#define MODEL_CONFIG "yoloV2_pgie_config.txt"
//#define MODEL_CONFIG "fd_lpd_config.txt"
#define FPS_PRINT_INTERVAL 300
static gboolean bus_call (GstBus * bus, GstMessage * msg, gpointer data)
{
GMainLoop *loop = (GMainLoop *) data;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_EOS:
g_print ("End of stream\n");
g_main_loop_quit (loop);
break;
case GST_MESSAGE_WARNING:
{
gchar *debug;
GError *error;
gst_message_parse_warning (msg, &error, &debug);
g_printerr ("WARNING from element %s: %s\n",
GST_OBJECT_NAME (msg->src), error->message);
g_free (debug);
g_printerr ("Warning: %s\n", error->message);
g_error_free (error);
break;
}
case GST_MESSAGE_ERROR:
{
gchar *debug;
GError *error;
gst_message_parse_error (msg, &error, &debug);
g_printerr ("ERROR from element %s: %s\n",
GST_OBJECT_NAME (msg->src), error->message);
if (debug)
g_printerr ("Error details: %s\n", debug);
g_free (debug);
g_error_free (error);
g_main_loop_quit (loop);
break;
}
default:
break;
}
return TRUE;
}
//-------------------------------------------------------
static void cb_newpad (GstElement * decodebin, GstPad * decoder_src_pad, gpointer data)
{
g_print ("In cb_newpad\n");
GstCaps *caps = gst_pad_get_current_caps (decoder_src_pad);
const GstStructure *str = gst_caps_get_structure (caps, 0);
const gchar *name = gst_structure_get_name (str);
GstElement *source_bin = (GstElement *) data;
GstCapsFeatures *features = gst_caps_get_features (caps, 0);
/* Need to check if the pad created by the decodebin is for video and not
* audio. */
if (!strncmp (name, "video", 5)) {
/* Link the decodebin pad only if decodebin has picked nvidia
* decoder plugin nvdec_*. We do this by checking if the pad caps contain
* NVMM memory features. */
if (gst_caps_features_contains (features, GST_CAPS_FEATURES_NVMM)) {
/* Get the source bin ghost pad */
GstPad *bin_ghost_pad = gst_element_get_static_pad (source_bin, "src");
if (!gst_ghost_pad_set_target (GST_GHOST_PAD (bin_ghost_pad),
decoder_src_pad)) {
g_printerr ("Failed to link decoder src pad to source bin ghost pad\n");
}
gst_object_unref (bin_ghost_pad);
} else {
g_printerr ("Error: Decodebin did not pick nvidia decoder plugin.\n");
}
}
}
//-------------------------------------------------------
static void decodebin_child_added (GstChildProxy * child_proxy, GObject * object,gchar * name, gpointer user_data)
{
g_print ("Decodebin child added: %s\n", name);
if (g_strrstr (name, "decodebin") == name) {
g_signal_connect (G_OBJECT (object), "child-added",
G_CALLBACK (decodebin_child_added), user_data);
}
if (g_strstr_len (name, -1, "nvv4l2decoder") == name) {
g_print ("Seting bufapi_version\n");
g_object_set (object, "bufapi-version", TRUE, NULL);
}
}
//-------------------------------------------------------
void buffer_destroy(gpointer data) {cv::Mat* done = (cv::Mat*)data; delete done;}
//-----------------------------------------------------
static gboolean cb_need_data(GstElement* appsrc,guint unused_size,gpointer user_data)
{
g_print("cb_need_data function \n");
GstBuffer* buffer;
GstMapInfo map;
guint size,depth,height,width,step,channels;
GstFlowReturn ret;
guchar *data1;
g_print("userdata: %s \n",user_data);
cv::Mat frame=cv::imread((const char*)user_data, CV_LOAD_IMAGE_COLOR);
height = frame.size().height;
width = frame.size().width;
channels = frame.channels();
data1 = (guchar *)frame.data;
gsize sizeInBytes = height*width*channels;
g_print("frame_height: %d \n",height);
g_print("frame_width: %d \n",width);
g_print("frame_channels: %d \n",channels);
g_print("frame_size: %d \n",sizeInBytes);
buffer=gst_buffer_new_allocate(NULL,sizeInBytes,NULL);
gst_buffer_map(buffer,&map,GST_MAP_WRITE);
memcpy( (guchar *)map.data, data1, gst_buffer_get_size( buffer ) );
g_signal_emit_by_name (appsrc, "push-buffer", buffer, &ret);
if (ret != GST_FLOW_OK) {g_print("cv 2 gst got an error"); return false;}
gst_buffer_unref(buffer);
//gst_buffer_unmap (buffer, &map);
g_print("cv converted to gst \n ");
return true;
}
//-------------------------------------------------------
static GstPadProbeReturn tiler_src_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info,gpointer u_data)
{
char *msg;
g_object_get(G_OBJECT(u_data),"last-message",&msg,NULL);
if (msg!=NULL) {g_print("FPS =%s \n",msg);}
return GST_PAD_PROBE_OK;
}
//-------------------------------------------------------
//------------------MAIN---------------------------------
//-------------------------------------------------------
int main(int argc,char** argv)
{
GMainLoop *loop;
GstElement *pipeline,*sink,*tiler,*nvvidconv,*nvosd,*nvsink,*pgie; //,*streammux
GstElement* appsrc,*conv;
GstBus *bus;
guint bus_watch_id;
GstPad *tiler_src_pad;
guint num_sources;
guint tiler_rows,tiler_columns;
guint pgie_batch_size;
GstCaps *caps;
//check input args
if(argc <2) {g_printerr("Usage: %s <uri1> [uri2] ... [uriN] \n", argv[0]); return -1;}
num_sources=argc-1;
//start gstreamer
gst_init(&argc,&argv);
loop=g_main_loop_new(NULL,FALSE);
//Creating pipeline
pipeline=gst_pipeline_new("dst_opencv");
//streammux=gst_element_factory_make("nvstreammux","nvstream-muxer");
if(!pipeline){g_printerr("pipeline could not be created");}
//if(!streammux){g_printerr("Streammux could not be created");}
//gst_bin_add(GST_BIN(pipeline),streammux);
// Creating bin with all sources
appsrc=gst_element_factory_make("appsrc","gstappsrc");
conv=gst_element_factory_make("videoconvert","conv");
g_object_set (G_OBJECT (appsrc), "caps",
gst_caps_new_simple ("video/x-raw",
"format", G_TYPE_STRING, "I420",
"width", G_TYPE_INT, 1200,
"height", G_TYPE_INT, 600,
"pixel-aspect-ratio", GST_TYPE_FRACTION, 1, 1,
NULL), NULL);
g_object_set(G_OBJECT(appsrc),"stream-type",0,"format",GST_FORMAT_TIME,NULL);
/* Use nvinfer to infer on batched frame. */
pgie = gst_element_factory_make ("nvinfer", "primary-nvinference-engine");
/* Use nvtiler to composite the batched frames into a 2D tiled array based
* on the source of the frames. */
tiler = gst_element_factory_make ("nvmultistreamtiler", "nvtiler");
nvvidconv=gst_element_factory_make ("nvvideoconvert","nvvideo-converter");
/* Use convertor to convert from NV12 to RGBA as required by nvosd */
// nvvidconv = gst_element_factory_make ("nvvideoconvert", "nvvideo-converter");
nvosd=gst_element_factory_make("nvdsosd","nv-onscreendisplay");
nvsink=gst_element_factory_make ("nveglglessink", "nvvideo-renderer"); //show on display
//nvsink=gst_element_factory_make("fakesink","nvvideo-render"); //Dont show frames on screen
sink=gst_element_factory_make("fpsdisplaysink","fps_display");
//sink=gst_element_factory_make("autovideosink","videosink");
//check if all plugin were created
if(!appsrc){g_printerr("appsrc could not be created"); return -1;}
if(!conv){g_printerr("conv could not be created"); return -1;}
if(!tiler){g_printerr("tiler could not be created"); return -1;}
if(!sink){g_printerr("sink could not be created"); return -1;}
if(!nvvidconv){g_printerr("nvvidconv could not be created"); return -1;}
if(!pgie){g_printerr("pgie could not be created"); return -1;}
if(!nvosd){g_printerr("nvosd could not be created"); return -1;}
//set streammux
/* Configure the nvinfer element using the nvinfer config file. */
g_object_set (G_OBJECT (pgie),"config-file-path", MODEL_CONFIG, NULL);
/* Override the batch-size set in the config file with the number of sources. */
g_object_get (G_OBJECT (pgie), "batch-size", &pgie_batch_size, NULL);
if (pgie_batch_size != num_sources) {
g_printerr("WARNING: Overriding infer-config batch-size (%d) with number of sources (%d)\n",pgie_batch_size, num_sources);
g_object_set (G_OBJECT (pgie), "batch-size", num_sources, NULL);}
//g_print("Flag \n");
//set tiler
tiler_rows = (guint) sqrt (num_sources);
tiler_columns = (guint) ceil (1.0 * num_sources / tiler_rows);
/* we set the tiler properties here */
g_object_set (G_OBJECT (tiler), "rows", tiler_rows, "columns", tiler_columns,
"width", TILED_OUTPUT_WIDTH, "height", TILED_OUTPUT_HEIGHT, NULL);
/* we add a message handler */
bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
gst_object_unref (bus);
//set fps sink
g_object_set (G_OBJECT (sink), "text-overlay", FALSE, "video-sink", nvsink, "sync", FALSE, NULL);
//linking all elements
gst_bin_add_many(GST_BIN(pipeline),appsrc,conv,pgie,tiler,nvvidconv,nvosd,sink,NULL);
if (!gst_element_link_many(appsrc,conv,pgie,tiler,nvvidconv,nvosd,sink,NULL)){g_printerr("Elements could not be linked"); return -1;}
tiler_src_pad = gst_element_get_static_pad (pgie, "src");
if (!tiler_src_pad) {g_print ("Unable to get src pad\n");}
else{gst_pad_add_probe (tiler_src_pad, GST_PAD_PROBE_TYPE_BUFFER,tiler_src_pad_buffer_probe, (gpointer)sink, NULL);}
g_signal_connect (appsrc, "need-data", G_CALLBACK (cb_need_data),(gpointer)argv[1]);
/* Set the pipeline to "playing" state */
g_print ("Now playing:");
for (int i = 0; i < num_sources; i++) {g_print (" %s,", argv[i + 1]);}
g_print ("\n");
gst_element_set_state (pipeline, GST_STATE_PLAYING);
/* Wait till pipeline encounters an error or EOS */
g_print ("Running...\n");
g_main_loop_run (loop);
/* Out of the main loop, clean up nicely */
g_print ("Returned, stopping playback\n");
gst_element_set_state (pipeline, GST_STATE_NULL);
g_print ("Deleting pipeline\n");
gst_object_unref (GST_OBJECT (pipeline));
g_source_remove (bus_watch_id);
g_main_loop_unref (loop);
return 0;
}
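One thing to check (an assumption; no answer is recorded here): the not-negotiated error often means the caps do not describe the pushed data. The appsrc caps above declare I420 at 1200x600, while cb_need_data pushes the BGR bytes that cv::imread produces, at whatever size the image actually is. A sketch of caps that describe an 8-bit BGR cv::Mat instead (the 1200x600 values remain placeholders and must match frame.size()):

/* Sketch (assumption): make the appsrc caps match the buffer actually
 * pushed, an 8-bit BGR cv::Mat, instead of I420. */
g_object_set (G_OBJECT (appsrc), "caps",
    gst_caps_new_simple ("video/x-raw",
        "format", G_TYPE_STRING, "BGR",
        "width", G_TYPE_INT, 1200,      /* placeholder: must equal frame width */
        "height", G_TYPE_INT, 600,      /* placeholder: must equal frame height */
        "framerate", GST_TYPE_FRACTION, 30, 1,
        NULL), NULL);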

GStreamer FLAC pipeline creation error

I keep getting this error:
$ ./test recit24bit.flac
Now playing: recit24bit.flac
Running...
Error: Internal data flow error.
Returned, stopping playback
Deleting pipeline
When running this code:
#include <gst/gst.h>
#include <glib.h>
static gboolean bus_call (GstBus *bus,
GstMessage *msg,
gpointer data)
{
GMainLoop *loop = (GMainLoop *) data;
switch (GST_MESSAGE_TYPE (msg))
{
case GST_MESSAGE_EOS:
{
g_print ("End of stream\n");
g_main_loop_quit (loop);
break;
}
case GST_MESSAGE_ERROR:
{
gchar *debug;
GError *error;
gst_message_parse_error (msg, &error, &debug);
g_free (debug);
g_printerr ("Error: %s\n", error->message);
g_error_free (error);
g_main_loop_quit (loop);
break;
}
default:
{
break;
}
}
return TRUE;
}
/*
static void on_pad_added (GstElement *element,
GstPad *pad,
gpointer data)
{
GstPad *sinkpad;
GstElement *decoder = (GstElement *) data;
// We can now link this pad with the vorbis-decoder sink pad
g_print ("Dynamic pad created, linking demuxer/decoder\n");
sinkpad = gst_element_get_static_pad (decoder, "sink");
gst_pad_link (pad, sinkpad);
gst_object_unref (sinkpad);
}
*/
int main (int argc,
char *argv[])
{
GMainLoop *loop;
GstElement *pipeline,
*source,
//*demuxer,
*decoder,
*conv,
*sink;
GstBus *bus;
guint bus_watch_id;
/* Initialisation */
gst_init (&argc, &argv);
loop = g_main_loop_new (NULL, FALSE);
/* Check input arguments */
if (argc != 2)
{
g_printerr ("Usage: %s <Flac filename>\n", argv[0]);
return -1;
}
/* Create gstreamer elements */
pipeline = gst_pipeline_new ("audio-player");
source = gst_element_factory_make ("filesrc", "file-source");
//demuxer = gst_element_factory_make ("oggdemux", "ogg-demuxer");
decoder = gst_element_factory_make ("flacdec", "flac-decoder");
conv = gst_element_factory_make ("audioconvert", "converter");
sink = gst_element_factory_make ("alsasink", "audio-output");
if (!pipeline || !source ||/* !demuxer ||*/ !decoder ||/* !conv ||*/ !sink)
{
g_printerr ("One element could not be created. Exiting.\n");
return -1;
}
/* Set up the pipeline */
/* we set the input filename to the source element */
g_object_set (G_OBJECT (source), "location", argv[1], NULL);
/* we add a message handler */
bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
gst_object_unref (bus);
/* we add all elements into the pipeline */
/* file-source | ogg-demuxer | vorbis-decoder | converter | alsa-output */
gst_bin_add_many (GST_BIN (pipeline), source,/* demuxer,*/ decoder, conv, sink, NULL);
/* we link the elements together */
/* file-source -> ogg-demuxer ~> vorbis-decoder -> converter -> alsa-output */
//gst_element_link (source, demuxer);
gst_element_link_many (source, decoder, conv, sink, NULL);
// g_signal_connect (demuxer, "pad-added", G_CALLBACK (on_pad_added), decoder);
/* note that the demuxer will be linked to the decoder dynamically.
The reason is that Ogg may contain various streams (for example
audio and video). The source pad(s) will be created at run time,
by the demuxer when it detects the amount and nature of streams.
Therefore we connect a callback function which will be executed
when the "pad-added" is emitted.*/
/* Set the pipeline to "playing" state*/
g_print ("Now playing: %s\n", argv[1]);
gst_element_set_state (pipeline, GST_STATE_PLAYING);
/* Iterate */
g_print ("Running...\n");
g_main_loop_run (loop);
/* Out of the main loop, clean up nicely */
g_print ("Returned, stopping playback\n");
gst_element_set_state (pipeline, GST_STATE_NULL);
g_print ("Deleting pipeline\n");
gst_object_unref (GST_OBJECT (pipeline));
g_source_remove (bus_watch_id);
g_main_loop_unref (loop);
return 0;
}
I'm using this to compile it successfully:
g++ -Wall test-flac.cc -o test $(pkg-config --cflags --libs gstreamer-1.0)
I'm using Arch, if that means anything. Does anybody have some advice? I'm a pretty big noob, but I don't understand what I'm doing wrong, because it seems like it should work.
I just needed to replace the demuxer with a parser, which is (apparently) necessary. Derp. I used flacparse, of course.
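For reference, a minimal sketch of the corrected chain, using gst_parse_launch for brevity (audioresample is added here in case alsasink rejects the file's sample rate):

#include <gst/gst.h>

int
main (int argc, char *argv[])
{
  GstElement *pipeline;
  GstBus *bus;
  GstMessage *msg;
  gchar *desc;

  gst_init (&argc, &argv);
  if (argc != 2) {
    g_printerr ("Usage: %s <Flac filename>\n", argv[0]);
    return -1;
  }

  /* flacparse sits between the file source and the decoder */
  desc = g_strdup_printf ("filesrc location=\"%s\" ! flacparse ! flacdec ! "
      "audioconvert ! audioresample ! alsasink", argv[1]);
  pipeline = gst_parse_launch (desc, NULL);
  g_free (desc);

  gst_element_set_state (pipeline, GST_STATE_PLAYING);
  bus = gst_element_get_bus (pipeline);
  msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,
      GST_MESSAGE_ERROR | GST_MESSAGE_EOS);
  if (msg != NULL)
    gst_message_unref (msg);
  gst_object_unref (bus);
  gst_element_set_state (pipeline, GST_STATE_NULL);
  gst_object_unref (pipeline);
  return 0;
}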

Playing an .AVI file with the GStreamer SDK in Windows

I want to play an .AVI file using GStreamer in Windows 7. The GStreamer SDK was installed as given in this link. Then a GStreamer SDK project was created, and the following code was added to a C file as given in this link, with the suggested corrections. Project properties -> Configurations properties -> Debugging -> Working directory was changed to "$(GSTREAMER_SDK_ROOT_X86)\bin", and the same was added to the Windows PATH variable, as suggested in the installation link given above. When I run the code, it just exits without playing the video; the last few lines of the output are given below. Please note that I have installed the 32-bit GStreamer SDK on 64-bit Windows 7.
Code:
#include<stdio.h>
#include<gst/gst.h>
#include<glib.h>
//Function to process message on bus of pipeline
gboolean process_message(GstBus *bus, GstMessage *msg,gpointer data);
//Function to add pad dynamically for ogg demux
void dynamic_addpad(GstElement *element, GstPad *pad, gpointer data);
void dynamic_decodepad (GstElement* object, GstPad* arg0, gboolean arg1,gpointer user_data);
GstElement *source, *demuxer, *audio_decoder, *video_decoder, *audio_convertor,*video_convertor, *audio_sink,*video_sink,*audioqueue,*videoqueue;//*audio_demuxer, *video_demuxer,
int main(int argc,char* argv[]){
GstPipeline *pipeline;
GstBin *Bin;
GstBus *bus;
GMainLoop *Mainloop;
gst_init (&argc,&argv);
Mainloop = g_main_loop_new(NULL,FALSE);//NULL to use the current context and False to tell its not in running state
GstElement *pipeline = gst_pipeline_new("PIPELINE");
Bin = GST_BIN(pipeline);
bus = gst_pipeline_get_bus(GST_PIPELINE(pipeline));
source = gst_element_factory_make("filesrc","file-source");
g_object_set(G_OBJECT(source), "location", "file:///C:/Video.avi", NULL);
demuxer = gst_element_factory_make("avidemux","avi-demuxer");
audioqueue = gst_element_factory_make("queue","Queue for audio");
videoqueue = gst_element_factory_make("queue","Queue for video");
audio_decoder = gst_element_factory_make("decodebin","a_decodebin");
video_decoder = gst_element_factory_make("decodebin","decoderbin");//"Vorbis audio decoder","vorbis");
audio_convertor = gst_element_factory_make("audioconvert","audio convertor");//"Audio converter","audioconvert");
video_convertor = gst_element_factory_make("videoscale","video convertor");//"Audio converter","audioconvert");
audio_sink = gst_element_factory_make("autoaudiosink","Auto audio sink");
video_sink = gst_element_factory_make("xvimagesink","XV video sink ");
if(!source || !demuxer || !audioqueue || !videoqueue || !video_decoder ||!audio_convertor || ! video_convertor || !audio_sink || !video_sink ){
g_print("Could not not create element\n");
return 0;
}
gst_bin_add(Bin,source);
gst_bin_add_many(
Bin,
demuxer,
audioqueue,videoqueue,
audio_decoder,audio_convertor,
video_decoder,video_convertor,
audio_sink,video_sink,
NULL);
gst_element_link(source,demuxer);
gst_element_link_many(audioqueue,audio_decoder,NULL);
gst_element_link_many(audio_convertor,audio_sink,NULL);
gst_element_link_many(videoqueue,video_decoder,NULL);
gst_element_link_many(video_convertor,video_sink,NULL);
g_signal_connect(demuxer,"pad-added",G_CALLBACK(dynamic_addpad),NULL);//demuxer and decoder are passed as instance and data as pads of both the elements are linked in dynamic_addpad
g_signal_connect(audio_decoder,"new-decoded-pad",G_CALLBACK(dynamic_decodepad),NULL);
g_signal_connect(video_decoder,"new-decoded-pad",G_CALLBACK(dynamic_decodepad),NULL);//demuxer and decoder are passed as instance and data as pads of both the elements are linked in dynamic_addpad
gst_bus_add_watch(bus,process_message,Mainloop); //Mainloop is passed as user data as in the process_message actions are taken on the loop
g_object_unref(bus);
g_print("In playing state\n");
gst_element_set_state(pipeline, GST_STATE_PLAYING);//Pipeline is also a bin and bin is also an element at abstract level and hence gst_element_set_state call is used to set state of pipeline.
g_main_loop_run(Mainloop);
g_print("In playing state2\n");
gst_element_set_state(pipeline, GST_STATE_NULL);
g_object_unref(G_OBJECT(pipeline));
}
//Function to process message on bus of pipeline
gboolean process_message(GstBus *bus, GstMessage *msg,gpointer data){
GError *error;
gchar *debug;
GMainLoop *loop = (GMainLoop *)data;
g_print(" In process message msg->type : %d\n",GST_MESSAGE_TYPE(msg));
switch(GST_MESSAGE_TYPE(msg)){
case GST_MESSAGE_UNKNOWN :
g_print("GST_MESSAGE_UNKNOWN \n");
break;
case GST_MESSAGE_EOS :
g_print("GST_MESSAGE_EOS \n");
g_main_loop_quit(loop);
break;
case GST_MESSAGE_ERROR :
g_print("GST_MESSAGE_ERROR \n");
gst_message_parse_error (msg, &error, &debug);
g_free(debug);
//if(!error)
{
g_print("GST_MESSAGE_ERROR message : %s \n",error->message);
}
g_main_loop_quit(loop);
break;
case GST_MESSAGE_WARNING :
g_print("GST_MESSAGE_WARNING \n");
break;
case GST_MESSAGE_INFO :
g_print("GST_MESSAGE_INFO \n");
break;
case GST_MESSAGE_TAG :
g_print("GST_MESSAGE_TAG \n");
break;
case GST_MESSAGE_BUFFERING:
g_print("GST_MESSAGE_BUFFERING \n");
break;
case GST_MESSAGE_STATE_CHANGED:
g_print("GST_MESSAGE_STATE_CHANGED \n");
break;
default :
g_print("default \n");
break;
}
return TRUE; //returns true always as it has to be always registered returning false will deregister the function
}
void dynamic_addpad(GstElement *element, GstPad *pad, gpointer data) {
char* pad_name = gst_pad_get_name(pad);
g_print(" In dynamic ADDING PAD %s\n", pad_name);
if (g_str_has_prefix(pad_name,"audio")) {
GstPad *audiodemuxsink = gst_element_get_static_pad(audioqueue,"sink");
gst_pad_link(pad,audiodemuxsink );
}
else if (g_str_has_prefix(pad_name,"video")) {
GstPad *videodemuxsink = gst_element_get_static_pad(videoqueue,"sink");
gst_pad_link(pad,videodemuxsink );
}
g_free (pad_name);
}
void dynamic_decodepad (GstElement* object, GstPad* pad, gboolean arg1,gpointer user_data) {
GstPad* videoconvertsink = gst_element_get_static_pad(video_convertor,"sink");
if (gst_pad_can_link(pad,videoconvertsink)) {
gst_pad_link(pad,videoconvertsink);
}
GstPad* audioconvertsink = gst_element_get_static_pad(audio_convertor,"sink");
if (gst_pad_can_link(pad,audioconvertsink)) {
gst_pad_link(pad,audioconvertsink);
}
}
Output:
The thread 'Win32 Thread' (0x19c4) has exited with code 0 (0x0).
The thread 'Win32 Thread' (0x2370) has exited with code 0 (0x0).
The thread 'Win32 Thread' (0x2040) has exited with code 0 (0x0).
The program '[5368] GstProject2.exe: Native' has exited with code 0 (0x0).
Finally, I was able to play the AVI file using the following code, which is based on this example on the GStreamer SDK website.
In command prompt:
Option 1:
gst-launch-0.10 filesrc location=C:\\Video.avi ! decodebin2 name=dec ! queue ! ffmpegcolorspace ! autovideosink dec. ! queue ! audioconvert ! audioresample ! autoaudiosink
Option 2:
gst-launch-0.10 filesrc location=C:\\Video.avi ! decodebin2 name=dec ! ffmpegcolorspace ! autovideosink dec. ! audioconvert ! audioresample ! autoaudiosink
Option 3:
gst-launch-0.10 uridecodebin uri=file:///C:/Video.avi name=dec ! ffmpegcolorspace ! autovideosink dec. ! audioconvert ! autoaudiosink
In Visual Studio:
#include <gst/gst.h>
/* Structure to contain all our information, so we can pass it to callbacks */
typedef struct _CustomData {
GstElement *pipeline;
GstElement *source;
GstElement *convert;
GstElement *audio_sink;
GstElement *colorspace;
GstElement *video_sink;
} CustomData;
/* Handler for the pad-added signal */
static void pad_added_handler (GstElement *src, GstPad *pad, CustomData *data);
int main(int argc, char *argv[]) {
CustomData data;
GstBus *bus;
GstMessage *msg;
GstStateChangeReturn ret;
gboolean terminate = FALSE;
/* Initialize GStreamer */
gst_init (&argc, &argv);
/* Create the elements */
data.source = gst_element_factory_make ("uridecodebin", "source");
data.convert = gst_element_factory_make ("audioconvert", "convert");
data.audio_sink = gst_element_factory_make ("autoaudiosink", "audio_sink");
data.colorspace = gst_element_factory_make ("ffmpegcolorspace", "colorspace");
data.video_sink = gst_element_factory_make ("autovideosink", "video_sink");
/* Create the empty pipeline */
data.pipeline = gst_pipeline_new ("test-pipeline");
if (!data.pipeline || !data.source || !data.convert || !data.audio_sink || !data.colorspace || !data.video_sink) {
g_printerr ("Not all elements could be created.\n");
return -1;
}
/* Build the pipeline. Note that we are NOT linking the source at this
* point. We will do it later. */
gst_bin_add_many (GST_BIN (data.pipeline), data.source, data.convert , data.audio_sink, data.colorspace, data.video_sink, NULL);
if (!(gst_element_link (data.convert, data.audio_sink) && gst_element_link (data.colorspace, data.video_sink))) {
g_printerr ("Elements could not be linked.\n");
gst_object_unref (data.pipeline);
return -1;
}
/* Set the URI to play */
g_object_set (data.source, "uri", "file:///C:/Video.avi", NULL);
/* Connect to the pad-added signal */
g_signal_connect (data.source, "pad-added", G_CALLBACK (pad_added_handler), &data);
/* Start playing */
ret = gst_element_set_state (data.pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr ("Unable to set the pipeline to the playing state.\n");
gst_object_unref (data.pipeline);
return -1;
}
/* Listen to the bus */
bus = gst_element_get_bus (data.pipeline);
do {
msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE, GST_MESSAGE_STATE_CHANGED | GST_MESSAGE_ERROR | GST_MESSAGE_EOS);
/* Parse message */
if (msg != NULL) {
GError *err;
gchar *debug_info;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_ERROR:
gst_message_parse_error (msg, &err, &debug_info);
g_printerr ("Error received from element %s: %s\n", GST_OBJECT_NAME (msg->src), err->message);
g_printerr ("Debugging information: %s\n", debug_info ? debug_info : "none");
g_clear_error (&err);
g_free (debug_info);
terminate = TRUE;
break;
case GST_MESSAGE_EOS:
g_print ("End-Of-Stream reached.\n");
terminate = TRUE;
break;
case GST_MESSAGE_STATE_CHANGED:
/* We are only interested in state-changed messages from the pipeline */
if (GST_MESSAGE_SRC (msg) == GST_OBJECT (data.pipeline)) {
GstState old_state, new_state, pending_state;
gst_message_parse_state_changed (msg, &old_state, &new_state, &pending_state);
g_print ("Pipeline state changed from %s to %s:\n",
gst_element_state_get_name (old_state), gst_element_state_get_name (new_state));
}
break;
default:
/* We should not reach here */
g_printerr ("Unexpected message received.\n");
break;
}
gst_message_unref (msg);
}
} while (!terminate);
/* Free resources */
gst_object_unref (bus);
gst_element_set_state (data.pipeline, GST_STATE_NULL);
gst_object_unref (data.pipeline);
return 0;
}
/* This function will be called by the pad-added signal */
static void pad_added_handler (GstElement *src, GstPad *new_pad, CustomData *data) {
GstPad *sink_pad_audio = gst_element_get_static_pad (data->convert, "sink");
GstPad *sink_pad_video = gst_element_get_static_pad (data->colorspace, "sink");
GstPadLinkReturn ret;
GstCaps *new_pad_caps = NULL;
GstStructure *new_pad_struct = NULL;
const gchar *new_pad_type = NULL;
g_print ("Received new pad '%s' from '%s':\n", GST_PAD_NAME (new_pad), GST_ELEMENT_NAME (src));
///* If our converter is already linked, we have nothing to do here */
//if (gst_pad_is_linked (sink_pad)) {
// g_print (" We are already linked. Ignoring.\n");
// goto exit;
//}
/* Check the new pad's type */
new_pad_caps = gst_pad_get_caps (new_pad);
new_pad_struct = gst_caps_get_structure (new_pad_caps, 0);
new_pad_type = gst_structure_get_name (new_pad_struct);
if (!g_str_has_prefix (new_pad_type, "audio/x-raw")) {
g_print (" It has type '%s' which is raw video. Connecting.\n", new_pad_type);
/* Attempt the link */
ret = gst_pad_link (new_pad, sink_pad_video);
if (GST_PAD_LINK_FAILED (ret)) {
g_print (" Type is '%s' but link failed.\n", new_pad_type);
} else {
g_print (" Link succeeded (type '%s').\n", new_pad_type);
}
goto exit;
}
/* Attempt the link */
ret = gst_pad_link (new_pad, sink_pad_audio);
if (GST_PAD_LINK_FAILED (ret)) {
g_print (" Type is '%s' but link failed.\n", new_pad_type);
} else {
g_print (" Link succeeded (type '%s').\n", new_pad_type);
}
exit:
/* Unreference the new pad's caps, if we got them */
if (new_pad_caps != NULL)
gst_caps_unref (new_pad_caps);
/* Unreference the sink pad */
gst_object_unref (sink_pad_audio);
gst_object_unref (sink_pad_video);
}

Running the GStreamer pipeline (not able to get video and audio data in the callback)

I'm a newbie to GStreamer, and I want to get both the audio and video buffers from a 3gp file and do some processing on them in callbacks.
(I start my pipeline in a separate thread; the pipeline delivers audio buffers in a callback AudioCallback and video buffers in VideoCallback.)
This is how my pipeline looks:
GstElement* audioQueue;//global variable , needed in on_pad_added (cant pass both while connecting demuxer to callback)
GstElement* videoQueue;//global variable , needed in on_pad_added (cant pass both while connecting demuxer to callback)
//static functions
static gboolean
bus_call (GstBus* bus, GstMessage* msg, gpointer data)
{
GMainLoop* loop = (GMainLoop*) data;
switch (GST_MESSAGE_TYPE (msg))
{
case GST_MESSAGE_EOS:
g_main_loop_quit (loop);
break;
case GST_MESSAGE_ERROR: {
gchar *debug;
GError *error;
gst_message_parse_error (msg, &error, &debug);
g_free (debug);
g_printerr ("Error: %s\n", error->message);
g_error_free (error);
g_main_loop_quit (loop);
break;
}
default:
break;
}
return true;
}
static void link_two_elements(GstElement* src_element, GstElement* sink_element)
{
if(!gst_element_link(src_element, sink_element))
g_printerr ("Linking Error");
}
static void
on_pad_added (GstElement *element,
GstPad *pad,
gpointer data)
{
GstCaps *caps;
GstStructure *str;
gchar *tex;
GstPad* sinkpad;
/* check media type */
caps = gst_pad_get_caps (pad);
str = gst_caps_get_structure (caps, 0);
tex = (gchar*)gst_structure_get_name(str);
if(g_strrstr(tex,"audio"))
{
//GstElement *audioQueue = (GstElement *) data;
sinkpad = gst_element_get_static_pad (audioQueue, "sink");
if(sinkpad)
{
GstPadLinkReturn linkReturn = gst_pad_link (pad, sinkpad);
gst_object_unref (sinkpad);
}
}
if(g_strrstr(tex,"video"))
{
//GstElement *videoQueue = (GstElement *) data;
sinkpad = gst_element_get_static_pad (videoQueue, "sink");
GstPadLinkReturn linkReturn = gst_pad_link (pad, sinkpad);
gst_object_unref (sinkpad);
}
}
void runPipeline()
{
GMainLoop *loop;
GstElement *__pPipeline, *source, *demuxer, *audioDecoder, *audioConverter, *audioresample, /**audioQueue,*/ *audioSink, *videoDecoder, *videoSink, /**videoQueue,*/ *ffmpegcolorspace, *videoscale;
GstBus* bus;
//Initialisation
gst_init (NULL, NULL);
loop = g_main_loop_new (NULL, FALSE);
// Create gstreamer elements
__pPipeline = gst_pipeline_new("test_appsink");
source = gst_element_factory_make ("filesrc", "file-source");
demuxer = gst_element_factory_make("qtdemux", "demuxer");
//audioDecoder = gst_element_factory_make("ffdec_mp3", "audioDecoder");
audioDecoder = gst_element_factory_make("decodebin", "audioDecoder");
audioConverter = gst_element_factory_make("audioconvert", "audioConverter");
audioresample = gst_element_factory_make("audioresample", "audioresample");
audioSink = gst_element_factory_make("appsink", "audioSink");
audioQueue = gst_element_factory_make("queue2", "audioQueue");
//videoDecoder = gst_element_factory_make("ffdec_h264", "videoDecoder");
videoQueue = gst_element_factory_make("queue2", "videoQueue");
videoDecoder = gst_element_factory_make("decodebin ", "videoDecoder");
ffmpegcolorspace = gst_element_factory_make("ffmpegcolorspace", "ffmpegcolorspace");
videoscale = gst_element_factory_make("videoscale", "videoscale");
videoSink = gst_element_factory_make("appsink", "videoSink");
//appsink = gst_element_factory_make("appsink", "sink-buffer");
if (!__pPipeline || !source || !demuxer || !audioDecoder || !audioConverter ||!audioresample || !audioSink || !videoSink || !audioQueue || !videoQueue || !videoDecoder || !ffmpegcolorspace || !videoscale )
{
//return -1;
}
//we set the input filename to the source element
g_object_set (G_OBJECT (source), "location", "/etc/20000101-161404.3gp", NULL);
//Make appsink emit the "new-preroll" and "new-buffer" signals.
gst_app_sink_set_emit_signals ((GstAppSink*) audioSink, TRUE);
gst_app_sink_set_emit_signals ((GstAppSink*) videoSink, TRUE);
//we add a message handler
bus = gst_pipeline_get_bus (GST_PIPELINE (__pPipeline));
gst_bus_add_watch (bus, bus_call, loop);
gst_object_unref (bus);
//we add all elements into the pipeline
gst_bin_add_many (GST_BIN (__pPipeline),
source, demuxer, videoDecoder, audioDecoder, audioConverter, audioresample, audioSink, videoSink,
audioQueue, videoQueue, ffmpegcolorspace, videoscale, NULL);
//link source and demuxer seperately
link_two_elements(source, demuxer);
//link rest of the elements
int retValVideoLinking = (int)gst_element_link_many (videoQueue, videoDecoder, ffmpegcolorspace, videoscale, videoSink, NULL);
int retValAudioLinking = (int)gst_element_link_many (audioQueue, audioDecoder, audioConverter, audioresample, audioSink, NULL);
gulong sigConRet = g_signal_connect (demuxer, "pad-added", G_CALLBACK (on_pad_added), NULL);
_ArAudioIn audioInstance = _ArAudioIn::GetArAudioInstance();
g_signal_connect (videoSink, "new-buffer", G_CALLBACK (AudioCallback), NULL);//AudioCallback static API
g_signal_connect (audioSink, "new-buffer", G_CALLBACK (VideoCallback), NULL);//VideoCallback static API
//Set the pipeline to "playing" state
GstStateChangeReturn state = gst_element_set_state (__pPipeline, GST_STATE_PLAYING);
g_main_loop_run (loop);
}
I'm only getting a single video buffer in my VideoCallback, and in on_pad_added I'm getting a linking error for the audio pad:
GST_PAD_LINK_NOFORMAT = -4
I'm trying to link the queue's sink pad to the pad received in on_pad_added; the same works for video but not for audio.
If anybody has any idea about this, please give me some pointers to get rid of this error and make the pipeline work.
It would be nice if you cleaned up your code before asking us to debug it. As general advice, check the return values and either log a warning or simply exit(1) to ensure that your pipeline setup works (e.g. in the pad_added handler); see the sketch below. I'd also start by using normal video and audio sinks to check that it plays.
Finally, it is usually a bad idea to pull data out of the pipeline. Perhaps you could tell us what you want to do with the data once you have it in your callback, so that we can give better advice.
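For example, a checked variant of the question's link_two_elements helper might look like this (a sketch; exiting is one option, propagating an error is another):

#include <gst/gst.h>
#include <stdlib.h>

/* Link two elements and bail out immediately on failure, so a broken
 * pipeline is caught at setup time rather than as a mysterious runtime
 * error later. */
static void
link_two_elements_checked (GstElement *src_element, GstElement *sink_element)
{
  if (!gst_element_link (src_element, sink_element)) {
    g_printerr ("Failed to link %s -> %s\n",
        GST_ELEMENT_NAME (src_element), GST_ELEMENT_NAME (sink_element));
    exit (1);
  }
}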