gstreamer flacenc: How to add duration information to FLAC files?

I'm using GStreamer (gst-launch-1.0 version 1.8.3) to record FLAC files. The command line looks like this:
gst-launch-1.0 -v alsasrc ! flacenc ! filesink location="output.flac"
mediainfo output.flac
(screenshot: the mediainfo output for output.flac)
The file plays in a media player, but seeking does not work and no play time is shown.
I think there is no duration information in the file.
(screenshot: the player window)
How do I add duration information to FLAC files?

I think .flac is a very basic stream format. It does not support random access or carry a duration; you can't know the exact duration unless you parse the complete file. Some players take a best-effort approach here and seek to roughly the file position where you put the slider, but it's nothing the format itself provides.
I think for seeking you are supposed to put .flac into a container like .ogg. This is actually very similar to .aac files, which should be put into .mp4.
So try gst-launch-1.0 -e -v alsasrc ! flacenc ! oggmux ! filesink location="output.ogg". The -e flag makes gst-launch send EOS when you stop it with Ctrl+C, so that oggmux can finalize the file.
$ mediainfo output.ogg
General
Complete name : output.ogg
Format : Ogg
Format/Info : Free Lossless Audio Codec
File size : 598 KiB
Duration : 7 s 941 ms
Overall bit rate mode : Variable
Overall bit rate : 617 kb/s
Audio
ID : 256729656 (0xF4D6238)
Format : FLAC
Format/Info : Free Lossless Audio Codec
Duration : 7 s 941 ms
Bit rate mode : Variable
Channel(s) : 2 channels
Channel positions : Front: L R
Sampling rate : 44.1 kHz
Bit depth : 16 bits
Writing library : libFLAC 1.3.2 (UTC 2017-01-01)
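If you prefer to do the same from C instead of gst-launch, here is a minimal sketch (untested, assuming the default ALSA device and the file name output.ogg); it builds the same pipeline with gst_parse_launch and sends EOS itself before shutting down, which is what -e does for gst-launch:
#include <gst/gst.h>

int main(int argc, char *argv[])
{
  gst_init(&argc, &argv);

  /* Same pipeline as the gst-launch line above. */
  GError *error = NULL;
  GstElement *pipeline = gst_parse_launch(
      "alsasrc ! flacenc ! oggmux ! filesink location=output.ogg", &error);
  if (!pipeline) {
    g_printerr("Parse error: %s\n", error->message);
    return -1;
  }

  gst_element_set_state(pipeline, GST_STATE_PLAYING);

  /* Record for 10 seconds, then send EOS so the muxer can finish the file properly. */
  g_usleep(10 * G_USEC_PER_SEC);
  gst_element_send_event(pipeline, gst_event_new_eos());

  /* Wait until the EOS (or an error) reaches the bus before tearing down. */
  GstBus *bus = gst_element_get_bus(pipeline);
  GstMessage *msg = gst_bus_timed_pop_filtered(bus, GST_CLOCK_TIME_NONE,
      (GstMessageType) (GST_MESSAGE_EOS | GST_MESSAGE_ERROR));
  if (msg)
    gst_message_unref(msg);
  gst_object_unref(bus);

  gst_element_set_state(pipeline, GST_STATE_NULL);
  gst_object_unref(pipeline);
  return 0;
}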

I made some modifications to user199309's answer
https://stackoverflow.com/a/47569428/5564626
compile: g++ -o test test.c $(pkg-config --cflags --libs gstreamer-1.0)
/* The define must come before the GLib headers to have any effect. */
#define GLIB_DISABLE_DEPRECATION_WARNINGS

#include <stdio.h>
#include <gst/gst.h>

static GstElement *pipeline;
static GstPad *queue_src_pad;
static GstElement *bins[2];
static GstPad *bin_pads[2];
static GstElement *filesink[2];
static GMainLoop *loop;
static GstElement *flacenc[2];
static size_t current_bin = 0;
static int current_file = 0;
static GstPadProbeReturn
pad_probe_cb(GstPad *pad, GstPadProbeInfo *info, gpointer user_data)
{
  gst_pad_remove_probe(pad, GST_PAD_PROBE_INFO_ID(info));

  /* Detach the current bin and send it EOS so flacenc/filesink can finalize the file. */
  gst_pad_unlink(queue_src_pad, bin_pads[current_bin]);
  gst_pad_send_event(bin_pads[current_bin], gst_event_new_eos());
  gst_element_set_state(bins[current_bin], GST_STATE_NULL);
  gst_object_ref(bins[current_bin]);
  gst_bin_remove(GST_BIN(pipeline), bins[current_bin]);

  /* Switch to the other bin and point its filesink at the next file. */
  current_file++;
  current_bin = (current_file % 2);
  {
    char file_location[32];
    snprintf(file_location, sizeof(file_location), "recording_%d.flac", current_file);
    g_object_set(G_OBJECT(filesink[current_bin]), "location", file_location, NULL);
    printf("now writing to %s\n", file_location);
  }

  gst_bin_add(GST_BIN(pipeline), bins[current_bin]);
  gst_pad_link(queue_src_pad, bin_pads[current_bin]);
  gst_element_set_state(bins[current_bin], GST_STATE_PLAYING);
  gst_element_sync_state_with_parent(bins[current_bin]);

  return GST_PAD_PROBE_OK;
}

static gboolean timeout_cb(gpointer user_data)
{
  /* Block the queue's src pad; the actual file switch happens in pad_probe_cb. */
  gst_pad_add_probe(queue_src_pad, GST_PAD_PROBE_TYPE_BLOCK_DOWNSTREAM,
      pad_probe_cb, NULL, NULL);
  return TRUE;
}
static gboolean
bus_cb(GstBus *bus, GstMessage *msg, gpointer data)
{
  GMainLoop *loop = (GMainLoop *) data;

  g_print("Got %s message\n", GST_MESSAGE_TYPE_NAME(msg));
  switch (GST_MESSAGE_TYPE(msg)) {
    case GST_MESSAGE_EOS:
      g_print("End of stream\n");
      //g_main_loop_quit (loop);
      break;
    case GST_MESSAGE_ERROR: {
      gchar *debug;
      GError *error;
      gst_message_parse_error(msg, &error, &debug);
      g_free(debug);
      g_printerr("Error: %s\n", error->message);
      g_error_free(error);
      g_main_loop_quit(loop);
      break;
    }
    default:
      break;
  }
  return TRUE;
}
gint main(gint argc, gchar *argv[])
{
  GstElement *audiosrc;
  GstElement *queue;
  GstBus *bus;
  guint bus_watch_id;

  gst_init(&argc, &argv);

  audiosrc = gst_element_factory_make("alsasrc", "audiosrc");
  queue = gst_element_factory_make("queue", "queue");
  bins[0] = gst_bin_new("bin0");
  bins[1] = gst_bin_new("bin1");
  flacenc[0] = gst_element_factory_make("flacenc", "flacenc0");
  flacenc[1] = gst_element_factory_make("flacenc", "flacenc1");
  filesink[0] = gst_element_factory_make("filesink", "filesink0");
  filesink[1] = gst_element_factory_make("filesink", "filesink1");
  pipeline = gst_pipeline_new("test-pipeline");

  if (!pipeline || !audiosrc || !queue
      || !flacenc[0] || !filesink[0]
      || !flacenc[1] || !filesink[1]) {
    g_printerr("not all elements could be created\n");
    return -1;
  }

  gst_bin_add_many(GST_BIN(bins[0]), flacenc[0], filesink[0], NULL);
  gst_bin_add_many(GST_BIN(bins[1]), flacenc[1], filesink[1], NULL);
  gst_bin_add_many(GST_BIN(pipeline), audiosrc, queue, bins[0], NULL);
  g_assert(gst_element_link(audiosrc, queue));
  g_assert(gst_element_link_many(flacenc[0], filesink[0], NULL));
  g_assert(gst_element_link_many(flacenc[1], filesink[1], NULL));

  /* Expose each bin's flacenc sink pad as a ghost "sink" pad on the bin. */
  GstPad *pad = gst_element_get_static_pad(flacenc[0], "sink");
  gst_element_add_pad(bins[0], gst_ghost_pad_new("sink", pad));
  gst_object_unref(pad);

  GstPad *pad2 = gst_element_get_static_pad(flacenc[1], "sink");
  gst_element_add_pad(bins[1], gst_ghost_pad_new("sink", pad2));
  gst_object_unref(pad2);

  bin_pads[0] = gst_element_get_static_pad(bins[0], "sink");
  bin_pads[1] = gst_element_get_static_pad(bins[1], "sink");

  current_bin = 0;
  gst_element_link(queue, bins[current_bin]);
  g_object_set(filesink[current_bin], "location", "recording_0.flac", NULL);
  queue_src_pad = gst_element_get_static_pad(queue, "src");

  /* Create the main loop before handing it to the bus watch. */
  loop = g_main_loop_new(NULL, FALSE);
  bus = gst_element_get_bus(pipeline);
  bus_watch_id = gst_bus_add_watch(bus, bus_cb, loop);

  gst_element_set_state(pipeline, GST_STATE_PLAYING);

  /* Switch to a new output file every 10 seconds. */
  g_timeout_add_seconds(10, timeout_cb, NULL);
  g_main_loop_run(loop);

  g_source_remove(bus_watch_id);
  gst_object_unref(bus);
  gst_element_set_state(pipeline, GST_STATE_NULL);
  gst_object_unref(pipeline);
  return 0;
}
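Note that each finished segment gets an EOS through gst_pad_send_event() in pad_probe_cb, which lets flacenc and filesink finalize that file, but the segment being written when you kill the program does not. If the last file matters to you, send EOS to the whole pipeline before shutting down and wait for the EOS message on the bus, for example from a SIGINT handler (a small sketch; the handler name is made up here):
#include <signal.h>

static void sigint_handler(int signum)
{
  /* Ask the pipeline to drain; tear it down only after EOS shows up on the bus. */
  gst_element_send_event(pipeline, gst_event_new_eos());
}
The recording example further down this page uses the same trick.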

Related

pad creation and some mistakes on gst_parse_launch

I am a newbie to gstreamer and I would like to know if we have to create source and sink pads for elements like videoconvert in a pipeline. I have a pipeline like this:
gst-launch-1.0 v4l2src ! video/x-raw,format=YUY2 ! videoconvert ! xvimagesink
I am trying to create a simple C application to understand the creation of pads, and I would like to know if videoconvert has a source pad and a sink pad too. I am creating a source and a sink pad for the filter.
EDIT:
yeah well you see, I tried following the dynamic pipelines example and wrote the code below
#include <gst/gst.h>
// easier to pass them as callbacks
typedef struct _CustomData{
GstElement *pipeline;
GstElement *source;
GstElement *convert;
GstElement *sink;
}CustomData;
// callback function
// here src is the v4l2src, newpad is gstpad that has just been added to src element. This is usually the pad to which we want to lnk
// data is the pointer we provided when attaching to the signal.
static void pad_added_handler(GstElement *src, GstPad *new_pad,CustomData *data)
{
g_print("In pad handler\n");
GstPad *sink_pad = gst_element_get_static_pad(data->convert, "sink");
GstPadLinkReturn ret;
GstCaps *new_pad_caps = NULL;
GstStructure *new_pad_struct = NULL;
const gchar *new_pad_type = NULL;
if(gst_pad_is_linked(sink_pad))
{
g_print("we are linked. igonring\n");
}
// check the new pad types
// we have previously created a piece of pipeline which deals with videoconvert linked with xvimagesink and we will nto be able to link it to a pad producing video.
//gst-pad_get_current_caps()- retrieves current capabilities of pad
new_pad_caps = gst_pad_get_current_caps(new_pad);
new_pad_struct = gst_caps_get_structure(new_pad_caps, 0);
new_pad_type = gst_structure_get_name(new_pad_struct);
g_print ("It has type '%s' which is not raw audio. Ignoring.\n", new_pad_type);
if(!g_str_has_prefix(new_pad_type, "video/x-raw,format=(string)YUY2"))
{
g_print("It has new pad type");
}
// gst_pad_link tries to link two pads . the link must be specified from source to sink and both pads must be owned by elements residing in same pipeline
ret = gst_pad_link(new_pad, sink_pad);
if(GST_PAD_LINK_FAILED(ret))
{
g_print("type is new_pad_type");
}
if(new_pad_caps !=NULL)
{
gst_caps_unref(new_pad_caps);
}
gst_object_unref(sink_pad);
}
int main(int argc, char *argv[])
{
GMainLoop *loop;
CustomData data;
GstBus *bus;
GstMessage *msg;
gboolean terminate = FALSE;
gst_init(&argc, &argv);
// loop = g_main_loop_new(NULL, FALSE);
// create the elements
data.source = gst_element_factory_make("v4l2src", "source");
data.convert = gst_element_factory_make("videoconvert", "convert");
data.sink = gst_element_factory_make("xvimagesink", "sink");
data.pipeline = gst_pipeline_new("new-pipeline");
if(!data.pipeline || !data.source || !data.convert || !data.sink)
{
g_printerr("Not all elements could be created\n");
return -1;
}
//we did not link source at this point of time, we will do it later
gst_bin_add_many(GST_BIN(data.pipeline), data.source, data.convert, data.sink, NULL);
// we link convert element to sink, do not link them with source. we dont have source pads here. so we just have videoconvert->sink unlinked
// gst_element_link(data.source, data.convert);
if(!gst_element_link( data.convert,data.sink))
{
g_printerr("elements could not be linked\n");
gst_object_unref(data.pipeline);
return -1;
}
// we set the device source
//g_object_set(source, "device", "/dev/video0", NULL);
//connect to pad added signal.
// we want to attach pad added signal to source element. to do so, we are using g_signal_connect and provide callback function and datapointer.
// when source element has enough information to start producing data, it will create source pads and trigger the pad added signal. at this point, our callback is called
g_print("before signal connect\n");
gint id= g_signal_connect(G_OBJECT(data.source), "pad-added", G_CALLBACK(pad_added_handler), &data );
g_print("after signal connect with id = %d\n", id);
//g_signal_connect(G_OBJECT(data.source), "pad-added", G_CALLBACK(handler), &data);
// gst_element_link(data.source, data.convert);
GstStateChangeReturn ret;
ret =gst_element_set_state (data.pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr ("Unable to set the pipeline to the playing state.\n");
gst_object_unref (data.pipeline);
return -1;
}
// g_main_loop_run(loop);
/* Listen to the bus */
bus = gst_element_get_bus (data.pipeline);
do {
msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,
GST_MESSAGE_STATE_CHANGED | GST_MESSAGE_ERROR | GST_MESSAGE_EOS);
/* Parse message */
if (msg != NULL) {
GError *err;
gchar *debug_info;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_ERROR:
gst_message_parse_error (msg, &err, &debug_info);
g_printerr ("Error received from element %s: %s\n", GST_OBJECT_NAME (msg->src), err->message);
g_printerr ("Debugging information: %s\n", debug_info ? debug_info : "none");
g_clear_error (&err);
g_free (debug_info);
terminate = TRUE;
break;
case GST_MESSAGE_EOS:
g_print ("End-Of-Stream reached.\n");
terminate = TRUE;
break;
case GST_MESSAGE_STATE_CHANGED:
/* We are only interested in state-changed messages from the pipeline */
if (GST_MESSAGE_SRC (msg) == GST_OBJECT (data.pipeline)) {
GstState old_state, new_state, pending_state;
gst_message_parse_state_changed (msg, &old_state, &new_state, &pending_state);
g_print ("Pipeline state changed from %s to %s:\n",
gst_element_state_get_name (old_state), gst_element_state_get_name (new_state));
}
break;
default:
/* We should not reach here */
g_printerr ("Unexpected message received.\n");
break;
}
gst_message_unref (msg);
}
} while (!terminate);
/* Free resources */
gst_object_unref (bus);
gst_element_set_state(data.pipeline, GST_STATE_NULL);
gst_object_unref(data.pipeline);
return 0;
}
and it gave me an error
before signal connect
after signal connect with id = 1
Pipeline state changed from NULL to READY:
Pipeline state changed from READY to PAUSED:
Error received from element source: Internal data stream error.
Debugging information: gstbasesrc.c(3055): gst_base_src_loop (): /GstPipeline:new-pipeline/GstV4l2Src:source:
streaming stopped, reason not-linked (-1)
(The above code works if I add gst_element_link(data.source, data.convert) after the element link statement for convert and sink.)
So I tried the normal way, in which I just added and linked all the elements together, and it began to work without the use of pads.
#include <gst/gst.h>
int main(int argc, char *argv[])
{
GstElement *pipeline, *source, *convert, *sink;
GstBus *bus;
GstMessage *msg;
gst_init(&argc, &argv);
source = gst_element_factory_make("v4l2src", "source");
convert = gst_element_factory_make("nvvidconv", "convert");
sink = gst_element_factory_make("xvimagesink", "sink");
pipeline = gst_pipeline_new("pipe");
gst_bin_add_many(GST_BIN(pipeline), source,convert,sink, NULL);
gst_element_link_many(source,convert,sink,NULL);
gst_element_set_state(pipeline,GST_STATE_PLAYING);
bus = gst_element_get_bus (pipeline);
msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE, GST_MESSAGE_ERROR | GST_MESSAGE_EOS);
/* Parse message */
if (msg != NULL) {
GError *err;
gchar *debug_info;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_ERROR:
gst_message_parse_error (msg, &err, &debug_info);
g_printerr ("Error received from element %s: %s\n", GST_OBJECT_NAME (msg->src), err->message);
g_printerr ("Debugging information: %s\n", debug_info ? debug_info : "none");
g_clear_error (&err);
g_free (debug_info);
break;
case GST_MESSAGE_EOS:
g_print ("End-Of-Stream reached.\n");
break;
default:
/* We should not reach here because we only asked for ERRORs and EOS */
g_printerr ("Unexpected message received.\n");
break;
}
gst_message_unref (msg);
}
/* Free resources */
gst_object_unref(bus);
gst_element_set_state(pipeline,GST_STATE_NULL);
gst_object_unref(pipeline);
}
But, in order to fully grasp pads, I wanted to execute simpler pipelines using pads.
I just don't fully understand the usage of pads and how to link them.
EDIT2:
Ultimately I want to write an application for a pipeline like this, which works perfectly well on the command line:
gst-launch-1.0 v4l2src device='/dev/video0' ! 'video/x-raw,format=(string)YUY2,width=(int)640,height=(int)480' ! nvvidconv ! 'video/x-raw(memory:NVMM),format=(string)NV12,width=(int)640,height=(int)480' ! nvvidconv ! 'video/x-raw,format=(string)NV12,width=(int)640,height=(int)480' ! nvvideoconvert ! 'video/x-raw(memory:NVMM),format=(string)NV12,width=(int)640,height=(int)480' ! mux.sink_0 nvstreammux live-source=1 name=mux batch-size=1 width=640 height=480 ! nvinfer config-file-path=/opt/nvidia/deepstream/deepstream-4.0/sources/apps/sample_apps/deepstream-test1/dstest1_pgie_config.txt batch-size=1 ! nvmultistreamtiler rows=1 columns=1 width=640 height=480 ! nvvideoconvert ! nvdsosd ! nvegltransform ! nveglglessink sync=false -v
But as I don't understand the usage of both pads and bins, I am unable to implement them in my above pipeline. However, I tried this,
#include <gst/gst.h>
#include <glib.h>
#include <stdio.h>
#include "gstnvdsmeta.h"
#define MAX_DISPLAY_LEN 64
#define PGIE_CLASS_ID_VEHICLE 0
#define PGIE_CLASS_ID_PERSON 2
gint frame_number = 0;
gchar pgie_classes_str[4][32] = { "Vehicle", "TwoWheeler", "Person",
"Roadsign"
};
static GstPadProbeReturn osd_sink_pad_buffer_probe(GstPad *pad, GstPadProbeInfo *info, gpointer u_data)
{
GstBuffer *buf=(GstBuffer *)info->data;
guint num_rects =0;
NvDsObjectMeta *obj_meta = NULL;
guint vehicle_count = 0;
guint person_count = 0;
NvDsMetaList * l_frame = NULL;
NvDsMetaList * l_obj = NULL;
NvDsDisplayMeta *display_meta = NULL;
NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buf);
for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
l_frame = l_frame->next) {
NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) (l_frame->data);
int offset = 0;
for (l_obj = frame_meta->obj_meta_list; l_obj != NULL;
l_obj = l_obj->next) {
obj_meta = (NvDsObjectMeta *) (l_obj->data);
if (obj_meta->class_id == PGIE_CLASS_ID_VEHICLE) {
vehicle_count++;
num_rects++;
}
if (obj_meta->class_id == PGIE_CLASS_ID_PERSON) {
person_count++;
num_rects++;
}
}
display_meta = nvds_acquire_display_meta_from_pool(batch_meta);
NvOSD_TextParams *txt_params = &display_meta->text_params[0];
display_meta->num_labels = 1;
txt_params->display_text = g_malloc0 (MAX_DISPLAY_LEN);
offset = snprintf(txt_params->display_text, MAX_DISPLAY_LEN, "Person = %d ", person_count);
offset = snprintf(txt_params->display_text + offset , MAX_DISPLAY_LEN, "Vehicle = %d ", vehicle_count);
/* Now set the offsets where the string should appear */
txt_params->x_offset = 10;
txt_params->y_offset = 12;
/* Font , font-color and font-size */
txt_params->font_params.font_name = "Serif";
txt_params->font_params.font_size = 10;
txt_params->font_params.font_color.red = 1.0;
txt_params->font_params.font_color.green = 1.0;
txt_params->font_params.font_color.blue = 1.0;
txt_params->font_params.font_color.alpha = 1.0;
/* Text background color */
txt_params->set_bg_clr = 1;
txt_params->text_bg_clr.red = 0.0;
txt_params->text_bg_clr.green = 0.0;
txt_params->text_bg_clr.blue = 0.0;
txt_params->text_bg_clr.alpha = 1.0;
nvds_add_display_meta_to_frame(frame_meta, display_meta);
}
g_print ("Frame Number = %d Number of objects = %d "
"Vehicle Count = %d Person Count = %d\n",
frame_number, num_rects, vehicle_count, person_count);
frame_number++;
return GST_PAD_PROBE_OK;
}
int main(int argc, char *argv[])
{
GstElement *pipeline, *source, *filter1, *convert,*filter2, *filter3, *vidconv, *filter4, *mux, *infer, *tiler, *osd, *transform , *sink, *bin, *convert2 , *vidconv2;
GMainLoop *loop;
GstCaps *caps1, *caps2, *caps3, *caps4;
GstPad *osd_sink_pad =NULL, *srcpad, *sinkpad;
loop = g_main_loop_new(NULL,FALSE);
gst_init(&argc, &argv);
pipeline = gst_pipeline_new("nv_pipeline");
gchar *string1 = "video/x-raw(memory:NVMM),format=(string)NV12";
source = gst_element_factory_make("v4l2src", "source");
filter1 = gst_element_factory_make("capsfilter", "filter1");
convert = gst_element_factory_make("nvvidconv", "convert");
filter2 = gst_element_factory_make("capsfilter", "filter2");
filter3 = gst_element_factory_make("capsfilter", "filter3");
filter4 = gst_element_factory_make("capsfilter", "filter4");
vidconv = gst_element_factory_make("nvvideoconvert", "vidconv");
mux = gst_element_factory_make("nvstreammux", "mux");
infer = gst_element_factory_make("nvinfer", "infer");
tiler = gst_element_factory_make("nvmultistreamtiler", "tiler");
osd = gst_element_factory_make("nvosd", "osd");
transform = gst_element_factory_make("nvegltransform", "transform");
sink = gst_element_factory_make("nveglglessink", "sink");
convert2 = gst_element_factory_make("nvvidconv", "convert2");
vidconv2 = gst_element_factory_make("nvvideoconvert", "vidconv2");
gst_bin_add_many(GST_BIN(pipeline), source,filter1,convert,filter2, convert2,filter3,vidconv, filter4,mux,infer, tiler,vidconv2, osd,transform,sink,NULL);
gst_element_link_many(source,filter1,convert,filter2, convert2,filter3, vidconv, filter4,mux,infer, tiler,vidconv2, osd,transform,sink,NULL);
osd_sink_pad = gst_element_get_static_pad(osd, "sink");
gst_pad_add_probe(osd_sink_pad, GST_PAD_PROBE_TYPE_BUFFER, osd_sink_pad_buffer_probe, NULL, NULL);
caps1 = gst_caps_new_simple("video/x-raw", "format",G_TYPE_STRING,"YUY2",NULL);
caps2 = gst_caps_from_string(string1);
caps3 = gst_caps_new_simple("video/x-raw", "format", G_TYPE_STRING,"NV12", NULL);
caps4 = gst_caps_from_string(string1);
g_object_set(G_OBJECT(filter1), "caps", caps1, NULL);
g_object_set(G_OBJECT(filter2), "caps", caps2, NULL);
g_object_set(G_OBJECT(filter3), "caps", caps3, NULL);
g_object_set(G_OBJECT(filter4), "caps", caps4, NULL);
g_object_set(G_OBJECT(mux), "live-source", 1, "name", "mux", "batch-size", 1, "width", 1280, "height", 720, NULL);
g_object_set(G_OBJECT(infer), "config-file-path","/opt/nvidia/deepstream/deepstream-4.0/sources/apps/sample_apps/deepstream-test1/dstest1_pgie_config.txt",NULL);
g_object_set(G_OBJECT(infer), "batch-size", 1, NULL);
g_object_set(G_OBJECT(tiler), "rows", 1, "columns", 1, "width", 1280, "height", 720, NULL);
gst_caps_unref(caps1);
gst_caps_unref(caps2);
gst_caps_unref(caps3);
gst_caps_unref(caps4);
gst_element_set_state(pipeline, GST_STATE_PLAYING);
g_print("Running ...\n");
g_main_loop_run(loop);
gst_element_set_state(pipeline,GST_STATE_NULL);
gst_object_unref(pipeline);
return 0;
}
which gives exactly the same output as the gst-launch-1.0 command line, for example:
(deep_pads:15648): GLib-GObject-WARNING **: 11:29:18.761: cannot register existing type 'GstInterpolationMethod'
(deep_pads:15648): GLib-GObject-CRITICAL **: 11:29:18.761: g_param_spec_enum: assertion 'G_TYPE_IS_ENUM (enum_type)' failed
(deep_pads:15648): GLib-GObject-CRITICAL **: 11:29:18.761: validate_pspec_to_install: assertion 'G_IS_PARAM_SPEC (pspec)' failed
(deep_pads:15648): GStreamer-CRITICAL **: 11:29:18.814: gst_element_get_static_pad: assertion 'GST_IS_ELEMENT (element)' failed
(deep_pads:15648): GStreamer-CRITICAL **: 11:29:18.814: gst_pad_add_probe: assertion 'GST_IS_PAD (pad)' failed
0:00:00.843318172 15648 0x5591be52c0 INFO nvinfer gstnvinfer.cpp:519:gst_nvinfer_logger:<infer> NvDsInferContext[UID 1]:initialize(): Trying to create engine from model files
0:00:20.693301580 15648 0x5591be52c0 INFO nvinfer gstnvinfer.cpp:519:gst_nvinfer_logger:<infer> NvDsInferContext[UID 1]:generateTRTModel(): Storing the serialized cuda engine to file at /opt/nvidia/deepstream/deepstream-4.0/samples/models/Primary_Detector/resnet10.caffemodel_b1_int8.engine
except that the above C application doesn't display an output window and doesn't display the rest.
Your first example fails because the data.source element is never actually linked to the data.convert element. Since both elements have static pads you need to "manually" retrieve and link them before setting the pipeline to GST_STATE_PLAYING:
GstPad *source_pad = gst_element_get_static_pad(data.source, "src");
GstPad *sink_pad = gst_element_get_static_pad(data.convert, "sink");
ret = gst_pad_link(source_pad, sink_pad);
You probably expected that the static source pad of your data.source element would somehow be created automatically, so you registered
g_signal_connect(G_OBJECT(data.source), "pad-added", G_CALLBACK(pad_added_handler), &data );
But, as you can see from your debug output, pad_added_handler was never called: the pad-added signal can be connected on any element, but it only fires for elements that create pads dynamically. For example, the demultiplexer tsdemux dynamically creates its source pads during discovery of the elementary streams, so in that case registering a pad-added callback would be necessary.
The first step in your learning curve is to understand the fundamental differences between "static" pads (mandatory, always exist, linked "manually"), dynamic pads (exist sometimes, created automatically by the element) and request pads (optional, requested "manually" when needed). After that everything will be much easier for you.
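As a small sketch of the alternative: since both pads involved here are static, you can also let gst_element_link() find and link them for you, which is effectively what you observed when adding the extra link call:
if (!gst_element_link(data.source, data.convert)) {
  g_printerr("Failed to link source to convert\n");
  gst_object_unref(data.pipeline);
  return -1;
}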

Gstreamer recording video with audio

I'm trying to record video from my webcam together with audio to a file, using GStreamer on my Ubuntu 16 machine through the GLib library.
I'm able to watch the video stream from the webcam with these lines of code:
#include <gst/gst.h>
int main(int argc, char *argv[]) {
GstElement *pipeline, *source, *sink, *convert;
GstBus *bus;
GstMessage *msg;
GstStateChangeReturn ret;
/* Initialize GStreamer */
gst_init (&argc, &argv);
/* Create the elements */
source = gst_element_factory_make ("v4l2src", "source");
sink = gst_element_factory_make ("autovideosink", "sink");
convert =gst_element_factory_make("videoconvert","convert");
//convert = gst_element_factory_make ("audioconvert", "convert");
//sink = gst_element_factory_make ("autoaudiosink", "sink");
/* Create the empty pipeline */
pipeline = gst_pipeline_new ("test-pipeline");
if (!pipeline || !source || !sink || !convert) {
g_printerr ("Not all elements could be created.\n");
return -1;
}
/* set the source device */
g_object_set (source, "device", "/dev/video0", NULL);
/* Build the pipeline */
gst_bin_add_many (GST_BIN (pipeline), source, sink, convert, NULL);
if (gst_element_link (convert, sink) != TRUE) {
g_printerr ("Elements could not be linked confert sink.\n");
gst_object_unref (pipeline);
return -1;
}
if (gst_element_link (source, convert) != TRUE) {
g_printerr ("Elements could not be linked source -convert.\n");
gst_object_unref (pipeline);
return -1;
}
/* Start playing */
ret = gst_element_set_state (pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr ("Unable to set the pipeline to the playing state.\n");
gst_object_unref (pipeline);
return -1;
}
/* Wait until error or EOS */
bus = gst_element_get_bus (pipeline);
msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,(GstMessageType) (GST_MESSAGE_ERROR | GST_MESSAGE_EOS));
/* Parse message */
if (msg != NULL) {
GError *err;
gchar *debug_info;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_ERROR:
gst_message_parse_error (msg, &err, &debug_info);
g_printerr ("Error received from element %s: %s\n", GST_OBJECT_NAME (msg->src), err->message);
g_printerr ("Debugging information: %s\n", debug_info ? debug_info : "none");
g_clear_error (&err);
g_free (debug_info);
break;
case GST_MESSAGE_EOS:
g_print ("End-Of-Stream reached.\n");
break;
default:
/* We should not reach here because we only asked for ERRORs and EOS */
g_printerr ("Unexpected message received.\n");
break;
}
gst_message_unref (msg);
}
/* Free resources */
gst_object_unref (bus);
gst_element_set_state (pipeline, GST_STATE_NULL);
gst_object_unref (pipeline);
return 0;
}
and to capture audio from the microphone and listen to it through the speakers with these lines of code:
#include <gst/gst.h>
#include <glib.h>
static gboolean
bus_call (GstBus *bus,
GstMessage *msg,
gpointer data){
GMainLoop *loop = (GMainLoop *) data;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_EOS:
g_print ("End of stream\n");
g_main_loop_quit (loop);
break;
case GST_MESSAGE_ERROR: {
gchar *debug;
GError *error;
gst_message_parse_error (msg, &error, &debug);
g_free (debug);
g_printerr ("Error: %s\n", error->message);
g_error_free (error);
g_main_loop_quit (loop);
break;
}
default:
break;
}
return TRUE;
}
/* Main function for audio pipeline initialization and looping streaming process */
gint
main (gint argc, gchar **argv) {
GMainLoop *loop;
GstElement *pipeline, *audio_source, *sink;
GstBus *bus;
guint bus_watch_id;
GstCaps *caps;
gboolean ret;
/* Initialization of gstreamer */
gst_init (&argc, &argv);
loop = g_main_loop_new (NULL, FALSE);
/* Elements creation */
pipeline = gst_pipeline_new ("audio_stream");
audio_source = gst_element_factory_make ("alsasrc", "audio_source");
sink = gst_element_factory_make ("alsasink", "audio_sink");
// video_source = gst_element_factory_make ("v4l2src", "source");
// video_sink = gst_element_factory_make ("autovideosink", "sink");
// video_convert= gst_element_factory_make("videoconvert","convert");
if (!pipeline) {
g_printerr ("Audio: Pipeline couldn't be created\n");
return -1;
}
if (!audio_source) {
g_printerr ("Audio: alsasrc couldn't be created\n");
return -1;
}
if (!sink) {
g_printerr ("Audio: Output file couldn't be created\n");
return -1;
}
g_object_set (G_OBJECT (audio_source), "device", "hw:1,0", NULL);
g_object_set (G_OBJECT (sink), "device", "hw:1,0", NULL);
bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
gst_object_unref (bus);
gst_bin_add_many (GST_BIN(pipeline), audio_source, sink, NULL);
caps = gst_caps_new_simple ("audio/x-raw", "format", G_TYPE_STRING, "S16LE", "layout", G_TYPE_STRING, "interleaved", "rate", G_TYPE_INT, (int)44100, "channels", G_TYPE_INT, (int)2, NULL);
ret = gst_element_link_filtered (audio_source, sink, caps);
if (!ret) {
g_print ("audio_source and sink couldn't be linked\n");
gst_caps_unref (caps);
return FALSE;
}
gst_element_set_state (pipeline, GST_STATE_PLAYING);
g_print ("streaming...\n");
g_main_loop_run (loop);
g_print ("Returned, stopping stream\n");
gst_element_set_state (pipeline, GST_STATE_NULL);
g_print ("Deleting pipeline\n");
gst_object_unref (GST_OBJECT (pipeline));
g_source_remove (bus_watch_id);
g_main_loop_unref (loop);
return 0;
}
What I really don't understand is how to get video from the webcam and audio from my ALSA hardware at the same time and save them into a file (such as .mp4, for example). Can anyone help me? I tried to find something useful, but there's nothing on the board. In addition, I would also really appreciate knowing how to save just the video stream or just the audio stream in separate files.
UPDATE
I looked again at the tutorials and at the git link given by @nayana, so I tried to code something myself. I have two results:
#include <string.h>
#include <gst/gst.h>
#include <signal.h>
#include <unistd.h>
#include <stdlib.h>
static GMainLoop *loop;
static GstElement *pipeline;
static GstElement *muxer, *sink;
static GstElement *src_video, *encoder_video, *queue_video;
static GstElement *src_audio, *encoder_audio, *queue_audio;
static GstBus *bus;
static gboolean
message_cb (GstBus * bus, GstMessage * message, gpointer user_data)
{
switch (GST_MESSAGE_TYPE (message)) {
case GST_MESSAGE_ERROR:{
GError *err = NULL;
gchar *name, *debug = NULL;
name = gst_object_get_path_string (message->src);
gst_message_parse_error (message, &err, &debug);
g_printerr ("ERROR: from element %s: %s\n", name, err->message);
if (debug != NULL)
g_printerr ("Additional debug info:\n%s\n", debug);
g_error_free (err);
g_free (debug);
g_free (name);
g_main_loop_quit (loop);
break;
}
case GST_MESSAGE_WARNING:{
GError *err = NULL;
gchar *name, *debug = NULL;
name = gst_object_get_path_string (message->src);
gst_message_parse_warning (message, &err, &debug);
g_printerr ("ERROR: from element %s: %s\n", name, err->message);
if (debug != NULL)
g_printerr ("Additional debug info:\n%s\n", debug);
g_error_free (err);
g_free (debug);
g_free (name);
break;
}
case GST_MESSAGE_EOS:{
g_print ("Got EOS\n");
g_main_loop_quit (loop);
gst_element_set_state (pipeline, GST_STATE_NULL);
g_main_loop_unref (loop);
gst_object_unref (pipeline);
exit(0);
break;
}
default:
break;
}
return TRUE;
}
void sigintHandler(int unused) {
g_print("You ctrl-c-ed! Sending EoS");
gst_element_send_event(pipeline, gst_event_new_eos());
}
int main(int argc, char *argv[])
{
signal(SIGINT, sigintHandler);
gst_init (&argc, &argv);
pipeline = gst_pipeline_new(NULL);
src_video = gst_element_factory_make("v4l2src", NULL);
encoder_video = gst_element_factory_make("x264enc", NULL);
queue_video = gst_element_factory_make("queue", NULL);
src_audio = gst_element_factory_make ("alsasrc", NULL);
encoder_audio = gst_element_factory_make("lamemp3enc", NULL);
queue_audio = gst_element_factory_make("queue", NULL);
muxer = gst_element_factory_make("mp4mux", NULL);
sink = gst_element_factory_make("filesink", NULL);
if (!pipeline || !src_video || !encoder_video || !src_audio || !encoder_audio
|| !queue_video || !queue_audio || !muxer || !sink) {
g_error("Failed to create elements");
return -1;
}
g_object_set(src_audio, "device", "hw:1,0", NULL);
g_object_set(sink, "location", "video_audio_test.mp4", NULL);
gst_bin_add_many(GST_BIN(pipeline), src_video, encoder_video, queue_video,
src_audio, encoder_audio, queue_audio, muxer, sink, NULL);
gst_element_link_many (src_video,encoder_video,queue_video, muxer,NULL);
gst_element_link_many (src_audio,encoder_audio,queue_audio, muxer,NULL);
if (!gst_element_link_many(muxer, sink, NULL)){
g_error("Failed to link elements");
return -2;
}
loop = g_main_loop_new(NULL, FALSE);
bus = gst_pipeline_get_bus(GST_PIPELINE (pipeline));
gst_bus_add_signal_watch(bus);
g_signal_connect(G_OBJECT(bus), "message", G_CALLBACK(message_cb), NULL);
gst_object_unref(GST_OBJECT(bus));
gst_element_set_state(pipeline, GST_STATE_PLAYING);
g_print("Starting loop");
g_main_loop_run(loop);
return 0;
}
With the code above I am able to record video from the cam, but the audio is recorded for just one second somewhere at random during the recording, and it gives me this error:
ERROR: from element /GstPipeline:pipeline0/GstAlsaSrc:alsasrc0: Can't record audio fast enough
Additional debug info:
gstaudiobasesrc.c(869): gst_audio_base_src_create (): /GstPipeline:pipeline0/GstAlsaSrc:alsasrc0:
Dropped 206388 samples. This is most likely because downstream can't keep up and is consuming samples too slowly.
So I tried to add some settings and queues:
#include <string.h>
#include <gst/gst.h>
#include <signal.h>
#include <unistd.h>
#include <stdlib.h>
static GMainLoop *loop;
static GstElement *pipeline;
static GstElement *muxer, *sink;
static GstElement *src_video, *encoder_video, *queue_video, *rate_video, *scale_video, *capsfilter_video;
static GstElement *src_audio, *encoder_audio, *queue_audio, *queue_audio2, *capsfilter_audio, *rate_audio;
static GstBus *bus;
static GstCaps *caps;
static gboolean
message_cb (GstBus * bus, GstMessage * message, gpointer user_data)
{
switch (GST_MESSAGE_TYPE (message)) {
case GST_MESSAGE_ERROR:{
GError *err = NULL;
gchar *name, *debug = NULL;
name = gst_object_get_path_string (message->src);
gst_message_parse_error (message, &err, &debug);
g_printerr ("ERROR: from element %s: %s\n", name, err->message);
if (debug != NULL)
g_printerr ("Additional debug info:\n%s\n", debug);
g_error_free (err);
g_free (debug);
g_free (name);
g_main_loop_quit (loop);
break;
}
case GST_MESSAGE_WARNING:{
GError *err = NULL;
gchar *name, *debug = NULL;
name = gst_object_get_path_string (message->src);
gst_message_parse_warning (message, &err, &debug);
g_printerr ("ERROR: from element %s: %s\n", name, err->message);
if (debug != NULL)
g_printerr ("Additional debug info:\n%s\n", debug);
g_error_free (err);
g_free (debug);
g_free (name);
break;
}
case GST_MESSAGE_EOS:{
g_print ("Got EOS\n");
g_main_loop_quit (loop);
gst_element_set_state (pipeline, GST_STATE_NULL);
g_main_loop_unref (loop);
gst_object_unref (pipeline);
exit(0);
break;
}
default:
break;
}
return TRUE;
}
void sigintHandler(int unused) {
g_print("You ctrl-c-ed! Sending EoS");
gst_element_send_event(pipeline, gst_event_new_eos());
}
int main(int argc, char *argv[])
{
signal(SIGINT, sigintHandler);
gst_init (&argc, &argv);
pipeline = gst_pipeline_new(NULL);
src_video = gst_element_factory_make("v4l2src", NULL);
rate_video = gst_element_factory_make ("videorate", NULL);
scale_video = gst_element_factory_make ("videoscale", NULL);
capsfilter_video = gst_element_factory_make ("capsfilter", NULL);
queue_video = gst_element_factory_make("queue", NULL);
encoder_video = gst_element_factory_make("x264enc", NULL);
src_audio = gst_element_factory_make ("alsasrc", NULL);
capsfilter_audio = gst_element_factory_make ("capsfilter", NULL);
queue_audio = gst_element_factory_make("queue", NULL);
rate_audio = gst_element_factory_make ("audiorate", NULL);
queue_audio2 = gst_element_factory_make("queue", NULL);
encoder_audio = gst_element_factory_make("lamemp3enc", NULL);
muxer = gst_element_factory_make("mp4mux", NULL);
sink = gst_element_factory_make("filesink", NULL);
if (!pipeline || !src_video || !rate_video || !scale_video || !capsfilter_video
|| !queue_video || !encoder_video || !src_audio || !capsfilter_audio
|| !queue_audio || !rate_audio || !queue_audio2 || !encoder_audio
|| !muxer || !sink) {
g_error("Failed to create elements");
return -1;
}
// Set up the pipeline
g_object_set(src_video, "device", "/dev/video0", NULL);
g_object_set(src_audio, "device", "hw:1,0", NULL);
g_object_set(sink, "location", "video_audio_test.mp4", NULL);
// video settings
caps = gst_caps_from_string("video/x-raw,format=(string)I420,width=480,height=384,framerate=(fraction)25/1");
g_object_set (G_OBJECT (capsfilter_video), "caps", caps, NULL);
gst_caps_unref (caps);
// audio settings
caps = gst_caps_from_string("audio/x-raw,rate=44100,channels=1");
g_object_set (G_OBJECT (capsfilter_audio), "caps", caps, NULL);
gst_caps_unref (caps);
// add all elements into the pipeline
gst_bin_add_many(GST_BIN(pipeline), src_video, rate_video, scale_video, capsfilter_video,
queue_video, encoder_video, src_audio, capsfilter_audio, queue_audio, rate_audio,
queue_audio2, encoder_audio, muxer, sink, NULL);
if (!gst_element_link_many (src_video,rate_video,scale_video, capsfilter_video,
queue_video, encoder_video, muxer,NULL))
{
g_error("Failed to link video elements");
return -2;
}
if (!gst_element_link_many (src_audio, capsfilter_audio, queue_audio, rate_audio,
queue_audio2, encoder_audio, muxer,NULL))
{
g_error("Failed to link audio elements");
return -2;
}
if (!gst_element_link_many(muxer, sink, NULL))
{
g_error("Failed to link elements");
return -2;
}
loop = g_main_loop_new(NULL, FALSE);
bus = gst_pipeline_get_bus(GST_PIPELINE (pipeline));
gst_bus_add_signal_watch(bus);
g_signal_connect(G_OBJECT(bus), "message", G_CALLBACK(message_cb), NULL);
gst_object_unref(GST_OBJECT(bus));
gst_element_set_state(pipeline, GST_STATE_PLAYING);
g_print("Starting loop");
g_main_loop_run(loop);
return 0;
}
This time the code doesn't record anything and gives me the following error:
ERROR: from element /GstPipeline:pipeline0/GstAlsaSrc:alsasrc0: Internal data flow error.
Additional debug info:
gstbasesrc.c(2948): gst_base_src_loop (): /GstPipeline:pipeline0/GstAlsaSrc:alsasrc0:
streaming task paused, reason not-negotiated (-4)
Can you point me in the right direction to fix the error?
Thanks in advance.
What you need is a multiplexer: a GStreamer element that can merge two streams into one.
mp4, mkv, avi, etc. are just container formats which contain multiple "data streams"; these can be audio, video or subtitles (not all formats support all of them).
I don't know about your use case, but you don't need C code for what you are doing. You can just use the gst-launch-1.0 tool, which has its own GStreamer kind-of scripting language.
For simplicity I will use the debugging elements videotestsrc and audiotestsrc to simulate input (instead of an actual camera etc.).
gst-launch-1.0 -e videotestsrc ! x264enc ! mp4mux name=mux ! filesink location="bla.mp4" audiotestsrc ! lamemp3enc ! mux.
videotestsrc --> x264enc -----\
>---> mp4mux ---> filesink
audiotestsrc --> lamemp3enc --/
Explanation:
videotestsrc generates raw video, which in GStreamer terms is called "video/x-raw".
However, mp4 cannot hold raw video, so we need to encode it, for example with x264enc, which turns our data into "video/x-h264".
Then we can finally mux this into our mp4 with the mp4mux element.
When we take a look at the GStreamer docs using gst-inspect-1.0 mp4mux, we see that this element supports various formats, among which is also video/x-h264.
We do the same thing for audio, with either faac for AAC or lamemp3enc for MP3.
With gst-launch-1.0 I did two tricks and one bonus trick:
ability to have separate branches in one line; this is achieved by just separating those branches with a space instead of !
ability to make an alias with name=mux and refer to it later by adding a dot right at the end of the name, like mux. (you can make up any name you like for that element)
writing EOS after hitting Ctrl+C to stop the recording; this is achieved with the -e parameter
Finally the output goes to filesink which just writes anything you give it to file.
Now for homework (one possible result is sketched below):
Use the elements you actually need: v4l2src, alsasrc
Add queue elements to add buffering and thread separation
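Applied to your devices (/dev/video0 and hw:1,0), that homework could end up looking roughly like this; treat it as an untested sketch:
gst-launch-1.0 -e v4l2src device=/dev/video0 ! videoconvert ! queue ! x264enc tune=zerolatency ! mp4mux name=mux ! filesink location=video_audio_test.mp4 alsasrc device=hw:1,0 ! audioconvert ! queue ! lamemp3enc ! mux.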

C++ (Ubuntu): load audio file (wav, mp3, aiff) to array/vector with gstreamer

How can I decode an audio file (wav, mp3, aiff) with C++ on Ubuntu and store it (PCM/int) in a vector/array?
What I did so far: I used GStreamer (I'm a complete beginner) to decode the file; I can play it and get data with pull-buffer, however I didn't find a method to get the whole audio data at once to store in an array.
Is there such a method in GStreamer? Or is there another C++ library to decode audio files and get the raw (PCM/int) data?
edit: change frequency to PCM
I solved the problem myself with GStreamer. The trick is to use giostreamsink as the sink; it stores the data into a GMemoryOutputStream.
The full code sample:
#include <string>
#include <stdio.h>
#include <gst/gst.h>
#include <gio/gio.h>
#include <boost/thread.hpp>
static void on_pad_added(GstElement *decodebin,
GstPad *pad,
gpointer data) {
GstElement *convert = (GstElement *) data;
GstCaps *caps;
GstStructure *str;
GstPad *audiopad;
audiopad = gst_element_get_static_pad(convert, "sink");
if (GST_PAD_IS_LINKED(audiopad)) {
g_object_unref(audiopad);
return;
}
caps = gst_pad_get_caps(pad);
str = gst_caps_get_structure(caps, 0);
printf("here %s\n",gst_structure_get_name(str));
if (!g_strrstr(gst_structure_get_name(str), "audio")) {
gst_caps_unref(caps);
gst_object_unref(audiopad);
return;
}
gst_caps_unref(caps);
gst_pad_link(pad, audiopad);
g_object_unref(audiopad);
}
static gboolean bus_call(GstBus *bus,
GstMessage *msg,
gpointer data) {
GMainLoop *loop = (GMainLoop*)data;
switch (GST_MESSAGE_TYPE(msg)) {
case GST_MESSAGE_EOS:
g_print ("End of stream\n");
g_main_loop_quit(loop);
break;
case GST_MESSAGE_ERROR: {
gchar *debug;
GError *error;
gst_message_parse_error(msg, &error, &debug);
g_free (debug);
g_printerr("Error: %s\n", error->message);
g_error_free(error);
g_main_loop_quit(loop);
break;
}
default:
break;
}
return true;
}
int main (int argc, char **argv) {
gst_init(&argc, &argv);
GstElement *pipeline, *source, *decode, *sink, *convert;
int rate = 44100;
int channels = 1;
int depth = 16;
bool output_signed = true;
GMainLoop *loop;
GstBus *bus;
guint bus_watch_id;
GMemoryOutputStream *stream;
gpointer out_data;
// loop
loop = g_main_loop_new(NULL, false);
// pipeline
pipeline = gst_pipeline_new("test_pipeline");
// sink
stream = G_MEMORY_OUTPUT_STREAM(g_memory_output_stream_new(NULL, 0, (GReallocFunc)g_realloc, (GDestroyNotify)g_free));
sink = gst_element_factory_make ("giostreamsink", "sink");
g_object_set(G_OBJECT(sink), "stream", stream, NULL);
// source
source = gst_element_factory_make("filesrc", "source");
g_object_set(G_OBJECT(source), "location", "/home/sam/Desktop/audio/audio.wav", NULL);
// convert
convert = gst_element_factory_make("audioconvert", "convert");
// decode
decode = gst_element_factory_make("decodebin", "decoder");
// link decode to convert
g_signal_connect(decode, "pad-added", G_CALLBACK(on_pad_added), convert);
// bus
bus = gst_pipeline_get_bus(GST_PIPELINE (pipeline));
bus_watch_id = gst_bus_add_watch(bus, bus_call, loop);
gst_object_unref(bus);
// add elements into pipeline
gst_bin_add_many(GST_BIN(pipeline), source, decode, convert, sink, NULL);
// link source to decode
gst_element_link(source, decode);
// caps
GstCaps *caps;
caps = gst_caps_new_simple("audio/x-raw-int",
"rate", G_TYPE_INT, rate,
"channels", G_TYPE_INT, channels,
"width", G_TYPE_INT, depth,
"depth", G_TYPE_INT, depth,
"signed", G_TYPE_BOOLEAN, output_signed,
NULL);
// link convert to sink
gst_element_link_filtered(convert, sink, caps);
gst_caps_unref(caps);
// start playing
gst_element_set_state(GST_ELEMENT(pipeline), GST_STATE_PLAYING);
// iterate
g_print("Running...\n");
g_main_loop_run(loop);
// out of the main loop, clean up nicely
g_print("Returned, stopping playback\n");
gst_element_set_state(pipeline, GST_STATE_NULL);
g_print("Deleting pipeline\n");
gst_object_unref(GST_OBJECT(pipeline));
g_source_remove (bus_watch_id);
g_main_loop_unref(loop);
// get data
g_print("get data\n");
out_data = g_memory_output_stream_get_data(G_MEMORY_OUTPUT_STREAM(stream));
unsigned long size = g_memory_output_stream_get_size(G_MEMORY_OUTPUT_STREAM(stream));
unsigned long sizeData = g_memory_output_stream_get_data_size(G_MEMORY_OUTPUT_STREAM(stream));
std::cout << "stream size: " << size << std::endl;
std::cout << "stream data size: " << sizeData << std::endl;
// access data and store in vector
std::vector<int16_t> data;
for (unsigned long i = 0; i < sizeData/2; ++i) {
data.push_back(((gint16*)out_data)[i]);
}
return 0;
}
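Note that this code targets the GStreamer 0.10 API (gst_pad_get_caps, the audio/x-raw-int caps). If you are on GStreamer 1.0, gst_pad_get_caps becomes gst_pad_get_current_caps, and the caps part would look roughly like this instead (a sketch, not tested with the rest of the code):
caps = gst_caps_new_simple("audio/x-raw",
    "format", G_TYPE_STRING, "S16LE",
    "layout", G_TYPE_STRING, "interleaved",
    "rate", G_TYPE_INT, rate,
    "channels", G_TYPE_INT, channels,
    NULL);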

Playing .AVI file with GStreamer SDK in Windows

I want to play an .AVI file using GStreamer on Windows 7. The GStreamer SDK was installed as described in this link. Then a GStreamer SDK project was created and the following code was added to a C file as given in this link, with the suggested corrections. Project properties -> Configuration properties -> Debugging -> Working directory was changed to "$(GSTREAMER_SDK_ROOT_X86)\bin" and the same was added to the Windows PATH variable, as suggested in the installation link given above. When the code is run, it just exits without playing the video; the last few lines of the output are given below. Please note that I have installed the 32-bit GStreamer SDK on 64-bit Windows 7.
Code:
#include<stdio.h>
#include<gst/gst.h>
#include<glib.h>
//Function to process message on bus of pipeline
gboolean process_message(GstBus *bus, GstMessage *msg,gpointer data);
//Function to add pad dynamically for ogg demux
void dynamic_addpad(GstElement *element, GstPad *pad, gpointer data);
void dynamic_decodepad (GstElement* object, GstPad* arg0, gboolean arg1,gpointer user_data);
GstElement *source, *demuxer, *audio_decoder, *video_decoder, *audio_convertor,*video_convertor, *audio_sink,*video_sink,*audioqueue,*videoqueue;//*audio_demuxer, *video_demuxer,
int main(int argc,char* argv[]){
GstPipeline *pipeline;
GstBin *Bin;
GstBus *bus;
GMainLoop *Mainloop;
gst_init (&argc,&argv);
Mainloop = g_main_loop_new(NULL,FALSE);//NULL to use the current context and False to tell its not in running state
GstElement *pipeline = gst_pipeline_new("PIPELINE");
Bin = GST_BIN(pipeline);
bus = gst_pipeline_get_bus(GST_PIPELINE(pipeline));
source = gst_element_factory_make("filesrc","file-source");
g_object_set(G_OBJECT(source), "location", "file:///C:/Video.avi", NULL);
demuxer = gst_element_factory_make("avidemux","avi-demuxer");
audioqueue = gst_element_factory_make("queue","Queue for audio");
videoqueue = gst_element_factory_make("queue","Queue for video");
audio_decoder = gst_element_factory_make("decodebin","a_decodebin");
video_decoder = gst_element_factory_make("decodebin","decoderbin");//"Vorbis audio decoder","vorbis");
audio_convertor = gst_element_factory_make("audioconvert","audio convertor");//"Audio converter","audioconvert");
video_convertor = gst_element_factory_make("videoscale","video convertor");//"Audio converter","audioconvert");
audio_sink = gst_element_factory_make("autoaudiosink","Auto audio sink");
video_sink = gst_element_factory_make("xvimagesink","XV video sink ");
if(!source || !demuxer || !audioqueue || !videoqueue || !video_decoder ||!audio_convertor || ! video_convertor || !audio_sink || !video_sink ){
g_print("Could not not create element\n");
return 0;
}
gst_bin_add(Bin,source);
gst_bin_add_many(
Bin,
demuxer,
audioqueue,videoqueue,
audio_decoder,audio_convertor,
video_decoder,video_convertor,
audio_sink,video_sink,
NULL);
gst_element_link(source,demuxer);
gst_element_link_many(audioqueue,audio_decoder,NULL);
gst_element_link_many(audio_convertor,audio_sink,NULL);
gst_element_link_many(videoqueue,video_decoder,NULL);
gst_element_link_many(video_convertor,video_sink,NULL);
g_signal_connect(demuxer,"pad-added",G_CALLBACK(dynamic_addpad),NULL);//demuxer and decoder are passed as instance and data as pads of both the elements are linked in dynamic_addpad
g_signal_connect(audio_decoder,"new-decoded-pad",G_CALLBACK(dynamic_decodepad),NULL);
g_signal_connect(video_decoder,"new-decoded-pad",G_CALLBACK(dynamic_decodepad),NULL);//demuxer and decoder are passed as instance and data as pads of both the elements are linked in dynamic_addpad
gst_bus_add_watch(bus,process_message,Mainloop); //Mainloop is passed as user data as in the process_message actions are taken on the loop
g_object_unref(bus);
g_print("In playing state\n");
gst_element_set_state(pipeline, GST_STATE_PLAYING);//Pipeline is also a bin and bin is also an element at abstract level and hence gst_element_set_state call is used to set state of pipeline.
g_main_loop_run(Mainloop);
g_print("In playing state2\n");
gst_element_set_state(pipeline, GST_STATE_NULL);
g_object_unref(G_OBJECT(pipeline));
}
//Function to process message on bus of pipeline
gboolean process_message(GstBus *bus, GstMessage *msg,gpointer data){
GError *error;
gchar *debug;
GMainLoop *loop = (GMainLoop *)data;
g_print(" In process message msg->type : %d\n",GST_MESSAGE_TYPE(msg));
switch(GST_MESSAGE_TYPE(msg)){
case GST_MESSAGE_UNKNOWN :
g_print("GST_MESSAGE_UNKNOWN \n");
break;
case GST_MESSAGE_EOS :
g_print("GST_MESSAGE_EOS \n");
g_main_loop_quit(loop);
break;
case GST_MESSAGE_ERROR :
g_print("GST_MESSAGE_ERROR \n");
gst_message_parse_error (msg, &error, &debug);
g_free(debug);
//if(!error)
{
g_print("GST_MESSAGE_ERROR message : %s \n",error->message);
}
g_main_loop_quit(loop);
break;
case GST_MESSAGE_WARNING :
g_print("GST_MESSAGE_WARNING \n");
break;
case GST_MESSAGE_INFO :
g_print("GST_MESSAGE_INFO \n");
break;
case GST_MESSAGE_TAG :
g_print("GST_MESSAGE_TAG \n");
break;
case GST_MESSAGE_BUFFERING:
g_print("GST_MESSAGE_BUFFERING \n");
break;
case GST_MESSAGE_STATE_CHANGED:
g_print("GST_MESSAGE_STATE_CHANGED \n");
break;
default :
g_print("default \n");
break;
}
return TRUE; //returns true always as it has to be always registered returning false will deregister the function
}
void dynamic_addpad(GstElement *element, GstPad *pad, gpointer data) {
char* pad_name = gst_pad_get_name(pad);
g_print(" In dynamic ADDING PAD %s\n", pad_name);
if (g_str_has_prefix(pad_name,"audio")) {
GstPad *audiodemuxsink = gst_element_get_static_pad(audioqueue,"sink");
gst_pad_link(pad,audiodemuxsink );
}
else if (g_str_has_prefix(pad_name,"video")) {
GstPad *videodemuxsink = gst_element_get_static_pad(videoqueue,"sink");
gst_pad_link(pad,videodemuxsink );
}
g_free (pad_name);
}
void dynamic_decodepad (GstElement* object, GstPad* pad, gboolean arg1,gpointer user_data) {
GstPad* videoconvertsink = gst_element_get_static_pad(video_convertor,"sink");
if (gst_pad_can_link(pad,videoconvertsink)) {
gst_pad_link(pad,videoconvertsink);
}
GstPad* audioconvertsink = gst_element_get_static_pad(audio_convertor,"sink");
if (gst_pad_can_link(pad,audioconvertsink)) {
gst_pad_link(pad,audioconvertsink);
}
}
Output:
The thread 'Win32 Thread' (0x19c4) has exited with code 0 (0x0).
The thread 'Win32 Thread' (0x2370) has exited with code 0 (0x0).
The thread 'Win32 Thread' (0x2040) has exited with code 0 (0x0).
The program '[5368] GstProject2.exe: Native' has exited with code 0 (0x0).
Finally I was able to play the AVI file using the following code, which is based on this example on the GStreamer SDK website.
In command prompt:
Option 1:
gst-launch-0.10 filesrc location=C:\\Video.avi ! decodebin2 name=dec ! queue ! ffmpegcolorspace ! autovideosink dec. ! queue ! audioconvert ! audioresample ! autoaudiosink
Option 2:
gst-launch-0.10 filesrc location=C:\\Video.avi ! decodebin2 name=dec ! ffmpegcolorspace ! autovideosink dec. ! audioconvert ! audioresample ! autoaudiosink
Option 3:
gst-launch-0.10 uridecodebin uri=file:///C:/Video.avi name=dec ! ffmpegcolorspace ! autovideosink dec. ! audioconvert ! autoaudiosink
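(These commands use GStreamer 0.10 element names. On GStreamer 1.0 the rough equivalent of Option 3 would be: gst-launch-1.0 uridecodebin uri=file:///C:/Video.avi name=dec ! videoconvert ! autovideosink dec. ! audioconvert ! autoaudiosink, since decodebin2 became decodebin and ffmpegcolorspace became videoconvert.)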
In Visual Studio:
#include <gst/gst.h>
/* Structure to contain all our information, so we can pass it to callbacks */
typedef struct _CustomData {
GstElement *pipeline;
GstElement *source;
GstElement *convert;
GstElement *audio_sink;
GstElement *colorspace;
GstElement *video_sink;
} CustomData;
/* Handler for the pad-added signal */
static void pad_added_handler (GstElement *src, GstPad *pad, CustomData *data);
int main(int argc, char *argv[]) {
CustomData data;
GstBus *bus;
GstMessage *msg;
GstStateChangeReturn ret;
gboolean terminate = FALSE;
/* Initialize GStreamer */
gst_init (&argc, &argv);
/* Create the elements */
data.source = gst_element_factory_make ("uridecodebin", "source");
data.convert = gst_element_factory_make ("audioconvert", "convert");
data.audio_sink = gst_element_factory_make ("autoaudiosink", "audio_sink");
data.colorspace = gst_element_factory_make ("ffmpegcolorspace", "colorspace");
data.video_sink = gst_element_factory_make ("autovideosink", "video_sink");
/* Create the empty pipeline */
data.pipeline = gst_pipeline_new ("test-pipeline");
if (!data.pipeline || !data.source || !data.convert || !data.audio_sink || !data.colorspace || !data.video_sink) {
g_printerr ("Not all elements could be created.\n");
return -1;
}
/* Build the pipeline. Note that we are NOT linking the source at this
* point. We will do it later. */
gst_bin_add_many (GST_BIN (data.pipeline), data.source, data.convert , data.audio_sink, data.colorspace, data.video_sink, NULL);
if (!(gst_element_link (data.convert, data.audio_sink) && gst_element_link (data.colorspace, data.video_sink))) {
g_printerr ("Elements could not be linked.\n");
gst_object_unref (data.pipeline);
return -1;
}
/* Set the URI to play */
g_object_set (data.source, "uri", "file:///C:/Video.avi", NULL);
/* Connect to the pad-added signal */
g_signal_connect (data.source, "pad-added", G_CALLBACK (pad_added_handler), &data);
/* Start playing */
ret = gst_element_set_state (data.pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr ("Unable to set the pipeline to the playing state.\n");
gst_object_unref (data.pipeline);
return -1;
}
/* Listen to the bus */
bus = gst_element_get_bus (data.pipeline);
do {
msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE, GST_MESSAGE_STATE_CHANGED | GST_MESSAGE_ERROR | GST_MESSAGE_EOS);
/* Parse message */
if (msg != NULL) {
GError *err;
gchar *debug_info;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_ERROR:
gst_message_parse_error (msg, &err, &debug_info);
g_printerr ("Error received from element %s: %s\n", GST_OBJECT_NAME (msg->src), err->message);
g_printerr ("Debugging information: %s\n", debug_info ? debug_info : "none");
g_clear_error (&err);
g_free (debug_info);
terminate = TRUE;
break;
case GST_MESSAGE_EOS:
g_print ("End-Of-Stream reached.\n");
terminate = TRUE;
break;
case GST_MESSAGE_STATE_CHANGED:
/* We are only interested in state-changed messages from the pipeline */
if (GST_MESSAGE_SRC (msg) == GST_OBJECT (data.pipeline)) {
GstState old_state, new_state, pending_state;
gst_message_parse_state_changed (msg, &old_state, &new_state, &pending_state);
g_print ("Pipeline state changed from %s to %s:\n",
gst_element_state_get_name (old_state), gst_element_state_get_name (new_state));
}
break;
default:
/* We should not reach here */
g_printerr ("Unexpected message received.\n");
break;
}
gst_message_unref (msg);
}
} while (!terminate);
/* Free resources */
gst_object_unref (bus);
gst_element_set_state (data.pipeline, GST_STATE_NULL);
gst_object_unref (data.pipeline);
return 0;
}
/* This function will be called by the pad-added signal */
static void pad_added_handler (GstElement *src, GstPad *new_pad, CustomData *data) {
GstPad *sink_pad_audio = gst_element_get_static_pad (data->convert, "sink");
GstPad *sink_pad_video = gst_element_get_static_pad (data->colorspace, "sink");
GstPadLinkReturn ret;
GstCaps *new_pad_caps = NULL;
GstStructure *new_pad_struct = NULL;
const gchar *new_pad_type = NULL;
g_print ("Received new pad '%s' from '%s':\n", GST_PAD_NAME (new_pad), GST_ELEMENT_NAME (src));
///* If our converter is already linked, we have nothing to do here */
//if (gst_pad_is_linked (sink_pad)) {
// g_print (" We are already linked. Ignoring.\n");
// goto exit;
//}
/* Check the new pad's type */
new_pad_caps = gst_pad_get_caps (new_pad);
new_pad_struct = gst_caps_get_structure (new_pad_caps, 0);
new_pad_type = gst_structure_get_name (new_pad_struct);
if (!g_str_has_prefix (new_pad_type, "audio/x-raw")) {
g_print (" It has type '%s' which is raw video. Connecting.\n", new_pad_type);
/* Attempt the link */
ret = gst_pad_link (new_pad, sink_pad_video);
if (GST_PAD_LINK_FAILED (ret)) {
g_print (" Type is '%s' but link failed.\n", new_pad_type);
} else {
g_print (" Link succeeded (type '%s').\n", new_pad_type);
}
goto exit;
}
/* Attempt the link */
ret = gst_pad_link (new_pad, sink_pad_audio);
if (GST_PAD_LINK_FAILED (ret)) {
g_print (" Type is '%s' but link failed.\n", new_pad_type);
} else {
g_print (" Link succeeded (type '%s').\n", new_pad_type);
}
exit:
/* Unreference the new pad's caps, if we got them */
if (new_pad_caps != NULL)
gst_caps_unref (new_pad_caps);
/* Unreference the sink pad */
gst_object_unref (sink_pad_audio);
gst_object_unref (sink_pad_video);
}

running the gstreamer pipeline (not able to get video and audio data in the callback)

I'm a newbie to gstreamer and I wanted to get both the audio and video buffers from a 3gp file and do some processing on them in a callback.
(I start my pipeline in a separate thread; the pipeline delivers audio buffers in a callback AudioCallback and video buffers in VideoCallback.)
This is how my pipeline looks:
GstElement* audioQueue;//global variable , needed in on_pad_added (cant pass both while connecting demuxer to callback)
GstElement* videoQueue;//global variable , needed in on_pad_added (cant pass both while connecting demuxer to callback)
//static functions
static gboolean
bus_call (GstBus* bus, GstMessage* msg, gpointer data)
{
GMainLoop* loop = (GMainLoop*) data;
switch (GST_MESSAGE_TYPE (msg))
{
case GST_MESSAGE_EOS:
g_main_loop_quit (loop);
break;
case GST_MESSAGE_ERROR: {
gchar *debug;
GError *error;
gst_message_parse_error (msg, &error, &debug);
g_free (debug);
g_printerr ("Error: %s\n", error->message);
g_error_free (error);
g_main_loop_quit (loop);
break;
}
default:
break;
}
return true;
}
static void link_two_elements(GstElement* src_element, GstElement* sink_element)
{
if(!gst_element_link(src_element, sink_element))
g_printerr ("Linking Error");
}
static void
on_pad_added (GstElement *element,
GstPad *pad,
gpointer data)
{
GstCaps *caps;
GstStructure *str;
gchar *tex;
GstPad* sinkpad;
/* check media type */
caps = gst_pad_get_caps (pad);
str = gst_caps_get_structure (caps, 0);
tex = (gchar*)gst_structure_get_name(str);
if(g_strrstr(tex,"audio"))
{
//GstElement *audioQueue = (GstElement *) data;
sinkpad = gst_element_get_static_pad (audioQueue, "sink");
if(sinkpad)
{
GstPadLinkReturn linkReturn = gst_pad_link (pad, sinkpad);
gst_object_unref (sinkpad);
}
}
if(g_strrstr(tex,"video"))
{
//GstElement *videoQueue = (GstElement *) data;
sinkpad = gst_element_get_static_pad (videoQueue, "sink");
GstPadLinkReturn linkReturn = gst_pad_link (pad, sinkpad);
gst_object_unref (sinkpad);
}
}
void runPipeline()
{
GMainLoop *loop;
GstElement *__pPipeline, *source, *demuxer, *audioDecoder, *audioConverter, *audioresample, /**audioQueue,*/ *audioSink, *videoDecoder, *videoSink, /**videoQueue,*/ *ffmpegcolorspace, *videoscale;
GstBus* bus;
//Initialisation
gst_init (null,null);
loop = g_main_loop_new (NULL, FALSE);
// Create gstreamer elements
__pPipeline = gst_pipeline_new("test_appsink");
source = gst_element_factory_make ("filesrc", "file-source");
demuxer = gst_element_factory_make("qtdemux", "demuxer");
//audioDecoder = gst_element_factory_make("ffdec_mp3", "audioDecoder");
audioDecoder = gst_element_factory_make("decodebin", "audioDecoder");
audioConverter = gst_element_factory_make("audioconvert", "audioConverter");
audioresample = gst_element_factory_make("audioresample", "audioresample");
audioSink = gst_element_factory_make("appsink", "audioSink");
audioQueue = gst_element_factory_make("queue2", "audioQueue");
//videoDecoder = gst_element_factory_make("ffdec_h264", "videoDecoder");
videoQueue = gst_element_factory_make("queue2", "videoQueue");
videoDecoder = gst_element_factory_make("decodebin ", "videoDecoder");
ffmpegcolorspace = gst_element_factory_make("ffmpegcolorspace", "ffmpegcolorspace");
videoscale = gst_element_factory_make("videoscale", "videoscale");
videoSink = gst_element_factory_make("appsink", "videoSink");
//appsink = gst_element_factory_make("appsink", "sink-buffer");
if (!__pPipeline || !source || !demuxer || !audioDecoder || !audioConverter ||!audioresample || !audioSink || !videoSink || !audioQueue || !videoQueue || !videoDecoder || !ffmpegcolorspace || !videoscale )
{
//return -1;
}
//we set the input filename to the source element
g_object_set (G_OBJECT (source), "location", "/etc/20000101-161404.3gp", NULL);
//Make appsink emit the "new-preroll" and "new-buffer" signals.
gst_app_sink_set_emit_signals ((GstAppSink*) audioSink, TRUE);
gst_app_sink_set_emit_signals ((GstAppSink*) videoSink, TRUE);
//we add a message handler
bus = gst_pipeline_get_bus (GST_PIPELINE (__pPipeline));
gst_bus_add_watch (bus, bus_call, loop);
gst_object_unref (bus);
//we add all elements into the pipeline
gst_bin_add_many (GST_BIN (__pPipeline),
source, demuxer, videoDecoder, audioDecoder, audioConverter, audioresample, audioSink, videoSink,
audioQueue, videoQueue, ffmpegcolorspace, videoscale, NULL);
//link source and demuxer seperately
link_two_elements(source, demuxer);
//link rest of the elements
int retValVideoLinking = (int)gst_element_link_many (videoQueue, videoDecoder, ffmpegcolorspace, videoscale, videoSink, NULL);
int retValAudioLinking = (int)gst_element_link_many (audioQueue, audioDecoder, audioConverter, audioresample, audioSink, NULL);
gulong sigConRet = g_signal_connect (demuxer, "pad-added", G_CALLBACK (on_pad_added), null);
_ArAudioIn audioInstance = _ArAudioIn::GetArAudioInstance();
g_signal_connect (videoSink, "new-buffer", G_CALLBACK (AudioCallback), null);//AudioCallback static API
g_signal_connect (audioSink, "new-buffer", G_CALLBACK (VideoCallback), null);//VideoCallback static API
//Set the pipeline to "playing" state
GstStateChangeReturn state = gst_element_set_state (__pPipeline, GST_STATE_PLAYING);
g_main_loop_run (loop);
return null;
}
I'm getting just a single video buffer in my VideoCallback, and in on_pad_added I'm getting a linking error when linking the audio pad:
GST_PAD_LINK_NOFORMAT = -4,
I'm trying to link the queue's sink pad to the pad received in on_pad_added; the same thing works for video but not for audio.
If anybody has any idea about this, please give me some pointers to get rid of this error and make this pipeline work.
It would be nice if you cleaned up your code before asking us to debug it. As general advice, check the return values and either log a warning or simply exit(1) to make sure your pipeline setup works (e.g. in the pad-added handler). I'd also start by using a normal video and audio sink to check that it plays.
Finally, it is usually a bad idea to pull data out of the pipeline. Perhaps you could tell us what you want to do with the data once you have it in your callback, so that we can give better advice.
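For example, a defensive version of the audio branch of on_pad_added could look something like this (a sketch only, reusing the element names from the question):
if (g_strrstr(tex, "audio"))
{
  GstPad *sinkpad = gst_element_get_static_pad(audioQueue, "sink");
  if (!sinkpad) {
    g_printerr("audioQueue has no sink pad\n");
    exit(1);
  }
  GstPadLinkReturn linkReturn = gst_pad_link(pad, sinkpad);
  if (GST_PAD_LINK_FAILED(linkReturn)) {
    g_printerr("Linking the demuxer audio pad failed: %d\n", linkReturn);
    exit(1);
  }
  gst_object_unref(sinkpad);
}
That way the GST_PAD_LINK_NOFORMAT failure shows up immediately at the spot where it happens, instead of later as a confusing internal data flow error.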