Properly close the pipeline to save image of udpsrc (currently my image is empty) - c++

I would like to save one image from my udpsrc. When the user clicks a button, the code below runs, but the image it produces is empty. I have tried many ways to stop the pipeline, but I think I am not closing it properly.
Does anyone have an idea?
GstElement* snappipe;
GError* error = NULL;
GstElement* source;
GstElement* filesink;
GstCaps* caps = gst_caps_new_simple("application/x-rtp",
"media", G_TYPE_STRING, "video",
"payload", G_TYPE_INT, 96,
"encoding-name", G_TYPE_STRING, "H264",
NULL);
m_strPathNameSave += CreateFileName("png");
snappipe = gst_parse_launch("udpsrc name=source num-buffers=1 ! rtph264depay ! h264parse ! avdec_h264 ! autovideoconvert ! pngenc ! filesink name=mysink", &error);
if (!snappipe) {
g_print("Parse error: %s\n", error->message);
exit(1);
}
filesink = gst_bin_get_by_name(GST_BIN(snappipe), "mysink");
g_object_set(filesink, "location", m_strPathNameSave.c_str(), NULL);
source = gst_bin_get_by_name(GST_BIN(snappipe), "source");
g_object_set(G_OBJECT(source), "caps", caps, NULL);
g_object_set(G_OBJECT(source), "port", m_port, NULL);
gst_element_set_state(snappipe, GST_STATE_PLAYING);
GstBus* bus = gst_element_get_bus(snappipe);
gst_object_unref(bus);
Sleep(10000);
gst_element_set_state(snappipe, GST_STATE_NULL);
gst_object_unref(snappipe);
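Two things are worth noting about this snippet: the bus is fetched and unreffed without ever being read, and with num-buffers=1 udpsrc delivers a single RTP packet, which is usually not enough for the decoder to produce a complete H.264 frame. A more robust shutdown (a minimal sketch reusing the names above) waits on the bus for EOS or an error instead of sleeping:
GstBus* bus = gst_element_get_bus(snappipe);
// udpsrc posts EOS once num-buffers buffers have been pushed;
// block here until that EOS (or an error) reaches the bus
GstMessage* msg = gst_bus_timed_pop_filtered(bus, GST_CLOCK_TIME_NONE,
(GstMessageType)(GST_MESSAGE_EOS | GST_MESSAGE_ERROR));
if (msg != NULL)
gst_message_unref(msg);
gst_object_unref(bus);
gst_element_set_state(snappipe, GST_STATE_NULL);
gst_object_unref(snappipe);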

I solved the problem like this:
std::string strPathImage = "\\image.png";
GstCaps* caps;
GstSample* from_sample, * to_sample;
GError* err = NULL;
GstBuffer* buf;
GstMapInfo map_info;
g_object_get((*ptrstats).sink, "last-sample", &from_sample, NULL);
if (from_sample == NULL) {
GST_ERROR("Error getting last sample form sink");
return;
}
caps = gst_caps_from_string("image/png");
to_sample = gst_video_convert_sample(from_sample, caps, GST_CLOCK_TIME_NONE, &err);
gst_caps_unref(caps);
gst_sample_unref(from_sample);
if (to_sample == NULL) {
GST_ERROR("Error converting frame: %s", err ? err->message : "unknown error");
g_clear_error(&err);
return;
}
buf = gst_sample_get_buffer(to_sample);
if (gst_buffer_map(buf, &map_info, GST_MAP_READ)) {
if (!g_file_set_contents(strPathImage.c_str(), (const char*)map_info.data,
map_info.size, &err)) {
GST_WARNING("Could not save thumbnail: %s", err->message);
g_error_free(err);
}
}
gst_buffer_unmap(buf, &map_info); /* unmap before dropping the last reference to the sample that owns the buffer */
gst_sample_unref(to_sample);
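(Note: gst_video_convert_sample() comes from GStreamer's video helper library, so this needs #include <gst/video/video.h> and linking against gstvideo-1.0.)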

GstOverlay and gtk : Display without padding (Black strips)

I render my stream in a wxWidgets ImagePanel (m_renderwindow in my code), as shown in the attached picture. The problem is that there is black padding at the top and bottom and I do not succeed in removing it. I tried gst_video_overlay_set_render_rectangle, but the area stays black and does not display the stream. Does anyone have an idea?
GstState state;
GstState pending;
GstPad* pad = nullptr;
GError* error = NULL;
GstElement* source;
GstElement* clocktime;
GstElement* textoverlay;
GstCaps* caps = gst_caps_new_simple("application/x-rtp",
"media", G_TYPE_STRING, "video",
"payload", G_TYPE_INT, 96,
"encoding-name", G_TYPE_STRING, "H264",
NULL);
(*ptrstats).pipeline = gst_parse_launch("udpsrc name=source ! rtph264depay ! h264parse ! avdec_h264 ! videoconvert ! d3dvideosink name=mysink sync=false", &error);
if (!(*ptrstats).pipeline) {
outfile << "Load video : ", error->message, "\n";
exit(1);
}
source = gst_bin_get_by_name(GST_BIN((*ptrstats).pipeline), "source");
g_object_set(G_OBJECT(source), "caps", caps, NULL);
g_object_set(G_OBJECT(source), "port", m_port, NULL);
pad = gst_element_get_static_pad(source, "src");
gst_pad_add_probe(pad, GST_PAD_PROBE_TYPE_BUFFER, (GstPadProbeCallback)buffer_out_cb, ptrstats, NULL);
gst_object_unref(pad);
#ifdef __WXGTK__
outfile << "__WXGTK__\n";
GstElement* sink = gst_bin_get_by_name((GstBin*)(*ptrstats).pipeline, "mysink");
gst_video_overlay_set_window_handle(GST_VIDEO_OVERLAY(sink), m_xid);
#elif defined __WXMSW__
GstElement* sink = gst_bin_get_by_name((GstBin*)(*ptrstats).pipeline, "mysink");
WXWidget hwnd = m_renderWindow->GetHandle();
//gst_video_overlay_set_render_rectangle(GST_VIDEO_OVERLAY(sink), 0, 0, 1224, 1024);
gst_video_overlay_set_window_handle(GST_VIDEO_OVERLAY(sink),
reinterpret_cast<guintptr>(hwnd));
#endif
gst_element_set_state((*ptrstats).pipeline, GST_STATE_PLAYING);
SOLUTION: add force-aspect-ratio=false to d3dvideosink.
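In code that is a single property on the sink (a minimal sketch against the pipeline above):
GstElement* sink = gst_bin_get_by_name(GST_BIN((*ptrstats).pipeline), "mysink");
// let d3dvideosink stretch the video to fill the window instead of letterboxing it
g_object_set(G_OBJECT(sink), "force-aspect-ratio", FALSE, NULL);
gst_object_unref(sink);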

gstreamer pipeline from cam to file C code ends up with empty output file

I have a USB camera. I have working terminal commands to record or display full-HD video and to save one 4K image. I would like to handle it all via a C++ app. Let us concentrate on the video saving:
gst-launch-1.0 v4l2src device=/dev/video0 num-buffers=900 ! image/jpeg, width=1920, height=1080, io-mode=4 ! imxvpudec ! imxvpuenc_mjpeg ! avimux ! filesink location=/mnt/ssd/test.avi
will save 900 frames (i.e. 30 s at 30 fps) of video. I would like C++ code that records indefinitely (in the future maybe in hour-long segments) until I (the app) tell it to end.
I came up with
struct {
GstElement *pipeline_sink, *source, *appsink;
GstElement *pipeline_src, *appsrc, *decoder, *mux, *sink, *encoder;
} usbCam::mGstData;
int usbCam::gstInit(){
GstCaps *caps;
GstStateChangeReturn ret;
// Initialize GStreamer
if (!gst_is_initialized()) {
setenv("GST_DEBUG", ("*:" + std::to_string(3)).c_str(), 1);
gst_init(nullptr, nullptr);
}
// Create the elements
mGstData.source = gst_element_factory_make ("v4l2src", "source");
g_object_set (mGstData.source, "device", "/dev/video0", NULL);
mGstData.pipeline_sink = gst_pipeline_new ("pipeline_sink");
mGstData.appsink = gst_element_factory_make ("appsink", "appsink"); // missing in the original listing; the calls below otherwise operate on NULL
caps = gst_caps_new_any();
gst_app_sink_set_caps(GST_APP_SINK(mGstData.appsink), caps);
gst_caps_unref (caps);
gst_app_sink_set_emit_signals(GST_APP_SINK(mGstData.appsink), true);
// Build the pipeline
gst_bin_add_many (GST_BIN (mGstData.pipeline_sink), mGstData.source, mGstData.appsink, NULL);
if (gst_element_link_many(mGstData.source, mGstData.appsink, NULL) != TRUE) {
g_printerr ("Elements could not be linked.\n");
gst_object_unref (mGstData.pipeline_sink);
return -1;
}
return 0;
}
int usbCam::videoStart(){
GstCaps *caps;
GstStateChangeReturn ret;
if (!mGstData.pipeline_sink || !mGstData.source) {
g_printerr ("Not all elements could be created.\n");
return -1;
}
mGstData.appsrc = gst_element_factory_make ("appsrc", "appsrc");
mGstData.decoder = gst_element_factory_make ("imxvpudec", "transform_enc");
mGstData.mux = gst_element_factory_make ("avimux", "avimux");
mGstData.sink = gst_element_factory_make ("filesink", "sink");
g_object_set (mGstData.sink, "location", "/mnt/ssd/videoTest.avi", NULL);
mGstData.pipeline_src = gst_pipeline_new ("pipeline_src");
if (!mGstData.pipeline_src || !mGstData.appsrc || !mGstData.decoder || !mGstData.mux || !mGstData.sink) {
g_printerr ("Not all elements could be created.\n");
return -1;
}
caps = gst_caps_new_simple ("image/jpeg",
"width", G_TYPE_INT, 1920,
"height", G_TYPE_INT, 1080,
"io-mode", G_TYPE_INT, 4,
NULL);
gst_app_src_set_caps(GST_APP_SRC(mGstData.appsrc), caps);
gst_caps_unref (caps);
gst_app_src_set_duration(GST_APP_SRC(mGstData.appsrc), GST_TIME_AS_MSECONDS(80));
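// note: GST_TIME_AS_MSECONDS(80) converts 80 nanoseconds to milliseconds, i.e. 0;
// a duration of 80 * GST_MSECOND may have been intended here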
gst_app_src_set_stream_type(GST_APP_SRC(mGstData.appsrc), GST_APP_STREAM_TYPE_STREAM);
gst_app_src_set_latency(GST_APP_SRC(mGstData.appsrc), -1, 0);
gst_bin_add_many (GST_BIN (mGstData.pipeline_src), mGstData.appsrc, mGstData.decoder, mGstData.sink, NULL);
if (gst_element_link_many(mGstData.appsrc, mGstData.decoder, mGstData.sink, NULL) != TRUE) {
g_printerr ("Elements could not be linked.\n");
gst_object_unref (mGstData.pipeline_src);
return -1;
}
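// note: mGstData.mux is created above but never added to the bin or linked, and
// nothing ever pushes samples from pipeline_sink's appsink into this appsrc,
// which is consistent with the empty output file described below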
ret = gst_element_set_state (mGstData.pipeline_src, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr ("Unable to set the pipeline to the playing state.\n");
gst_object_unref (mGstData.pipeline_src);
return -1;
}
return 0;
}
int usbCam::videoEnd(){
gst_app_src_end_of_stream(GST_APP_SRC(mGstData.appsrc));
usleep(500000);
gst_element_set_state (mGstData.pipeline_src, GST_STATE_NULL);
gst_object_unref (mGstData.pipeline_src);
return 0;
}
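A more deterministic videoEnd() (a sketch reusing the same names) would wait on the bus for the EOS to drain through instead of sleeping a fixed 500 ms:
int usbCam::videoEnd(){
gst_app_src_end_of_stream(GST_APP_SRC(mGstData.appsrc));
// block until the EOS has travelled through the pipeline (or an error occurs)
GstBus *bus = gst_element_get_bus(mGstData.pipeline_src);
GstMessage *msg = gst_bus_timed_pop_filtered(bus, GST_CLOCK_TIME_NONE,
(GstMessageType)(GST_MESSAGE_EOS | GST_MESSAGE_ERROR));
if (msg != NULL)
gst_message_unref(msg);
gst_object_unref(bus);
gst_element_set_state(mGstData.pipeline_src, GST_STATE_NULL);
gst_object_unref(mGstData.pipeline_src);
return 0;
}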
Now, this code runs. There are no errors in the output, though there is one warning:
(GLib-GObject-WARNING **: 17:51:34.132: g_object_set_is_valid_property: object class 'GstSplitMuxSink' has no property named 'h}\x9fe h\xe6a_no_\xc1')
What actually bothers me is the output file: it is created, but it is an empty file of 0 B. Can anyone point me in the direction of the proper fix?
Edit: Today I came up with two other attempts. The first one is not that different from the one already posted here. The second gives me a pipeline with wrong parameters (different FPS), and I am unable to stop it correctly so that the file is finalized with a proper end-of-stream.
GstElement *pipeline;
GstBus *bus;
GstMessage *msg;
std::string command = "v4l2src device=/dev/video0 ! image/jpeg, width=1920, height=1080, io-mode=4 ! imxvpudec ! imxvpuenc_mjpeg ! avimux ! filesink location=/mnt/ssd/testPipeline.avi";
/* Build the pipeline */
pipeline = gst_parse_launch (command.c_str(), NULL);
/* Start playing */
gst_element_set_state (pipeline, GST_STATE_PLAYING);
/* Wait until error or EOS */
bus = gst_element_get_bus (pipeline);
msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE, GstMessageType(GST_MESSAGE_ERROR | GST_MESSAGE_EOS));
/* Free resources */
if (msg != NULL)
gst_message_unref (msg);
gst_object_unref (bus);
gst_element_set_state (pipeline, GST_STATE_NULL);
gst_object_unref (pipeline);
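Note that gst_bus_timed_pop_filtered() above blocks forever, so stopping this variant cleanly means injecting EOS from another thread; the blocked call then returns with the EOS message and the teardown runs, which lets avimux finalize the file. A sketch (videoStop is a hypothetical helper, not from the original code):
void videoStop(GstElement *pipeline){
// sending an EOS event through the pipeline lets avimux write its index,
// so the resulting AVI gets a proper end-of-stream
gst_element_send_event(pipeline, gst_event_new_eos());
}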
EDIT2:
OK now my code looks like this:
GstElement *pipeline;
GstElement *tee; // in the future I would like to save video and images AND stream, or use this pipeline's data internally.
void gstFail(const gchar* message){
g_printerr("%s", message);
gst_object_unref (pipeline);
return;
}
void videoStart(std::string path){
if (!gst_is_initialized()) {
setenv("GST_DEBUG", ("*:" + std::to_string(3)).c_str(), 1);
gst_init(nullptr, nullptr);
}
GstCaps *caps;
GstStateChangeReturn ret;
GstElement *source, *muxer, *sink;
source = gst_element_factory_make ("v4l2src", "source");
g_object_set (source, "device", mVideoDevice.toStdString().c_str(), NULL);
muxer = gst_element_factory_make ("avimux", "avimux");
tee = gst_element_factory_make("tee", "tee");
sink = gst_element_factory_make ("filesink", "sink");
g_object_set (sink, "location", path.c_str(), NULL);
pipeline = gst_pipeline_new ("pipeline_src");
if (!pipeline || !source || !muxer || !sink) {
g_printerr ("Not all elements could be created.\n");
return;
}
caps = gst_caps_new_simple ("image/jpeg",
"width", G_TYPE_INT, 1920,
"height", G_TYPE_INT, 1080,
"io-mode", G_TYPE_INT, 4,
"framerate", GST_TYPE_FRACTION, 30, 1,
"pixel-aspect-ratio", GST_TYPE_FRACTION, 1,1,
"interlace-mode", G_TYPE_STRING, "progresive",
NULL);
gst_bin_add_many (GST_BIN (pipeline), source, muxer,tee, sink, NULL);
if (gst_element_link_filtered(source, muxer, caps) != TRUE) {
gst_caps_unref (caps);
gstFail("Elements could not be linked or caps set.\n");
return;
}
gst_caps_unref (caps);
if (gst_element_link_many(muxer,tee, sink, NULL) != TRUE) {
gstFail("Elements could not be linked or caps set.\n");
return;
}
ret = gst_element_set_state (pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
gstFail("Unable to set the pipeline to the playing state.\n");
return;
}
return;
}
void videoEnd(void)
{
GstMessage *message = gst_message_new_eos(GST_OBJECT(pipeline));
gst_bus_post(pipeline->bus, message);
/* gst_bus_post() takes ownership of the message, so it must not be unreffed here.
Note: posting an EOS message on the bus only informs the application; to actually
drain the pipeline, send an EOS event (gst_element_send_event) as sketched above. */
/* gst_element_set_state() walks through the intermediate state changes itself */
gst_element_set_state (pipeline, GST_STATE_NULL);
gst_object_unref(pipeline);
}
void takeImage(std::string path){
GstElement *sink = gst_element_factory_make("multifilesink", "multifilesink");
g_object_set (sink, "location", path.c_str(), NULL);
gst_bin_add_many (GST_BIN (pipeline), sink, NULL);
if (gst_element_link_many(tee, sink, NULL) != TRUE) {
gstFail("Elements could not be linked or caps set.\n");
return;
}
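// note: the sink is added while the pipeline is already PLAYING but is never
// brought out of NULL (e.g. with gst_element_sync_state_with_parent), which
// would explain why no picture is ever written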
return;
}
This saves the video ALMOST OK (VLC does not display the correct length, but when I look at the file properties via Nautilus in Ubuntu the correct length is displayed and the video is playable). It does not save the pictures.
OK, so here's how I solved it: my initial pipeline is split with a tee element into two sinks: the original sink that saves the video, and an appsink. In the callback function for the appsink I create a new pipeline and push the frame any time I want to save the image. Basically:
...
int saveSampleFromAppsinkJpeg( GstSample *sample){
if (!shouldSaveImage) {
return -2;
}
if (capturing){
return -3;
}
std::thread([=]{
capturing = true;
GstStateChangeReturn ret;
GstElement *appsrc = gst_element_factory_make ("appsrc", "appsrc");
GstElement *sink = gst_element_factory_make ("multifilesink", "sink");
g_object_set (sink, "location", "some/path", NULL);
GstElement *pipeline_img = gst_pipeline_new ("pipeline_img");
if (!pipeline_img || !appsrc || !sink) {
g_printerr ("Not all elements could be created.\n");
capturing = false;
return -1;
}
gst_app_src_set_caps(GST_APP_SRC(appsrc), caps);
gst_app_src_set_duration(GST_APP_SRC(appsrc), GST_TIME_AS_MSECONDS(80)); // TODO 80
gst_app_src_set_stream_type(GST_APP_SRC(appsrc), GST_APP_STREAM_TYPE_STREAM);
gst_app_src_set_latency(GST_APP_SRC(appsrc), -1, 0);
gst_bin_add_many (GST_BIN (pipeline_img), appsrc, sink, NULL);
if (gst_element_link_many(appsrc, sink, NULL) != TRUE) {
g_printerr ("Elements could not be linked.\n");
gst_object_unref (pipeline_img);
capturing = false;
return -1;
}
ret = gst_element_set_state (pipeline_img, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr ("Unable to set the pipeline to the playing state.\n");
gst_object_unref (pipeline_img);
capturing = false;
return -1;
}
//push the image in the pipeline
GstFlowReturn status = GstFlowReturn::GST_FLOW_OK;
status = gst_app_src_push_sample(GST_APP_SRC(appsrc), sample);
if (status != GstFlowReturn::GST_FLOW_OK) g_printerr ("Sample for saving image not pushed.\n");
status = gst_app_src_end_of_stream(GST_APP_SRC(appsrc));
if (status != GstFlowReturn::GST_FLOW_OK) g_printerr ("EOS for saving image not pushed.\n");
//end the pipeline
usleep(500000); // Important
GstMessage *message = gst_message_new_eos(GST_OBJECT(pipeline_img));
gst_bus_post(pipeline_img->bus, message);
/* Free resources; gst_bus_post() takes ownership of the message, so it must not be unreffed here */
gst_element_set_state (pipeline_img, GST_STATE_NULL);
gst_object_unref (pipeline_img);
shouldSaveImage = false;
capturing = false;
return 1;
}).detach();
return 1;
}
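One caveat with this approach: the detached thread keeps using sample after the appsink callback that pulled it has returned, so the caller should take its own reference (gst_sample_ref() before spawning the thread, gst_sample_unref() at the end of it), assuming the elided code does not already do that.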
static GstFlowReturn new_sample_jpeg(GstElement * elt)
{
GstSample *sample;
GstBuffer *buffer;
GstMemory *memory;
GstFlowReturn ret = GST_FLOW_OK;
// get the sample from appsink
sample = gst_app_sink_pull_sample (GST_APP_SINK (elt));
buffer = gst_sample_get_buffer (sample);
if (buffer != NULL) {
memory = gst_buffer_get_memory (buffer, 0);
if (memory != NULL) {
//now all data are image data. If image wanted->image save!
if (wantToSave) saveSampleFromAppsinkJpeg(sample);
}
...
}
}
void startVideo(){
if (!gst_is_initialized()) {
setenv("GST_DEBUG", ("*:" + std::to_string(3)).c_str(), 1);
gst_init(nullptr, nullptr);
}
GstStateChangeReturn ret;
GstElement *source, *muxer, *sink, *queue_rcr, *queue_app, *appsink;
source = gst_element_factory_make ("v4l2src", "source");
g_object_set (source, "device", "/dev/video1", NULL);
muxer = gst_element_factory_make ("avimux", "avimux");
tee = gst_element_factory_make("tee", "tee");
sink = gst_element_factory_make ("filesink", "sink");
queue_rcr = gst_element_factory_make ("queue", "record_queue");
queue_app = gst_element_factory_make ("queue", "app_queue");
appsink = gst_element_factory_make("appsink", "appsink");
g_object_set (sink, "location", path.toStdString().c_str(), NULL);
pipeline = gst_pipeline_new ("pipeline_src");
if (!pipeline || !source || !muxer || !sink || !queue_rcr || !appsink) {
g_printerr ("Not all elements could be created.\n");
return;
}
caps = gst_caps_new_simple ("image/jpeg",
"width", G_TYPE_INT, 1920,
"height", G_TYPE_INT, 1080,
"io-mode", G_TYPE_INT, 4,
"framerate", GST_TYPE_FRACTION, 30, 1,
"pixel-aspect-ratio", GST_TYPE_FRACTION, 1,1,
"interlace-mode", G_TYPE_STRING, "progresive",
NULL);
gst_bin_add_many (GST_BIN (pipeline), source, muxer,tee, sink,queue_rcr, appsink, queue_app, NULL);
if (gst_element_link_filtered(source, tee, caps) != TRUE) {
//failhandling
}
if (gst_element_link_many(tee, queue_rcr, muxer, sink, NULL) != TRUE) {
//failhandling
}
if (gst_element_link_many(tee, queue_app, appsink, NULL) != TRUE) {
//failhandling
}
gst_app_sink_set_emit_signals(GST_APP_SINK(appsink), true);
g_signal_connect (appsink, "new-sample", G_CALLBACK (new_sample_jpeg));
ret = gst_element_set_state (pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
//failhandling
}
// Start playing
recording = true;
return;
}

Gstreamer rtsp application for audio and video

I was trying to develop an application for the pipeline:
gst-launch-1.0 rtspsrc location="rtsp://192.168.3.30:8554/rajvi" latency=0 name=demux demux. ! queue ! rtpmp4gdepay ! aacparse ! avdec_aac ! audioconvert ! audioresample ! autoaudiosink demux. ! queue ! rtph264depay ! h264parse ! omxh264dec ! videoconvert ! videoscale ! video/x-raw,width=176, height=144 ! ximagesink
Following is the code which I have implemented:
#include <gst/gst.h>
static void onPadAdded(GstElement *element, GstPad *pad, gpointer data)
{
gchar *name;
name = gst_pad_get_name(pad);
g_print("A new pad %s was created\n", name);
GstCaps * p_caps = gst_pad_get_pad_template_caps (pad);
gchar * description = gst_caps_to_string(p_caps);
g_free(description);
GstElement *depay = GST_ELEMENT(data);
if(gst_element_link_pads(element, name, depay, "sink") == 0)
{
g_print("cb_new_rtspsrc_pad : failed to link elements \n");
}
g_free(name);
}
int main(int argc, char *argv[]) {
GstElement *source, *videosink, *audio, *video, *convert, *pipeline, *audioDepay, *audioQueue, *videoQueue,
*audioParse, *audioDecode, *audioConvert, *audioResample, *audioSink, *videoDepay, *videoParser, *videoDecode, *videoConvert, *videoScale, *videoSink;
GstCaps *capsFilter;
GstBus *bus;
GstMessage *msg;
GstPad *pad;
GstPad *sinkpad,*ghost_sinkpad;
gboolean link_ok;
GstStateChangeReturn ret;
/* Initialize GStreamer */
gst_init (&argc, &argv);
/* Create Elements */
pipeline = gst_pipeline_new("rtsp-pipeline");
source = gst_element_factory_make ("rtspsrc", "source");
/*audio bin*/
audioQueue = gst_element_factory_make ("queue", "audio-queue");
audioDepay = gst_element_factory_make ("rtpmp4gdepay", "audio-depayer");
audioParse = gst_element_factory_make ("aacparse", "audio-parser");
audioDecode = gst_element_factory_make ("avdec_aac", "audio-decoder");
audioConvert = gst_element_factory_make ("audioconvert", "aconv");
audioResample = gst_element_factory_make ("audioresample", "audio-resample");
audioSink = gst_element_factory_make ("autoaudiosink", "audiosink");
if (!audioQueue || !audioDepay || !audioParse || !audioConvert || !audioResample || !audioSink)
{
g_printerr("Cannot create audio elements \n");
return 0;
}
g_object_set(source, "location", "rtsp://192.168.3.30:8554/rajvi", NULL);
g_object_set(source, "latency", 0, NULL);
g_signal_connect(G_OBJECT(source), "pad-added", G_CALLBACK(onPadAdded), audioDepay);
gst_bin_add_many(GST_BIN(pipeline), source, audioQueue, audioDepay, audioParse, audioDecode,
audioConvert, audioResample, audioSink, NULL);
if (!gst_element_link_many(audioQueue, audioDepay, audioParse, audioDecode, audioConvert, audioResample, audioSink, NULL))
{
g_printerr("Error linking fields ...1 \n");
return 0;
}
video = gst_bin_new ("videobin");
videoQueue = gst_element_factory_make ("queue", "video-queue");
videoDepay= gst_element_factory_make ("rtph264depay", "video-depayer");
videoParser = gst_element_factory_make ("h264parse", "video-parser");
videoDecode = gst_element_factory_make ("omxh264dec", "video-decoder");
videoConvert = gst_element_factory_make("videoconvert", "convert");
videoScale = gst_element_factory_make("videoscale", "video-scale");
videoSink = gst_element_factory_make("ximagesink", "video-sink");
capsFilter = gst_caps_new_simple("video/x-raw",
"width", G_TYPE_INT, 176,
"height", G_TYPE_INT, 144,
NULL);
if (!videoQueue || !videoDepay || !videoParser || !videoDecode || !videoConvert || !videoScale || !videoSink || !capsFilter)
{
g_printerr("Cannot create video elements \n");
return 0;
}
gst_bin_add_many(GST_BIN(video),videoQueue, videoDepay, videoParser, videoDecode, videoConvert, videoScale,
videoSink, NULL);
/* set property value */
link_ok = gst_element_link_filtered(videoConvert, videoSink, capsFilter);
gst_caps_unref (capsFilter);
if (!link_ok) {
g_warning ("Failed to link element1 and element2!");
}
sinkpad = gst_element_get_static_pad (videoConvert, "sink");
ghost_sinkpad = gst_ghost_pad_new ("sink", sinkpad);
gst_pad_set_active (ghost_sinkpad, TRUE);
gst_element_add_pad (video, ghost_sinkpad);
if (!gst_element_link_many(videoQueue, videoDepay, videoParser, videoDecode, videoScale, NULL))
{
g_printerr("Error linking fields... 2 \n");
return 0;
}
gst_bin_add_many (GST_BIN(pipeline), video,NULL);
/* Start playing */
gst_element_set_state ( pipeline, GST_STATE_PLAYING);
/* Wait until error or EOS */
bus = gst_element_get_bus (pipeline);
msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE, (GstMessageType)(GST_MESSAGE_ERROR | GST_MESSAGE_EOS));
/* Free resources */
if (msg != NULL)
gst_message_unref (msg);
gst_object_unref (bus);
gst_element_set_state (pipeline, GST_STATE_NULL);
gst_object_unref (pipeline);
return 0;
}
I am getting an error when linking the pipeline->audio->video bins.
If you put the video and audio elements all together in the pipeline bin then you can do it. Figure out what your caps are for the video and audio and you should be able to link them.
// ----------------------------------
// pad-added signal
// ----------------------------------
static void onPadAdded(GstElement* element, GstPad* pad, gpointer user_data)
{
gchar *name;
GstCaps * p_caps;
GstElement* nextElement = NULL;
GstElement* pipeline = (GstElement*)user_data;
name = gst_pad_get_name(pad);
g_print("A new pad %s was created\n", name);
p_caps = gst_pad_get_pad_template_caps(pad);
if (strstr(name, "[CAPS FOR VIDEO CONTAIN]") != NULL)
{
std::cout << std::endl << "------------------------ Video -------------------------------" << std::endl;
nextElement = gst_bin_get_by_name(GST_BIN(pipeline), "video-depayer");
}
else if (strstr(name, "[CAPS FOR AUDIO CONTAIN]") != NULL)
{
std::cout << std::endl << "------------------------ Audio -------------------------------" << std::endl;
nextElement = gst_bin_get_by_name(GST_BIN(pipeline), "audio-depayer");
}
if (nextElement != NULL)
{
if (!gst_element_link_filtered(element, nextElement, p_caps))
//if (!gst_element_link_pads_filtered(element, name, nextElement, "sink", p_caps))
{
std::cout << std::endl << "Failed to link video element to src to sink" << std::endl;
}
gst_object_unref(nextElement);
}
g_free(name);
gst_caps_unref(p_caps);
}
// ----------------------------------
// main
// ----------------------------------
int main(int argc, char *argv[])
{
GstElement *source, *videosink, *audio,*convert, *pipeline, *audioDepay, *audioQueue, *videoQueue,
*audioParse, *audioDecode, *audioConvert, *audioResample, *audioSink, *videoDepay, *videoParser, *videoDecode, *videoConvert, *videoScale, *videoSink;
GstCaps *capsFilter;
GstBus *bus;
GstMessage *msg;
GstPad *pad;
gboolean link_ok;
GstStateChangeReturn ret;
/* Initialize GStreamer */
gst_init(&argc, &argv);
/* Create Elements */
pipeline = gst_pipeline_new("rtsp-pipeline");
source = gst_element_factory_make("rtspsrc", "source");
/*audio bin*/
audioQueue = gst_element_factory_make("queue", "audio-queue");
audioDepay = gst_element_factory_make("rtpmp4gdepay", "audio-depayer");
audioParse = gst_element_factory_make("aacparse", "audio-parser");
audioDecode = gst_element_factory_make("avdec_aac", "audio-decoder");
audioConvert = gst_element_factory_make("audioconvert", "aconv");
audioResample = gst_element_factory_make("audioresample", "audio-resample");
audioSink = gst_element_factory_make("autoaudiosink", "audiosink");
if (!audioQueue || !audioDepay || !audioParse || !audioConvert || !audioResample || !audioSink)
{
g_printerr("Cannot create audio elements \n");
return 0;
}
g_object_set(source, "location", "rtsp://192.168.3.30:8554/rajvi", NULL);
g_object_set(source, "latency", 0, NULL);
g_signal_connect(G_OBJECT(source), "pad-added", G_CALLBACK(onPadAdded), pipeline);
gst_bin_add_many(GST_BIN(pipeline), source, audioQueue, audioDepay, audioParse, audioDecode,
audioConvert, audioResample, audioSink, NULL);
if (!gst_element_link_many(audioQueue, audioDepay, audioParse, audioDecode, audioConvert, audioResample, audioSink, NULL))
{
g_printerr("Error linking fields ...1 \n");
return 0;
}
videoQueue = gst_element_factory_make("queue", "video-queue");
videoDepay = gst_element_factory_make("rtph264depay", "video-depayer");
videoParser = gst_element_factory_make("h264parse", "video-parser");
videoDecode = gst_element_factory_make("omxh264dec", "video-decoder");
videoConvert = gst_element_factory_make("videoconvert", "convert");
videoScale = gst_element_factory_make("videoscale", "video-scale");
videoSink = gst_element_factory_make("ximagesink", "video-sink");
capsFilter = gst_caps_new_simple("video/x-raw",
"width", G_TYPE_INT, 176,
"height", G_TYPE_INT, 144,
NULL);
if (!videoQueue || !videoDepay || !videoParser || !videoDecode || !videoConvert || !videoScale || !videoSink || !capsFilter)
{
g_printerr("Cannot create video elements \n");
return 0;
}
gst_bin_add_many(GST_BIN(pipeline), videoQueue, videoDepay, videoParser, videoDecode, videoConvert, videoScale,
videoSink, NULL);
/* set property value */
link_ok = gst_element_link_filtered(videoConvert, videoSink, capsFilter);
gst_caps_unref(capsFilter);
if (!link_ok) {
g_warning("Failed to link element1 and element2!");
}
if (!gst_element_link_many(videoQueue, videoDepay, videoParser, videoDecode, videoScale, NULL))
{
g_printerr("Error linking fields... 2 \n");
return 0;
}
/* Start playing */
gst_element_set_state(pipeline, GST_STATE_PLAYING);
/* Wait until error or EOS */
bus = gst_element_get_bus(pipeline);
msg = gst_bus_timed_pop_filtered(bus, GST_CLOCK_TIME_NONE,(GstMessageType)( GST_MESSAGE_ERROR | GST_MESSAGE_EOS));
/* Free resources */
if (msg != NULL)
gst_message_unref(msg);
gst_object_unref(bus);
gst_element_set_state(pipeline, GST_STATE_NULL);
gst_object_unref(pipeline);
return 0;
}
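As for the "[CAPS FOR ... CONTAIN]" placeholders in onPadAdded: with rtspsrc the newly added pad carries application/x-rtp caps with a media field, so one option (a sketch; note it reads the negotiated caps rather than the template caps used above) is:
GstCaps *current = gst_pad_get_current_caps(pad);
GstStructure *s = gst_caps_get_structure(current, 0);
const gchar *media = gst_structure_get_string(s, "media"); // "audio" or "video" on RTP pads
if (media != NULL && g_str_equal(media, "video"))
nextElement = gst_bin_get_by_name(GST_BIN(pipeline), "video-depayer");
else if (media != NULL && g_str_equal(media, "audio"))
nextElement = gst_bin_get_by_name(GST_BIN(pipeline), "audio-depayer");
gst_caps_unref(current);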

Timestamping error/or computer too slow with gstreamer/gstbasesink in Qt

I am building a simple video player in Qt using gstreamer-1.0. When I run it from Qt, or as an .exe on my PC, everything works fine. But when I try it on another PC, it plays for a few seconds, then skips some seconds or minutes, and so on. I guess the problem is with sync; I have tried setting the d3dvideosink property sync=false, but it is the same. I have read many similar threads, but none seems to help.
A lot of buffers are being dropped.
Additional debug info:
gstbasesink.c(2846): gst_base_sink_is_too_late ():
There may be a timestamping problem, or this computer is too slow.
I have tried setting different properties, but none helped. I have seen the following threads, but still the same problem:
Thread 1
Thread 2
Thread 3
On Thread 3 there is a suggestion to set the "do-timestamp" property of appsrc to TRUE, but I use uridecodebin as the source, which does not have a "do-timestamp" property.
My pipeline is as follows (uridecodebin feeds two branches, audio and video, as the linking code below shows):
uridecodebin ! audioconvert ! volume ! autoaudiosink
uridecodebin ! videoconvert ! gamma ! d3dvideosink
Thanks in Advance!
Here is some code from the elements creation/linking. Please comment if you need any other code.
// Create the elements
data.source = gst_element_factory_make ( "uridecodebin", "source" );
data.audio_convert = gst_element_factory_make ( "audioconvert", "audio_convert" );
data.volume = gst_element_factory_make ( "volume", "volume");
data.audio_sink = gst_element_factory_make ( "autoaudiosink", "audio_sink" );
data.video_convert = gst_element_factory_make ( "videoconvert", "video_convert" );
data.filter = gst_element_factory_make ( "gamma", "filter");
data.video_sink = gst_element_factory_make ( "d3dvideosink", "video_sink" );
// Create the empty pipeline
data.pipeline = gst_pipeline_new ("test-pipeline");
if (!data.pipeline || !data.source || !data.audio_convert || !data.volume || !data.audio_sink
|| !data.video_convert || !data.filter || !data.video_sink ) {
g_printerr ("Not all elements could be created.\n");}
return ;
}
// Build the pipeline. Note that we are NOT linking the source at this point. We will do it later.
gst_bin_add_many (GST_BIN (data.pipeline), data.source, data.audio_convert , data.volume, data.audio_sink,
data.video_convert, data.filter, data.video_sink, NULL);
if (!gst_element_link (data.audio_convert, data.volume)) {
g_printerr ("Elements AUDIO_CONVERT - VOLUME could not be linked.\n");
gst_object_unref (data.pipeline);
return ;
}
if (!gst_element_link (data.volume, data.audio_sink)) {
g_printerr ("Elements VOLUME - AUDIO_SINK could not be linked.\n");
gst_object_unref (data.pipeline);
return ;
}
if (!gst_element_link(data.video_convert, data.filter)) {
g_printerr("Elements VIDEO_CONVERT - FILTER could not be linked.\n");
gst_object_unref(data.pipeline);
return ;
}
if (!gst_element_link(data.filter, data.video_sink)) {
g_printerr("Elements FILTER - VIDEO_SINK could not be linked.\n");
gst_object_unref(data.pipeline);
return ;
}
When I open video:
// Set the URI to play
QString filePath = "file:///"+filename;
QByteArray ba = filePath.toLatin1();
const char *c_filePath = ba.data();
ret = gst_element_set_state (data.pipeline, GST_STATE_NULL);
gint64 max_lateness = 2000000; //2 milli sec
g_object_set (data.source, "uri", c_filePath, NULL);
// I have tried setting the following properties, but none helped
// g_object_set (data.source, "do-timestamp", true, NULL);
// g_object_set( data.video_sink, "sync", false, NULL);
// g_object_set( data.video_sink, "max-lateness", max_lateness, NULL);
qDebug() << c_filePath;
// Link video_sink with playingWidget->winId()
gst_video_overlay_set_window_handle (GST_VIDEO_OVERLAY (data.video_sink), xwinid);
// Connect to the pad-added signal
g_signal_connect (data.source, "pad-added", G_CALLBACK (pad_added_handler), &data) ;
// Start playing
ret = gst_element_set_state (data.pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
gst_element_set_state (data.pipeline, GST_STATE_NULL);
gst_object_unref (data.pipeline);
// Exit application
QTimer::singleShot(0, QApplication::activeWindow(), SLOT(quit()));
}
data.playing = TRUE;
data.rate = 1.0;
// Iterate - gets the position and length every 200 msec
g_print ("Running...\n");
emit setMsg( "Running...\n" );
currFileName = filename;
timer->start(500);
Pad_added_handler:
void gst_pipeline::pad_added_handler(GstElement *src, GstPad *new_pad, CustomData *data)
{
GstPadLinkReturn ret;
GstCaps *new_pad_caps = NULL;
GstStructure *new_pad_struct = NULL;
const gchar *new_pad_type = NULL;
GstPad *sink_pad_audio = gst_element_get_static_pad (data->audio_queue, "sink");
GstPad *sink_pad_video = gst_element_get_static_pad (data->video_queue, "sink");
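// (note: no queue elements appear in the element-creation code shown above, so
// audio_queue/video_queue must be set up elsewhere in CustomData; otherwise
// these calls return NULL pads)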
g_print ("Received new pad '%s' from '%s':\n", GST_PAD_NAME (new_pad), GST_ELEMENT_NAME (src));
// If our audio converter is already linked, we have nothing to do here
if (gst_pad_is_linked (sink_pad_audio))
{
g_print (" We have already linked sink_pad_audio. Ignoring.\n");
// goto exit;
}
// If our video converter is already linked, we have nothing to do here
if (gst_pad_is_linked (sink_pad_video))
{
g_print (" We have already linked sink_pad_video. Ignoring.\n");
// goto exit;
}
// Check the new pad's type
new_pad_caps = gst_pad_get_current_caps (new_pad); //gst_pad_get_caps
new_pad_struct = gst_caps_get_structure (new_pad_caps, 0);
new_pad_type = gst_structure_get_name (new_pad_struct);
if (g_str_has_prefix (new_pad_type, "audio/x-raw"))
{
// Attempt the link
ret = gst_pad_link (new_pad, sink_pad_audio);
if (GST_PAD_LINK_FAILED (ret))
{ g_print (" Type is '%s' but link failed.\n", new_pad_type); }
else
{ g_print (" Link succeeded (type '%s').\n", new_pad_type); }
}
else if (g_str_has_prefix (new_pad_type, "video/x-raw"))
{
// Attempt the link
ret = gst_pad_link (new_pad, sink_pad_video);
if (GST_PAD_LINK_FAILED (ret))
{ g_print (" Type is '%s' but link failed.\n", new_pad_type); }
else
{ g_print (" Link succeeded (type '%s').\n", new_pad_type); }
}
else
{
g_print (" It has type '%s' which is not audio/x-raw OR video/x-raw. Ignoring.\n", new_pad_type);
goto exit;
}
exit:
// Unreference the new pad's caps, if we got them
if (new_pad_caps != NULL)
{ gst_caps_unref (new_pad_caps); g_print("EXIT"); msg_STRING2 += "EXIT\n" ; }
// Unreference the sink pad
gst_object_unref (sink_pad_audio);
gst_object_unref (sink_pad_video);
}

muxing jpeg to mkv using gstreamer

Situation:
When I try to mux JPEGs into an MKV file I get a zero-sized file. I must put decode and encode elements between the parser and the muxer to get correct output. When I mux an H.264 video with the same code I get a correct video file, which means the buffer timing should be OK (duration and PTS parameters). In any case, even after bad buffer settings the file size is not zero.
matroskamux requires only the "width" and "height" capabilities on its "image/jpeg" sink pad, but it looks like this is not sufficient. jpegparse is giving correct values, and the program does not work after setting these capabilities manually either.
Example of pipeline:
This pipeline doesn't work
appsrc ! "image/jpeg" ! jpegparse ! matroskamux ! filesink location=mjpeg.mkv
But this works
appsrc ! "image/jpeg" ! jpegparse ! avdec_mjpeg ! x264enc ! matroskamux ! filesink location=mjpeg.mkv
Example of code:
Working code, but with reencoding
app = new _App();
app->src = (GstAppSrc*)gst_element_factory_make ("appsrc", "source");
if(IsH264Frame(codecType))
app->parser = gst_element_factory_make("h264parse", "parser");
else if(codecType == IMAGE_MJPEG_FRAME)
app->parser = gst_element_factory_make("jpegparse", "parser");
//additional code
app->decoder = gst_element_factory_make("avdec_mjpeg", "decoder");
app->encoder = gst_element_factory_make("x264enc", "encoder");
app->muxer = gst_element_factory_make("matroskamux", "muxer");
app->sink = (GstAppSink*)gst_element_factory_make ("filesink", "sink");
app->pipeline = gst_pipeline_new ("pipeline"); // note: pipeline creation is missing from the original listing; the name here is arbitrary
if (!app->pipeline || !app->src || !app->decoder || !app->encoder || !app->muxer || !app->sink || !app->parser)
return;
app->bus = gst_pipeline_get_bus (GST_PIPELINE (app->pipeline));
g_assert(app->bus);
gst_bus_add_watch (app->bus, (GstBusFunc) BusMessage, this);
gst_bin_add_many (GST_BIN (app->pipeline), (GstElement*)app->src, app->decoder, app->encoder, app->muxer, app->sink, app->parser
,NULL);
/* SETUP ELEMENTS */
g_object_set(app->src,
"stream-type", 0,
"format", GST_FORMAT_BUFFERS,
"is-live", true,
"block", true,
NULL);
if(IsH264Frame(codecType)){
g_object_set(app->src, "caps", gst_caps_new_simple("video/x-h264",
NULL), NULL);
} else if(codecType == IMAGE_MJPEG_FRAME) {
g_object_set(app->src, "caps", gst_caps_new_simple("image/jpeg",
"framerate",GST_TYPE_FRACTION,(int)framerate,1,
NULL), NULL);
//additional code
g_object_set(app->decoder, "caps", gst_caps_new_simple("video/x-raw",
NULL), NULL);
g_object_set(app->encoder, "caps", gst_caps_new_simple("video/x-h264",
NULL), NULL);
}
g_signal_connect(app->src, "need-data", G_CALLBACK(StartFeed), this);
g_signal_connect(app->src, "enough-data", G_CALLBACK(StopFeed), this);
g_object_set (app->sink,
"location", GenerateFileName().c_str(),
"buffer-mode", 0,
NULL);
/* LINKING */
GstPad *padEncSrc, *padMuxSink, *parserSrc; // padEncSrc (the encoder's src pad) is what gets linked to the muxer below
GstPadTemplate *mux_sink_pad_template;
mux_sink_pad_template = gst_element_class_get_pad_template (GST_ELEMENT_GET_CLASS (app->muxer), "video_%u");
padMuxSink = gst_element_request_pad (app->muxer, mux_sink_pad_template, NULL, NULL);
parserSrc = gst_element_get_static_pad (app->parser, "src");
padEncSrc = gst_element_get_static_pad (app->encoder, "src");
if(!gst_element_link( (GstElement*)app->src, app->parser))
return;
if(IsH264Frame(codecType)){
if(gst_pad_link (parserSrc, padMuxSink) != GST_PAD_LINK_OK)
return;
} else if(codecType == IMAGE_MJPEG_FRAME){
//additional code
if(!gst_element_link( app->parser, app->decoder))
return;
if(!gst_element_link( app->decoder, app->encoder))
return;
if(gst_pad_link (padEncSrc, padMuxSink) != GST_PAD_LINK_OK)
return;
}
if(!gst_element_link( app->muxer, (GstElement*)app->sink))
return;
/* PLAY */
GstStateChangeReturn ret = gst_element_set_state (app->pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE)
{
gst_object_unref (app->pipeline);
return;
}
Question:
What am I doing wrong? Any ideas for solving this problem?
I solved this problem by changing the appsrc "format" property from GST_FORMAT_BUFFERS to GST_FORMAT_TIME. Correct timestamps on the buffers alone are not enough.
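For reference, that is a one-line change in the appsrc setup from the listing above:
g_object_set(app->src,
"stream-type", 0,
"format", GST_FORMAT_TIME, // was GST_FORMAT_BUFFERS; the muxer needs time-based buffer timestamps
"is-live", true,
"block", true,
NULL);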