I want to create a pipeline that takes an RTSP stream as input and outputs JPEG images at different resolutions. While the pipeline is playing, I would like to block a certain branch, but I can't manage to block the element.
The command line looks like this:
gst-launch-1.0 -v rtspsrc location="rtsp://ip:port/live.sdp" ! rtph264depay ! h264parse ! avdec_h264 ! videorate ! video/x-raw,framerate=5/1 ! tee name=t ! queue ! videoscale ! video/x-raw,width=320,height=240 ! jpegenc ! multifilesink location=snapshot320-%05d.jpg t. ! queue ! videoscale ! video/x-raw,width=1280,height=720 ! jpegenc ! multifilesink location=snapshot1280-%05d.jpg
I want to be able to block data from passing through a branch, but I can't get it working with the tee element.
I've seen that the function gst_pad_add_probe allows blocking a pad of an element.
This is what I did:
1) Get the pads:
srcpad = gst_element_get_static_pad(tee, "src");
sinkpad = gst_element_get_static_pad(tee, "sink");
2) Add the probe:
gst_pad_add_probe(srcpad, GST_PAD_PROBE_TYPE_IDLE, &GstProbeCallback, this, NULL)
3) Flush the data:
gst_pad_send_event (sinkpad, gst_event_new_eos ());
4) Unref the pads
gst_object_unref (sinkpad);
gst_object_unref (srcpad);
5) Set the pipeline in playing state:
gst_element_set_state(this->pipeline, GST_STATE_PLAYING)
This is the callback given to gst_pad_add_probe:
static GstPadProbeReturn
GstProbeCallback(GstPad* pad, GstPadProbeInfo* info, gpointer user_data) {
std::cout << "probe callback" << std::endl;
return GST_PAD_PROBE_DROP;
}
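For reference, this is the pattern I am trying to apply, reduced to a minimal sketch (the names are mine, not the code above): a BUFFER probe that returns GST_PAD_PROBE_DROP discards the data on that pad only, so the other tee branch keeps flowing.
/* Minimal sketch of the drop-probe pattern (my names, simplified). */
static GstPadProbeReturn
drop_buffers_cb(GstPad* pad, GstPadProbeInfo* info, gpointer user_data) {
/* Each buffer reaching this pad is discarded; upstream keeps running. */
return GST_PAD_PROBE_DROP;
}

/* Block one branch by dropping at its queue's sink pad: */
GstPad* qpad = gst_element_get_static_pad(branch_queue, "sink");
gulong probe_id = gst_pad_add_probe(qpad, GST_PAD_PROBE_TYPE_BUFFER, drop_buffers_cb, NULL, NULL);
gst_object_unref(qpad);
/* Later, re-get the pad and call gst_pad_remove_probe() with probe_id to resume. */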
[Update]
If I set the probe on the queue that is right after the tee element, all my branches get blocked.
More code below:
this->pipeline = gst_pipeline_new(NULL);
if (this->pipeline == NULL) {
LOG_ERR("Failed to create the pipeline", "image_configuration");
return NULL;
}
this->elements.tree.src = gst_element_factory_make("rtspsrc", NULL);
this->elements.tree.depay = gst_element_factory_make("rtph264depay", NULL);
this->elements.tree.parse = gst_element_factory_make("h264parse", NULL);
this->elements.tree.dec = gst_element_factory_make("avdec_h264", NULL);
this->elements.tree.rate = gst_element_factory_make("videorate", NULL);
this->elements.tree.ratefilter = gst_element_factory_make("capsfilter", NULL);
this->elements.tree.tee = gst_element_factory_make("tee", NULL);
for (auto& branch : this->elements.branches) {
branch.queue = gst_element_factory_make("queue", NULL);
branch.scale = gst_element_factory_make("videoscale", NULL);
branch.scalecaps = gst_element_factory_make("capsfilter", NULL);
branch.enc = gst_element_factory_make("jpegenc", NULL);
branch.sink = gst_element_factory_make("redissink", NULL);
branch.fakesink = gst_element_factory_make("fakesink", NULL);
if (not(branch.queue && branch.scale && branch.scalecaps && branch.sink && branch.enc &&
branch.fakesink)) {
LOG_ERR("Failed to create elements", "image_configuration");
return NULL;
}
}
if (!this->pipeline || !this->elements.tree.src || !this->elements.tree.depay ||
!this->elements.tree.parse || !this->elements.tree.dec || !this->elements.tree.rate ||
!this->elements.tree.ratefilter || !this->elements.tree.tee) {
LOG_ERR("Failed to create elements", "image_configuration");
return NULL;
}
this->set_rate_caps(this->elements.tree.ratefilter);
g_object_set(
this->elements.tree.src, "location", this->loc_in.c_str(), "latency", this->latency, NULL);
for (auto& branch : this->elements.branches) {
this->set_scale_caps(branch.scalecaps, branch.resolution);
g_object_set(branch.enc, "quality", 50, NULL);
g_object_set(branch.sink, "func", &send_event, NULL);
g_object_set(branch.sink, "camera_id", this->camera_id, NULL);
g_object_set(branch.sink, "is_init", TRUE, NULL);
}
gst_bin_add_many(GST_BIN(this->pipeline),
this->elements.tree.src,
this->elements.tree.depay,
this->elements.tree.parse,
this->elements.tree.dec,
this->elements.tree.rate,
this->elements.tree.ratefilter,
this->elements.tree.tee,
NULL);
for (const auto& branch : this->elements.branches) {
gst_bin_add_many(GST_BIN(this->pipeline),
branch.queue,
branch.scale,
branch.scalecaps,
branch.enc,
branch.sink,
branch.fakesink,
NULL);
}
if (!gst_element_link_many(this->elements.tree.depay,
this->elements.tree.parse,
this->elements.tree.dec,
this->elements.tree.rate,
this->elements.tree.ratefilter,
this->elements.tree.tee,
NULL)) {
LOG_ERR("Failed to link elements", "image_configuration");
return NULL;
}
g_signal_connect(
this->elements.tree.src, "pad-added", G_CALLBACK(on_pad_added), &this->elements);
for (const auto& branch : this->elements.branches) {
if (!gst_element_link_many(this->elements.tree.tee,
branch.queue,
branch.scale,
branch.scalecaps,
branch.enc,
branch.sink,
branch.fakesink,
NULL)) {
LOG_ERR("Failed to link elements", "image_configuration");
return NULL;
}
}
if (not this->launch_pipeline()) return NULL;
getchar();
std::cout << "Add probe" << std::endl;
GstPad* srcpad;
GstPad* sinkpad;
srcpad = gst_element_get_static_pad(this->elements.branches[0].queue, "src");
sinkpad = gst_element_get_static_pad(this->elements.branches[0].queue, "sink");
this->elements.branches[0].probe_id =
gst_pad_add_probe(srcpad, GST_PAD_PROBE_TYPE_BLOCK, &GstProbeCallback, this, NULL);
gst_pad_send_event (sinkpad, gst_event_new_eos ());
gst_object_unref (sinkpad);
gst_object_unref (srcpad);
return this->pipeline;
Any help will be appreciated
Related
I have a GStreamer media pipeline, shown below, which I am trying to convert into C code. The command line works fine.
gst-launch-1.0 v4l2src device=/dev/video1 ! capsfilter caps=video/x-raw,width=1280,height=720,format=UYVY ! queue ! videoconvert ! queue ! capsfilter caps=video/x-raw,format=NV12,width=1280,height=720,pixel-aspect-ratio=1/1 ! v4l2h264enc extra-controls="controls,h264_level=12,h264_profile=1" ! h264parse ! autovideosink
I have written the code and it compiles successfully. When I execute it, the v4l2src element fails to link to the capsfilter. I have searched the internet but was unable to fix the problem. Can someone point out what I am doing wrong?
The code snippet is below:
/* Create the gstreamer elements */
source = gst_element_factory_make ("v4l2src", "source");
capsfilter = gst_element_factory_make ("capsfilter", "Caps-Filter");
capsfilter2 = gst_element_factory_make ("capsfilter", "caps-filter2");
video_convert = gst_element_factory_make ("videoconvert", "Video Convert");
queue1 = gst_element_factory_make ("queue", "Encoded Video Queue 1");
queue2 = gst_element_factory_make ("queue", "Encoded Video Queue 2");
encoder = gst_element_factory_make ("v4l2h264enc", "HW Accelerated Encoder");
H264_pay = gst_element_factory_make ("h264parse", "Payload-encode H264 video into RTP packets");
sink = gst_element_factory_make("autovideosink", "sink");
/* Create the empty pipeline */
pipeline = gst_pipeline_new ("test-pipeline");
if (!source || !capsfilter || !capsfilter2 || !video_convert || !queue1 || !queue2 || !encoder || !H264_pay || !sink)
{
g_printerr ("Not all elements could be created.\n");
return -1;
}
/* Set Source element properties */
g_object_set (G_OBJECT(source), "device", "/dev/video1", NULL);
GstCaps* filtercaps = gst_caps_from_string("video/x-raw,width=1280,height=720,format=(string)UYUY");
GstCaps* vconvertfilter = gst_caps_from_string("video/x-raw,width=1280,height=720,format=(string)NV12,pixel-aspect-ratio=1/1");
GstStructure *test = gst_structure_new_from_string("controls,h264_level=12,h264_profile=1");
g_object_set(G_OBJECT(capsfilter), "caps", filtercaps,NULL);
g_object_set(G_OBJECT(capsfilter2), "caps", vconvertfilter, NULL);
g_object_set (G_OBJECT(encoder), "extra-controls", test, NULL);
gst_caps_unref(filtercaps);
gst_caps_unref(vconvertfilter);
/* Link all elements that can be automatically linked because they have "Always" pads */
gst_bin_add_many (GST_BIN (pipeline),
source, capsfilter,
queue1, video_convert, queue2,
capsfilter2, encoder,
H264_pay, sink, NULL);
if(!gst_element_link(source, capsfilter))
{
g_printerr("Unable to link Source to filter. check your caps. \n");
gst_object_unref (pipeline);
}
if (gst_element_link_many (capsfilter, queue1, video_convert, NULL) != TRUE)
{
g_printerr("Capsfilter could not be linked to queue1. \n");
gst_object_unref (pipeline);
}
if (gst_element_link_many (video_convert, queue2, capsfilter2, encoder, H264_pay, NULL) != TRUE)
{
g_printerr("video_convert could not be linked to queue2. \n");
gst_object_unref (pipeline);
}
if(gst_element_link_many (H264_pay, sink, NULL) != TRUE)
{
g_printerr("parse could not link to sink.\n");
gst_object_unref (pipeline);
}
I get the error as below;
Unable to link Source to filter. check your caps.
Can somebody help me correct the mistake?
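For debugging, one way to spot a caps mismatch is to print the caps string that was actually set and compare it character by character with the working command line (a small sketch; filtercaps is the variable from the code above, so this must run before it is unreffed):
/* Dump the caps string for comparison with the gst-launch line. */
gchar *txt = gst_caps_to_string (filtercaps);
g_print ("capsfilter caps: %s\n", txt);
g_free (txt);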
Below is my pipeline to display and record my stream coming from a UDP source. The problem is that latency builds up over time on both the display and the recording (it starts with no latency). However, if I only display the stream, there is no latency.
Does anyone have an idea of where the problem could come from?
pipeline = gst_parse_launch("udpsrc name=source ! rtpjitterbuffer mode=0 ! rtph264depay ! h264parse ! avdec_h264 ! tee name=t ! queue ! avenc_mpeg4 bitrate=10000000 ! matroskamux name=matrox ! filesink name=myFile t. ! queue ! videoconvert ! d3dvideosink name=mysink sync=false", &error);
Thanks,
EDIT:
All my save and display code:
void MainWindow::SaveVideo()
{
std::string strPathVideo = m_VideoPath + CreateFileName("mkv");
GError* error = NULL;
GstElement* source;
GstElement* filesink;
GstElement* matrox;
GstElement* clocktime;
//GstElement* compression;
GstElement* textoverlay;
GstElement* sink;
GstPad* padsink;
GstCaps* caps = gst_caps_new_simple("application/x-rtp",
"media", G_TYPE_STRING, "video",
"payload", G_TYPE_INT, 96,
"encoding-name", G_TYPE_STRING, "H264",
NULL);
(*ptrstats).pipeline = gst_parse_launch("udpsrc name=source ! rtpjitterbuffer mode=0 ! rtph264depay ! h264parse ! avdec_h264 ! textoverlay halignment=center valignment=top name=text ! tee name=t ! queue ! avenc_mpeg4 bitrate=10000000 ! matroskamux name=matrox ! filesink name=myFile t. ! queue ! videoconvert ! d3dvideosink name=mysink sync=false", &error);
if (!(*ptrstats).pipeline) {
outfile << "Save : ", error->message ,"\n";
exit(1);
}
sink = gst_bin_get_by_name(GST_BIN((*ptrstats).pipeline), "mysink");
filesink = gst_bin_get_by_name(GST_BIN((*ptrstats).pipeline), "myFile");
g_object_set(filesink, "location", strPathVideo.c_str(), NULL);
//compression = gst_bin_get_by_name(GST_BIN((*ptrstats).pipeline), "compression");
//g_object_set(G_OBJECT(compression), "bitrate", m_intcompression, NULL);
matrox = gst_bin_get_by_name(GST_BIN((*ptrstats).pipeline), "matrox");
g_object_set(G_OBJECT(matrox), "offset-to-zero", true, NULL);
source = gst_bin_get_by_name(GST_BIN((*ptrstats).pipeline), "source");
g_object_set(G_OBJECT(source), "caps", caps, NULL);
g_object_set(G_OBJECT(source), "port", m_port, NULL);
textoverlay = gst_bin_get_by_name(GST_BIN((*ptrstats).pipeline), "text");
g_object_set(G_OBJECT(textoverlay), "text", m_text.ToStdString(), NULL);
padsink = gst_element_get_static_pad(sink, "sink");
gst_pad_add_probe(padsink, GST_PAD_PROBE_TYPE_BUFFER, (GstPadProbeCallback)buffer_sink, ptrstats, NULL);
gst_object_unref(padsink);
(*ptrstats).bus = gst_element_get_bus(GST_ELEMENT((*ptrstats).pipeline));
#ifdef __WXGTK__
sink = gst_bin_get_by_name(GST_BIN((*ptrstats).pipeline), "mysink");
gst_video_overlay_set_window_handle(GST_VIDEO_OVERLAY(sink), m_xid);
#elif defined __WXMSW__
WXWidget hwnd = (*ptrstats).m_renderWindow->GetHandle();
gst_video_overlay_set_window_handle(GST_VIDEO_OVERLAY(sink),
reinterpret_cast<guintptr>(hwnd));
#endif
PlayHelper();
}
void MainWindow::PlayHelper()
{
GstStateChangeReturn ret =
gst_element_set_state((*ptrstats).pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE)
{
outfile << "Playhelper : Unable to set the pipeline to the playing state.\n";
wxLogWarning("Unable to set the pipeline to the playing state.");
gst_object_unref((*ptrstats).pipeline);
(*ptrstats).pipeline = NULL;
}
}
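The buffer_sink callback registered above is not shown here; it is a buffer probe used for statistics, roughly of this shape (a sketch under that assumption, not the actual code):
/* Assumed shape of buffer_sink: inspect each buffer's timestamp. */
GstPadProbeReturn buffer_sink(GstPad* pad, GstPadProbeInfo* info, gpointer user_data)
{
GstBuffer* buf = GST_PAD_PROBE_INFO_BUFFER(info);
g_print("buffer pts: %" GST_TIME_FORMAT "\n", GST_TIME_ARGS(GST_BUFFER_PTS(buf)));
return GST_PAD_PROBE_OK;
}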
I am trying to create a webcam on an embedded device and learn the GStreamer C API at the same time. I have dealt with gst-launch pipelines for a while, so I am already somewhat familiar with GStreamer.
My end goal is to eventually have a pipeline that will dynamically stream video, record video, and save pictures, all driven by external commands. I've started small with my implementation; right now I'm focusing on taking a picture in one branch of a tee while the other branch keeps flowing. The other branch is just a fakesink right now, but eventually it will be an H.264 encoder with a muxer and audio, saving videos.
Here is a simple view of my pipeline:
v4l2src ! capsfilter ! tee ! queue ! fakesink tee. ! queue ! videoconvert ! pngenc ! filesink
My idea was to dynamically add the picture portion of the pipeline while it's running.
The flow of my program goes like this:
1. A picture event is triggered (currently a simple timer).
2. Add a blocking probe on the tee.
3. Add the picture pipeline and link it to the tee.
4. Set it to playing.
5. Set a blocking probe on the filesink to verify that it has received data.
6. Send EOS down the pipeline, starting at the videoconvert.
7. Set a blocking probe on the tee pad linked to the picture pipeline.
8. Set the picture pipeline to NULL, then remove it and the tee pad.
When the program executes, the EOS probe on the tee pad for the picture pipeline is never called; instead the whole pipeline goes to EOS, I get an internal data stream error, and no picture.
I want to make sure the filesink gets exactly one buffer, since I can't stop the v4l2src stream or give it num-buffers=1. I guess my problem boils down to: how do I verify that the filesink gets only one buffer? Which pad should I send the EOS event on so that the picture is saved properly? And lastly, how do I make sure that only this one branch sees the EOS?
I've pored over the GStreamer tutorials and SO questions, but most are either unanswered or haven't helped my situation.
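For the "one buffer" part, the pattern I am experimenting with looks like this (a sketch, not verified; the names are mine): a buffer probe on the filesink's sink pad that removes itself after the first buffer and injects EOS at the head of the branch, so only this branch sees it.
/* Sketch: pass one buffer, then push EOS into this branch only. */
static GstPadProbeReturn first_buffer_cb(GstPad* pad, GstPadProbeInfo* info, gpointer user_data)
{
GstElement* branch_queue = GST_ELEMENT(user_data);
gst_pad_remove_probe(pad, GST_PAD_PROBE_INFO_ID(info));
/* EOS sent to the queue's sink pad travels down this branch only. */
GstPad* qsink = gst_element_get_static_pad(branch_queue, "sink");
gst_pad_send_event(qsink, gst_event_new_eos());
gst_object_unref(qsink);
return GST_PAD_PROBE_OK; /* let the first buffer through */
}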
Here is my code:
#include <QDebug>
#include <QTimer>
#include "gstpipeline.hpp"
#include "gsttypes.hpp"
using namespace INSP_GST_TYPES;
gstpipeline::gstpipeline()
: mV4l2Src(NULL)
, mEncoder(NULL)
, mPngEncoder(NULL)
, mVideoFileSink(NULL)
, mPictureFileSink(NULL)
, mRawCapsFilter(NULL)
, mEncodedCapsFilter(NULL)
, mEncoderVideoConvert(NULL)
, mPngVideoConvert(NULL)
, mEncoderQueue(NULL)
, mMatroskaMux(NULL)
, mPipeline(NULL)
{
}
void gstpipeline::init()
{
mV4l2Src = gst_element_factory_make("v4l2src", V4L2SOURCE_NAME);
mRawCapsFilter = gst_element_factory_make("capsfilter", RAW_CAPS_NAME);
mRawFakesinkQueue = gst_element_factory_make("queue", RAW_FAKESINK_QUEUE_NAME);
mRawFakeSink = gst_element_factory_make("fakesink", RAW_FAKESINK_NAME);
mRawTee = gst_element_factory_make("tee", RAW_TEE_NAME);
mPipeline = gst_pipeline_new(PIPELINE_NAME);
mRawCaps = gst_caps_new_simple("video/x-raw",
"format", G_TYPE_STRING, "NV12",
"width", G_TYPE_INT, 1280,
"height", G_TYPE_INT, 720,
"framerate", GST_TYPE_FRACTION, 30, 1,
NULL);
g_object_set(mRawCapsFilter, "caps", mRawCaps, NULL);
if(!mPipeline || !mV4l2Src || !mRawCapsFilter || !mRawTee || !mRawFakesinkQueue || !mRawFakeSink)
{
qCritical() << "Failed to create main gst elements";
return;
}
else
{
qWarning() << "created the initial pipeline";
}
linkRawPipeline();
}
void gstpipeline::linkRawPipeline()
{
gst_bin_add_many(GST_BIN(mPipeline), mV4l2Src, mRawCapsFilter, mRawTee, mRawFakesinkQueue, mRawFakeSink, NULL);
g_object_set(mPipeline, "message-forward", TRUE, NULL);
if(gst_element_link_many(mV4l2Src, mRawCapsFilter, mRawTee, NULL) != TRUE)
{
qCritical() << "Failed to link raw pipeline";
return;
}
if(gst_element_link_many(mRawFakesinkQueue, mRawFakeSink, NULL) != TRUE)
{
qCritical() << "Failed to link fakesink pipeline";
return;
}
/* Manually link the Tee, which has "Request" pads */
GstPad* tee_fakesink_pad = gst_element_get_request_pad (mRawTee, "src_%u");
qWarning ("Obtained request pad %s for fakesink branch.", gst_pad_get_name (tee_fakesink_pad));
GstPad* raw_queue_pad = gst_element_get_static_pad (mRawFakesinkQueue, "sink");
if (gst_pad_link (tee_fakesink_pad, raw_queue_pad) != GST_PAD_LINK_OK)
{
qCritical ("raw Tee could not be linked.");
}
gst_object_unref(tee_fakesink_pad);
gst_object_unref(raw_queue_pad);
if (gst_element_set_state (mPipeline, GST_STATE_PLAYING) == GST_STATE_CHANGE_FAILURE)
{
qCritical() << "Unable to set the pipeline to the ready state";
gst_object_unref (mPipeline);
}
else
{
qWarning() << "set pipeline to playing";
GMainLoop* loop = g_main_loop_new (NULL, FALSE);
gst_bus_add_watch (GST_ELEMENT_BUS (mPipeline), sMainBusCallback, loop);
QTimer::singleShot(1000, this, SLOT(onBusTimeoutExpired()));
}
}
void gstpipeline::onBusTimeoutExpired()
{
blockRawPipeline();
}
void gstpipeline::blockRawPipeline()
{
qWarning() << "Blocking raw pipeline";
GstPad* srcpad = gst_element_get_static_pad(mRawFakesinkQueue, SRC_PAD);
gst_pad_add_probe(srcpad,
(GstPadProbeType)(GST_PAD_PROBE_TYPE_BLOCK | GST_PAD_PROBE_TYPE_EVENT_DOWNSTREAM | GST_PAD_PROBE_TYPE_IDLE),
sRawFakesinkQueueBlockedCallback, NULL, NULL);
g_object_unref(srcpad);
qWarning() << "added fakesink queue probe";
}
GstPadProbeReturn gstpipeline::sRawFakesinkQueueBlockedCallback(GstPad * pad, GstPadProbeInfo * info, gpointer user_data)
{
gst_pad_remove_probe (pad, GST_PAD_PROBE_INFO_ID (info));
//create the picturesink pipeline and link it to a new src pad on the raw tee
mPictureQueue = gst_element_factory_make("queue", RAW_PICTURE_QUEUE_NAME);
mPngEncoder = gst_element_factory_make("pngenc", PNG_ENC_NAME);
mPictureFileSink = gst_element_factory_make("filesink", PICTURESINK_NAME);
mPngVideoConvert = gst_element_factory_make("videoconvert", VIDEOCONVERT_PNG_NAME);
if(!mPngEncoder || !mPictureFileSink || !mPngVideoConvert)
{
qCritical() << "failed to make picturesink elements";
}
g_object_set(G_OBJECT (mPictureFileSink), "location", "/mnt/userdata/pipelinetest.png", NULL);
gst_bin_add_many (GST_BIN (mPipeline), mPictureQueue, mPngVideoConvert,
mPngEncoder, mPictureFileSink, NULL);
if(gst_element_link_many(mPictureQueue, mPngVideoConvert, mPngEncoder, mPictureFileSink, NULL) != TRUE)
{
qCritical() << "failed to link picture pipeline";
}
GstPad* tee_picturesink_pad = gst_element_get_request_pad (mRawTee, "src_%u");
qWarning ("Obtained request pad %s for picturesink branch.", gst_pad_get_name (tee_picturesink_pad));
GstPad* raw_picture_queue_pad = gst_element_get_static_pad (mPictureQueue, "sink");
if (gst_pad_link (tee_picturesink_pad, raw_picture_queue_pad) != GST_PAD_LINK_OK)
{
qCritical ("picture Tee could not be linked.");
}
gst_element_sync_state_with_parent(mPictureQueue);
gst_element_sync_state_with_parent(mPngVideoConvert);
gst_element_sync_state_with_parent(mPngEncoder);
gst_element_sync_state_with_parent(mPictureFileSink);
qWarning() << "done adding picturesink";
//set data block to see when the filesink gets data so we can send an EOS
GstPad* sinkpad = gst_element_get_static_pad(mPictureFileSink, SINK_PAD);
gst_pad_add_probe(sinkpad, (GstPadProbeType)(GST_PAD_PROBE_TYPE_BLOCK_DOWNSTREAM),
sPictureSinkDownstreamBlockProbe, NULL, NULL);
gst_object_unref(sinkpad);
return GST_PAD_PROBE_DROP;
}
GstPadProbeReturn gstpipeline::sPictureSinkDownstreamBlockProbe(GstPad *pad, GstPadProbeInfo *info, gpointer user_data)
{
gst_pad_remove_probe (pad, GST_PAD_PROBE_INFO_ID (info));
//this is a data blocking pad probe on picture filesink
qWarning() << "setting the EOS event probe on the picturesink";
GstPad* srcpad = gst_element_get_static_pad(mPictureQueue, SRC_PAD);
gst_pad_add_probe(pad, (GstPadProbeType)(GST_PAD_PROBE_TYPE_BLOCK | GST_PAD_PROBE_TYPE_EVENT_DOWNSTREAM),sPictureSinkEOSCallback, NULL, NULL);
g_object_unref(srcpad);
qWarning() << "sending eos through videoconvert";
gst_element_send_event(mPngVideoConvert, gst_event_new_eos());
qWarning() << "exiting pad probe";
return GST_PAD_PROBE_PASS;
}
GstPadProbeReturn gstpipeline::sPictureSinkEOSCallback(GstPad *pad, GstPadProbeInfo *info, gpointer user_data)
{
gst_pad_remove_probe (pad, GST_PAD_PROBE_INFO_ID (info));
if (GST_EVENT_TYPE (GST_PAD_PROBE_INFO_DATA (info)) == GST_EVENT_EOS)
{
qWarning() << "setting raw queue pad block";
GstPad* srcpad = gst_element_get_static_pad(mPictureQueue, SRC_PAD);
gst_pad_add_probe(pad, (GstPadProbeType)(GST_PAD_PROBE_TYPE_IDLE),sRawQueueBlockedCallback, NULL, NULL);
g_object_unref(srcpad);
}
else
{
qCritical() << "picturesink pad probe is NOT EOS";
}
return GST_PAD_PROBE_HANDLED;
}
GstPadProbeReturn gstpipeline::sRawQueueBlockedCallback(GstPad *pad, GstPadProbeInfo *info, gpointer user_data)
{
if (GST_EVENT_TYPE (GST_PAD_PROBE_INFO_DATA (info)) == GST_EVENT_EOS)
{
gst_pad_remove_probe (pad, GST_PAD_PROBE_INFO_ID (info));
gst_element_set_state(mPictureFileSink, GST_STATE_NULL);
gst_element_set_state(mPngEncoder, GST_STATE_NULL);
gst_element_set_state(mPngVideoConvert, GST_STATE_NULL);
gst_element_set_state(mPictureQueue, GST_STATE_NULL);
//unlink the picture pipeline from the src pad of the raw tee and remove that pad
GstPad* tee_picturesink_pad = gst_element_get_static_pad(mRawTee, "src_1");
qWarning ("Obtained request pad %s for picturesink branch.", gst_pad_get_name (tee_picturesink_pad));
GstPad* raw_picture_queue_pad = gst_element_get_static_pad (mPictureQueue, "sink");
if (!gst_pad_unlink (tee_picturesink_pad, raw_picture_queue_pad))
{
qCritical ("picture Tee could not be unlinked.");
}
if(gst_element_remove_pad(mRawTee, tee_picturesink_pad) != TRUE)
{
qCritical("could not remove raw tee pad");
}
g_object_unref(tee_picturesink_pad);
g_object_unref(raw_picture_queue_pad);
gst_bin_remove_many(GST_BIN(mPipeline), mPictureQueue, mPngVideoConvert, mPngEncoder, mPictureFileSink, NULL);
qWarning() << "we have set the fakesink back up";
}
else
{
qCritical() << "picturesink pad probe is NOT EOS";
}
return GST_PAD_PROBE_PASS;
}
gboolean gstpipeline::sMainBusCallback (GstBus*bus, GstMessage *msg, gpointer user_data)
{
GMainLoop *loop = (GMainLoop*)user_data;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_ERROR:
{
GError *err = NULL;
gchar *dbg;
gst_message_parse_error (msg, &err, &dbg);
gst_object_default_error (msg->src, err, dbg);
g_clear_error (&err);
g_free (dbg);
g_main_loop_quit (loop);
}
break;
case GST_MESSAGE_EOS:
g_print ("we reached EOS\n");
g_main_loop_quit (loop);
break;
default:
// g_print ("msg: %s\n", GST_MESSAGE_TYPE_NAME(msg));
break;
}
return TRUE; /* keep the bus watch installed */
}
So I managed to figure this out myself. Here are the steps I took to get it working:
1. Add a blocking probe on the fakesink queue.
2. Add the picture pipeline.
3. Put a blocking data probe on the picture filesink.
4. Wait until a segment buffer reaches the filesink.
5. Put a blocking probe on the picture pipeline's queue.
6. In the queue blocking probe, send the EOS event and remove the picture pipeline (see the condensed sketch below).
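As a condensed sketch of steps 5 and 6 (simplified from the full code above; this mirrors the gst_element_send_event call there): with the picture queue blocked, inject EOS so only this branch sees it, then tear the branch down once the sink has flushed.
/* Inside the blocking probe on the picture queue: */
gst_element_send_event(mPngVideoConvert, gst_event_new_eos()); /* EOS enters this branch only */
/* Once the filesink has seen the EOS: set the branch elements to
GST_STATE_NULL, gst_bin_remove() them from the pipeline, and release the
tee request pad with gst_element_release_request_pad() + gst_object_unref(). */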
I was trying to develop an application for the pipeline:
gst-launch-1.0 rtspsrc location="rtsp://192.168.3.30:8554/rajvi" latency=0 name=demux demux. ! queue ! rtpmp4gdepay ! aacparse ! avdec_aac ! audioconvert ! audioresample ! autoaudiosink demux. ! queue ! rtph264depay ! h264parse ! omxh264dec ! videoconvert ! videoscale ! video/x-raw,width=176,height=144 ! ximagesink
Following is the code which I have implemented:
#include <gst/gst.h>
static void onPadAdded(GstElement *element, GstPad *pad, gpointer data)
{
gchar *name;
name = gst_pad_get_name(pad);
g_print("A new pad %s was created\n", name);
GstCaps * p_caps = gst_pad_get_pad_template_caps (pad);
gchar * description = gst_caps_to_string(p_caps);
g_free(description);
GstElement *depay = GST_ELEMENT(data);
if(gst_element_link_pads(element, name, depay, "sink") == 0)
{
g_print("cb_new_rtspsrc_pad : failed to link elements \n");
}
g_free(name);
}
int main(int argc, char *argv[]) {
GstElement *source, *videosink, *audio, *video, *convert, *pipeline, *audioDepay, *audioQueue, *videoQueue,
*audioParse, *audioDecode, *audioConvert, *audioResample, *audioSink, *videoDepay, *videoParser, *videoDecode, *videoConvert, *videoScale, *videoSink;
GstCaps *capsFilter;
GstBus *bus;
GstMessage *msg;
GstPad *pad;
GstPad *sinkpad,*ghost_sinkpad;
gboolean link_ok;
GstStateChangeReturn ret;
/* Initialize GStreamer */
gst_init (&argc, &argv);
/* Create Elements */
pipeline = gst_pipeline_new("rtsp-pipeline");
source = gst_element_factory_make ("rtspsrc", "source");
/*audio bin*/
audioQueue = gst_element_factory_make ("queue", "audio-queue");
audioDepay = gst_element_factory_make ("rtpmp4gdepay", "audio-depayer");
audioParse = gst_element_factory_make ("aacparse", "audio-parser");
audioDecode = gst_element_factory_make ("avdec_aac", "audio-decoder");
audioConvert = gst_element_factory_make ("audioconvert", "aconv");
audioResample = gst_element_factory_make ("audioresample", "audio-resample");
audioSink = gst_element_factory_make ("autoaudiosink", "audiosink");
if (!audioQueue || !audioDepay || !audioParse || !audioDecode || !audioConvert || !audioResample || !audioSink)
{
g_printerr("Cannot create audio elements \n");
return 0;
}
g_object_set(source, "location", "rtsp://192.168.3.30:8554/rajvi", NULL);
g_object_set(source, "latency", 0, NULL);
g_signal_connect(G_OBJECT(source), "pad-added", G_CALLBACK(onPadAdded), audioDepay);
gst_bin_add_many(GST_BIN(pipeline), source, audioQueue, audioDepay, audioParse, audioDecode,
audioConvert, audioResample, audioSink, NULL);
if (!gst_element_link_many(audioQueue, audioDepay, audioParse, audioDecode, audioConvert, audioResample, audioSink, NULL))
{
g_printerr("Error linking fields ...1 \n");
return 0;
}
video = gst_bin_new ("videobin");
videoQueue = gst_element_factory_make ("queue", "video-queue");
videoDepay= gst_element_factory_make ("rtph264depay", "video-depayer");
videoParser = gst_element_factory_make ("h264parse", "video-parser");
videoDecode = gst_element_factory_make ("omxh264dec", "video-decoder");
videoConvert = gst_element_factory_make("videoconvert", "convert");
videoScale = gst_element_factory_make("videoscale", "video-scale");
videoSink = gst_element_factory_make("ximagesink", "video-sink");
capsFilter = gst_caps_new_simple("video/x-raw",
"width", G_TYPE_INT, 176,
"height", G_TYPE_INT, 144,
NULL);
if (!videoQueue || !videoDepay || !videoParser || !videoDecode || !videoConvert || !videoScale || !videoSink || !capsFilter)
{
g_printerr("Cannot create video elements \n");
return 0;
}
gst_bin_add_many(GST_BIN(video),videoQueue, videoDepay, videoParser, videoDecode, videoConvert, videoScale,
videoSink, NULL);
/* set property value */
link_ok = gst_element_link_filtered(videoConvert, videoSink, capsFilter);
gst_caps_unref (capsFilter);
if (!link_ok) {
g_warning ("Failed to link element1 and element2!");
}
sinkpad = gst_element_get_static_pad (videoConvert, "sink");
ghost_sinkpad = gst_ghost_pad_new ("sink", sinkpad);
gst_pad_set_active (ghost_sinkpad, TRUE);
gst_element_add_pad (video, ghost_sinkpad);
if (!gst_element_link_many(videoQueue, videoDepay, videoParser, videoDecode, videoScale, NULL))
{
g_printerr("Error linking fields... 2 \n");
return 0;
}
gst_bin_add_many (GST_BIN(pipeline), video,NULL);
/* Start playing */
gst_element_set_state ( pipeline, GST_STATE_PLAYING);
/* Wait until error or EOS */
bus = gst_element_get_bus (pipeline);
msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE, GST_MESSAGE_ERROR | GST_MESSAGE_EOS);
/* Free resources */
if (msg != NULL)
gst_message_unref (msg);
gst_object_unref (bus);
gst_element_set_state (pipeline, GST_STATE_NULL);
gst_object_unref (pipeline);
return 0;
}
I am getting an error linking the pipeline, audio, and video bins.
If you put the video and audio elements all together in the pipeline bin, then you can do it. Figure out what your caps are for the video and audio, and you should be able to link them.
// ----------------------------------
// pad-added signal
// ----------------------------------
static void onPadAdded(GstElement* element, GstPad* pad, gpointer user_data)
{
gchar *name;
GstCaps * p_caps;
GstElement* nextElement = NULL;
GstElement* pipeline = (GstElement*)user_data;
name = gst_pad_get_name(pad);
g_print("A new pad %s was created\n", name);
p_caps = gst_pad_get_pad_template_caps(pad);
if (strstr(name, "[CAPS FOR VIDEO CONTAIN]") != NULL)
{
std::cout << std::endl << "------------------------ Video -------------------------------" << std::endl;
nextElement = gst_bin_get_by_name(GST_BIN(pipeline), "video-depayer");
}
else if (strstr(name, "[CAPS FOR AUDIO CONTAIN]") != NULL)
{
std::cout << std::endl << "------------------------ Audio -------------------------------" << std::endl;
nextElement = gst_bin_get_by_name(GST_BIN(pipeline), "audio-depayer");
}
if (nextElement != NULL)
{
if (!gst_element_link_filtered(element, nextElement, p_caps))
//if (!gst_element_link_pads_filtered(element, name, nextElement, "sink", p_caps))
{
std::cout << std::endl << "Failed to link video element to src to sink" << std::endl;
}
gst_object_unref(nextElement);
}
g_free(name);
gst_caps_unref(p_caps);
}
// ----------------------------------
// main
// ----------------------------------
int main(int argc, char *argv[])
{
GstElement *source, *videosink, *audio,*convert, *pipeline, *audioDepay, *audioQueue, *videoQueue,
*audioParse, *audioDecode, *audioConvert, *audioResample, *audioSink, *videoDepay, *videoParser, *videoDecode, *videoConvert, *videoScale, *videoSink;
GstCaps *capsFilter;
GstBus *bus;
GstMessage *msg;
GstPad *pad;
gboolean link_ok;
GstStateChangeReturn ret;
/* Initialize GStreamer */
gst_init(&argc, &argv);
/* Create Elements */
pipeline = gst_pipeline_new("rtsp-pipeline");
source = gst_element_factory_make("rtspsrc", "source");
/*audio bin*/
audioQueue = gst_element_factory_make("queue", "audio-queue");
audioDepay = gst_element_factory_make("rtpmp4gdepay", "audio-depayer");
audioParse = gst_element_factory_make("aacparse", "audio-parser");
audioDecode = gst_element_factory_make("avdec_aac", "audio-decoder");
audioConvert = gst_element_factory_make("audioconvert", "aconv");
audioResample = gst_element_factory_make("audioresample", "audio-resample");
audioSink = gst_element_factory_make("autoaudiosink", "audiosink");
if (!audioQueue || !audioDepay || !audioParse || !audioDecode || !audioConvert || !audioResample || !audioSink)
{
g_printerr("Cannot create audio elements \n");
return 0;
}
g_object_set(source, "location", "rtsp://192.168.3.30:8554/rajvi", NULL);
g_object_set(source, "latency", 0, NULL);
g_signal_connect(G_OBJECT(source), "pad-added", G_CALLBACK(onPadAdded), pipeline);
gst_bin_add_many(GST_BIN(pipeline), source, audioQueue, audioDepay, audioParse, audioDecode,
audioConvert, audioResample, audioSink, NULL);
if (!gst_element_link_many(audioQueue, audioDepay, audioParse, audioDecode, audioConvert, audioResample, audioSink, NULL))
{
g_printerr("Error linking fields ...1 \n");
return 0;
}
videoQueue = gst_element_factory_make("queue", "video-queue");
videoDepay = gst_element_factory_make("rtph264depay", "video-depayer");
videoParser = gst_element_factory_make("h264parse", "video-parser");
videoDecode = gst_element_factory_make("omxh264dec", "video-decoder");
videoConvert = gst_element_factory_make("videoconvert", "convert");
videoScale = gst_element_factory_make("videoscale", "video-scale");
videoSink = gst_element_factory_make("ximagesink", "video-sink");
capsFilter = gst_caps_new_simple("video/x-raw",
"width", G_TYPE_INT, 176,
"height", G_TYPE_INT, 144,
NULL);
if (!videoQueue || !videoDepay || !videoParser || !videoDecode || !videoConvert || !videoScale || !videoSink || !capsFilter)
{
g_printerr("Cannot create video elements \n");
return 0;
}
gst_bin_add_many(GST_BIN(pipeline), videoQueue, videoDepay, videoParser, videoDecode, videoConvert, videoScale,
videoSink, NULL);
/* set property value */
link_ok = gst_element_link_filtered(videoConvert, videoSink, capsFilter);
gst_caps_unref(capsFilter);
if (!link_ok) {
g_warning("Failed to link element1 and element2!");
}
if (!gst_element_link_many(videoQueue, videoDepay, videoParser, videoDecode, videoScale, NULL))
{
g_printerr("Error linking fields... 2 \n");
return 0;
}
/* Start playing */
gst_element_set_state(pipeline, GST_STATE_PLAYING);
/* Wait until error or EOS */
bus = gst_element_get_bus(pipeline);
msg = gst_bus_timed_pop_filtered(bus, GST_CLOCK_TIME_NONE,(GstMessageType)( GST_MESSAGE_ERROR | GST_MESSAGE_EOS));
/* Free resources */
if (msg != NULL)
gst_message_unref(msg);
gst_object_unref(bus);
gst_element_set_state(pipeline, GST_STATE_NULL);
gst_object_unref(pipeline);
return 0;
}
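A side note on the [CAPS FOR VIDEO CONTAIN] / [CAPS FOR AUDIO CONTAIN] placeholders: instead of matching substrings of the pad name, the pad's actual caps can be inspected, e.g. via the "media" field of the RTP caps (a sketch; it assumes the rtspsrc pads carry application/x-rtp caps):
/* Route by the "media" field instead of the pad name. */
GstCaps* caps = gst_pad_get_current_caps(pad);
if (caps) {
const GstStructure* s = gst_caps_get_structure(caps, 0);
const gchar* media = gst_structure_get_string(s, "media");
if (g_strcmp0(media, "video") == 0)
nextElement = gst_bin_get_by_name(GST_BIN(pipeline), "video-depayer");
else if (g_strcmp0(media, "audio") == 0)
nextElement = gst_bin_get_by_name(GST_BIN(pipeline), "audio-depayer");
gst_caps_unref(caps);
}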
Situation:
When I try to mux JPEGs into an MKV file, I get a zero-sized file. I have to put decode and encode elements between the parser and the muxer to get correct output. When I mux an H.264 video with the same code, I get a correct video file, which means the buffer timing should be OK (the duration and pts parameters). In any case, even after bad buffer settings the file size is not zero.
matroskamux only requires the "width" and "height" capabilities on its sink pad for "image/jpeg", but it looks like this is not sufficient. jpegparse provides correct values, and the program does not work after setting these capabilities manually either.
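For reference, the manual caps setting mentioned above looks like this (a sketch; the dimensions are just example values, using the same names as the code below):
/* Manually pinning width/height on the JPEG caps (example values). */
GstCaps* jpeg_caps = gst_caps_new_simple("image/jpeg",
"width", G_TYPE_INT, 1280,
"height", G_TYPE_INT, 720,
"framerate", GST_TYPE_FRACTION, (int)framerate, 1,
NULL);
g_object_set(app->src, "caps", jpeg_caps, NULL);
gst_caps_unref(jpeg_caps);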
Example of pipeline:
This pipeline doesn't work
appsrc ! "image/jpeg" ! jpegparse ! matroskamux ! filesink location=mjpeg.mkv
But this works
appsrc ! "image/jpeg" ! jpegparse ! avdec_mjpeg ! x264enc ! matroskamux ! filesink location=mjpeg.mkv
Example of code:
Working code, but with reencoding
app = new _App();
app->src = (GstAppSrc*)gst_element_factory_make ("appsrc", "source");
if(IsH264Frame(codecType))
app->parser = gst_element_factory_make("h264parse", "parser");
else if(codecType == IMAGE_MJPEG_FRAME)
app->parser = gst_element_factory_make("jpegparse", "parser");
//additional code
app->decoder = gst_element_factory_make("avdec_mjpeg", "decoder");
app->encoder = gst_element_factory_make("x264enc", "encoder");
app->muxer = gst_element_factory_make("matroskamux", "muxer");
app->sink = (GstAppSink*)gst_element_factory_make ("filesink", "sink");
if (!app->pipeline || !app->src || !app->decoder || !app->encoder || !app->muxer || !app->sink || !app->parser)
return;
app->bus = gst_pipeline_get_bus (GST_PIPELINE (app->pipeline));
g_assert(app->bus);
gst_bus_add_watch (app->bus, (GstBusFunc) BusMessage, this);
gst_bin_add_many (GST_BIN (app->pipeline), (GstElement*)app->src, app->decoder, app->encoder, app->muxer, app->sink, app->parser
,NULL);
/* SETUP ELEMENTS */
g_object_set(app->src,
"stream-type", 0,
"format", GST_FORMAT_BUFFERS,
"is-live", true,
"block", true,
NULL);
if(IsH264Frame(codecType)){
g_object_set(app->src, "caps", gst_caps_new_simple("video/x-h264",
NULL), NULL);
} else if(codecType == IMAGE_MJPEG_FRAME) {
g_object_set(app->src, "caps", gst_caps_new_simple("image/jpeg",
"framerate",GST_TYPE_FRACTION,(int)framerate,1,
NULL), NULL);
//additional code
g_object_set(app->decoder, "caps", gst_caps_new_simple("video/x-raw",
NULL), NULL);
g_object_set(app->encoder, "caps", gst_caps_new_simple("video/x-h264",
NULL), NULL);
}
g_signal_connect(app->src, "need-data", G_CALLBACK(StartFeed), this);
g_signal_connect(app->src, "enough-data", G_CALLBACK(StopFeed), this);
g_object_set (app->sink,
"location", GenerateFileName().c_str(),
"buffer-mode", 0,
NULL);
/* LINKING */
GstPad *padDecSrc, *padMuxSink, *parserSrc,
GstPadTemplate *mux_sink_pad_template;
mux_sink_pad_template = gst_element_class_get_pad_template (GST_ELEMENT_GET_CLASS (app->muxer), "video_%u");
padMuxSink = gst_element_request_pad (app->muxer, mux_sink_pad_template, NULL, NULL);
parserSrc = gst_element_get_static_pad (app->parser, "src");
padEncSrc = gst_element_get_static_pad (app->encoder, "src");
if(!gst_element_link( (GstElement*)app->src, app->parser))
return;
if(IsH264Frame(codecType)){
if(gst_pad_link (parserSrc, padMuxSink) != GST_PAD_LINK_OK)
return;
} else if(codecType == IMAGE_MJPEG_FRAME){
//additional code
if(!gst_element_link( app->parser, app->decoder))
return;
if(!gst_element_link( app->decoder, app->encoder))
return;
if(gst_pad_link (padDecSrc, padMuxSink) != GST_PAD_LINK_OK)
return;
}
if(!gst_element_link( app->muxer, (GstElement*)app->sink))
return;
/* PLAY */
GstStateChangeReturn ret = gst_element_set_state (app->pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE)
{
gst_object_unref (app->pipeline);
return;
}
Question:
What am I doing wrong? Any ideas on how to solve this problem?
I solved this problem by changing the appsrc property "format" from GST_FORMAT_BUFFERS to GST_FORMAT_TIME. Correct timestamps on the buffers alone are not enough.
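In code, the fix amounts to the following (a sketch; the timestamp bookkeeping is simplified and the variable names are mine, reusing app->src and framerate from the code above; gst_app_src_push_buffer needs gst/app/gstappsrc.h):
/* appsrc operating in time format. */
g_object_set(app->src, "format", GST_FORMAT_TIME, NULL);
/* When pushing each frame, set PTS and duration, then accumulate: */
GST_BUFFER_PTS(buffer) = timestamp;
GST_BUFFER_DURATION(buffer) = gst_util_uint64_scale_int(1, GST_SECOND, (int)framerate);
timestamp += GST_BUFFER_DURATION(buffer);
gst_app_src_push_buffer(app->src, buffer); /* takes ownership of the buffer */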