I render my stream in a wxWidgets ImagePanel (m_renderwindow in my code), as in the attached picture. The problem is that I have padding at the top and bottom and I can't remove it. I tried gst_video_overlay_set_render_rectangle, but the area stays black and doesn't display the stream. Does anyone have an idea?
GstState state;
GstState pending;
GstPad* pad = nullptr;
GError* error = NULL;
GstElement* source;
GstElement* clocktime;
GstElement* textoverlay;
GstCaps* caps = gst_caps_new_simple("application/x-rtp",
"media", G_TYPE_STRING, "video",
"payload", G_TYPE_INT, 96,
"encoding-name", G_TYPE_STRING, "H264",
NULL);
(*ptrstats).pipeline = gst_parse_launch("udpsrc name=source ! rtph264depay ! h264parse ! avdec_h264 ! videoconvert ! d3dvideosink name=mysink sync=false", &error);
if (!(*ptrstats).pipeline) {
outfile << "Load video : " << error->message << "\n";
exit(1);
}
source = gst_bin_get_by_name(GST_BIN((*ptrstats).pipeline), "source");
g_object_set(G_OBJECT(source), "caps", caps, NULL);
g_object_set(G_OBJECT(source), "port", m_port, NULL);
pad = gst_element_get_static_pad(source, "src");
gst_pad_add_probe(pad, GST_PAD_PROBE_TYPE_BUFFER, (GstPadProbeCallback)buffer_out_cb, ptrstats, NULL);
gst_object_unref(pad);
#ifdef __WXGTK__
outfile << "__WXGTK__\n";
GstElement* sink = gst_bin_get_by_name((GstBin*)(*ptrstats).pipeline, "mysink");
gst_video_overlay_set_window_handle(GST_VIDEO_OVERLAY(sink), m_xid);
#elif defined __WXMSW__
GstElement* sink = gst_bin_get_by_name((GstBin*)(*ptrstats).pipeline, "mysink");
WXWidget hwnd = m_renderWindow->GetHandle();
//gst_video_overlay_set_render_rectangle(GST_VIDEO_OVERLAY(sink), 0, 0, 1224, 1024);
gst_video_overlay_set_window_handle(GST_VIDEO_OVERLAY(sink),
reinterpret_cast<guintptr>(hwnd));
#endif
gst_element_set_state((*ptrstats).pipeline, GST_STATE_PLAYING);
SOLUTION: add force-aspect-ratio=false to d3dvideosink so the sink stretches the video to fill the window instead of letterboxing it.
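For example (a minimal sketch; the property can be set either in the launch string or on the element after it has been fetched by name):
(*ptrstats).pipeline = gst_parse_launch("udpsrc name=source ! rtph264depay ! h264parse ! avdec_h264 ! videoconvert ! d3dvideosink name=mysink force-aspect-ratio=false sync=false", &error);
// ...or equivalently, after gst_bin_get_by_name():
g_object_set(G_OBJECT(sink), "force-aspect-ratio", FALSE, NULL);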
I have a USB camera. I have working terminal commands to record or display full-HD video and to save one 4K image. I would like to handle it all via a C++ app. If we concentrate on the video saving:
gst-launch-1.0 v4l2src device=/dev/video0 num-buffers=900 ! image/jpeg, width=1920, height=1080, io-mode=4 ! imxvpudec ! imxvpuenc_mjpeg ! avimux ! filesink location=/mnt/ssd/test.avi
will save 900 frames (i.e. 30 s at 30 fps) of video. I would like to have C++ code that records indefinitely (in the future maybe in hour-long segments) until I (the app) tell it to stop.
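(As an aside for the planned hour-long segments: splitmuxsink can rotate output files by itself; a hedged, untested sketch that would replace the avimux ! filesink tail:)
/* hedged sketch, untested on this board: splitmuxsink (GStreamer >= 1.6)
   rolls the output file over automatically */
GstElement *smux = gst_element_factory_make ("splitmuxsink", "smux");
g_object_set (smux,
    "location", "/mnt/ssd/test%02d.avi",            /* pattern for the segment files */
    "max-size-time", (guint64) (3600 * GST_SECOND), /* one hour per segment */
    "muxer", gst_element_factory_make ("avimux", NULL),
    NULL);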
I came up with
struct {
GstElement *pipeline_sink, *source, *appsink;
GstElement *pipeline_src, *appsrc, *decoder, *mux, *sink, *encoder;
} usbCam::mGstData;
int usbCam::gstInit(){
GstCaps *caps;
GstStateChangeReturn ret;
// Initialize GStreamer
if (!gst_is_initialized()) {
setenv("GST_DEBUG", ("*:" + std::to_string(3)).c_str(), 1);
gst_init(nullptr, nullptr);
}
// Create the elements
mGstData.source = gst_element_factory_make ("v4l2src", "source");
g_object_set (mGstData.source, "device", "/dev/video0", NULL);
mGstData.appsink = gst_element_factory_make ("appsink", "appsink"); /* appsink must be created before it is configured and added below */
mGstData.pipeline_sink = gst_pipeline_new ("pipeline_sink");
caps = gst_caps_new_any();
gst_app_sink_set_caps(GST_APP_SINK(mGstData.appsink), caps);
gst_caps_unref (caps);
gst_app_sink_set_emit_signals(GST_APP_SINK(mGstData.appsink), true);
// Build the pipeline
gst_bin_add_many (GST_BIN (mGstData.pipeline_sink), mGstData.source, mGstData.appsink, NULL);
if (gst_element_link_many(mGstData.source, mGstData.appsink, NULL) != TRUE) {
g_printerr ("Elements could not be linked.\n");
gst_object_unref (mGstData.pipeline_sink);
return -1;
}
return 0;
}
int usbCam::videoStart(){
GstCaps *caps;
GstStateChangeReturn ret;
if (!mGstData.pipeline_sink || !mGstData.source) {
g_printerr ("Not all elements could be created.\n");
return -1;
}
mGstData.appsrc = gst_element_factory_make ("appsrc", "appsrc");
mGstData.decoder = gst_element_factory_make ("imxvpudec", "transform_enc");
mGstData.mux = gst_element_factory_make ("avimux", "avimux");
mGstData.sink = gst_element_factory_make ("filesink", "sink");
g_object_set (mGstData.sink, "location", "/mnt/ssd/videoTest.avi", NULL);
mGstData.pipeline_src = gst_pipeline_new ("pipeline_src");
if (!mGstData.pipeline_src || !mGstData.appsrc || !mGstData.decoder || !mGstData.mux || !mGstData.sink) {
g_printerr ("Not all elements could be created.\n");
return -1;
}
caps = gst_caps_new_simple ("image/jpeg",
"width", G_TYPE_INT, 1920,
"height", G_TYPE_INT, 1080,
"io-mode", G_TYPE_INT, 4,
NULL);
gst_app_src_set_caps(GST_APP_SRC(mGstData.appsrc), caps);
gst_caps_unref (caps);
gst_app_src_set_duration(GST_APP_SRC(mGstData.appsrc), GST_TIME_AS_MSECONDS(80));
gst_app_src_set_stream_type(GST_APP_SRC(mGstData.appsrc), GST_APP_STREAM_TYPE_STREAM);
gst_app_src_set_latency(GST_APP_SRC(mGstData.appsrc), -1, 0);
gst_bin_add_many (GST_BIN (mGstData.pipeline_src), mGstData.appsrc, mGstData.decoder, mGstData.sink, NULL);
if (gst_element_link_many(mGstData.appsrc, mGstData.decoder, mGstData.sink, NULL) != TRUE) {
g_printerr ("Elements could not be linked.\n");
gst_object_unref (mGstData.pipeline_src);
return -1;
}
ret = gst_element_set_state (mGstData.pipeline_src, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr ("Unable to set the pipeline to the playing state.\n");
gst_object_unref (mGstData.pipeline_src);
return -1;
}
return 0;
}
int usbCam::videoEnd(){
gst_app_src_end_of_stream(GST_APP_SRC(mGstData.appsrc));
usleep(500000);
gst_element_set_state (mGstData.pipeline_src, GST_STATE_NULL);
gst_object_unref (mGstData.pipeline_src);
return 0;
}
Now, this code runs. There is no error in the output, though there is one warning:
(GLib-GObject-WARNING **: 17:51:34.132: g_object_set_is_valid_property: object class 'GstSplitMuxSink' has no property named 'h}\x9fe h\xe6a_no_\xc1')
What actually bothers me is the output file. It is created, but it is an empty file of 0 B. Can anyone point me in the direction of the proper fix?
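A common cause of a 0-byte AVI is tearing the pipeline down before the muxer has seen EOS; as a hedged sketch (assuming the pipeline_src and appsrc from above), a clean shutdown would look like:
gst_app_src_end_of_stream (GST_APP_SRC (mGstData.appsrc));
/* wait until EOS has travelled through the muxer and the file is finalized */
GstBus *bus = gst_element_get_bus (mGstData.pipeline_src);
GstMessage *msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,
    (GstMessageType) (GST_MESSAGE_EOS | GST_MESSAGE_ERROR));
if (msg != NULL)
  gst_message_unref (msg);
gst_object_unref (bus);
gst_element_set_state (mGstData.pipeline_src, GST_STATE_NULL);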
Edit: Today I came up with two other attempts. The first one is not that different from the one already posted here. The second gives me a pipeline with wrong parameters (different FPS), and I am unable to stop it correctly so that the file has a proper EOF.
GstElement *pipeline;
GstBus *bus;
GstMessage *msg;
std::string command = "v4l2src device=/dev/video0 ! image/jpeg, width=1920, height=1080, io-mode=4 ! imxvpudec ! imxvpuenc_mjpeg ! avimux ! filesink location=/mnt/ssd/testPipeline.avi";
/* Build the pipeline */
pipeline = gst_parse_launch (command.c_str(), NULL);
/* Start playing */
gst_element_set_state (pipeline, GST_STATE_PLAYING);
/* Wait until error or EOS */
bus = gst_element_get_bus (pipeline);
msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,
    GstMessageType (GST_MESSAGE_ERROR | GST_MESSAGE_EOS));
/* Free resources */
if (msg != NULL)
gst_message_unref (msg);
gst_object_unref (bus);
gst_element_set_state (pipeline, GST_STATE_NULL);
gst_object_unref (pipeline);
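To stop this variant cleanly, the usual approach is to inject EOS from another thread; the gst_bus_timed_pop_filtered() call above then returns once the muxer has finalized the file. A minimal sketch:
/* from another thread, when recording should stop */
gst_element_send_event (pipeline, gst_event_new_eos ());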
EDIT2:
OK now my code looks like this:
GstElement *pipeline;
GstElement *tee; // in the future I would like to save video and images AND stream or use this pipeline's data internally.
void gstFail(const gchar* message){
g_printerr("%s", message);
gst_object_unref (pipeline);
return;
}
void videoStart(std::string path){
if (!gst_is_initialized()) {
setenv("GST_DEBUG", ("*:" + std::to_string(3)).c_str(), 1);
gst_init(nullptr, nullptr);
}
GstCaps *caps;
GstStateChangeReturn ret;
GstElement *source, *muxer, *sink;
source = gst_element_factory_make ("v4l2src", "source");
g_object_set (source, "device", mVideoDevice.toStdString().c_str(), NULL);
muxer = gst_element_factory_make ("avimux", "avimux");
tee = gst_element_factory_make("tee", "tee");
sink = gst_element_factory_make ("filesink", "sink");
g_object_set (sink, "location", path.c_str(), NULL);
pipeline = gst_pipeline_new ("pipeline_src");
if (!pipeline || !source || !muxer || !sink) {
g_printerr ("Not all elements could be created.\n");
return;
}
caps = gst_caps_new_simple ("image/jpeg",
"width", G_TYPE_INT, 1920,
"height", G_TYPE_INT, 1080,
"io-mode", G_TYPE_INT, 4,
"framerate", GST_TYPE_FRACTION, 30, 1,
"pixel-aspect-ratio", GST_TYPE_FRACTION, 1,1,
"interlace-mode", G_TYPE_STRING, "progresive",
NULL);
gst_bin_add_many (GST_BIN (pipeline), source, muxer,tee, sink, NULL);
if (gst_element_link_filtered(source, muxer, caps) != TRUE) {
gst_caps_unref (caps);
gstFail("Elements could not be linked or caps set.\n");
return;
}
gst_caps_unref (caps);
if (gst_element_link_many(muxer,tee, sink, NULL) != TRUE) {
gstFail("Elements could not be linked or caps set.\n");
return;
}
ret = gst_element_set_state (pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
gstFail("Unable to set the pipeline to the playing state.\n");
return;
}
return;
}
void videoEnd(void)
{
GstMessage *message = gst_message_new_eos (GST_OBJECT (pipeline));
gst_bus_post (GST_ELEMENT_BUS (pipeline), message);
/* gst_bus_post() takes ownership of the message, so it must not be unreffed here */
/* Free resources */
gst_element_set_state (pipeline, GST_STATE_NULL);
gst_object_unref(pipeline);
}
void takeImage(std::string path){
GstElement *sink = gst_element_factory_make("multifilesink", "multifilesink");
g_object_set (sink, "location", path.c_str(), NULL);
gst_bin_add_many (GST_BIN (pipeline), sink, NULL);
if (gst_element_link_many(tee, sink, NULL) != TRUE) {
gstFail("Elements could not be linked or caps set.\n");
return;
}
return;
}
This saves the video ALMOST OK (VLC does not display the correct length, but when I look at the file properties via Nautilus in Ubuntu the correct length is shown and the video is playable). It does not save the pictures.
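One likely reason the picture branch stays silent: takeImage() adds and links the new multifilesink while the pipeline is already PLAYING, but never brings the sink up to that state. A hedged sketch of the dynamic hookup (the tee request-pad template is assumed to be "src_%u"):
GstElement *sink = gst_element_factory_make ("multifilesink", "multifilesink");
g_object_set (sink, "location", path.c_str(), NULL);
gst_bin_add (GST_BIN (pipeline), sink);
GstPad *teepad = gst_element_get_request_pad (tee, "src_%u");
GstPad *sinkpad = gst_element_get_static_pad (sink, "sink");
gst_pad_link (teepad, sinkpad);
gst_object_unref (sinkpad);
gst_element_sync_state_with_parent (sink); /* match the PLAYING pipeline */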
OK, so here's how I solved it: my initial pipeline is split with a tee element into two sinks: the original sink that saves the video, and an appsink. In the callback function for the appsink I create a new pipeline and push the frame any time I want to save an image. Basically:
...
int saveSampleFromAppsinkJpeg( GstSample *sample){
if (!shouldSaveImage) {
return -2;
}
if (capturing){
return -3;
}
std::thread([=]{
capturing = true;
GstStateChangeReturn ret;
GstElement *appsrc = gst_element_factory_make ("appsrc", "appsrc");
GstElement *sink = gst_element_factory_make ("multifilesink", "sink");
g_object_set (sink, "location", "some/path", NULL);
GstElement *pipeline_img = gst_pipeline_new ("pipeline_img");
if (!pipeline_img || !appsrc || !sink) {
g_printerr ("Not all elements could be created.\n");
capturing = false;
return -1;
}
gst_app_src_set_caps(GST_APP_SRC(appsrc), caps);
gst_app_src_set_duration(GST_APP_SRC(appsrc), GST_TIME_AS_MSECONDS(80)); // TODO 80
gst_app_src_set_stream_type(GST_APP_SRC(appsrc), GST_APP_STREAM_TYPE_STREAM);
gst_app_src_set_latency(GST_APP_SRC(appsrc), -1, 0);
gst_bin_add_many (GST_BIN (pipeline_img), appsrc, sink, NULL);
if (gst_element_link_many(appsrc, sink, NULL) != TRUE) {
g_printerr ("Elements could not be linked.\n");
gst_object_unref (pipeline_img);
capturing = false;
return -1;
}
ret = gst_element_set_state (pipeline_img, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr ("Unable to set the pipeline to the playing state.\n");
gst_object_unref (pipeline_img);
capturing = false;
return -1;
}
//push the image in the pipeline
GstFlowReturn status = GstFlowReturn::GST_FLOW_OK;
status = gst_app_src_push_sample(GST_APP_SRC(appsrc), sample);
if (status != GstFlowReturn::GST_FLOW_OK) g_printerr ("Sample for saving image not pushed.\n");
status = gst_app_src_end_of_stream(GST_APP_SRC(appsrc));
if (status != GstFlowReturn::GST_FLOW_OK) g_printerr ("EOS for saving image not pushed.\n");
//end the pipeline
usleep(500000); // Important
GstMessage *message = gst_message_new_eos (GST_OBJECT (pipeline_img));
gst_bus_post (GST_ELEMENT_BUS (pipeline_img), message);
/* gst_bus_post() takes ownership of the message, so no unref here */
/* Free resources */
gst_element_set_state (pipeline_img, GST_STATE_PAUSED);
gst_element_set_state (pipeline_img, GST_STATE_NULL);
gst_object_unref (pipeline_img);
shouldSaveImage = false;
capturing = false;
return 1;
}).detach();
return 1;
}
static GstFlowReturn new_sample_jpeg(GstElement * elt)
{
GstSample *sample;
GstBuffer *buffer;
GstMemory *memory;
GstFlowReturn ret = GST_FLOW_OK;
// get the sample from appsink
sample = gst_app_sink_pull_sample (GST_APP_SINK (elt));
buffer = gst_sample_get_buffer (sample);
if (buffer != NULL) {
memory = gst_buffer_get_memory (buffer, 0);
if (memory != NULL) {
//now all data are image data. If image wanted->image save!
if (wantToSave) saveSampleFromAppsinkJpeg(sample);
}
...
}
}
void startVideo(){
if (!gst_is_initialized()) {
setenv("GST_DEBUG", ("*:" + std::to_string(3)).c_str(), 1);
gst_init(nullptr, nullptr);
}
GstStateChangeReturn ret;
GstElement *source, *muxer, *sink, *queue_rcr, *queue_app, *appsink;
source = gst_element_factory_make ("v4l2src", "source");
g_object_set (source, "device", "/dev/video1", NULL);
muxer = gst_element_factory_make ("avimux", "avimux");
tee = gst_element_factory_make("tee", "tee");
sink = gst_element_factory_make ("filesink", "sink");
queue_rcr = gst_element_factory_make ("queue", "record_queue");
queue_app = gst_element_factory_make ("queue", "app_queue");
appsink = gst_element_factory_make("appsink", "appsink");
g_object_set (sink, "location", path.toStdString().c_str(), NULL);
pipeline = gst_pipeline_new ("pipeline_src");
if (!pipeline || !source || !muxer || !sink || !queue_rcr || !appsink) {
g_printerr ("Not all elements could be created.\n");
return;
}
caps = gst_caps_new_simple ("image/jpeg",
"width", G_TYPE_INT, 1920,
"height", G_TYPE_INT, 1080,
"io-mode", G_TYPE_INT, 4,
"framerate", GST_TYPE_FRACTION, 30, 1,
"pixel-aspect-ratio", GST_TYPE_FRACTION, 1,1,
"interlace-mode", G_TYPE_STRING, "progresive",
NULL);
gst_bin_add_many (GST_BIN (pipeline), source, muxer,tee, sink,queue_rcr, appsink, queue_app, NULL);
if (gst_element_link_filtered(source, tee, caps) != TRUE) {
//failhandling
}
if (gst_element_link_many(tee, queue_rcr, muxer, sink, NULL) != TRUE) {
//failhandling
}
if (gst_element_link_many(tee, queue_app, appsink, NULL) != TRUE) {
//failhandling
}
gst_app_sink_set_emit_signals(GST_APP_SINK(appsink), true);
g_signal_connect (appsink, "new-sample", G_CALLBACK (new_sample_jpeg));
ret = gst_element_set_state (pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
//failhandling
}
// Start playing
recording = true;
return;
}
I'm trying to use the element audiofirfilter in a GStreamer pipeline, so far without luck.
I searched the docs and the mailing list for examples, but there is only one and, unfortunately, I can't compile it due to some missing pieces (I'm on an embedded system).
My pipeline is
if (data.pipeline == NULL) {
data.pipeline = gst_pipeline_new ("pipeline");
data.fakesrc = gst_element_factory_make("fakesrc", NULL);
data.capsfilter = gst_element_factory_make("capsfilter", NULL);
data.audioconvert = gst_element_factory_make("audioconvert", NULL);
data.audiofirfilter = gst_element_factory_make("audiofirfilter", NULL);
data.alsasink = gst_element_factory_make("alsasink", NULL);
gst_bin_add_many (GST_BIN (data.pipeline), data.fakesrc, data.capsfilter, data.audioconvert, data.audiofirfilter, data.alsasink, NULL);
if (!gst_element_link_many (data.fakesrc, data.capsfilter, data.audioconvert, data.audiofirfilter, data.alsasink, NULL) ) {
qDebug() << "Error: not all elements could be linked!";
return;
}
GstCaps* caps = gst_caps_new_simple("audio/x-raw",
"format", G_TYPE_STRING, "S16LE",
"rate", G_TYPE_INT, SAMPLING_FREQUENCY,
"channels", G_TYPE_INT,2,
"layout", G_TYPE_STRING, "interleaved",
NULL);
g_object_set(G_OBJECT(data.capsfilter), "caps", caps, NULL);
g_object_set (G_OBJECT (data.fakesrc),
"sync", TRUE,
"signal-handoffs", TRUE,
"sizemax",BUFFER_SIZE,
"sizetype",2,NULL);
gdouble filter_kernel[16] = {1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
GValueArray *va;
va = g_value_array_new (1);
GValue v = { 0, };
g_value_init (&v, G_TYPE_DOUBLE);
for (int i = 0; i < 16; i++) {
g_value_set_double (&v, filter_kernel[i]);
g_value_array_append (va, &v);
g_value_reset (&v);
}
g_object_set (G_OBJECT (data.audiofirfilter), "kernel", va, NULL);
g_object_set (G_OBJECT (data.audiofirfilter), "latency", (gint64) (16 / 2), NULL);
g_value_array_free (va);
g_signal_connect (data.fakesrc, "handoff", G_CALLBACK (SourceHandoffCallback), /*&data*/ this);
GstBus *bus;
bus = gst_pipeline_get_bus (GST_PIPELINE(data.pipeline));
gst_object_unref (bus);
}
I only want to implement a unit impulse response for now.
The pipeline won't play at all. It is a stereo pipeline.
Does anyone have a working example of an application of audiofirfilter that doesn't involve the FFT or inverse FFT?
Thanks
I tried a simpler pipeline, and this works:
gst_init (NULL,NULL);
cFusionDrumsPlayerData data;
data.pipeline = gst_pipeline_new ("pipeline");
data.src = gst_element_factory_make("audiotestsrc", NULL);
data.audiofirfilter = gst_element_factory_make("audiofirfilter", NULL);
data.sink = gst_element_factory_make("autoaudiosink", NULL);
gst_bin_add_many (GST_BIN (data.pipeline), data.src, data.audiofirfilter, data.sink, NULL);
if (!gst_element_link_many (data.src, data.audiofirfilter, data.sink, NULL) ) {
return;
}
gdouble filter_kernel[16] = {0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
GValueArray *va;
va = g_value_array_new (1);
GValue v = { 0, };
g_value_init (&v, G_TYPE_DOUBLE);
for (int i = 0; i < 16; i++) {
g_value_set_double (&v, filter_kernel[i]);
g_value_array_append (va, &v);
g_value_reset (&v);
}
g_object_set (G_OBJECT (data.audiofirfilter), "kernel", va, NULL);
g_object_set (G_OBJECT (data.audiofirfilter), "latency", (gint64) (16 / 2), NULL);
g_value_array_free (va);
gst_element_set_state (data.pipeline, GST_STATE_PLAYING);
I think the problem may be the source (fakesrc).
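Note also that the first snippet never sets the pipeline to PLAYING and unrefs the bus without reading it; a minimal sketch of the missing part, assuming data.pipeline as above:
gst_element_set_state (data.pipeline, GST_STATE_PLAYING);
GstBus *bus = gst_pipeline_get_bus (GST_PIPELINE (data.pipeline));
GstMessage *msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,
    (GstMessageType) (GST_MESSAGE_ERROR | GST_MESSAGE_EOS));
if (msg != NULL) {
  /* on error, details can be read with gst_message_parse_error() */
  gst_message_unref (msg);
}
gst_object_unref (bus);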
I already have a working GStreamer pipeline in an embedded Linux C application. The pipeline looks like this:
appsrc-> queue - > h264encode -> queue -> h264parse -> mp4mux -> filesink
The source is a video memory buffer which is pushed into an appsrc element using the standard "need-data" method.
The code is similar to the gstreamer examples and looks like this:
static void
cb_need_data (GstElement *appsrc,
guint unused_size,
gpointer user_data)
{
static gboolean white = FALSE;
static GstClockTime timestamp = 0;
GstBuffer *buffer;
guint size;
GstFlowReturn ret;
size = 1024 * 768 * 2;
buffer = gst_buffer_new_and_alloc (size);
/* this makes the image black/white */
memset (buffer->data, white ? 0x55 : 0xaa, size);
white = !white;
GST_BUFFER_TIMESTAMP (buffer) = timestamp;
GST_BUFFER_DURATION (buffer) = gst_util_uint64_scale_int (1, GST_SECOND, 2);
timestamp += GST_BUFFER_DURATION (buffer);
g_print("push-buffer\n");
//g_signal_emit_by_name (appsrc, "push-buffer", buffer, &ret);
ret = gst_app_src_push_buffer(GST_APP_SRC(appsrc), buffer);
if (ret != GST_FLOW_OK) {
/* something wrong, stop pushing */
g_print("ret fail\n");
g_main_loop_quit (loop);
}
}
gint
main (gint argc,
gchar *argv[])
{
GstElement *pipeline, *appsrc;
/* init GStreamer */
gst_init (&argc, &argv);
loop = g_main_loop_new (NULL, FALSE);
/* setup pipeline */
pipeline = gst_parse_launch("appsrc name=mysource ! fakesink silent=0", NULL);
appsrc = gst_bin_get_by_name_recurse_up (GST_BIN (pipeline), "mysource"); /* the element is named "mysource" in the launch string */
GstCaps *caps = gst_video_format_new_caps(GST_VIDEO_FORMAT_UYVY, 1024, 768, 0, 1, 4, 3);
gst_app_src_set_caps(GST_APP_SRC(appsrc), caps);
/* setup appsrc */
g_object_set (G_OBJECT (appsrc),
"stream-type", 0,
"format", GST_FORMAT_TIME, NULL);
g_signal_connect (appsrc, "need-data", G_CALLBACK (cb_need_data), NULL);
/* play */
gst_element_set_state (pipeline, GST_STATE_PLAYING);
g_print("PLAY\n");
g_main_loop_run (loop);
/* clean up */
gst_element_set_state (pipeline, GST_STATE_NULL);
gst_object_unref (GST_OBJECT (pipeline));
g_main_loop_unref (loop);
return 0;
}
Now I need to run multiple identical pipelines in parallel. Since the pipeline elements will be identical, I'm planning to create a data structure containing all the necessary GStreamer elements; that way I should be able to create different instances of the same pipeline type:
appsrc1-> ...... -> filesink1
appsrc2-> ...... -> filesink2
appsrc3-> ...... -> filesink3
.....
The question:
The problem is that I don't know how to instantiate the cb_need_data callback function for the different appsrc elements. Would it be possible to instantiate this function, or do I need to create a copy for each pipeline (cb_need_data1, cb_need_data2, ... etc.)?
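One callback can serve every pipeline if each g_signal_connect() passes a per-pipeline context through the user_data argument (the last parameter, left as NULL in the snippet above). A minimal sketch with a hypothetical PipelineCtx struct:
typedef struct {
  GstElement *appsrc;
  GstClockTime timestamp;   /* replaces the static locals in cb_need_data */
  gboolean white;
} PipelineCtx;

static void
cb_need_data (GstElement *appsrc, guint unused_size, gpointer user_data)
{
  PipelineCtx *ctx = (PipelineCtx *) user_data;
  /* ... allocate the buffer, stamp it with ctx->timestamp, advance it,
     and gst_app_src_push_buffer() exactly as in the single-pipeline code ... */
}

/* during setup: one callback, N pipelines, one context each */
static void
connect_sources (PipelineCtx *ctx, guint n)
{
  for (guint i = 0; i < n; i++)
    g_signal_connect (ctx[i].appsrc, "need-data",
        G_CALLBACK (cb_need_data), &ctx[i]);
}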
I want to play two different local video files at the same time in a single window. The code below, without demux and decoder, works fine.
static void play_video(){
GMainLoop *loop;
GstElement *pipeline,*videomixer;
GstElement *src,*sink,*filter,*csp,*videobox;
GstElement *src1,*filter1,*csp1,*videobox1;
GstElement *srcb,*filterb,*cspb,*videoboxb;
GstCaps *filtercaps,*filtercaps1,*filtercapsb;
GstPad *pad,*pad1;
const char *pattern = "snow";
loop = g_main_loop_new(NULL, FALSE);
pipeline = gst_pipeline_new("my-pipeline");
src = gst_element_factory_make ("videotestsrc","src");
src1 = gst_element_factory_make ("videotestsrc","src1");
g_object_set (G_OBJECT (src), "pattern", 10, NULL);
filter = gst_element_factory_make("capsfilter","filter");
filter1 = gst_element_factory_make("capsfilter","filter1");
csp = gst_element_factory_make("ffmpegcolorspace","csp");
csp1 = gst_element_factory_make("ffmpegcolorspace","csp1");
/**/
videobox=gst_element_factory_make("videobox","videobox");
g_object_set(videobox,"top",0,"bottom",0,"left",0,"right",0,NULL);
videobox1=gst_element_factory_make("videobox","videobox1");
g_object_set(videobox1,"top",-20,"bottom",0,"left",0,"right",0,NULL);
videomixer=gst_element_factory_make("videomixer","videomixer");
/**/
sink = gst_element_factory_make("xvimagesink","sink");
if(sink == NULL)
sink = gst_element_factory_make("ximagesink","sink");
if(sink == NULL)
g_error("'ximagesink' yaratılamadı.");
gst_bin_add_many(GST_BIN(pipeline),src,filter,videobox,videomixer,csp,sink,
src1,filter1,videobox1,csp1,NULL);
gst_element_link_many(src,filter,csp,videobox,videomixer,NULL);
gst_element_link_many(src1,filter1,csp1,videobox1,videomixer,NULL);
/*
videotestsrc pattern="snow" ! video/x-raw-yuv, framerate=1/1, width=350,
height=250 ! \
textoverlay font-desc="Sans 24" text="CAM2" valign=top halign=left
shaded-background=true ! \
videobox border-alpha=0 top=-200 left=-450 ! mix. \
*/
gst_element_link_many(videomixer,sink,NULL);
filtercaps = gst_caps_new_simple ("video/x-raw-rgb",
"width", G_TYPE_INT, 1024,
"height", G_TYPE_INT, 768,
"framerate", GST_TYPE_FRACTION, 25, 1,
"bpp", G_TYPE_INT, 16,
"depth", G_TYPE_INT, 16,
"endianness", G_TYPE_INT, G_BYTE_ORDER,
NULL);
filtercaps1 = gst_caps_new_simple ("video/x-raw-rgb",
"width", G_TYPE_INT, 200,
"height", G_TYPE_INT, 500,
"framerate", GST_TYPE_FRACTION, 25, 1,
"bpp", G_TYPE_INT, 16,
"depth", G_TYPE_INT, 16,
"endianness", G_TYPE_INT, G_BYTE_ORDER,
NULL);
g_object_set (G_OBJECT (filter), "caps", filtercaps, NULL);
gst_caps_unref (filtercaps);
g_object_set (G_OBJECT (filter1), "caps", filtercaps1, NULL);
gst_caps_unref (filtercaps1);
/*pad = gst_element_get_pad (src, "src");
pad1 = gst_element_get_pad (src1, "src1");
//gst_pad_add_buffer_probe (pad, G_CALLBACK (cb_have_data), NULL);
//gst_pad_add_buffer_probe (pad1, G_CALLBACK (cb_have_data), NULL);
//gst_object_unref (pad);
//gst_object_unref (pad1);*/
/* run */
gst_element_set_state (pipeline, GST_STATE_PLAYING);
/* wait until it's up and running or failed */
if (gst_element_get_state (pipeline, NULL, NULL, -1) ==
GST_STATE_CHANGE_FAILURE) {
g_error ("Failed to go into PLAYING state");
}
g_print ("Running ...\n");
g_main_loop_run (loop);
/* exit */
gst_element_set_state (pipeline, GST_STATE_NULL);
gst_object_unref (pipeline);
}
Problem is, when I replace the videotestsrc with filesrc it fails, and the only error message I get is "Could not lookup object NULL on signal destroy of object window". I'm not a GStreamer expert, and my guess is that I am failing at setting up the demux and decoder.
static void play_video5(){
GMainLoop *loop;
GstElement *pipeline,*videomixer;
GstElement *src,*sink,*filter,*csp,*videobox;
GstElement *src1,*filter1,*csp1,*videobox1;
GstElement *srcb,*filterb,*cspb,*videoboxb;
GstCaps *filtercaps,*filtercaps1,*filtercapsb;
GstPad *pad,*pad1;
GstElement *demux,*decoder;
const char *pattern = "snow";
loop = g_main_loop_new(NULL, FALSE);
pipeline = gst_pipeline_new("my-pipeline");
//Source
src = gst_element_factory_make ("videotestsrc","src");
src1 = gst_element_factory_make ("filesrc","src1");
g_object_set (G_OBJECT (src1), "location", "file:///root/yu.mp4", NULL);
//Demux
demux = gst_element_factory_make ("mpegdemux", "demux");
//Decoder
decoder = gst_element_factory_make ("decodebin", "decoder");
// decoder = gst_element_factory_make ("ffdec_mpeg4","mpeg4-decoder");
//Filter
filter = gst_element_factory_make("capsfilter","filter");
filter1 = gst_element_factory_make("capsfilter","filter1");
//Colorspace
csp = gst_element_factory_make("ffmpegcolorspace","csp");
csp1 = gst_element_factory_make("ffmpegcolorspace","csp1");
//Videobox
videobox=gst_element_factory_make("videobox","videobox");
g_object_set(videobox,"top",0,"bottom",0,"left",0,"right",0,NULL);
videobox1=gst_element_factory_make("videobox","videobox1");
g_object_set(videobox1,"top",-20,"bottom",0,"left",0,"right",0,NULL);
//videomixer
videomixer=gst_element_factory_make("videomixer","videomixer");
//Sink
sink = gst_element_factory_make("xvimagesink","sink");
if(sink == NULL)
sink = gst_element_factory_make("ximagesink","sink");
if(sink == NULL)
g_error("'ximagesink' yaratılamadı.");
//Add to Bin
gst_bin_add_many(GST_BIN(pipeline),src,filter,videobox,videomixer,csp,
src1,decoder,filter1,videobox1,csp1,sink,NULL);
//Link Elements
gst_element_link(src,filter);
gst_element_link(filter,csp);
gst_element_link(csp,videobox);
gst_element_link(videobox, videomixer);
gst_element_link(src1,decoder);
gst_element_link(decoder,filter1);
// gst_element_link(decoder,csp1);
gst_element_link(filter1,csp1);
gst_element_link(csp1,videobox1);
gst_element_link(videobox1, videomixer);
gst_element_link(videomixer,sink);
//Cap definition
filtercaps = gst_caps_new_simple ("video/x-raw-rgb",
"width", G_TYPE_INT, 1024,
"height", G_TYPE_INT, 768,
"framerate", GST_TYPE_FRACTION, 25, 1,
"bpp", G_TYPE_INT, 16,
"depth", G_TYPE_INT, 16,
"endianness", G_TYPE_INT, G_BYTE_ORDER,
NULL);
filtercaps1 = gst_caps_new_simple ("video/x-raw-yuv",
"width", G_TYPE_INT, 640,
"height", G_TYPE_INT, 480,
"framerate", GST_TYPE_FRACTION, 25, 1,
/*"bpp", G_TYPE_INT, 16,
"depth", G_TYPE_INT, 16,
"endianness", G_TYPE_INT, G_BYTE_ORDER,*/
NULL);
//Cap to Filter
g_object_set (G_OBJECT (filter), "caps", filtercaps, NULL);
gst_caps_unref (filtercaps);
g_object_set (G_OBJECT (filter1), "caps", filtercaps1, NULL);
gst_caps_unref (filtercaps1);
/* run */
gst_element_set_state (pipeline, GST_STATE_PLAYING);
/* wait until it's up and running or failed */
if (gst_element_get_state (pipeline, NULL, NULL, -1) ==
GST_STATE_CHANGE_FAILURE) {
g_error ("Failed to go into PLAYING state");
}
g_print ("Running ...\n");
g_main_loop_run (loop);
/* exit */
gst_element_set_state (pipeline, GST_STATE_NULL);
gst_object_unref (pipeline);
}
Any ideas or corrections are welcome.
Several issues:
filesrc does not take URIs, but file paths.
The /* wait until it's up and running or failed */ block and the code below it are not needed; better to listen on the bus for the error and warning messages.
"Could not lookup object NULL on signal destroy of object window" has nothing to do with GStreamer.
The whole videobox business is not needed, as the pads of videomixer have xpos, ypos and zorder properties.
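For example, a hedged sketch of the first and last fixes (the mixer request-pad template may be named "sink_%d" on 0.10):
/* filesrc wants a plain path, not a URI */
g_object_set (G_OBJECT (src1), "location", "/root/yu.mp4", NULL);
/* position the second stream on its mixer pad instead of using videobox */
GstPad *mixpad = gst_element_get_request_pad (videomixer, "sink_%d");
g_object_set (mixpad, "xpos", 0, "ypos", 20, "zorder", 1, NULL);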