I've been trying to set up a simple sendonly WebRTC client with GStreamer, but I'm having trouble getting the actual video to display on the WebRTC receiver side. I am new to both GStreamer and WebRTC.
I'm using the examples from https://gitlab.freedesktop.org/gstreamer/gst-examples/-/tree/master/webrtc and trying to combine parts of them. I've had 1:1 communication working, but I wanted to introduce rooms so that more clients can view the "view-only" stream from GStreamer.
My current code is based on the multiparty-sendrecv example, where I swapped out the audio for video. Furthermore, I'm using a modified version of the signalling server and a modified version of the JavaScript WebRTC client. If necessary I could provide code for all of the above, but to keep things simple I won't. I don't think the problem lies in either the signalling server or the WebRTC client, because the ICE candidates have been successfully negotiated along with the SDP offer & answer according to chrome://webrtc-internals/. See the image below.
To figure out what's going on, I've exported a graph of the GStreamer pipeline after a user has joined the room and been added to the pipeline. See the graph below.
As far as I can tell I should be receiving video data on my frontend, but I'm not. I've had a single odd case where the videotestsrc did show up, but I haven't been able to reproduce it. Because of that, I suspect the pipeline itself isn't necessarily wrong and that we may be dealing with some kind of race condition instead.
I've added the modified multiparty-sendrecv example below; please take a look at it. Most of the methods have purposely been left out due to Stack Overflow's character limit.
Main functions
static void
handle_media_stream(GstPad* pad, GstElement* pipe, const char* convert_name,
const char* sink_name)
{
GstPad* qpad;
GstElement* q, * conv, * sink;
GstPadLinkReturn ret;
q = gst_element_factory_make("queue", NULL);
g_assert_nonnull(q);
conv = gst_element_factory_make(convert_name, NULL);
g_assert_nonnull(conv);
sink = gst_element_factory_make(sink_name, NULL);
g_assert_nonnull(sink);
gst_bin_add_many(GST_BIN(pipe), q, conv, sink, NULL);
gst_element_sync_state_with_parent(q);
gst_element_sync_state_with_parent(conv);
gst_element_sync_state_with_parent(sink);
gst_element_link_many(q, conv, sink, NULL);
qpad = gst_element_get_static_pad(q, "sink");
ret = gst_pad_link(pad, qpad);
g_assert_cmpint(ret, ==, GST_PAD_LINK_OK);
}
static void
on_incoming_decodebin_stream(GstElement* decodebin, GstPad* pad,
GstElement* pipe)
{
GstCaps* caps;
const gchar* name;
if (!gst_pad_has_current_caps(pad)) {
g_printerr("Pad '%s' has no caps, can't do anything, ignoring\n",
GST_PAD_NAME(pad));
return;
}
caps = gst_pad_get_current_caps(pad);
name = gst_structure_get_name(gst_caps_get_structure(caps, 0));
if (g_str_has_prefix(name, "video")) {
handle_media_stream(pad, pipe, "videoconvert", "autovideosink");
}
else if (g_str_has_prefix(name, "audio")) {
handle_media_stream(pad, pipe, "audioconvert", "autoaudiosink");
}
else {
g_printerr("Unknown pad %s, ignoring", GST_PAD_NAME(pad));
}
}
static void
on_incoming_stream(GstElement* webrtc, GstPad* pad, GstElement* pipe)
{
GstElement* decodebin;
GstPad* sinkpad;
if (GST_PAD_DIRECTION(pad) != GST_PAD_SRC)
return;
decodebin = gst_element_factory_make("decodebin", NULL);
g_signal_connect(decodebin, "pad-added",
G_CALLBACK(on_incoming_decodebin_stream), pipe);
gst_bin_add(GST_BIN(pipe), decodebin);
gst_element_sync_state_with_parent(decodebin);
sinkpad = gst_element_get_static_pad(decodebin, "sink");
gst_pad_link(pad, sinkpad);
gst_object_unref(sinkpad);
}
static void
add_peer_to_pipeline(const gchar* peer_id, gboolean offer)
{
int ret;
gchar* tmp;
GstElement* tee, * webrtc, * q;
GstPad* srcpad, * sinkpad;
tmp = g_strdup_printf("queue-%s", peer_id);
q = gst_element_factory_make("queue", tmp);
g_free(tmp);
webrtc = gst_element_factory_make("webrtcbin", peer_id);
g_object_set(webrtc, "bundle-policy", GST_WEBRTC_BUNDLE_POLICY_MAX_BUNDLE, NULL);
gst_bin_add_many(GST_BIN(pipeline), q, webrtc, NULL);
srcpad = gst_element_get_static_pad(q, "src");
g_assert_nonnull(srcpad);
sinkpad = gst_element_get_request_pad(webrtc, "sink_%u");
g_assert_nonnull(sinkpad);
ret = gst_pad_link(srcpad, sinkpad);
g_assert_cmpint(ret, ==, GST_PAD_LINK_OK);
gst_object_unref(srcpad);
gst_object_unref(sinkpad);
tee = gst_bin_get_by_name(GST_BIN(pipeline), "videotee");
g_assert_nonnull(tee);
srcpad = gst_element_get_request_pad(tee, "src_%u");
g_assert_nonnull(srcpad);
gst_object_unref(tee);
sinkpad = gst_element_get_static_pad(q, "sink");
g_assert_nonnull(sinkpad);
ret = gst_pad_link(srcpad, sinkpad);
g_assert_cmpint(ret, ==, GST_PAD_LINK_OK);
gst_object_unref(srcpad);
gst_object_unref(sinkpad);
/* This is the gstwebrtc entry point where we create the offer and so on. It
* will be called when the pipeline goes to PLAYING.
* XXX: We must connect this after webrtcbin has been linked to a source via
* get_request_pad() and before we go from NULL->READY otherwise webrtcbin
* will create an SDP offer with no media lines in it. */
if (offer)
g_signal_connect(webrtc, "on-negotiation-needed",
G_CALLBACK(on_negotiation_needed), (gpointer)peer_id);
/* We need to transmit this ICE candidate to the browser via the websockets
* signalling server. Incoming ice candidates from the browser need to be
* added by us too, see on_server_message() */
g_signal_connect(webrtc, "on-ice-candidate",
G_CALLBACK(send_ice_candidate_message), (gpointer)peer_id);
/* Incoming streams will be exposed via this signal */
g_signal_connect(webrtc, "pad-added", G_CALLBACK(on_incoming_stream),
pipeline);
/* Set the pipeline branch to PLAYING */
ret = gst_element_sync_state_with_parent(q);
g_assert_true(ret);
ret = gst_element_sync_state_with_parent(webrtc);
g_assert_true(ret);
GST_DEBUG_BIN_TO_DOT_FILE(GST_BIN(pipeline), GST_DEBUG_GRAPH_SHOW_ALL, "pipeline");
}
static gboolean
start_pipeline(void)
{
GstStateChangeReturn ret;
GError* error = NULL;
/* NOTE: webrtcbin currently does not support dynamic addition/removal of
* streams, so we use a separate webrtcbin for each peer, but all of them are
* inside the same pipeline. We start by connecting it to a fakesink so that
* we can preroll early. */
pipeline = gst_parse_launch("tee name=videotee ! queue ! fakesink "
"videotestsrc is-live=true pattern=ball ! videoconvert ! queue ! vp8enc deadline=1 ! rtpvp8pay ! "
"queue ! " RTP_CAPS_VP8 "96 ! videotee. ", &error);
if (error) {
g_printerr("Failed to parse launch: %s\n", error->message);
g_error_free(error);
goto err;
}
g_print("Starting pipeline, not transmitting yet\n");
ret = gst_element_set_state(GST_ELEMENT(pipeline), GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE)
goto err;
return TRUE;
err:
g_print("State change failure\n");
if (pipeline)
g_clear_object(&pipeline);
return FALSE;
}
/*
 * When we join a room, we are responsible for starting negotiation with each
 * peer in it by sending an SDP offer and ICE candidates.
 */
static void
do_join_room(const gchar* text)
{
gint ii, len;
gchar** peer_ids;
if (app_state != ROOM_JOINING) {
cleanup_and_quit_loop("ERROR: Received ROOM_OK when not calling",
ROOM_JOIN_ERROR);
return;
}
app_state = ROOM_JOINED;
g_print("Room joined\n");
/* Start recording, but not transmitting */
if (!start_pipeline()) {
cleanup_and_quit_loop("ERROR: Failed to start pipeline", ROOM_CALL_ERROR);
return;
}
peer_ids = g_strsplit(text, " ", -1);
g_assert_cmpstr(peer_ids[0], ==, "ROOM_OK");
len = g_strv_length(peer_ids);
/* There are peers in the room already. We need to start negotiation
* (exchange SDP and ICE candidates) and transmission of media. */
if (len > 1 && strlen(peer_ids[1]) > 0) {
g_print("Found %i peers already in room\n", len - 1);
app_state = ROOM_CALL_OFFERING;
for (ii = 1; ii < len; ii++) {
gchar* peer_id = g_strdup(peer_ids[ii]);
g_print("Negotiating with peer %s\n", peer_id);
/* This might fail asynchronously */
call_peer(peer_id);
peers = g_list_prepend(peers, peer_id);
}
}
g_strfreev(peer_ids);
return;
}
int
main(int argc, char* argv[])
{
GOptionContext* context;
GError* error = NULL;
context = g_option_context_new("- gstreamer webrtc sendrecv demo");
g_option_context_add_main_entries(context, entries, NULL);
g_option_context_add_group(context, gst_init_get_option_group());
if (!g_option_context_parse(context, &argc, &argv, &error)) {
g_printerr("Error initializing: %s\n", error->message);
return -1;
}
if (!check_plugins())
return -1;
if (!room_id) {
g_printerr("--room-id is a required argument\n");
return -1;
}
if (!local_id)
local_id = g_strdup_printf("%s-%i", g_get_user_name(),
g_random_int_range(10, 10000));
/* Sanitize by removing whitespace, modifies string in-place */
g_strdelimit(local_id, " \t\n\r", '-');
g_print("Our local id is %s\n", local_id);
if (!server_url)
server_url = g_strdup(default_server_url);
/* Don't use strict ssl when running a localhost server, because
* it's probably a test server with a self-signed certificate */
{
GstUri* uri = gst_uri_from_string(server_url);
if (g_strcmp0("localhost", gst_uri_get_host(uri)) == 0 ||
g_strcmp0("127.0.0.1", gst_uri_get_host(uri)) == 0)
strict_ssl = FALSE;
gst_uri_unref(uri);
}
loop = g_main_loop_new(NULL, FALSE);
connect_to_websocket_server_async();
g_main_loop_run(loop);
gst_element_set_state(GST_ELEMENT(pipeline), GST_STATE_NULL);
g_print("Pipeline stopped\n");
gst_object_unref(pipeline);
g_free(server_url);
g_free(local_id);
g_free(room_id);
return 0;
}
Related
My GStreamer version is 1.17, cross-compiled using instructions from here.
Here is my GStreamer pipeline:
appsrc name=framesrc0 do-timestamp=true format=time ! video/x-raw,width=640,height=480,framerate=30/1,format=NV12 ! queue ! x264enc ! queue ! h264parse ! mpegtsmux ! filesink name=mysink location=./myfile.ts
I feed NV12 frames to appsrc using the function below (640*480*1.5 = 460800 bytes).
bool BelGst::FeedData0(uint8_t *buf, uint32_t len)
{
GstFlowReturn ret;
GstBuffer *buffer;
GstMapInfo info;
timespec ts_beg, ts_end;
uint32_t time_ms;
clock_gettime(CLOCK_MONOTONIC, &ts_beg);
ret = gst_buffer_pool_acquire_buffer (pool0, &buffer, NULL);
if (G_UNLIKELY (ret != GST_FLOW_OK)) {
cout << "BufferPool pool0 failed" << endl;
return FALSE;
}
clock_gettime(CLOCK_MONOTONIC, &ts_end);
time_ms = (ts_end.tv_sec - ts_beg.tv_sec)*1000 + (ts_end.tv_nsec - ts_beg.tv_nsec) / 1e6;
cout << "Buffer pool acquire time = " << time_ms << "ms" << endl;
/* Set its timestamp and duration */
GST_BUFFER_TIMESTAMP(buffer) = timestamp0;
GST_BUFFER_DURATION(buffer) = gst_util_uint64_scale(1, GST_SECOND, 30);
GST_BUFFER_OFFSET(buffer) = offset0++;
timestamp0 += GST_BUFFER_DURATION(buffer);
gst_buffer_map(buffer, &info, GST_MAP_WRITE);
memcpy(info.data, buf, len);
gst_buffer_unmap(buffer, &info);
g_signal_emit_by_name(app_source0, "push-buffer", buffer, &ret);
gst_buffer_unref(buffer);
return TRUE;
}
I've set up the buffer pool as shown below:
void BufferPoolSetup(GstBufferPool *&pool)
{
GstStructure *config;
int size, min, max;
GstCaps *caps;
pool = gst_buffer_pool_new();
/* get config structure */
config = gst_buffer_pool_get_config(pool);
size = 640*480*1.5;
min = 1;
max = 4;
caps = gst_caps_from_string("video/x-raw");
/* set caps, size, minimum and maximum buffers in the pool */
gst_buffer_pool_config_set_params (config, caps, size, min, max);
gst_caps_unref(caps);
gst_buffer_pool_set_config (pool, config);
/* and activate */
gst_buffer_pool_set_active (pool, TRUE);
return;
}
When I run the pipeline, I see that gst_buffer_pool_acquire_buffer takes somewhere between 20 ms and 60 ms. Could someone point out if there is something wrong in my approach? Am I missing something?
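For comparison, here is a minimal sketch (my assumption, not your code) of a pool configured with the exact NV12 caps from the pipeline and a few more buffers. With only min=1/max=4 generic buffers, gst_buffer_pool_acquire_buffer can block until a buffer travels back from downstream, which could explain waits in the tens of milliseconds:
#include <gst/gst.h>
#include <gst/video/video.h>
/* Sketch only: size the pool from GstVideoInfo for the real NV12 caps and
 * allow more buffers so acquire is less likely to wait for downstream. */
void BufferPoolSetupNV12(GstBufferPool *&pool)
{
    GstStructure *config;
    GstVideoInfo info;
    GstCaps *caps;
    pool = gst_buffer_pool_new();
    config = gst_buffer_pool_get_config(pool);
    caps = gst_caps_from_string("video/x-raw,format=NV12,width=640,height=480,framerate=30/1");
    gst_video_info_from_caps(&info, caps);   /* info.size == 460800 for NV12 640x480 */
    gst_buffer_pool_config_set_params(config, caps, GST_VIDEO_INFO_SIZE(&info),
                                      4 /* min */, 16 /* max */);
    gst_caps_unref(caps);
    gst_buffer_pool_set_config(pool, config);
    gst_buffer_pool_set_active(pool, TRUE);
}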
bool demuxDone = false;
gboolean
autopluggerCallback (GstElement * elem, GstPad *pad, GstCaps * caps)
{
if (cmpType(caps, "video/x-h264")) {
relayVideoPad = pad;
demuxDone = true;
}
if (cmpType(caps, "audio/x-ac3")) {
relayAudioPad = pad;
demuxDone = true;
}
if (demuxDone) {
return FALSE;
}
return TRUE;
}
I connected an autoplug-continue signal handler to uridecodebin. My goal is to prevent it from creating anything after the tsdemux so I can connect the video/audio pads to flvmux myself.
The problem I am having is that one more element is still created: the multiqueue connected right after tsdemux0. Why? I tried to detect the creation of the demuxer by catching the element-added signal instead of waiting for video/x-h264, but the result is the same.
The resulting pipeline is dumped to dot:
http://pastebin.com/acBUdfpi
Well, I can probably just connect the multiqueue to the flvmux, but then I do not know how to get the multiqueue pointer. I tried gst_pad_get_peer followed by gst_pad_get_parent_element (to go from the demuxer's video src pad to the next element), but gst_pad_get_parent_element returns NULL even though the peer is non-NULL.
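For what it's worth, here is a minimal sketch of the traversal I think you are after (the names are mine, not from the question). The key point is that the function is gst_pad_get_parent_element(); it returns a new reference, or NULL when the peer pad's parent is not an element (for example a proxy pad inside a ghost pad):
/* Sketch: go from a demuxer src pad to the element that owns its peer pad,
 * e.g. the auto-plugged multiqueue. The caller must unref the result. */
GstElement *get_downstream_element(GstPad *demux_src_pad)
{
    GstPad *peer = gst_pad_get_peer(demux_src_pad);   /* sink pad of the next element */
    GstElement *next = NULL;
    if (peer != NULL) {
        next = gst_pad_get_parent_element(peer);
        gst_object_unref(peer);
    }
    return next;
}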
I need to mux KLV metadata into an H.264 stream. I have created an application, but the stream plays only as long as KLV data is being inserted. When I stop pushing KLV data, the whole stream stops. What is the right way to mux asynchronous KLV data with mpegtsmux?
The KLV data needs to be inserted into the following working pipeline:
v4l2src input-src=Camera ! videorate drop-only=true ! 'video/x-raw, format=(string)NV12, width=1920, height=1088, framerate=25/1' ! ce_h264enc target-bitrate=6000000 idrinterval=25 intraframe-interval=60 ! queue ! mpegtsmux alignment=7 ! udpsink host=192.168.0.1 port=3000 -v
This pipeline is built in the application. To insert the KLV metadata, an appsrc is created:
appSrc = gst_element_factory_make("appsrc", nullptr);
gst_app_src_set_caps (GST_APP_SRC (appSrc), gst_caps_new_simple("meta/x-klv", "parsed", G_TYPE_BOOLEAN, TRUE, "sparse", G_TYPE_BOOLEAN, TRUE, nullptr));
g_object_set(appSrc, "format", GST_FORMAT_TIME, nullptr);
Then appsrc is linked to the pipeline:
gst_bin_add(GST_BIN(pipeline), appSrc);
gst_element_link(appSrc, mpegtsmux);
Here is the push function:
void AppSrc::pushData(const std::string &data)
{
GstBuffer *buffer = gst_buffer_new_allocate(nullptr, data.size(), nullptr);
GstMapInfo map;
GstClock *clock;
GstClockTime abs_time, base_time;
gst_buffer_map (buffer, &map, GST_MAP_WRITE);
memcpy(map.data, data.data(), data.size());
gst_buffer_unmap (buffer, &map);
GST_OBJECT_LOCK (element);
clock = GST_ELEMENT_CLOCK (element);
base_time = GST_ELEMENT (element)->base_time;
gst_object_ref (clock);
GST_OBJECT_UNLOCK (element);
abs_time = gst_clock_get_time (clock);
gst_object_unref (clock);
GST_BUFFER_PTS (buffer) = abs_time - base_time;
GST_BUFFER_DURATION (buffer) = gst_util_uint64_scale_int (1, GST_SECOND, 1);
gst_app_src_push_buffer(GST_APP_SRC(element), buffer);
}
GStreamer version is 1.6.1.
What can be wrong with my code? I'd appreciate your help.
I can push dummy KLV packets to keep the video stream going, but I don't want to pollute the stream, and I am sure there should be a more delicate solution.
I have found that I can send an event with GST_STREAM_FLAG_SPARSE, which should be appropriate for subtitles. But as a result I get no output at all.
GstEvent* stream_start = gst_event_new_stream_start("klv-04");
gst_event_set_stream_flags(stream_start, GST_STREAM_FLAG_SPARSE);
GstPad* pad = gst_element_get_static_pad(GST_ELEMENT(element), "src");
gst_pad_push_event (pad, stream_start);
While debugging I found that, after applying the following patch to GStreamer and using GST_STREAM_FLAG_SPARSE, the stream doesn't stop when the appsrc stops pushing packets.
diff --git a/libs/gst/base/gstcollectpads.c b/libs/gst/base/gstcollectpads.c
index 8edfe41..14f9926 100644
--- a/libs/gst/base/gstcollectpads.c
+++ b/libs/gst/base/gstcollectpads.c
@@ -1440,7 +1440,8 @@ gst_collect_pads_recalculate_waiting (GstCollectPads * pads)
if (!GST_COLLECT_PADS_STATE_IS_SET (data, GST_COLLECT_PADS_STATE_WAITING)) {
/* start waiting */
gst_collect_pads_set_waiting (pads, data, TRUE);
- result = TRUE;
+ if (!GST_COLLECT_PADS_STATE_IS_SET (data, GST_COLLECT_PADS_STATE_LOCKED))
+ result = TRUE;
}
}
}
Anyway, the receiver stops updating the screen 10 seconds after the last KLV packet.
This is a bit of an old thread, but in my experience, if there is no queue between the appsrc and the muxer, you will get this behavior. I would change your:
gst_element_link(appSrc, mpegtsmux);
To this:
gst_element_link(appSrc, appSrcQueue);
gst_element_link(appSrcQueue, mpegtsmux);
I'm not sure whether mpegtsmux has that capability, but the muxer we used has a property called do-timestamping, and when that was set to TRUE we had a better experience.
Another tip I would give is to use the gst-inspect tool to see what options each element has.
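For completeness, a minimal sketch of the queue insertion described above (appSrcQueue and the surrounding variable names are assumptions, not code from the original application):
/* Create a queue, add it to the pipeline and link appsrc -> queue -> mpegtsmux. */
GstElement *appSrcQueue = gst_element_factory_make("queue", "klv-queue");
gst_bin_add(GST_BIN(pipeline), appSrcQueue);
gst_element_link(appSrc, appSrcQueue);
gst_element_link(appSrcQueue, mpegtsmux);
gst_element_sync_state_with_parent(appSrcQueue);   /* only needed if the pipeline is already running */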
I have written code for appsrc to appsink and it works; I can see the actual buffers. The data is encoded in H.264 (vpuenc codec=avc). Now I want to save it to a file (filesink). How do I approach that?
app:
int main(int argc, char *argv[]) {
gst_init (NULL, NULL);
GstElement *pipeline, *sink;
gchar *descr;
GError *error = NULL;
GstAppSink *appsink;
descr = g_strdup_printf (
"mfw_v4lsrc device=/dev/video1 capture_mode=0 ! " // grab from mipi camera
"ffmpegcolorspace ! vpuenc codec=avc ! "
"appsink name=sink"
);
pipeline = gst_parse_launch (descr, &error);
if (error != NULL) {
g_print ("could not construct pipeline: %s\n", error->message);
g_error_free (error);
exit (-1);
}
gst_element_set_state(pipeline, GST_STATE_PAUSED);
sink = gst_bin_get_by_name (GST_BIN (pipeline), "sink");
appsink = (GstAppSink *) sink;
gst_app_sink_set_max_buffers ( appsink, 2); // limit number of buffers queued
gst_app_sink_set_drop( appsink, true ); // drop old buffers in queue when full
gst_element_set_state (pipeline, GST_STATE_PLAYING);
int i = 0;
while( !gst_app_sink_is_eos(appsink) )
{
GstBuffer *buffer = gst_app_sink_pull_buffer(appsink);
uint8_t* data = (uint8_t*)GST_BUFFER_DATA(buffer);
uint32_t size = GST_BUFFER_SIZE(buffer);
gst_buffer_unref(buffer);
}
return 0;
}
If, as mentioned in the comments, what you actually want to know is how to stream video over the network with GStreamer, you should probably close this question, because you're on the wrong path. You don't need an appsink or a filesink for that. What you'll want to investigate are the GStreamer elements related to RTP, RTSP, RTMP, MPEG-TS, or even MJPEG (if your image size is small enough).
Here are two basic send/receive video stream pipelines:
gst-launch-0.10 v4l2src ! ffmpegcolorspace ! videoscale ! video/x-raw-yuv,width=640,height=480 ! vpuenc ! h264parse ! rtph264pay ! udpsink host=localhost port=5555
gst-launch-0.10 udpsrc port=5555 ! application/x-rtp,encoding-name=H264,payload=96 ! rtph264depay ! h264parse ! ffdec_h264 ! videoconvert ! ximagesink
In this situation you don't write your own while loop. You register callbacks and wait for buffers (GStreamer 0.10) to arrive. If you're using GStreamer 1.0, you use samples instead of buffers. Samples are a huge pain in the ass compared to buffers but oh well.
Register the callback:
GstAppSinkCallbacks* appsink_callbacks = (GstAppSinkCallbacks*)malloc(sizeof(GstAppSinkCallbacks));
appsink_callbacks->eos = NULL;
appsink_callbacks->new_preroll = NULL;
appsink_callbacks->new_sample = app_sink_new_sample;
gst_app_sink_set_callbacks(GST_APP_SINK(appsink), appsink_callbacks, (gpointer)pointer_to_data_passed_to_the_callback, free);
And your callback:
GstFlowReturn app_sink_new_sample(GstAppSink *sink, gpointer user_data) {
prog_data* pd = (prog_data*)user_data;
GstSample* sample = gst_app_sink_pull_sample(sink);
if(sample == NULL) {
return GST_FLOW_ERROR;
}
GstBuffer* buffer = gst_sample_get_buffer(sample);
GstMemory* memory = gst_buffer_get_all_memory(buffer);
GstMapInfo map_info;
if(! gst_memory_map(memory, &map_info, GST_MAP_READ)) {
gst_memory_unref(memory);
gst_sample_unref(sample);
return GST_FLOW_ERROR;
}
//render using map_info.data
gst_memory_unmap(memory, &map_info);
gst_memory_unref(memory);
gst_sample_unref(sample);
return GST_FLOW_OK;
}
You can keep your while loop as it is--using gst_app_sink_is_eos()--but make sure to put a sleep in it. Most of the time I use something like the following instead:
GMainLoop* loop = g_main_loop_new(NULL, FALSE);
g_main_loop_run(loop);
g_main_loop_unref(loop);
Note: Unless you need to do something special with the data, you can use the "filesink" element directly.
A simpler option would be to write to the file directly in the appsink itself: in the callback, once a buffer is complete, write its data to the file, and make sure you close the file on EOS.
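A minimal sketch of that callback-based approach, reusing the 1.0-style new_sample callback shape from above (the FILE* passed as user_data is my assumption; open it with fopen() before starting the pipeline, fclose() it when you handle EOS, and include <stdio.h>):
GstFlowReturn write_sample_to_file(GstAppSink *sink, gpointer user_data)
{
    FILE *out = (FILE*)user_data;   /* opened elsewhere, closed on EOS */
    GstSample *sample = gst_app_sink_pull_sample(sink);
    GstBuffer *buffer;
    GstMapInfo map;
    if (sample == NULL) {
        return GST_FLOW_ERROR;
    }
    buffer = gst_sample_get_buffer(sample);
    if (gst_buffer_map(buffer, &map, GST_MAP_READ)) {
        fwrite(map.data, 1, map.size, out);   /* append the encoded H.264 data */
        gst_buffer_unmap(buffer, &map);
    }
    gst_sample_unref(sample);
    return GST_FLOW_OK;
}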
Hope that helps.
I have written GStreamer code for camera live streaming and recording at the same time.
My pipeline looks like this:
              / [ queue1 | videosink ]
v4l2src | tee
              \ [ queue2 | filesink ]
Currently both live streaming and file recording are working together.
Now I need to start the pipeline with only one queue, i.e. queue1 (the live-streaming queue). After a while I need to add the recording queue dynamically and later remove it dynamically as well.
My working code is given below:
pipeline = gst_pipeline_new ("rv_camera");
/*Create source element. We use mfw_v4lsrc from Freescale as source */
source= gst_element_factory_make (GSTREAMER_SOURCE,"camera-source");
g_object_set(G_OBJECT(source),"device",camDeviceName, (char *)0);
/*Set default properties of mfw_v4lsrc */
g_object_set(G_OBJECT(source),"capture-width", CAMERA_CAPTURE_WIDTH,
"capture-height", CAMERA_CAPTURE_HEIGHT,
"sensor-width", CAMERA_SENSOR_WIDTH,
"sensor-height", CAMERA_SENSOR_HEIGHT,
"preview", CAMERA_PREVIEW_DISPLAY,
"preview-width",CAMERA_PREVIEW_WIDTH,
"preview-height",CAMERA_PREVIEW_HEIGHT,
"fps-n",CAMERA_FRAMERATE,
"rotate",mirror_effect,
(char *)0);
/* Tee that copies the stream to multiple outputs */
tee = gst_element_factory_make("tee", "tee");
/* Queue creates new thread for the stream */
screen_queue = gst_element_factory_make("queue", "screen_queue");
/*Create sink element. We use mfw_v4lsink from Freescale as sink. fbdevsink is not used as
it directly writes into framebuffer which is not desired*/
sink= gst_element_factory_make (GSTREAMER_SINK,"video-output");
capture_queue = gst_element_factory_make("queue", "capture_queue");
encoder = gst_element_factory_make("mfw_vpuencoder", "encoder");
g_object_set(G_OBJECT(encoder),"codec-type",0,
"mirror-direction",0,
(char *)0);
clockoverlay = gst_element_factory_make("clockoverlay", "Timestamp");
g_object_set(G_OBJECT(clockoverlay),"time-format","%R %d-%b-%Y", (char *)0);
avimux = gst_element_factory_make("avimux", "avimux");
filesink = gst_element_factory_make("filesink", "file-output");
g_object_set(G_OBJECT(filesink),"location","/KPIT/OBITS/Blackbox/OBITS-SCNLog.avi", (char *)0);
/* Check if all elements are created or not*/
if (!pipeline || !source || !tee || !screen_queue || !sink || !capture_queue || !clockoverlay || !encoder || !avimux || !filesink) {
LOGERR((TEXT("GstreamerStream :: camInit: 1 One or more element(s) could not be created .... logerr\n")));
return CAM_STATUS_INIT_FAIL;
}
/* we add all elements into the pipeline */
gst_bin_add_many (GST_BIN (pipeline),source,tee,screen_queue, sink, capture_queue,clockoverlay,encoder,avimux,filesink, (char *)0);
/* we link the elements together */
if( gst_element_link_many( source, tee, NULL ) && gst_element_link_many( tee,screen_queue,sink, NULL ) &&
gst_element_link_many( tee,capture_queue,clockoverlay,encoder,avimux,filesink, NULL ))
{
bus = gst_pipeline_get_bus(GST_PIPELINE(pipeline));
/*Add watch to look for error events */
gst_bus_add_watch(bus, process_events, this);
gst_object_unref(bus);
}
gst_element_set_state (pipeline, GST_STATE_PLAYING);
Kindly let me know how I can add or remove a queue dynamically.
I'd appreciate it if someone could provide sample code for this.
Keep the tee in the pipeline; you can request and release pads from the tee at any time during playback. Request a pad, add your new elements to the pipeline, link them, and set them to PLAYING too. When you are done, unlink this branch and remember to send EOS to it so the recording is properly finalized. After you receive the EOS message from the filesink, you can shut down, remove and unref the branch you unlinked.
If you are using 0.10 (don't use it, move to 1.0), then you might need to send a segment event to the new branch once you add it.
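A rough GStreamer 1.0 sketch of that add/remove flow (the element names, the x264enc/avimux stand-ins for your Freescale elements, and the omission of a blocking pad probe are all assumptions; in practice you would often block the tee's src pad with a probe before unlinking):
static GstPad *rec_teepad;   /* tee request pad, kept so it can be released later */
/* Start recording: request a tee pad, build the branch, link it, sync states. */
static void start_recording(GstElement *pipeline, GstElement *tee)
{
    GstElement *queue = gst_element_factory_make("queue",    "rec_queue");
    GstElement *enc   = gst_element_factory_make("x264enc",  NULL);   /* stand-in encoder */
    GstElement *mux   = gst_element_factory_make("avimux",   NULL);
    GstElement *sink  = gst_element_factory_make("filesink", NULL);
    GstPad *sinkpad;
    g_object_set(sink, "location", "record.avi", NULL);
    gst_bin_add_many(GST_BIN(pipeline), queue, enc, mux, sink, NULL);
    gst_element_link_many(queue, enc, mux, sink, NULL);
    rec_teepad = gst_element_get_request_pad(tee, "src_%u");
    sinkpad = gst_element_get_static_pad(queue, "sink");
    gst_pad_link(rec_teepad, sinkpad);
    gst_object_unref(sinkpad);
    gst_element_sync_state_with_parent(queue);
    gst_element_sync_state_with_parent(enc);
    gst_element_sync_state_with_parent(mux);
    gst_element_sync_state_with_parent(sink);
}
/* Stop recording: unlink the branch, push EOS into it, release the tee pad.
 * Once you know the filesink has seen EOS (see the answer above), set the
 * branch elements to GST_STATE_NULL, gst_bin_remove() them and unref.
 * 'queue' is the branch's first element, kept around from start_recording(). */
static void stop_recording(GstElement *tee, GstElement *queue)
{
    GstPad *sinkpad = gst_element_get_static_pad(queue, "sink");
    gst_pad_unlink(rec_teepad, sinkpad);
    gst_pad_send_event(sinkpad, gst_event_new_eos());
    gst_object_unref(sinkpad);
    gst_element_release_request_pad(tee, rec_teepad);
    gst_object_unref(rec_teepad);
}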