place external live video frames from non supported V4L file into Gstreamer Qt , C++ Qthreads - c++

OS: Ubuntu 14.04
SDK: Qt
GStreamer: > 1.0
I am wondering how would I put continuously captured frames from a non supported V4L camera into GStreamer.
Actually my task is to grab frames from the camera and use only GStreamer to send them to different computer via UDP. But at the moment, I just want to display it on my machine.
What I did so far:
a) Implemented code in Qt for an IDS camera that captures frames and displays them in Qt as a live stream.
b) Separately, I have written ( or rather copied ) code that displays live streaming via gstreamer using a webcam that supports V4L file.
Now as I mentioned, I want to use gstreamer to display capture frames in Qt environment.
I have developed code in qt 5.5 which makes use of multithreading to run separate threads for gstreamer, capturing frames and GUI. The code has become quite long but I will try best to place minimum code here.
Issue: when I run the code with debug messages added, I can see frames continuously arriving from the capture thread into the main thread, and the GStreamer pipeline starts successfully, but I only get the debug message from `cb_need_data` once at the very beginning and nothing after that.
Source code is shown below.
streaming gstream class:
// Streams raw video frames (supplied externally through bufferFrame) into a
// GStreamer appsrc -> videoconvert -> autovideosink pipeline.
// Intended to live on its own QThread: start() blocks in a GLib main loop.
class StreamG : public QObject
{
Q_OBJECT
public:
explicit StreamG(QObject *parent = 0);
// Adds the elements to the pipeline, links them, installs the bus watch and
// connects the "need-data" callback. Returns false on any failure.
bool addLinkElements();
// appsrc "need-data" callback: pushes the current bufferFrame downstream.
// Static so it can be used as a plain C callback.
static void cb_need_data (GstElement *appsrc,
guint unused_size,
gpointer user_data);
static GMainLoop *loop;   // GLib main loop driven by start()
static char* bufferFrame; // latest camera frame, written by the capture thread
signals:
void sigFinish();         // emitted when stop() has run
public slots:
void start();             // blocking: plays the pipeline and runs the loop
void stop();              // quits the main loop
private:
GstElement *pipeline, *source, *sink, *convert;
GstBus *bus;
GstMessage *msg;          // NOTE(review): unused in the shown code
GstStateChangeReturn ret; // NOTE(review): unused in the shown code
};
Streaming using gstreaming cpp file below
// Definitions for the class-wide statics declared in StreamG.
GMainLoop* StreamG::loop;
char* StreamG::bufferFrame = NULL; // latest frame handed over by the capture thread
// appsrc "need-data" callback: copies the most recent camera frame
// (bufferFrame) into a fresh GstBuffer, timestamps it and pushes it
// into the pipeline. Does nothing when no frame is available yet.
void StreamG::cb_need_data (GstElement *appsrc,
guint unused_size,
gpointer user_data )
{
qDebug()<< " cb_need_data is called ...";
static GstClockTime timestamp = 0;
GstBuffer *buffer;
guint size;
GstFlowReturn ret;
guchar *data1;
GstMapInfo map;
data1 = (guchar *)bufferFrame;
// Buffer size must match the caps set on appsrc in the constructor
// (video/x-raw, RGB, 640x360 -> 3 bytes per pixel). The previous value
// of 385*288*2 did not match the advertised caps, so downstream could
// not consume the data.
size = 640*360*3;
if( data1 )
{
buffer = gst_buffer_new_allocate (NULL, size, NULL);
gst_buffer_map (buffer, &map, GST_MAP_WRITE);
memcpy( (guchar *)map.data, data1, gst_buffer_get_size( buffer ) );
gst_buffer_unmap (buffer, &map); // BUGFIX: mapping was never released
GST_BUFFER_PTS (buffer) = timestamp;
GST_BUFFER_DURATION (buffer) = gst_util_uint64_scale_int (1, GST_SECOND, 2);
timestamp += GST_BUFFER_DURATION (buffer);
g_signal_emit_by_name (appsrc, "push-buffer", buffer, &ret);
// BUGFIX: the "push-buffer" action signal does NOT take ownership of the
// buffer (unlike gst_app_src_push_buffer()), so our reference must be
// dropped here or every frame leaks.
gst_buffer_unref (buffer);
if (ret != GST_FLOW_OK)
{
// something wrong, stop pushing //
g_debug("push buffer returned %d for %d bytes \n", ret, size);
g_main_loop_quit (loop);
}
}
}
// Bus watch for the StreamG pipeline: quits the main loop on EOS and on
// error messages; everything else is ignored. Always keeps the watch alive.
static gboolean bus_call (GstBus *bus, GstMessage *msg, gpointer data)
{
GMainLoop *loop = (GMainLoop *) data;
const GstMessageType type = GST_MESSAGE_TYPE (msg);
if (type == GST_MESSAGE_EOS) {
g_print ("End of stream\n");
qDebug() <<" end of msg in gstreamer";
g_main_loop_quit (loop);
} else if (type == GST_MESSAGE_ERROR) {
gchar *debug;
GError *error;
gst_message_parse_error (msg, &error, &debug);
g_free (debug);
g_printerr ("Error: %s\n", error->message);
qDebug() <<" end of msg in gstreamer";
g_error_free (error);
g_main_loop_quit (loop);
}
return TRUE;
}
// Constructor: initializes GStreamer, creates the GLib main loop and the
// appsrc / autovideosink / videoconvert elements, and fixes appsrc's caps.
StreamG::StreamG(QObject *parent) : QObject(parent)
{
// Initialize GStreamer /
gst_init( NULL, NULL );
loop = g_main_loop_new( NULL, FALSE );
// Create the elements
source = gst_element_factory_make ("appsrc", "source");
sink = gst_element_factory_make ("autovideosink", "sink");
convert =gst_element_factory_make("videoconvert","convert");
g_assert( convert );
pipeline = gst_pipeline_new ("test-pipeline");
// Earlier caps variant that also pinned the framerate; kept for reference.
/* g_object_set (G_OBJECT (source), "caps",
gst_caps_new_simple ("video/x-raw",
"format", G_TYPE_STRING, "RGB",
"width", G_TYPE_INT, 640,
"height", G_TYPE_INT, 360,
"framerate", GST_TYPE_FRACTION, 1, 1,
NULL), NULL);*/
// NOTE(review): these caps advertise 640x360 RGB (3 bytes/pixel), but
// cb_need_data pushes 385*288*2 bytes per buffer. The two must agree or
// negotiation/data flow fails — confirm the camera's real format and make
// both places match.
g_object_set (G_OBJECT (source), "caps",
gst_caps_new_simple ("video/x-raw",
"format", G_TYPE_STRING, "RGB",
"width", G_TYPE_INT, 640,
"height", G_TYPE_INT, 360, NULL), NULL);
}
void StreamG::start()
{
addLinkElements();
gst_element_set_state (pipeline, GST_STATE_PLAYING);
// Iterate
g_print ("Running...Gstreamer\n");
g_main_loop_run (loop);
// Out of the main loop, clean up nicely
g_print ("Returned, stopping playback\n");
gst_element_set_state (pipeline, GST_STATE_NULL);
}
// Asks the streaming loop to finish and notifies listeners.
// BUGFIX: stop() typically runs on a different thread than start(), which is
// still blocked inside g_main_loop_run(). The previous code unreffed the
// pipeline, bus and loop here, so start() then touched freed objects the
// moment the loop returned (use-after-free). We now only quit the loop;
// start() resets the pipeline state after the loop exits, and the GStreamer
// objects stay alive for a possible restart (reclaimed at process exit).
void StreamG::stop()
{
g_print ("Deleting pipeline\n");
g_main_loop_quit(loop);
emit sigFinish();
}
// Adds appsrc/videoconvert/autovideosink to the pipeline, links them,
// installs the bus watch and connects the "need-data" callback.
// Returns false (releasing the pipeline) if an element is missing or a
// link fails.
bool StreamG::addLinkElements()
{
if (!pipeline || !source || !sink || !convert )
{
g_printerr ("Not all elements could be created.\n");
return false;
}
// g_object_set (G_OBJECT ( source ), "device", "/dev/video0", NULL);
gst_bin_add_many( GST_BIN (pipeline), source , sink, convert, NULL );
if (gst_element_link (convert, sink) != TRUE)
{
g_printerr ("Elements could not be linked convert - sink.\n");
gst_object_unref (pipeline);
// BUGFIX: unreffing the pipeline also destroys the elements it owns;
// clear the dangling pointers so start()/stop() cannot touch freed memory.
pipeline = NULL;
source = sink = convert = NULL;
return false;
}
if (gst_element_link (source, convert) != TRUE)
{
g_printerr ("Elements could not be linked source -convert.\n");
gst_object_unref (pipeline);
pipeline = NULL;           // BUGFIX: same dangling-pointer fix as above
source = sink = convert = NULL;
return false;
}
g_print("Linked all the Elements together\n");
/* we add a message handler */
bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
gst_bus_add_watch (bus, bus_call, loop);
// appsrc configuration: stream-type 0 = GST_APP_STREAM_TYPE_STREAM,
// buffers are timestamped in GST_FORMAT_TIME.
g_object_set (G_OBJECT (source),
"stream-type", 0,
"format", GST_FORMAT_TIME, NULL);
g_signal_connect (source, "need-data", G_CALLBACK (cb_need_data), NULL);
return true;
}
Functions in the main widget — I have included only the important member variables and functions.
// Main widget: owns the IDS/uEye camera, receives frame events on a worker
// thread and forwards the newest frame to the StreamG streaming thread.
// NOTE: excerpt — the "......." lines stand in for members the author omitted.
class UEYEMain : public QWidget
{
Q_OBJECT
public:
int openCamera( bool bStartLive );
INT _GetImageID (char* pbuf);//
bool _AllocImages(); //function for IDS camera
void onLive(); // function for IDS camera
void transferLastFrameToGstream();
private slots:
void eventreceived (int event); // slot receiving camera events; frames are handed to StreamG's static bufferFrame
private:
Ui::UEYEMain *ui;
.......
.......
StreamG* StreamingG;        // streaming object living on threadForStream
QElapsedTimer m_Time;       // used to throttle the forwarded frame rate
QRgb m_table[256];          // grayscale palette for local drawing
int m_nUpdateTicks;         // minimum ms between forwarded frames (0 = none)
QThread* threadForStream;   // thread running StreamG::start()
char *m_pLastBuffer;        // most recent frame from the driver
EventThread *m_pEvFrame;    // separate thread that receives frame events
void ProcessFrame(); // called for every received frame
void DrawImage (char *pBuffer); // draws a frame onto the Qt widget (testing aid)
};
// Slot invoked by the event thread for every uEye SDK event. Frame events
// trigger ProcessFrame(), which hands the frame to the GStreamer side.
// NOTE: excerpt — "{... some other cases" stands in for cases the author cut.
void UEYEMain::eventreceived (int event)
{
bool bUpdateCameraList = false; // NOTE(review): unused in the shown excerpt
switch (event)
{... some other cases
case IS_SET_EVENT_FRAME:
qDebug() << " new frame received";
if (!m_hCamera) // camera handle gone: ignore late frame events
{
break;
}
ProcessFrame ();
break;
default:
break;
}
}
void UEYEMain::transferLastFrameToGstream()
{
//memcpy( StreamingG->bufferFrame, m_pLastBuffer, sizeof(m_pLastBuffer) );
if(m_pLastBuffer ) // just pointing buffer to streamG variable
{
StreamingG->bufferFrame = m_pLastBuffer;
}
}
// Fetches the newest frame from the uEye driver and, when the optional
// frame-rate limit allows, forwards it to the GStreamer streaming thread.
// NOTE: excerpt — countFrameDebug, bDraw, nDisplayed and the m_* members
// are declared elsewhere in the file/class.
void UEYEMain::ProcessFrame ()
{
INT dummy = 0;
char *pLast = NULL, *pMem = NULL;
qDebug() << " counter for frame recv -->" << countFrameDebug;
countFrameDebug++;
// Ask the driver for the most recently filled sequence buffer.
is_GetActSeqBuf (m_hCamera, &dummy, &pMem, &pLast);
m_pLastBuffer = pLast;
if (m_bReady)
{
m_bReady = FALSE;
update();
if (m_pLastBuffer )
{
int nTicks = 0;
// Frame rate limit ?
if (m_nUpdateTicks > 0)
{
nTicks = m_Time.elapsed();
bDraw = (nTicks >= m_nUpdateTicks) ? true : false;
}
if (bDraw)
{
nDisplayed++;
m_Time.restart();
// Publish the frame pointer for StreamG::cb_need_data.
transferLastFrameToGstream();
//DrawImage(m_pLastBuffer); // this func succesffully stream video on Qt widget
}
}
}
}
// Starts free-run capture on the IDS camera and launches the streaming
// thread. No-op if live capture is already running.
void UEYEMain::onLive()
{
// Cleanup: removed unused locals (nRet, start, str) from the original.
if (!m_bLive)
{
m_bLive = TRUE;
m_bReady = TRUE;
is_CaptureVideo (m_hCamera, IS_DONT_WAIT); // non-blocking capture start
threadForStream->start();                  // runs StreamG::start()
}
}
The above function onLive() is called from another thread workThreadFinished
connect(m_workThread, SIGNAL(finished()), this, SLOT(workThreadFinished()), Qt::QueuedConnection);
The following is the output I get, and I don't see that StreamG::cb_need_data has been called more than once.
no of camera detected : 1
started event 2 detection!// this thread acquire frames
started event 8 detection!
Linked all the Elements together // gst
Running...Gstreamer //gstreamer
cb_need_data is called ... // gstreamer
new frame received //
counter for frame recv --> 0
new frame received
counter for frame recv --> 1
new frame received
counter for frame recv --> 2
new frame received
........... and so on

Related

How to feed an Opencv frame into Nvidia Deepstream pipeline?

I am struggling to find a way to input a single cv::Mat frame into an Nvidia DeepStream pipeline using C++. I tried the code below but I received the following error message:
ERROR from element gstappsrc: Internal data stream error.
Error details: gstbasesrc.c(3055): gst_base_src_loop (): /GstPipeline:dst_opencv/GstAppSrc:source:
streaming stopped, reason not-negotiated (-4)
Returned, stopping playback
Deleting pipeline
If anyone have an idea how to do it or show me where I am doing wrong, I will be very thankful.
#include <gst/gst.h>
#include <glib.h>
#include <math.h>
#include <string.h>
#include <sys/time.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "gstnvdsmeta.h"
#include "nvdsmeta_schema.h"
#include <gst/app/gstappsrc.h>
/* The muxer output resolution must be set if the input streams will be of
* different resolution. The muxer will scale all the input frames to this
* resolution. */
#define MUXER_OUTPUT_WIDTH 1920
#define MUXER_OUTPUT_HEIGHT 1080
#define TILED_OUTPUT_WIDTH 1920
#define TILED_OUTPUT_HEIGHT 1080
/* Muxer batch formation timeout, for e.g. 40 millisec. Should ideally be set
* based on the fastest source's framerate. */
#define MUXER_BATCH_TIMEOUT_USEC 4000000
/* NVIDIA Decoder source pad memory feature. This feature signifies that source
* pads having this capability will push GstBuffers containing cuda buffers. */
#define GST_CAPS_FEATURES_NVMM "memory:NVMM"
// detection models
#define MODEL_CONFIG "dstest3_pgie_config.txt"
//#define MODEL_CONFIG "yoloV2_pgie_config.txt"
//#define MODEL_CONFIG "fd_lpd_config.txt"
#define FPS_PRINT_INTERVAL 300
// Bus watch for the DeepStream pipeline: logs warnings, quits the main
// loop on EOS or on an error. The watch itself always stays installed.
static gboolean bus_call (GstBus * bus, GstMessage * msg, gpointer data)
{
GMainLoop *loop = (GMainLoop *) data;
const GstMessageType type = GST_MESSAGE_TYPE (msg);
if (type == GST_MESSAGE_EOS) {
g_print ("End of stream\n");
g_main_loop_quit (loop);
} else if (type == GST_MESSAGE_WARNING) {
gchar *debug;
GError *error;
gst_message_parse_warning (msg, &error, &debug);
g_printerr ("WARNING from element %s: %s\n",
GST_OBJECT_NAME (msg->src), error->message);
g_free (debug);
g_printerr ("Warning: %s\n", error->message);
g_error_free (error);
} else if (type == GST_MESSAGE_ERROR) {
gchar *debug;
GError *error;
gst_message_parse_error (msg, &error, &debug);
g_printerr ("ERROR from element %s: %s\n",
GST_OBJECT_NAME (msg->src), error->message);
if (debug)
g_printerr ("Error details: %s\n", debug);
g_free (debug);
g_error_free (error);
g_main_loop_quit (loop);
}
return TRUE;
}
//-------------------------------------------------------
// decodebin "pad-added" handler: when a video pad carrying NVMM memory
// appears, retarget the source bin's ghost "src" pad at it.
static void cb_newpad (GstElement * decodebin, GstPad * decoder_src_pad, gpointer data)
{
g_print ("In cb_newpad\n");
GstCaps *caps = gst_pad_get_current_caps (decoder_src_pad);
if (caps == NULL) {
// BUGFIX: gst_pad_get_current_caps() returns NULL before negotiation;
// the old code dereferenced it unconditionally.
g_printerr ("cb_newpad: pad has no caps yet\n");
return;
}
const GstStructure *str = gst_caps_get_structure (caps, 0);
const gchar *name = gst_structure_get_name (str);
GstElement *source_bin = (GstElement *) data;
GstCapsFeatures *features = gst_caps_get_features (caps, 0);
/* Need to check if the pad created by the decodebin is for video and not
* audio. */
if (!strncmp (name, "video", 5)) {
/* Link the decodebin pad only if decodebin has picked nvidia
* decoder plugin nvdec_*. We do this by checking if the pad caps contain
* NVMM memory features. */
if (gst_caps_features_contains (features, GST_CAPS_FEATURES_NVMM)) {
/* Get the source bin ghost pad */
GstPad *bin_ghost_pad = gst_element_get_static_pad (source_bin, "src");
if (!gst_ghost_pad_set_target (GST_GHOST_PAD (bin_ghost_pad),
decoder_src_pad)) {
g_printerr ("Failed to link decoder src pad to source bin ghost pad\n");
}
gst_object_unref (bin_ghost_pad);
} else {
g_printerr ("Error: Decodebin did not pick nvidia decoder plugin.\n");
}
}
// BUGFIX: gst_pad_get_current_caps() returns a reference the caller owns;
// the old code leaked it on every call.
gst_caps_unref (caps);
}
//-------------------------------------------------------
// "child-added" handler: recurses into nested decodebins and enables the
// legacy buffer API on nvv4l2decoder children so DeepStream receives
// NVMM buffers.
static void decodebin_child_added (GstChildProxy * child_proxy, GObject * object,gchar * name, gpointer user_data)
{
g_print ("Decodebin child added: %s\n", name);
const gboolean child_is_decodebin = (g_strrstr (name, "decodebin") == name);
const gboolean child_is_nvdecoder = (g_strstr_len (name, -1, "nvv4l2decoder") == name);
if (child_is_decodebin) {
// Watch the nested decodebin's children too.
g_signal_connect (G_OBJECT (object), "child-added",
G_CALLBACK (decodebin_child_added), user_data);
}
if (child_is_nvdecoder) {
g_print ("Seting bufapi_version\n");
g_object_set (object, "bufapi-version", TRUE, NULL);
}
}
//-------------------------------------------------------
// GDestroyNotify helper: frees a heap-allocated cv::Mat that was attached
// to a GstBuffer as user data.
void buffer_destroy(gpointer data)
{
cv::Mat *frame = (cv::Mat *) data;
delete frame;
}
//-----------------------------------------------------
// appsrc "need-data" callback: loads the image whose path was passed as
// user_data with OpenCV and pushes its raw bytes into the pipeline.
// NOTE(review): imread() returns packed BGR at the image's own size, while
// the appsrc caps in main() advertise I420 1200x600. That mismatch is the
// likely cause of the "not-negotiated (-4)" error — the caps must describe
// the pushed data (e.g. format BGR with the real width/height).
static gboolean cb_need_data(GstElement* appsrc,guint unused_size,gpointer user_data)
{
g_print("cb_need_data function \n");
GstBuffer* buffer;
GstMapInfo map;
guint height,width,channels;
GstFlowReturn ret;
guchar *data1;
g_print("userdata: %s \n",user_data);
cv::Mat frame=cv::imread((const char*)user_data, CV_LOAD_IMAGE_COLOR);
// BUGFIX: guard against a bad path / unreadable file; the old code would
// push a zero-sized buffer built from an empty Mat.
if (frame.empty()) {g_print("could not read image \n"); return false;}
height = frame.size().height;
width = frame.size().width;
channels = frame.channels();
data1 = (guchar *)frame.data;
gsize sizeInBytes = height*width*channels;
g_print("frame_height: %d \n",height);
g_print("frame_width: %d \n",width);
g_print("frame_channels: %d \n",channels);
g_print("frame_size: %d \n",sizeInBytes);
buffer=gst_buffer_new_allocate(NULL,sizeInBytes,NULL);
gst_buffer_map(buffer,&map,GST_MAP_WRITE);
memcpy( (guchar *)map.data, data1, gst_buffer_get_size( buffer ) );
gst_buffer_unmap (buffer, &map); // BUGFIX: release the mapping before pushing
g_signal_emit_by_name (appsrc, "push-buffer", buffer, &ret);
// BUGFIX: our reference must be dropped on BOTH paths; the old code
// returned early on error and leaked the buffer.
gst_buffer_unref(buffer);
if (ret != GST_FLOW_OK) {g_print("cv 2 gst got an error"); return false;}
g_print("cv converted to gst \n ");
return true;
}
//-------------------------------------------------------
// Buffer probe on pgie's src pad: prints the fpsdisplaysink's
// "last-message" property (the FPS text) for every buffer that passes.
static GstPadProbeReturn tiler_src_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info,gpointer u_data)
{
char *msg = NULL;
g_object_get(G_OBJECT(u_data),"last-message",&msg,NULL);
if (msg!=NULL) {
g_print("FPS =%s \n",msg);
// BUGFIX: g_object_get() on a string property returns a copy that the
// caller must free; the old code leaked it for every buffer.
g_free(msg);
}
return GST_PAD_PROBE_OK;
}
//-------------------------------------------------------
//------------------MAIN---------------------------------
//-------------------------------------------------------
// Entry point: builds appsrc -> videoconvert -> nvinfer -> nvmultistreamtiler
// -> nvvideoconvert -> nvdsosd -> fpsdisplaysink and feeds it frames read
// with OpenCV from the path given in argv[1] (see cb_need_data above).
int main(int argc,char** argv)
{
GMainLoop *loop;
GstElement *pipeline,*sink,*tiler,*nvvidconv,*nvosd,*nvsink,*pgie; //,*streammux
GstElement* appsrc,*conv;
GstBus *bus;
guint bus_watch_id;
GstPad *tiler_src_pad;
guint num_sources;
guint tiler_rows,tiler_columns;
guint pgie_batch_size;
GstCaps *caps; // NOTE(review): declared but never used
//check input args
if(argc <2) {g_printerr("Usage: %s <uri1> [uri2] ... [uriN] \n", argv[0]); return -1;}
num_sources=argc-1;
//start gstreamer
gst_init(&argc,&argv);
loop=g_main_loop_new(NULL,FALSE);
//Creating pipeline
pipeline=gst_pipeline_new("dst_opencv");
//streammux=gst_element_factory_make("nvstreammux","nvstream-muxer");
if(!pipeline){g_printerr("pipeline could not be created");}
//if(!streammux){g_printerr("Streammux could not be created");}
//gst_bin_add(GST_BIN(pipeline),streammux);
// Creating bin with all sources
appsrc=gst_element_factory_make("appsrc","gstappsrc");
conv=gst_element_factory_make("videoconvert","conv");
// NOTE(review): these caps promise I420 1200x600, but cb_need_data pushes
// the raw BGR bytes of an arbitrarily sized imread() image. This mismatch
// is the likely source of the "not-negotiated (-4)" failure reported above.
g_object_set (G_OBJECT (appsrc), "caps",
gst_caps_new_simple ("video/x-raw",
"format", G_TYPE_STRING, "I420",
"width", G_TYPE_INT, 1200,
"height", G_TYPE_INT, 600,
"pixel-aspect-ratio", GST_TYPE_FRACTION, 1, 1,
NULL), NULL);
// stream-type 0 = STREAM, buffers timestamped in GST_FORMAT_TIME.
g_object_set(G_OBJECT(appsrc),"stream-type",0,"format",GST_FORMAT_TIME,NULL);
/* Use nvinfer to infer on batched frame. */
pgie = gst_element_factory_make ("nvinfer", "primary-nvinference-engine");
/* Use nvtiler to composite the batched frames into a 2D tiled array based
* on the source of the frames. */
tiler = gst_element_factory_make ("nvmultistreamtiler", "nvtiler");
nvvidconv=gst_element_factory_make ("nvvideoconvert","nvvideo-converter");
/* Use convertor to convert from NV12 to RGBA as required by nvosd */
// nvvidconv = gst_element_factory_make ("nvvideoconvert", "nvvideo-converter");
nvosd=gst_element_factory_make("nvdsosd","nv-onscreendisplay");
nvsink=gst_element_factory_make ("nveglglessink", "nvvideo-renderer"); //show on display
//nvsink=gst_element_factory_make("fakesink","nvvideo-render"); //Dont show frames on screen
sink=gst_element_factory_make("fpsdisplaysink","fps_display");
//sink=gst_element_factory_make("autovideosink","videosink");
//check if all plugin were created
if(!appsrc){g_printerr("appsrc could not be created"); return -1;}
if(!conv){g_printerr("conv could not be created"); return -1;}
if(!tiler){g_printerr("tiler could not be created"); return -1;}
if(!sink){g_printerr("sink could not be created"); return -1;}
if(!nvvidconv){g_printerr("nvvidconv could not be created"); return -1;}
if(!pgie){g_printerr("pgie could not be created"); return -1;}
if(!nvosd){g_printerr("nvosd could not be created"); return -1;}
//set streammux
/* Configure the nvinfer element using the nvinfer config file. */
g_object_set (G_OBJECT (pgie),"config-file-path", MODEL_CONFIG, NULL);
/* Override the batch-size set in the config file with the number of sources. */
g_object_get (G_OBJECT (pgie), "batch-size", &pgie_batch_size, NULL);
if (pgie_batch_size != num_sources) {
g_printerr("WARNING: Overriding infer-config batch-size (%d) with number of sources (%d)\n",pgie_batch_size, num_sources);
g_object_set (G_OBJECT (pgie), "batch-size", num_sources, NULL);}
//g_print("Flag \n");
//set tiler
tiler_rows = (guint) sqrt (num_sources);
tiler_columns = (guint) ceil (1.0 * num_sources / tiler_rows);
/* we set the tiler properties here */
g_object_set (G_OBJECT (tiler), "rows", tiler_rows, "columns", tiler_columns,
"width", TILED_OUTPUT_WIDTH, "height", TILED_OUTPUT_HEIGHT, NULL);
/* we add a message handler */
bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
gst_object_unref (bus);
//set fps sink
g_object_set (G_OBJECT (sink), "text-overlay", FALSE, "video-sink", nvsink, "sync", FALSE, NULL);
//linking all elements
gst_bin_add_many(GST_BIN(pipeline),appsrc,conv,pgie,tiler,nvvidconv,nvosd,sink,NULL);
if (!gst_element_link_many(appsrc,conv,pgie,tiler,nvvidconv,nvosd,sink,NULL)){g_printerr("Elements could not be linked"); return -1;}
// Probe on pgie's src pad prints the FPS reported by fpsdisplaysink.
tiler_src_pad = gst_element_get_static_pad (pgie, "src");
if (!tiler_src_pad) {g_print ("Unable to get src pad\n");}
else{gst_pad_add_probe (tiler_src_pad, GST_PAD_PROBE_TYPE_BUFFER,tiler_src_pad_buffer_probe, (gpointer)sink, NULL);}
// argv[1] (the image path) is forwarded to cb_need_data as user_data.
g_signal_connect (appsrc, "need-data", G_CALLBACK (cb_need_data),(gpointer)argv[1]);
/* Set the pipeline to "playing" state */
g_print ("Now playing:");
for (int i = 0; i < num_sources; i++) {g_print (" %s,", argv[i + 1]);}
g_print ("\n");
gst_element_set_state (pipeline, GST_STATE_PLAYING);
/* Wait till pipeline encounters an error or EOS */
g_print ("Running...\n");
g_main_loop_run (loop);
/* Out of the main loop, clean up nicely */
g_print ("Returned, stopping playback\n");
gst_element_set_state (pipeline, GST_STATE_NULL);
g_print ("Deleting pipeline\n");
gst_object_unref (GST_OBJECT (pipeline));
g_source_remove (bus_watch_id);
g_main_loop_unref (loop);
return 0;
}

Gstreamer EOS message handling in filesink to change location on the fly

Trying to switch output files on the fly, but can't handle EOS.
http://gstreamer-devel.966125.n4.nabble.com/Dynamically-updating-filesink-location-at-run-time-on-the-fly-td4660569.html
Quote:
Assuming you have a pipeline that looks like this:
audiosrc --> encoder --> mux --> filesink
then you'll need to change it to:
audiosrc --> encoder --> queue --> muxsink_bin
where muxsink_bin is a bin
ghostpad --> mux --> filesink
then the procedure is:
1 - Block the queue srcpad using gst_pad_set_blocked_async()
2 - In the blocked callback:
2a - unlink muxsink_bin with gst_pad_unlink()
2b - send an EOS event to the muxsink_bin sink pad with gst_pad_send_event()
2b - create a new muxsink_bin
2c - set filesink location
2d - add the new bin to the pipeline with gst_bin_add()
2e - sync with parent using gst_element_sync_state_with_parent()
2f - link it to the queue srcpad with gst_pad_link()
2g - unblock the queue srcpad with gst_pad_set_blocked_async(). When the unblocked callback occurs you're recording again & no data has been lost. No action is required in the unblocked callback
3 - handle the EOS & delete the old muxsink_bin. I had a msg handler that I installed in my bin_init() function using "gstbin_class->handle_message = GST_DEBUG_FUNCPTR(msg_handler)" & in the handler:
3a - lock the bin state with gst_element_set_locked_state()
3b - set the state to NULL with gst_element_set_state()
3c - remove it from the pipeline with gst_bin_remove()
That's it. The only thing to be mindful of is that data must be flowing thru the pipeline for this to work.
Paddy
The main sequence works except for the finalization of the old pipeline.
The difficulty is with the point 3: I can send EOS to the ghostpad, and the filesink gets it. But how to catch that EOS?
What does it mean "install msg handler using gstbin_class->handle_message = GST_DEBUG_FUNCPTR(msg_handler)"?
There is message forwarding.
Must be enabled on the bus:
g_object_set(G_OBJECT(bin), "message-forward", TRUE, 0);
Handling:
case GST_MESSAGE_ELEMENT:
{
const GstStructure *s = gst_message_get_structure (msg);
if (gst_structure_has_name (s, "GstBinForwarded"))
{
GstMessage *forward_msg = NULL;
gst_structure_get (s, "message", GST_TYPE_MESSAGE, &forward_msg, NULL);
if (GST_MESSAGE_TYPE (forward_msg) == GST_MESSAGE_EOS)
{
g_print ("EOS from element %s\n",
GST_OBJECT_NAME (GST_MESSAGE_SRC (forward_msg)));
DestroyBin();
CreateNewBin();
RemovePad();
}
gst_message_unref (forward_msg);
}
}
Full code:
#include <gst/gst.h>
#include <iostream>
#include <cstring>
#include <cstdio>
static gchar *opt_effects = NULL; // NOTE(review): unused in the shown code
#define DEFAULT_EFFECTS "identity,exclusion,navigationtest," \
"agingtv,videoflip,vertigotv,gaussianblur,shagadelictv,edgetv"
// Pipeline-wide objects shared between the callbacks below.
static GstElement *pipeline;
static GstElement * muxer; // current matroskamux (recreated per output file)
static GstElement * sink;  // current filesink (recreated per output file)
static GstElement * q2;    // queue feeding the detachable muxsink bin
static int i=0;            // index used to number the output files
GstElement * bin;          // the detachable mux+filesink bin
GstPad * muxerSinkPad;
gulong probeId;            // id of the blocking probe on q2's src pad
static GQueue effects = G_QUEUE_INIT; // NOTE(review): unused in the shown code
void CreateNewBin();
void DestroyBin();
void ChangeLocation();
void RemovePad();
// Runs once q2's src pad is blocked: detach the muxsink bin and push EOS
// into it so the muxer can finalize the current file. The EOS surfaces
// later as a forwarded message handled in bus_cb.
static GstPadProbeReturn
pad_probe_cb (GstPad * pad, GstPadProbeInfo * info, gpointer user_data)
{
GstPad *binSinkPad = gst_element_get_static_pad (bin, "sink");
gst_pad_unlink (pad, binSinkPad);
gst_pad_send_event (binSinkPad, gst_event_new_eos ());
gst_object_unref (binSinkPad);
return GST_PAD_PROBE_OK;
}
// 10-second tick: installs a blocking probe on q2's src pad; the probe
// callback then detaches the muxsink bin and sends it EOS.
// NOTE(review): `i` is never incremented, so the i==0 branch runs on every
// tick and the trailing `return FALSE` is dead code. That makes the file
// rotation repeat every 10 s — confirm whether a one-shot was intended.
static gboolean
timeout_cb (gpointer user_data)
{
static int i=0;
if(i==0)
{
GstPad * q2SrcPad;
q2SrcPad = gst_element_get_static_pad(q2, "src");
std::cout << "Timeout: " << q2SrcPad << std::endl;
probeId = gst_pad_add_probe (q2SrcPad, GST_PAD_PROBE_TYPE_BLOCK_DOWNSTREAM,
pad_probe_cb, user_data, NULL);
gst_object_unref(q2SrcPad);
return TRUE;
}
return FALSE;
}
// Bus watch for the recording pipeline: handles errors, EOS and the
// "GstBinForwarded" element messages emitted by the muxsink bin, which
// signal that the old file is finalized and a new bin can be swapped in.
static gboolean
bus_cb (GstBus * bus, GstMessage * msg, gpointer user_data)
{
GMainLoop *loop = (GMainLoop*)user_data;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_ERROR:{
GError *err = NULL;
gchar *dbg;
gst_message_parse_error (msg, &err, &dbg);
gst_object_default_error (msg->src, err, dbg);
g_error_free (err);
g_free (dbg);
g_main_loop_quit (loop);
break;
}
// BUGFIX: this case previously used GST_EVENT_EOS — a value from the
// unrelated GstEventType enum — so it never matched a GstMessageType.
case GST_MESSAGE_EOS:
std::cout << "EOS message is got" << std::endl;
break;
case GST_MESSAGE_ELEMENT:
{
const GstStructure *s = gst_message_get_structure (msg);
if (gst_structure_has_name (s, "GstBinForwarded"))
{
GstMessage *forward_msg = NULL;
gst_structure_get (s, "message", GST_TYPE_MESSAGE, &forward_msg, NULL);
if (forward_msg != NULL) // BUGFIX: gst_structure_get() can leave it NULL
{
if (GST_MESSAGE_TYPE (forward_msg) == GST_MESSAGE_EOS)
{
g_print ("EOS from element %s\n",
GST_OBJECT_NAME (GST_MESSAGE_SRC (forward_msg)));
// File is complete: drop the old bin, build the next one and
// unblock the queue so recording resumes.
DestroyBin();
CreateNewBin();
RemovePad();
}
gst_message_unref (forward_msg);
}
}
}
break;
default:
break;
}
return TRUE;
}
// Builds videotestsrc -> capsfilter -> q1 -> x264enc -> q2 -> [muxsink bin]
// and runs it; every 10 s timeout_cb blocks q2 and triggers the file switch.
int
main (int argc, char **argv)
{
GError *err = NULL; // NOTE(review): unused in the shown code
GMainLoop *loop;
GstElement *src, *q1,/* *q2,*/ /**effect,*/ /**filter1*//*, *filter2*/ *encoder;/*, *sink*/;
gst_init(&argc, &argv);
pipeline = gst_pipeline_new ("pipeline");
src = gst_element_factory_make ("videotestsrc", NULL);
//Create a caps filter between videosource videoconvert
std::string capsString = "video/x-raw,format=YV12,width=320,height=240,framerate=30/1";
GstCaps * dataFilter = gst_caps_from_string(capsString.c_str());
q1 = gst_element_factory_make ("queue", NULL);
encoder = gst_element_factory_make ("x264enc", NULL);
q2 = gst_element_factory_make("queue", NULL);
gst_bin_add_many(GST_BIN(pipeline), src, q1, encoder, q2, 0);
gboolean link = gst_element_link_filtered(src, q1, dataFilter);
link &= gst_element_link(q1, encoder);
link &= gst_element_link(encoder, q2);
// NOTE(review): `link` is never checked — a failed link only shows up as a
// runtime pipeline error.
CreateNewBin();
gst_element_set_state (pipeline, GST_STATE_PLAYING);
loop = g_main_loop_new (NULL, FALSE);
gst_bus_add_watch (GST_ELEMENT_BUS (pipeline), bus_cb, loop);
// Rotate the output file every 10 seconds.
g_timeout_add_seconds (10, timeout_cb, loop);
g_main_loop_run (loop);
gst_element_set_state (pipeline, GST_STATE_NULL);
gst_object_unref (pipeline);
return 0;
}
void RemovePad()
{
GstPad * q2SrcPad;
q2SrcPad = gst_element_get_static_pad(q2, "src");
gst_pad_remove_probe(q2SrcPad, probeId);
gst_object_unref(q2SrcPad);
}
// Shuts down the detached muxsink bin and removes it from the pipeline
// (gst_bin_remove releases the pipeline's reference to it).
void DestroyBin()
{
gst_element_set_state (bin, GST_STATE_NULL);
gst_bin_remove (GST_BIN (pipeline), bin);
}
void CreateNewBin()
{
static std::string fileLocPattern = "deneme%d.mkv";
char buffer[12];
memset(buffer, 0, sizeof(buffer));
sprintf(buffer, fileLocPattern.c_str(), i++);
//Create Muxer Element
muxer = gst_element_factory_make("matroskamux", "MatroskaMuxer");
//Create File Sink Element
sink = gst_element_factory_make("filesink", buffer);
g_object_set(G_OBJECT(sink), "location", buffer, 0);
//Create muxsinkBin
bin = gst_bin_new(buffer);
g_object_set(G_OBJECT(bin), "message-forward", TRUE, 0);
//Add a src pad to the bin
gst_bin_add_many(GST_BIN(bin), muxer, sink, 0);
gboolean linkState = TRUE;
//Connect elements within muxsink_bin
//Link: matroskamuxer -> filesink
linkState &= gst_element_link_many(muxer, sink, 0);
//Add this bin to pipeline
gst_bin_add(GST_BIN(pipeline), bin);
//Create ghostpad and manually link muxsinkBin and remaining part of the pipeline
{
GstPadTemplate * muxerSinkPadTemplate;
if( !(muxerSinkPadTemplate = gst_element_class_get_pad_template(GST_ELEMENT_GET_CLASS(muxer), "video_%u")) )
{
std::cout << "Unable to get source pad template from muxing element" << std::endl;
}
//Obtain dynamic pad from element
muxerSinkPad = gst_element_request_pad(muxer, muxerSinkPadTemplate, 0, 0);
//Add ghostpad
GstPad * ghostPad = gst_ghost_pad_new("sink", muxerSinkPad);
gst_element_add_pad(bin, ghostPad);
gst_object_unref(GST_OBJECT(muxerSinkPad));
gst_element_sync_state_with_parent(bin);
//Get src pad from queue element
GstPad * queueBeforeBinSrcPad = gst_element_get_static_pad(q2, "src");
//Link queuebeforebin to ghostpad
if (gst_pad_link(queueBeforeBinSrcPad, ghostPad) != GST_PAD_LINK_OK )
{
std::cout << "QueueBeforeBin cannot be linked to MuxerSinkPad." << std::endl;
}
gst_object_unref(queueBeforeBinSrcPad);
}
}
http://gstreamer-devel.966125.n4.nabble.com/Listening-on-EOS-events-for-GstBin-td4669126.html
http://gstreamer-devel.966125.n4.nabble.com/file/n4669476/main.cpp
Depending on your use case you can use multifilesink element. It will switch files on the fly on certain events. A file for each buffer, a file for each segment... Check its properties and see if there is anything that would work for you.
It also serves as a good code base in case you want to write something similar (or maybe extend it?)
I'll post the code of actual custom GstBin aka 'muxsink_bin' that I ended up implementing to do that forwarding and EOS handling for the 'detachable sink part' of the pipeline.
plisolatedbin.h:
#pragma once
#include <gst/gst.h>
#include <gst/gstbin.h>
G_BEGIN_DECLS
#define PL_TYPE_ISOLATED_BIN (pl_isolated_bin_get_type ())
#define PL_IS_ISOLATED_BIN(obj) (G_TYPE_CHECK_INSTANCE_TYPE ((obj), PL_TYPE_ISOLATED_BIN))
#define PL_IS_ISOLATED_BIN_CLASS(klass) (G_TYPE_CHECK_CLASS_TYPE ((klass), PL_TYPE_ISOLATED_BIN))
#define PL_ISOLATED_BIN_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS ((obj), PL_TYPE_ISOLATED_BIN, PlIsolatedBinClass))
#define PL_ISOLATED_BIN(obj) (G_TYPE_CHECK_INSTANCE_CAST ((obj), PL_TYPE_ISOLATED_BIN, PlIsolatedBin))
#define PL_ISOLATED_BIN_CLASS(klass) (G_TYPE_CHECK_CLASS_CAST ((klass), PL_TYPE_ISOLATED_BIN, PlIsolatedBinClass))
#define PL_ISOLATED_BIN_CAST(obj) ((PlIsolatedBin*)(obj))
typedef struct _PlIsolatedBin PlIsolatedBin;
typedef struct _PlIsolatedBinClass PlIsolatedBinClass;
/**
* Does not forward EOS to parent by default.
*/
struct _PlIsolatedBin
{
GstBin bin;
};
struct _PlIsolatedBinClass
{
GstBinClass parent_class;
};
GType pl_isolated_bin_get_type();
GstElement* pl_isolated_bin_new();
G_END_DECLS
plisolatedbin.c:
#include "plisolatedbin.h"
#include <assert.h>
G_DEFINE_TYPE(PlIsolatedBin, pl_isolated_bin, GST_TYPE_BIN)
static void pl_isolated_bin_init(PlIsolatedBin *plisolatedbin)
{
}
static void pl_isolated_bin_handle_message_func(GstBin *bin, GstMessage *message)
{
if (GST_MESSAGE_TYPE(message) != GST_MESSAGE_EOS)
{
GST_BIN_CLASS(pl_isolated_bin_parent_class)->handle_message(bin, message);
}
else
{
GstMessage *forwarded = gst_message_new_element(GST_OBJECT_CAST(bin), gst_structure_new("PlIsolatedBinForwarded", "message", GST_TYPE_MESSAGE, message, NULL));
gst_element_post_message(GST_ELEMENT_CAST(bin), forwarded);
}
}
static void pl_isolated_bin_class_init(PlIsolatedBinClass *class)
{
class->parent_class.handle_message = GST_DEBUG_FUNCPTR(pl_isolated_bin_handle_message_func);
}
GstElement* pl_isolated_bin_new()
{
return g_object_new(PL_TYPE_ISOLATED_BIN, NULL);
}

C++ (Ubuntu): load audio file (wav, mp3, aiff) to array/vector with gstreamer

how can I decode with C++ (Ubuntu) an audio file (wav, mp3, aiff) and store it (PCM/int) in a vector/array?
What I did so far: I used GStreamer (I'm a complete beginner) to decode the file, and I can play it and get data with pull-buffer; however, I didn't find a method to get the whole audio data at once to store in an array.
Is there such a method in gstreamer? Or exists there an other C++ library to decode audio files and get the raw (PCM/int) data?
edit: change frequency to PCM
I solved the problem by myself with gstreamer. The trick is to use giostreamsink as a sink, this stores the data into a G_MEMORY_OUTPUT_STREAM.
The full code sample:
#include <string>
#include <stdio.h>
#include <gst/gst.h>
#include <gio/gio.h>
#include <boost/thread.hpp>
// decodebin "pad-added" handler: links the new pad to audioconvert's sink
// pad if it carries audio and nothing has been linked yet.
// NOTE(review): gst_pad_get_caps() is GStreamer 0.10 API; on 1.x this would
// be gst_pad_query_caps()/gst_pad_get_current_caps().
static void on_pad_added(GstElement *decodebin,
GstPad *pad,
gpointer data) {
GstElement *convert = (GstElement *) data;
GstCaps *caps;
GstStructure *str;
GstPad *audiopad;
audiopad = gst_element_get_static_pad(convert, "sink");
if (GST_PAD_IS_LINKED(audiopad)) {
g_object_unref(audiopad);
return;
}
// Inspect the pad caps to decide whether this is the audio branch.
caps = gst_pad_get_caps(pad);
str = gst_caps_get_structure(caps, 0);
printf("here %s\n",gst_structure_get_name(str));
if (!g_strrstr(gst_structure_get_name(str), "audio")) {
gst_caps_unref(caps);
gst_object_unref(audiopad);
return;
}
gst_caps_unref(caps);
gst_pad_link(pad, audiopad);
g_object_unref(audiopad);
}
// Bus watch for the decode pipeline: stops the main loop when decoding
// finishes (EOS) or fails (error).
static gboolean bus_call(GstBus *bus,
GstMessage *msg,
gpointer data) {
GMainLoop *mainLoop = (GMainLoop*)data;
const GstMessageType type = GST_MESSAGE_TYPE(msg);
if (type == GST_MESSAGE_EOS) {
g_print ("End of stream\n");
g_main_loop_quit(mainLoop);
} else if (type == GST_MESSAGE_ERROR) {
gchar *debug;
GError *error;
gst_message_parse_error(msg, &error, &debug);
g_free (debug);
g_printerr("Error: %s\n", error->message);
g_error_free(error);
g_main_loop_quit(mainLoop);
}
return true;
}
/* Decodes a WAV file entirely into memory via giostreamsink and copies the
 * raw 16-bit samples into a std::vector. Returns 0 on success. */
int main (int argc, char **argv) {
    gst_init(&argc, &argv);

    GstElement *pipeline, *source, *decode, *sink, *convert;
    /* requested raw output format */
    int rate = 44100;
    int channels = 1;
    int depth = 16;          /* bits per sample */
    bool output_signed = true;
    GMainLoop *loop;
    GstBus *bus;
    guint bus_watch_id;
    GMemoryOutputStream *stream;
    gpointer out_data;

    // loop
    loop = g_main_loop_new(NULL, false);
    // pipeline
    pipeline = gst_pipeline_new("test_pipeline");
    // sink: giostreamsink appends every decoded buffer to this growable memory stream
    stream = G_MEMORY_OUTPUT_STREAM(g_memory_output_stream_new(NULL, 0, (GReallocFunc)g_realloc, (GDestroyNotify)g_free));
    sink = gst_element_factory_make ("giostreamsink", "sink");
    g_object_set(G_OBJECT(sink), "stream", stream, NULL);
    // source
    source = gst_element_factory_make("filesrc", "source");
    g_object_set(G_OBJECT(source), "location", "/home/sam/Desktop/audio/audio.wav", NULL);
    // convert
    convert = gst_element_factory_make("audioconvert", "convert");
    // decode
    decode = gst_element_factory_make("decodebin", "decoder");
    // decodebin only exposes its source pad at runtime; link it in the callback
    g_signal_connect(decode, "pad-added", G_CALLBACK(on_pad_added), convert);
    // bus
    bus = gst_pipeline_get_bus(GST_PIPELINE (pipeline));
    bus_watch_id = gst_bus_add_watch(bus, bus_call, loop);
    gst_object_unref(bus);
    // add elements into pipeline
    gst_bin_add_many(GST_BIN(pipeline), source, decode, convert, sink, NULL);
    // link source to decode
    gst_element_link(source, decode);
    // caps — NOTE(review): "audio/x-raw-int" is GStreamer 0.10 syntax;
    // GStreamer 1.0 uses "audio/x-raw" with a "format" field — confirm target version
    GstCaps *caps;
    caps = gst_caps_new_simple("audio/x-raw-int",
        "rate", G_TYPE_INT, rate,
        "channels", G_TYPE_INT, channels,
        "width", G_TYPE_INT, depth,
        "depth", G_TYPE_INT, depth,
        "signed", G_TYPE_BOOLEAN, output_signed,
        NULL);
    // link convert to sink, forcing the raw format above
    gst_element_link_filtered(convert, sink, caps);
    gst_caps_unref(caps);
    // start playing
    gst_element_set_state(GST_ELEMENT(pipeline), GST_STATE_PLAYING);
    // iterate until EOS or error (bus_call quits the loop)
    g_print("Running...\n");
    g_main_loop_run(loop);
    // out of the main loop, clean up nicely
    g_print("Returned, stopping playback\n");
    gst_element_set_state(pipeline, GST_STATE_NULL);
    g_print("Deleting pipeline\n");
    gst_object_unref(GST_OBJECT(pipeline));
    g_source_remove (bus_watch_id);
    g_main_loop_unref(loop);
    // get data: the stream's internal buffer stays valid until we unref it
    g_print("get data\n");
    out_data = g_memory_output_stream_get_data(G_MEMORY_OUTPUT_STREAM(stream));
    unsigned long size = g_memory_output_stream_get_size(G_MEMORY_OUTPUT_STREAM(stream));
    unsigned long sizeData = g_memory_output_stream_get_data_size(G_MEMORY_OUTPUT_STREAM(stream));
    std::cout << "stream size: " << size << std::endl;
    std::cout << "stream data size: " << sizeData << std::endl;
    // copy the 16-bit samples out before releasing the stream
    std::vector<int16_t> data;
    data.reserve(sizeData/2);
    for (unsigned long i = 0; i < sizeData/2; ++i) {
        data.push_back(((gint16*)out_data)[i]);
    }
    // fix: the memory stream (and its backing buffer) was leaked
    g_object_unref(stream);
    return 0;
}

gstreamer appsrc video streaming over the network

I'm trying to use gstreamer appsrc to play video stream over the network.
I found good examples here.
gstreamer appsrc test application
http://amarghosh.blogspot.kr/2012/01/gstreamer-appsrc-in-action.html
Using examples above I can play a video in X Window using Xlib. When pipeline is set PLAYING state, then somehow "need-data" signal emitted and in the start_feed callback function data read from a video file are injected to the appsrc GstBuffer and play sample video.
I'm trying to get data from the network instead of a file, so I think a simple echo server could read a video file exactly the same way as above and send the data to the client when a connection is made. The client should receive these data and put them into the appsrc.
My question is how to put stream data into the appsrc pipeline. Can anybody give a suggestion or point me to a good reference?
Here's the working sample code using above links' examples.
// http://amarghosh.blogspot.kr/2012/01/gstreamer-appsrc-in-action.html
// http://www.cs.odu.edu/~cs476/Xlib/xlines.c
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <stdio.h>
#include <unistd.h> // sleep()
#include <stdbool.h>
#include <gst/gst.h>
#include <gst/app/gstappsrc.h>
#include <gst/app/gstappbuffer.h>
#include <gst/interfaces/xoverlay.h>
#define BUFF_SIZE (640*480*3)//(1024)
#define BORDER_WIDTH 2
#define DEBUG printf
/* Convenience integer aliases for this sample. */
typedef unsigned int uint32;
typedef unsigned char uint8;
/* All pipeline state shared between the feed callbacks and the bus watch. */
typedef struct {
GstPipeline *pipeline;   /* top-level pipeline created in gstreamer_init() */
GstAppSrc *src;          /* appsrc fed from `file` by read_data() */
GstElement *sink;        /* NOTE(review): never assigned in this sample — confirm before use */
GstElement *decoder;     /* decodebin2; its pads link to ffmpeg in on_pad_added() */
GstElement *ffmpeg;      /* ffmpegcolorspace converter */
GstElement *videosink;   /* autovideosink */
GMainLoop *loop;         /* GLib main loop driving the bus watch */
guint sourceid;          /* idle-source id while feeding appsrc, 0 when idle */
FILE *file;              /* input video file opened in gstreamer_init() */
} gst_app_t;
static gst_app_t gst_app;
/* X windows the video renders into; created in main(), used by bus_callback(). */
static Window child_window = 0;
static Window window = 0;
/* Idle callback installed by start_feed(): reads one chunk of the input
 * file and pushes it into appsrc. Returns TRUE to be called again, FALSE
 * to remove itself (EOF, short read, or push failure). */
static gboolean read_data(gst_app_t *app)
{
    GstBuffer *buffer;
    guint8 *ptr;
    gint size;
    GstFlowReturn ret;

    ptr = (guint8 *) g_malloc(BUFF_SIZE);
    g_assert(ptr);

    size = fread(ptr, 1, BUFF_SIZE, app->file);
    if(size == 0){
        /* fix: ptr was leaked on the EOF path */
        g_free(ptr);
        ret = gst_app_src_end_of_stream(app->src);
        DEBUG("eos returned %d at %d\n", ret, __LINE__);
        return FALSE;
    }
    /* Wrap the malloc'd chunk; the buffer takes ownership of ptr and
     * gst_app_src_push_buffer takes ownership of the buffer. */
    buffer = gst_buffer_new();
    GST_BUFFER_MALLOCDATA(buffer) = ptr;
    GST_BUFFER_SIZE(buffer) = size;
    GST_BUFFER_DATA(buffer) = GST_BUFFER_MALLOCDATA(buffer);

    ret = gst_app_src_push_buffer(app->src, buffer);
    if(ret != GST_FLOW_OK){
        DEBUG("push buffer returned %d for %d bytes \n", ret, size);
        return FALSE;
    }
    /* A short read means we just consumed the final chunk. */
    if(size != BUFF_SIZE){
        ret = gst_app_src_end_of_stream(app->src);
        DEBUG("eos returned %d at %d\n", ret, __LINE__);
        return FALSE;
    }
    return TRUE;
}
/* appsrc "need-data" handler: attach an idle source that feeds buffers,
 * unless one is already running. */
static void start_feed (GstElement * pipeline, guint size, gst_app_t *app)
{
    if (app->sourceid != 0)
        return;
    DEBUG ("start feeding\n");
    app->sourceid = g_idle_add ((GSourceFunc) read_data, app);
}
/* appsrc "enough-data" handler: detach the feeding idle source, if any. */
static void stop_feed (GstElement * pipeline, gst_app_t *app)
{
    if (app->sourceid == 0)
        return;
    DEBUG ("stop feeding\n");
    g_source_remove (app->sourceid);
    app->sourceid = 0;
}
/* decodebin2 "pad-added" handler: links the new pad to the
 * ffmpegcolorspace sink pad when the pad carries video. */
static void on_pad_added(GstElement *element, GstPad *pad)
{
    GstCaps *pad_caps;
    GstStructure *structure;
    gchar *pad_type;

    DEBUG("pad added\n");
    pad_caps = gst_pad_get_caps(pad);
    structure = gst_caps_get_structure(pad_caps, 0);
    g_assert(structure);
    pad_type = (gchar*)gst_structure_get_name(structure);
    DEBUG("pad name %s\n", pad_type);

    if(g_strrstr(pad_type, "video") != NULL){
        GstPad *colorspace_sink;
        GstPadLinkReturn link_ret;
        colorspace_sink = gst_element_get_pad(gst_app.ffmpeg, "sink");
        g_assert(colorspace_sink);
        link_ret = gst_pad_link(pad, colorspace_sink);
        DEBUG("pad_link returned %d\n", link_ret);
        gst_object_unref(colorspace_sink);
    }
    gst_caps_unref(pad_caps);
}
/* Bus watch for the appsrc pipeline: hands the X window to the video sink,
 * logs warnings, and quits the main loop on error or end-of-stream. */
static gboolean bus_callback(GstBus *bus, GstMessage *message, gpointer *ptr)
{
    gst_app_t *app = (gst_app_t*)ptr;
    switch(GST_MESSAGE_TYPE(message))
    {
    case GST_MESSAGE_ELEMENT: {
        /* fix: only the sink's "prepare-xwindow-id" element message carries
         * an x-overlay; blindly casting every ELEMENT message's source to
         * GstXOverlay is wrong. */
        const GstStructure *s = gst_message_get_structure(message);
        if (s && gst_structure_has_name(s, "prepare-xwindow-id"))
            gst_x_overlay_set_window_handle (GST_X_OVERLAY (GST_MESSAGE_SRC(message)), child_window);
    }
    break;
    case GST_MESSAGE_ERROR:
    {
        gchar *debug;
        GError *err;
        gst_message_parse_error(message, &err, &debug);
        DEBUG("Error %s\n", err->message);
        g_error_free(err);
        g_free(debug);
        g_main_loop_quit(app->loop);
    }
    break;
    case GST_MESSAGE_WARNING:
    {
        gchar *debug;
        GError *err;
        gchar *name;
        gst_message_parse_warning(message, &err, &debug);
        DEBUG("Warning %s\nDebug %s\n", err->message, debug);
        name = GST_MESSAGE_SRC_NAME(message);
        DEBUG("Name of src %s\n", name ? name : "nil");
        g_error_free(err);
        g_free(debug);
    }
    break;
    case GST_MESSAGE_EOS:
        DEBUG("End of stream\n");
        g_main_loop_quit(app->loop);
        break;
    case GST_MESSAGE_STATE_CHANGED:
        /* too noisy to log */
        break;
    default:
        DEBUG("got message %s\n", \
        gst_message_type_get_name (GST_MESSAGE_TYPE (message)));
        break;
    }
    /* keep the watch installed */
    return TRUE;
}
/* One-shot g_timeout_add callback: quits the main loop to stop playback.
 * fix: the parameter was declared GstElement* although the user data passed
 * at the (commented) call site is the GMainLoop — use gpointer honestly. */
static gboolean terminate_playback (gpointer data)
{
    DEBUG ("Terminating playback\n");
    g_main_loop_quit ((GMainLoop *)data);
    return FALSE; /* FALSE removes the timeout source */
}
int gstreamer_init(int argc, char *argv[])
{
gst_app_t *app = &gst_app;
GstBus *bus;
GstStateChangeReturn state_ret;
app->file = fopen(argv[1], "r");
g_assert(app->file);
/* initialization */
gst_init((int)0, NULL);
app->loop = g_main_loop_new(NULL, FALSE);
/* create elements */
app->pipeline = (GstPipeline *)gst_pipeline_new("my_pipeline");
app->src = (GstAppSrc *)gst_element_factory_make("appsrc", "myappsrc");
app->decoder = gst_element_factory_make("decodebin2", "mydecoder");
app->ffmpeg = gst_element_factory_make("ffmpegcolorspace", "myffmpeg");
app->videosink = gst_element_factory_make("autovideosink", "myvideosink");
if (!app->videosink) {
DEBUG ("output could not be found - check your install\n");
}
g_assert(app->src);
g_assert(app->decoder);
g_assert(app->ffmpeg);
g_assert(app->videosink);
bus = gst_pipeline_get_bus(GST_PIPELINE(app->pipeline));
gst_bus_add_watch(bus, (GstBusFunc)bus_callback, app);
gst_object_unref(bus);
g_signal_connect(app->decoder, "pad-added",
G_CALLBACK(on_pad_added), app->ffmpeg);
//gst_app_src_set_emit_signals(app->src, true);
g_signal_connect(app->src, "need-data", G_CALLBACK(start_feed), app);
g_signal_connect(app->src, "enough-data", G_CALLBACK(stop_feed), app);
gst_bin_add_many (GST_BIN (app->pipeline), (GstElement *)app->src,
app->decoder, app->ffmpeg, app->videosink, NULL);
/* link everything together */
if (!gst_element_link((GstElement *)app->src, app->decoder)) {
DEBUG ("Failed to link one or more elements!\n");
return -1;
}
if(!gst_element_link(app->ffmpeg, app->videosink)){
DEBUG("failed to link ffmpeg and videosink");
return -1;
}
state_ret = gst_element_set_state((GstElement *)app->pipeline, GST_STATE_PLAYING);
if (state_ret == GST_STATE_CHANGE_FAILURE) {
DEBUG("Failed to start up pipeline!\n");
return 1;
}
DEBUG("set state returned %d\n", state_ret);
//g_timeout_add (15000, (GSourceFunc) terminate_playback, app->loop);
g_main_loop_run(app->loop);
state_ret = gst_element_set_state((GstElement *)app->pipeline, GST_STATE_NULL);
DEBUG("set state null returned %d\n", state_ret);
gst_object_unref(app->pipeline);
return 1;
}
/*
* gst-launch filesrc location=test.avi ! decodebin2 ! ffmpegcolorspace ! autovideosink
*
* 1. dependency library install
* $ sudo apt-get install gstreamer0.10-plugins-bad
* $ sudo apt-get install gstreamer0.10-ffmpeg
*
* 2. compile
* $ gcc hello.c -o hello -lX11 `pkg-config --cflags --libs gstreamer-0.10 gstreamer-app-0.10` -lgstinterfaces-0.10
*
* 3. how to run program
* $ ./hello <video_file_name>
* $ GST_DEBUG=appsrc:5 ./hello ./hbo_dtc_sd.ts
*/
int main(int argc, char *argv[])
{
Display *disp;
Window root;
long fgcolor, bgcolor;
GC gc;
XGCValues gc_val;
XEvent event;
char *msg = "Hello, World!";
int screen;
disp = XOpenDisplay(NULL);
if (disp == NULL) {
fprintf(stderr, "Cannot open display\n");
exit(1);
}
screen = DefaultScreen(disp);
root = RootWindow(disp, screen);
fgcolor = BlackPixel(disp, screen);
bgcolor = WhitePixel(disp, screen);
window = XCreateSimpleWindow(disp, root, 100, 100, 1000, 840, 1,
fgcolor, bgcolor);
child_window = XCreateSimpleWindow(disp, window, 100, 100, 800, 600, 1,
fgcolor, bgcolor);
gc_val.foreground = fgcolor;
gc_val.background = bgcolor;
gc = XCreateGC(disp, child_window, GCForeground|GCBackground, &gc_val);
XSelectInput(disp, child_window, ExposureMask | KeyPressMask);
g_warning("map xwindow");
//XMapWindow(disp, window);
XMapWindow(disp, window);
XMapWindow(disp, child_window);
XSync(disp, FALSE);
//XDrawLine (disp, window, gc, 0, 0, 1000, 800);
//XDrawLine (disp, child_window, gc, 0, 0, 800, 600);
gstreamer_init(argc, argv);
XDestroyWindow( disp, window );
XDestroyWindow( disp, child_window );
XCloseDisplay( disp );
return 0;
}
You'll want to have at least one other thread (on each end) to handle communication over a socket (like TCP, or UDP if on a local network). This typically has a blocking call to wait for packets. To send data, you can form a gstreamer tee and queue, and then an appsrc to buffer/send data to a socket. To receive, you can pull the data from the socket to a buffer. Keep in mind the OS's socket buffer is relatively small and will drop packets if you don't pull from it fast enough, or push to one too fast. Hence the buffers.
On a NEED_DATA signal, you pull from that buffer to the pipeline using pushBuffer(). And on an ENOUGH_DATA signal, you can just keep buffering or dispose of it, whatever your application needs to do.

running the gstreamer pipeline (not able to get video and audio data in the callback)

I'm a newbie to gstreamer and I wanted to get the audio and video both buffers from a 3gp file and do some processing in the callback.
(I'm starting my pipeline into a separate thread, pipeline gives audio buffers in a callback AudioCallback and video buffers in VideoCallback.)
This is how my pipeline looks:
GstElement* audioQueue;//global: on_pad_added links the demuxer's audio pad to this queue (the signal carries only one user-data pointer, so both queues can't be passed)
GstElement* videoQueue;//global: on_pad_added links the demuxer's video pad to this queue (same single-user-data limitation)
//static functions
/* Bus handler for the appsink pipeline: quits the main loop on
 * end-of-stream or on an error; every other message is ignored. */
static gboolean
bus_call (GstBus* bus, GstMessage* msg, gpointer data)
{
    GMainLoop* main_loop = (GMainLoop*) data;
    const GstMessageType msg_type = GST_MESSAGE_TYPE (msg);

    if (msg_type == GST_MESSAGE_EOS) {
        g_main_loop_quit (main_loop);
    } else if (msg_type == GST_MESSAGE_ERROR) {
        gchar* debug_info = NULL;
        GError* error = NULL;
        gst_message_parse_error (msg, &error, &debug_info);
        g_free (debug_info);
        g_printerr ("Error: %s\n", error->message);
        g_error_free (error);
        g_main_loop_quit (main_loop);
    }
    /* keep the bus watch alive */
    return true;
}
/* Links two elements and reports a failure to stderr. */
static void link_two_elements(GstElement* src_element, GstElement* sink_element)
{
    const gboolean linked = gst_element_link(src_element, sink_element);
    if (linked == FALSE)
        g_printerr ("Linking Error");
}
/* qtdemux "pad-added" handler: routes the new pad to the global audio or
 * video queue based on its caps. */
static void
on_pad_added (GstElement *element,
GstPad *pad,
gpointer data)
{
    GstCaps *caps;
    GstStructure *str;
    gchar *tex;
    GstPad* sinkpad;

    /* check media type of the newly exposed pad */
    caps = gst_pad_get_caps (pad);
    str = gst_caps_get_structure (caps, 0);
    tex = (gchar*)gst_structure_get_name(str);
    if(g_strrstr(tex,"audio"))
    {
        sinkpad = gst_element_get_static_pad (audioQueue, "sink");
        if(sinkpad)
        {
            GstPadLinkReturn linkReturn = gst_pad_link (pad, sinkpad);
            (void) linkReturn;
            gst_object_unref (sinkpad);
        }
    }
    if(g_strrstr(tex,"video"))
    {
        sinkpad = gst_element_get_static_pad (videoQueue, "sink");
        /* fix: original dereferenced sinkpad without a null check */
        if(sinkpad)
        {
            GstPadLinkReturn linkReturn = gst_pad_link (pad, sinkpad);
            (void) linkReturn;
            gst_object_unref (sinkpad);
        }
    }
    /* fix: caps were leaked on every invocation */
    gst_caps_unref (caps);
}
void runPipeline()
{
GMainLoop *loop;
GstElement *__pPipeline, *source, *demuxer, *audioDecoder, *audioConverter, *audioresample, /**audioQueue,*/ *audioSink, *videoDecoder, *videoSink, /**videoQueue,*/ *ffmpegcolorspace, *videoscale;
GstBus* bus;
//Initialisation
gst_init (null,null);
loop = g_main_loop_new (NULL, FALSE);
// Create gstreamer elements
__pPipeline = gst_pipeline_new("test_appsink");
source = gst_element_factory_make ("filesrc", "file-source");
demuxer = gst_element_factory_make("qtdemux", "demuxer");
//audioDecoder = gst_element_factory_make("ffdec_mp3", "audioDecoder");
audioDecoder = gst_element_factory_make("decodebin", "audioDecoder");
audioConverter = gst_element_factory_make("audioconvert", "audioConverter");
audioresample = gst_element_factory_make("audioresample", "audioresample");
audioSink = gst_element_factory_make("appsink", "audioSink");
audioQueue = gst_element_factory_make("queue2", "audioQueue");
//videoDecoder = gst_element_factory_make("ffdec_h264", "videoDecoder");
videoQueue = gst_element_factory_make("queue2", "videoQueue");
videoDecoder = gst_element_factory_make("decodebin ", "videoDecoder");
ffmpegcolorspace = gst_element_factory_make("ffmpegcolorspace", "ffmpegcolorspace");
videoscale = gst_element_factory_make("videoscale", "videoscale");
videoSink = gst_element_factory_make("appsink", "videoSink");
//appsink = gst_element_factory_make("appsink", "sink-buffer");
if (!__pPipeline || !source || !demuxer || !audioDecoder || !audioConverter ||!audioresample || !audioSink || !videoSink || !audioQueue || !videoQueue || !videoDecoder || !ffmpegcolorspace || !videoscale )
{
//return -1;
}
//we set the input filename to the source element
g_object_set (G_OBJECT (source), "location", "/etc/20000101-161404.3gp", NULL);
//Make appsink emit the "new-preroll" and "new-buffer" signals.
gst_app_sink_set_emit_signals ((GstAppSink*) audioSink, TRUE);
gst_app_sink_set_emit_signals ((GstAppSink*) videoSink, TRUE);
//we add a message handler
bus = gst_pipeline_get_bus (GST_PIPELINE (__pPipeline));
gst_bus_add_watch (bus, bus_call, loop);
gst_object_unref (bus);
//we add all elements into the pipeline
gst_bin_add_many (GST_BIN (__pPipeline),
source, demuxer, videoDecoder, audioDecoder, audioConverter, audioresample, audioSink, videoSink,
audioQueue, videoQueue, ffmpegcolorspace, videoscale, NULL);
//link source and demuxer seperately
link_two_elements(source, demuxer);
//link rest of the elements
int retValVideoLinking = (int)gst_element_link_many (videoQueue, videoDecoder, ffmpegcolorspace, videoscale, videoSink, NULL);
int retValAudioLinking = (int)gst_element_link_many (audioQueue, audioDecoder, audioConverter, audioresample, audioSink, NULL);
gulong sigConRet = g_signal_connect (demuxer, "pad-added", G_CALLBACK (on_pad_added), null);
_ArAudioIn audioInstance = _ArAudioIn::GetArAudioInstance();
g_signal_connect (videoSink, "new-buffer", G_CALLBACK (AudioCallback), null);//AudioCallback static API
g_signal_connect (audioSink, "new-buffer", G_CALLBACK (VideoCallback), null);//VideoCallback static API
//Set the pipeline to "playing" state
GstStateChangeReturn state = gst_element_set_state (__pPipeline, GST_STATE_PLAYING);
g_main_loop_run (loop);
return null;
}
I'm just getting a single video buffer in my VideoCallback, and in on_pad_added I'm getting a linking error for the audio pad:
GST_PAD_LINK_NOFORMAT = -4,
I'm trying to link the queue's sink pad to the pad received in on_pad_added; the same approach works for video but not for audio.
If anybody has any idea about this then please give me some pointers to get rid off this err and make this pipeline work.
It would be nice if you cleaned up your code before asking us to debug it. As a general piece of advice, check the return values and either log a warning or simply exit(1) to ensure that your pipeline setup works (e.g. in the pad_added handler). I'd also start by using a normal video and audio sink to check that it plays.
Finally, it is usually a bad idea to pull data out of the pipeline. Perhaps you could tell us what you want to do with the data once you have it in your callback, so that we can give better advice.