I have an object that generates videos with GStreamer. Each time I want to generate a new video I create a new object and add frames. After the video is finished, I delete the object, but GStreamer's internal memory does not appear to be released.
After several videos have been generated, all the RAM is allocated and Linux kills the process.
Why is this happening? How can I solve this issue? Is there any other way to do it?
GVideo.h:
#ifndef GVIDEO_H
#define GVIDEO_H
#include <gst/gst.h>
#include <string>
class GVideo
{
public:
GVideo();
~GVideo();
void startVideo(std::string filename);
void endVideo();
void addFrame(GstSample* element);
bool isRecording(){return _isRecording;}
bool isDataNeed(){return _dataNeed;}
private:
void setDataNeed(bool dataNeed){_dataNeed = dataNeed;}
protected:
bool _isRecording;
bool _dataNeed;
int _frameRate;
int _duration;
GstClockTime _timestamp;
GstElement *_pipeline;
GstElement *_source;
};
#endif //GVIDEO_H
GVideo.cpp:
#include "GVideo.h"
#include <gst/app/gstappsrc.h>
#include <iostream>
static gboolean bus_video_call(GstBus* bus, GstMessage* msg, void* user_data)
{
//std::cout << "BUS_CALL" << std::endl;
GVideo* video = (GVideo*)user_data;
switch (GST_MESSAGE_TYPE(msg))
{
case GST_MESSAGE_EOS:
{
std::cout << "VIDEO GST_MESSAGE_EOS" << std::endl;
video->endVideo();
break;
}
case GST_MESSAGE_ERROR:
{
std::cout << "GST_MESSAGE_ERROR" << std::endl;
GError *err;
gst_message_parse_error(msg, &err, NULL);
g_error("%s", err->message);
g_error_free(err);
video->endVideo();
break;
}
default:
break;
}
return true;
}
GVideo::GVideo()
: _dataNeed(false), _isRecording(false)
{
_pipeline = NULL;
_source = NULL;
}
GVideo::~GVideo()
{
std::cout << "Deleting GstVideo." << std::endl;
if(_pipeline != NULL)
endVideo();
}
void GVideo::startVideo(std::string filename)
{
_isRecording = true;
_frameRate = 2;
_duration = 5;
_timestamp = 0;
_dataNeed = true;
_pipeline = gst_pipeline_new ("video_pipeline");
_source = gst_element_factory_make ("appsrc" , "video_source");
GstElement* _videorate = gst_element_factory_make ("videorate" , "video_vidrate");
GstElement* _capsfilter = gst_element_factory_make ("capsfilter" , "video_capsfilter");
GstElement* _videoconvert = gst_element_factory_make ("videoconvert", "video_conv");
GstElement* _encoder = gst_element_factory_make ("x264enc" , "video_enc");
GstElement* _muxer = gst_element_factory_make ("mp4mux" , "video_mux");
GstElement* _filesink = gst_element_factory_make ("filesink" , "video_filesink");
// g_object_set (G_OBJECT (_source), "num-buffers", _duration*_frameRate, NULL);
g_object_set (G_OBJECT (_source), "caps",
gst_caps_new_simple ( "video/x-raw",
"format", G_TYPE_STRING, "I420",
"width", G_TYPE_INT, 352,
"height", G_TYPE_INT, 288,
"framerate", GST_TYPE_FRACTION, _frameRate, 1,
NULL), NULL);
/* setup appsrc */
g_object_set (G_OBJECT (_source),
// "do-timestamp", TRUE,
"stream-type", GST_APP_STREAM_TYPE_STREAM,
"format", GST_FORMAT_TIME, NULL);
g_object_set (G_OBJECT (_capsfilter), "caps",
gst_caps_new_simple ("video/x-raw",
// "format", G_TYPE_STRING, "I420",
"framerate", GST_TYPE_FRACTION, 30, 1,
NULL), NULL);
gst_bin_add_many (GST_BIN (_pipeline), _source, _videorate, _capsfilter, _videoconvert, _encoder, _muxer, _filesink, NULL);
gst_element_link_many (_source, _videorate, _capsfilter, _videoconvert, _encoder, _muxer, _filesink, NULL);
g_object_set (G_OBJECT (_filesink), "location", filename.c_str(), NULL);
GstBus* bus = gst_pipeline_get_bus(GST_PIPELINE(_pipeline));
gst_bus_add_watch(bus, bus_video_call, this);
gst_object_unref(bus);
gst_element_set_state (_pipeline, GST_STATE_PLAYING);
}
void GVideo::addFrame(GstSample* element)
{
GstBuffer* buf = gst_sample_get_buffer(element);
GST_BUFFER_PTS (buf) = _timestamp;
GST_BUFFER_DURATION (buf) = gst_util_uint64_scale_int (1, GST_SECOND, _frameRate);
_timestamp += GST_BUFFER_DURATION (buf);
gst_app_src_push_sample(GST_APP_SRC(_source), element);
if(_timestamp >= _duration*GST_SECOND)
{
_dataNeed = false;
gst_app_src_end_of_stream(GST_APP_SRC(_source));
}
}
void GVideo::endVideo()
{
std::cout << "gst_element_set_state." << std::endl;
gst_element_set_state (_pipeline, GST_STATE_NULL);
std::cout << "gst_object_unref." << std::endl;
gst_object_unref(_pipeline);
std::cout << "_pipeline= NULL." << std::endl;
_pipeline = NULL;
std::cout << "setDataNeed." << std::endl;
_isRecording = false;
}
I would guess that whatever is calling addFrame() might need to unref the sample.
https://gstreamer.freedesktop.org/data/doc/gstreamer/head/gst-plugins-base-libs/html/gst-plugins-base-libs-appsrc.html#gst-app-src-push-sample
The docs there indicate "transfer: none" on the sample parameter, which I believe means appsrc does not take ownership, so the caller keeps its reference and needs to unref it. For some reason the older method gst_app_src_push_buffer() has transfer "full" (it does take ownership of the buffer); I'm not sure why they differ.
https://gstreamer.freedesktop.org/data/doc/gstreamer/head/gst-plugins-base-libs/html/gst-plugins-base-libs-appsrc.html#gst-app-src-push-buffer
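If that's the case, the fix is a single unref on the caller's side. A minimal sketch (hedged; I'm assuming the samples come from something like an appsink via gst/app/gstappsink.h):
GstSample* sample = gst_app_sink_pull_sample(GST_APP_SINK(appsink)); // hypothetical source of samples
video->addFrame(sample); // internally calls gst_app_src_push_sample(), which is "transfer: none"
gst_sample_unref(sample); // our reference is still live, so drop it here to stop the leak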
Related
I have found that if I send the same OGG/Vorbis audio into a GStreamer 1.0 pipeline within a program twice, I get different audio out (similar, but not identical). I have tried unreferencing the pipeline and rebuilding it between uses, but to no avail. Something seems to maintain some sort of state. The second output is the same as other second outputs, with the same differences from the first.
I'm looking for reproducible results from a server processing audio, and this is getting in the way.
I'm sorry this is so long. Getting it to reproduce, and rebuilding the pipeline, seemed like relevant things to leave in. I have reproduced this with several OGG/Vorbis files (not empty ones, though). Call the file "a.ogg"; running the program will produce "job1.raw" and "job2.raw", which have been different each time.
Thanks for any help,
Richard.
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <stdlib.h>
#include <glib.h>
#include <gst/gst.h>
typedef struct _Decoder2Data {
GstElement * appsrc;
GstElement * decodebin;
GstElement * audioconvert;
GstElement * audioresample;
GstElement * queue1;
GstElement * filesink;
GstElement * pipeline;
GstBus *bus;
const char* request_id;
const char* outdir;
GMainLoop * main_loop;
} Decoder2Data;
void start_request(const char* caps_str, Decoder2Data * data);
void process_data(Decoder2Data * obj, char * audio, int audioSize);
void end_request(Decoder2Data* data);
void finish_request(Decoder2Data * data);
int create_pipeline(int argc, char *argv[], Decoder2Data * data);
void closeGstreamer(Decoder2Data * data);
void *g_loop_thread(void *ptr);
void start_request(const char* caps_str, Decoder2Data * data) {
g_printerr("Test %s: Starting request\n", data->request_id);
g_object_set(data->appsrc, "caps", NULL, NULL);
if (data->outdir) {
char path[128];
sprintf(path, "%s/%s.raw", data->outdir, data->request_id);
FILE *fp = fopen(path, "w+");
if(fp != NULL) {
fclose(fp);
gst_element_set_state(data->pipeline, GST_STATE_PAUSED);
gst_element_set_state(data->filesink, GST_STATE_NULL);
g_object_set(data->filesink, "location", path, NULL);
gst_element_set_state(data->filesink, GST_STATE_PLAYING);
} else {
g_warning("Test %s: Unable to open raw audio file %s.\n", data->request_id, path);
}
}
gst_element_set_state(data->pipeline, GST_STATE_PLAYING);
gst_element_set_state(data->filesink, GST_STATE_PLAYING);
g_printerr("Test Started request\n");
}
void process_data(Decoder2Data * obj, char * audio, int audioSize) {
GstFlowReturn ret;
GstBuffer * buf = gst_buffer_new_and_alloc(audioSize);
gst_buffer_fill(buf, 0, audio, audioSize);
g_signal_emit_by_name (obj->appsrc, "push-buffer", buf, &ret);
if(ret != GST_FLOW_OK)
g_warning("Test Pushing audio resulted in flow state %d\n", ret);
}
void end_request(Decoder2Data* data) {
GstFlowReturn ret;
g_signal_emit_by_name (data->appsrc, "end-of-stream", &ret);
}
GstElement * createElement(const char* name, const char* factoryName) {
GstElement * ret = gst_element_factory_make(name, factoryName);
if (!ret)
g_printerr ("Test failed to create element of type '%s'\n", name);
return ret;
}
// Handler for the pad-added signal
static void _connect_decoder(GstElement *src, GstPad *pad, Decoder2Data *data) {
g_debug("Test _connect_decoder\n");
GstPad * sink_pad = gst_element_get_static_pad (data->audioconvert, "sink");
GstPadLinkReturn ret = gst_pad_link(pad, sink_pad);
if (GST_PAD_LINK_FAILED (ret))
g_printerr("Test Link failed with GstPadLinkError %d.\n", ret);
else
g_debug("Test Link succeeded.\n");
g_debug("Test Connected audio decoder\n");
}
void gstLink(GstElement * src, GstElement * target) {
if (!gst_element_link (src, target))
g_printerr ("Test ----------------- elements could not be linked.\n");
}
static void _on_eos(GstElement *src, GstPad *pad, Decoder2Data *data) {
g_debug("Test _on_eos\n");
finish_request(data);
}
static void _on_error(GstElement *src, GstMessage *pad, Decoder2Data *data) {
g_debug("Test _on_error\n");
GError *err = NULL;
gchar *dbg_info = NULL;
gst_message_parse_error (pad, &err, &dbg_info);
if (err) {
size_t len = strlen(err->message);
g_printerr("ERROR: %s", err->message);
}
g_error_free(err);
g_free(dbg_info);
finish_request(data);
}
void create_and_link(Decoder2Data * data) {
data->appsrc = createElement("appsrc", "appsrc");
data->decodebin = createElement("decodebin", "decodebin");
data->audioconvert = createElement("audioconvert", "audioconvert");
data->audioresample = createElement("audioresample", "audioresample");
data->queue1 = createElement("capsfilter", "capsfilter");
data->filesink = createElement("filesink", "filesink");
g_object_set (data->appsrc, "is-live", TRUE, NULL);
const gchar *caps_str = "audio/x-raw, channels=1, rate=16000, format=S16LE";
GstCaps * caps = gst_caps_from_string(caps_str);
g_object_set (data->queue1, "caps", caps, NULL);
g_object_set (data->filesink, "location", "/dev/null", NULL);
g_debug("Test Created GStreamer elements");
data->pipeline = gst_pipeline_new("pipeline");
if (!data->pipeline) {
g_printerr ("Test pipe line could not be created.\n");
}
// Add all elements to the pipeline
gst_bin_add_many (GST_BIN (data->pipeline), data->appsrc, data->decodebin, data->audioconvert, data->audioresample, data->queue1, data->filesink, NULL);
gstLink(data->appsrc, data->decodebin);
g_signal_connect(data->decodebin, "pad-added", G_CALLBACK (_connect_decoder), data);
gstLink(data->audioconvert, data->audioresample);
gstLink(data->audioresample, data->queue1);
gstLink(data->queue1, data->filesink);
g_debug("Linked GStreamer elements\n");
// Create bus
data->bus = gst_element_get_bus(data->pipeline);
gst_bus_add_signal_watch(data->bus);
gst_bus_enable_sync_message_emission(data->bus);
gst_pipeline_use_clock((GstPipeline*)data->pipeline, (GstClock*)NULL);
g_signal_connect(data->bus, "message::eos", G_CALLBACK(_on_eos), data);
g_signal_connect(data->bus, "message::error", G_CALLBACK(_on_error), data);
}
void finish_request(Decoder2Data * data) {
g_printerr("Test finish_request %s\n", data->request_id);
if (data->outdir) {
gst_element_set_state(data->filesink, GST_STATE_NULL);
g_object_set(data->filesink, "location", "/dev/null", NULL);
gst_element_set_state(data->filesink, GST_STATE_PLAYING);
}
gst_element_set_state(data->pipeline, GST_STATE_NULL);
// Destroy the old pipeline.
gst_element_set_state(data->appsrc, GST_STATE_NULL);
gst_element_set_state(data->decodebin, GST_STATE_NULL);
gst_element_set_state(data->audioconvert, GST_STATE_NULL);
gst_element_set_state(data->audioresample, GST_STATE_NULL);
gst_element_set_state(data->queue1, GST_STATE_NULL);
gst_element_set_state(data->filesink, GST_STATE_NULL);
gst_object_unref(data->pipeline);
// Build a new pipeline
create_and_link(data);
gst_element_set_state(data->pipeline, GST_STATE_READY);
g_printerr("Rebuilt pipeline.");
g_printerr("Finished request complete.\n");
}
int create_pipeline(int argc, char *argv[], Decoder2Data * data) {
g_printerr("Test create_pipeline\n");
gst_init (&argc, &argv);
data->request_id = "<undefined>";
data->outdir = "./";
create_and_link(data);
g_debug("Setting pipeline to READY\n");
gst_element_set_state(data->pipeline, GST_STATE_READY);
g_debug("Set pipeline to READY\n");
return 0;
}
void closeGstreamer(Decoder2Data * data) {
gst_object_unref (data->bus);
gst_element_set_state (data->pipeline, GST_STATE_NULL);
gst_object_unref (data->pipeline);
}
#include <pthread.h>
void *g_loop_thread(void *ptr) {
g_debug("Test main loop thread started\n");
Decoder2Data * data = (Decoder2Data*) ptr;
data->main_loop = g_main_loop_new(NULL, FALSE);
g_debug("Test main loop created, executing g_main_loop_run\n");
g_main_loop_run(data->main_loop); // This is blocking
g_debug("Test main loop thread ENDED\n");
return NULL;
}
int main(int argc, char *argv[]) {
Decoder2Data data;
memset (&data, 0, sizeof (data));
create_pipeline(argc, argv, &data);
pthread_t thread;
int ret = pthread_create(&thread, NULL, g_loop_thread, &data);
if (ret != 0) {
g_printerr("Test Thread not started");
return -1;
}
usleep(250000); // Wait a bit to make sure the thread started
g_printerr("Test starting test\n");
data.request_id = "job1";
start_request("", &data);
FILE * file = fopen("./a.ogg", "rb");
int size = 86*1024/8/4;
char buffer[size];
int n;
while ((n = fread(buffer, 1, size, file)) > 0) {
g_printerr("read %d of data\n", n);
process_data(&data, buffer, n);
}
fclose(file);
g_printerr("finished reading data\n");
end_request(&data);
usleep(250000);
finish_request(&data);
// Switch to second request and do it again.
data.request_id = "job2";
start_request("", &data);
file = fopen("./a.ogg", "rb");
while ((n = fread(buffer, 1, size, file)) > 0) {
g_printerr("read %d of data\n", n);
process_data(&data, buffer, n);
}
fclose(file);
g_printerr("finished reading data again\n");
end_request(&data);
usleep(250000);
finish_request(&data);
g_printerr("waiting for the gstreamer thread to end...\n");
g_main_loop_quit (data.main_loop);
pthread_join(thread, NULL);
g_printerr("Closing\n");
closeGstreamer(&data);
g_printerr("Exit OK\n");
return 0;
}
I am facing an issue opening a raw H.264 stream of 8MP resolution, served over a TCP server from Android, in a Qt application. To open the stream in ffplay, I use the following command in a terminal, and it is able to play it:
ffplay -f h264 -codec:v h264 -probesize 32M <tcp://ipaddress:port>
But when I try to open the stream in the Qt application, avformat_open_input() gives the error "Invalid data found while processing input".
Below is the code I am using in the Qt application:
av_register_all();
avcodec_register_all();
avformat_network_init();
AVFormatContext *refrenceFormatCtx = NULL;
SwsContext *img_convert_ctx;
AVIOContext *avio_ctx = NULL;
int video_stream_index = 0;
AVCodecContext* codec_ctx = NULL;
AVSampleFormat *fmt = NULL;
char errorsdef[100];
AVDictionary *options = NULL;
av_dict_set(&options, "video_size","3264x2448",0);
av_dict_set(&options,"pixel_format","yuv420p",0);
av_dict_set(&options, "f", "h264", 0);
av_dict_set(&options, "codec:v", "h264", 0);
av_dict_set(&options, "codec:a", "aac", 0);
av_dict_set(&options, "probesize", "32M", 0);
int err = avformat_open_input(&refrenceFormatCtx,"tcp://192.168.42.129:2226", NULL, &options);
av_strerror(err,errorsdef,100);
qDebug() << "OPening Stream error: "<< err << " "<< errorsdef;
if(err<0)
abort();
av_dict_free(&options);
Is this the correct way to set the options for avformat_open_input()? Are the parameters I set correct?
I found the answer to my question above. The code that fixes the issue and extracts RGB frames from the raw 8MP H.264 stream is as follows:
avcodec_register_all();
av_register_all();
avformat_network_init();
AVDictionary *options = NULL;
AVFormatContext *refrenceFormatCtx = NULL;
AVInputFormat *fmts = av_find_input_format("h264");
char errorsdef[100];
AVCodecContext* codec_ctx = NULL;
int video_stream_index = 0;
SwsContext *img_convert_ctx = NULL;
AVFrame* picture_yuv = NULL;
AVFrame* picture_rgb = NULL;
uint8_t* picture_buffer_rgb;
uint8_t *rgb_image_data;
int sizeofrgbpicture = 0;
int initialize_rgb_requirements=1;
picture_yuv = av_frame_alloc();
av_dict_set(&options, "flags", "bicubic", 0);
av_opt_set(refrenceFormatCtx,"f","h264", AV_OPT_SEARCH_CHILDREN);
av_opt_set(refrenceFormatCtx,"codec:v","h264",AV_OPT_SEARCH_CHILDREN);
av_opt_set(refrenceFormatCtx,"probesize","32M", AV_OPT_SEARCH_CHILDREN);
// Open video file
int err = avformat_open_input(&refrenceFormatCtx,"tcp://192.168.42.129:2226", fmts, &options);
if (!options) {
int dict_count = av_dict_count(options);
qDebug() << "dict_count " << dict_count;
}
av_strerror(err,errorsdef,100);
qDebug() << "OPening Stream error: "<< err << " "<< errorsdef;
if (refrenceFormatCtx!=NULL){
err = avformat_find_stream_info(refrenceFormatCtx, &options);
if( err< 0){
av_strerror(err,errorsdef,100);
qDebug() << "Not able to find stream: "<< err << " "<< errorsdef;
}
}else{
qDebug() << "referencecontext null";
exit(1);
}
//search video stream
for (int i = 0; i < (int)refrenceFormatCtx->nb_streams; i++) {
AVStream* s = refrenceFormatCtx->streams[i];
if (s->codec == NULL){
continue;
}
codec_ctx = (s->codec);
if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO){
video_stream_index = i;
}
}
AVPacket packet;
av_init_packet(&packet);
//open output file
AVFormatContext* output_ctx = avformat_alloc_context();
AVStream* stream = NULL;
//start reading packets from stream and emit data pointer to slot
av_read_play(refrenceFormatCtx); //play RTSP
avcodec_copy_context(codec_ctx, refrenceFormatCtx->streams[video_stream_index]->codec);
if (avcodec_open2(codec_ctx, avcodec_find_decoder(AV_CODEC_ID_H264), NULL) < 0){
qDebug() << "avcodec_open2 null";
}
while (av_read_frame(refrenceFormatCtx, &packet) >= 0) {
if (packet.stream_index == video_stream_index) { //packet is video
if (stream == NULL) { //create stream in file
stream = avformat_new_stream(output_ctx, refrenceFormatCtx->streams[video_stream_index]->codec->codec);
avcodec_copy_context(stream->codec, refrenceFormatCtx->streams[video_stream_index]->codec);
stream->sample_aspect_ratio = refrenceFormatCtx->streams[video_stream_index]->codec->sample_aspect_ratio;
}
int check = 0;
packet.stream_index = stream->id;
int result = avcodec_decode_video2(codec_ctx, picture_yuv, &check, &packet);
av_packet_unref(&packet); // av_free_packet() is deprecated; unreffing once is enough
if(result <= 0 || check == 0){
continue;
}
if(initialize_rgb_requirements)
{
sizeofrgbpicture = avpicture_get_size(AV_PIX_FMT_RGB24, codec_ctx->width, codec_ctx->height);
picture_rgb = av_frame_alloc();
picture_buffer_rgb = (uint8_t*) (av_malloc(sizeofrgbpicture));
avpicture_fill((AVPicture *) picture_rgb, picture_buffer_rgb, AV_PIX_FMT_RGB24, codec_ctx->width, codec_ctx->height);
img_convert_ctx = sws_getContext(codec_ctx->width, codec_ctx->height, AV_PIX_FMT_YUV420P, codec_ctx->width, codec_ctx->height, AV_PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL);
initialize_rgb_requirements=0;
}
int height = 0;
if(picture_yuv->data != NULL)
{
height = sws_scale(img_convert_ctx, ((AVPicture*)picture_yuv)->data, ((AVPicture*)picture_yuv)->linesize, 0, codec_ctx->height, ((AVPicture*)picture_rgb)->data,((AVPicture*)picture_rgb)->linesize);
}
rgb_image_data = (uint8_t *)malloc(sizeofrgbpicture * sizeof(uint8_t));
int ret = avpicture_layout((AVPicture *)picture_rgb, AV_PIX_FMT_RGB24, codec_ctx->width, codec_ctx->height, rgb_image_data, sizeofrgbpicture);
emit imageQueued(rgb_image_data, codec_ctx->width,codec_ctx->height);
}
msleep(1);
}
av_freep(&picture_buffer_rgb); // av_freep() expects the address of the pointer
av_frame_free(&picture_rgb);
avio_close(output_ctx->pb);
avformat_free_context(output_ctx);
avformat_close_input(&refrenceFormatCtx);
I came to know that for a raw H.264 stream we have to tell FFmpeg that the format is h264. For that I have used an AVInputFormat; to set other options like the video codec and probesize, I have used av_opt_set(). To set the default flags in FFmpeg, I have used av_dict_set(). I emit the data pointer to my required slot. If anyone wants to create a file from it, they can generate a .ppm file by writing this pointer out to a file.
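Condensed, the essential change is forcing the input format before opening the stream; a minimal sketch using only the calls that appear in the code above:
AVInputFormat *fmts = av_find_input_format("h264"); // tell FFmpeg the input is raw H.264
int err = avformat_open_input(&refrenceFormatCtx, "tcp://192.168.42.129:2226", fmts, &options);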
I'm trying to play one MP3 song after another from a list. The problem is that the "wait" command freezes the whole program until all the songs finish, and what I want is for the other functions, such as "pause" or "stop", to keep working while a song is playing. I don't have any trouble when I play one song individually.
I read some documentation and it looks like the "status" command is the solution, but I don't understand how to use it.
Here is the code for "case IDC_Play:":
if ((SendDlgItemMessage(hDlg, IDC_CHECK1, BM_GETSTATE, NULL, NULL)) == BST_CHECKED)
{ // here goes the code for playing only one song }
else {
int cuenta = SendDlgItemMessage(hDlg, IDC_LIST1, LB_GETCOUNT, NULL, NULL);
int indice = 0;
while (indice != cuenta) {
char auxi[10] = "";
UINT index = SendDlgItemMessage(hDlg, IDC_LIST1, LB_GETCURSEL, 0, 0);
SendDlgItemMessage(hDlg, IDC_LIST1, LB_GETTEXT, index, (LPARAM)auxi);
if (strcmp(auxi, "") == 0) {
MessageBox(NULL, "No se selecciono cancion", "ERROR", MB_ICONERROR);
}
else {
char Cnum[10];
aux = inicio;
aux = aux->sig;
do {
_itoa_s(aux->folio, Cnum, 10);
if (strcmp(auxi, Cnum) == 0) {
strcpy_s(szFileName, aux->mptres);
bmp1 = (HBITMAP)SendDlgItemMessage(hDlg, IDC_Imagen1, STM_GETIMAGE, IMAGE_BITMAP, 0);
bmp2 = (HBITMAP)LoadImage(NULL, aux->imagen, IMAGE_BITMAP, 140, 120, LR_LOADFROMFILE);
SendDlgItemMessage(hDlg, IDC_Imagen1, STM_SETIMAGE, IMAGE_BITMAP, (LPARAM)bmp2);
}
else {
aux = aux->sig;
}
} while (strcmp(auxi, Cnum) != 0); // strcmp() only guarantees the sign of the result, not -1/+1
ShowWindow(GetDlgItem(hDlg, IDC_Play1), SW_HIDE);
ShowWindow(GetDlgItem(hDlg, IDC_Pause1), SW_SHOW);
char comillas[MAX_PATH] = "\"";
char comillas2[MAX_PATH] = "\"";
strcat_s(comillas, szFileName);
strcat_s(comillas, comillas2);
char musica[MAX_PATH] = "open ";
strcat_s(musica, comillas);
strcat_s(musica, " type mpegvideo");
mciSendString(musica, NULL, 0, 0);
char musica1[MAX_PATH] = "play ";
char esperar[MAX_PATH] = " wait";
strcat_s(musica1, comillas);
strcat_s(musica1, esperar);
mciSendString(musica1, NULL, 0, 0);
char parar[MAX_PATH] = "stop ";
strcat_s(parar, comillas);
mciSendString(parar, NULL, 0, 0);
char cerrar[MAX_PATH] = "close ";
strcat_s(cerrar, comillas);
mciSendString(cerrar, NULL, 0, 0);
index++;
SendDlgItemMessage(hDlg, IDC_LIST1, LB_SETCURSEL, index, NULL);
SendDlgItemMessage(hDlg, IDC_LIST2, LB_SETCURSEL, index, NULL);
SendDlgItemMessage(hDlg, IDC_LIST3, LB_SETCURSEL, index, NULL);
SendDlgItemMessage(hDlg, IDC_LIST4, LB_SETCURSEL, index, NULL);
SendDlgItemMessage(hDlg, IDC_LIST5, LB_SETCURSEL, index, NULL);
indice = index;
} //else
} //while
}//else
Maybe you should use the "notify" flag and the MM_MCINOTIFY callback message to avoid the blocking call that freezes the whole application?
Please see an example of using the callback and flag there at CodeProject: http://www.codeproject.com/Articles/17279/Using-mciSendString-to-play-media-files
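A minimal sketch of the idea, reusing the variables from the question (hedged; the MM_MCINOTIFY handling follows the Win32 documentation, not tested against this exact program):
// Start playback without blocking; hDlg receives MM_MCINOTIFY when playback ends.
char musica1[MAX_PATH] = "play ";
strcat_s(musica1, comillas);
strcat_s(musica1, " notify");
mciSendString(musica1, NULL, 0, hDlg);
// In the dialog procedure, react to the notification:
case MM_MCINOTIFY:
    if (wParam == MCI_NOTIFY_SUCCESSFUL) {
        // stop/close the finished device, advance the list selection,
        // and issue the next "open" and "play ... notify" here
    }
    break;
This way the message loop keeps running, so "pause" and "stop" stay responsive between songs.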
I want to decode an audio file and store the PCM/int values in an array. For that I use GStreamer and the giostreamsink, which gives me a GMemoryOutputStream. So far so good, but how can I now access or loop through the GMemoryOutputStream?
What I did to get the gpointer:
gpointer out_data = g_memory_output_stream_get_data(G_MEMORY_OUTPUT_STREAM(stream));
but what can I do now with that gpointer? How can I access the stream data?
The full code that I have so far:
#include <string>
#include <stdio.h>
#include <iostream> // needed for std::cout below
#include <gst/gst.h>
#include <gio/gio.h>
#include <boost/thread.hpp>
static void on_pad_added(GstElement *decodebin,
GstPad *pad,
gpointer data) {
GstElement *convert = (GstElement *) data;
GstCaps *caps;
GstStructure *str;
GstPad *audiopad;
audiopad = gst_element_get_static_pad(convert, "sink");
if (GST_PAD_IS_LINKED(audiopad)) {
g_object_unref(audiopad);
return;
}
caps = gst_pad_get_caps(pad);
str = gst_caps_get_structure(caps, 0);
printf("here %s\n",gst_structure_get_name(str));
if (!g_strrstr(gst_structure_get_name(str), "audio")) {
gst_caps_unref(caps);
gst_object_unref(audiopad);
return;
}
gst_caps_unref(caps);
gst_pad_link(pad, audiopad);
g_object_unref(audiopad);
}
static gboolean bus_call(GstBus *bus,
GstMessage *msg,
gpointer data) {
GMainLoop *loop = (GMainLoop*)data;
switch (GST_MESSAGE_TYPE(msg)) {
case GST_MESSAGE_EOS:
g_print ("End of stream\n");
g_main_loop_quit(loop);
break;
case GST_MESSAGE_ERROR: {
gchar *debug;
GError *error;
gst_message_parse_error(msg, &error, &debug);
g_free (debug);
g_printerr("Error: %s\n", error->message);
g_error_free(error);
g_main_loop_quit(loop);
break;
}
default:
break;
}
return true;
}
int main (int argc, char **argv) {
gst_init(&argc, &argv);
GstElement *pipeline, *source, *decode, *sink, *convert;
int rate = 44100;
int channels = 1;
int depth = 16;
bool output_signed = true;
GMainLoop *loop;
GstBus *bus;
guint bus_watch_id;
GMemoryOutputStream *stream;
gpointer out_data;
// loop
loop = g_main_loop_new(NULL, false);
// pipeline
pipeline = gst_pipeline_new("test_pipeline");
// sink
stream = G_MEMORY_OUTPUT_STREAM(g_memory_output_stream_new(NULL, 0, (GReallocFunc)g_realloc, (GDestroyNotify)g_free));
sink = gst_element_factory_make ("giostreamsink", "sink");
g_object_set(G_OBJECT(sink), "stream", stream, NULL);
// source
source = gst_element_factory_make("filesrc", "source");
g_object_set(G_OBJECT(source), "location", "/home/sam/Desktop/audio/audio.wav", NULL);
// convert
convert = gst_element_factory_make("audioconvert", "convert");
// decode
decode = gst_element_factory_make("decodebin", "decoder");
// link decode to convert
g_signal_connect(decode, "pad-added", G_CALLBACK(on_pad_added), convert);
// bus
bus = gst_pipeline_get_bus(GST_PIPELINE (pipeline));
bus_watch_id = gst_bus_add_watch(bus, bus_call, loop);
gst_object_unref(bus);
// add elements into pipeline
gst_bin_add_many(GST_BIN(pipeline), source, decode, convert, sink, NULL);
// link source to decode
gst_element_link(source, decode);
// caps
GstCaps *caps;
caps = gst_caps_new_simple("audio/x-raw-int",
"rate", G_TYPE_INT, rate,
"channels", G_TYPE_INT, channels,
"width", G_TYPE_INT, depth,
"depth", G_TYPE_INT, depth,
"signed", G_TYPE_BOOLEAN, output_signed,
NULL);
// link convert to sink
gst_element_link_filtered(convert, sink, caps);
gst_caps_unref(caps);
// start playing
gst_element_set_state(GST_ELEMENT(pipeline), GST_STATE_PLAYING);
// iterate
g_print("Running...\n");
g_main_loop_run(loop);
// out of the main loop, clean up nicely
g_print("Returned, stopping playback\n");
gst_element_set_state(pipeline, GST_STATE_NULL);
g_print("Deleting pipeline\n");
gst_object_unref(GST_OBJECT(pipeline));
g_source_remove (bus_watch_id);
g_main_loop_unref(loop);
// get data
g_print("get data\n");
out_data = g_memory_output_stream_get_data(G_MEMORY_OUTPUT_STREAM(stream));
unsigned long size = g_memory_output_stream_get_size(G_MEMORY_OUTPUT_STREAM(stream));
unsigned long sizeData = g_memory_output_stream_get_data_size(G_MEMORY_OUTPUT_STREAM(stream));
std::cout << "stream size: " << size << std::endl;
std::cout << "stream data size: " << sizeData << std::endl;
for (int i = 0; i < 5; ++i) {
// std::cout << out_data[i] << std::endl; // not working
}
return 0;
}
I solved the problem: I had to cast the gpointer to gint16*:
std::vector<int16_t> data;
for (unsigned long i = 0; i < sizeData/2; ++i) {
data.push_back(((gint16*)out_data)[i]);
}
The cast has to be changed depending on the depth you use; e.g. for depth 8 and unsigned samples, cast to guint8* and adjust the for-loop limit accordingly.
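For instance, a sketch of the 8-bit unsigned case (assuming the caps were changed to width/depth 8 and signed=false; one sample is one byte, so the loop runs over sizeData directly):
std::vector<uint8_t> data8;
for (unsigned long i = 0; i < sizeData; ++i) {
    data8.push_back(((guint8*)out_data)[i]);
}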
The full code for those interested:
#include <string>
#include <stdio.h>
#include <stdint.h> // for int16_t
#include <iostream> // needed for std::cout below
#include <vector> // needed for std::vector below
#include <gst/gst.h>
#include <gio/gio.h>
#include <boost/thread.hpp>
static void on_pad_added(GstElement *decodebin,
GstPad *pad,
gpointer data) {
GstElement *convert = (GstElement *) data;
GstCaps *caps;
GstStructure *str;
GstPad *audiopad;
audiopad = gst_element_get_static_pad(convert, "sink");
if (GST_PAD_IS_LINKED(audiopad)) {
g_object_unref(audiopad);
return;
}
caps = gst_pad_get_caps(pad);
str = gst_caps_get_structure(caps, 0);
printf("here %s\n",gst_structure_get_name(str));
if (!g_strrstr(gst_structure_get_name(str), "audio")) {
gst_caps_unref(caps);
gst_object_unref(audiopad);
return;
}
gst_caps_unref(caps);
gst_pad_link(pad, audiopad);
g_object_unref(audiopad);
}
static gboolean bus_call(GstBus *bus,
GstMessage *msg,
gpointer data) {
GMainLoop *loop = (GMainLoop*)data;
switch (GST_MESSAGE_TYPE(msg)) {
case GST_MESSAGE_EOS:
g_print ("End of stream\n");
g_main_loop_quit(loop);
break;
case GST_MESSAGE_ERROR: {
gchar *debug;
GError *error;
gst_message_parse_error(msg, &error, &debug);
g_free (debug);
g_printerr("Error: %s\n", error->message);
g_error_free(error);
g_main_loop_quit(loop);
break;
}
default:
break;
}
return true;
}
int main (int argc, char **argv) {
gst_init(&argc, &argv);
GstElement *pipeline, *source, *decode, *sink, *convert;
int rate = 44100;
int channels = 1;
int depth = 16;
bool output_signed = true;
GMainLoop *loop;
GstBus *bus;
guint bus_watch_id;
GMemoryOutputStream *stream;
gpointer out_data;
// loop
loop = g_main_loop_new(NULL, false);
// pipeline
pipeline = gst_pipeline_new("test_pipeline");
// sink
stream = G_MEMORY_OUTPUT_STREAM(g_memory_output_stream_new(NULL, 0, (GReallocFunc)g_realloc, (GDestroyNotify)g_free));
sink = gst_element_factory_make ("giostreamsink", "sink");
g_object_set(G_OBJECT(sink), "stream", stream, NULL);
// source
source = gst_element_factory_make("filesrc", "source");
g_object_set(G_OBJECT(source), "location", "/home/sam/Desktop/audio/audio.wav", NULL);
// convert
convert = gst_element_factory_make("audioconvert", "convert");
// decode
decode = gst_element_factory_make("decodebin", "decoder");
// link decode to convert
g_signal_connect(decode, "pad-added", G_CALLBACK(on_pad_added), convert);
// bus
bus = gst_pipeline_get_bus(GST_PIPELINE (pipeline));
bus_watch_id = gst_bus_add_watch(bus, bus_call, loop);
gst_object_unref(bus);
// add elements into pipeline
gst_bin_add_many(GST_BIN(pipeline), source, decode, convert, sink, NULL);
// link source to decode
gst_element_link(source, decode);
// caps
GstCaps *caps;
caps = gst_caps_new_simple("audio/x-raw-int",
"rate", G_TYPE_INT, rate,
"channels", G_TYPE_INT, channels,
"width", G_TYPE_INT, depth,
"depth", G_TYPE_INT, depth,
"signed", G_TYPE_BOOLEAN, output_signed,
NULL);
// link convert to sink
gst_element_link_filtered(convert, sink, caps);
gst_caps_unref(caps);
// start playing
gst_element_set_state(GST_ELEMENT(pipeline), GST_STATE_PLAYING);
// iterate
g_print("Running...\n");
g_main_loop_run(loop);
// out of the main loop, clean up nicely
g_print("Returned, stopping playback\n");
gst_element_set_state(pipeline, GST_STATE_NULL);
g_print("Deleting pipeline\n");
gst_object_unref(GST_OBJECT(pipeline));
g_source_remove (bus_watch_id);
g_main_loop_unref(loop);
// get data
g_print("get data\n");
out_data = g_memory_output_stream_get_data(G_MEMORY_OUTPUT_STREAM(stream));
unsigned long size = g_memory_output_stream_get_size(G_MEMORY_OUTPUT_STREAM(stream));
unsigned long sizeData = g_memory_output_stream_get_data_size(G_MEMORY_OUTPUT_STREAM(stream));
std::cout << "stream size: " << size << std::endl;
std::cout << "stream data size: " << sizeData << std::endl;
// access data and store in vector
std::vector<int16_t> data;
for (unsigned long i = 0; i < sizeData/2; ++i) {
data.push_back(((gint16*)out_data)[i]);
}
return 0;
}
I am having one heck of a time reaching my end goal with this one and really hope someone can help me out. I am very new to GStreamer and have been working on this issue for several days. The end goal is a piece of software that takes video from my Point Grey Blackfly USB 3 camera, compresses it, then sends it out over a udpsink through GStreamer. For now I am simply attempting to show that this is possible by getting the video from the camera to display via GStreamer. In other words, I am trying to do something like this:
Appsrc(camera)->ffmpegcolorspace->xvimagesink
just to get some sort of result...
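For reference, I believe the rough gst-launch equivalent of that chain (with videotestsrc standing in for the camera) would be:
gst-launch-0.10 videotestsrc ! ffmpegcolorspace ! xvimagesink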
My setup is as follows:
Nvidia Jetson TK1,
Ubuntu 14.04,
GStreamer 0.10,
OpenCV (installed but not currently used; I have been considering throwing that in too)
Below is the code I currently have; it does not work, but it compiles and runs.
#include <stdio.h>
#include <gst/gst.h>
#include <gst/app/gstappsrc.h>
#include <gst/video/video.h>
#include "FlyCapture2.h"
#include <iostream>
#include <sstream>
using namespace FlyCapture2;
using namespace std;
typedef struct {
GstPipeline *pipeline;
GstAppSrc *src;
GstElement *sink;
GstElement *encoder;
GstElement *decoder;
GstElement *ffmpeg;
GstElement *xvimagesink;
GMainLoop *loop;
guint sourceid;
FILE *file;
}gst_app_t;
static gst_app_t gst_app;
Camera camera;
#define BUFF_SIZE (1024)
void getImagePtr(guint8 * &ptr, gint &size);
static gboolean read_data(gst_app_t *app)
{
GstBuffer *buffer;
guint8 *ptr;
gint size;
GstFlowReturn ret;
getImagePtr(ptr, size);
//cout << size << endl;
// size = fread(ptr, 1, BUFF_SIZE, app->file);
if(size == 0){
ret = gst_app_src_end_of_stream(app->src);
g_debug("eos returned %d at %d\n", ret, __LINE__);
return FALSE;
}
GstCaps *caps = NULL;
std::stringstream video_caps_text;
video_caps_text << "video/x-raw-rgb,bpp=(int)24,depth=(int)24,endianness=(int)4321,red_mask=(int)16711680,green_mask=(int)65280,blue_mask=(int)255,width=(int)1288,height=(int)964,framerate=(fraction)0/1";
caps = gst_caps_from_string( video_caps_text.str().c_str() );
g_object_set( G_OBJECT(app->src), "caps", caps, NULL);
buffer = gst_buffer_new();
GST_BUFFER_MALLOCDATA(buffer) = ptr;
GST_BUFFER_SIZE(buffer) = size;
GST_BUFFER_DATA(buffer) = GST_BUFFER_MALLOCDATA(buffer);
{
GstCaps *caps_source = NULL;
std::stringstream video_caps_text;
video_caps_text << "video/x-raw-rgb,bpp=(int)24,depth=(int)24,endianness=(int)4321,red_mask=(int)16711680,green_mask=(int)65280,blue_mask=(int)255,width=(int)1288,height=(int)964,framerate=(fraction)0/1";
caps_source = gst_caps_from_string( video_caps_text.str().c_str() );
cout<<video_caps_text.str()<<endl;
if( !GST_IS_CAPS( caps_source) ){
cout<<"ERROR MAKING CAPS"<<endl;
exit(1);
}
gst_app_src_set_caps( GST_APP_SRC( app->src ), caps_source);
gst_buffer_set_caps( buffer, caps_source);
gst_caps_unref( caps_source );
}
ret = gst_app_src_push_buffer(app->src, buffer);
if(ret != GST_FLOW_OK){
g_debug("push buffer returned %d for %d bytes \n", ret, size);
return FALSE;
}
else if(ret == GST_FLOW_OK){
//cout<<"FLOW OK"<<endl;
}
if(!(size > BUFF_SIZE)){
cout<<"ISSUE FOUND"<<endl;
ret = gst_app_src_end_of_stream(app->src);
g_debug("eos returned %d at %d\n", ret, __LINE__);
return FALSE;
}
return TRUE;
}
static void start_feed (GstElement * pipeline, guint size, gst_app_t *app)
{
if (app->sourceid == 0) {
GST_DEBUG ("start feeding");
app->sourceid = g_idle_add ((GSourceFunc) read_data, app);
}
}
static void stop_feed (GstElement * pipeline, gst_app_t *app)
{
if (app->sourceid != 0) {
GST_DEBUG ("stop feeding");
g_source_remove (app->sourceid);
app->sourceid = 0;
}
}
static void on_pad_added(GstElement *element, GstPad *pad)
{
cout<<"PAD ADDED"<<endl;
GstCaps *caps;
GstStructure *str;
gchar *name;
GstPad *ffmpegsink;
GstPadLinkReturn ret;
g_debug("pad added");
caps = gst_pad_get_caps(pad);
str = gst_caps_get_structure(caps, 0);
cout<<"CAPS: "<<str<<endl;
g_assert(str);
name = (gchar*)gst_structure_get_name(str);
cout<<"NAME IS: "<<name<<endl;
g_debug("pad name %s", name);
if(g_strrstr(name, "video")){
ffmpegsink = gst_element_get_pad(gst_app.ffmpeg, "sink");
g_assert(ffmpegsink);
ret = gst_pad_link(pad, ffmpegsink);
g_debug("pad_link returned %d\n", ret);
gst_object_unref(ffmpegsink);
}
gst_caps_unref(caps);
}
static gboolean bus_callback(GstBus *bus, GstMessage *message, gpointer *ptr)
{
gst_app_t *app = (gst_app_t*)ptr;
switch(GST_MESSAGE_TYPE(message)){
case GST_MESSAGE_ERROR:{
gchar *debug;
GError *err;
gst_message_parse_error(message, &err, &debug);
g_print("Error %s\n", err->message);
g_error_free(err);
g_free(debug);
g_main_loop_quit(app->loop);
}
break;
case GST_MESSAGE_WARNING:{
gchar *debug;
GError *err;
const gchar *name;
gst_message_parse_warning(message, &err, &debug);
g_print("Warning %s\nDebug %s\n", err->message, debug);
name = GST_MESSAGE_SRC_NAME(message);
g_print("Name of src %s\n", name ? name : "nil");
g_error_free(err);
g_free(debug);
}
break;
case GST_MESSAGE_EOS:
g_print("End of stream\n");
g_main_loop_quit(app->loop);
break;
case GST_MESSAGE_STATE_CHANGED:
break;
default:
g_print("got message %s\n", \
gst_message_type_get_name (GST_MESSAGE_TYPE (message)));
break;
}
return TRUE;
}
void PrintError( Error error )
{
error.PrintErrorTrace();
}
void connectCamera(){
cout<<"STARTING CONNECTION FUNCTION"<<endl;
Error error;
BusManager busMgr;
unsigned int numCameras;
PGRGuid guid;
error = busMgr.GetNumOfCameras(&numCameras);
if (error != PGRERROR_OK)
{
PrintError (error);
}
cout << "Number of cameras detected: " << numCameras << endl;
for (unsigned int i=0; i < numCameras; i++)
{
error = busMgr.GetCameraFromIndex(i, &guid);
if (error != PGRERROR_OK)
{
PrintError( error );
}
}
// Connect the camera
error = camera.Connect( &guid );
if ( error != PGRERROR_OK )
{
std::cout << "Failed to connect to camera" << std::endl;
return;
}
else
std::cout << "CONNECTED!" << std::endl;
}
void getImagePtr( guint8 * &ptr, gint &size){
// Get the image
Image rawImage;
Error error = camera.RetrieveBuffer( &rawImage );
if ( error != PGRERROR_OK )
{
std::cout << "capture error" << std::endl;
}
// convert to rgb
Image bgrImage;
rawImage.Convert( FlyCapture2::PIXEL_FORMAT_BGR, &bgrImage );
// cout << rawImage.GetDataSize() << endl;
ptr = (guint8*)g_malloc(bgrImage.GetDataSize());
g_assert(ptr);
memcpy( ptr,bgrImage.GetData(), bgrImage.GetDataSize() );
size = bgrImage.GetDataSize();
// ptr = bgrImage.GetData();
}
int main(int argc, char *argv[])
{
gst_app_t *app = &gst_app;
GstBus *bus;
GstStateChangeReturn state_ret;
if(argc != 2){
printf("File name not specified\n");
return 1;
}
connectCamera();
camera.StartCapture();
app->file = fopen(argv[1], "r");
g_assert(app->file);
gst_init(NULL, NULL);
app->pipeline = (GstPipeline*)gst_pipeline_new("mypipeline");
bus = gst_pipeline_get_bus(app->pipeline);
gst_bus_add_watch(bus, (GstBusFunc)bus_callback, app);
gst_object_unref(bus);
app->src = (GstAppSrc*)gst_element_factory_make("appsrc", "mysrc");
//app->encoder = gst_element_factory_make("nv_omx_h264enc", "nvidEnc");
//app->decoder = gst_element_factory_make("decodebin", "mydecoder");
app->ffmpeg = gst_element_factory_make("ffmpegcolorspace", "myffmpeg");
app->xvimagesink = gst_element_factory_make("xvimagesink", "myvsink");
g_assert(app->src);
//g_assert(app->encoder);
//g_assert(app->decoder);
g_assert(app->ffmpeg);
g_assert(app->xvimagesink);
g_signal_connect(app->src, "need-data", G_CALLBACK(start_feed), app);
g_signal_connect(app->src, "enough-data", G_CALLBACK(stop_feed), app);
//g_signal_connect(app->decoder, "pad-added",
// G_CALLBACK(on_pad_added), app->decoder);
//gst_bin_add_many(GST_BIN(app->pipeline), (GstElement*)app->src, app->encoder,
//app->decoder, app->ffmpeg, app->xvimagesink, NULL);
gst_bin_add_many(GST_BIN(app->pipeline), (GstElement*)app->src, app->ffmpeg, app->xvimagesink, NULL);
//if(!gst_element_link((GstElement*)app->src, app->encoder)){
//g_warning("failed to link src anbd decoder");
//}
//if(!gst_element_link(app->encoder, app->decoder)){
// g_warning("failed to link encoder and decoder");
//}
if(!gst_element_link(app->ffmpeg, app->xvimagesink)){
g_warning("failed to link ffmpeg and xvsink");
}
state_ret = gst_element_set_state((GstElement*)app->pipeline, GST_STATE_PLAYING);
g_warning("set state returned %d\n", state_ret);
app->loop = g_main_loop_new(NULL, FALSE);
//GstCaps *appsrcCaps = NULL;
//appsrcCaps = gst_video_format_new_caps(GST_VIDEO_FORMAT_BGR, 1288, 964, 0, 1, 4, 3);
//gst_app_src_set_caps(GST_APP_SRC(app->src), appsrcCaps);
g_main_loop_run(app->loop);
camera.StopCapture();
camera.Disconnect();
state_ret = gst_element_set_state((GstElement*)app->pipeline, GST_STATE_NULL);
g_warning("set state null returned %d\n", state_ret);
return 0;
}
I keep getting an "Internal data flow error" on every run and I am not sure what causes it. I think one of my issues may be the caps, but as I said, I am very new to this.
Any help will be greatly appreciated. Let me know if there is anything else I can add to make this clearer. Thank you so much!
At first look (I haven't gone into details), it seems you link ffmpeg with xvimagesink only; try linking your src element as well:
gst_element_link(app->src, app->ffmpeg);
You can read more about linking elements here: http://gstreamer.freedesktop.org/data/doc/gstreamer/head/manual/html/section-elements-link.html
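With the error checking used elsewhere in the question's code, that would look something like this (the cast matches how app->src is declared there):
if (!gst_element_link((GstElement*)app->src, app->ffmpeg)) {
    g_warning("failed to link src and ffmpegcolorspace");
}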