Related
So I have a program that reads an opengl window and encodes the read data as a video. Now through a series of experimentation I have learned that the bit format of my glfw window is 8:8:8 as returned by glfwGetVideoMode(monitor). So I use this function to read the window:
glReadPixels(0, 0,gl_width, gl_height,GL_RGBA, GL_UNSIGNED_BYTE, (GLvoid*) Buffer);
and I simply encode it in the AV_PIX_FMT_YUV420P format.
Under normal circumstances this method works just fine. However, when I actually run the program, the output I get, as opposed to what I can see in the glfw window, is really low resolution and a bit pixelated.
Here is what my GLFW window looks like:
Now this is what I want it to look like. It looks just fine on the opengl window, and I encode it directly without altering Buffer.
And here is what the encoded result, test.mp4 looks like when I run it using mplayer or similar software:
It's a lot more blurry and pixelated compared to the GLFW window. With some experimentation, and following an answer to another question I asked, I used avcodec_find_best_pix_fmt_of_list((*codec)->pix_fmts, AV_PIX_FMT_RGBA, 1, &ret) and it returned 13, which led me to believe AV_PIX_FMT_YUVJ422P is the best option for this conversion to avoid a blurry/pixelated result. However, no matter which format I pass, every single one gives an error except AV_PIX_FMT_YUV420P. The error is:
[mpeg4 # 0x558e74f47900] Specified pixel format yuvj422p is invalid or not supported
I have no idea why this is happening, as the format is bound to a define and it is changed throughout the entire program when I change the define.
Here is my encoder so far (I have trimmed some parts):
video_encoder.cpp:
int video_encoder::write_frame(AVFormatContext *fmt_ctx, AVCodecContext *c,
                               AVStream *st, AVFrame *frame, AVPacket *pkt)
{
    // Push one frame (or a NULL flush frame) into the encoder, then drain
    // every packet it has ready and hand each one to the muxer.
    // Returns 1 when encoding has finished (EOF), 0 otherwise; exits on error.
    int status = avcodec_send_frame(c, frame);
    if (status < 0) {
        std::cout << "Error sending a frame to the encoder: " << status << std::endl;
        exit(1);
    }
    for (;;) {
        status = avcodec_receive_packet(c, pkt);
        if (status == AVERROR(EAGAIN) || status == AVERROR_EOF)
            break;
        if (status < 0) {
            std::cout << "Error encoding a frame: " << status << std::endl;
            exit(1);
        }
        /* rescale packet timestamps from codec time base to stream time base */
        av_packet_rescale_ts(pkt, c->time_base, st->time_base);
        pkt->stream_index = st->index;
        /* av_interleaved_write_frame() takes ownership of pkt's contents and
         * resets pkt, so no unreferencing is necessary here. This would be
         * different if one used av_write_frame(). */
        status = av_interleaved_write_frame(fmt_ctx, pkt);
        if (status < 0) {
            std::cout << "Error while writing output packet: " << status << std::endl;
            exit(1);
        }
    }
    return status == AVERROR_EOF ? 1 : 0;
}
/* Add an output stream. */
// Finds the encoder for codec_id, creates the muxer stream and an encoder
// context, and fills in the encoding parameters (resolution, bitrate, time
// base, pixel format). The context is opened later in open_video().
// Exits on any lookup/allocation failure.
void video_encoder::add_stream(OutputStream *ost, AVFormatContext *oc,
const AVCodec **codec,
enum AVCodecID codec_id)
{
    AVCodecContext *c;
    int i;
    /* find the encoder */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        fprintf(stderr, "Could not find encoder for '%s'\n",
            avcodec_get_name(codec_id));
        exit(1);
    }
    ost->tmp_pkt = av_packet_alloc();
    if (!ost->tmp_pkt) {
        fprintf(stderr, "Could not allocate AVPacket\n");
        exit(1);
    }
    ost->st = avformat_new_stream(oc, NULL);
    if (!ost->st) {
        fprintf(stderr, "Could not allocate stream\n");
        exit(1);
    }
    ost->st->id = oc->nb_streams-1;
    c = avcodec_alloc_context3(*codec);
    if (!c) {
        fprintf(stderr, "Could not alloc an encoding context\n");
        exit(1);
    }
    ost->enc = c;
    switch ((*codec)->type) {
    case AVMEDIA_TYPE_AUDIO:
    // NOTE(review): the audio case was trimmed from this listing ("...");
    // as shown there is no break here, so control would fall through into
    // the video case — confirm against the full source.
    ...
    case AVMEDIA_TYPE_VIDEO:
        c->codec_id = codec_id;
        // NOTE(review): 10 kbit/s is an extremely low video bitrate — this,
        // rather than the chosen pixel format, is the likely cause of the
        // blurry/pixelated output described in the question.
        c->bit_rate = 10000;
        /* Resolution must be a multiple of two. */
        c->width = width;
        c->height = height;
        /* timebase: This is the fundamental unit of time (in seconds) in terms
         * of which frame timestamps are represented. For fixed-fps content,
         * timebase should be 1/framerate and timestamp increments should be
         * identical to 1. */
        ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE }; // *frame_rate
        c->time_base = ost->st->time_base;
        c->gop_size = 7; /* emit one intra frame every twelve frames at most */
        c->pix_fmt = STREAM_PIX_FMT;
        //if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO)
        //    c->max_b_frames = 2;
        if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
            /* Needed to avoid using macroblocks in which some coeffs overflow.
             * This does not happen with normal video, it just happens here as
             * the motion of the chroma plane does not match the luma plane. */
            c->mb_decision = 2;
        }
        if ((*codec)->pix_fmts){
            //c->pix_fmt = (*codec)->pix_fmts[0];
            std::cout << "NEW FORMAT : " << c->pix_fmt << std::endl;
        }
        int ret;
        // NOTE(review): the best-format query result is only printed, never
        // assigned to c->pix_fmt, so it has no effect on the encoder.
        avcodec_find_best_pix_fmt_of_list((*codec)->pix_fmts, AV_PIX_FMT_RGBA, 1, &ret);
        std::cout << "Desired format is: " << ret << std::endl;
        break;
    }
    /* Some formats want stream headers to be separate. */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
/**************************************************************/
/* video output */
AVFrame* video_encoder::alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
{
    // Allocate an AVFrame together with its pixel buffers.
    // Returns NULL if the frame itself cannot be allocated; hard-exits if
    // the pixel buffers cannot.
    AVFrame *frm = av_frame_alloc();
    if (frm == NULL)
        return NULL;
    frm->format = pix_fmt;
    frm->width  = width;
    frm->height = height;
    /* allocate the buffers for the frame data */
    if (av_frame_get_buffer(frm, 0) < 0) {
        fprintf(stderr, "Could not allocate frame data.\n");
        exit(1);
    }
    return frm;
}
void video_encoder::open_video(AVFormatContext *oc, const AVCodec *codec,
                               OutputStream *ost, AVDictionary *opt_arg)
{
    // Opens the encoder context prepared by add_stream(), allocates the
    // reusable output frame, and copies the codec parameters to the muxer
    // stream. Exits on any failure.
    int ret;
    AVCodecContext *c = ost->enc;
    AVDictionary *opt = NULL;
    av_dict_copy(&opt, opt_arg, 0);
    /* open the codec */
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        // BUG FIX: the original printed the int error code through "%s",
        // which is undefined behavior; print it as a number instead.
        fprintf(stderr, "Could not open video codec: %d\n", ret);
        exit(1);
    }
    /* allocate and init a re-usable frame */
    ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!ost->frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }
    /* If the output format is not YUV420P, then a temporary YUV420P
     * picture is needed too. It is then converted to the required
     * output format. */
    ost->tmp_frame = NULL;
    /* copy the stream parameters to the muxer */
    ret = avcodec_parameters_from_context(ost->st->codecpar, c);
    if (ret < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }
}
void video_encoder::set_frame_yuv_from_rgb(AVFrame *frame, struct SwsContext *sws_context) {
    // Converts the RGBA capture buffer (rgb_data) into `frame`'s
    // STREAM_PIX_FMT planes.
    // BUG FIX: the context created here was never freed, leaking one
    // SwsContext per encoded frame. (`sws_context` is passed by value, so
    // the caller's ost->sws_ctx was never updated anyway.) Create the
    // context, use it, and free it before returning.
    const int in_linesize[1] = { 4 * width };
    sws_context = sws_getContext(
        width, height, AV_PIX_FMT_RGBA,
        width, height, STREAM_PIX_FMT,
        SCALE_FLAGS, 0, 0, 0);
    if (!sws_context) {
        // Guard against a NULL context before sws_scale dereferences it.
        fprintf(stderr, "Could not initialize the conversion context\n");
        exit(1);
    }
    sws_scale(sws_context, (const uint8_t * const *)&rgb_data, in_linesize, 0,
              height, frame->data, frame->linesize);
    sws_freeContext(sws_context);
}
AVFrame* video_encoder::get_video_frame(OutputStream *ost)
{
    // Produce the next frame to encode, or NULL once STREAM_DURATION
    // (milliseconds) worth of frames has been generated.
    AVCodecContext *c = ost->enc;
    const bool past_end = av_compare_ts(ost->next_pts, c->time_base,
                                        (float) STREAM_DURATION / 1000, (AVRational){ 1, 1 }) > 0;
    if (past_end)
        return NULL;
    /* The encoder may still hold a reference to the frame internally;
     * make sure we do not overwrite it while it is in use. */
    if (av_frame_make_writable(ost->frame) < 0)
        exit(1);
    set_frame_yuv_from_rgb(ost->frame, ost->sws_ctx);
    ost->frame->pts = ost->next_pts;
    ost->next_pts += 1;
    return ost->frame;
}
/*
 * Encode one video frame and send it to the muxer.
 * Returns 1 when encoding is finished, 0 otherwise.
 */
int video_encoder::write_video_frame(AVFormatContext *oc, OutputStream *ost)
{
    // Pull the next frame (NULL at end-of-stream, which flushes the encoder)
    // and push it through the encode/mux pipeline.
    AVFrame *next = get_video_frame(ost);
    return write_frame(oc, ost->enc, ost->st, next, ost->tmp_pkt);
}
// Release everything the stream owns: encoder context, frames and packet.
void video_encoder::close_stream(AVFormatContext *oc, OutputStream *ost)
{
    avcodec_free_context(&ost->enc);
    av_frame_free(&ost->frame);
    av_frame_free(&ost->tmp_frame);
    av_packet_free(&ost->tmp_pkt);
    // NOTE(review): ost->sws_ctx / ost->swr_ctx are never freed anywhere in
    // the visible code — confirm ownership before re-enabling these.
    //sws_freeContext(ost->sws_ctx);
    //swr_free(&ost->swr_ctx);
}
/**************************************************************/
/* media file output */
// Point the encoder at the caller-owned RGBA pixel buffer that the next
// frame will be converted from. The buffer is not copied.
// NOTE(review): `audio_only` is ignored in the visible code.
void video_encoder::set_encode_framebuffer(uint8_t* data, bool audio_only)
{
    rgb_data = data;
}
// Finalize the output file. Order matters: the trailer must be written
// before codecs are freed and the file/context are closed.
video_encoder::~video_encoder()
{
    av_write_trailer(enc_inf.oc);
    /* Close each codec. */
    if (enc_inf.have_video)
        close_stream(enc_inf.oc, &enc_inf.video_st);
    if (!(enc_inf.fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_closep(&enc_inf.oc->pb);
    /* free the stream */
    avformat_free_context(enc_inf.oc);
    std::cout << "Done, closing." << std::endl;
}
bool video_encoder::encode_one_frame()
{
    // Encode a single video frame when the video stream is active and is
    // not ahead of the audio stream; returns whether a frame was encoded.
    // (The inner condition already requires encode_video, so the original
    // outer "encode_video || encode_audio" test is preserved implicitly.)
    const bool video_due =
        enc_inf.encode_video &&
        (!enc_inf.encode_audio ||
         av_compare_ts(enc_inf.video_st.next_pts, enc_inf.video_st.enc->time_base,
                       enc_inf.audio_st.next_pts, enc_inf.audio_st.enc->time_base) <= 0);
    if (!video_due)
        return false;
    enc_inf.encode_video = !write_video_frame(enc_inf.oc, &enc_inf.video_st);
    return true;
}
video_encoder::video_encoder(int w, int h, float fps, unsigned int duration)
    :width(w), height(h), STREAM_FRAME_RATE(fps), STREAM_DURATION(duration)
{
    // Builds the muxing/encoding pipeline for ./output/video.mp4:
    // output context -> video stream -> opened codec -> output file -> header.
    std::string as_str = "./output/video.mp4";
    const char* filename = as_str.c_str();  // the libav* APIs take const char*

    // BUG FIX: the original used the comma operator
    //     enc_inf.video_st, enc_inf.audio_st = (struct OutputStream) { 0 };
    //     enc_inf.encode_audio, enc_inf.encode_video = 0;
    // which zero-initialized only audio_st and encode_video, leaving
    // video_st and encode_audio (and have_video/have_audio) uninitialized —
    // the source of the valgrind "uninitialised value" reports mentioned in
    // write_frame(). Initialize every member explicitly.
    enc_inf.video_st = OutputStream{};
    enc_inf.audio_st = OutputStream{};
    enc_inf.video_st.next_pts = 1;
    enc_inf.audio_st.next_pts = 1;
    enc_inf.encode_audio = 0;
    enc_inf.encode_video = 0;
    enc_inf.have_video = 0;
    enc_inf.have_audio = 0;

    int ret;

    /* allocate the output media context */
    avformat_alloc_output_context2(&enc_inf.oc, NULL, NULL, filename);
    if (!enc_inf.oc) {
        // Could not deduce the output format from the extension: use MPEG.
        avformat_alloc_output_context2(&enc_inf.oc, NULL, "mpeg", filename);
    }
    if (!enc_inf.oc) {
        std::cout << "FAILED" << std::endl;
        // Nothing usable can be built without an output context; the
        // original fell through and dereferenced the NULL pointer below.
        return;
    }
    enc_inf.fmt = enc_inf.oc->oformat;

    /* Add the video stream using the default format codec and initialize
     * its parameters. */
    if (enc_inf.fmt->video_codec != AV_CODEC_ID_NONE) {
        add_stream(&enc_inf.video_st, enc_inf.oc, &video_codec, enc_inf.fmt->video_codec);
        enc_inf.have_video = 1;
        enc_inf.encode_video = 1;
    }

    /* Now that all the parameters are set, open the video codec and
     * allocate the necessary encode buffers. */
    if (enc_inf.have_video)
        open_video(enc_inf.oc, video_codec, &enc_inf.video_st, opt);

    /* open the output file, if needed */
    if (!(enc_inf.fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&enc_inf.oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            // NOTE(review): failure is silently ignored here (as in the
            // original); the header write below will then also fail.
        }
    }

    /* Write the stream header, if any. */
    ret = avformat_write_header(enc_inf.oc, &opt);
    if (ret < 0) {
        VI_ERROR("Error occurred when opening output file:");
    }
}
video_encoder.h:
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
#define SCALE_FLAGS SWS_SPLINE
/* The output bit rate in bit/s */
#define OUTPUT_BIT_RATE 96000
/* The number of output channels */
#define OUTPUT_CHANNELS 2
// Per-stream encoding state bundled together for the muxer.
typedef struct OutputStream {
    AVStream *st;            // muxer stream
    AVCodecContext *enc;     // encoder context for this stream
    /* pts of the next frame that will be generated */
    int64_t next_pts;
    int samples_count;       // audio sample counter (audio path not shown here)
    AVFrame *frame;          // reusable frame handed to the encoder
    AVFrame *tmp_frame;      // staging frame for conversions (set to NULL in open_video)
    AVPacket *tmp_pkt;       // reusable packet for receiving/muxing
    float t, tincr, tincr2;  // NOTE(review): presumably audio tone-generator state; unused in the visible code
    struct SwsContext *sws_ctx;  // pixel-format conversion context
    struct SwrContext *swr_ctx;  // audio resampling context
} OutputStream;
// Aggregate of all muxing/encoding state owned by video_encoder.
typedef struct {
    OutputStream video_st, audio_st;  // per-stream state
    const AVOutputFormat *fmt;        // container format (owned by oc)
    AVFormatContext *oc;              // muxing context
    int have_video, have_audio, encode_video, encode_audio;  // stream presence/activity flags
    std::string name;                 // NOTE(review): unused in the visible code
} encode_info;
Again, changing STREAM_PIX_FMT anything other than AV_PIX_FMT_YUV420P causes the program to give the error.
What is the cause of this and how can I fix this? Also am I on the right track for fixing the pixelation problem? I'm using ubuntu.
I'm trying to direct the output from opengl into a mp4 file.
Currently, I'm getting the error "Invalid input" from the call avcodec_send_frame(c, frame). Why am I getting this error?
class VideoCapture2
{
public:
VideoCapture2(const char *filename, unsigned int width, unsigned int height, int framerate, unsigned int bitrate){
avformat_alloc_output_context2(&avFormatContext, NULL, NULL, filename);
if (!avFormatContext) {
printf("Could not deduce output format from file extension: using MPEG.\n");
avformat_alloc_output_context2(&avFormatContext, NULL, "mpeg", filename);
}
if (!avFormatContext)
exit(1);
avOutputFormat = avFormatContext->oformat;
// Video Stream
/* find the encoder */
AVCodecID codec_id = AV_CODEC_ID_H264;
codec = avcodec_find_encoder(codec_id);
if (!codec) {
fprintf(stderr, "Could not find encoder for '%s'\n",
avcodec_get_name(codec_id));
exit(1);
}
pkt = av_packet_alloc();
if (!pkt) {
fprintf(stderr, "Could not allocate AVPacket\n");
exit(1);
}
avStream = avformat_new_stream(avFormatContext, NULL);
if (!avStream) {
fprintf(stderr, "Could not allocate stream\n");
exit(1);
}
avStream->id = avFormatContext->nb_streams-1;
codec_ctx = avcodec_alloc_context3(codec);
if (!codec_ctx) {
fprintf(stderr, "Could not alloc an encoding context\n");
exit(1);
}
codec_ctx->codec_id = codec_id;
/* put sample parameters */
codec_ctx->bit_rate = bitrate;
/* resolution must be a multiple of two */
if(width % 2 != 0)
throw std::invalid_argument( "The width must be devisible by two" );
if(height % 2 != 0)
throw std::invalid_argument( "The height must be devisible by two" );
codec_ctx->width = width;
codec_ctx->height = height;
/* frames per second */
codec_ctx->framerate = (AVRational){framerate, 1};
/* timebase: This is the fundamental unit of time (in seconds) in terms
* of which frame timestamps are represented. For fixed-fps content,
* timebase should be 1/framerate and timestamp increments should be
* identical to 1. */
avStream->time_base = (AVRational){ 1, framerate };
codec_ctx->time_base = avStream->time_base;
codec_ctx->gop_size = 10; /* emit one intra frame every twelve frames at most */
codec_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
/* Some formats want stream headers to be separate. */
if (avOutputFormat->flags & AVFMT_GLOBALHEADER)
codec_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
frame = alloc_frame(codec_ctx->pix_fmt, codec_ctx->width, codec_ctx->height);
if (!frame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
/* copy the stream parameters to the muxer */
ret = avcodec_parameters_from_context(avStream->codecpar, codec_ctx);
if (ret < 0) {
fprintf(stderr, "Could not copy the stream parameters\n");
exit(1);
}
// Color format Conversion
sws = sws_getContext( codec_ctx->width
, codec_ctx->height
, AV_PIX_FMT_RGB32
, codec_ctx->width
, codec_ctx->height
, AV_PIX_FMT_YUV420P
, SWS_FAST_BILINEAR // Change this???
, 0, 0, 0);
// Check output file
av_dump_format(avFormatContext, 0, filename, 1);
/* open the output file, if needed */
if (!(avOutputFormat->flags & AVFMT_NOFILE)) {
ret = avio_open(&avFormatContext->pb, filename, AVIO_FLAG_WRITE);
if (ret < 0) {
fprintf(stderr, "Could not open '%s': %s\n", filename,
av_err2str(ret));
exit(1);
}
}
/* Write the stream header, if any. */
ret = avformat_write_header(avFormatContext, &avDict);
if (ret < 0) {
fprintf(stderr, "Error occurred when opening output file: %s\n",
av_err2str(ret));
exit(1);
}
}
void addFrame(){
fflush(stdout);
/* Make sure the frame data is writable.
On the first round, the frame is fresh from av_frame_get_buffer()
and therefore we know it is writable.
But on the next rounds, encode() will have called
avcodec_send_frame(), and the codec may have kept a reference to
the frame in its internal structures, that makes the frame
unwritable.
av_frame_make_writable() checks that and allocates a new buffer
for the frame only if necessary.
*/
ret = av_frame_make_writable(frame);
if (ret < 0){
fprintf(stderr, "Could not make the frame writable\n");
exit(1); // Wait... you should throw error instead!
}
size_t nvals = 4 * codec_ctx->width * codec_ctx->height; //GL_BGRA
pixels = (GLubyte *) realloc(pixels, nvals * sizeof(GLubyte)); // I don't think I need to do this every time since the size is constant
glReadPixels(0, 0, codec_ctx->width, codec_ctx->height, GL_BGRA, GL_UNSIGNED_BYTE, pixels);
// CONVERT TO YUV AND ENCODE
ret = av_image_alloc(frame->data, frame->linesize, codec_ctx->width, codec_ctx->height, AV_PIX_FMT_YUV420P, 32);
if (ret < 0){
fprintf(stderr, "Could not allocate the image\n");
exit(1); // Wait... you should throw error instead!
}
// Compensate for OpenGL y-axis pointing upwards and ffmpeg y-axis pointing downwards
uint8_t *in_data[1] = {(uint8_t *) pixels + (codec_ctx->height-1)*codec_ctx->width*4}; // address of the last line
int in_linesize[1] = {- codec_ctx->width * 4}; // negative stride
sws_scale(sws, in_data, in_linesize, 0, codec_ctx->height, frame->data, frame->linesize);
frame->pts = frame_order;
frame_order++;
/* encode the image */
write_frame(avFormatContext, codec_ctx, avStream, frame, pkt);
}
void close()
{
write_frame(avFormatContext, codec_ctx, avStream, NULL, pkt);
av_write_trailer(avFormatContext);
avcodec_free_context(&codec_ctx);
av_frame_free(&frame);
sws_freeContext(sws);
if (!(avFormatContext->oformat->flags & AVFMT_NOFILE))
/* Close the output file. */
avio_closep(&avFormatContext->pb);
avformat_free_context(avFormatContext);
}
private:
AVOutputFormat *avOutputFormat;
AVFormatContext* avFormatContext = NULL;
AVStream* avStream;
AVDictionary *avDict = NULL; // "create" an empty dictionary
GLubyte *pixels = NULL;
struct SwsContext *sws;
const AVCodec *codec;
AVCodecContext *codec_ctx= NULL;
// Should be ref counted??? https://ffmpeg.org/doxygen/3.3/group__lavc__encdec.html
AVFrame *frame;
AVPacket *pkt;
//
int frame_order, ret;
int write_frame(AVFormatContext *fmt_ctx, AVCodecContext *c,
AVStream *st, AVFrame *frame, AVPacket *pkt)
{
int ret;
// ERROR OCCURS HERE
ret = avcodec_send_frame(c, frame);
// ERROR OCCURS HERE
if (ret < 0) {
fprintf(stderr, "Error sending a frame to the encoder: %s\n",
av_err2str(ret));
exit(1);
}
while (ret >= 0) {
ret = avcodec_receive_packet(c, pkt);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
break;
else if (ret < 0) {
fprintf(stderr, "Error encoding a frame: %s\n", av_err2str(ret));
exit(1);
}
/* rescale output packet timestamp values from codec to stream timebase */
av_packet_rescale_ts(pkt, c->time_base, st->time_base);
pkt->stream_index = st->index;
/* Write the compressed frame to the media file. */
log_packet(fmt_ctx, pkt);
ret = av_interleaved_write_frame(fmt_ctx, pkt);
/* pkt is now blank (av_interleaved_write_frame() takes ownership of
* its contents and resets pkt), so that no unreferencing is necessary.
* This would be different if one used av_write_frame(). */
if (ret < 0) {
fprintf(stderr, "Error while writing output packet: %s\n", av_err2str(ret));
exit(1);
}
}
return ret == AVERROR_EOF ? 1 : 0;
}
void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
{
AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
pkt->stream_index);
}
AVFrame *alloc_frame(enum AVPixelFormat pix_fmt, int width, int height)
{
AVFrame *frame;
int ret;
frame = av_frame_alloc();
if (!frame)
return NULL;
frame->format = pix_fmt;
frame->width = width;
frame->height = height;
/* allocate the buffers for the frame data */
ret = av_frame_get_buffer(frame, 0);
if (ret < 0) {
fprintf(stderr, "Could not allocate frame data.\n");
exit(1);
}
return frame;
}
};
The issue turned out to be that I had missed calling avcodec_open2(..) now it works :)
/* open the codec */
// The missing step: the encoder context must be opened with avcodec_open2()
// before frames are allocated and sent, otherwise avcodec_send_frame()
// fails with "Invalid input".
AVDictionary *opt = NULL;
av_dict_copy(&opt, avDict, 0);
ret = avcodec_open2(codec_ctx, codec, &opt);
av_dict_free(&opt);
if (ret < 0) {
    fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
    exit(1);
}
// Then allocate frame...
frame = alloc_frame(codec_ctx->pix_fmt, codec_ctx->width, codec_ctx->height);
if (!frame) {
    fprintf(stderr, "Could not allocate video frame\n");
    exit(1);
}
For reference, this is the full code (but the video quality is shit so you will have to tune that yourself)
#ifndef VIDEO_CAPTURE2_H
#define VIDEO_CAPTURE2_H
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "../include/glad/glad.h"
#include "finite_math.hpp"
#include <stdexcept>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>
#include <libavformat/avformat.h>
#include <libavutil/opt.h>
#include <libavutil/mathematics.h>
#include <libavutil/timestamp.h>
}
// These exist to patch three FFmpeg helper macros that gcc rejects when
// compiled as C++ (the macros use compound literals). Each is replaced by
// an inline function returning a std::string.
#ifdef av_err2str
#undef av_err2str
#include <string>
/// Human-readable message for an AVERROR code.
av_always_inline std::string av_err2string(int errnum) {
    char str[AV_ERROR_MAX_STRING_SIZE];
    return av_make_error_string(str, AV_ERROR_MAX_STRING_SIZE, errnum);
}
#define av_err2str(err) av_err2string(err).c_str()
#endif
#ifdef av_ts2str
#undef av_ts2str
#include <string>
/// Format a timestamp value.
/// BUG FIX: FFmpeg timestamps are int64_t; the original took `int`, silently
/// truncating large pts/dts values (av_ts_make_string takes int64_t).
av_always_inline std::string av_ts2string(int64_t ts) {
    char str[AV_TS_MAX_STRING_SIZE];
    return av_ts_make_string(str, ts);
}
#define av_ts2str(ts) av_ts2string(ts).c_str()
#endif
#ifdef av_ts2timestr
#undef av_ts2timestr
#include <string>
/// Format a timestamp as seconds in the given time base.
/// BUG FIX: widened `ts` from int to int64_t for the same reason as above.
av_always_inline std::string av_ts2timestring(int64_t ts, AVRational *tb) {
    char str[AV_TS_MAX_STRING_SIZE];
    return av_ts_make_time_string(str, ts, tb);
}
#define av_ts2timestr(ts, tb) av_ts2timestring(ts, tb).c_str()
#endif
class VideoCapture2
{
public:
    /**
     * Sets up the muxer, the H.264 encoder (opened via avcodec_open2), the
     * BGRA->YUV420P converter and the output file, then writes the header.
     * Throws std::invalid_argument when width/height are odd (YUV420P needs
     * even dimensions).
     */
    VideoCapture2(const char *filename, unsigned int width, unsigned int height, int framerate, unsigned int bitrate){
        avformat_alloc_output_context2(&avFormatContext, NULL, NULL, filename);
        if (!avFormatContext) {
            printf("Could not deduce output format from file extension: using MPEG.\n");
            avformat_alloc_output_context2(&avFormatContext, NULL, "mpeg", filename);
        }
        if (!avFormatContext)
            exit(1);
        avOutputFormat = avFormatContext->oformat;
        // Video Stream
        /* find the encoder */
        AVCodecID codec_id = AV_CODEC_ID_H264;
        codec = avcodec_find_encoder(codec_id);
        if (!codec) {
            fprintf(stderr, "Could not find encoder for '%s'\n",
                    avcodec_get_name(codec_id));
            exit(1);
        }
        pkt = av_packet_alloc();
        if (!pkt) {
            fprintf(stderr, "Could not allocate AVPacket\n");
            exit(1);
        }
        avStream = avformat_new_stream(avFormatContext, NULL);
        if (!avStream) {
            fprintf(stderr, "Could not allocate stream\n");
            exit(1);
        }
        avStream->id = avFormatContext->nb_streams-1;
        codec_ctx = avcodec_alloc_context3(codec);
        if (!codec_ctx) {
            fprintf(stderr, "Could not alloc an encoding context\n");
            exit(1);
        }
        codec_ctx->codec_id = codec_id;
        /* put sample parameters */
        codec_ctx->bit_rate = bitrate;
        /* resolution must be a multiple of two */
        if(width % 2 != 0)
            throw std::invalid_argument( "The width must be devisible by two" );
        if(height % 2 != 0)
            throw std::invalid_argument( "The height must be devisible by two" );
        codec_ctx->width = width;
        codec_ctx->height = height;
        /* frames per second */
        codec_ctx->framerate = (AVRational){framerate, 1};
        /* timebase: 1/framerate, pts increments by 1 per frame */
        avStream->time_base = (AVRational){ 1, framerate };
        codec_ctx->time_base = avStream->time_base;
        codec_ctx->gop_size = 10; /* emit one intra frame every ten frames at most */
        codec_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
        /* Some formats want stream headers to be separate. */
        if (avOutputFormat->flags & AVFMT_GLOBALHEADER)
            codec_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
        /* open the codec */
        AVDictionary *opt = NULL;
        av_dict_copy(&opt, avDict, 0);
        ret = avcodec_open2(codec_ctx, codec, &opt);
        av_dict_free(&opt);
        if (ret < 0) {
            fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
            exit(1);
        }
        frame = alloc_frame(codec_ctx->pix_fmt, codec_ctx->width, codec_ctx->height);
        if (!frame) {
            fprintf(stderr, "Could not allocate video frame\n");
            exit(1);
        }
        /* copy the stream parameters to the muxer */
        ret = avcodec_parameters_from_context(avStream->codecpar, codec_ctx);
        if (ret < 0) {
            fprintf(stderr, "Could not copy the stream parameters\n");
            exit(1);
        }
        // Color format conversion: RGB32 capture -> YUV420P encoder input
        sws = sws_getContext( codec_ctx->width
                            , codec_ctx->height
                            , AV_PIX_FMT_RGB32
                            , codec_ctx->width
                            , codec_ctx->height
                            , AV_PIX_FMT_YUV420P
                            , SWS_FAST_BILINEAR // Change this???
                            , 0, 0, 0);
        // Check output file
        av_dump_format(avFormatContext, 0, filename, 1);
        /* open the output file, if needed */
        if (!(avOutputFormat->flags & AVFMT_NOFILE)) {
            ret = avio_open(&avFormatContext->pb, filename, AVIO_FLAG_WRITE);
            if (ret < 0) {
                fprintf(stderr, "Could not open '%s': %s\n", filename,
                        av_err2str(ret));
                exit(1);
            }
        }
        /* Write the stream header, if any. */
        ret = avformat_write_header(avFormatContext, &avDict);
        if (ret < 0) {
            fprintf(stderr, "Error occurred when opening output file: %s\n",
                    av_err2str(ret));
            exit(1);
        }
    }

    /// Grab the current GL framebuffer (BGRA), convert to YUV420P and encode it.
    void addFrame(){
        fflush(stdout);
        /* The encoder may still hold a reference to `frame` from the previous
         * call; av_frame_make_writable() reallocates only when necessary. */
        ret = av_frame_make_writable(frame);
        if (ret < 0){
            fprintf(stderr, "Could not make the frame writable\n");
            exit(1);
        }
        size_t nvals = 4 * codec_ctx->width * codec_ctx->height; // GL_BGRA
        if (!pixels)  // frame size never changes: allocate the capture buffer once
            pixels = (GLubyte *) malloc(nvals * sizeof(GLubyte));
        glReadPixels(0, 0, codec_ctx->width, codec_ctx->height, GL_BGRA, GL_UNSIGNED_BYTE, pixels);
        /* BUG FIX: the original called av_image_alloc() on frame->data here
         * on every frame, leaking the previously allocated planes each call —
         * `frame` already owns its buffers from av_frame_get_buffer(). */
        // Compensate for OpenGL's y-axis pointing up vs ffmpeg's pointing
        // down: start at the last line and use a negative stride.
        uint8_t *in_data[1] = {(uint8_t *) pixels + (codec_ctx->height-1)*codec_ctx->width*4};
        int in_linesize[1] = {- codec_ctx->width * 4};
        sws_scale(sws, in_data, in_linesize, 0, codec_ctx->height, frame->data, frame->linesize);
        frame->pts = frame_order;
        frame_order++;
        /* encode the image */
        write_frame(avFormatContext, codec_ctx, avStream, frame, pkt);
    }

    /// Flush the encoder, write the trailer and release all resources.
    void close()
    {
        write_frame(avFormatContext, codec_ctx, avStream, NULL, pkt);  // NULL frame = flush
        av_write_trailer(avFormatContext);
        avcodec_free_context(&codec_ctx);
        av_frame_free(&frame);
        av_packet_free(&pkt);   // BUG FIX: pkt was never freed
        sws_freeContext(sws);
        free(pixels);           // BUG FIX: capture buffer was never freed
        pixels = NULL;
        if (!(avFormatContext->oformat->flags & AVFMT_NOFILE))
            /* Close the output file. */
            avio_closep(&avFormatContext->pb);
        avformat_free_context(avFormatContext);
    }
private:
    AVOutputFormat *avOutputFormat;
    AVFormatContext* avFormatContext = NULL;
    AVStream* avStream;
    AVDictionary *avDict = NULL; // "create" an empty dictionary
    GLubyte *pixels = NULL;      // BGRA capture buffer (owned)
    struct SwsContext *sws;
    const AVCodec *codec;
    AVCodecContext *codec_ctx = NULL;
    // Should be ref counted??? https://ffmpeg.org/doxygen/3.3/group__lavc__encdec.html
    AVFrame *frame;
    AVPacket *pkt;
    // BUG FIX: these were uninitialized — pts started from garbage.
    int frame_order = 0, ret = 0;

    /* Send `frame` (NULL = flush) to the encoder and mux every packet it
     * returns. Returns 1 when encoding has finished, 0 otherwise. */
    int write_frame(AVFormatContext *fmt_ctx, AVCodecContext *c,
                    AVStream *st, AVFrame *frame, AVPacket *pkt)
    {
        int ret;
        // send the frame to the encoder
        ret = avcodec_send_frame(c, frame);
        if (ret < 0) {
            fprintf(stderr, "Error sending a frame to the encoder: %s\n",
                    av_err2str(ret));
            exit(1);
        }
        while (ret >= 0) {
            ret = avcodec_receive_packet(c, pkt);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                break;
            else if (ret < 0) {
                fprintf(stderr, "Error encoding a frame: %s\n", av_err2str(ret));
                exit(1);
            }
            /* rescale output packet timestamp values from codec to stream timebase */
            av_packet_rescale_ts(pkt, c->time_base, st->time_base);
            pkt->stream_index = st->index;
            /* Write the compressed frame to the media file. */
            log_packet(fmt_ctx, pkt);
            ret = av_interleaved_write_frame(fmt_ctx, pkt);
            /* av_interleaved_write_frame() takes ownership of pkt's contents
             * and resets pkt, so no unreferencing is necessary. */
            if (ret < 0) {
                fprintf(stderr, "Error while writing output packet: %s\n", av_err2str(ret));
                exit(1);
            }
        }
        return ret == AVERROR_EOF ? 1 : 0;
    }

    /// Debug helper: print a packet's timing in stream time-base units.
    void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
    {
        AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
        printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
               av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
               av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
               av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
               pkt->stream_index);
    }

    /// Allocate an AVFrame with pixel buffers for the given format and size.
    AVFrame *alloc_frame(enum AVPixelFormat pix_fmt, int width, int height)
    {
        AVFrame *frame;
        int ret;
        frame = av_frame_alloc();
        if (!frame)
            return NULL;
        frame->format = pix_fmt;
        frame->width = width;
        frame->height = height;
        /* allocate the buffers for the frame data */
        ret = av_frame_get_buffer(frame, 0);
        if (ret < 0) {
            fprintf(stderr, "Could not allocate frame data.\n");
            exit(1);
        }
        return frame;
    }
};
#endif
Here is an example of the problem I'm trying to solve: I get a buffer from the microphone and try to process its content. As kindly guided in this question, I'm trying to convert a char* to a float*.
The logic: I declare a vector to hold my desired floats, resize it to match the size of arBuffer(), and then copy the data into the vector.
arBuffer() returns raw bytes — am I going to have to cast it before passing it to memcpy?
#include "Lib_api.h"
#include <alsa/asoundlib.h>
#include <stdio.h>
#include "audiorecorder.h"
#include "Globals.h"
#include <iostream>
#include <inttypes.h>
#include <string.h>
#include <stdlib.h>
#include <vector>
#include <cstring>
using namespace std;
//Declare Creation
void* mCore;
int main(void)
{
    // Capture microphone audio via the ALSA recorder and feed float samples
    // to the OXY decoder until recording stops.
    int rc;
    int mode = 3;
    const float sampleRate = 44100; // Nyquist limit ~22 kHz
    int bufferSize = 1024;
    //initialise
    mCore = OXY_Create();
    //initialise audio recorder
    rc = arInitialise();
    OXY_Configure(mode, sampleRate, bufferSize, mCore);
    //initialise check hardware
    if(rc)
    {
        std::cerr << "Fatal error: Audio could not be initialised" << rc << std::endl <<std::endl;
        arFree();
        exit(1);
    }
    //start recording
    rc = arStartRecording();
    // Reuse one vector across iterations instead of reconstructing it
    // every pass (resize keeps the capacity).
    std::vector<float> values;
    //application loop
    while(arIsRunning())
    {
        // BUG FIX: the original did
        //     values.resize(arBufferSize(), sizeof(float));        // size in BYTES, fill value 4.0f
        //     std::memcpy(arBuffer(), &values[0], sizeof(values[0])); // src/dst swapped, 4 bytes only
        // i.e. it sized the vector in bytes instead of floats, swapped the
        // memcpy arguments (writing INTO the mic buffer, whose pointer is
        // const), and copied a single element. Copy the whole capture
        // buffer into the float vector instead.
        values.resize(arBufferSize() / sizeof(float));
        std::memcpy(values.data(), arBuffer(), values.size() * sizeof(float));
        // Hand the latest microphone data to the decoder.
        int ret = OXY_DecodeAudioBuffer(values.data(), values.size(), mCore);
        if (ret == -2)
        {
            std::cerr << "FOUND_TOKEN ---> -2 " << std::endl << std::endl;
        }
        else if(ret>=0)
        {
            std::cerr << "Decode started ---> -2 " << ret << std::endl << std::endl;
        }
        else if (ret == -3)
        {
            //int sizeStringDecoded = OXY_GetDecodedData(mStringDecoded, mCore);
            std::cerr << "STRING DECODED ---> -2 " << std::endl << std::endl;
        }
        else
        {
            //no data found in this buffer
            std::cerr << "No data found in this buffer" << std::endl << std::endl;
        }
    }
    //Clean up
    arFree();
    return 0;
}
I changed the format from SND_PCM_FORMAT_S16_LE to SND_PCM_FORMAT_FLOAT_LE, as kindly suggested in another SO question.
/* Use the newer ALSA API */
#define ALSA_PCM_NEW_HW_PARAMS_API
#include <stdlib.h>
#include <alsa/asoundlib.h>
#include <pthread.h>
#include "settings.h"
#include "audiorecorder.h"
/* Shared state between the capture thread and the arXxx() API below. */
pthread_t thr;            /* capture thread handle */
pthread_mutex_t mutex;    /* guards `buffer` between reader and consumer */
snd_pcm_t *handle;        /* open ALSA capture device */
snd_pcm_uframes_t frames; /* frames per period */
unsigned char* buffer;    /* one period of raw captured samples */
BOOL running;             /* cleared to ask the capture thread to exit */
size_t buffersize;        /* size of `buffer` in bytes */
/* Returns TRUE while the capture thread is (or should be) active. */
BOOL arIsRunning(void)
{
return running;
}
/* Lock the shared capture buffer; pair every call with arReleaseBuffer(). */
void arAcquireBuffer(void)
{
//printf("Acquired buffer\n");
pthread_mutex_lock(&mutex);
}
/* Unlock the shared capture buffer acquired by arAcquireBuffer(). */
void arReleaseBuffer(void)
{
//printf("Released buffer\n");
pthread_mutex_unlock(&mutex);
}
/* Read-only view of the latest captured period; callers must hold the
 * buffer lock (arAcquireBuffer) while reading to avoid a torn read. */
const unsigned char* arBuffer(void)
{
return buffer;
}
/* Size of arBuffer() in BYTES (not samples or frames). */
const size_t arBufferSize(void)
{
return buffersize;
}
/* Capture-thread body: read one period at a time from ALSA into the
 * shared `buffer` (under the mutex) until `running` is cleared or a
 * fatal read error occurs. */
void* entry_point(void *arg)
{
int rc;
fprintf(stderr, "Listening...\n");
while (running)
{
arAcquireBuffer();
/* snd_pcm_readi returns frames actually read, or a negative errno. */
rc = snd_pcm_readi(handle, buffer, frames);
//stream to stdout - useful for testing/debugging
//write(1, buffer, buffersize);
arReleaseBuffer();
if (rc == -EPIPE) {
/* EPIPE means overrun */
fprintf(stderr, "overrun occurred\n");
snd_pcm_prepare(handle);
}
else if (rc < 0) {
fprintf(stderr, "error from read: %s\n", snd_strerror(rc));
running = FALSE;
}
else if (rc != (int)frames) {
fprintf(stderr, "short read, read %d frames\n", rc);
}
}
return NULL;
}
/*
 * Open the default capture device and configure it for interleaved
 * 32-bit-float capture; allocates one period's worth of buffer.
 * Returns 0 on success or a negative ALSA error code.
 */
int arInitialise(void)
{
    snd_pcm_hw_params_t *params;
    unsigned int val;
    int rc, dir = 0; /* fix: dir was passed to ALSA uninitialised */
    running = FALSE;
    /* Open PCM device for recording (capture). */
    rc = snd_pcm_open(&handle, RECORDER_DEVICE, SND_PCM_STREAM_CAPTURE, 0);
    if (rc < 0) {
        fprintf(stderr, "unable to open pcm device: %s\n", snd_strerror(rc));
        return rc;
    }
    else
    {
        fprintf(stderr, "Successfully opened default capture device.\n");
    }
    /* Allocate a hardware parameters object.
       (fix: the argument was mojibake "¶ms" instead of "&params") */
    snd_pcm_hw_params_alloca(&params);
    /* Fill it in with default values. */
    snd_pcm_hw_params_any(handle, params);
    /* Interleaved mode */
    snd_pcm_hw_params_set_access(handle, params, SND_PCM_ACCESS_RW_INTERLEAVED);
    /* 32-bit float little-endian samples (the old comment still said
       "Signed 16-bit"); fix: the call was missing its semicolon. */
    snd_pcm_hw_params_set_format(handle, params, SND_PCM_FORMAT_FLOAT_LE);
    /* Channels */
    snd_pcm_hw_params_set_channels(handle, params, NUM_CHANNELS);
    fprintf(stderr, "Channels set to %d.\n", NUM_CHANNELS);
    /* sampling rate */
    val = SAMPLE_RATE;
    snd_pcm_hw_params_set_rate_near(handle, params, &val, &dir);
    fprintf(stderr, "Samplerate set to %d.\n", val);
    /* Set period to FRAMES_PER_BUFFER frames. */
    frames = FRAMES_PER_BUFFER;
    snd_pcm_hw_params_set_period_size_near(handle, params, &frames, &dir);
    /* Write the parameters to the driver */
    rc = snd_pcm_hw_params(handle, params);
    if (rc < 0) {
        fprintf(stderr, "unable to set hw parameters: %s\n", snd_strerror(rc));
        return rc;
    }
    /* Use a buffer large enough to hold one period.
       fix: FLOAT_LE samples are sizeof(float) == 4 bytes each, not 2;
       "frames * 2 * NUM_CHANNELS" allocated only half a period, so every
       snd_pcm_readi overran the buffer. */
    snd_pcm_hw_params_get_period_size(params, &frames, &dir);
    buffersize = frames * sizeof(float) * NUM_CHANNELS;
    buffer = (unsigned char*) malloc(buffersize);
    return 0;
}
/*
 * Start the capture thread. Returns 0 on success, 1 if already running,
 * -1 on mutex/thread creation failure.
 */
int arStartRecording(void)
{
    if(running) return 1;
    if(pthread_mutex_init(&mutex, NULL))
    {
        printf("Unable to initialize mutex\n");
        return -1;
    }
    /* fix: set the flag BEFORE spawning the thread — entry_point's
       while(running) loop could otherwise observe FALSE and exit
       immediately (start-up race). */
    running = TRUE;
    if(pthread_create(&thr, NULL, &entry_point, NULL))
    {
        fprintf(stderr, "Could not create recorder thread!\n");
        running = FALSE;
        return -1;
    }
    return 0;
}
/* Ask the capture thread to exit; it stops at the end of its current
 * period read. */
void arStopRecording(void)
{
running = FALSE;
}
void arFree(void)
{
running = FALSE;
sleep(500);
snd_pcm_drain(handle);
snd_pcm_close(handle);
pthread_mutex_destroy(&mutex);
free(buffer);
}
values.resize(arBufferSize(), sizeof(float))
Well, that wasn't what I wrote in the other comment. You need to divide the buffersize (in bytes) by the number of bytes per float to get the number of floats: arBufferSize() / sizeof(float)
std::memcpy(arBuffer(), &values[0], sizeof(values[0]));
memcpy takes its destination first and its source second — you have them reversed. The const* error is because you're asking memcpy to write into arBuffer()'s const buffer.
Also, sizeof(values[0]) is the size of one float, in bytes. You already have arBufferSize(), which is exactly the size that memcpy needs.
I'm trying to record audio using ALSA and pass it on to be processed. The audio samples come back from a function that returns a char*, but the processing API wants a float*.
I've tried many solutions. I think I understand that it's not really a char buffer but a byte buffer — but how do I get floats out of it?
This returns the buffer:
const unsigned char* arBuffer(void)
{
return buffer;
}
I need to consume the output of the microphone as a float
int32_t O_DecodeAudioBuffer(float *audioBuffer, int size, void *oxyingObject)
{
Core *oxying = (COxyCore*)oxyingObject;
//Decode audioBuffer to check if begin token is found, we should keep previous buffer to check if token was started in previous
//var mDecoding > 0 when token has been found, once decoding is finished, mDecoding = 0
return oxying->mDecoder->DecodeAudioBuffer(audioBuffer, size);
}
I'm writing a program that consumes the above as an API.
void* mOxyCore; is declared.
I then try to pass arBuffer(), which doesn't work as expected.
while(arIsRunning())
{
int ret = DecodeAudioBuffer(arBuffer(), arBufferSize(), mCore);
}
The Alsa:
/* Use the newer ALSA API */
#define ALSA_PCM_NEW_HW_PARAMS_API
#include <stdlib.h>
#include <alsa/asoundlib.h>
#include <pthread.h>
#include "settings.h"
#include "audiorecorder.h"
/* Shared state between the capture thread and the arXxx() API below. */
pthread_t thr;            /* capture thread handle */
pthread_mutex_t mutex;    /* guards `buffer` between reader and consumer */
snd_pcm_t *handle;        /* open ALSA capture device */
snd_pcm_uframes_t frames; /* frames per period */
unsigned char* buffer;    /* one period of raw captured samples */
BOOL running;             /* cleared to ask the capture thread to exit */
size_t buffersize;        /* size of `buffer` in bytes */
/* Report whether the capture thread is (or should be) active. */
BOOL arIsRunning(void)
{
    BOOL state = running;
    return state;
}
/* Take exclusive access to the shared capture buffer; release it again
 * with arReleaseBuffer(). */
void arAcquireBuffer(void)
{
    pthread_mutex_lock(&mutex);
}
/* Give up the exclusive access taken by arAcquireBuffer(). */
void arReleaseBuffer(void)
{
    pthread_mutex_unlock(&mutex);
}
/* Read-only view of the most recently captured period. */
const unsigned char* arBuffer(void)
{
    const unsigned char* view = buffer;
    return view;
}
/* Size of arBuffer() in BYTES (not samples or frames). */
const size_t arBufferSize(void)
{
    size_t bytes = buffersize;
    return bytes;
}
/* Capture-thread body: read one period at a time from ALSA into the
 * shared `buffer` (under the mutex) until `running` is cleared or a
 * fatal read error occurs. */
void* entry_point(void *arg)
{
int rc;
fprintf(stderr, "Listening...\n");
while (running)
{
arAcquireBuffer();
/* snd_pcm_readi returns frames actually read, or a negative errno. */
rc = snd_pcm_readi(handle, buffer, frames);
//stream to stdout - useful for testing/debugging
//write(1, buffer, buffersize);
arReleaseBuffer();
if (rc == -EPIPE) {
/* EPIPE means overrun */
fprintf(stderr, "overrun occurred\n");
snd_pcm_prepare(handle);
}
else if (rc < 0) {
fprintf(stderr, "error from read: %s\n", snd_strerror(rc));
running = FALSE;
}
else if (rc != (int)frames) {
fprintf(stderr, "short read, read %d frames\n", rc);
}
}
return NULL;
}
/*
 * Open the default capture device and configure it for interleaved
 * signed-16-bit capture; allocates one period's worth of buffer.
 * Returns 0 on success or a negative ALSA error code.
 */
int arInitialise(void)
{
    snd_pcm_hw_params_t *params;
    unsigned int val;
    int rc, dir = 0; /* fix: dir was passed to ALSA uninitialised */
    running = FALSE;
    /* Open PCM device for recording (capture). */
    rc = snd_pcm_open(&handle, RECORDER_DEVICE, SND_PCM_STREAM_CAPTURE, 0);
    if (rc < 0) {
        fprintf(stderr, "unable to open pcm device: %s\n", snd_strerror(rc));
        return rc;
    }
    else
    {
        fprintf(stderr, "Successfully opened default capture device.\n");
    }
    /* Allocate a hardware parameters object.
       (fix: the argument was mojibake "¶ms" instead of "&params") */
    snd_pcm_hw_params_alloca(&params);
    /* Fill it in with default values. */
    snd_pcm_hw_params_any(handle, params);
    /* Interleaved mode */
    snd_pcm_hw_params_set_access(handle, params, SND_PCM_ACCESS_RW_INTERLEAVED);
    /* Signed 16-bit little-endian format */
    snd_pcm_hw_params_set_format(handle, params, SND_PCM_FORMAT_S16_LE);
    fprintf(stderr, "Format set to PCM Signed 16bit Little Endian.\n");
    /* Channels */
    snd_pcm_hw_params_set_channels(handle, params, NUM_CHANNELS);
    fprintf(stderr, "Channels set to %d.\n", NUM_CHANNELS);
    /* sampling rate */
    val = SAMPLE_RATE;
    snd_pcm_hw_params_set_rate_near(handle, params, &val, &dir);
    fprintf(stderr, "Samplerate set to %d.\n", val);
    /* Set period to FRAMES_PER_BUFFER frames. */
    frames = FRAMES_PER_BUFFER;
    snd_pcm_hw_params_set_period_size_near(handle, params, &frames, &dir);
    /* Write the parameters to the driver */
    rc = snd_pcm_hw_params(handle, params);
    if (rc < 0) {
        fprintf(stderr, "unable to set hw parameters: %s\n", snd_strerror(rc));
        return rc;
    }
    /* Use a buffer large enough to hold one period: S16 samples are
       2 bytes each, so frames * 2 * NUM_CHANNELS is correct here. */
    snd_pcm_hw_params_get_period_size(params, &frames, &dir);
    buffersize = frames * 2 * NUM_CHANNELS; /* 2 bytes/sample * channels */
    buffer = (unsigned char*) malloc(buffersize);
    return 0;
}
/*
 * Start the capture thread. Returns 0 on success, 1 if already running,
 * -1 on mutex/thread creation failure.
 */
int arStartRecording(void)
{
    if(running) return 1;
    if(pthread_mutex_init(&mutex, NULL))
    {
        printf("Unable to initialize mutex\n");
        return -1;
    }
    /* fix: set the flag BEFORE spawning the thread — entry_point's
       while(running) loop could otherwise observe FALSE and exit
       immediately (start-up race). */
    running = TRUE;
    if(pthread_create(&thr, NULL, &entry_point, NULL))
    {
        fprintf(stderr, "Could not create recorder thread!\n");
        running = FALSE;
        return -1;
    }
    return 0;
}
/* Ask the capture thread to exit; it stops at the end of its current
 * period read. */
void arStopRecording(void)
{
running = FALSE;
}
void arFree(void)
{
running = FALSE;
sleep(500);
snd_pcm_drain(handle);
snd_pcm_close(handle);
pthread_mutex_destroy(&mutex);
free(buffer);
}
The problem here isn't a cast, but a representation issue.
Audio is generally represented as a series of samples. There are quite a few ways to represent each sample: on a scale from -1.0f to +1.0f, or -32768 to +32767, or many others.
Alsa in fact supports many formats, and you chose SND_PCM_FORMAT_S16_LE, so that's -32768 to +32767. You could cast that buffer to std::int16_t*, assuming your C++ environment is little-endian (almost certain). You can't cast it to float*; for that you'd need to ask for SND_PCM_FORMAT_FLOAT_LE.
I am looking to write a code to open a file.jpg and load the data into a buffer without actually decompressing the data. I need to send the data as is.
I have found a code that reads an image and decompresses it. I don't know how to modify the code to just get the raw bytes not decompressed version.
/* libjpeg decompression skeleton: opens `filename`, reads the header,
 * then decodes scanline by scanline into a one-row sample array.
 * NOTE(review): this fragment never sets cinfo.err (jpeg_std_error /
 * my_error_mgr) before jpeg_create_decompress — confirm the error
 * manager is installed in the surrounding code, or libjpeg will call
 * exit() on error. */
struct jpeg_decompress_struct cinfo;
struct my_error_mgr jerr;
FILE * infile; /* source file */
JSAMPARRAY buffer; /* Output row buffer */
int row_stride; /* physical row width in output buffer */
if ((infile = fopen(filename, "rb")) == NULL) {
fprintf(stderr, "can't open %s\n", filename);
return 0;
}
/* Now we can initialize the JPEG decompression object. */
jpeg_create_decompress(&cinfo);
/* Step 2: specify data source (eg, a file) */
jpeg_stdio_src(&cinfo, infile);
(void) jpeg_read_header(&cinfo, TRUE);
// Here I want to only get raw bytes
(void) jpeg_start_decompress(&cinfo);
/* bytes per decoded row = pixels * components (e.g. 3 for RGB) */
row_stride = cinfo.output_width * cinfo.output_components;
/* Make a one-row-high sample array that will go away when done with image */
buffer = (*cinfo.mem->alloc_sarray)
((j_common_ptr) &cinfo, JPOOL_IMAGE, row_stride, 1);
while (cinfo.output_scanline < cinfo.output_height) {
(void) jpeg_read_scanlines(&cinfo, buffer, 1);
/* Assume put_scanline_someplace wants a pointer and sample count. */
// put_scanline_someplace(buffer[0], row_stride);
}
/* Step 7: Finish decompression */
(void) jpeg_finish_decompress(&cinfo);
jpeg_destroy_decompress(&cinfo);
fclose(infile);
After exchanging comments below the question post, it turned out that if we don't care about decompression, simple byte reading would suffice. Here is the code that solved the problem:
/* Read the whole file verbatim (no JPEG decode) into a heap buffer.
 * fix: the buffer was a fixed max_w*max_h*3+100 bytes while fread copied
 * filestatus.st_size bytes — a heap overflow for any file larger than
 * that; size the buffer from the actual file size instead, and check
 * stat() / free on the error paths. */
struct stat filestatus;
if (stat(filename, &filestatus) != 0) {
    printf("Error opening file %s\n", filename);
    return 0;
}
size_t data_size = filestatus.st_size;
char* data = (char*)malloc(data_size);
if (data == NULL) {
    printf("Input data must not be NULL.\n");
    return 0;
}
FILE *fp = fopen(filename, "rb"); /* b - binary mode */
if (fp == NULL) {
    printf("Error opening file %s\n", filename);
    free(data);
    return 0;
}
size_t len = fread(data, 1, data_size, fp);
if (len != data_size) {
    printf("Error reading file %s\n", filename);
    fclose(fp);
    free(data);
    return 0;
}
fclose(fp);
return len;
// free(data) when you are done.