I am looking to write code that opens a .jpg file and loads its data into a buffer without actually decompressing it. I need to send the data as-is.
I have found a code that reads an image and decompresses it. I don't know how to modify the code to just get the raw bytes not decompressed version.
/* libjpeg decompression boilerplate (adapted from the libjpeg example).
 * NOTE(review): the libjpeg example normally installs an error manager
 * (cinfo.err = jpeg_std_error(...) plus setjmp) BEFORE
 * jpeg_create_decompress; presumably trimmed from this fragment — confirm.
 * For the stated goal (sending the compressed bytes as-is), none of this
 * decompression is needed: a plain binary fread() of the file suffices,
 * as shown in the second snippet in this document. */
struct jpeg_decompress_struct cinfo;
struct my_error_mgr jerr;
FILE * infile; /* source file */
JSAMPARRAY buffer; /* Output row buffer */
int row_stride; /* physical row width in output buffer */
if ((infile = fopen(filename, "rb")) == NULL) {
fprintf(stderr, "can't open %s\n", filename);
return 0;
}
/* Now we can initialize the JPEG decompression object. */
jpeg_create_decompress(&cinfo);
/* Step 2: specify data source (eg, a file) */
jpeg_stdio_src(&cinfo, infile);
/* Only parses the header/metadata; no pixel data is decoded yet. */
(void) jpeg_read_header(&cinfo, TRUE);
// Here I want to only get raw bytes
/* Everything from here on actually decompresses pixels — not needed for
 * raw pass-through of the compressed file. */
(void) jpeg_start_decompress(&cinfo);
row_stride = cinfo.output_width * cinfo.output_components;
/* Make a one-row-high sample array that will go away when done with image */
buffer = (*cinfo.mem->alloc_sarray)
((j_common_ptr) &cinfo, JPOOL_IMAGE, row_stride, 1);
while (cinfo.output_scanline < cinfo.output_height) {
(void) jpeg_read_scanlines(&cinfo, buffer, 1);
/* Assume put_scanline_someplace wants a pointer and sample count. */
// put_scanline_someplace(buffer[0], row_stride);
}
/* Step 7: Finish decompression */
(void) jpeg_finish_decompress(&cinfo);
jpeg_destroy_decompress(&cinfo);
fclose(infile);
After exchanging comments below the question post, it turned out that if we don't care about decompression, simple byte reading would suffice. Here is the code that solved the problem:
/* Read the whole file as raw bytes, without decoding it.
 * Fixed in review: the original malloc'd a buffer sized from max_w/max_h
 * but then fread() the FULL file size into it — a heap buffer overflow for
 * any file larger than max_w*max_h*3+100. It also never checked stat()'s
 * return value and leaked both the buffer and the FILE* on error paths. */
struct stat filestatus;
if (stat(filename, &filestatus) != 0) {
    printf("Error reading attributes of file %s\n", filename);
    return 0;
}
size_t data_size = filestatus.st_size;
/* Allocate exactly the number of bytes the file holds. */
char* data = (char*)malloc(data_size);
if (data == NULL) {
    printf("Out of memory allocating %zu bytes.\n", data_size);
    return 0;
}
FILE *fp = fopen(filename, "rb"); /* b - binary mode */
if (fp == NULL) {
    printf("Error opening file %s\n", filename);
    free(data);
    return 0;
}
size_t len = fread(data, 1, data_size, fp);
fclose(fp);
if (len != data_size) {
    printf("Error reading file %s\n", filename);
    free(data);
    return 0;
}
return len;
// free(data) when you are done.
Related
After experimenting with the examples on the FFmpeg documentation, I was finally able to create a short program that extracts every nth frame from a video. However, the output files that it produces are huge at over 15mb for each image. How can I change this to produce lower quality images?
The result I am trying to get is done easily on the command line with:
ffmpeg -i [input video] -vf "select=not(mod(n\,10))" -fps_mode vfr img_%03d.jpg
For a video with about 500 frames, this creates 50 images that are only about 800kb each; how would I be able to mimic this in my program?
My code consists of opening the input file, decoding the packets, then saving the frames:
#include <cstdio>
#include <cstdlib>
#include <iostream>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
#include <libswscale/swscale.h>
}
// Demuxing/decoding state shared by the helper functions below.
static AVFormatContext *fmt_ctx;    // input container context
static AVCodecContext *dec_ctx;     // decoder context for the chosen video stream
static int video_stream_index = -1; // index of the best video stream; -1 = not found yet
// OPEN THE INPUT FILE
/*
 * Open `filename`, locate its best video stream, and initialize the
 * module-level fmt_ctx / dec_ctx / video_stream_index for it.
 * Returns 0 on success or a negative AVERROR code on failure. Partially
 * initialized contexts are released by the caller's cleanup path, which
 * frees fmt_ctx and dec_ctx unconditionally.
 */
static int open_input_file(const char *filename) {
    int ret;
    const AVCodec *dec;
    // OPEN INPUT FILE
    if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
        printf("ERROR: failed to open input file\n");
        return ret;
    }
    // FIND STREAM INFO BASED ON INPUT FILE
    if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
        printf("ERROR: failed to find stream information\n");
        return ret;
    }
    // FIND THE BEST VIDEO STREAM FOR THE INPUT FILE
    ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
    if (ret < 0) {
        printf("ERROR: failed to find a video stream in the input file\n");
        return ret;
    }
    video_stream_index = ret;
    // ALLOCATE THE DECODING CONTEXT FOR THE INPUT FILE
    dec_ctx = avcodec_alloc_context3(dec);
    if (!dec_ctx) {
        printf("ERROR: failed to allocate decoding context\n");
        return AVERROR(ENOMEM);
    }
    // Fixed in review: the return value of avcodec_parameters_to_context()
    // was ignored; a failure here would leave the decoder misconfigured.
    ret = avcodec_parameters_to_context(dec_ctx, fmt_ctx->streams[video_stream_index]->codecpar);
    if (ret < 0) {
        printf("ERROR: failed to copy codec parameters to decoder context\n");
        return ret;
    }
    // INIT THE VIDEO DECODER
    if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
        printf("ERROR: failed to open video decoder\n");
        return ret;
    }
    return 0;
}
// SAVE THE FILE
/*
 * Write one packed-RGB24 image as a binary PPM (P6) file.
 *   buf       - RGB pixel data, one row every `wrap` bytes
 *   wrap      - source line stride in bytes (may exceed x_size * 3)
 *   x_size    - image width in pixels
 *   y_size    - image height in pixels
 *   file_name - output path
 * Fixed in review: the original dereferenced a NULL FILE* when fopen failed.
 */
static void save(unsigned char *buf, int wrap, int x_size, int y_size, char *file_name) {
    FILE *file = fopen(file_name, "wb");
    if (file == NULL) {
        fprintf(stderr, "ERROR: could not open %s for writing\n", file_name);
        return;
    }
    // PPM header: magic number, dimensions, max channel value.
    fprintf(file, "P6\n%d %d\n%d\n", x_size, y_size, 255);
    // Copy row by row because the source stride can be wider than one row.
    for (int i = 0; i < y_size; i++) {
        fwrite(buf + i * wrap, 1, x_size * 3, file);
    }
    fclose(file);
}
// DECODE FRAME AND CONVERT IT TO AN RGB IMAGE
/*
 * Send one packet to the decoder, then drain every frame it produces.
 * Each (mod)-th frame is converted to RGB24 and written out as
 * "<out_file_name>_NNN.<file_ext>" via save().
 *
 * Fixed in review: the SwsContext and the RGB AVFrame were allocated on
 * every call and never released (one leak per packet), and the early
 * `return` on EAGAIN/EOF skipped any chance of cleanup.
 */
static void decode(AVCodecContext *cxt, AVFrame *frame, AVPacket *pkt,
                   const char *out_file_name, const char *file_ext, int mod=1) {
    char buf[1024];
    int ret;
    // SEND PACKET TO DECODER (a NULL pkt puts the decoder in drain mode)
    ret = avcodec_send_packet(cxt, pkt);
    if (ret < 0) {
        printf("ERROR: error sending packet for decoding\n");
        exit(1);
    }
    // CREATE A SCALER CONTEXT FOR THE PIXEL-FORMAT CONVERSION
    SwsContext *sws_ctx = sws_getContext(dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
                                         dec_ctx->width, dec_ctx->height, AV_PIX_FMT_RGB24,
                                         SWS_BICUBIC, NULL, NULL, NULL);
    // CREATE A NEW RGB FRAME FOR CONVERSION AND ALLOCATE ITS BUFFERS
    AVFrame* rgb_frame = av_frame_alloc();
    rgb_frame->format = AV_PIX_FMT_RGB24;
    rgb_frame->width = dec_ctx->width;
    rgb_frame->height = dec_ctx->height;
    av_frame_get_buffer(rgb_frame, 0);
    // DRAIN EVERY FRAME THE DECODER PRODUCES FOR THIS PACKET
    while (ret >= 0) {
        ret = avcodec_receive_frame(cxt, frame);
        // EAGAIN: decoder needs more input; EOF: fully drained. Both normal.
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            break;  // fall through to cleanup instead of leaking
        } else if (ret < 0) {
            printf("ERROR: error during decoding\n");
            exit(1);
        }
        // ONLY KEEP EVERY (MOD)TH FRAME
        if (cxt->frame_number % mod == 0) {
            printf("saving frame %03d\n", cxt->frame_number);
            fflush(stdout);
            // CONVERT THE DECODED FRAME TO RGB24
            sws_scale(sws_ctx, frame->data, frame->linesize, 0, frame->height,
                      rgb_frame->data, rgb_frame->linesize);
            // BUILD THE OUTPUT PATH: "out_file_name_###.file_ext"
            snprintf(buf, sizeof(buf), "%s_%03d.%s", out_file_name, cxt->frame_number, file_ext);
            save(rgb_frame->data[0], rgb_frame->linesize[0], rgb_frame->width, rgb_frame->height, buf);
        }
    }
    // RELEASE PER-CALL RESOURCES (previously leaked)
    sws_freeContext(sws_ctx);
    av_frame_free(&rgb_frame);
}
/*
 * Open a hard-coded input video, decode it, and save every 5th frame as an
 * image via decode()/save().
 *
 * Fixed in review: the decoder was never flushed, so the frames still
 * buffered inside it when av_read_frame() hit EOF were silently dropped.
 * A final decode() call with a NULL packet drains them.
 */
int main() {
    // SIMULATE COMMAND LINE ARGUMENTS
    char argv0[] = "test";
    char argv1[] = "/User/Desktop/frames/test_video.mov";
    char *argv[] = {argv0, argv1, nullptr};
    // INIT VARS AND FFMPEG OBJECTS
    int ret;
    AVPacket *packet;
    AVFrame *frame;
    // Output naming; declared before the first goto so the jump does not
    // cross a non-trivial initialization (which would be ill-formed C++).
    std::string name = "/User/Desktop/frames/img";
    std::string ext = "bmp";
    // ALLOCATE FRAME AND PACKET
    frame = av_frame_alloc();
    packet = av_packet_alloc();
    if (!frame || !packet) {
        fprintf(stderr, "Could not allocate frame or packet\n");
        exit(1);
    }
    // IF FILE DOESN'T OPEN, GO TO THE END
    if ((ret = open_input_file(argv[1])) < 0) {
        goto end;
    }
    // READ ALL THE PACKETS
    while (av_read_frame(fmt_ctx, packet) >= 0) {
        // ONLY DECODE PACKETS BELONGING TO THE CHOSEN VIDEO STREAM
        if (packet->stream_index == video_stream_index) {
            decode(dec_ctx, frame, packet, name.c_str(), ext.c_str(), 5);
        }
        // UNREFERENCE THE PACKET
        av_packet_unref(packet);
    }
    // FLUSH THE DECODER (fixed in review: drains buffered frames at EOF)
    decode(dec_ctx, frame, NULL, name.c_str(), ext.c_str(), 5);
end:
    avcodec_free_context(&dec_ctx);
    avformat_close_input(&fmt_ctx);
    av_frame_free(&frame);
    av_packet_free(&packet);
    // FINAL ERROR CATCH
    if (ret < 0 && ret != AVERROR_EOF) {
        fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
        exit(1);
    }
    exit(0);
}
I am not sure how to go about producing images that are much smaller in size like the ones produced on the command line. I have a feeling that this is possible somehow during the conversion to RGB or the saving of the file but I can't seem to figure out how.
Also, is there any way that I could go about this much more efficiently? On the command line, this finishes very quickly (no more than a second or two for a 9 sec. movie at ~60 fps).
The command line version compresses the frame into jpeg file hence the size is very small. On the other hand, your code writes the rgb values directly into a file (regardless of the file extension). The size of the image is then Height x Width x 3 bytes, which is very big.
Solution: Adjust your save function to also compress the image.
Code example from Github - save_frame_as_jpeg.c:
/*
 * Encode one decoded frame and write it to "dvr-NNNNNN.jpg".
 * Returns 0 on success, -1 on any failure.
 *
 * Fixed in review: every early-exit path leaked jpegContext (and, later,
 * the encoded packet), fopen() was never checked, and the context was
 * closed but never freed even on success.
 * NOTE(review): AV_CODEC_ID_JPEG2000 emits a JPEG 2000 codestream even
 * though the file is named .jpg; for ordinary JPEG output
 * AV_CODEC_ID_MJPEG is the usual choice — confirm which is intended.
 */
int save_frame_as_jpeg(AVCodecContext *pCodecCtx, AVFrame *pFrame, int FrameNo)
{
    AVCodec *jpegCodec = avcodec_find_encoder(AV_CODEC_ID_JPEG2000);
    if (!jpegCodec) { return -1; }
    AVCodecContext *jpegContext = avcodec_alloc_context3(jpegCodec);
    if (!jpegContext) { return -1; }
    /* Encode in the decoder's pixel format at the frame's own size. */
    jpegContext->pix_fmt = pCodecCtx->pix_fmt;
    jpegContext->height = pFrame->height;
    jpegContext->width = pFrame->width;
    if (avcodec_open2(jpegContext, jpegCodec, NULL) < 0) {
        avcodec_free_context(&jpegContext);
        return -1;
    }
    FILE *JPEGFile;
    char JPEGFName[256];
    AVPacket packet = {.data = NULL, .size = 0};
    av_init_packet(&packet);
    int gotFrame;
    if (avcodec_encode_video2(jpegContext, &packet, pFrame, &gotFrame) < 0) {
        avcodec_close(jpegContext);
        avcodec_free_context(&jpegContext);
        return -1;
    }
    sprintf(JPEGFName, "dvr-%06d.jpg", FrameNo);
    JPEGFile = fopen(JPEGFName, "wb");
    if (JPEGFile == NULL) {
        av_free_packet(&packet);
        avcodec_close(jpegContext);
        avcodec_free_context(&jpegContext);
        return -1;
    }
    fwrite(packet.data, 1, packet.size, JPEGFile);
    fclose(JPEGFile);
    av_free_packet(&packet);
    avcodec_close(jpegContext);
    avcodec_free_context(&jpegContext);
    return 0;
}
So I have a program that reads an opengl window and encodes the read data as a video. Now through a series of experimentation I have learned that the bit format of my glfw window is 8:8:8 as returned by glfwGetVideoMode(monitor). So I use this function to read the window:
glReadPixels(0, 0,gl_width, gl_height,GL_RGBA, GL_UNSIGNED_BYTE, (GLvoid*) Buffer);
and I simply encode it in the AV_PIX_FMT_YUV420P format.
Under normal circumstances this method works just fine. However, when I actually run the program, the output I get, as opposed to what I can see in the glfw window, is really low resolution and a bit pixelated.
Here is what my GLFW window looks like:
Now this is what I want it to look like. It looks just fine on the opengl window, and I encode it directly without altering Buffer.
And here is what the encoded result, test.mp4 looks like when I run it using mplayer or similar software:
It's a lot more blurry and pixelated compared to the GLFW window. With some experimentation, and following an answer to another question I asked, I used avcodec_find_best_pix_fmt_of_list((*codec)->pix_fmts, AV_PIX_FMT_RGBA, 1, &ret) and it returned 13, which led me to believe that AV_PIX_FMT_YUVJ422P is the best option for this conversion to avoid a blurry/pixelated result. However, no matter which format I pass, every single one gives an error except AV_PIX_FMT_YUV420P. The error is:
[mpeg4 # 0x558e74f47900] Specified pixel format yuvj422p is invalid or not supported
I have no idea why this is happening, as the format is bound to a define and it is changed throughout the entire program when I change the define.
Here is my encoder so far (I have trimmed some parts):
video_encoder.cpp:
/*
 * Encode one frame (or flush with frame == NULL) and mux every packet the
 * encoder hands back into the output file.
 * Returns 1 once the encoder reports EOF, 0 otherwise; exits on hard errors.
 */
int video_encoder::write_frame(AVFormatContext *fmt_ctx, AVCodecContext *c,
                               AVStream *st, AVFrame *frame, AVPacket *pkt)
{
    // Hand the frame to the encoder; it may buffer it internally.
    int err = avcodec_send_frame(c, frame);
    if (err < 0) {
        std::cout << "Error sending a frame to the encoder: " << err << std::endl;
        exit(1);
    }
    // Drain every packet the encoder currently has ready.
    for (;;) {
        err = avcodec_receive_packet(c, pkt);
        if (err == AVERROR(EAGAIN) || err == AVERROR_EOF)
            break;
        if (err < 0) {
            std::cout << "Error encoding a frame: " << err << std::endl;
            exit(1);
        }
        // Convert timestamps from the codec timebase to the stream timebase.
        av_packet_rescale_ts(pkt, c->time_base, st->time_base);
        pkt->stream_index = st->index;
        // av_interleaved_write_frame() takes ownership of pkt's contents and
        // resets it, so no unref is needed here (unlike av_write_frame()).
        err = av_interleaved_write_frame(fmt_ctx, pkt);
        if (err < 0) {
            std::cout << "Error while writing output packet: " << err << std::endl;
            exit(1);
        }
    }
    return err == AVERROR_EOF ? 1 : 0;
}
/* Add an output stream. */
/*
 * Create one output stream on the muxer `oc`: find the encoder for
 * `codec_id`, allocate the stream, its reusable packet, and its encoder
 * context, and fill in the encoding parameters. The codec itself is opened
 * later in open_video().
 */
void video_encoder::add_stream(OutputStream *ost, AVFormatContext *oc,
const AVCodec **codec,
enum AVCodecID codec_id)
{
AVCodecContext *c;
int i;
/* find the encoder */
*codec = avcodec_find_encoder(codec_id);
if (!(*codec)) {
fprintf(stderr, "Could not find encoder for '%s'\n",
avcodec_get_name(codec_id));
exit(1);
}
/* Reusable packet later used by write_frame() when draining the encoder. */
ost->tmp_pkt = av_packet_alloc();
if (!ost->tmp_pkt) {
fprintf(stderr, "Could not allocate AVPacket\n");
exit(1);
}
ost->st = avformat_new_stream(oc, NULL);
if (!ost->st) {
fprintf(stderr, "Could not allocate stream\n");
exit(1);
}
ost->st->id = oc->nb_streams-1;
c = avcodec_alloc_context3(*codec);
if (!c) {
fprintf(stderr, "Could not alloc an encoding context\n");
exit(1);
}
ost->enc = c;
switch ((*codec)->type) {
case AVMEDIA_TYPE_AUDIO:
/* NOTE(review): audio branch trimmed in this listing; whether it falls
 * through into the video case cannot be determined from here. */
...
case AVMEDIA_TYPE_VIDEO:
c->codec_id = codec_id;
/* NOTE(review): 10000 bit/s (10 kbit/s) is an extremely low video
 * bitrate — this is the most likely cause of the blurry/pixelated
 * output described above; values in the Mbit/s range are typical. */
c->bit_rate = 10000;
/* Resolution must be a multiple of two. */
c->width = width;
c->height = height;
/* timebase: This is the fundamental unit of time (in seconds) in terms
* of which frame timestamps are represented. For fixed-fps content,
* timebase should be 1/framerate and timestamp increments should be
* identical to 1. */
ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE }; // *frame_rate
/* NOTE(review): STREAM_FRAME_RATE is a float (constructor takes float
 * fps) narrowed into AVRational's int field — fractional rates are
 * truncated here. */
c->time_base = ost->st->time_base;
c->gop_size = 7; /* emit one intra frame every seven frames at most */
c->pix_fmt = STREAM_PIX_FMT;
//if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO)
// c->max_b_frames = 2;
if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
/* Needed to avoid using macroblocks in which some coeffs overflow.
* This does not happen with normal video, it just happens here as
* the motion of the chroma plane does not match the luma plane. */
c->mb_decision = 2;
}
if ((*codec)->pix_fmts){
//c->pix_fmt = (*codec)->pix_fmts[0];
std::cout << "NEW FORMAT : " << c->pix_fmt << std::endl;
}
int ret;
/* NOTE(review): the "best" format is computed and printed, but c->pix_fmt
 * is never actually changed to it. */
avcodec_find_best_pix_fmt_of_list((*codec)->pix_fmts, AV_PIX_FMT_RGBA, 1, &ret);
std::cout << "Desired format is: " << ret << std::endl;
break;
}
/* Some formats want stream headers to be separate. */
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
/**************************************************************/
/* video output */
/*
 * Allocate an AVFrame of the given pixel format and dimensions, including
 * its data buffers. Returns NULL if the frame itself cannot be allocated;
 * exits if the buffers cannot.
 */
AVFrame* video_encoder::alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
{
    AVFrame *frm = av_frame_alloc();
    if (!frm)
        return NULL;
    // Describe the frame before asking libav to allocate its planes.
    frm->format = pix_fmt;
    frm->width = width;
    frm->height = height;
    if (av_frame_get_buffer(frm, 0) < 0) {
        fprintf(stderr, "Could not allocate frame data.\n");
        exit(1);
    }
    return frm;
}
/*
 * Open the codec for this output stream, allocate its reusable frame, and
 * copy the codec parameters into the muxer's stream.
 *
 * Fixed in review: the failure message passed the integer `ret` to a "%s"
 * conversion — undefined behavior (and garbage output). It now prints the
 * numeric error code.
 */
void video_encoder::open_video(AVFormatContext *oc, const AVCodec *codec,
                               OutputStream *ost, AVDictionary *opt_arg)
{
    int ret;
    AVCodecContext *c = ost->enc;
    AVDictionary *opt = NULL;
    av_dict_copy(&opt, opt_arg, 0);
    /* open the codec */
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        fprintf(stderr, "Could not open video codec: %d\n", ret);
        exit(1);
    }
    /* allocate and init a re-usable frame in the codec's pixel format */
    ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!ost->frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }
    /* If the output format were not YUV420P, a temporary YUV420P picture
     * would be needed for conversion; unused in this encoder. */
    ost->tmp_frame = NULL;
    /* copy the stream parameters to the muxer */
    ret = avcodec_parameters_from_context(ost->st->codecpar, c);
    if (ret < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }
}
/*
 * Convert the current rgb_data buffer (RGBA, width x height) into `frame`
 * (STREAM_PIX_FMT) with swscale.
 *
 * Fixed in review: a fresh SwsContext was created on every call and never
 * freed — one leak per encoded frame. Note that `sws_context` is passed BY
 * VALUE, so the original's assignment to it never reached the caller
 * (ost->sws_ctx was never populated); the context is therefore treated as
 * purely local and released before returning. Caching it in OutputStream
 * would be the better long-term fix.
 */
void video_encoder::set_frame_yuv_from_rgb(AVFrame *frame, struct SwsContext *sws_context) {
    const int in_linesize[1] = { 4 * width };  // RGBA = 4 bytes per pixel
    struct SwsContext *ctx = sws_getContext(
        width, height, AV_PIX_FMT_RGBA,
        width, height, STREAM_PIX_FMT,
        SCALE_FLAGS, 0, 0, 0);
    sws_scale(ctx, (const uint8_t * const *)&rgb_data, in_linesize, 0,
              height, frame->data, frame->linesize);
    sws_freeContext(ctx);
    (void)sws_context;  // unused; kept for interface compatibility
}
/*
 * Produce the next frame to encode, or NULL once STREAM_DURATION (in ms)
 * worth of frames has been emitted.
 */
AVFrame* video_encoder::get_video_frame(OutputStream *ost)
{
    AVCodecContext *enc = ost->enc;
    // Stop once the stream has reached its requested duration.
    if (av_compare_ts(ost->next_pts, enc->time_base,
                      (float) STREAM_DURATION / 1000, (AVRational){ 1, 1 }) > 0)
        return NULL;
    // The encoder may still hold a reference to the frame's previous
    // contents, so make it writable before overwriting it.
    if (av_frame_make_writable(ost->frame) < 0)
        exit(1);
    // Fill the frame from the current RGBA framebuffer.
    set_frame_yuv_from_rgb(ost->frame, ost->sws_ctx);
    ost->frame->pts = ost->next_pts++;
    return ost->frame;
}
/*
 * encode one video frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
// Thin wrapper: fetch the next generated frame (NULL once STREAM_DURATION
// is reached, which puts the encoder into flush mode) and pass it to
// write_frame() with this stream's reusable packet.
int video_encoder::write_video_frame(AVFormatContext *oc, OutputStream *ost)
{
return write_frame(oc, ost->enc, ost->st, get_video_frame(ost), ost->tmp_pkt);
}
// Release everything add_stream()/open_video() allocated for this stream.
void video_encoder::close_stream(AVFormatContext *oc, OutputStream *ost)
{
avcodec_free_context(&ost->enc);
av_frame_free(&ost->frame);
av_frame_free(&ost->tmp_frame);
av_packet_free(&ost->tmp_pkt);
// NOTE(review): sws_ctx/swr_ctx cleanup is commented out; ost->sws_ctx is
// never populated in the shown code (see set_frame_yuv_from_rgb), but
// revisit these lines if a context is ever cached here.
//sws_freeContext(ost->sws_ctx);
//swr_free(&ost->swr_ctx);
}
/**************************************************************/
/* media file output */
// Point the encoder at the caller-owned pixel buffer to be encoded next.
// The buffer is not copied — only the pointer is stored in rgb_data.
// NOTE(review): `audio_only` is unused in the shown code.
void video_encoder::set_encode_framebuffer(uint8_t* data, bool audio_only)
{
rgb_data = data;
}
// Finalize the container: write the trailer, close the video stream's
// resources, close the output file, and free the muxer context.
// NOTE(review): assumes construction succeeded — if both
// avformat_alloc_output_context2 calls failed, enc_inf.oc is NULL here and
// av_write_trailer would crash; the constructor only prints "FAILED" and
// continues. Worth confirming/raising.
video_encoder::~video_encoder()
{
av_write_trailer(enc_inf.oc);
/* Close each codec. */
if (enc_inf.have_video)
close_stream(enc_inf.oc, &enc_inf.video_st);
if (!(enc_inf.fmt->flags & AVFMT_NOFILE))
/* Close the output file. */
avio_closep(&enc_inf.oc->pb);
/* free the stream */
avformat_free_context(enc_inf.oc);
std::cout << "Done, closing." << std::endl;
}
/*
 * Encode a single video frame if video encoding is still active and video
 * is not ahead of audio. Returns true when a frame was processed, false
 * when there is nothing left to encode.
 */
bool video_encoder::encode_one_frame()
{
    if (!enc_inf.encode_video && !enc_inf.encode_audio)
        return false;
    // Video takes its turn unless audio is enabled and video is ahead.
    bool video_turn = enc_inf.encode_video &&
        (!enc_inf.encode_audio ||
         av_compare_ts(enc_inf.video_st.next_pts, enc_inf.video_st.enc->time_base,
                       enc_inf.audio_st.next_pts, enc_inf.audio_st.enc->time_base) <= 0);
    if (video_turn) {
        // write_video_frame() returns 1 when the stream is finished.
        enc_inf.encode_video = !write_video_frame(enc_inf.oc, &enc_inf.video_st);
        return true;
    }
    return false;
}
/*
 * Set up the muxer, the video stream/codec, and write the container header
 * for ./output/video.mp4.
 *
 * Fixed in review: `a, b = x;` uses the COMMA OPERATOR, so the original
 * zero-initialized only audio_st (video_st was untouched) and set only
 * encode_video (encode_audio kept an indeterminate value). have_video /
 * have_audio were also read later without ever being initialized. Each
 * member is now assigned explicitly.
 */
video_encoder::video_encoder(int w, int h, float fps, unsigned int duration)
    : width(w), height(h), STREAM_FRAME_RATE(fps), STREAM_DURATION(duration)
{
    std::string as_str = "./output/video.mp4";
    char* filename = const_cast<char*>(as_str.c_str());
    enc_inf.video_st = (struct OutputStream) { 0 };
    enc_inf.audio_st = (struct OutputStream) { 0 };
    enc_inf.video_st.next_pts = 1;
    enc_inf.audio_st.next_pts = 1;
    enc_inf.encode_audio = 0;
    enc_inf.encode_video = 0;
    enc_inf.have_video = 0;
    enc_inf.have_audio = 0;
    int ret;
    /* Allocate the output media context; fall back to MPEG if the file
     * extension does not identify a container format. */
    avformat_alloc_output_context2(&enc_inf.oc, NULL, NULL, filename);
    if (!enc_inf.oc) {
        avformat_alloc_output_context2(&enc_inf.oc, NULL, "mpeg", filename);
    }
    if (!enc_inf.oc)
        std::cout << "FAILED" << std::endl;
    enc_inf.fmt = enc_inf.oc->oformat;
    /* Add the video stream using the container's default codec and
     * initialize its parameters. */
    if (enc_inf.fmt->video_codec != AV_CODEC_ID_NONE) {
        add_stream(&enc_inf.video_st, enc_inf.oc, &video_codec, enc_inf.fmt->video_codec);
        enc_inf.have_video = 1;
        enc_inf.encode_video = 1;
    }
    /* Now that all parameters are set, open the codec and allocate the
     * necessary encode buffers. */
    if (enc_inf.have_video)
        open_video(enc_inf.oc, video_codec, &enc_inf.video_st, opt);
    /* open the output file, if needed */
    if (!(enc_inf.fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&enc_inf.oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            // NOTE(review): failure is silently ignored here; the header
            // write below will then fail too — worth surfacing an error.
        }
    }
    /* Write the stream header, if any. */
    ret = avformat_write_header(enc_inf.oc, &opt);
    if (ret < 0) {
        VI_ERROR("Error occurred when opening output file:");
    }
}
video_encoder.h:
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
// NOTE(review): per the reported "Specified pixel format ... is invalid or
// not supported" error, the chosen encoder (mpeg4) rejects other values
// here; consult the encoder's (*codec)->pix_fmts list before changing it.
#define SCALE_FLAGS SWS_SPLINE
/* The output bit rate in bit/s */
#define OUTPUT_BIT_RATE 96000
/* The number of output channels */
#define OUTPUT_CHANNELS 2
// Per-stream encoding state (one instance for video, one for audio).
typedef struct OutputStream {
AVStream *st;            // muxer-side stream
AVCodecContext *enc;     // encoder context
/* pts of the next frame that will be generated */
int64_t next_pts;
int samples_count;       // presumably an audio sample counter — unused in the shown code
AVFrame *frame;          // reusable frame handed to the encoder
AVFrame *tmp_frame;      // optional intermediate frame; left NULL by open_video()
AVPacket *tmp_pkt;       // reusable packet used when draining the encoder
float t, tincr, tincr2;  // presumably audio tone-generator state — unused in the shown code
struct SwsContext *sws_ctx;  // never populated in the shown code (see set_frame_yuv_from_rgb)
struct SwrContext *swr_ctx;
} OutputStream;
// Top-level muxing state for one output file.
typedef struct {
OutputStream video_st, audio_st;
const AVOutputFormat *fmt;   // container format (owned by oc)
AVFormatContext *oc;         // muxer context
int have_video, have_audio, encode_video, encode_audio;  // stream-present / still-encoding flags
std::string name;            // unused in the shown code
} encode_info;
Again, changing STREAM_PIX_FMT anything other than AV_PIX_FMT_YUV420P causes the program to give the error.
What is the cause of this and how can I fix this? Also am I on the right track for fixing the pixelation problem? I'm using ubuntu.
So, I'm a relative newbie to network programming and programming in general, and I'm attempting to write a simple client/server text file transfer service. My code asks for the user to choose an upload or download option. When the user selects upload, the new file is created on the server's end, but isn't written with the data until the socket is closed. Also the string "upload" is appended onto the end of the text in the file.
I can't seem to find where my errors are, so any help would be greatly appreciated!
server.cpp
#define SIZE 1024
/*
 * Receive bytes from `sockfd` until the peer closes the connection (recv
 * returns 0) or an error occurs, writing everything verbatim to "recv.txt".
 *
 * Fixed in review: the original wrote the buffer with fprintf("%s", ...),
 * which (a) requires NUL-terminated data that recv() does not provide —
 * reading past the received bytes is undefined behavior — and (b) stops at
 * the first NUL byte, corrupting binary or NUL-padded payloads. fwrite()
 * of exactly the received byte count fixes both. fopen() is now checked,
 * and the file is opened in binary mode.
 */
void write_file(int sockfd)
{
    int n;
    FILE *fp;
    char const *filename = "recv.txt";
    char buffer[SIZE];
    fp = fopen(filename, "wb");
    if (fp == NULL)
    {
        perror("Error opening recv.txt");
        return;
    }
    while (1)
    {
        n = recv(sockfd, buffer, SIZE, 0);
        if (n <= 0)
        {
            break;  /* 0 = peer closed the connection, <0 = error */
        }
        /* Write exactly what was received, byte for byte. */
        fwrite(buffer, 1, (size_t)n, fp);
    }
    fclose(fp);
}
// in main
/* Read exactly the 10-byte command the client sends before the payload.
 * Fixed in review: the original re-read into the START of msgRecv on every
 * iteration, so a short read would be overwritten by the next one, and a
 * read() error/EOF (rcv <= 0) would spin forever; the offset now advances
 * and the loop stops on error. */
char msgRecv[10];
int n = 0;
while (n < 10)
{
    rcv = read(connected_sd, msgRecv + n, 10 - n);
    if (rcv <= 0)
        break;
    n += rcv;
}
char msgUpload[10] = "upload";
if (n == 10 && strcmp(msgUpload, msgRecv) == 0)
{
    write_file(connected_sd);
}
client.cpp
/*
 * Stream the whole file over `sockfd` in SIZE-byte chunks.
 *
 * Fixed in review: the original read lines with fgets() but always sent
 * sizeof(bytes) == SIZE bytes, so every short line was padded with stale
 * buffer contents — which is why extra/garbage data appeared in the file
 * on the receiving side. It now reads binary chunks with fread() and sends
 * exactly the number of bytes read (also making binary files safe).
 */
void send_file(FILE *points, int sockfd)
{
    char bytes[SIZE];
    size_t n;
    while ((n = fread(bytes, 1, SIZE, points)) > 0)
    {
        if (send(sockfd, bytes, n, 0) == -1)
        {
            perror("Error in sending file.");
            exit(1);
        }
    }
}
// in main
char msgUpload[10] = "upload";
// Announce the operation; the full 10-byte array is sent so the server's
// fixed 10-byte read is satisfied in a single message.
// NOTE(review): the send() return value is unchecked here.
send(sd, msgUpload, sizeof(msgUpload), 0);
string fileN;
cout << "What is the name of the file you wish to upload?\n";
cin >> fileN;
bzero(msgUpload, sizeof(msgUpload));
FILE *file;
char const *filename = fileN.c_str();
// Opened in text mode ("r"); fine for .txt transfers, but binary files
// would need "rb" — NOTE(review): confirm only text files are intended.
file = fopen(filename, "r");
if (file == NULL)
{
perror("Error in reading file.\n");
exit(1);
}
send_file(file, sd);
printf("File data sent successfully.\n\n");
fclose(file);
I have to following problem.
I need to scan a .pcap file (saved file) for re-transmitted tcp packets.
I'm using the Winpcap lib. I tried using pcap_stats() to check for dropped packets (which will also represent re-transmitted packets), but found out pcap_stats() can only be used for live captures and not saved files. Is there any way around this limitation, or am I looking at it wrong?
Here is my code so far:
int main(int argc, char **argv)
{
pcap_t *fp; //File pointer
char errbuff[PCAP_ERRBUF_SIZE]; //Error buffer
char source[PCAP_BUF_SIZE]; //Source string
struct pcap_pkthdr *header; //Packet header
const u_char *pkt_data; //Packet data
pcap_stat *ps; // Packet stats
char packet_filter[] = "tcp"; //Filter paramaters
struct bpf_program fcode; //compiled filter code
int res; //File reading result
u_int i = 0;
time_t start = time(NULL);
time_t sec;
int lps; //Lines per second
//Create source string
if (pcap_createsrcstr(source, //Source string
PCAP_SRC_FILE, //Open local file
NULL, //Host
NULL, //Port
argv[1], //File name
errbuff //Error buffer
) != 0)
{
fprintf(stderr, "\n Error creating source string");
return -1;
}
//Open File
if ((fp = pcap_open(source, //Device
65536, //Capture size (65536 = whole packet)
PCAP_OPENFLAG_PROMISCUOUS, //Flags
1000, //Timeout
NULL, //Authentication
errbuff //Error buffer
)) == NULL)
{
fprintf(stderr, "Error opening file", source);
return -1;
}
//Complie filter
if ((pcap_compile(fp, //File pointer
&fcode, //Compiled filter code
packet_filter, //Filter paramaters
1, //Optimazation
NULL //netmask
)) < 0)
{
fprintf(stderr, "\n Unable to complile packet filter");
return -1;
}
//Set filter
if ((pcap_setfilter(fp, //File pointer
&fcode //Compiled filter code
)) < 0)
{
fprintf(stderr, "\n Error setting filter");
return -1;
}
if ((pcap_stats(fp, ps)) < 0)
{
fprintf(stderr, "failed to retrive statistics");
printf(pcap_geterr(fp));
return -1;
}
//Read file
while ((res = pcap_next_ex(fp, &header, &pkt_data)) >= 0)
{
}
if (res == -1)
{
printf("Error reading the packets %s\n", pcap_geterr(fp));
}
printf("%f%%", (ps->ps_capt)/(ps->ps_recv) * 100); //percentage of accepted packets
sec = time(NULL) - start;
if (sec > 0)
{
lps = (ps->ps_recv) / sec;
printf("\nSpeed: %d Packets/second", lps);
}
else
{
lps = (ps->ps_recv);
printf("\nSpeed: %d Packets/second", lps);
}
return 0;
}
For better or worse, packet statistics are not saved in pcap files; there's nothing in the file format to support that.
pcap-ng supports it, but libpcap doesn't yet support writing pcap-ng files, and WinPcap is based on an earlier version of libpcap that didn't even support reading them.
This will probably improve at some unknown point in the future.
I am new to ffmpeg and I tried using api-example.c to decode wma files. However when I run the program, it gave me an error saying
"frame_len overflow". Does anyone know how to fix this error?
Here is my code:
extern "C" {
#include <avcodec.h>
#include "../libavcodec/avcodec.h"
#include <avformat.h>
}
#include <iostream>
#include <assert.h>
#include <windows.h>
#include <mmsystem.h>
#define INBUF_SIZE 4096          /* unused in the shown audio path (video-example leftover) */
#define AUDIO_INBUF_SIZE 20480   /* size of the raw read-ahead buffer fed to the decoder */
#define AUDIO_REFILL_THRESH 4096 /* refill inbuf once fewer than this many bytes remain */
/* Decode a WMA file with the (pre-0.9, long-removed) FFmpeg API and
 * re-encode it as MP3.
 * NOTE(review): several issues are visible in this function and flagged
 * inline below; the most important is that the DECODER is fed raw bytes of
 * the container file rather than demuxed packets. */
int main(int argc, char *argv[]) {
avcodec_init();
avcodec_register_all();
//avdevice_register_all();
av_register_all();
AVCodec *codec;
// NOTE(review): `c` is never assigned below, yet it is passed to
// avcodec_close()/av_free() at the end (both receive NULL).
AVCodecContext *c= NULL;
AVCodec *ocodec;
AVCodecContext *oc= NULL;
int out_size, len,out_size2;
FILE *f, *outfile;
uint8_t *outbuf;
uint8_t inbuf[AUDIO_INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
AVPacket avpkt;
// NOTE(review): the output is MP3-encoded below but named .wma.
char* outfilename="test.wma";
char* filename="Beethoven's.wma";
AVFormatContext *pFormatCtx;
// NOTE(review): allocated but never used or freed.
WAVEFORMATEX* wfx=new WAVEFORMATEX;
int ret;
ret=av_open_input_file(&pFormatCtx, filename, NULL, 0, NULL);
if(ret!=0)
{
std::cout<<"cannot open file!"<<std::endl;
exit(1);
}
if(av_find_stream_info(pFormatCtx)<0)
{
std::cout<<"cannot find stream!"<<std::endl;
exit(1);
}
int audioStream;
AVCodecContext *pCodecCtx;
// Find the first audio stream
audioStream=-1;
for(int i=0; i<pFormatCtx->nb_streams; i++)
if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_AUDIO)
{
audioStream=i;
break;
}
if(audioStream==-1)
{
std::cout<<"cannot find audio!"<<std::endl;
}
// NOTE(review): only a warning above — execution continues, and
// streams[-1] is indexed next if no audio stream was found.
// Get a pointer to the codec context for the audio stream
pCodecCtx=pFormatCtx->streams[audioStream]->codec;
av_init_packet(&avpkt);
printf("Audio decoding\n");
/* find the suitable audio decoder */
codec = avcodec_find_decoder(pCodecCtx->codec_id);
if (!codec) {
fprintf(stderr, "codec not found\n");
exit(1);
}
if(codec->capabilities & CODEC_CAP_TRUNCATED)
pCodecCtx->flags|=CODEC_FLAG_TRUNCATED;
//open the codec (for decoding)
int test = avcodec_open(pCodecCtx, codec);
if (test < 0) {
fprintf(stderr, "could not open codec\n");
exit(1);
}
//find mp3 encoder
ocodec = avcodec_find_encoder(CODEC_ID_MP3);
if (!ocodec) {
fprintf(stderr, "codec not found\n");
exit(1);
}
//allocate context
oc= avcodec_alloc_context();
/* put sample parameters */
oc->bit_rate = 64000;
oc->sample_rate = 44100;
oc->channels = 1;
/* open it */
if (avcodec_open(oc, ocodec) < 0) {
fprintf(stderr, "could not open encoding codec\n");
exit(1);
}
//buffer
outbuf = (uint8_t*)malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
/* NOTE(review): this re-opens the WMA/ASF CONTAINER file and, in the loop
 * below, feeds its raw bytes (headers, indexes and all) straight to the
 * audio decoder. The decoder expects demuxed audio frames — obtained via
 * av_read_frame() on pFormatCtx — so feeding container bytes is the most
 * likely trigger of the reported "frame_len overflow" error. */
//open inputfile
f = fopen(filename, "rb");
if (!f) {
fprintf(stderr, "could not open %s\n", filename);
exit(1);
}
//open outputfile
outfile = fopen(outfilename, "wb");
if (!outfile) {
av_free(c);
exit(1);
}
/* decode until eof */
avpkt.data = inbuf;
avpkt.size = fread(inbuf, 1, AUDIO_INBUF_SIZE, f);
//while there is still data
while (avpkt.size > 0) {
std::cout<<"decoding..."<<std::endl;
out_size = AVCODEC_MAX_AUDIO_FRAME_SIZE;
//decode
len = avcodec_decode_audio3(pCodecCtx, (short *)outbuf, &out_size, &avpkt);
if (len < 0) {
fprintf(stderr, "Error while decoding\n");
exit(1);
}
if (out_size > 0) {
/* if a frame has been decoded, output it */
std::cout<<"1 frame decoded!"<<std::endl;
/* NOTE(review): encodes FROM outbuf INTO outbuf — the same buffer is
 * both the encoder's input and its output (see the answer below);
 * use a separate output buffer. */
out_size2 = avcodec_encode_audio(oc, outbuf, out_size, (short*)outbuf);
fwrite(outbuf, 1, out_size2, outfile);
}
//subtract data from whatever decode function returns
avpkt.size -= len;
avpkt.data += len;
if (avpkt.size < AUDIO_REFILL_THRESH) {
/* Refill the input buffer, to avoid trying to decode
* incomplete frames. Instead of this, one could also use
* a parser, or use a proper container format through
* libavformat. */
memmove(inbuf, avpkt.data, avpkt.size);
avpkt.data = inbuf;
len = fread(avpkt.data + avpkt.size, 1,
AUDIO_INBUF_SIZE - avpkt.size, f);
if (len > 0)
avpkt.size += len;
}
}
fclose(outfile);
fclose(f);
free(outbuf);
/* NOTE(review): `c` is still NULL here, so these two calls do nothing;
 * the contexts actually opened (pCodecCtx, oc) are never closed, and
 * pFormatCtx and wfx are never released. */
avcodec_close(c);
av_free(c);
}
I have been stuck on this for quite a long time. Please help me.
Does anyone know what's wrong with my code?
Thanks,
Izak
Use debug messages to determine the point of failure.
That said, I strongly suspect the error occurs during encoding, because you are using the same buffer (and its decoded size) as both the decoder's output and the encoder's input.