ZLib won't compress beyond ~600 kB of output in D

I've tried both std.zlib and using zlib directly, but neither seems to compress anything beyond the 600 kB mark. I've mostly followed the usual online guides, except for the input-buffer handling, since I have to compress a pre-existing region of memory rather than a file on disk.
I forgot to include the code at first. This version doesn't get beyond ~600 kB:
int ret, flush;
zlib.z_stream strm;
strm.zalloc = null;
strm.zfree = null;
strm.opaque = null;

ret = zlib.deflateInit(&strm, compLevel);
if (ret != zlib.Z_OK)
    throw new Exception("Compressor initialization error");

ubyte[] output;
output.length = cast(uint)imageData.length;
strm.next_in = imageData.ptr;
strm.avail_in = cast(uint)imageData.length;
strm.next_out = output.ptr;
strm.avail_out = cast(uint)output.length;
do {
    ret = zlib.deflate(&strm, zlib.Z_FINISH);
    if (!(ret == zlib.Z_OK || ret == zlib.Z_STREAM_END)) {
        //version(unittest) std.stdio.writeln(strm.total_out);
        zlib.deflateEnd(&strm);
        throw new Exception("Compressor output error: " ~ cast(string)std.string.fromStringz(strm.msg));
    }
} while (ret != zlib.Z_STREAM_END);
writeBuffer = cast(void[])[Chunk(cast(uint)strm.total_out, DATA_INIT).nativeToBigEndian] ~ output[0..cast(size_t)strm.total_out];
file.rawWrite(writeBuffer);
crc = crc32Of(writeBuffer[4..$]).dup.reverse;
file.rawWrite(crc);
Previously I tried the following, but it immediately fails with a stream error and produces no output:
int ret, flush;
//uint have;
zlib.z_stream strm;
strm.zalloc = null;
strm.zfree = null;
strm.opaque = null;

ret = zlib.deflateInit(&strm, compLevel);
if (ret != zlib.Z_OK)
    throw new Exception("Compressor initialization error");

ubyte[] output;
static if (writeblocksize < 2048)
    output.length = 2048;
strm.next_in = imageData.ptr;
strm.avail_in = cast(uint)imageData.length;
do {
    flush = strm.avail_in ? zlib.Z_NO_FLUSH : zlib.Z_FINISH;
    strm.next_out = output.ptr;
    strm.avail_out = cast(uint)output.length;
    ret = zlib.deflate(&strm, flush);
    if (ret == zlib.Z_STREAM_ERROR) {
        version(unittest) std.stdio.writeln(ret);
        zlib.deflateEnd(&strm);
        throw new Exception("Compressor output error: " ~ cast(string)std.string.fromStringz(strm.msg));
    }
    //version(unittest) std.stdio.writeln(strm.total_out);
    //writeBuffer = output[0..$-strm.avail_out];
    writeBuffer = cast(void[])[Chunk(cast(uint)writeBuffer.length, DATA_INIT).nativeToBigEndian] ~ output;
    file.rawWrite(writeBuffer);
    crc = crc32Of(writeBuffer[4..$]).dup.reverse;
    file.rawWrite(crc);
    //writeBuffer.length = 0;
} while (flush != zlib.Z_FINISH);
zlib.deflateEnd(&strm);
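For reference, here is a minimal sketch of the pattern zlib's own zpipe example uses, adapted for in-memory input (written in C++ here rather than D, since the C API is identical; the helper name and buffer size are illustrative, not from the post). The key point is that deflate(Z_FINISH) must be handed a fresh output window on every pass until it returns Z_STREAM_END:
#include <zlib.h>
#include <vector>
#include <stdexcept>

// Sketch: compress a whole in-memory buffer, growing the output as needed.
std::vector<unsigned char> compress_buffer(const unsigned char* src, size_t len, int level)
{
    z_stream strm = {};
    if (deflateInit(&strm, level) != Z_OK)
        throw std::runtime_error("deflateInit failed");
    strm.next_in = const_cast<unsigned char*>(src);
    strm.avail_in = static_cast<uInt>(len);
    std::vector<unsigned char> out;
    unsigned char window[16384];
    int ret;
    do {
        strm.next_out = window;             // fresh output window on every pass
        strm.avail_out = sizeof(window);
        ret = deflate(&strm, Z_FINISH);     // the whole input is already available
        if (ret == Z_STREAM_ERROR) {
            deflateEnd(&strm);
            throw std::runtime_error("deflate failed");
        }
        out.insert(out.end(), window, window + (sizeof(window) - strm.avail_out));
    } while (ret != Z_STREAM_END);
    deflateEnd(&strm);
    return out;
}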

Related

Add actual timestamp to mp4 using ffmpeg

I'm using ffmpeg to write an H264 stream to an mp4 file.
Everything is working, but now I need to embed in this file the actual timestamp, in milliseconds, of each frame.
Is it possible?
This is my code:
void mp4_file_create(mp4_par * par, t_image * img_in)
{
    AVCodec * codec = NULL;
    AVCodecContext * cc_in;
    AVFormatContext * av_fmt_ctx_out;
    AVStream * av_stream;
    AVPacket av_pkt;
    AVFormatContext * ifmt_ctx;
    AVDictionary * pMetaData = NULL;
    unsigned long long last_frame_ts_utc;
    unsigned long long last_frame_ts_absolute;
    unsigned long long last_pts;
    t_mp4_dict_metadata metadata;
    char file_name[1024];
    char TSstrdate[128];
    int ret = 0;

    av_register_all();
    cc_in = NULL;
    av_stream = NULL;
    if (avformat_alloc_output_context2(&av_fmt_ctx_out, NULL, NULL, file_name) < 0) {
        trace_error("avformat_alloc_output_context2 failed");
        goto FnExit;
    }
    /* find the H264 RAW encoder */
    codec = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (!codec) {
        AVStream *in_stream = NULL;
        if (av_fmt_ctx_in == NULL)
        {
            trace_error("av_fmt_ctx_in is NULL");
            goto FnExit;
        }
        in_stream = av_fmt_ctx_in->streams[0];
        in_stream->codec->width = par->width;
        in_stream->codec->height = par->height;
        in_stream->codec->coded_width = par->width;
        in_stream->codec->coded_height = par->height;
        in_stream->codec->bit_rate = 1024;
        in_stream->codec->flags = CODEC_FLAG_GLOBAL_HEADER;
        in_stream->codec->time_base.num = 1;
        in_stream->codec->time_base.den = par->frame_rate;
        in_stream->codec->gop_size = par->gop;
        in_stream->codec->pix_fmt = AV_PIX_FMT_YUV420P;
        av_stream = avformat_new_stream(av_fmt_ctx_out, in_stream->codec->codec);
        if (!av_stream) {
            trace_error("Failed allocating output stream");
            goto FnExit;
        }
        ret = avcodec_copy_context(av_stream->codec, in_stream->codec);
        if (ret != 0) {
            goto FnExit;
        }
    }
    else {
        av_stream = avformat_new_stream(av_fmt_ctx_out, NULL);
        if (!av_stream) {
            goto FnExit;
        }
        cc_in = avcodec_alloc_context3(codec);
        if (cc_in == NULL) {
            goto FnExit;
        }
        cc_in->width = par->width;
        cc_in->height = par->height;
        cc_in->bit_rate = 1024;
        cc_in->flags = CODEC_FLAG_GLOBAL_HEADER;
        cc_in->time_base.num = 1;
        cc_in->time_base.den = par->frame_rate;
        cc_in->gop_size = par->gop;
        cc_in->pix_fmt = AV_PIX_FMT_YUVJ420P;
        cc_in->extradata = (unsigned char*)av_mallocz(sizeof(sample_spspps));
        cc_in->extradata_size = sizeof(sample_spspps);
        memcpy(cc_in->extradata, sample_spspps, cc_in->extradata_size);
        ret = avcodec_copy_context(av_stream->codec, cc_in);
        if (ret != 0) {
            goto FnExit;
        }
    }
    av_stream->codec->codec_tag = 0;
    if (av_fmt_ctx_out->oformat->flags & AVFMT_GLOBALHEADER)
        av_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    if (!(av_fmt_ctx_out->flags & AVFMT_NOFILE)) {
        ret = avio_open(&av_fmt_ctx_out->pb, file_name, AVIO_FLAG_READ_WRITE);
        if (ret < 0) {
            trace_error("Could not open output file '%s'", file_name);
            goto FnExit;
        }
    }
    av_fmt_ctx_out->streams[0]->time_base.num = 1;
    av_fmt_ctx_out->streams[0]->time_base.den = par->frame_rate;
    av_fmt_ctx_out->streams[0]->codec->time_base.num = 1;
    av_fmt_ctx_out->streams[0]->codec->time_base.den = par->frame_rate;
    AVRational fps;
    fps.num = 1;
    fps.den = par->frame_rate;
    av_stream_set_r_frame_rate(av_fmt_ctx_out->streams[0], fps);
    av_fmt_ctx_out->streams[0]->first_dts = AV_TIME_BASE;
    av_dict_set(&pMetaData, "title", par->guid_video_function, 0);
    av_dict_set(&pMetaData, "artist", "Test Artist", 0);
    av_dict_set(&pMetaData, "date", TSstrdate, 0);
    av_fmt_ctx_out->metadata = pMetaData;
    if (avformat_write_header(av_fmt_ctx_out, NULL) < 0) {
        goto FnExit;
    }
    //............. Now, for each frame ............
    av_init_packet(&av_pkt);
    if (first_frame)
    {
        av_pkt.pts = 0;
        av_pkt.dts = 0;
    }
    else
    {
        av_pkt.pts = last_pts + (long long int)((img_in->timestamp_absolute - last_frame_ts_absolute) * (unsigned long long)av_stream->time_base.den / 1000000ULL);
        av_pkt.dts = av_pkt.pts;
    }
    av_pkt.duration = 0;
    av_pkt.pos = -1;
    last_frame_ts_utc = img_in->timestamp_utc.t;
    last_frame_ts_absolute = img_in->timestamp_absolute.t;
    last_pts = av_pkt.pts;
    if (img_in->type == keyframe)
    {
        av_pkt.flags |= AV_PKT_FLAG_KEY;
    }
    av_pkt.data = img_in->ptr;
    av_pkt.size = img_in->size;
    av_pkt.stream_index = av_stream->index;
    ret = av_interleaved_write_frame(av_fmt_ctx_out, &av_pkt);
    if (ret != 0) {
        char strE[256];
        av_strerror(ret, strE, sizeof(strE));
        trace_error("av_write_frame returns %d - %s", ret, strE);
        return;
    }
    //........ then I will close the file
FnExit:
    if (av_fmt_ctx_out && av_fmt_ctx_out->pb != NULL) {
        if (av_write_trailer(av_fmt_ctx_out) != 0) {
            trace_error("av_write_trailer Error!");
        }
    }
    if (ifmt_ctx)
        avformat_close_input(&ifmt_ctx);
    avio_closep(&av_fmt_ctx_out->pb);
    avcodec_close(av_stream->codec);
    avformat_free_context(av_fmt_ctx_out);
}
How can I modify it in order to embed the actual timestamp of each frame?
I tried to add the actual timestamp to the first frame pts instead of setting it to zero, but it didn't work.
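One approach that seems plausible here (a sketch, not from the original post): derive each packet's pts from the frame's wall-clock timestamp relative to the first frame, rescaled into the stream's time_base with av_rescale_q. The helper name and the millisecond fields are assumptions for illustration:
// Hypothetical helper: map a millisecond wall-clock timestamp to a stream pts.
static int64_t ms_to_pts(const AVStream* st, uint64_t frame_ts_ms, uint64_t first_ts_ms)
{
    AVRational ms_base = { 1, 1000 };      // input timestamps are in milliseconds
    return av_rescale_q((int64_t)(frame_ts_ms - first_ts_ms), ms_base, st->time_base);
}

// Usage for each frame (first_ts_ms captured from the first frame):
// av_pkt.pts = ms_to_pts(av_stream, img_in->timestamp_ms, first_ts_ms);
// av_pkt.dts = av_pkt.pts;               // no B-frames in this stream
If the goal is a human-readable recording time rather than per-frame pts, the usual place for it is the "creation_time" metadata key on the output format context.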

FFMPEG H264 encode each single image

I currently encode a QImage from RGB888 to H264, but I want to encode each image by itself (even if this is not the perfect way).
I'm able to encode the image, but I have to send the same image 46 times before I get a packet, and I don't know what I'm doing wrong (probably a wrong encoder configuration, but I can't find the issue there).
Afterwards I decode the image and convert it back to a QImage. I'm doing this only to test some other code.
avcodec_register_all();
AVCodec *nVidiaCodec = avcodec_find_encoder_by_name("h264_nvenc");
if (!nVidiaCodec)
{
    return false;
}
AVCodecContext* av_codec_context_ = NULL;
av_codec_context_ = avcodec_alloc_context3(nVidiaCodec);
if (!av_codec_context_)
{
    return false;
}
av_codec_context_->width = dst->width;
av_codec_context_->height = dst->height;
av_codec_context_->pix_fmt = AV_PIX_FMT_YUV420P;
av_codec_context_->gop_size = 1;
av_codec_context_->keyint_min = 0;
av_codec_context_->scenechange_threshold = 0;
av_codec_context_->bit_rate = 8000000;
av_codec_context_->time_base.den = 1;
av_codec_context_->time_base.num = 1;
av_codec_context_->refs = 0;
av_codec_context_->qmin = 1;
av_codec_context_->qmax = 1;
av_codec_context_->b_frame_strategy = 0;
av_codec_context_->max_b_frames = 0;
av_codec_context_->thread_count = 1;
av_opt_set(av_codec_context_, "preset", "slow", 0);
av_opt_set(av_codec_context_, "tune", "zerolatency", 0);
int ret = avcodec_open2(av_codec_context_, nVidiaCodec, NULL);
if (0 > ret)
{
    return false;
}
AVFrame *picture = av_frame_alloc();
picture->format = AV_PIX_FMT_RGB24;
picture->width = dst->width;
picture->height = dst->height;
ret = avpicture_fill((AVPicture *)picture, imgSrc.bits(), AV_PIX_FMT_RGB24, dst->width, dst->height);
if (0 > ret)
{
    return false;
}
AVFrame *tmp_picture = av_frame_alloc();
tmp_picture->format = AV_PIX_FMT_YUV420P;
tmp_picture->width = dst->width;
tmp_picture->height = dst->height;
ret = av_frame_get_buffer(tmp_picture, 32);
SwsContext *img_convert_ctx = sws_getContext(av_codec_context_->width, av_codec_context_->height, AV_PIX_FMT_RGB24, av_codec_context_->width, av_codec_context_->height, av_codec_context_->pix_fmt, SWS_BICUBIC, NULL, NULL, NULL);
if (!img_convert_ctx)
{
    return false;
}
ret = sws_scale(img_convert_ctx, picture->data, picture->linesize, 0, av_codec_context_->height, tmp_picture->data, tmp_picture->linesize);
if (0 > ret)
{
    return false;
}
ret = avcodec_send_frame(av_codec_context_, tmp_picture);
if (0 > ret)
{
    return false;
}
AVPacket pkt;
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
do
{
    ret = avcodec_receive_packet(av_codec_context_, &pkt);
    if (ret == 0)
    {
        break;
    }
    else if ((ret < 0) && (ret != AVERROR(EAGAIN)))
    {
        return false;
    }
    else if (ret == AVERROR(EAGAIN))
    {
        ret = avcodec_send_frame(av_codec_context_, tmp_picture);
        if (0 > ret)
        {
            return false;
        }
    }
} while (ret == 0);
// the do/while runs 46 times before I get the packet, but I want the packet on the first call
It would be very nice if you could help me.
Thanks, guys.
I assume you just want to encode a single frame. You need to flush the encoder after you have sent your single uncompressed frame, by sending NULL instead of a valid buffer.
int result = 0;
// encoder init
// send the one uncompressed frame
result = avcodec_send_frame(av_codec_context_, tmp_picture);
if (result < 0) return false;
// send NULL to indicate flushing
result = avcodec_send_frame(av_codec_context_, NULL);
if (result < 0) return false;
while (result != AVERROR_EOF)
{
    result = avcodec_receive_packet(av_codec_context_, &pkt);
    if (!result)
    {
        // you should have your encoded frame; do something with it
    }
    else if (result != AVERROR_EOF)
    {
        return false; // a real error; bail out rather than spinning forever
    }
}
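A follow-up worth knowing if you plan to reuse the context: once you have sent NULL, the encoder stays in draining mode and further avcodec_send_frame calls fail with AVERROR_EOF. Recent FFmpeg versions can reset an encoder, but only when it advertises AV_CODEC_CAP_ENCODER_FLUSH; otherwise the context has to be closed and reopened (a brief sketch, not from the answer above):
// Only valid when the encoder supports flushing; otherwise reopen the context.
if (av_codec_context_->codec->capabilities & AV_CODEC_CAP_ENCODER_FLUSH)
    avcodec_flush_buffers(av_codec_context_);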

C++ memory leaks when reading binary files

I wrote a program that automatically sends compressed files to a server.
The files are sent from a weak VPS that has 1.5 GB of RAM at its disposal.
When it loads binary files it accumulates small memory leaks, and at around 30,000 PDFs it refuses to allocate any more memory.
I worked around this by saving a log and restarting the program, which frees the RAM, but I would like to know the cause. Note that if I do not open the files with ios::binary, the leaks do not occur.
My code:
std::ifstream ifs(url,ios::binary);
std::string content((std::istreambuf_iterator<char>(ifs)), (std::istreambuf_iterator<char>()));
content = PFHelper::stream_compression(content);
content = PFHelper::ASE_encodeCppStandard(content,KS,PFHelper::AES_CBC_128);
PFHelper::stream_compression
std::string PFHelper::stream_compression(std::string stream)
{
    z_stream zs; // z_stream is zlib's control structure
    memset(&zs, 0, sizeof(zs));
    if (deflateInit(&zs, Z_BEST_COMPRESSION) != Z_OK)
    {
        throw new PFException(L"EXCEPTION_DURING_ZLIB_COMPRESSION");
    }
    zs.next_in = (Bytef*)stream.data();
    zs.avail_in = stream.size(); // set the z_stream's input
    int ret;
    char outbuffer[32768];
    std::string outstring;
    // retrieve the compressed bytes blockwise
    do {
        zs.next_out = reinterpret_cast<Bytef*>(outbuffer);
        zs.avail_out = sizeof(outbuffer);
        ret = deflate(&zs, Z_FINISH);
        if (outstring.size() < zs.total_out) {
            // append the block to the output string
            outstring.append(outbuffer,
                             zs.total_out - outstring.size());
        }
    } while (ret == Z_OK);
    deflateEnd(&zs);
    if (ret != Z_STREAM_END) { throw new PFException(L"EXCEPTION_DURING_ZLIB_COMPRESSION"); }
    return outstring;
}
PFHelper::ASE_encodeCppStandard
std::string PFHelper::ASE_encodeCppStandard(std::string in, wchar_t* KS, wchar_t* typ)
{
    string ctext = "";
    std::string KS_string = PFHelper::ConvertFromUtf8ToString(KS);
    if (typ == PFHelper::AES_CBC_128)
        ctext = encrypt(KS_string, in);
    if (typ == PFHelper::AES_CBC_256)
        ctext = encryptEX(KS_string, in);
    return ctext;
}
static string encrypt(string KS, const string ptext)
{
    EVP_CIPHER_CTX* ctx;
    ctx = EVP_CIPHER_CTX_new();
    int rc = EVP_EncryptInit_ex(ctx, EVP_aes_128_cbc(), NULL, (byte*)&KS[0], (byte*)&KS[0]);
    if (rc != 1)
        throw runtime_error("EVP_EncryptInit_ex failed");
    // Cipher text will be up to 16 bytes larger than plain text
    std::string ctext;
    ctext.resize(ptext.size() + 16);
    int out_len1 = (int)ctext.size();
    rc = EVP_EncryptUpdate(ctx, (byte*)&ctext[0], &out_len1, (const byte*)&ptext[0], (int)ptext.size());
    if (rc != 1)
        throw runtime_error("EVP_EncryptUpdate failed");
    int out_len2 = (int)ctext.size() - out_len1;
    rc = EVP_EncryptFinal_ex(ctx, (byte*)&ctext[0] + out_len1, &out_len2);
    if (rc != 1)
        throw runtime_error("EVP_EncryptFinal_ex failed");
    ctext.resize(out_len1 + out_len2);
    return ctext;
}
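One detail in encrypt() that may account for at least part of the growth (an observation, not a confirmed diagnosis of the reported leak): the context from EVP_CIPHER_CTX_new() is never passed to EVP_CIPHER_CTX_free(), so every call leaks it, and the throw new PFException(...) pointers are never deleted either. A sketch of the same flow with the context released on every path (OpenSSL 1.1 API; the function name is illustrative):
// Sketch: identical encrypt flow, with the EVP context freed on all paths.
static std::string encrypt_fixed(const std::string& KS, const std::string& ptext)
{
    EVP_CIPHER_CTX* ctx = EVP_CIPHER_CTX_new();
    if (!ctx)
        throw std::runtime_error("EVP_CIPHER_CTX_new failed");
    std::string ctext;
    try {
        if (EVP_EncryptInit_ex(ctx, EVP_aes_128_cbc(), NULL,
                               (const unsigned char*)KS.data(),
                               (const unsigned char*)KS.data()) != 1)
            throw std::runtime_error("EVP_EncryptInit_ex failed");
        ctext.resize(ptext.size() + 16);   // output may grow by one block
        int out_len1 = (int)ctext.size();
        if (EVP_EncryptUpdate(ctx, (unsigned char*)&ctext[0], &out_len1,
                              (const unsigned char*)ptext.data(), (int)ptext.size()) != 1)
            throw std::runtime_error("EVP_EncryptUpdate failed");
        int out_len2 = (int)ctext.size() - out_len1;
        if (EVP_EncryptFinal_ex(ctx, (unsigned char*)&ctext[0] + out_len1, &out_len2) != 1)
            throw std::runtime_error("EVP_EncryptFinal_ex failed");
        ctext.resize(out_len1 + out_len2);
    } catch (...) {
        EVP_CIPHER_CTX_free(ctx);          // release the context even when throwing
        throw;
    }
    EVP_CIPHER_CTX_free(ctx);              // and on the success path
    return ctext;
}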

zlib different decompression size

I am trying to use zlib for decompression. I'm following the tutorial on the zlib site, and my two inflate routines produce different-sized output.
int CZLib::Inflate() {
    int ret;
    unsigned int have;
    z_stream zstream;
    unsigned char in[CHUNK];
    unsigned char out[CHUNK];
    zstream.zalloc = Z_NULL;
    zstream.zfree = Z_NULL;
    zstream.opaque = Z_NULL;
    zstream.avail_in = 0;
    zstream.next_in = Z_NULL;
    ret = inflateInit(&zstream);
    if (ret != Z_OK)
        return ret;
    do {
        zstream.avail_in = fread(in, 1, CHUNK, fin);
        if (ferror(fin)) {
            (void)inflateEnd(&zstream);
            return Z_ERRNO;
        }
        if (zstream.avail_in == 0) break;
        zstream.next_in = in;
        do {
            zstream.avail_out = CHUNK;
            zstream.next_out = out;
            ret = inflate(&zstream, Z_NO_FLUSH);
            assert(ret != Z_STREAM_ERROR);
            switch (ret) {
            case Z_NEED_DICT:
                ret = Z_DATA_ERROR;
            case Z_DATA_ERROR:
            case Z_MEM_ERROR:
                (void)inflateEnd(&zstream);
                return ret;
            }
            have = CHUNK - zstream.avail_out;
            if (fwrite(out, 1, have, fout) != have || ferror(fout)) {
                (void)inflateEnd(&zstream);
                return Z_ERRNO;
            }
        } while (zstream.avail_out == 0);
    } while (ret != Z_STREAM_END);
    (void)inflateEnd(&zstream);
    return ret == Z_STREAM_END ? Z_OK : Z_DATA_ERROR;
}
and the other:
int CZLib::Inflate(const std::string& src) {
    std::vector<char> output;
    z_stream zstream;
    zstream.zalloc = Z_NULL;
    zstream.zfree = Z_NULL;
    zstream.opaque = Z_NULL;
    zstream.avail_in = 0;
    zstream.next_in = Z_NULL;
    int ret = inflateInit(&zstream);
    if (ret != Z_OK)
        return ret;
    unsigned char in[CHUNK];
    unsigned char out[CHUNK];
    int have = 0, nByte = CHUNK, off = 0, remaining = src.size();
    if (src.size() < CHUNK) nByte = src.size();
    do {
        memcpy(in, &src[off], nByte);
        off += nByte;
        remaining -= nByte;
        if (nByte > 0) zstream.avail_in = nByte;
        if (remaining > CHUNK) { nByte = CHUNK; }
        else { nByte = remaining; }
        if (zstream.avail_in == 0) break;
        zstream.next_in = in;
        do {
            zstream.avail_out = CHUNK;
            zstream.next_out = out;
            ret = inflate(&zstream, Z_NO_FLUSH);
            have = CHUNK - zstream.avail_out;
            output.insert(output.end(), out, out + have);
        } while (zstream.avail_out == 0);
    } while (ret != Z_STREAM_END);
    CFile* file = new CFile("in.out", "wb");
    file->Write<char>(&output[0], output.size());
    delete file;
    return ret;
}
Both use the same data. One reads a file on disk and the other uses memory (the buffer method). CHUNK size is 16384. The first routine produces 524288 (0x80000) bytes and the other 524800 (0x80200). The difference is 512 bytes. Why is this happening?
In the first code example you have this line
zstream.avail_in = fread(in, 1, CHUNK, fin);
and then you have
if (zstream.avail_in == 0) break;
to stop the loop.
In the second code example, you have the same line to stop the loop but you also have this line:
if (nByte > 0) zstream.avail_in = nByte;
^^^^^^^^^
So you only assign to zstream.avail_in when nByte > 0
....
....
if (zstream.avail_in == 0) break;
^^^^^^^^^^^^^^^^
Consequently, this will not be true when nByte is zero and the loop will not exit.
Try this instead:
zstream.avail_in = nByte; // Unconditional assignment
....
if (zstream.avail_in <= 0) break; // Less or equal to zero
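Putting the two corrections together, a sketch of the fixed feeding loop (same names as the question's second example; it assumes src holds the entire compressed buffer):
// Corrected input-feeding loop (sketch).
int off = 0, remaining = (int)src.size();
do {
    int nByte = remaining < CHUNK ? remaining : CHUNK;
    if (nByte == 0) break;                 // no input left
    memcpy(in, &src[off], nByte);
    off += nByte;
    remaining -= nByte;
    zstream.avail_in = nByte;              // unconditional assignment
    zstream.next_in = in;
    do {
        zstream.avail_out = CHUNK;
        zstream.next_out = out;
        ret = inflate(&zstream, Z_NO_FLUSH);
        if (ret != Z_OK && ret != Z_STREAM_END) break;
        have = CHUNK - zstream.avail_out;  // bytes produced this pass
        output.insert(output.end(), out, out + have);
    } while (zstream.avail_out == 0);
} while (ret != Z_STREAM_END);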

C++ ZLib GZipStream Decompression NULL terminated

There are a lot of questions out there revolving around zlib and GZipStreams, but none that I've found answers this one. I'm using a C# GZipStream to send compressed data to a client. The client reads the compressed data in entirely, then tries to decompress it. However, each time inflate() is called in the loop, it only gets the NULL-terminated portion of the buffer. When sending a binary file this is a pretty huge problem.
Before I show you code, I just wanted to say that if I write the received compressed bytes to a .gz file and use gzFile/gzopen/gzread/gzclose everything works perfectly. That means all the data is coming in properly. I want to read in the compressed data, decompress it in memory, and have the contents in a variable.
I think the issue is that inflate() is writing to a char* which is NULL terminated. I just don't know how to get it to be a string. I do fully anticipate this being a major oversight and a simple fix. Thanks for any help!
Here's the decompression code:
bool DecompressString(const std::string& message, std::string& dMsg)
{
    int bufferSize = 512;
    int messageSize = message.size() + 1;
    //decompress string
    z_stream zs;
    memset(&zs, 0, sizeof(zs));
    zs.zalloc = Z_NULL;
    zs.zfree = Z_NULL;
    zs.opaque = Z_NULL;
    zs.next_in = (Bytef*)message.data();
    zs.avail_in = messageSize;
    int ret = Z_OK;
    unsigned char* outbuffer = new unsigned char[bufferSize];
    if (inflateInit2(&zs, 16 + MAX_WBITS) == Z_OK)
    {
        do {
            zs.next_out = outbuffer;
            zs.avail_out = bufferSize;
            ret = inflate(&zs, Z_NO_FLUSH);
            if (ret < 0) return false;
            std::stringstream tmpString;
            tmpString << outbuffer;
            if (dMsg.size() < zs.total_out) {
                dMsg.append(tmpString.str().substr(0, zs.total_out - dMsg.size()));
            }
        } while (ret == Z_OK);
    }
    inflateEnd(&zs);
    delete[] outbuffer;
    //"\n<EOF>" is appended by the sender to signify the end of file. This removes it
    if (dMsg.find("\n<EOF>") != std::string::npos)
        dMsg = dMsg.substr(0, dMsg.find("\n<EOF>"));
    return true;
}
Working code from solution:
bool DecompressString(const std::string& message, std::string& dMsg)
{
    int bufferSize = 512;
    int messageSize = message.size() + 1;
    //decompress string
    z_stream zs;
    memset(&zs, 0, sizeof(zs));
    zs.zalloc = Z_NULL;
    zs.zfree = Z_NULL;
    zs.opaque = Z_NULL;
    zs.next_in = (Bytef*)message.data();
    zs.avail_in = messageSize;
    int ret = Z_OK;
    unsigned char* outbuffer = new unsigned char[bufferSize];
    if (inflateInit2(&zs, 16 + MAX_WBITS) == Z_OK)
    {
        // get the decompressed bytes blockwise using repeated calls to inflate
        do {
            zs.next_out = outbuffer;
            zs.avail_out = bufferSize;
            ret = inflate(&zs, Z_NO_FLUSH);
            if (ret < 0) return false;
            // Here's the difference: append exactly the bytes produced this
            // pass, length-delimited, with no NUL-termination assumption
            if (dMsg.size() < zs.total_out)
                dMsg.append(reinterpret_cast<char*>(outbuffer),
                            zs.total_out - dMsg.size());
            // End
        } while (ret == Z_OK);
    }
    inflateEnd(&zs);
    delete[] outbuffer;
    if (dMsg.find("\n<EOF>") != std::string::npos)
        dMsg = dMsg.substr(0, dMsg.find("\n<EOF>"));
    return true;
}
string is not a problem in itself; it can handle binary data.
It is this line that assumes a zero-terminated C string:
tmpString << outbuffer;
Replace it with a length-delimited write (note that std::stringstream has no append member, and only the bytes inflate actually produced should be copied):
tmpString.write(reinterpret_cast<char*>(outbuffer), bufferSize - zs.avail_out);
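A tiny illustration of the difference (standalone sketch, assuming <sstream> is included): operator<< on a char pointer treats it as a C string and stops at the first zero byte, while write() copies a known number of bytes.
// operator<< stops at the first NUL; write() copies a known length.
unsigned char buf[4] = { 'a', 0, 'b', 'c' };
std::stringstream ss1, ss2;
ss1 << buf;                                    // ss1 holds just "a"
ss2.write(reinterpret_cast<char*>(buf), 4);    // ss2 holds all 4 bytes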