I am trying to dump memory (allocated with malloc) to a file. I want to dump the raw data because I don't know what's inside the memory (int, float, double) at the point when I want to dump it.
What's the best way to do this?
I have tried a few things already, but none of them worked as I wanted.
In C, it's quite trivial, really:
const size_t size = 4711;
void *data = malloc(size);
if(data != NULL)
{
    FILE *out = fopen("memory.bin", "wb");
    if(out != NULL)
    {
        size_t to_go = size;
        while(to_go > 0)
        {
            /* Count in bytes (element size 1) and advance past what was
               already written, so a short write can be resumed. */
            const size_t wrote = fwrite((const char *)data + (size - to_go),
                                        1, to_go, out);
            if(wrote == 0)
                break;
            to_go -= wrote;
        }
        fclose(out);
    }
    free(data);
}
The above loops fwrite() to properly handle short writes; that's where most of the complexity comes from.
It's not clear what you mean by "not working".
You could reinterpret_cast the memory to a char * and write it to file easily.
Reading it back again is a different matter.
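For completeness, reading it back could look something like this minimal sketch (assuming you recorded elsewhere how many bytes were written and what types they hold, since the file itself carries no type information):
#include <fstream>
#include <string>
#include <vector>

// Read the raw bytes back; interpreting them is entirely up to the caller.
std::vector<char> read_file_binary(std::string const & filename)
{
    std::ifstream in(filename.c_str(), std::ios::binary | std::ios::ate);
    std::vector<char> buffer;
    if (in)
    {
        const std::streamsize size = in.tellg(); // opened at the end, so tellg() is the file size
        buffer.resize(static_cast<size_t>(size));
        in.seekg(0, std::ios::beg);
        in.read(buffer.data(), size);
    }
    return buffer;
}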
The "C++ way" of doing it would probably involve using std::ostream::write with a stream in binary mode.
#include <fstream>
#include <string>

bool write_file_binary (std::string const & filename,
    char const * data, size_t const bytes)
{
    std::ofstream b_stream(filename.c_str(),
        std::fstream::out | std::fstream::binary);
    if (b_stream)
    {
        b_stream.write(data, bytes);
        return (b_stream.good());
    }
    return false;
}

int main (void)
{
    double * buffer = new double[100];
    write_file_binary("test.bin",
        reinterpret_cast<char const *>(buffer),
        sizeof(double)*100);
    delete[] buffer;
    return 0;
}
If this is C++, this might help you. As part of serializing and deserializing, I write the raw memory array to a file (using new[] is essentially the same as malloc in the C world):
https://github.com/goblinhack/simple-c-plus-plus-serializer
#include "hexdump.h"
auto elems = 128;
static void serialize (std::ofstream &out)
{
    auto a = new char[elems];
    for (auto i = 0; i < elems; i++) { a[i] = i; }
    out << bits(a);
    hexdump(a, elems);
}
Output:
128 bytes:
0000 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f |................|
0010 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f |................|
0020 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f | !"#$%&'()*+,-./|
0030 30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d 3e 3f |0123456789:;<=>?|
0040 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f |@ABCDEFGHIJKLMNO|
0050 50 51 52 53 54 55 56 57 58 59 5a 5b 5c 5d 5e 5f |PQRSTUVWXYZ[\]^_|
0060 60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f |`abcdefghijklmno|
0070 70 71 72 73 74 75 76 77 78 79 7a 7b 7c 7d 7e 7f |pqrstuvwxyz{|}~.|
Can someone tell me what's wrong with my code?
It works fine in my test example, but when I use it in the production model it decrypts the string but adds a padding symbol, apparently to maintain some kind of block size.
I didn't post my encrypt/decrypt methods as they would make this post too big; they work fine, as my test example encrypts and decrypts properly. ini.GetValue is an INI retrieval method; there is nothing wrong with it, and you can see the Base64 size is the same as in the example code, so I believe it works fine. I never had any problems with it before when I used it without encryption; it returns a const char*. The problem, as you can see, is that the production code ciphertext has 2 null bytes appended to it, which I find strange because both pieces of code are pretty much identical. I'm not good at C++, so I'm probably overlooking some basic char array stuff.
The encryption code I use is AES-256-CBC from OpenSSL 1.1.1.
Look at my outputs to see what's wrong.
Good looking example code:
Ciphertext is:
000000: 7a e1 69 61 65 bb 74 ad 1a 68 8a ae 73 70 b6 0e z.iae.t..h..sp..
000010: 4f c9 45 9b 44 ca e2 be e2 aa 16 14 cd b1 79 7b O.E.D.........y{
000020: 86 a5 92 26 e6 08 3e 55 61 4e 60 03 50 f3 e4 c1 ...&..>UaN`.P...
000030: fe 5a 2c 0b df c9 1b d8 92 1f 48 75 0d f8 c2 44 .Z,.......Hu...D
Base64 (size=88):
000000: 65 75 46 70 59 57 57 37 64 4b 30 61 61 49 71 75 euFpYWW7dK0aaIqu
000010: 63 33 43 32 44 6b 2f 4a 52 5a 74 45 79 75 4b 2b c3C2Dk/JRZtEyuK+
000020: 34 71 6f 57 46 4d 32 78 65 58 75 47 70 5a 49 6d 4qoWFM2xeXuGpZIm
000030: 35 67 67 2b 56 57 46 4f 59 41 4e 51 38 2b 54 42 5gg+VWFOYANQ8+TB
000040: 2f 6c 6f 73 43 39 2f 4a 47 39 69 53 48 30 68 31 /losC9/JG9iSH0h1
000050: 44 66 6a 43 52 41 3d 3d DfjCRA==
b cip len = 64
a cip len = 16
plain b = 0
plain a = 3
Decrypted text is:
wtf
Decrypted base64 is:
wtf
000000: 77 74 66 00 wtf.
Bad production code example:
Base64 (size=88)
000000: 6a 7a 48 30 46 71 73 54 45 47 4d 76 2f 67 76 59 jzH0FqsTEGMv/gvY
000010: 4d 73 34 54 2f 39 58 32 6c 37 54 31 4d 6d 56 61 Ms4T/9X2l7T1MmVa
000020: 36 45 4f 38 52 64 45 57 42 6b 65 48 71 31 31 45 6EO8RdEWBkeHq11E
000030: 39 2b 77 37 47 4e 49 4a 47 4a 71 42 55 74 54 70 9+w7GNIJGJqBUtTp
000040: 30 36 58 46 31 4d 66 45 79 44 45 71 5a 69 58 54 06XF1MfEyDEqZiXT
000050: 79 45 53 6b 65 41 3d 3d yESkeA==
Ciphertext is:
000000: 8f 31 f4 16 ab 13 10 63 2f fe 0b d8 32 ce 13 ff .1.....c/...2...
000010: d5 f6 97 b4 f5 32 65 5a e8 43 bc 45 d1 16 06 47 .....2eZ.C.E...G
000020: 87 ab 5d 44 f7 ec 3b 18 d2 09 18 9a 81 52 d4 e9 ..]D..;......R..
000030: d3 a5 c5 d4 c7 c4 c8 31 2a 66 25 d3 c8 44 a4 78 .......1*f%..D.x
000040: 00 00 ..
b cip len = 65
a cip len = 17
crypt miss-match
plain b = 16
crypt write fail
plain a = 16
000000: 77 74 66 09 09 09 09 09 09 09 09 05 05 05 05 05 wtf.............
Here is my code. As you can see, both versions look very similar, so I don't understand what the problem is.
Here is a little helper function I use for hexdump output.
void Hexdump(void* ptr, int buflen)
{
    unsigned char* buf = (unsigned char*)ptr;
    int i, j;
    for (i = 0; i < buflen; i += 16) {
        myprintf("%06x: ", i);
        for (j = 0; j < 16; j++)
            if (i + j < buflen)
                myprintf("%02x ", buf[i + j]);
            else
                myprintf("   ");
        myprintf(" ");
        for (j = 0; j < 16; j++)
            if (i + j < buflen)
                myprintf("%c", isprint(buf[i + j]) ? buf[i + j] : '.');
        myprintf("\n");
    }
}
char* base64(const unsigned char* input, int length) {
    const auto pl = 4 * ((length + 2) / 3);
    auto output = reinterpret_cast<char*>(calloc(pl + 1, 1)); //+1 for the terminating null that EVP_EncodeBlock adds on
    const auto ol = EVP_EncodeBlock(reinterpret_cast<unsigned char*>(output), input, length);
    if (pl != ol) { myprintf("b64 calc %d,%d\n", pl, ol); }
    return output;
}

unsigned char* decode64(const char* input, int length) {
    const auto pl = 3 * length / 4;
    auto output = reinterpret_cast<unsigned char*>(calloc(pl + 1, 1));
    const auto ol = EVP_DecodeBlock(output, reinterpret_cast<const unsigned char*>(input), length);
    if (pl != ol) { myprintf("d64 calc %d,%d\n", pl, ol); }
    return output;
}
Here is the test example that works fine.
/* enc test */
/* Message to be encrypted */
unsigned char* plaintext = (unsigned char*)"wtf";
/*
* Buffer for ciphertext. Ensure the buffer is long enough for the
* ciphertext which may be longer than the plaintext, depending on the
* algorithm and mode.
*/
unsigned char* ciphertext = new unsigned char[128];
/* Buffer for the decrypted text */
unsigned char decryptedtext[128];
int decryptedtext_len, ciphertext_len;
/* Encrypt the plaintext */
ciphertext_len = encrypt(plaintext, strlen((char*)plaintext), ciphertext);
/* Do something useful with the ciphertext here */
myprintf("Ciphertext is:\n");
Hexdump((void*)ciphertext, ciphertext_len);
myprintf("Base64 (size=%d):\n", strlen(base64(ciphertext, ciphertext_len)));
Hexdump((void*)base64(ciphertext, ciphertext_len), 4 * ((ciphertext_len + 2) / 3));
/* Decrypt the ciphertext */
decryptedtext_len = decrypt(ciphertext, ciphertext_len, decryptedtext);
/* Add a NULL terminator. We are expecting printable text */
decryptedtext[decryptedtext_len] = '\0';
/* Show the decrypted text */
myprintf("Decrypted text is:\n");
myprintf("%s\n", decryptedtext);
myprintf("Decrypted base64 is:\n");
myprintf("%s\n", decode64(base64(decryptedtext, decryptedtext_len), 4 * ((decryptedtext_len + 2) / 3)));
Hexdump(decode64(base64(decryptedtext, decryptedtext_len), 4 * ((decryptedtext_len + 2) / 3)), 4 * ((decryptedtext_len + 2) / 3));
/* enc test end */
Here is the bad production code:
//Decrypt the username
const char* b64buffer = ini.GetValue("Credentials", "SavedPassword", "");
int b64buffer_length = strlen(b64buffer);
myprintf("Base64 (size=%d)\n", b64buffer_length);
Hexdump((void*)b64buffer, b64buffer_length);
int decryptedtext_len;
int decoded_size = 3 * b64buffer_length / 4;
unsigned char* decryptedtext = new unsigned char[decoded_size];
//unsigned char* ciphertext = decode64(b64buffer, b64buffer_length); //had this before, same problem as the line below; it worked without allocating new memory, but I prefer to fix this version up
unsigned char* ciphertext = new unsigned char[decoded_size];
memcpy(ciphertext, decode64(b64buffer, b64buffer_length), decoded_size); //same problem as top line.
myprintf("Ciphertext is:\n");
Hexdump((void*)ciphertext, decoded_size);
/* Decrypt the ciphertext */
decryptedtext_len = decrypt(ciphertext, decoded_size - 1, decryptedtext);
/* Add a NULL terminator. We are expecting printable text */
decryptedtext[decryptedtext_len] = '\0';
Hexdump(decryptedtext, decryptedtext_len);
strcpy(password_setting, (char*)decryptedtext); //save decrypted password back
delete[] decryptedtext;
delete[] ciphertext;
In the example that works, you get ciphertext_len directly from the encryption function. When you display the ciphertext, you use this length.
In the "bad production code", you calculate decoded_size from the length of the Base64 data. However, Base64 encoded data always has a length that is a multiple of 4. If the original data size is not a multiple of 3, then there are one or two padding characters added to the string. In both of your examples, you have two of these characters, the '=' at the end of the Base64 data.
When calculating the length of the decrypted data, you need to account for these bytes. If there are no '=' characters at the end of the string, use the length that you calculated (3 * N / 4). If there is one '=' character, reduce that calculated length by 1, and if there are two '=' characters, reduce the calculated length by 2. (There will not be 3 padding characters.)
Edit: Here is my fix: (sspoke)
char* base64(const unsigned char* input, int length) {
    const auto pl = 4 * ((length + 2) / 3);
    auto output = reinterpret_cast<char*>(calloc(pl + 1, 1)); //+1 for the terminating null that EVP_EncodeBlock adds on
    const auto ol = EVP_EncodeBlock(reinterpret_cast<unsigned char*>(output), input, length);
    if (pl != ol) { printf("encode64 fail size size %d,%d\n", pl, ol); }
    return output;
}

unsigned char* decode64(const char* input, int* length) {
    //Old code generated bad length sizes because it didn't take into account the '==' signs.
    const auto pl = 3 * *length / 4;
    auto output = reinterpret_cast<unsigned char*>(calloc(pl + 1, 1));
    const auto ol = EVP_DecodeBlock(output, reinterpret_cast<const unsigned char*>(input), *length);
    if (pl != ol) { printf("decode64 fail size size %d,%d\n", pl, ol); }
    //Little bug fix I added to fix incorrect lengths, because '==' signs are not considered in the output. -sspoke
    if (*length > 3 && input[*length - 1] == '=' && input[*length - 2] == '=')
        *length = ol - 2;
    else if (*length > 2 && input[*length - 1] == '=')
        *length = ol - 1;
    else
        *length = ol;
    return output;
}
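With the changed signature, callers now pass the length by pointer and use the adjusted value afterwards. Roughly, the production code above would become (a sketch reusing the same variable names):
int len = b64buffer_length;
unsigned char* ciphertext = decode64(b64buffer, &len);
// len now holds the true decoded size, with the '=' padding subtracted
Hexdump((void*)ciphertext, len);
decryptedtext_len = decrypt(ciphertext, len, decryptedtext);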
I'm writing a native module using node-addon-api that takes advantage of the Magick++ library. The module takes a file path to an image alongside some parameters and returns a buffer. I seem to have come across a pretty bad memory leak issue which Massif reports as being related to either the buffer that is created or the Magick++ image. Here's my C++ code:
#include <napi.h>
#include <list>
#include <Magick++.h>
using namespace std;
using namespace Magick;
class FlipWorker : public Napi::AsyncWorker {
  public:
    FlipWorker(Napi::Function& callback, string in_path, bool flop, string type, int delay)
        : Napi::AsyncWorker(callback), in_path(in_path), flop(flop), type(type), delay(delay) {}
    ~FlipWorker() {}

    void Execute() {
        list<Image> frames;
        list<Image> coalesced;
        list<Image> mid;
        list<Image> result;
        readImages(&frames, in_path);
        coalesceImages(&coalesced, frames.begin(), frames.end());
        for (Image &image : coalesced) {
            flop ? image.flop() : image.flip();
            image.magick(type);
            mid.push_back(image);
        }
        optimizeImageLayers(&result, mid.begin(), mid.end());
        if (delay != 0) for_each(result.begin(), result.end(), animationDelayImage(delay));
        writeImages(result.begin(), result.end(), &blob);
    }

    void OnOK() {
        Callback().Call({Env().Undefined(), Napi::Buffer<char>::Copy(Env(), (char *)blob.data(), blob.length())});
    }

  private:
    string in_path, type;
    bool flop;
    int delay;
    Blob blob;
};

Napi::Value Flip(const Napi::CallbackInfo &info)
{
    Napi::Env env = info.Env();
    Napi::Object obj = info[0].As<Napi::Object>();
    Napi::Function cb = info[1].As<Napi::Function>();
    string path = obj.Get("path").As<Napi::String>().Utf8Value();
    bool flop = obj.Has("flop") ? obj.Get("flop").As<Napi::Boolean>().Value() : false;
    string type = obj.Get("type").As<Napi::String>().Utf8Value();
    int delay = obj.Get("delay").As<Napi::Number>().Int32Value();
    FlipWorker* flipWorker = new FlipWorker(cb, path, flop, type, delay);
    flipWorker->Queue();
    return env.Undefined();
}

Napi::Object Init(Napi::Env env, Napi::Object exports) {
    exports.Set(Napi::String::New(env, "flip"), Napi::Function::New(env, Flip));
    return exports;
}

NODE_API_MODULE(addon, Init);
And an example JS script:
const image = require("./build/Release/image.node");

setInterval(() => {
  image.flip({ path: "/home/esm/animated.gif", type: "gif", delay: 0 }, (error, buffer) => {
    console.log(buffer);
    console.log(process.memoryUsage().rss);
  });
}, 10000);
Here is a sample output of the script:
<Buffer 47 49 46 38 39 61 80 02 66 01 f7 00 00 38 44 3a 62 58 26 70 64 27 12 1c 4d 19 26 50 26 30 57 10 38 79 2c 37 67 35 51 57 14 47 79 35 4a 71 55 4f 4f 68 ... 868294 more bytes>
69496832
<Buffer 47 49 46 38 39 61 80 02 66 01 f7 00 00 38 44 3a 62 58 26 70 64 27 12 1c 4d 19 26 50 26 30 57 10 38 79 2c 37 67 35 51 57 14 47 79 35 4a 71 55 4f 4f 68 ... 868294 more bytes>
110673920
<Buffer 47 49 46 38 39 61 80 02 66 01 f7 00 00 38 44 3a 62 58 26 70 64 27 12 1c 4d 19 26 50 26 30 57 10 38 79 2c 37 67 35 51 57 14 47 79 35 4a 71 55 4f 4f 68 ... 868294 more bytes>
152092672
<Buffer 47 49 46 38 39 61 80 02 66 01 f7 00 00 38 44 3a 62 58 26 70 64 27 12 1c 4d 19 26 50 26 30 57 10 38 79 2c 37 67 35 51 57 14 47 79 35 4a 71 55 4f 4f 68 ... 868294 more bytes>
192970752
<Buffer 47 49 46 38 39 61 80 02 66 01 f7 00 00 38 44 3a 62 58 26 70 64 27 12 1c 4d 19 26 50 26 30 57 10 38 79 2c 37 67 35 51 57 14 47 79 35 4a 71 55 4f 4f 68 ... 868294 more bytes>
204517376
As you can see, the resident set size increases significantly each time the function is run. This happens with every image in any format that I use with it. How would I keep the code from leaking? Thanks in advance.
EDIT:
I did some more digging and it turns out that since the buffer is not created through JS, it isn't eligible for garbage collection in the same way. I'm now wondering whether or not it's possible to create a buffer that gets garbage collected by V8 and still provides the same data.
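For what it's worth, node-addon-api does let you hand externally allocated memory to V8 with a finalizer, so the bytes are freed when the Buffer is collected instead of being copied. A minimal sketch of what OnOK could look like with that overload (the explicit copy is an illustrative detail, since the Blob dies with the worker; memcpy needs <cstring>):
void OnOK() {
    // Copy the blob into memory we own, then give V8 ownership via a
    // finalizer that runs when the Buffer is garbage collected.
    size_t length = blob.length();
    char* data = new char[length];
    memcpy(data, blob.data(), length);
    Callback().Call({Env().Undefined(),
        Napi::Buffer<char>::New(Env(), data, length,
            [](Napi::Env, char* p) { delete[] p; })});
}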
I'm trying to sign a file with my private key using the following function:
void Signer::SignFile(const std::string& file)
{
    RSASS<PSSR, Whirlpool>::Signer signer(rsaPrivate);

    // Encrypt and write
    FileSource ss1(file.c_str(), true,
        new SignerFilter(rng, signer,
            new FileSink(file.c_str(), true)
        , true));
}
As an outcome my target file gets overwritten with the signature. Why does that happen? Obviously I'd like to append it to the file, which is why I also added the extra "true" parameter for "putMessage":
FileSource ss1(file.c_str(), true,
new SignerFilter(rng, signer,
new FileSink(file.c_str(), true), true));
I [personally] have never seen someone use the same file as a source and sink. Usually the file data and signature are separate like with a detached signature.
It looks like one file/two streams is implementation defined: C++ read and write to same file using different streams. I guess you should expect seemingly random results on different platforms.
As an outcome my target file gets overwritten with the signature. Why does that happen?
The FileSink opens the file with std::ios_base::trunc. Also see FileSink on the Crypto++ wiki.
You should probably do something like this. It saves the signature to an intermediate ByteQueue, and then writes the queue to the file once the file is closed.
#include "cryptlib.h"
#include "filters.h"
#include "osrng.h"
#include "files.h"
#include "pssr.h"
#include "rsa.h"
#include "whrlpool.h"
#include <iostream>
#include <fstream>
#include <string>
int main(int argc, char* argv[])
{
using namespace CryptoPP;
AutoSeededRandomPool prng;
std::string fname("test.bin");
///// Create test message /////
{
std::string msg("Yoda said, Do or do not. There is no try.");
std::ofstream out(fname.c_str());
out.write(msg.data(), msg.size());
}
///// Generate a key /////
RSASS<PSSR, Whirlpool>::Signer signer;
signer.AccessKey().GenerateRandomWithKeySize(prng, 2048);
///// Sign the message /////
ByteQueue queue;
{
FileSource source(fname.c_str(), true,
new SignerFilter(prng, signer,
new Redirector(queue)));
}
///// Append the signature /////
{
std::ofstream out(fname.c_str(), std::ios_base::out | std::ios_base::binary | std::ios_base::app);
queue.TransferTo(FileSink(out).Ref());
}
///// Create a verifier /////
RSASS<PSSR, Whirlpool>::Verifier verifier(signer);
///// Verify the message /////
{
FileSource source(fname.c_str(), true,
new SignatureVerificationFilter(verifier, NULLPTR,
SignatureVerificationFilter::THROW_EXCEPTION));
}
std::cout << "Verified signature on message" << std::endl;
return 0;
}
It does not crash, and it appends the signature as expected:
$ ./test.exe
Verified signature on message
$ hexdump -C test.bin
00000000 59 6f 64 61 20 73 61 69 64 2c 20 44 6f 20 6f 72 |Yoda said, Do or|
00000010 20 64 6f 20 6e 6f 74 2e 20 54 68 65 72 65 20 69 | do not. There i|
00000020 73 20 6e 6f 20 74 72 79 2e 19 f2 1c 8f f9 cb 2f |s no try......./|
00000030 f2 38 9f a8 3b a9 0b 8b 62 25 56 a8 ea 81 7e 60 |.8..;...b%V...~`|
00000040 22 55 38 ce 79 7f 32 95 a5 1a 75 c1 80 ad b2 c2 |"U8.y.2...u.....|
00000050 6f ce a5 f7 bd 4b d3 3f e4 b3 69 00 21 60 d7 09 |o....K.?..i.!`..|
00000060 a8 71 9b 5f 41 d6 66 b1 80 f1 de 00 26 19 34 01 |.q._A.f.....&.4.|
00000070 b3 65 1b 78 e2 32 71 be bc 07 25 78 36 6b 56 4e |.e.x.2q...%x6kVN|
00000080 26 4e 12 9e a8 bb 72 86 ee 0d 70 b2 f1 bd a3 2c |&N....r...p....,|
00000090 14 fd 12 61 35 98 4a 80 9f ee 3c 31 d3 70 26 0f |...a5.J...<1.p&.|
000000a0 73 a0 5d 36 ef 96 56 65 f8 ac 3a fb 44 c3 04 76 |s.]6..Ve..:.D..v|
000000b0 e5 2f ae 92 84 be 40 34 f6 4b b8 84 aa bd 67 74 |./....@4.K....gt|
000000c0 05 43 91 d2 e6 b1 50 dd 6d 64 47 cc 3e 3c 3a 9d |.C....P.mdG.><:.|
000000d0 67 ff 4f 38 c1 a5 a6 d5 92 45 bc 2d ff 96 30 3a |g.O8.....E.-..0:|
000000e0 1d 3a 42 4f 8c 13 2d 4c 3f e9 ad 08 a6 b3 5e fa |.:BO..-L?.....^.|
000000f0 46 08 24 17 43 ce ed ec f7 1a 38 62 e7 bf 42 93 |F.$.C.....8b..B.|
00000100 84 44 b6 05 22 9e e3 bd 80 a6 08 b0 34 d0 a4 89 |.D..".......4...|
00000110 78 48 20 7f 7b 33 1c 51 9d 48 b5 b7 f7 de 2f dd |xH .{3.Q.H..../.|
00000120 d7 74 7b af 04 cd 92 fc 1c |.t{......|
I was not able to get this to work. I'm pretty sure it is a dead end.
std::fstream inout(fname.c_str(), std::ios_base::in | std::ios_base::out | std::ios_base::binary | std::ios_base::app);
FileSource fs(inout, true,
new SignerFilter(prng, signer,
new FileSink(inout), true));
Obviously I'd like to append it to the file, that's why ...
Also, because you are using PSSR, you don't need the original message. PSSR is a "probabilistic signature scheme with recovery". The message is included in the signature with a mask function.
You would need the original message with an SSA scheme. SSA is a "signature scheme with appendix". In an SSA scheme you need to provide both the original message and the signature.
(From the comments) Here is an example that uses a Signature Scheme with Appendix (SSA). It also uses std::ifstream and std::ofstream directly so FileSource and FileSink work as you expect. std::ofstream includes std::ios_base::app so the signature is appended.
#include "cryptlib.h"
#include "filters.h"
#include "osrng.h"
#include "files.h"
#include "oaep.h"
#include "rsa.h"
#include "sha.h"
#include <iostream>
#include <fstream>
#include <string>
int main(int argc, char* argv[])
{
using namespace CryptoPP;
AutoSeededRandomPool prng;
std::string fname("test.bin");
///// Create test message /////
{
std::string msg("Yoda said, Do or do not. There is no try.");
std::ofstream out(fname.c_str());
out.write(msg.data(), msg.size());
}
///// Generate a key /////
RSASS<PKCS1v15, SHA256>::Signer signer;
signer.AccessKey().GenerateRandomWithKeySize(prng, 2048);
{
///// Create fstreams for input and output /////
std::ifstream fin(fname.c_str(), std::ios_base::in | std::ios_base::binary);
std::ofstream fout(fname.c_str(), std::ios_base::out | std::ios_base::binary | std::ios_base::app);
///// Sign the message /////
FileSource source(fin, true,
new SignerFilter(prng, signer,
new FileSink(fout)));
}
///// Create a verifier /////
RSASS<PKCS1v15, SHA256>::Verifier verifier(signer);
///// Verify the message /////
{
FileSource source(fname.c_str(), true,
new SignatureVerificationFilter(verifier, NULLPTR,
SignatureVerificationFilter::THROW_EXCEPTION));
}
std::cout << "Verified signature on message" << std::endl;
return 0;
}
And then:
$ ./test.exe
Verified signature on message
$ hexdump -C test.bin
00000000 59 6f 64 61 20 73 61 69 64 2c 20 44 6f 20 6f 72 |Yoda said, Do or|
00000010 20 64 6f 20 6e 6f 74 2e 20 54 68 65 72 65 20 69 | do not. There i|
00000020 73 20 6e 6f 20 74 72 79 2e c7 b3 6f 84 1d fd bf |s no try...o....|
00000030 c7 c8 38 7c 89 b1 f3 42 ee 5e f8 10 de a8 01 7f |..8|...B.^......|
00000040 7f a5 24 3d 27 7e 55 16 bc 80 8b 21 21 75 3d ed |..$='~U....!!u=.|
00000050 41 05 84 b1 3d bf d3 ae 3a 2f a8 81 7a e7 e4 ae |A...=...:/..z...|
00000060 50 d7 9b 25 04 17 a6 a3 1d 12 e2 8e cd 7a 02 42 |P..%.........z.B|
00000070 91 c0 d7 fc 43 09 94 a2 66 d9 67 95 55 5e dc 8c |....C...f.g.U^..|
00000080 eb bc 20 af e8 5c d4 63 05 d4 2c 48 57 6d f1 fe |.. ..\.c..,HWm..|
00000090 26 16 80 c3 41 11 58 8e 8d b0 cb 48 95 b9 ed 94 |&...A.X....H....|
000000a0 84 cc 86 0f a4 7e a3 6a ff 0d 0d 24 17 82 13 94 |.....~.j...$....|
000000b0 54 cb 8a ca 04 1e 65 18 c3 ab a2 3f 4d 44 de 42 |T.....e....?MD.B|
000000c0 32 07 29 e4 95 83 cc ff 39 85 08 bf d5 61 46 db |2.).....9....aF.|
000000d0 e0 96 d6 69 25 b9 ce 1e 3e bc 63 81 e5 16 bd 12 |...i%...>.c.....|
000000e0 a0 78 02 19 60 96 80 36 7d a5 79 be 0f 45 54 f4 |.x..`..6}.y..ET.|
000000f0 92 af f0 d8 74 65 7d 45 98 c7 bb 7f 6e 9b e3 cd |....te}E....n...|
00000100 c0 60 91 0f 78 aa 7c 77 a7 f5 4e 7d 6e ed e1 4c |.`..x.|w..N}n..L|
00000110 8e 5e 96 ac cd 30 16 e0 2d be 9e 2d 68 d4 25 46 |.^...0..-..-h.%F|
00000120 86 77 87 be 68 ef 06 26 55 |.w..h..&U|
I'm working with zlib and have a problem with decompression. I try to decompress packets that come to my program, but only the first packet is decompressed correctly. For example:
//first compressed packet
78 5e 72 65 60 08 65 bf cd c0 60 28 98 3f 95 03
08 18 19 19 25 18 4c af b9 32 38 0a a4 d6 6c 6d
6c 60 60 04 42 20 60 31 2b c9 37 61 c9 2c 28 33
e3 cc cd 4c 2e ca 2f ce 4f 2b 61 4e ce cf 65 00
29 38 c0 03 51 c6 7c 9b 81 e5 40 44 32 23 00
//first decompressed packet
//inflate return 0
45 00 00 55 07 db 00 00 31 11 6f 95 08 08 08 08
01 01 01 18 00 35 d6 45 00 41 10 65 7c b5 81 80
00 01 00 01 00 00 00 00 04 36 74 6f 34 04 69 70
76 36 09 6d 69 63 72 6f 73 6f 66 74 03 63 6f 6d
00 00 01 00 01 c0 0c 00 01 00 01 00 00 03 db 00
04 c0 58 63 01
But when I try to decompress the second packet, "inflate" returns -3 and decompresses nothing. Example of the second compressed packet:
//second compressed packet
//inflate return -3
72 65 60 f0 62 bf 03 36 74 3e c2 d0 77 cb 19 cc
de cc d8 18 8c 30 94 b9 20 b1 92 35 33 bf 38 b1
84 a9 a8 14 c5 24 17 2f 06 96 88 63 e7 ad 01 00
I tried initializing the decompressor with the parameters MAX_WBITS, -MAX_WBITS, and 30, but it did not help. How can I solve this problem?
Code example:
//functions
InitZDecompressor = ( int (WINAPI *)( z_stream_s*, int,const char*,int)) GetProcAddress(zlibdll,"inflateInit2_");
ZDecompressor = (int (WINAPI *)(z_stream_s*,int)) GetProcAddress(zlibdll,"inflate");
ResetZDecompressor = (int (WINAPI *)(z_stream_s*)) GetProcAddress(zlibdll,"inflateEnd");
//initialize
__int32 Decoder(unsigned __int8* PDU, unsigned __int32 size, unsigned __int8* out_b, z_stream_s& stream, bool& IsInit)
{
    if (IsDllLoaded == false || PDU == nullptr) { return 0; } //if Zlib DLL was not loaded, or incoming packet is not cTCP

    if (!IsInit)
    {
        SecureZeroMemory(&stream, sizeof(stream));
        auto res = InitZDecompressor(&stream, MAX_WBITS, "1.2.11", sizeof(z_stream_s)); //initialize only one time
        IsInit = true;
    }

    stream.next_in = PDU;
    stream.avail_in = size;
    stream.next_out = out_b;
    stream.avail_out = 1048576;
    stream.total_out = 0;

    __int32 ret = 0;

    //inflate
    while (stream.avail_in && ret == 0)
    {
        ret = ZDecompressor(&stream, 2);
    }
    return ret;
}

//inflateEnd
void ResetDecompessor(bool& isInit, z_stream_s& stream)
{
    if (isInit) {
        ResetZDecompressor(&stream);
        isInit = false;
        memset(&stream, 0, sizeof(stream));
    }
}

//test func
void testZlib(unsigned __int8* StPt, __int64 size, z_stream_s& stream, bool& isInit)
{
    // StPt - start of compressed data
    // size - size of compressed data
    // isInit - is zStream already initialized
    unsigned __int8* OutBuf = new unsigned __int8[1048576];
    auto res = zlib->Decoder(StPt, size, OutBuf, stream, isInit);
    delete[] OutBuf;
}
What's happening here is that the sender is flushing the deflate compressor with an empty stored block in order to produce a decompressible packet, and then deleting the last four bytes of the empty stored block, expecting you, the receiver, to insert that.
So what you need to do is insert the bytes 00 00 ff ff between the compressed packets, and then decompress the whole thing as one zlib stream. Do not initialize inflate for the second packet -- just keep feeding compressed data to the inflator (including the inserted bytes).
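Applied to the Decoder function above, that might look roughly like this (the firstPacket flag is illustrative; the key point is to keep the one z_stream alive across packets and never call inflateInit2_ again):
//The empty stored block the sender strips after each flush
static const unsigned __int8 kEmptyBlock[4] = { 0x00, 0x00, 0xff, 0xff };

if (!firstPacket)
{
    //Run the four bytes through the same stream first; an empty stored
    //block produces no output, it just re-aligns the inflator
    stream.next_in = const_cast<unsigned __int8*>(kEmptyBlock);
    stream.avail_in = sizeof(kEmptyBlock);
    stream.next_out = out_b;
    stream.avail_out = 1048576;
    ZDecompressor(&stream, 2); //Z_SYNC_FLUSH
}
//then feed the packet itself exactly as before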
I have a chunk of data which is supposed to be zlib compressed data (I was not 100% sure).
I first tried to uncompress it with gzip by prepending "1F 8B 08 00 00 00 00 00". Just like in the accepted answer of this thread (https://unix.stackexchange.com/questions/22834/how-to-uncompress-zlib-data-in-unix). It worked out and it was probably the right approach, because the output contained a lot of human readable strings.
I then tried to implement this in a C++ program using zlib. But it seems that zlib generates a different output. Am I missing something? zlib and gzip should be basically the same (apart from the headers and trailers), shouldn't they? Or do I have a simple error in my code below? (The chunk of data is shortened for the sake of simplicity.)
unsigned char* decompressed;
unsigned char* dataChunk = /*...*/;
printHex(dataChunk, 160);
int error = inflateZlib(dataChunk, 160, decompressed, 1000);
printHex(decompressed, 160);
//zerr(error);
void printHex(unsigned char* data, size_t n)
{
    for(size_t i = 0; i < n; i++)
    {
        std::cout << std::hex << (uint16_t)data[i] << " ";
    }
    std::cout << std::dec << "\n-\n";
}

int inflateZlib(unsigned char* data, size_t length, unsigned char* decompressed, size_t maxDecompressed)
{
    decompressed = new unsigned char[maxDecompressed];

    z_stream infstream;
    infstream.zalloc = Z_NULL;
    infstream.zfree = Z_NULL;
    infstream.opaque = Z_NULL;
    infstream.avail_in = (uInt)(length); // size of input
    infstream.next_in = (Bytef *)data; // input char array
    infstream.avail_out = (uInt)maxDecompressed; // size of output
    infstream.next_out = (Bytef *)decompressed; // output char array

    // the actual DE-compression work.
    int ret = inflateInit(&infstream);
    zerr(ret);
    ret = inflate(&infstream, Z_NO_FLUSH);
    zerr(ret);
    inflateEnd(&infstream);
    return ret;
}
This produces the following output:
78 9c bd 58 4b 88 23 45 18 ee 3c 67 e3 24 93 cc ae 8a f8 42 10 c4 cb 1a 33 a3 7b f0 60 e6 e0 e6 e0 49 90 bd 29 4d 4d 77 25 dd 99 ee ea de aa ee 4c 32 82 2c e8 c1 93 ac 47 c5 45 f 82 8 5e 16 f ba 78 18 45 d0 83 7 95 15 5c d0 c3 aa b0 b2 ee 65 5c f0 e4 c5 bf aa 1f a9 ea 74 cf 64 7 31 c3 24 9d fa fe bf ea ab ff 59 15 ab 62 6a b5 5d 9b 8c 18 2a 5b 15 47 d3 b4 92 55 35 b5 ba b7 3d c6 46 b0 a3 35 3 1c 50 64 61 93 7a a4 67 d5 0 e1 c2 d8 e4 92 75 fe 56 b3 ca a6 76 c2 f0 1c 8f
-
0 0 6 c0 83 50 0 0 16 b0 78 9c bd 58 4b 88 23 45 18 ee 3c 67 e3 24 93 cc ae 8a f8 42 10 c4 cb 1a 33 a3 7b f0 60 e6 e0 e6 e0 49 90 bd 29 4d 4d 77 25 dd 99 ee ea de aa ee 4c 32 82 2c e8 c1 93 ac 47 c5 45 f 82 8 5e 16 f ba 78 18 45 d0 83 7 95 15 5c d0 c3 aa b0 b2 ee 65 5c f0 e4 c5 bf aa 1f a9 ea 74 cf 64 7 31 c3 24 9d fa fe bf ea ab ff 59 15 ab 62 6a b5 5d 9b 8c 18 2a 5b 15 47 d3 b4 92 55 35 b5 ba b7 3d c6 46 b0 a3 35 3 1c 50 64 61 93 7a a4 67 d5 0 e1 c2 d8 e4 92 75
-
which is not what I want.
Whereas gzip:
printf "\x1f\x8b\x08\x00\x00\x00\x00\x00\x78\x9c\xbd\x58\x4b\x88\x23\x45\x18\xee\x3c\x67\xe3\x24\x93\xcc\xae\x8a\xf8\x42\x10\xc4\xcb\x1a\x33\xa3\x7b\xf0\x60\xe6\xe0\xe6\xe0\x49\x90\xbd\x29\x4d\x4d\x77\x25\xdd\x99\xee\xea\xde\xaa\xee\x4c\x32\x82\x2c\xe8\xc1\x93\xac\x47\xc5\x45\xf\x82\x8\x5e\x16\xf\xba\x78\x18\x45\xd0\x83\x7\x95\x15\x5c\xd0\xc3\xaa\xb0\xb2\xee\x65\x5c\xf0\xe4\xc5\xbf\xaa\x1f\xa9\xea\x74\xcf\x64\x7\x31\xc3\x24\x9d\xfa\xfe\xbf\xea\xab\xff\x59\x15\xab\x62\x6a\xb5\x5d\x9b\x8c\x18\x2a\x5b\x15\x47\xd3\xb4\x92\x55\x35\xb5\xba\xb7\x3d\xc6\x46\xb0\xa3\x35\x3\x1c\x50\x64\x61\x93\x7a\xa4\x67\xd5\x0\xe1\xc2\xd8\xe4\x92\x75\xfe\x56\xb3\xca\xa6\x76\xc2\xf0\x1c\x8f" | gzip -dc | hexdump -C
produces:
gzip: stdin: unexpected end of file
00000000 68 03 64 00 05 77 69 6e 67 73 61 02 68 03 6c 00 |h.d..wingsa.h.l.|
00000010 00 00 01 68 04 64 00 06 6f 62 6a 65 63 74 6b 00 |...h.d..objectk.|
00000020 0c 74 65 74 72 61 68 65 64 72 6f 6e 31 68 05 64 |.tetrahedron1h.d|
00000030 00 06 77 69 6e 67 65 64 6c 00 00 00 06 6c 00 00 |..wingedl....l..|
00000040 00 05 68 02 64 00 08 63 6f 6c 6f |..h.d..colo|
0000004b
which is what I want.
I was able to decode the data you provided by using zlib 1.2.8 and the inflateInit2 function with 32 for windowBits. I used 32 based on this information from the zlib documentation:
windowBits can also be zero to request that inflate use the window size in the zlib header of the compressed stream.
and
Add 32 to windowBits to enable zlib and gzip decoding with automatic header detection
Here's the full code. I stripped out error checking since I don't have a zerr function. It doesn't appear you're using Visual C++, so you will want to remove the #pragma to avoid a warning as well.
#include <iostream>
#include <iomanip>
#include <cstdint>
#include <cctype>
#include "zlib.h"

#pragma comment(lib, "zdll.lib")

const size_t block_size = 16;

void printLine(unsigned char* data, size_t offset, size_t n)
{
    if(n)
    {
        std::cout << std::setw(8) << std::setfill('0') << std::right << offset << " ";
        for(size_t x = 0; x < block_size; ++x)
        {
            if(x % (block_size/2) == 0) std::cout << " ";
            uint16_t d = x < n ? data[x] : 0;
            std::cout << std::hex << std::setw(2) << d << " ";
        }
        std::cout << "|";
        for(size_t x = 0; x < block_size; ++x)
        {
            int c = (x < n && isalnum(data[x])) ? data[x] : '.';
            std::cout << static_cast<char>(c);
        }
        std::cout << "|\n";
    }
}

void printHex(unsigned char* data, size_t n)
{
    const size_t blocks = n / block_size;
    const size_t remainder = n % block_size;
    for(size_t i = 0; i < blocks; i++)
    {
        size_t offset = i * block_size;
        printLine(&data[offset], offset, block_size);
    }
    size_t offset = blocks * block_size;
    printLine(&data[offset], offset, remainder);
    std::cout << "\n";
}

int inflateZlib(unsigned char* data, uint32_t length, unsigned char* decompressed, uint32_t maxDecompressed)
{
    z_stream infstream;
    infstream.zalloc = Z_NULL;
    infstream.zfree = Z_NULL;
    infstream.opaque = Z_NULL;
    infstream.avail_in = length;
    infstream.next_in = data;
    infstream.avail_out = maxDecompressed;
    infstream.next_out = decompressed;
    inflateInit2(&infstream, 32);
    inflate(&infstream, Z_FINISH);
    inflateEnd(&infstream);
    return infstream.total_out;
}

int main()
{
    unsigned char dataChunk[] =
        "\x1f\x8b\x08\x00\x00\x00\x00\x00\x78\x9c\xbd\x58\x4b\x88\x23\x45"
        "\x18\xee\x3c\x67\xe3\x24\x93\xcc\xae\x8a\xf8\x42\x10\xc4\xcb\x1a"
        "\x33\xa3\x7b\xf0\x60\xe6\xe0\xe6\xe0\x49\x90\xbd\x29\x4d\x4d\x77"
        "\x25\xdd\x99\xee\xea\xde\xaa\xee\x4c\x32\x82\x2c\xe8\xc1\x93\xac"
        "\x47\xc5\x45\xf\x82\x8\x5e\x16\xf\xba\x78\x18\x45\xd0\x83\x7\x95"
        "\x15\x5c\xd0\xc3\xaa\xb0\xb2\xee\x65\x5c\xf0\xe4\xc5\xbf\xaa\x1f"
        "\xa9\xea\x74\xcf\x64\x07\x31\xc3\x24\x9d\xfa\xfe\xbf\xea\xab\xff"
        "\x59\x15\xab\x62\x6a\xb5\x5d\x9b\x8c\x18\x2a\x5b\x15\x47\xd3\xb4"
        "\x92\x55\x35\xb5\xba\xb7\x3d\xc6\x46\xb0\xa3\x35\x03\x1c\x50\x64"
        "\x61\x93\x7a\xa4\x67\xd5\x00\xe1\xc2\xd8\xe4\x92\x75\xfe\x56\xb3"
        "\xca\xa6\x76\xc2\xf0\x1c\x8f";
    unsigned char decompressed[1000] = {};
    printHex(dataChunk, sizeof(dataChunk));
    uint32_t len = inflateZlib(dataChunk, sizeof(dataChunk), decompressed, sizeof(decompressed));
    printHex(decompressed, len);
    return 0;
}
I think you might want to define decompressed differently. As written, inflateZlib stores the new[] allocation in its local copy of the pointer parameter, so the caller's decompressed pointer is never set and printHex reads an uninitialized pointer. The simplest fix is to allocate the buffer in the caller (and drop the new inside the function):
unsigned char decompressed[1000];
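Alternatively, a sketch of the pass-by-reference fix, which keeps the allocation inside the function but makes it visible to the caller:
// Taking the pointer by reference means the new[] result is stored
// into the caller's pointer, not a local copy.
int inflateZlib(unsigned char* data, size_t length,
                unsigned char*& decompressed, size_t maxDecompressed)
{
    decompressed = new unsigned char[maxDecompressed];
    // ... same z_stream setup, inflate and inflateEnd as above ...
}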