I need to encrypt the content of strings in C++ Builder XE and found this code on the internet:
AnsiString Base64Encode(AnsiString slToEnc)
{
    // The Base64 table
    const char Base64Table[64] =
        "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
    char *buftoenc = slToEnc.c_str();
    int bufsize = slToEnc.Length() + 1;
    char *encbuf = new char[slToEnc.Length() * 5];
    encbuf[0] = '\0';
    int ilStrLen = -1;
    int i = 0;
    int b64byte[5];
    unsigned char *buftemp;
    AnsiString slRetVal = EmptyStr;
    buftemp = (unsigned char *)malloc(bufsize + 2);
    strcpy((char *)buftemp, buftoenc);
    if (fmod(bufsize, 3) == 1)
    {
        buftemp[bufsize] = '\0';
        buftemp[bufsize + 1] = '\0';
    }
    if (fmod(bufsize, 3) == 2)
        buftemp[bufsize] = '\0';
    while (i < bufsize)
    {
        b64byte[0] = buftemp[i] >> 2;
        b64byte[1] = ((buftemp[i] & 3) << 4) | (buftemp[i + 1] >> 4);
        b64byte[2] = ((buftemp[i + 1] & 0x0F) << 2) | (buftemp[i + 2] >> 6);
        b64byte[3] = buftemp[i + 2] & 0x3F;
        encbuf[i + (i / 3)]     = Base64Table[b64byte[0]];
        encbuf[i + (i / 3) + 1] = Base64Table[b64byte[1]];
        encbuf[i + (i / 3) + 2] = Base64Table[b64byte[2]];
        encbuf[i + (i / 3) + 3] = Base64Table[b64byte[3]];
        i += 3;
    }
    free(buftemp);
    if (fmod(bufsize, 3) == 0)      ilStrLen = bufsize * 8 / 6;
    else if (fmod(bufsize, 3) == 1) ilStrLen = ((bufsize + 2) * 8 / 6) - 2;
    else if (fmod(bufsize, 3) == 2) ilStrLen = ((bufsize + 1) * 8 / 6) - 1;
    else                            ilStrLen = -1;
    if (ilStrLen > 0) slRetVal = AnsiString(encbuf).SubString(1, ilStrLen);
    if (encbuf != NULL) { delete[] encbuf; encbuf = NULL; }
    return slRetVal;
}
// Calling the function from a button's event handler:
Base64Encode(Memo1->Text);
But it generates a small compilation error referring to an ambiguity between the function std::fmod(double, double) and the same function from the math.h library.
Any suggestions?
In order to resolve the ambiguity you need to specify the namespace for the fmod function. Just change the line:
if (fmod(bufsize,3)==1)
to:
if (std::fmod(bufsize,3)==1)
and it should compile for you.
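Side note: since bufsize is an int and the code only needs the remainder of an integer division, you could also avoid the floating-point call (and thus the overload ambiguity) entirely with the integer modulus operator, along these lines:

    if (bufsize % 3 == 1)
    {
        buftemp[bufsize] = '\0';
        buftemp[bufsize + 1] = '\0';
    }
    if (bufsize % 3 == 2)
        buftemp[bufsize] = '\0';

(and similarly for the length calculation at the end of the function).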
You can use TBase64Encoding from System.NetEncoding: http://docwiki.embarcadero.com/Libraries/Rio/en/System.NetEncoding.TBase64Encoding
// Decoding:
TBase64Encoding* enc = new TBase64Encoding();
UnicodeString decoded = enc->Decode(encoded);
delete enc;

// Encoding:
TBase64Encoding* enc = new TBase64Encoding();
UnicodeString encoded = enc->Encode(whatever);
delete enc;
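If your version of System.NetEncoding exposes the shared encoder instances (an assumption about your RAD Studio release), a one-off conversion can also be written without managing the object yourself, e.g. for the question's button handler:

    // Hypothetical one-liner using the shared Base64 instance:
    UnicodeString encoded = TNetEncoding::Base64->Encode(Memo1->Text);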
My version is a simple, fast Base64 encoder (and decoder) for C++ Builder.
//---------------------------------------------------------------------------
UnicodeString __fastcall TExample::Base64Encode(void *data, int length)
{
    if (length <= 0) return L"";
    static const char set[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
    unsigned char *in = (unsigned char*)data;
    char *pos, *out = pos = new char[((length - 1) / 3 + 1) << 2];
    while ((length -= 3) >= 0)          // whole 3-byte groups
    {
        pos[0] = set[in[0] >> 2];
        pos[1] = set[((in[0] & 0x03) << 4) | (in[1] >> 4)];
        pos[2] = set[((in[1] & 0x0F) << 2) | (in[2] >> 6)];
        pos[3] = set[in[2] & 0x3F];
        pos += 4;
        in += 3;
    }
    if ((length & 2) != 0)              // 1 or 2 bytes left over
    {
        pos[0] = set[in[0] >> 2];
        if ((length & 1) != 0)          // 2 leftover bytes
        {
            pos[1] = set[((in[0] & 0x03) << 4) | (in[1] >> 4)];
            pos[2] = set[(in[1] & 0x0F) << 2];
        }
        else                            // 1 leftover byte
        {
            pos[1] = set[(in[0] & 0x03) << 4];
            pos[2] = '=';
        }
        pos[3] = '=';
        pos += 4;
    }
    UnicodeString code = UnicodeString(out, pos - out);
    delete[] out;
    return code;
}
//---------------------------------------------------------------------------
int __fastcall TExample::Base64Decode(const UnicodeString &code, unsigned char **data)
{
    int length;
    if (((length = code.Length()) == 0) || ((length & 3) != 0)) return 0;
    wchar_t *str = code.c_str();
    unsigned char *pos, *out = pos = new unsigned char[(length >> 2) * 3];
    while (*str != 0)
    {
        length = -1;                    // ends up as the decoded byte count of this group
        int shift = 18, bits = 0;
        do
        {
            wchar_t s = str[++length];
            if ((s >= L'A') && (s <= L'Z')) bits |= (s - L'A') << shift;
            else if ((s >= L'a') && (s <= L'z')) bits |= (s - (L'a' - 26)) << shift;
            else if ((s >= L'0') && (s <= L'9')) bits |= (s - (L'0' - 52)) << shift;
            else if (s == L'+') bits |= 62 << shift;
            else if (s == L'/') bits |= 63 << shift;
            else if (s == L'=')         // padding: shorten this group
            {
                length--;
                break;
            }
            else                        // invalid character
            {
                delete[] out;
                return 0;
            }
        }
        while ((shift -= 6) >= 0);
        pos[0] = bits >> 16;
        pos[1] = bits >> 8;
        pos[2] = bits;
        pos += length;
        str += 4;
    }
    *data = out;
    return pos - out;
}
//---------------------------------------------------------------------------
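A quick round trip might look like this (ex, data and size are placeholder names for an instance of TExample and your input buffer):

    UnicodeString text = ex->Base64Encode(data, size);  // raw bytes -> Base64 text
    unsigned char *raw = NULL;
    int n = ex->Base64Decode(text, &raw);               // returns the decoded byte count
    // ... use raw[0..n-1] ...
    delete[] raw;                                       // the caller owns the buffer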
I'm testing a UDF (external function) in a Firebird 3 database. I made a C++ DLL which performs a simple XOR on a given string using a given key.
This is the code:
#include <windows.h>
#include <iostream>
#include <string>
#include <stdio.h>
#include <vector>
#include <math.h>
#include "../FirebirdLib/src/include/ibase.h"
#include "ib_util.h"
using namespace std;
//------------------------------------------------------------------------------------
typedef void (__stdcall * FCallback)(const char * message);
FCallback g_messageCallback = 0;
FCallback g_errorCallback = 0;
//------------------------------------------------------------------------------------
#define ON_MESSAGE(mess) { if(g_messageCallback) g_messageCallback(mess); }
#define ON_ERROR(mess) { if(g_errorCallback) g_errorCallback(mess); }
//------------------------------------------------------------------------------------
extern "C" __declspec(dllexport) void RegisterCallbacks(FCallback messageCallback, FCallback errorCallback)
{
g_messageCallback = messageCallback;
g_errorCallback = errorCallback;
}
//------------------------------------------------------------------------------------
class EncryptionUDF
{
public:
EncryptionUDF()
{
//ON_MESSAGE("--EncryptionUDF created--")
}
~EncryptionUDF()
{
//ON_MESSAGE("--EncryptionUDF destroyed--")
}
char* XORCipher(const char* data, const char* key, int dataLen, int keyLen) {
char* output = (char*)ib_util_malloc(2000 + 1L);
output[dataLen] = '\0';
for (int i = 0; i < dataLen; ++i) {
if (data[i] != key[i % keyLen])
output[i] = data[i] ^ key[i % keyLen];
else
output[i] = data[i];
}
return output;
}
char * Encrypt(const char * str, const char * key) {
int dataLen = strlen(str);
int keyLen = strlen(key);
char* output = (char*)ib_util_malloc(2000 + 1L);
output[dataLen] = '\0';
try {
if ((str == NULL) || (str[0] == '\0')) {
return NULL;
}
else {
try {
if ((key != NULL) && (key[0] == '\0')) {
strncpy(output, str, dataLen);
}
else if (key != NULL) {
output = XORCipher(str, key, dataLen, keyLen);
}
else strncpy(output, str, dataLen);
}
catch (...) { strncpy(output, str, dataLen); }
return output;
}
}
catch (...) { strncpy(output, str, dataLen); }
return output;
}
char * Decrypt(const char * str, const char * key) {
int dataLen = strlen(str);
int keyLen = strlen(key);
char* output = (char*)ib_util_malloc(2000 + 1L);
output[dataLen] = '\0';
try {
if ((str == NULL) || (str[0] == '\0')) {
return NULL;
}
else {
try {
if ((key != NULL) && (key[0] == '\0')) {
strncpy(output, str, dataLen);
}
else if (key != NULL) {
output = XORCipher(str, key, dataLen, keyLen);
}
else strncpy(output, str, dataLen);
}
catch (...) { strncpy(output, str, dataLen); }
return output;
}
}
catch (...) { strncpy(output, str, dataLen); }
return output;
}
};
//------------------------------------------------------------------------------------
extern "C" __declspec(dllexport) char * EncryptUDF_DesEncrypt(const char *str, const char *key)
{
try
{
EncryptionUDF self = EncryptionUDF();
return self.Encrypt(str, key);
}
catch (std::exception & ex)
{
ON_ERROR(ex.what());
}
catch (...)
{
ON_ERROR("Unknown error");
}
return 0;
}
//------------------------------------------------------------------------------------
extern "C" __declspec(dllexport) char * EncryptUDF_DesDecrypt(const char *str, const char *key)
{
try
{
EncryptionUDF self = EncryptionUDF();
return self.Decrypt(str, key);
}
catch (std::exception & ex)
{
ON_ERROR(ex.what());
}
catch (...)
{
ON_ERROR("Unknown error");
}
return 0;
}
//------------------------------------------------------------------------------------
BOOL APIENTRY DllMain( HMODULE hModule,
DWORD ul_reason_for_call,
LPVOID lpReserved
)
{
return TRUE;
}
//------------------------------------------------------------------------------------
The UDF is defined in the database as:
DECLARE EXTERNAL FUNCTION X_DECRYPT
CSTRING(2000),
CSTRING(64)
RETURNS CSTRING(2000) FREE_IT
ENTRY_POINT 'EncryptUDF_DesDecrypt' MODULE_NAME 'EncryptUDF';
DECLARE EXTERNAL FUNCTION X_ENCRYPT
CSTRING(2000),
CSTRING(64)
RETURNS CSTRING(2000) FREE_IT
ENTRY_POINT 'EncryptUDF_DesEncrypt' MODULE_NAME 'EncryptUDF';
When using this UDF in SQL SELECT commands, the RAM used by the Firebird server tends to increase continuously. With embedded Firebird the RAM goes up quickly; in server mode it increases more slowly and in a somewhat more controlled way.
Please help me understand where the error is.
After some investigation, I decided to change the parts of the code where the string is copied using:
strncpy(output, str, dataLen);
with:
strncpy_s(output, dataLen, str, dataLen);
and after this change the memory has stayed at normal levels, both with embedded Firebird and in server mode.
It seems there was a memory leak in how those string copies were released or managed.
I cannot get my C++ application to work properly; I will paste my simple memory class and a test app here.
Right now I get an access violation and no output when I run it in the console. I use Visual Studio 2015.
I seem to have problems with my Car class, but I am not sure what could be wrong, and I get an error message if I try to debug. I have no idea how to fix it...
#define WIN32_LEAN_AND_MEAN
enum memtype { typechar = 1, typeint };

class Mem
{
public:
    Mem(int size);
    void * alloc(memtype t);
    void * ptr();
    void release();
    ~Mem();
private:
    int sizebytes;
    void * p;
};

#include <new.h>
#include "Mem.h"

Mem::Mem(int size)
{
    sizebytes = size;
}
void * Mem::alloc(memtype t)
{
    if (t == typechar)
    {
        p = (char *)new char(sizebytes);
        return p;
    }
}
void * Mem::ptr()
{
    return p;
}
void Mem::release()
{
    if (p)
        delete p;
}
Mem::~Mem()
{
    if (p)
        delete p;
}
#include "Mem.h"
#include <stdio.h>
int check(void * p)
{
int retval = 0;
if (p == NULL)
{
printf("Memory Fail: NULL pointer...\n");
retval = 0;
}
else
retval = 1;
return retval;
}
class Car
{
public:
Car::Car()
{
Car::name = 0;
Car::brand = 0;
Car::type = 0;
}
int Car::alloc(char *inname, char *inbrand, char *intype)
{
Car::name = new Mem(sizeof(*inname));
if (!check(Car::name->alloc(typechar)))
return 0;
printf("%s", sizeof(*inname));
Car::brand = new Mem(sizeof(*inbrand));
if (!check(Car::brand->alloc(typechar)))
return 0;
printf("%s", sizeof(*inbrand));
Car::type = new Mem(sizeof(*intype));
if (!check(Car::type->alloc(typechar)))
return 0;
printf("%s", sizeof(*intype));
/*sprintf?*/
sprintf_s((char *)Car::name->ptr(), sizeof(*inname), "%s", inname);
sprintf_s((char *)Car::brand->ptr(), sizeof(*inbrand), "%s", inbrand);
sprintf_s((char *)Car::type->ptr(), sizeof(*intype), "%s", intype);
return 1;
}
char * Car::getName()
{
if(Car::name!=0)
return (char *)Car::name->ptr();
}
char * Car::getBrand()
{
if(Car::brand!=0)
return (char *)Car::brand->ptr();
}
char * Car::getType()
{
if(Car::type!=0)
return (char *)Car::type->ptr();
}
Car::~Car()
{
if (Car::name != 0)
delete Car::name;
if (Car::brand != 0)
delete Car::brand;
if (Car::type != 0)
delete Car::type;
}
private:
Mem *name, *brand, *type;
};
void store()
{
}

int main()
{
    Mem cartype1(sizeof("Sedan"));
    cartype1.alloc(typechar);
    check(cartype1.ptr());
    Mem cartype2(sizeof("Van"));
    cartype2.alloc(typechar);
    check(cartype2.ptr());
    Mem cartype3(sizeof("Pickup"));
    cartype3.alloc(typechar);
    check(cartype3.ptr());
    sprintf((char *)cartype1.ptr(), "%s", "Sedan");
    sprintf((char *)cartype2.ptr(), "%s", "Van");
    sprintf((char *)cartype3.ptr(), "%s", "Pickup");
    Mem carname(sizeof("Supah Car"));
    carname.alloc(typechar);
    check(carname.ptr());
    Mem carbrand(sizeof("Supah"));
    carbrand.alloc(typechar);
    check(carbrand.ptr());
    sprintf((char *)carname.ptr(), "%s", "Supah Car");
    sprintf((char *)carbrand.ptr(), "%s", "Supah");
    Car test;
    test.alloc((char *)carname.ptr(), (char *)carbrand.ptr(), (char *)cartype1.ptr());
    printf("%s is of brand %s and type %s\n", test.getName(), test.getBrand(), test.getType());
    char * nullptrtest = NULL;
    printf_s("%d", &test);
    printf_s("sizeof int %d\n", sizeof(int));
    printf_s("Test %s\n", carname.ptr());
    return 1;
}
int Car::alloc(char *inname, char *inbrand, char *intype)
{
    Car::name = new Mem(sizeof(*inname));

sizeof *inname will give you sizeof(char) == 1, so your name member has allocated an array of exactly 1 char.
You later write a lot more to this array. As a result your heap is corrupted.
I do not know why you want to play with emulating memory allocation instead of using std::string, but you need to allocate at least strlen(inname) + 1 bytes to store inname.
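As a sketch, the allocations in Car::alloc would then become something like:

    // Room for the string's characters plus the terminating '\0':
    Car::name  = new Mem(strlen(inname) + 1);
    Car::brand = new Mem(strlen(inbrand) + 1);
    Car::type  = new Mem(strlen(intype) + 1);

and the sprintf_s calls need the same sizes instead of sizeof(*inname). Note that Mem::alloc must also use new char[sizebytes] (an array of sizebytes chars) rather than new char(sizebytes), which allocates a single char initialized to the value sizebytes.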
I was re-creating some of the System.IO functions from that class.
When I set up a buffer and allocate n bytes, it reads the bytes into the buffer, but then random bytes appear at the end of that buffer.
For example:
My Main:
int main(int argc, char *args[])
{
    SetConsoleTitle(TEXT("Stream Test."));
    cout << "Press any Key to begin reading.";
    cin.get();
    const char* data = File::ReadAllBytes(args[1]);
    Stream* stream = new Stream(data);
    char* magic = new char[8];
    stream->Read(magic, 0, 8);
    magic[8] = '\0';
    cout << magic << endl << endl;
    delete[] data;
    cout << "Press any key to quit.";
    cin.get();
    return 0;
}
and here is my System::IO namespace + stream class:
namespace System
{
    namespace IO
    {
        class File
        {
        public:
            static char* ReadAllBytes(const char *name)
            {
                ifstream fl(name, ifstream::in | ifstream::binary);
                fl.seekg(0, ifstream::end);
                size_t len = fl.tellg();
                char* ret = new char[len + 1];
                ret[len] = '\0';
                fl.seekg(0);
                fl.read(ret, len);
                fl.close();
                return ret;
            }
            // not sure of this use yet.
            static size_t fileSize(const char* filename)
            {
                ifstream in(filename, ifstream::in | ifstream::binary);
                in.seekg(0, ifstream::end);
                return in.tellg();
            }
        };

        class Stream
        {
        public:
            const char *_buffer;
            __int64 _origin;
            __int64 _position;
            __int64 _length;
            __int64 _capacity;
            bool _expandable;
            bool _writable;
            bool _exposable;
            bool _isOpen;
            static const int MemStreamMaxLength = 2147483647;

            Stream()
            {
                InitializeInstanceFields();
            }
            Stream(const char *buffer)
            {
                _buffer = buffer;
                _length = strlen(_buffer);
                _capacity = _length;
                _position = 0;
                _origin = 0;
                _expandable = false;
                _writable = true;
                _exposable = true;
                _isOpen = true;
            }
            int ReadByte()
            {
                if (_position >= _length)
                    return -1;
                return _buffer[_position++];
            }
            void Read(char* &buffer, int offset, int length)
            {
                if ((_position + offset + length) <= _length)
                {
                    memcpy(buffer, _buffer + (_position + offset), length);
                    _position += length;
                }
            }
        private:
            void InitializeInstanceFields()
            {
                _origin = 0;
                _position = 0;
                _length = 0;
                _capacity = 0;
                _expandable = false;
                _writable = false;
                _exposable = false;
                _isOpen = false;
            }
        };
    }
}
This is what ends up happening: the magic value prints with random garbage characters appended.
Can anyone explain why this happens, how I can fix it, or anything else? I'm new to C++, so any explanation would help. Also, please don't criticize my code; I know it may be bad, outdated, deprecated, etc., but I'm open to learning, and any helpful advice is welcome. :)
You can only use operator<<(char *) on C-style (null-terminated) strings, not arbitrary arrays of characters. How would you expect it to know how many characters to output?
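For the magic buffer, that means allocating room for the terminator and writing it in bounds (the original magic[8] = '\0' writes one past the end of an 8-element array):

    char* magic = new char[9];   // 8 magic bytes + 1 for the terminator
    stream->Read(magic, 0, 8);
    magic[8] = '\0';             // now in bounds; cout knows where to stop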
I would guess the file was not opened correctly and thus the magic buffer is not set at all, which leaves it with uninitialized junk data:
If the constructor is not successful in opening the file, the object is still created although no file is associated to the stream buffer and the stream's failbit is set (which can be checked with inherited member fail).
http://www.cplusplus.com/reference/fstream/ifstream/ifstream/
Try adding more error checking along the way (using cout), especially when opening and reading the buffer. Perhaps set the magic buffer to zero or something recognizable that is overwritten when successful.
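For instance, a minimal sketch of that kind of checking inside File::ReadAllBytes (same names as in the question):

    static char* ReadAllBytes(const char *name)
    {
        ifstream fl(name, ifstream::in | ifstream::binary);
        if (!fl.is_open())                        // report a failed open
        {
            cout << "Failed to open " << name << endl;
            return 0;
        }
        fl.seekg(0, ifstream::end);
        size_t len = fl.tellg();
        char* ret = new char[len + 1];
        ret[len] = '\0';
        fl.seekg(0);
        fl.read(ret, len);
        if ((size_t)fl.gcount() != len)           // report a short read
            cout << "Short read: " << fl.gcount() << " of " << len << endl;
        fl.close();
        return ret;
    }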
I have a simple program that reads data from a PNG into a 2D array. I would like to save that data to a .RAW file so that Raw Studio or IrfanView can view the raw image that my program outputs to my_out.raw. Currently, if I just write the raw binary data to the my_out.raw file, neither application can actually read the file, that is, view the image. What do I need to do to the program below so that I can see the image?
The code to read the PNG files is:
// MAIN.cpp
#include "pngfilereader.h"
#include <iostream>
#include <string>
#include <vector>
#include <fstream>

int main(int argc, char *argv[])
{
    PNGFileReader pngfr;
    if (!pngfr.decompress_png_to_raw(std::string("/home/matt6809/Downloads"
                                                 "/City.png"))) {
        std::cout << "File decompression error: " << std::endl;
    } else {
        std::ofstream out;
        out.open("./my_out.raw", std::ios_base::out);
        std::vector<std::vector<unsigned char> > data;
        pngfr.get_image_data(data);
        typedef std::vector<std::vector<unsigned char> >::iterator row_it;
        typedef std::vector<unsigned char>::iterator col_it;
        for (row_it rit = data.begin(); rit != data.end(); ++rit) {
            for (col_it cit = rit->begin(); cit != rit->end(); ++cit) {
                out << (*cit);
            }
        }
        out << std::endl;
    }
    return 0;
}
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <png.h>
#include <iostream>
#include <vector>
#include <string>

class PNGFileReader
{
public:
    PNGFileReader();
    ~PNGFileReader();

    // Public exposed API:
    bool compress_raw_to_png(uint8_t data, int size);
    bool decompress_png_to_raw(const std::string &path);

    // Getters
    long unsigned int get_image_width();
    long unsigned int get_image_height();
    void get_image_data(std::vector<std::vector<unsigned char> > &data);

private:
    // Helper functions:
    bool read_png(const std::string &path);
    bool create_png_structs(FILE *fp);
    bool free_data();
    bool alloc_data();

    // Member variables:
    png_structp m_pPNG;
    png_infop m_pPNGInfo;
    png_infop m_pPNGEndInfo;
    png_bytepp m_Data;
    long unsigned int m_ImageWidth;
    long unsigned int m_ImageHeight;

    // Enums
    enum PNGBOOL { NOT_PNG, PNG };
    enum PNGERRORS { ERROR, SUCCESS };
};
#include "pngfilereader.h"
#include <stdexcept>
PNGFileReader::PNGFileReader() :
m_pPNG(NULL),
m_pPNGInfo(NULL),
m_pPNGEndInfo(NULL),
m_Data(NULL),
m_ImageWidth(0),
m_ImageHeight(0)
{
}
PNGFileReader::~PNGFileReader()
{
for (unsigned long int i = 0; i < m_ImageHeight; ++i) {
if (m_Data[i]) {
delete m_Data[i];
m_Data[i] = NULL;
}
}
if (m_Data) {
delete m_Data;
m_Data = NULL;
}
}
// Public Exposed API
bool PNGFileReader::compress_raw_to_png(uint8_t m_Data, int size)
{
return PNGFileReader::SUCCESS;
}
bool PNGFileReader::decompress_png_to_raw(const std::string &path)
{
return read_png(path);
}
// Getters
long unsigned int PNGFileReader::get_image_width()
{
return m_ImageWidth;
}
long unsigned int PNGFileReader::get_image_height()
{
return m_ImageHeight;
}
void PNGFileReader::get_image_data(
std::vector<std::vector<unsigned char> > &data)
{
for (unsigned long int i = 0; i < m_ImageHeight; ++i) {
std::vector<unsigned char> v;
data.push_back(v);
for (unsigned long int j = 0; j < m_ImageWidth; ++j) {
std::vector<unsigned char> *vp = &data[i];
vp->push_back(m_Data[i][j]);
}
}
}
// Private Methods
bool PNGFileReader::read_png(const std::string &path)
{
/*
* Open up the file to read (path) in binary mode
* first so that if anything goes wrong with libpng
* we won't have much to undo
*/
const char *c_path = path.c_str();
FILE *fp = fopen(c_path, "rb");
if (!fp)
return PNGFileReader::ERROR;
/*
* Read the first BYTES_TO_READ bytes from file
* then determine if it is a png file or
* not. If png_sig_cmp == 0 all is okay
*/
enum {BYTES_TO_READ = 8};
unsigned char sig[BYTES_TO_READ];
if (!fread(sig, 1, BYTES_TO_READ, fp)) {
fclose(fp);
return PNGFileReader::ERROR;
}
bool is_png = !png_sig_cmp(sig, 0, BYTES_TO_READ);
if (!is_png) {
fclose(fp);
return PNGFileReader::ERROR;
}
if (!this->create_png_structs(fp)) {
fclose(fp);
return PNGFileReader::ERROR;
}
/*
* For error handling purposes. Set a long pointer
* back to this function to handle all error related
* to file IO
*/
if (setjmp(png_jmpbuf(m_pPNG)))
{
png_destroy_read_struct(&m_pPNG, &m_pPNGInfo, &m_pPNGEndInfo);
fclose(fp);
return PNGFileReader::ERROR;
}
/*
* Set up the input code for FILE openend in binary mode,
* and tell libpng we have already read BYTES_TO_READ btyes from
* signature
*/
png_init_io(m_pPNG, fp);
png_set_sig_bytes(m_pPNG, BYTES_TO_READ);
/*
* Using the lowlevel interface to lib png ...
*/
png_read_info(m_pPNG, m_pPNGInfo);
m_ImageHeight = png_get_image_height(m_pPNG, m_pPNGInfo);
m_ImageWidth = png_get_rowbytes(m_pPNG, m_pPNGInfo);
this->alloc_data();
png_read_image(m_pPNG, m_Data);
png_read_end(m_pPNG, NULL);
png_destroy_read_struct(&m_pPNG, &m_pPNGInfo, &m_pPNGEndInfo);
fclose(fp);
return PNGFileReader::SUCCESS;
}
bool PNGFileReader::create_png_structs(FILE *fp)
{
/*
* Create the pointer to main libpng struct, as well as
* two info structs to maintain information after, and
* prior to all operations on png m_Data. Only necessary
* to release resource after function succeeds.
*/
m_pPNG = png_create_read_struct(PNG_LIBPNG_VER_STRING, (png_voidp)NULL,
NULL, NULL);
if (!m_pPNG)
{
fclose(fp);
return PNGFileReader::ERROR;
}
m_pPNGInfo = png_create_info_struct(m_pPNG);
if (!m_pPNGInfo)
{
png_destroy_read_struct(&m_pPNG, (png_infopp)NULL,(png_infopp)NULL);
fclose(fp);
return PNGFileReader::ERROR;
}
m_pPNGEndInfo = png_create_info_struct(m_pPNG);
if (!m_pPNGEndInfo)
{
png_destroy_read_struct(&m_pPNG, &m_pPNGInfo, (png_infopp)NULL);
fclose(fp);
return PNGFileReader::ERROR;
}
return PNGFileReader::SUCCESS;
}
bool PNGFileReader::free_data()
{
if (m_ImageHeight == 0 || m_ImageWidth == 0)
return PNGFileReader::ERROR;
for (unsigned long int i = 0; i < m_ImageHeight; ++i) {
if (m_Data[i]) {
delete m_Data[i];
m_Data[i] = NULL;
}
}
if (m_Data) {
delete m_Data;
m_Data = NULL;
}
return PNGFileReader::SUCCESS;
}
bool PNGFileReader::alloc_data()
{
if (m_ImageHeight == 0 || m_ImageWidth == 0)
return PNGFileReader::ERROR;
if (m_Data != NULL)
this->free_data();
m_Data = new png_bytep[m_ImageHeight]();
for (unsigned long int i = 0; i < m_ImageHeight; ++i) {
m_Data[i] = NULL;
}
try {
for (unsigned long int i = 0; i < m_ImageHeight; ++i) {
m_Data[i] = new png_byte[m_ImageWidth];
}
}
catch (std::bad_alloc e) {
for (unsigned long int i = 0; i < m_ImageHeight; ++i) {
if (m_Data[i]) {
delete m_Data[i];
m_Data[i] = NULL;
}
}
if (m_Data) {
delete m_Data;
m_Data = NULL;
}
throw e;
}
return PNGFileReader::SUCCESS;
}
A "raw" file that is intended to be used with a camera-image processing program like Raw Studio and Irfraview is not a raw-binary dump of the image-data with no header. Instead the "raw" moniker refers to the fact that the image has a minimal amount of image-processing applied in-camera. For instance, the image-data may still be a single-channel monochrome image from the camera's bayer-pattern CFA, or no white-balance, color-matrix, etc. has been applied, etc. Either way, the image-data is still formatted in a standard binary image file format complete with a header, data-packing method, etc. Examples include formats such as Adobe's DNG file format (which is based on TIFF), or proprietary formats from camera manufacturer's themselves such as Canon's CR2, Nikon's NEF, etc.
So if you want these raw-file processing programs to read your "raw" file image data, you'll have to read-up on the binary data specifications the raw-file formats they support, and then re-format the original PNG image-data correctly.
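If the goal is just to eyeball the decoded pixels in a generic viewer, one option (an illustration of a headered format, not a camera-raw format) is to prepend a binary PGM ("P5") header, which viewers such as IrfanView can open. This sketch assumes the rows hold 8-bit grayscale pixels; note the question's reader stores rowbytes as the width, so this only lines up for single-channel 8-bit PNGs:

    #include <fstream>
    #include <string>
    #include <vector>

    // Hypothetical helper: wrap 8-bit grayscale rows in a binary PGM header.
    void write_pgm(const std::string &path,
                   const std::vector<std::vector<unsigned char> > &rows)
    {
        std::ofstream out(path.c_str(), std::ios_base::out | std::ios_base::binary);
        out << "P5\n" << rows[0].size() << " " << rows.size() << "\n255\n";
        for (size_t y = 0; y < rows.size(); ++y)
            out.write(reinterpret_cast<const char *>(&rows[y][0]), rows[y].size());
    }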
How can I generate SHA-1 or SHA-2 hashes using the OpenSSL library?
I searched Google and could not find any function or example code.
From the command line, it's simply:
printf "compute sha1" | openssl sha1
You can invoke the library like this:
#include <stdio.h>
#include <string.h>
#include <openssl/sha.h>

int main()
{
    unsigned char ibuf[] = "compute sha1";
    unsigned char obuf[20];

    SHA1(ibuf, strlen((char *)ibuf), obuf);

    int i;
    for (i = 0; i < 20; i++) {
        printf("%02x ", obuf[i]);
    }
    printf("\n");

    return 0;
}
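Build it against libcrypto, e.g. gcc sha1.c -lcrypto (the source file name is just an example).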
OpenSSL has horrible documentation with no code examples, but here you are:
#include <openssl/sha.h>

bool simpleSHA256(void* input, unsigned long length, unsigned char* md)
{
    SHA256_CTX context;
    if (!SHA256_Init(&context))
        return false;

    if (!SHA256_Update(&context, (unsigned char*)input, length))
        return false;

    if (!SHA256_Final(md, &context))
        return false;

    return true;
}
Usage:
unsigned char md[SHA256_DIGEST_LENGTH]; // 32 bytes
if (!simpleSHA256(<data buffer>, <data length>, md))
{
    // handle error
}
Afterwards, md will contain the binary SHA-256 message digest. Similar code can be used for the other SHA family members, just replace "256" in the code.
If you have larger data, you of course should feed data chunks as they arrive (multiple SHA256_Update calls).
Adaptation of #AndiDog's version for big files:
#include <fstream>
#include <iomanip>
#include <optional>
#include <sstream>
#include <string>
#include <openssl/sha.h>

static const int K_READ_BUF_SIZE{ 1024 * 16 };

std::optional<std::string> CalcSha256(std::string filename)
{
    // Initialize openssl
    SHA256_CTX context;
    if (!SHA256_Init(&context))
    {
        return std::nullopt;
    }

    // Read file and update calculated SHA
    char buf[K_READ_BUF_SIZE];
    std::ifstream file(filename, std::ifstream::binary);
    while (file.good())
    {
        file.read(buf, sizeof(buf));
        if (!SHA256_Update(&context, buf, file.gcount()))
        {
            return std::nullopt;
        }
    }

    // Get final SHA
    unsigned char result[SHA256_DIGEST_LENGTH];
    if (!SHA256_Final(result, &context))
    {
        return std::nullopt;
    }

    // Transform byte array to hex string
    std::stringstream shastr;
    shastr << std::hex << std::setfill('0');
    for (const auto &byte : result)
    {
        shastr << std::setw(2) << (int)byte;
    }
    return shastr.str();
}
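Usage is then something like (requires C++17 for std::optional):

    if (auto digest = CalcSha256("/tmp/file1"))
        std::cout << *digest << std::endl;   // needs <iostream>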
The correct syntax at the command line, if you use echo instead of printf, should be
echo -n "compute sha1" | openssl sha1
otherwise you'll hash the trailing newline character as well.
Here is an OpenSSL example of calculating a SHA-1 digest using BIO:
#include <openssl/bio.h>
#include <openssl/evp.h>
#include <new>
#include <stdexcept>
#include <string>
#include <vector>

std::string sha1(const std::string &input)
{
    BIO * p_bio_md  = nullptr;
    BIO * p_bio_mem = nullptr;

    try
    {
        // make chain: p_bio_md <-> p_bio_mem
        p_bio_md = BIO_new(BIO_f_md());
        if (!p_bio_md) throw std::bad_alloc();
        BIO_set_md(p_bio_md, EVP_sha1());

        p_bio_mem = BIO_new_mem_buf((void*)input.c_str(), input.length());
        if (!p_bio_mem) throw std::bad_alloc();

        BIO_push(p_bio_md, p_bio_mem);

        // read through p_bio_md
        // read sequence: buf <<-- p_bio_md <<-- p_bio_mem
        std::vector<char> buf(input.size());
        for (;;)
        {
            auto nread = BIO_read(p_bio_md, buf.data(), buf.size());
            if (nread < 0) { throw std::runtime_error("BIO_read failed"); }
            if (nread == 0) { break; } // eof
        }

        // get result
        char md_buf[EVP_MAX_MD_SIZE];
        auto md_len = BIO_gets(p_bio_md, md_buf, sizeof(md_buf));
        if (md_len <= 0) { throw std::runtime_error("BIO_gets failed"); }

        std::string result(md_buf, md_len);

        // clean
        BIO_free_all(p_bio_md);

        return result;
    }
    catch (...)
    {
        if (p_bio_md) { BIO_free_all(p_bio_md); }
        throw;
    }
}
It's longer than just calling the SHA1 function from OpenSSL, but it's more universal and can be reworked to use file streams (thus processing data of any length).
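As a sketch of that file-stream variant, the memory BIO can be swapped for a file BIO (error handling elided; the path is just an example):

    // Hash a file instead of an in-memory string:
    p_bio_mem = BIO_new_file("/tmp/file1", "rb");  // instead of BIO_new_mem_buf
    BIO_push(p_bio_md, p_bio_mem);
    // ...then BIO_read through p_bio_md with a fixed-size buffer until it
    // returns 0, and fetch the digest with BIO_gets as above.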
C version of #Nayfe's code, generating a SHA1 hash from a file:
#include <stdio.h>
#include <stdlib.h>
#include <openssl/sha.h>

static const int K_READ_BUF_SIZE = 1024 * 16;

unsigned char* calculateSHA1(char *filename)
{
    if (!filename) {
        return NULL;
    }

    FILE *fp = fopen(filename, "rb");
    if (fp == NULL) {
        return NULL;
    }

    unsigned char* sha1_digest = malloc(sizeof(char) * SHA_DIGEST_LENGTH);
    SHA_CTX context;

    if (!SHA1_Init(&context)) {
        fclose(fp);
        free(sha1_digest);
        return NULL;
    }

    unsigned char buf[K_READ_BUF_SIZE];
    while (!feof(fp))
    {
        size_t total_read = fread(buf, 1, sizeof(buf), fp);
        if (!SHA1_Update(&context, buf, total_read))
        {
            fclose(fp);
            free(sha1_digest);
            return NULL;
        }
    }
    fclose(fp);

    if (!SHA1_Final(sha1_digest, &context)) {
        free(sha1_digest);
        return NULL;
    }

    return sha1_digest;
}
It can be used as follows:
unsigned char *sha1digest = calculateSHA1("/tmp/file1");
The sha1digest variable contains the raw binary SHA-1 hash; the caller must free() it when done.
You can print it on the screen using the following for-loop:
char *sha1hash = (char *)malloc(sizeof(char) * 41);
sha1hash[40] = '\0';
int i;
for (i = 0; i < SHA_DIGEST_LENGTH; i++)
{
    sprintf(&sha1hash[i * 2], "%02x", sha1digest[i]);
}
printf("SHA1 HASH: %s\n", sha1hash);
printf("SHA1 HASH: %s\n", sha1hash);