I've been trying to figure out the OpenSSL documentation for base64 decoding and encoding. I found some code snippets below:
#include <openssl/sha.h>
#include <openssl/hmac.h>
#include <openssl/evp.h>
#include <openssl/bio.h>
#include <openssl/buffer.h>
char *base64(const unsigned char *input, int length)
{
BIO *bmem, *b64;
BUF_MEM *bptr;
b64 = BIO_new(BIO_f_base64());
bmem = BIO_new(BIO_s_mem());
b64 = BIO_push(b64, bmem);
BIO_write(b64, input, length);
BIO_flush(b64);
BIO_get_mem_ptr(b64, &bptr);
char *buff = (char *)malloc(bptr->length);
memcpy(buff, bptr->data, bptr->length-1);
buff[bptr->length-1] = 0;
BIO_free_all(b64);
return buff;
}
char *decode64(unsigned char *input, int length)
{
BIO *b64, *bmem;
char *buffer = (char *)malloc(length);
memset(buffer, 0, length);
b64 = BIO_new(BIO_f_base64());
bmem = BIO_new_mem_buf(input, length);
bmem = BIO_push(b64, bmem);
BIO_read(bmem, buffer, length);
BIO_free_all(bmem);
return buffer;
}
This only seems to work for single-line strings such as "Start"; the moment I introduce complex strings with newlines, spaces, etc., it fails horribly.
It doesn't even have to be OpenSSL; a simple class or set of functions that does the same thing would be fine. There's a very complicated build process for the solution, and I am trying to avoid having to go in there and make multiple changes. The only reason I went for OpenSSL is that the solution is already compiled with its libraries.
Personally, I find the OpenSSL API to be so incredibly painful to use, I avoid it unless the cost of avoiding it is extremely high. I find it quite upsetting that it has become the standard API in the crypto world.
I was feeling bored, so I wrote you one in C++. It should even handle the edge cases that can cause security problems, for example encoding a string so large that the length computation overflows.
I have done some unit testing on it, so it should work.
#include <string>
#include <cassert>
#include <limits>
#include <stdexcept>
#include <cctype>
static const char b64_table[65] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
static const char reverse_table[128] = {
64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 62, 64, 64, 64, 63,
52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 64, 64, 64, 64, 64, 64,
64, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 64, 64, 64, 64, 64,
64, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 64, 64, 64, 64, 64
};
::std::string base64_encode(const ::std::string &bindata)
{
using ::std::string;
using ::std::numeric_limits;
if (bindata.size() > (numeric_limits<string::size_type>::max() / 4u) * 3u) {
throw ::std::length_error("Converting too large a string to base64.");
}
const ::std::size_t binlen = bindata.size();
// Use = signs so the end is properly padded.
string retval((((binlen + 2) / 3) * 4), '=');
::std::size_t outpos = 0;
int bits_collected = 0;
unsigned int accumulator = 0;
const string::const_iterator binend = bindata.end();
for (string::const_iterator i = bindata.begin(); i != binend; ++i) {
accumulator = (accumulator << 8) | (*i & 0xffu);
bits_collected += 8;
while (bits_collected >= 6) {
bits_collected -= 6;
retval[outpos++] = b64_table[(accumulator >> bits_collected) & 0x3fu];
}
}
if (bits_collected > 0) { // Any trailing bits that are missing.
assert(bits_collected < 6);
accumulator <<= 6 - bits_collected;
retval[outpos++] = b64_table[accumulator & 0x3fu];
}
assert(outpos >= (retval.size() - 2));
assert(outpos <= retval.size());
return retval;
}
::std::string base64_decode(const ::std::string &ascdata)
{
using ::std::string;
string retval;
const string::const_iterator last = ascdata.end();
int bits_collected = 0;
unsigned int accumulator = 0;
for (string::const_iterator i = ascdata.begin(); i != last; ++i) {
const int c = *i;
if (::std::isspace(c) || c == '=') {
// Skip whitespace and padding. Be liberal in what you accept.
continue;
}
if ((c > 127) || (c < 0) || (reverse_table[c] > 63)) {
throw ::std::invalid_argument("This contains characters not legal in a base64 encoded string.");
}
accumulator = (accumulator << 6) | reverse_table[c];
bits_collected += 6;
if (bits_collected >= 8) {
bits_collected -= 8;
retval += static_cast<char>((accumulator >> bits_collected) & 0xffu);
}
}
return retval;
}
Rather than using the BIO_ interface, it's much easier to use the EVP_ interface. For instance:
#include <iostream>
#include <stdlib.h>
#include <openssl/evp.h>
char *base64(const unsigned char *input, int length) {
const auto pl = 4*((length+2)/3);
auto output = reinterpret_cast<char *>(calloc(pl+1, 1)); //+1 for the terminating null that EVP_EncodeBlock adds on
const auto ol = EVP_EncodeBlock(reinterpret_cast<unsigned char *>(output), input, length);
if (pl != ol) { std::cerr << "Whoops, encode predicted " << pl << " but we got " << ol << "\n"; }
return output;
}
unsigned char *decode64(const char *input, int length) {
const auto pl = 3*length/4;
auto output = reinterpret_cast<unsigned char *>(calloc(pl+1, 1));
const auto ol = EVP_DecodeBlock(output, reinterpret_cast<const unsigned char *>(input), length);
if (pl != ol) { std::cerr << "Whoops, decode predicted " << pl << " but we got " << ol << "\n"; }
return output;
}
The EVP functions include a streaming interface too, see the man page.
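For what it's worth, a minimal driver for the two helpers above might look like the sketch below (the message text is arbitrary; note that EVP_DecodeBlock counts the '=' padding in its return value, so decoded binary data can carry trailing zero bytes):
#include <cstdio>
#include <cstring>
#include <cstdlib>
int main() {
    const char *msg = "hello world"; // arbitrary example input
    // encode: the helper returns a calloc'd, null-terminated buffer the caller must free
    char *enc = base64(reinterpret_cast<const unsigned char *>(msg),
                       static_cast<int>(std::strlen(msg)));
    std::printf("encoded: %s\n", enc);                            // aGVsbG8gd29ybGQ=
    // decode: same ownership rules apply
    unsigned char *dec = decode64(enc, static_cast<int>(std::strlen(enc)));
    std::printf("decoded: %s\n", reinterpret_cast<char *>(dec));  // hello world
    std::free(enc);
    std::free(dec);
    return 0;
}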
Here is an example of OpenSSL base64 encode/decode I wrote.
Note that I use a few macros/classes of my own in the code, but none of them matters for the example; they are simply thin C++ wrappers:
buffer base64::encode( const buffer& data )
{
// bio is simply a class that wraps BIO* and frees the BIO in the destructor.
bio b64(BIO_f_base64()); // create BIO to perform base64
BIO_set_flags(b64,BIO_FLAGS_BASE64_NO_NL);
bio mem(BIO_s_mem()); // create BIO that holds the result
// chain base64 with mem, so writing to b64 will encode base64 and write to mem.
BIO_push(b64, mem);
// write data
bool done = false;
int res = 0;
while(!done)
{
res = BIO_write(b64, data.data, (int)data.size);
if(res <= 0) // if failed
{
if(BIO_should_retry(b64)){
continue;
}
else // encoding failed
{
/* Handle Error!!! */
}
}
else // success!
done = true;
}
BIO_flush(b64);
// get a pointer to mem's data
char* dt;
long len = BIO_get_mem_data(mem, &dt);
// assign data to output
std::string s(dt, len);
return buffer(s.length()+sizeof(char), (byte*)s.c_str());
}
This works for me, and I verified with valgrind that there are no memory leaks.
#include <openssl/bio.h>
#include <openssl/evp.h>
#include <cstring>
#include <memory>
#include <string>
#include <vector>
#include <iostream>
namespace {
struct BIOFreeAll { void operator()(BIO* p) { BIO_free_all(p); } };
}
std::string Base64Encode(const std::vector<unsigned char>& binary)
{
std::unique_ptr<BIO,BIOFreeAll> b64(BIO_new(BIO_f_base64()));
BIO_set_flags(b64.get(), BIO_FLAGS_BASE64_NO_NL);
BIO* sink = BIO_new(BIO_s_mem());
BIO_push(b64.get(), sink);
BIO_write(b64.get(), binary.data(), binary.size());
BIO_flush(b64.get());
const char* encoded;
const long len = BIO_get_mem_data(sink, &encoded);
return std::string(encoded, len);
}
// Assumes no newlines or extra characters in encoded string
std::vector<unsigned char> Base64Decode(const char* encoded)
{
std::unique_ptr<BIO,BIOFreeAll> b64(BIO_new(BIO_f_base64()));
BIO_set_flags(b64.get(), BIO_FLAGS_BASE64_NO_NL);
BIO* source = BIO_new_mem_buf(encoded, -1); // read-only source
BIO_push(b64.get(), source);
const int maxlen = strlen(encoded) / 4 * 3 + 1;
std::vector<unsigned char> decoded(maxlen);
const int len = BIO_read(b64.get(), decoded.data(), maxlen);
decoded.resize(len);
return decoded;
}
int main()
{
const char* msg = "hello";
const std::vector<unsigned char> binary(msg, msg+strlen(msg));
const std::string encoded = Base64Encode(binary);
std::cout << "encoded = " << encoded << std::endl;
const std::vector<unsigned char> decoded = Base64Decode(encoded.c_str());
std::cout << "decoded = ";
for (unsigned char c : decoded) std::cout << c;
std::cout << std::endl;
return 0;
}
Compile:
g++ main.cc -lcrypto
Output:
encoded = aGVsbG8=
decoded = hello
So many horrible C code examples with buffers and malloc(); what about using std::string properly on this C++-tagged question?
#include <openssl/bio.h>
#include <openssl/evp.h>
#include <openssl/buffer.h>
#include <string>
std::string base64_encode(const std::string& input)
{
const auto base64_memory = BIO_new(BIO_s_mem());
auto base64 = BIO_new(BIO_f_base64());
base64 = BIO_push(base64, base64_memory);
BIO_write(base64, input.c_str(), static_cast<int>(input.length()));
BIO_flush(base64);
BUF_MEM* buffer_memory{};
BIO_get_mem_ptr(base64, &buffer_memory);
auto base64_encoded = std::string(buffer_memory->data, buffer_memory->length - 1);
BIO_free_all(base64);
return base64_encoded;
}
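A matching decoder in the same style might look like the following sketch (the name base64_decode simply mirrors the encoder above; it assumes single-line input with no embedded newlines, hence BIO_FLAGS_BASE64_NO_NL, and uses the input length as an upper bound for the output size):
std::string base64_decode(const std::string& input)
{
    const auto base64_memory = BIO_new_mem_buf(input.data(), static_cast<int>(input.length()));
    auto base64 = BIO_new(BIO_f_base64());
    base64 = BIO_push(base64, base64_memory);
    BIO_set_flags(base64, BIO_FLAGS_BASE64_NO_NL); // drop this flag if the input is line-wrapped

    std::string decoded(input.length(), '\0');     // upper bound; trimmed below
    const int decoded_length = BIO_read(base64, &decoded[0], static_cast<int>(decoded.size()));
    decoded.resize(decoded_length > 0 ? static_cast<std::size_t>(decoded_length) : 0);

    BIO_free_all(base64);
    return decoded;
}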
I like mtrw's use of EVP.
Below is my "modern C++" take on his answer without manual memory allocation (calloc). It will take a std::string but it can easily be overloaded to use raw bytes for example.
#include <openssl/evp.h>
#include <memory>
#include <stdexcept>
#include <vector>
auto EncodeBase64(const std::string& to_encode) -> std::string {
/// #sa https://www.openssl.org/docs/manmaster/man3/EVP_EncodeBlock.html
const auto predicted_len = 4 * ((to_encode.length() + 2) / 3); // predict output size
const auto output_buffer{std::make_unique<char[]>(predicted_len + 1)};
const std::vector<unsigned char> vec_chars{to_encode.begin(), to_encode.end()}; // convert to_encode into uchar container
const auto output_len = EVP_EncodeBlock(reinterpret_cast<unsigned char*>(output_buffer.get()), vec_chars.data(), static_cast<int>(vec_chars.size()));
if (predicted_len != static_cast<unsigned long>(output_len)) {
throw std::runtime_error("EncodeBase64 error");
}
return output_buffer.get();
}
auto DecodeBase64(const std::string& to_decode) -> std::string {
/// #sa https://www.openssl.org/docs/manmaster/man3/EVP_DecodeBlock.html
const auto predicted_len = 3 * to_decode.length() / 4; // predict output size
const auto output_buffer{std::make_unique<char[]>(predicted_len + 1)};
const std::vector<unsigned char> vec_chars{to_decode.begin(), to_decode.end()}; // convert to_decode into uchar container
const auto output_len = EVP_DecodeBlock(reinterpret_cast<unsigned char*>(output_buffer.get()), vec_chars.data(), static_cast<int>(vec_chars.size()));
if (predicted_len != static_cast<unsigned long>(output_len)) {
throw std::runtime_error("DecodeBase64 error");
}
return output_buffer.get();
}
There's probably a cleaner/better way of doing this (I'd like to get rid of reinterpret_cast). You'll also definitely want a try/catch block to deal with the potential exception.
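For example, a minimal usage sketch with the try/catch mentioned above (function names as defined in this answer; the input string is arbitrary):
#include <iostream>
#include <string>
int main() {
    try {
        const std::string encoded = EncodeBase64("hello");
        std::cout << "encoded = " << encoded << '\n';           // aGVsbG8=
        std::cout << "decoded = " << DecodeBase64(encoded) << '\n';
    } catch (const std::runtime_error& error) {
        // both helpers throw if the predicted and actual lengths disagree
        std::cerr << "base64 error: " << error.what() << '\n';
        return 1;
    }
    return 0;
}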
Improved TCS's answer to remove the macros/data structures:
unsigned char *encodeb64mem( unsigned char *data, int len, int *lenoutput )
{
BIO *b64 = BIO_new(BIO_f_base64()); // create BIO to perform base64
BIO_set_flags(b64, BIO_FLAGS_BASE64_NO_NL);
BIO *mem = BIO_new(BIO_s_mem()); // create BIO that holds the result
// chain base64 with mem, so writing to b64 will encode base64 and write to mem.
BIO_push(b64, mem);
// write data
bool done = false;
int res = 0;
while(!done)
{
res = BIO_write(b64, data, len);
if(res <= 0) // if failed
{
if(BIO_should_retry(b64)){
continue;
}
else // encoding failed
{
/* Handle Error!!! */
}
}
else // success!
done = true;
}
BIO_flush(b64);
// get a pointer to mem's data
unsigned char* output;
*lenoutput = BIO_get_mem_data(mem, &output);
// assign data to output
//std::string s(dt, len2);
return output;
}
To write to a file:
int encodeb64(unsigned char* input, const char* filenm, int leni)
{
BIO *b64 = BIO_new(BIO_f_base64());
BIO_set_flags(b64,BIO_FLAGS_BASE64_NO_NL);
BIO *file = BIO_new_file(filenm, "w");
BIO *mem = BIO_new(BIO_f_buffer());
BIO_push(b64, mem);
BIO_push(mem, file);
// write data
bool done = false;
int res = 0;
while(!done)
{
res = BIO_write(b64, input, leni);
if(res <= 0) // if failed
{
if(BIO_should_retry(b64)){
continue;
}
else // encoding failed
{
/* Handle Error!!! */
}
}
else // success!
done = true;
}
BIO_flush(b64);
BIO_pop(b64);
BIO_free_all(b64);
return 0;
}
Base64 encoding from file to file. Often, due to file size constraints, we have to read the data in chunks and encode as we go. Below is the code.
int encodeb64FromFile(const char* input, const char* outputfilename)
{
BIO *b64 = BIO_new(BIO_f_base64());
BIO_set_flags(b64,BIO_FLAGS_BASE64_NO_NL);
int leni = 3*64;
unsigned char data[3*64];
BIO *file = BIO_new_file(outputfilename, "w");
BIO *mem = BIO_new(BIO_f_buffer());
BIO_push(b64, mem);
BIO_push(mem, file);
FILE *fp = fopen(input, "rb");
while ((leni = fread(data, 1, sizeof data, fp)) > 0) {
// write data
bool done = false;
int res = 0;
while(!done)
{
res = BIO_write(b64, data, leni);
if(res <= 0) // if failed
{
if(BIO_should_retry(b64)){
continue;
}
else // encoding failed
{
/* Handle Error!!! */
}
}
else // success!
done = true;
}
}
BIO_flush(b64);
BIO_pop(b64);
BIO_free_all(b64);
fclose(fp);
return 0;
}
Base64 is really pretty simple; you shouldn't have trouble finding any number of implementations via a quick Google. For example here is a reference implementation in C from the Internet Software Consortium, with detailed comments explaining the process.
The OpenSSL implementation layers on a lot of complexity with the "BIO" stuff, which is not (IMHO) very useful if all you're doing is decoding/encoding.
#include <openssl/bio.h>
#include <openssl/buffer.h>
#include <cstdlib>
#include <cstring>
typedef unsigned char byte;
namespace base64 {
static void Encode(const byte* in, size_t in_len,
char** out, size_t* out_len) {
BIO *buff, *b64f;
BUF_MEM *ptr;
b64f = BIO_new(BIO_f_base64());
buff = BIO_new(BIO_s_mem());
buff = BIO_push(b64f, buff);
BIO_set_flags(buff, BIO_FLAGS_BASE64_NO_NL);
BIO_set_close(buff, BIO_CLOSE);
BIO_write(buff, in, in_len);
BIO_flush(buff);
BIO_get_mem_ptr(buff, &ptr);
(*out_len) = ptr->length;
(*out) = (char *) malloc(((*out_len) + 1) * sizeof(char));
memcpy(*out, ptr->data, (*out_len));
(*out)[(*out_len)] = '\0';
BIO_free_all(buff);
}
static void Decode(const char* in, size_t in_len,
byte** out, size_t* out_len) {
BIO *buff, *b64f;
b64f = BIO_new(BIO_f_base64());
buff = BIO_new_mem_buf((void *)in, in_len);
buff = BIO_push(b64f, buff);
(*out) = (byte *) malloc(in_len * sizeof(char));
BIO_set_flags(buff, BIO_FLAGS_BASE64_NO_NL);
BIO_set_close(buff, BIO_CLOSE);
(*out_len) = BIO_read(buff, (*out), in_len);
(*out) = (byte *) realloc((void *)(*out), ((*out_len) + 1) * sizeof(byte));
(*out)[(*out_len)] = '\0';
BIO_free_all(buff);
}
}
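A small caller sketch for the wrappers above (illustrative only; the caller owns and must free() both output buffers):
#include <cstdio>
#include <cstdlib>
#include <cstring>
int main() {
    const char* msg = "hello"; // arbitrary example input
    char* encoded = nullptr;
    size_t encoded_len = 0;
    base64::Encode(reinterpret_cast<const byte*>(msg), std::strlen(msg), &encoded, &encoded_len);
    std::printf("encoded (%zu bytes): %s\n", encoded_len, encoded);

    byte* decoded = nullptr;
    size_t decoded_len = 0;
    base64::Decode(encoded, encoded_len, &decoded, &decoded_len);
    std::printf("decoded (%zu bytes): %s\n", decoded_len, reinterpret_cast<char*>(decoded));

    std::free(encoded);
    std::free(decoded);
    return 0;
}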
Late to the party, but I came across this problem recently myself. I was unhappy with the BIO solution, which is unnecessarily convoluted, and I did not like 'EncodeBlock' either, because it introduces newline characters I do not want in my Base64 encoded string.
After a little sniffing, I came across the header file openssl/include/crypto/evp.h, which is not part of the default installation (only the include/openssl folder is exported for me) but exposes the solution to the problem.
void evp_encode_ctx_set_flags(EVP_ENCODE_CTX *ctx, unsigned int flags);
/* EVP_ENCODE_CTX flags */
/* Don't generate new lines when encoding */
#define EVP_ENCODE_CTX_NO_NEWLINES 1
/* Use the SRP base64 alphabet instead of the standard one */
#define EVP_ENCODE_CTX_USE_SRP_ALPHABET 2
Using this function, 'no newline' encoding becomes possible with the EVP interface.
Example:
if (EVP_ENCODE_CTX *context = EVP_ENCODE_CTX_new())
{
EVP_EncodeInit(context);
evp_encode_ctx_set_flags(context, EVP_ENCODE_CTX_NO_NEWLINES);
while (hasData())
{
uint8_t *data;
int32_t length = fetchData(&data);
int32_t size = (((EVP_ENCODE_CTX_num(context) + length)/48) * 65) + 1;
uint8_t buffer[size];
EVP_EncodeUpdate(context, buffer, &size, data, length);
//process encoded data.
}
uint8_t buffer[65];
int32_t writtenBytes;
EVP_EncodeFinal(context, buffer, &writtenBytes);
//Do something with the final remainder of the encoded string.
EVP_ENCODE_CTX_free(context);
}
This piece of code will encode the buffer to Base64 without the newlines.
Please note the use of EVP_ENCODE_CTX_num to obtain the 'leftover bytes' still stored in the context object to calculate the correct buffer size.
It is only necessary if you need to call EVP_EncodeUpdate multiple times because your data is exceedingly large or not available all at once.
Related
Thank you so much for answering this question.
I use LAME and I want to decode an mp3 file to wav.
I succeeded in decoding mp3 files into wav files after several searches.
However, the wav file that is created is too large and an error message appears.
Media player error message:
This file cannot be played. The file format may not be supported, the file extension may be incorrect, or the file may be corrupted.
If you know what my problem is, please give me some advice.
Thank you
HEADER FILE
#pragma once
#ifndef _LAME_HELPER_H_
#define _LAME_HELPER_H_
#include <windows.h>
#include "lame.h"
#define LH_STARTED WM_USER+1
#define LH_COMPUTED WM_USER+2
#define LH_DONE WM_USER+3
#define LH_ERROR WM_USER+4
#define MAX_THREAD_COUNT 5
enum encode_mode_e
{
EM_ABR,
EM_CBR,
EM_VBR
};
enum encode_channel_e
{
EC_MONO,
EC_STEREO
};
enum bitrate_e
{
BR_8kbps = 8,
BR_16kbps = 16,
BR_24kbps = 24,
BR_32kbps = 32,
BR_40kbps = 40,
BR_48kbps = 48,
BR_56kbps = 56,
BR_64kbps = 64,
BR_80kbps = 80,
BR_96kbps = 96,
BR_112kbps = 112,
BR_128kbps = 128,
BR_144kbps = 144,
BR_160kbps = 160,
BR_192kbps = 192,
BR_224kbps = 224,
BR_256kbps = 256,
BR_320kbps = 320
};
enum samplerate_e
{
SR_8khz = 8000,
SR_11khz = 11025,
SR_12khz = 12000,
SR_16khz = 16000,
SR_22khz = 22050,
SR_24khz = 24000,
SR_32khz = 32000,
SR_44khz = 44100,
SR_48khz = 48000
};
struct settings_t
{
char* title;
char* artist;
char* album;
char* comment;
char* year;
char* track;
char* genre;
char* albumart;
encode_channel_e channels;
bitrate_e abr_bitrate;
bitrate_e cbr_bitrate;
int quality;
encode_mode_e enc_mode;
samplerate_e resample_frequency;
samplerate_e in_samplerate;
//The constructor; used to set default values
settings_t();
};
class CLameHelper; //lameHelper prototype, needed because of struct StaticParam_t
//Use to hold parameters for the thread function
struct StaticParam_t
{
char* pcm;
char* mp3;
settings_t settings;
WNDPROC callback_proc;
CLameHelper* lhObj;
};
class CLameHelper
{
public :
static const int PCM_SIZE = 4096;
static const int MP3_SIZE = 4096;
HANDLE m_hThread[MAX_THREAD_COUNT];
StaticParam_t* m_phSParam[MAX_THREAD_COUNT];
static int Decode_s(void* pParam);
void WriteWaveHeader(FILE* const, int, int, int, int);
void Write32BitLowHigh(FILE*, int);
void Write16BitLowHigh(FILE*, int);
int SetID3AlbumArt(lame_t gfp, char const* szFileName);
void errorHandler(char*);
char errMsg[1000];
public:
CLameHelper();
~CLameHelper();
int Decode(char* szMp3_in, char* szPcm_out);
int Decode(char* szMp3_in, char* szPcm_out, WNDPROC callback_proc);
};
#endif
CPP FILE
#include "stdafx.h"
#include "LameHelper.h"
settings_t::settings_t()
{
//Setting the default values
title = "";
artist = "";
album = "";
comment = "";
year = "";
track = "";
genre = "";
albumart = NULL;
channels = EC_STEREO;
abr_bitrate = BR_128kbps;
cbr_bitrate = BR_128kbps;
quality = 5;
enc_mode = EM_CBR;
resample_frequency = SR_44khz;
in_samplerate = SR_44khz;
}
CLameHelper::CLameHelper()
{
//Initialize to NULL, aids deletion/closing later
for(int i = 0; i < MAX_THREAD_COUNT; i++)
{
m_hThread[i] = NULL;
m_phSParam[i] = NULL;
}
}
CLameHelper::~CLameHelper()
{
//Destroy all declared objects
for(int i = 0; i < MAX_THREAD_COUNT; i++)
{
if(m_hThread[i] != NULL)
CloseHandle(m_hThread[i]);
if(m_phSParam[i] != NULL)
delete m_phSParam[i];
}
}
int CLameHelper::SetID3AlbumArt(lame_t gfp, char const* szFileName)
{
int iResult = -1;
FILE *pFileName = 0;
char *szAlbumart = 0;
if(szFileName == NULL)
{
return 0;
}
pFileName = fopen(szFileName, "rb");
if(!pFileName)
{
iResult = 1;
}
else
{
size_t size;
fseek(pFileName, 0, SEEK_END);
size = ftell(pFileName);
fseek(pFileName, 0, SEEK_SET);
szAlbumart = (char*)malloc(size);
if(!szAlbumart)
{
iResult = 2;
}
else
{
if(fread(szAlbumart, 1, size, pFileName) != size)
{
iResult = 3;
}
else
{
iResult = id3tag_set_albumart(gfp, szAlbumart, size) ? 4 : 0;
}
free(szAlbumart);
}
fclose(pFileName);
}
switch(iResult)
{
case 1:
sprintf(errMsg, "WARNING: could not find file '%s' for szAlbumart.\n", szFileName);
errorHandler(errMsg);
break;
case 2:
errorHandler("WARNING: insufficient memory for reading the szAlbumart.\n");
break;
case 3:
sprintf(errMsg, "WARNING: read error in '%s' for szAlbumart.\n", szFileName);
errorHandler(errMsg);
break;
case 4:
sprintf(errMsg, "WARNING: unsupported image: '%s' for szAlbumart. Specify JPEG/PNG/GIF image\n", szFileName);
errorHandler(errMsg);
break;
default:
break;
}
return iResult;
}
void CLameHelper::Write16BitLowHigh(FILE * fp, int val)
{
unsigned char bytes[2];
bytes[0] = (val & 0xff);
bytes[1] = ((val >> 8) & 0xff);
fwrite(bytes, 1, 2, fp);
}
void CLameHelper::Write32BitLowHigh(FILE * fp, int val)
{
unsigned char bytes[4];
bytes[0] = (val & 0xff);
bytes[1] = ((val >> 8) & 0xff);
bytes[2] = ((val >> 16) & 0xff);
bytes[3] = ((val >> 24) & 0xff);
fwrite(bytes, 1, 4, fp);
}
void CLameHelper::WriteWaveHeader(FILE * const fp, int pcmbytes, int freq, int channels, int bits)
{
int bytes = (bits + 7) / 8;
/* quick and dirty, but documented */
fwrite("RIFF", 1, 4, fp); /* label */
Write32BitLowHigh(fp, pcmbytes + 44 - 8); /* length in bytes without header */
fwrite("WAVEfmt ", 2, 4, fp); /* 2 labels */
Write32BitLowHigh(fp, 2 + 2 + 4 + 4 + 2 + 2); /* length of PCM format declaration area */
Write16BitLowHigh(fp, 1); /* is PCM? */
Write16BitLowHigh(fp, channels); /* number of channels */
Write32BitLowHigh(fp, freq); /* sample frequency in [Hz] */
Write32BitLowHigh(fp, freq * channels * bytes); /* bytes per second */
Write16BitLowHigh(fp, channels * bytes); /* bytes per sample time */
Write16BitLowHigh(fp, bits); /* bits per sample */
fwrite("data", 1, 4, fp); /* label */
Write32BitLowHigh(fp, pcmbytes); /* length in bytes of raw PCM data */
}
int CLameHelper::Decode(char* szMp3_in, char* szPcm_out)
{
return Decode(szMp3_in, szPcm_out, NULL);
}
//the static function used for the thread
int CLameHelper::Decode_s(void* param)
{
StaticParam_t* sp = (StaticParam_t*)param;
char* szPcm_out = sp->pcm;
char* szMp3_in = sp->mp3;
WNDPROC callback_proc = sp->callback_proc;
CLameHelper* lh = (CLameHelper*)sp->lhObj;
return lh->Decode(szMp3_in, szPcm_out, callback_proc);
}
int CLameHelper::Decode(char* szMp3_in, char* szPcm_out, WNDPROC callback_proc)
{
int read, i, samples;
long wavsize = 0; // use to count the number of mp3 byte read, this is used to write the length of the wave file
long cumulative_read = 0;
short int pcm_l[PCM_SIZE], pcm_r[PCM_SIZE];
unsigned char mp3_buffer[MP3_SIZE];
FILE* mp3 = fopen(szMp3_in, "rb");
if(mp3 == NULL)
{
if(callback_proc != NULL)
{
callback_proc((HWND)GetModuleHandle(NULL), LH_ERROR, -1, NULL);
}
sprintf(errMsg, "FATAL ERROR: file '%s' can't be open for read. Aborting!\n", szMp3_in);
errorHandler(errMsg);
return -1;
}
fseek(mp3, 0, SEEK_END);
long MP3_total_size = ftell(mp3);
fseek(mp3, 0, SEEK_SET);
FILE* pcm = fopen(szPcm_out, "wb");
if(pcm == NULL)
{
if(callback_proc != NULL)
{
callback_proc((HWND)GetModuleHandle(NULL), LH_ERROR, -1, NULL);
}
sprintf(errMsg, "FATAL ERROR: file '%s' can't be open for write. Aborting!\n", szPcm_out);
errorHandler(errMsg);
return -1;
}
lame_t lame = lame_init();
lame_set_decode_only(lame, 1);
if(lame_init_params(lame) == -1)
{
if(callback_proc != NULL)
{
callback_proc((HWND)GetModuleHandle(NULL), LH_ERROR, -2, NULL);
}
sprintf(errMsg, "FATAL ERROR: parameters failed to initialize properly in lame. Aborting!\n", szPcm_out);
errorHandler(errMsg);
return -2;
}
hip_t hip = hip_decode_init();
mp3data_struct mp3data;
memset(&mp3data, 0, sizeof(mp3data));
int nChannels = -1;
int nSampleRate = -1;
int mp3_len;
if(callback_proc != NULL)
{
callback_proc((HWND)GetModuleHandle(NULL), LH_STARTED, NULL, NULL);
}
while((read = fread(mp3_buffer, sizeof(char), MP3_SIZE, mp3)) > 0)
{
mp3_len = read;
cumulative_read += read * sizeof(char);
do
{
samples = hip_decode1_headers(hip, mp3_buffer, mp3_len, pcm_l, pcm_r, &mp3data);
wavsize += samples;
if(mp3data.header_parsed == 1)//header is gotten
{
if(nChannels < 0)//reading for the first time
{
//Write the header
WriteWaveHeader(pcm, 0x7FFFFFFF, mp3data.samplerate, mp3data.stereo, 16); //unknown size, so write maximum 32 bit signed value
}
nChannels = mp3data.stereo;
nSampleRate = mp3data.samplerate;
}
if(samples > 0 && mp3data.header_parsed != 1)
{
errorHandler("WARNING: lame decode error occured!");
break;
}
if(samples > 0)
{
for(i = 0 ; i < samples; i++)
{
fwrite((char*)&pcm_l[i], sizeof(char), sizeof(pcm_l[i]), pcm);
if(nChannels == 2)
{
fwrite((char*)&pcm_r[i], sizeof(char), sizeof(pcm_r[i]), pcm);
}
}
}
mp3_len = 0;
if(callback_proc != NULL)
{
int percentage = ((float)cumulative_read/MP3_total_size)*100;
callback_proc((HWND)GetModuleHandle(NULL), LH_COMPUTED, percentage, NULL);
}
}while(samples>0);
}
i = (16 / 8) * mp3data.stereo;
if (wavsize <= 0)
{
wavsize = 0;
}
else if (wavsize > 0xFFFFFFD0 / i)
{
wavsize = 0xFFFFFFD0;
}
else
{
wavsize *= i;
}
if(!fseek(pcm, 0l, SEEK_SET))//seek back and adjust length
WriteWaveHeader(pcm, (int) wavsize, mp3data.samplerate, mp3data.stereo, 16);
else
errorHandler("WARNING: can't seek back to adjust length in wave header!");
hip_decode_exit(hip);
lame_close(lame);
fclose(mp3);
fclose(pcm);
if(callback_proc != NULL)
{
callback_proc((HWND)GetModuleHandle(NULL), LH_DONE, NULL, NULL);
}
return 0;
}
void CLameHelper::errorHandler(char* msg)
{
printf("%s\n", msg);
}
I'm trying to apply the u-law algorithm to a wav file file.wav, and then create a new file file2.wav.
file.wav has 16 bits/sample, and I want to obtain a file2.wav that has 8 bits/sample.
This is my code:
#define _CRT_SECURE_NO_DEPRECATE
#include <stdio.h>
#include <iostream>
#include <string>
#include <fstream>
using namespace std;
using std::string;
using std::fstream;
typedef struct WAV_HEADER {
char RIFF[4];
unsigned long ChunkSize;
char WAVE[4];
char fmt[4];
unsigned long Subchunk1Size;
unsigned short AudioFormat;
unsigned short NumOfChan;
unsigned long SamplesPerSec;
unsigned long bytesPerSec;
unsigned short blockAlign;
unsigned short bitsPerSample;
char Subchunk2ID[4];
unsigned long Subchunk2Size;
} wav_hdr;
int headerSize = 0;
string path = "file.wav";
wav_hdr wavHeader;
FILE* openFile() {
const char* filePath;
FILE *wavFile;
headerSize = sizeof(wav_hdr);
filePath = path.c_str();
wavFile = fopen(filePath, "rb");
if (wavFile == NULL) {
printf("Error\n");
}
fread(&wavHeader, headerSize, 1, wavFile);
return wavFile;
}
int8_t MuLaw_Encode(int16_t number)
{
const uint16_t MULAW_MAX = 0x1FFF;
const uint16_t MULAW_BIAS = 33;
uint16_t mask = 0x1000;
uint8_t sign = 0;
uint8_t position = 12;
uint8_t lsb = 0;
if (number < 0)
{
number = -number;
sign = 0x80;
}
number += MULAW_BIAS;
if (number > MULAW_MAX)
{
number = MULAW_MAX;
}
for (; ((number & mask) != mask && position >= 5); mask >>= 1, position--)
;
lsb = (number >> (position - 4)) & 0x0f;
return (~(sign | ((position - 5) << 4) | lsb));
}
int fileSize(FILE *file) {
int fileSize = 0;
fseek(file, 0, SEEK_END);
fileSize = ftell(file);
fseek(file, 0, SEEK_SET);
return fileSize;
}
double bitsPerSample() {
double bitsPerE;
bitsPerE = wavHeader.bitsPerSample;
return bitsPerE;
}
int main() {
FILE *wavFile;
wavFile = openFile();
FILE* fptr2;
fptr2 = fopen("file2.wav", "wb");
int samples_count = fileSize(wavFile) / bitsPerSample();
short int *value = new short int[samples_count];
for (int16_t i = 0; i < samples_count; i++)
{
fread(&value[i], samples_count, 1, wavFile);
cout << value[i] << " "; // the output is in the attached picture
MuLaw_Encode(value[i]);
}
fwrite(value, sizeof(char), samples_count, fptr2);
return 0;
}
I took the u-law algorithm from here (2.1. µ-Law Compression (Encoding) Algorithm)
Am I doing something wrong? Because I obtain a corrupt file.
No header is ever written to the result file, so the first part of the data would get interpreted as a header, and it would be wrong. You can see in the file that it does not start with "RIFF...WAVEfmt " or something sufficiently similar.
The data written to the result file is value, the original data read from the input file, not the µ-law encoded data (which is only cout'ed and not saved).
The loop that reads the samples reads some wrong samples, because the computation of samples_count puts the current position back at the start, where the header is.
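A minimal sketch of the fixes described above (illustrative only, not a drop-in patch; the MuLawEncodeFile name is hypothetical, the code reuses the question's wav_hdr struct and MuLaw_Encode(), assumes the same canonical 44-byte header layout the question assumes, and loads the whole data chunk into memory; strict players may additionally expect an extended fmt chunk for the non-PCM mu-law format):
#include <cstdio>
#include <cstdint>
#include <vector>

int MuLawEncodeFile(const char* inPath, const char* outPath) {
    FILE* in = std::fopen(inPath, "rb");
    if (!in) return -1;

    wav_hdr hdr;
    std::fread(&hdr, sizeof(hdr), 1, in);                 // read the original header

    std::vector<int16_t> samples(hdr.Subchunk2Size / 2);  // 16-bit samples in the data chunk
    std::fread(samples.data(), sizeof(int16_t), samples.size(), in);
    std::fclose(in);

    std::vector<int8_t> encoded(samples.size());
    for (std::size_t i = 0; i < samples.size(); ++i)
        encoded[i] = MuLaw_Encode(samples[i]);            // keep the encoded value this time

    // Fix up the header for 8-bit mu-law data (one byte per sample).
    hdr.AudioFormat = 7;                                  // WAVE_FORMAT_MULAW
    hdr.bitsPerSample = 8;
    hdr.blockAlign = hdr.NumOfChan;
    hdr.bytesPerSec = hdr.SamplesPerSec * hdr.NumOfChan;
    hdr.Subchunk2Size = (unsigned long)encoded.size();
    hdr.ChunkSize = 36 + hdr.Subchunk2Size;

    FILE* out = std::fopen(outPath, "wb");
    if (!out) return -1;
    std::fwrite(&hdr, sizeof(hdr), 1, out);               // write a header to the output
    std::fwrite(encoded.data(), 1, encoded.size(), out);  // write the encoded data, not the input
    std::fclose(out);
    return 0;
}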
I've been trying to use libgcrypt for a small cryptography project of mine, but I can't seem to be able to implement the en/decryption correctly. The following is the class and the usage of it.
#include <iostream>
#include <string>
#include <cstdlib>
#include <gcrypt.h>
#include "aes.h"
#define GCRY_CIPHER GCRY_CIPHER_AES128
#define GCRY_MODE GCRY_CIPHER_MODE_ECB
using namespace std;
aes::aes(string a) {
key = a;
keyLength = gcry_cipher_get_algo_keylen(GCRY_CIPHER);
gcry_control (GCRYCTL_DISABLE_SECMEM, 0);
gcry_control (GCRYCTL_INITIALIZATION_FINISHED, 0);
gcry_cipher_open(&handle, GCRY_CIPHER, GCRY_MODE, 0);
gcry_cipher_setkey(handle, key.c_str(), keyLength);
}
string aes::encrypt(string text) {
size_t textLength = text.size() + 1;
char * encBuffer = (char *)malloc(textLength);
gcry_cipher_encrypt(handle, encBuffer, textLength, text.c_str(), textLength);
string ret (encBuffer);
return ret;
}
string aes::decrypt(string text) {
size_t textLength = text.size() + 1;
char * decBuffer = (char * )malloc(textLength);
gcry_cipher_decrypt(handle, decBuffer, textLength, text.c_str(), textLength);
string ret (decBuffer);
return ret;
}
And I use it in a main function like so:
...
aes bb = aes("one test AES key");
string test = "Some Message";
string enc = bb.encrypt(test);
string dec = bb.decrypt(enc);
for (size_t index = 0; index<enc.size(); index++)
printf("%c", enc[index]);
printf("\n");
cout << dec << endl;
...
And the output is
BBBBBBBBBBBBB
�n�S[
The odd thing is, I made a test program with almost the exact same statements that works perfectly. It started falling apart when I tried to package it into a class. The following is the code for this program, if anyone wants to see it.
#include <cstdlib>
#include <iostream>
#include <string>
#include <gcrypt.h>
using namespace std;
#define GCRY_CIPHER GCRY_CIPHER_AES128 // Pick the cipher here
#define GCRY_MODE GCRY_CIPHER_MODE_ECB // Pick the cipher mode here
void aesTest(void)
{
gcry_cipher_hd_t handle;
size_t keyLength = gcry_cipher_get_algo_keylen(GCRY_CIPHER);
string txtBuffer ("123456789 abcdefghijklmnopqrstuvwzyz ABCDEFGHIJKLMNOPQRSTUVWZYZ");
size_t txtLength = txtBuffer.size() +1; // string plus termination
char * encBuffer = (char *)malloc(txtLength);
char * outBuffer = (char *)malloc(txtLength);
char * key = "one test AES key"; // 16 bytes
gcry_control (GCRYCTL_DISABLE_SECMEM, 0);
gcry_control (GCRYCTL_INITIALIZATION_FINISHED, 0);
gcry_cipher_open(&handle, GCRY_CIPHER, GCRY_MODE, 0);
gcry_cipher_setkey(handle, key, keyLength);
gcry_cipher_encrypt(handle, encBuffer, txtLength, txtBuffer.c_str(), txtLength);
gcry_cipher_decrypt(handle, outBuffer, txtLength, encBuffer, txtLength);
size_t index;
printf("encBuffer = ");
for (index = 0; index<txtLength; index++)
printf("%c", encBuffer[index]);
printf("\n");
printf("outBuffer = %s\n", outBuffer);
gcry_cipher_close(handle);
free(encBuffer);
free(outBuffer);
}
int main() {
aesTest();
return 0;
}
When you use a std::string to capture the encrypted data, you are possibly losing some data due to the presence of '\0' in the encrypted string.
Try using std::vector<char> instead.
void aes::encrypt(string text, std::vector<char>& ret) {
size_t textLength = text.size() + 1;
ret.resize(textLength);
gcry_cipher_encrypt(handle, ret.data(), textLength, text.c_str(), textLength);
}
string aes::decrypt(std::vector<char> const& text) {
size_t textLength = text.size(); // the vector already holds exactly the ciphertext
// Since you are in C++ land, use new and delete
// instead of malloc and free.
char * decBuffer = new char[textLength];
gcry_cipher_decrypt(handle, decBuffer, textLength, text.data(), textLength);
string ret (decBuffer);
delete [] decBuffer;
return ret;
}
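Hypothetical usage with the adjusted signatures (same aes class as in the question, with the two methods above swapped in):
...
aes bb = aes("one test AES key");
std::string test = "Some Message";
std::vector<char> enc;
bb.encrypt(test, enc);          // the ciphertext may legitimately contain '\0' bytes
std::string dec = bb.decrypt(enc);
std::cout << dec << std::endl;
...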
How can I generate SHA1 or SHA2 hashes using the OpenSSL library?
I searched Google and could not find any function or example code.
From the command line, it's simply:
printf "compute sha1" | openssl sha1
You can invoke the library like this:
#include <stdio.h>
#include <string.h>
#include <openssl/sha.h>
int main()
{
unsigned char ibuf[] = "compute sha1";
unsigned char obuf[20];
SHA1(ibuf, strlen((const char *)ibuf), obuf);
int i;
for (i = 0; i < 20; i++) {
printf("%02x ", obuf[i]);
}
printf("\n");
return 0;
}
OpenSSL has horrible documentation with no code examples, but here you are:
#include <openssl/sha.h>
bool simpleSHA256(void* input, unsigned long length, unsigned char* md)
{
SHA256_CTX context;
if(!SHA256_Init(&context))
return false;
if(!SHA256_Update(&context, (unsigned char*)input, length))
return false;
if(!SHA256_Final(md, &context))
return false;
return true;
}
Usage:
unsigned char md[SHA256_DIGEST_LENGTH]; // 32 bytes
if(!simpleSHA256(<data buffer>, <data length>, md))
{
// handle error
}
Afterwards, md will contain the binary SHA-256 message digest. Similar code can be used for the other SHA family members, just replace "256" in the code.
If you have larger data, you of course should feed data chunks as they arrive (multiple SHA256_Update calls).
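For instance, a minimal sketch of such chunked hashing with the same SHA256_* calls used above (the chunk strings are just stand-ins for data arriving in pieces; the next answer does the same for a file):
#include <openssl/sha.h>
#include <cstdio>
#include <cstring>
int main()
{
    const char* chunks[] = { "compute ", "sha2 in ", "pieces" }; // stand-ins for data arriving in pieces

    SHA256_CTX context;
    if (!SHA256_Init(&context))
        return 1;

    for (const char* chunk : chunks)                  // one SHA256_Update per chunk
        if (!SHA256_Update(&context, chunk, std::strlen(chunk)))
            return 1;

    unsigned char md[SHA256_DIGEST_LENGTH];
    if (!SHA256_Final(md, &context))
        return 1;

    for (unsigned char byte : md)                     // print the digest as hex
        std::printf("%02x", byte);
    std::printf("\n");
    return 0;
}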
Adaptation of @AndiDog's version for big files:
#include <openssl/sha.h>
#include <fstream>
#include <iomanip>
#include <optional>
#include <sstream>
#include <string>
static const int K_READ_BUF_SIZE{ 1024 * 16 };
std::optional<std::string> CalcSha256(std::string filename)
{
// Initialize openssl
SHA256_CTX context;
if(!SHA256_Init(&context))
{
return std::nullopt;
}
// Read file and update calculated SHA
char buf[K_READ_BUF_SIZE];
std::ifstream file(filename, std::ifstream::binary);
while (file.good())
{
file.read(buf, sizeof(buf));
if(!SHA256_Update(&context, buf, file.gcount()))
{
return std::nullopt;
}
}
// Get Final SHA
unsigned char result[SHA256_DIGEST_LENGTH];
if(!SHA256_Final(result, &context))
{
return std::nullopt;
}
// Transform byte-array to string
std::stringstream shastr;
shastr << std::hex << std::setfill('0');
for (const auto &byte: result)
{
shastr << std::setw(2) << (int)byte;
}
return shastr.str();
}
The correct syntax at the command line should be
echo -n "compute sha1" | openssl sha1
otherwise you'll hash the trailing newline character as well.
Here is OpenSSL example of calculating sha-1 digest using BIO:
#include <openssl/bio.h>
#include <openssl/evp.h>
#include <stdexcept>
#include <string>
#include <vector>
std::string sha1(const std::string &input)
{
BIO * p_bio_md = nullptr;
BIO * p_bio_mem = nullptr;
try
{
// make chain: p_bio_md <-> p_bio_mem
p_bio_md = BIO_new(BIO_f_md());
if (!p_bio_md) throw std::bad_alloc();
BIO_set_md(p_bio_md, EVP_sha1());
p_bio_mem = BIO_new_mem_buf((void*)input.c_str(), input.length());
if (!p_bio_mem) throw std::bad_alloc();
BIO_push(p_bio_md, p_bio_mem);
// read through p_bio_md
// read sequence: buf <<-- p_bio_md <<-- p_bio_mem
std::vector<char> buf(input.size());
for (;;)
{
auto nread = BIO_read(p_bio_md, buf.data(), buf.size());
if (nread < 0) { throw std::runtime_error("BIO_read failed"); }
if (nread == 0) { break; } // eof
}
// get result
char md_buf[EVP_MAX_MD_SIZE];
auto md_len = BIO_gets(p_bio_md, md_buf, sizeof(md_buf));
if (md_len <= 0) { throw std::runtime_error("BIO_gets failed"); }
std::string result(md_buf, md_len);
// clean
BIO_free_all(p_bio_md);
return result;
}
catch (...)
{
if (p_bio_md) { BIO_free_all(p_bio_md); }
throw;
}
}
Though it's longer than just calling the SHA1 function from OpenSSL, it's more universal and can be reworked to work with file streams (thus processing data of any length).
C version of @Nayfe's code, generating a SHA1 hash from a file:
#include <stdio.h>
#include <stdlib.h>
#include <openssl/sha.h>
static const int K_READ_BUF_SIZE = { 1024 * 16 };
unsigned char* calculateSHA1(char *filename)
{
if (!filename) {
return NULL;
}
FILE *fp = fopen(filename, "rb");
if (fp == NULL) {
return NULL;
}
unsigned char* sha1_digest = malloc(sizeof(char)*SHA_DIGEST_LENGTH);
SHA_CTX context;
if(!SHA1_Init(&context))
return NULL;
unsigned char buf[K_READ_BUF_SIZE];
while (!feof(fp))
{
size_t total_read = fread(buf, 1, sizeof(buf), fp);
if(!SHA1_Update(&context, buf, total_read))
{
return NULL;
}
}
fclose(fp);
if(!SHA1_Final(sha1_digest, &context))
return NULL;
return sha1_digest;
}
It can be used as follows:
unsigned char *sha1digest = calculateSHA1("/tmp/file1");
The sha1digest variable contains the SHA1 hash.
You can print it on the screen using the following for-loop:
char *sha1hash = (char *)malloc(sizeof(char) * 41);
sha1hash[40] = '\0';
int i;
for (i = 0; i < SHA_DIGEST_LENGTH; i++)
{
sprintf(&sha1hash[i*2], "%02x", sha1digest[i]);
}
printf("SHA1 HASH: %s\n", sha1hash);