I'm trying to find an integer (6664) in a binary file (file.bin); I have to shift this integer to the right once and write the result back to the same position where the integer was found. However, I am not able to overwrite the original value with the shifted value. I developed the code below:
#include <cstdio>
#include <cstring>
int main(){
//opening file
FILE *pfile;
pfile = fopen("file.bin","r+b");
//finding file size
fseek(pfile, 0L, SEEK_END);
long size= ftell(pfile);
rewind(pfile);
//reading data
int vec[size];
fread(vec, sizeof(int), size, pfile);
for(int i = 0; i<size; i++){
if(vec[i]==6664){
int aux = vec[i]>>1;
fseek(pfile, i, SEEK_SET);
fwrite(&aux, sizeof(int), 1, pfile);
}
}
return 0;
}
I generated the file with the following command:
echo "0000000: 6408 0623 77ef bfbd efbf bdef bfbd 2779 d..#w.........'y
0000010: efbf bdef 081a 0000 0000 efbf bdef bfbd ................
0000020: 4577 efbf bdef bfbd efbf bd00 Ew.........." | xxd -r > file.bin
Any thoughts?
IMHO, you should read in chunks. There is no guarantee that your platform or executable has enough memory to read in the whole file at once (files can be huge).
static const size_t QUANTITY_INTEGERS = 1024u * 1024u;
uint16_t buffer[QUANTITY_INTEGERS] = {0};
std::ifstream number_file("file.bin", std::ios::binary);
while (number_file.read((char *) &buffer[0], QUANTITY_INTEGERS * sizeof(uint16_t)) || number_file.gcount() > 0) // also handle the final partial chunk
{
size_t numbers_read = number_file.gcount() / sizeof(uint16_t); // gcount() is in bytes, so convert to an element count
uint16_t * p_number = std::find(&buffer[0], &buffer[numbers_read], 0x6664);
if (p_number != &buffer[numbers_read])
{
std::cout << "0x6664 found.\n";
break;
}
}
You can use whatever buffer capacity you want.
Aside from the complete lack of error handling, you are not adequately differentiating between bytes and integers.
Your size variable is expressed in bytes, but you are using it as a count when dealing with your vec array. As such, you are over-allocating your array, reading too many ints from the file into the array, and iterating through too many elements of the array.
Also, when writing back to the file, you are treating the loop counter i as a byte offset, which it is not, thus you are writing to the wrong offset in the file.
Try something more like this instead:
#include <cstdio>
#include <cstdint>
#include <vector>
typedef int32_t myint_t; // or int16_t, if needed...
int main()
{
//opening file
FILE *pfile = fopen("file.bin", "r+b");
if (!pfile) return -1;
//finding file size
//TODO: use f/stat() or std::filesystem::file_size() instead
if (fseek(pfile, 0L, SEEK_END) < 0) {
fclose(pfile);
return -1;
}
long size = ftell(pfile);
if (size < 0) {
fclose(pfile);
return -1;
}
rewind(pfile);
//reading data
std::vector<myint_t> vec(size / sizeof(myint_t));
size_t numItems = fread(vec.data(), sizeof(myint_t), vec.size(), pfile);
for(size_t i = 0; i < numItems; ++i) {
if (vec[i] == 6664) {
vec[i] >>= 1;
// TODO: it is more efficient for the filesystem to make
// smaller relative seeks using SEEK_CUR than to make
// larger seeks from the beginning using SEEK_SET...
if (fseek(pfile, i * sizeof(myint_t), SEEK_SET) < 0) {
fclose(pfile);
return -1;
}
if (fwrite(&vec[i], sizeof(myint_t), 1, pfile) != 1) {
fclose(pfile);
return -1;
}
}
}
fclose(pfile);
return 0;
}
That being said, consider using C++-style file I/O instead of C-style file I/O, eg:
#include <iostream>
#include <fstream>
#include <vector>
#include <cstdint>
using myint_t = int32_t; // or int16_t, if needed...
int main()
{
std::fstream fs;
fs.exceptions(std::fstream::failbit | std::fstream::badbit);
try {
//opening file
fs.open("file.bin", std::ios::in | std::ios::out | std::ios::binary | std::ios::ate);
//finding file size
//TODO: use f/stat() or std::filesystem::file_size() instead
long size = fs.tellg();
fs.seekg(0);
//reading data
std::vector<myint_t> vec(size / sizeof(myint_t));
fs.read(reinterpret_cast<char*>(vec.data()), sizeof(myint_t) * vec.size());
size_t numItems = fs.gcount() / sizeof(myint_t);
for(size_t i = 0; i < numItems; ++i) {
if (vec[i] == 6664) {
vec[i] >>= 1;
// TODO: it is more efficient for the filesystem to make
// smaller relative seeks using std::ios::cur than to make
// larger seeks from the beginning...
fs.seekp(i * sizeof(myint_t));
fs.write(reinterpret_cast<char*>(&vec[i]), sizeof(myint_t));
}
}
}
catch(...) {
return -1;
}
return 0;
}
Though, the following would be a little bit simpler and safer:
#include <iostream>
#include <fstream>
#include <vector>
#include <filesystem>
#include <cstdint>
using myint_t = int32_t; // or int16_t, if needed...
int main()
{
std::ifstream ifs;
std::ofstream ofs;
ifs.exceptions(std::ifstream::failbit | std::ifstream::badbit);
ofs.exceptions(std::ofstream::failbit | std::ofstream::badbit);
try {
//opening files
ifs.open("file.bin", std::ios::binary);
ofs.open("file.new", std::ios::binary);
//reading data
myint_t value;
bool found = false;
while (ifs.read(reinterpret_cast<char*>(&value), sizeof(value))) {
if (value == 6664) {
value >>= 1;
found = true;
}
ofs.write(reinterpret_cast<char*>(&value), sizeof(value));
}
ifs.close();
ofs.close();
if (found) {
std::filesystem::rename("file.bin", "file.bak");
std::filesystem::rename("file.new", "file.bin");
std::filesystem::remove("file.bak");
}
else {
std::filesystem::remove("file.new");
}
}
catch(...) {
return -1;
}
return 0;
}
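As the TODO comments above hint, C++17's std::filesystem::file_size() can replace the seek-to-end / tell dance for getting the size. A minimal sketch of my own, using the non-throwing overload:
#include <cstdint>
#include <filesystem>
#include <iostream>
#include <system_error>

int main()
{
    std::error_code ec;
    std::uintmax_t size = std::filesystem::file_size("file.bin", ec);
    if (ec) {
        std::cerr << "file_size failed: " << ec.message() << "\n";
        return -1;
    }
    std::cout << "file.bin is " << size << " bytes\n";
    return 0;
}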
I don't know what the problem is with std::filesystem::resize_file (standard C++17) in Visual Studio. When I test the STL call manually it resizes as expected, but when I use it inside the conditional loop below it behaves like a bug: the resulting output is split into two parts. I tested it with a 2 MB file; the result I expect is that the input file is resized to 0 bytes at the end.
std::ifstream input("D:/input.exe", std::ios::binary);
if (input.is_open())
{
std::ofstream output("D:/output.exe", std::ios::binary | std::ios::ate | std::ios::app);
auto p = std::filesystem::path("D:/input.exe");
std::vector<char> buffer(1048576);
do
{
long long setPosition;
if (std::filesystem::file_size(p) > buffer.size())
{
setPosition = (std::filesystem::file_size(p) - buffer.size());
input.seekg(setPosition);
}
else
{
input.seekg(0);
}
input.read(buffer.data(), buffer.size());
std::streamsize dataSize = input.gcount();
if (dataSize)
{
output.seekp(0);
output.write(buffer.data(), dataSize);
long long resizeFile = (std::filesystem::file_size(p) - dataSize);
std::filesystem::resize_file(p, resizeFile);
}
else
{
output.close();
input.close();
break;
}
} while (true);
}
else
{
std::cout << "File is not exist";
}
What you are trying to do will not work. resize_file truncates the file at the end, so when you read X bytes from the start of the file and then truncate it, you chop off X bytes from the end of the file. You will probably end up with both input.exe and output.exe containing the beginning of the original input.exe. What you should do instead is truncate the file after you have read it. The code below makes output.exe a copy of the original input.exe and leaves input.exe at 0 bytes.
#include <iostream>
#include <fstream>
#include <filesystem>
#include <vector>
int main() {
auto p = std::filesystem::path("D:/input.exe");
std::ifstream input(p, std::ios::binary);
if (input)
{
std::ofstream output("D:/output.exe", std::ios::binary);
std::vector<char> buffer(1048576);
while (true) {
input.read(buffer.data(), buffer.size());
std::streamsize dataSize = input.gcount();
if (dataSize==0) break;
output.write(buffer.data(), dataSize);
}
std::filesystem::resize_file(p, 0);
}
else
{
std::cout << "File does not exist";
}
}
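As a tiny standalone illustration of the truncate-at-the-end behaviour described above (demo.bin is just a throwaway file name used for this example):
#include <filesystem>
#include <fstream>
#include <iostream>

int main()
{
    std::ofstream("demo.bin") << "0123456789";                    // write 10 bytes; the temporary stream closes immediately
    std::filesystem::resize_file("demo.bin", 4);                  // keeps "0123", drops the rest
    std::cout << std::filesystem::file_size("demo.bin") << "\n";  // prints 4
}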
I was reading sehe's answer for fast text file reading in C++, which looks like this.
static uintmax_t wc(char const *fname)
{
static const auto BUFFER_SIZE = 16*1024;
int fd = open(fname, O_RDONLY);
if(fd == -1)
handle_error("open");
/* Advise the kernel of our access pattern. */
posix_fadvise(fd, 0, 0, 1); // FDADVICE_SEQUENTIAL
char buf[BUFFER_SIZE + 1];
uintmax_t lines = 0;
while(size_t bytes_read = read(fd, buf, BUFFER_SIZE))
{
if(bytes_read == (size_t)-1)
handle_error("read failed");
if (!bytes_read)
break;
for(char *p = buf; (p = (char*) memchr(p, '\n', (buf + bytes_read) - p)); ++p)
++lines;
}
return lines;
}
This is cool, but I was wondering if a similar approach can be taken when we aren't dealing with a character operation like counting newlines, but want to operate on each line of data. Say, for instance, I had a file of doubles and already had some function parse_line_to_double to use on each line.
12.44243
4242.910
...
That is, how can I read BUFFER_SIZE bytes into my buffer but avoid splitting the last line read? Effectively, can I ask "Give me BUFFER_SIZE or less bytes while ensuring that the last byte read is a newline character (or EOF)"?
Knowing extremely little about low-level I/O like this, the ideas that came to mind were:
Can I "back up" fd to the most recent newline between iterations?
Do I have to keep a second buffer holding a copy of the current line being read all the time?
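A rough sketch of how that could look without seeking backwards: process the complete lines in the buffer, then move the unfinished tail line to the front of the same buffer before the next read(). parse_line_to_double is the hypothetical per-line function mentioned above (declared but not defined here), process_lines is a name of my own, and the sketch assumes no single line is longer than BUFFER_SIZE:
#include <cstring>      // memchr, memmove
#include <unistd.h>     // read, ssize_t

void parse_line_to_double(const char *line);  // hypothetical, as described above

static void process_lines(int fd)
{
    static const size_t BUFFER_SIZE = 16 * 1024;
    char buf[BUFFER_SIZE + 1];
    size_t carry = 0;                                   // bytes of the unfinished last line
    ssize_t bytes_read;
    while ((bytes_read = read(fd, buf + carry, BUFFER_SIZE - carry)) > 0)
    {
        char *line = buf;
        char *end  = buf + carry + (size_t)bytes_read;
        char *nl;
        while ((nl = (char *)memchr(line, '\n', end - line)))
        {
            *nl = '\0';                                 // terminate the line in place
            parse_line_to_double(line);
            line = nl + 1;
        }
        carry = (size_t)(end - line);                   // unfinished tail, if any
        memmove(buf, line, carry);                      // move it to the front for the next read
    }
    if (carry)                                          // file did not end with '\n'
    {
        buf[carry] = '\0';
        parse_line_to_double(buf);
    }
}
Error handling of read() returning -1 is omitted in this sketch.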
Here is a comparison test. First, let's try the easy way. Just read the file with standard C++ functions:
#include <iostream>
#include <string>
#include <fstream> //std::ifstream
#include <sstream> //std::stringstream
#include <cstdint> //uintmax_t
uintmax_t test1(char const *fname)
{
std::ifstream fin(fname);
if(!fin) return 0;
uintmax_t lines = 0;
std::string str;
double value;
while(fin >> value)
{
//std::cout << value << "\n";
lines++;
}
return lines;
}
Next, with std::stringstream this is about 2.5 times faster:
uintmax_t test2(char const *fname)
{
std::ifstream fin(fname);
if(!fin) return 0;
uintmax_t lines = 0;
std::string str;
double value;
std::stringstream ss;
ss << fin.rdbuf();
while(ss >> value)
lines++;
return lines;
}
Next, let's read the whole file into memory. This will be fine as long as the file is less than 1 GiB or so. Assuming there is a double value on each line, let's extract that value. test3 is more complicated and less flexible, and it's not any faster than test2:
uintmax_t test3(char const *fname)
{
std::ifstream fin(fname, std::ios::binary);
if(!fin) return 0;
fin.seekg(0, std::ios::end);
size_t filesize = (size_t)fin.tellg();
fin.seekg(0);
std::string str(filesize, 0);
fin.read(&str[0], filesize);
double value;
uintmax_t lines = 0;
size_t beg = 0;
size_t i;
size_t len = str.size();
for(i = 0; i < len; i++)
{
if(str[i] == '\n' || i == len - 1)
{
try
{
value = std::stod(str.substr(beg, i - beg));
//std::cout << value << "\n";
beg = i + 1;
lines++;
}
catch(...)
{
}
}
}
return lines;
}
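Part of test3's cost is the substr() allocation and the exceptions std::stod throws on the trailing fragment. Where the standard library provides the C++17 floating-point overloads of std::from_chars (recent MSVC and GCC 11+ do), a variant like the following sketch avoids both; the name test3_from_chars is mine and I haven't benchmarked it here:
#include <charconv>      // std::from_chars
#include <cstdint>       // uintmax_t
#include <cstring>       // memchr
#include <string>
#include <system_error>  // std::errc

// Sketch: parse one double per line directly from the already-loaded string,
// without substr() or exceptions. Operates on the buffer that test3 read.
uintmax_t test3_from_chars(const std::string &str)
{
    uintmax_t lines = 0;
    const char *p   = str.data();
    const char *end = p + str.size();
    while (p < end)
    {
        double value;
        auto res = std::from_chars(p, end, value);  // note: does not skip leading whitespace
        if (res.ec == std::errc())
            lines++;
        // jump to the character after the next newline (or stop at end of buffer)
        const char *nl = (const char *)memchr(res.ptr, '\n', end - res.ptr);
        if (!nl)
            break;
        p = nl + 1;
    }
    return lines;
}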
For comparison with the wc function in the question, let's read the whole file into memory and only count the number of lines. This runs a little faster than wc (as expected), suggesting that there is no need for additional optimizations:
uintmax_t test_countlines(char const *fname)
{
std::ifstream fin(fname, std::ios::binary);
if(!fin) return 0;
fin.seekg(0, std::ios::end);
size_t filesize = (size_t)fin.tellg();
fin.seekg(0);
std::string str(filesize, 0);
fin.read(&str[0], filesize);
uintmax_t lines = 0;
for(auto &c : str)
if(c == '\n')
lines++;
return lines;
}
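A hypothetical timing harness for comparing these functions (the file name and the choice of test2 are just placeholders):
#include <chrono>
#include <cstdint>
#include <iostream>

int main()
{
    auto t0 = std::chrono::steady_clock::now();
    uintmax_t lines = test2("numbers.txt");  // swap in any of the test functions above
    auto t1 = std::chrono::steady_clock::now();
    std::cout << lines << " lines in "
              << std::chrono::duration<double>(t1 - t0).count() << " s\n";
    return 0;
}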
I'm trying to implement the function from listing 5.1 here
but when I copy from the file into a buffer with read, I just get the same character (Í) repeated for the whole array. string.txt is a copy-and-paste of the content from the previous link.
Here is my code:
#include <iostream>
#include <fstream>
#include <string>
#include <cinttypes>
#include <cstdio>
#include <cstring>
const int block_size = 0x4000; //16KB
int search(char* buffer, int searchLength, char* stringToSearch, int stringToSearchLength) {
char * potentialMatch;
while (searchLength) {
potentialMatch = reinterpret_cast<char *>(memchr(buffer, *stringToSearch, searchLength));
if (potentialMatch == NULL)
break;
if (stringToSearchLength == 1) {
return 1;
} else {
if (!memcmp(potentialMatch + 1, stringToSearch + 1, stringToSearchLength - 1))
return 1;
}
searchLength -= potentialMatch - buffer + 1;
buffer = potentialMatch + 1;
}
return 0;
}
int main(int argc, char* argv[]) {
char *toSearch = "Interpreting Where";
int done = 0;
int found = 0;
char *buffer;
int64_t fileSizeLeft = 0;
std::ifstream myFile("string.txt");
if (!myFile.fail()) {
buffer = new char[block_size];
myFile.seekg(0, std::ios::end); //Get file's size
fileSizeLeft = myFile.tellg();
} else {
std::cout << "Cannot open file" << std::endl;
return 1;
}
int toSearchLength = strlen(toSearch);
int stringLeft = toSearchLength - 1;
int first_time = 1;
while (!done && fileSizeLeft > toSearchLength) {
if (first_time) {
myFile.read(buffer, block_size);
found = search(buffer, block_size, toSearch, toSearchLength);
} else {
memcpy(buffer, buffer + stringLeft, stringLeft);
myFile.read(buffer+stringLeft, fileSizeLeft-stringLeft);
found = search(buffer, block_size, toSearch, toSearchLength);
}
fileSizeLeft = fileSizeLeft - block_size;
first_time = 0;
}
if (found) {
std::cout << "String found" << std::endl;
} else {
std::cout << "String not found" << std::endl;
}
myFile.close();
delete[] buffer;
return 0;
}
I hope you can help me see what I'm doing wrong, thanks!
You are setting myFile's position to ios_base::end with seekg:
myFile.seekg(0, std::ios::end);
Then trying to read from it:
myFile.read(buffer, block_size);
Clearly no data will be read, since myFile is already positioned at the end. You'll just be searching whatever uninitialized data was already in buffer.
What you probably intended to do was to set your myFile position back to the beginning by doing this before reading:
myFile.seekg(0, std::ios::beg);
I have a simple program that reads data from a PNG into a 2D array. I would like to save that data to a .RAW file so that Raw Studio or Irfanview can view the raw image that my program outputs to my_out.raw. Currently, if I just write the raw binary data to the my_out.raw file, neither application can actually read the file, that is, view the image. What do I need to do to the program below so that I can see the image?
The code to read the PNG files is:
// MAIN.cpp
#include "pngfilereader.h"
#include <string>
#include <vector>
#include <fstream>
#include <iostream>
int main (int argc, char *argv[])
{
PNGFileReader pngfr;
if (!pngfr.decompress_png_to_raw(std::string("/home/matt6809/Downloads"
"/City.png"))) {
std::cout << "File decompression error: " << std::endl;
} else {
std::ofstream out;
out.open("./my_out.raw", std::ios_base::out);
std::vector<std::vector<unsigned char> > data;
pngfr.get_image_data(data);
typedef std::vector<std::vector<unsigned char> >::iterator row_it;
typedef std::vector<unsigned char>::iterator col_it;
for(row_it rit= data.begin(); rit != data.end(); ++rit) {
for(col_it cit = rit->begin(); cit != rit->end(); ++cit) {
out << (*cit);
}
}
out << std::endl;
}
return 0;
}
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <png.h>
#include <iostream>
#include <vector>
#include <string>
class PNGFileReader
{
public:
PNGFileReader();
~PNGFileReader();
// Public exposed API:
bool compress_raw_to_png(uint8_t data, int size);
bool decompress_png_to_raw(const std::string &path);
// Getters
long unsigned int get_image_width();
long unsigned int get_image_height();
void get_image_data(std::vector<std::vector<unsigned char> > &data);
private:
// Helper functions:
bool read_png(const std::string &path);
bool create_png_structs(FILE *fp);
bool free_data();
bool alloc_data();
// Member variables:
png_structp m_pPNG;
png_infop m_pPNGInfo;
png_infop m_pPNGEndInfo;
png_bytepp m_Data;
long unsigned int m_ImageWidth;
long unsigned int m_ImageHeight;
// Enums
enum PNGBOOL {NOT_PNG, PNG};
enum PNGERRORS {ERROR, SUCCESS};
};
#include "pngfilereader.h"
#include <stdexcept>
PNGFileReader::PNGFileReader() :
m_pPNG(NULL),
m_pPNGInfo(NULL),
m_pPNGEndInfo(NULL),
m_Data(NULL),
m_ImageWidth(0),
m_ImageHeight(0)
{
}
PNGFileReader::~PNGFileReader()
{
for (unsigned long int i = 0; i < m_ImageHeight; ++i) {
if (m_Data[i]) {
delete m_Data[i];
m_Data[i] = NULL;
}
}
if (m_Data) {
delete m_Data;
m_Data = NULL;
}
}
// Public Exposed API
bool PNGFileReader::compress_raw_to_png(uint8_t m_Data, int size)
{
return PNGFileReader::SUCCESS;
}
bool PNGFileReader::decompress_png_to_raw(const std::string &path)
{
return read_png(path);
}
// Getters
long unsigned int PNGFileReader::get_image_width()
{
return m_ImageWidth;
}
long unsigned int PNGFileReader::get_image_height()
{
return m_ImageHeight;
}
void PNGFileReader::get_image_data(
std::vector<std::vector<unsigned char> > &data)
{
for (unsigned long int i = 0; i < m_ImageHeight; ++i) {
std::vector<unsigned char> v;
data.push_back(v);
for (unsigned long int j = 0; j < m_ImageWidth; ++j) {
std::vector<unsigned char> *vp = &data[i];
vp->push_back(m_Data[i][j]);
}
}
}
// Private Methods
bool PNGFileReader::read_png(const std::string &path)
{
/*
* Open up the file to read (path) in binary mode
* first so that if anything goes wrong with libpng
* we won't have much to undo
*/
const char *c_path = path.c_str();
FILE *fp = fopen(c_path, "rb");
if (!fp)
return PNGFileReader::ERROR;
/*
* Read the first BYTES_TO_READ bytes from file
* then determine if it is a png file or
* not. If png_sig_cmp == 0 all is okay
*/
enum {BYTES_TO_READ = 8};
unsigned char sig[BYTES_TO_READ];
if (!fread(sig, 1, BYTES_TO_READ, fp)) {
fclose(fp);
return PNGFileReader::ERROR;
}
bool is_png = !png_sig_cmp(sig, 0, BYTES_TO_READ);
if (!is_png) {
fclose(fp);
return PNGFileReader::ERROR;
}
if (!this->create_png_structs(fp)) {
fclose(fp);
return PNGFileReader::ERROR;
}
/*
* For error handling purposes. Set a long pointer
* back to this function to handle all error related
* to file IO
*/
if (setjmp(png_jmpbuf(m_pPNG)))
{
png_destroy_read_struct(&m_pPNG, &m_pPNGInfo, &m_pPNGEndInfo);
fclose(fp);
return PNGFileReader::ERROR;
}
/*
* Set up the input code for FILE openend in binary mode,
* and tell libpng we have already read BYTES_TO_READ btyes from
* signature
*/
png_init_io(m_pPNG, fp);
png_set_sig_bytes(m_pPNG, BYTES_TO_READ);
/*
* Using the lowlevel interface to lib png ...
*/
png_read_info(m_pPNG, m_pPNGInfo);
m_ImageHeight = png_get_image_height(m_pPNG, m_pPNGInfo);
m_ImageWidth = png_get_rowbytes(m_pPNG, m_pPNGInfo);
this->alloc_data();
png_read_image(m_pPNG, m_Data);
png_read_end(m_pPNG, NULL);
png_destroy_read_struct(&m_pPNG, &m_pPNGInfo, &m_pPNGEndInfo);
fclose(fp);
return PNGFileReader::SUCCESS;
}
bool PNGFileReader::create_png_structs(FILE *fp)
{
/*
* Create the pointer to main libpng struct, as well as
* two info structs to maintain information after, and
* prior to all operations on png m_Data. Only necessary
* to release resource after function succeeds.
*/
m_pPNG = png_create_read_struct(PNG_LIBPNG_VER_STRING, (png_voidp)NULL,
NULL, NULL);
if (!m_pPNG)
{
fclose(fp);
return PNGFileReader::ERROR;
}
m_pPNGInfo = png_create_info_struct(m_pPNG);
if (!m_pPNGInfo)
{
png_destroy_read_struct(&m_pPNG, (png_infopp)NULL,(png_infopp)NULL);
fclose(fp);
return PNGFileReader::ERROR;
}
m_pPNGEndInfo = png_create_info_struct(m_pPNG);
if (!m_pPNGEndInfo)
{
png_destroy_read_struct(&m_pPNG, &m_pPNGInfo, (png_infopp)NULL);
fclose(fp);
return PNGFileReader::ERROR;
}
return PNGFileReader::SUCCESS;
}
bool PNGFileReader::free_data()
{
if (m_ImageHeight == 0 || m_ImageWidth == 0)
return PNGFileReader::ERROR;
for (unsigned long int i = 0; i < m_ImageHeight; ++i) {
if (m_Data[i]) {
delete m_Data[i];
m_Data[i] = NULL;
}
}
if (m_Data) {
delete m_Data;
m_Data = NULL;
}
return PNGFileReader::SUCCESS;
}
bool PNGFileReader::alloc_data()
{
if (m_ImageHeight == 0 || m_ImageWidth == 0)
return PNGFileReader::ERROR;
if (m_Data != NULL)
this->free_data();
m_Data = new png_bytep[m_ImageHeight]();
for (unsigned long int i = 0; i < m_ImageHeight; ++i) {
m_Data[i] = NULL;
}
try {
for (unsigned long int i = 0; i < m_ImageHeight; ++i) {
m_Data[i] = new png_byte[m_ImageWidth];
}
}
catch (std::bad_alloc e) {
for (unsigned long int i = 0; i < m_ImageHeight; ++i) {
if (m_Data[i]) {
delete m_Data[i];
m_Data[i] = NULL;
}
}
if (m_Data) {
delete m_Data;
m_Data = NULL;
}
throw e;
}
return PNGFileReader::SUCCESS;
}
A "raw" file that is intended to be used with a camera-image processing program like Raw Studio and Irfraview is not a raw-binary dump of the image-data with no header. Instead the "raw" moniker refers to the fact that the image has a minimal amount of image-processing applied in-camera. For instance, the image-data may still be a single-channel monochrome image from the camera's bayer-pattern CFA, or no white-balance, color-matrix, etc. has been applied, etc. Either way, the image-data is still formatted in a standard binary image file format complete with a header, data-packing method, etc. Examples include formats such as Adobe's DNG file format (which is based on TIFF), or proprietary formats from camera manufacturer's themselves such as Canon's CR2, Nikon's NEF, etc.
So if you want these raw-file processing programs to read your "raw" file image data, you'll have to read-up on the binary data specifications the raw-file formats they support, and then re-format the original PNG image-data correctly.
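That said, if the goal is just to eyeball the decoded pixels in an ordinary viewer, an easier route is to write a header the viewer already understands. The sketch below (write_ppm is my own helper, not part of the code above) wraps the rows in a binary PPM (P6) header, which IrfanView can open directly; it assumes the rows hold 8-bit RGB data (3 bytes per pixel), which is an assumption about what get_image_data() returned, not something the reader class guarantees:
#include <cstddef>
#include <fstream>
#include <string>
#include <vector>

// Hypothetical helper: write rows of 8-bit RGB bytes as a binary PPM (P6) file.
void write_ppm(const std::string &path,
               const std::vector<std::vector<unsigned char> > &rows,
               std::size_t width_pixels)
{
    std::ofstream out(path, std::ios::binary);
    out << "P6\n" << width_pixels << " " << rows.size() << "\n255\n";
    for (const auto &row : rows)
        out.write(reinterpret_cast<const char *>(row.data()),
                  static_cast<std::streamsize>(row.size()));
}
Note that get_image_width() in the reader class actually returns png_get_rowbytes(), so for RGB data width_pixels would be that value divided by 3.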
How can I generate SHA1 or SHA2 hashes using the OpenSSL library?
I searched google and could not find any function or example code.
From the command line, it's simply:
printf "compute sha1" | openssl sha1
You can invoke the library like this:
#include <stdio.h>
#include <string.h>
#include <openssl/sha.h>
int main()
{
unsigned char ibuf[] = "compute sha1";
unsigned char obuf[20];
SHA1(ibuf, strlen((const char *)ibuf), obuf);
int i;
for (i = 0; i < 20; i++) {
printf("%02x ", obuf[i]);
}
printf("\n");
return 0;
}
OpenSSL has horrible documentation with no code examples, but here you are:
#include <openssl/sha.h>
bool simpleSHA256(void* input, unsigned long length, unsigned char* md)
{
SHA256_CTX context;
if(!SHA256_Init(&context))
return false;
if(!SHA256_Update(&context, (unsigned char*)input, length))
return false;
if(!SHA256_Final(md, &context))
return false;
return true;
}
Usage:
unsigned char md[SHA256_DIGEST_LENGTH]; // 32 bytes
if(!simpleSHA256(<data buffer>, <data length>, md))
{
// handle error
}
Afterwards, md will contain the binary SHA-256 message digest. Similar code can be used for the other SHA family members, just replace "256" in the code.
If you have larger data, you of course should feed data chunks as they arrive (multiple SHA256_Update calls).
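One caveat worth adding: the low-level SHA256_Init / SHA256_Update / SHA256_Final functions are deprecated as of OpenSSL 3.0, and the EVP digest interface is the suggested replacement. A minimal sketch along the same lines as simpleSHA256 above (the function name is mine):
#include <openssl/evp.h>

// Same idea as simpleSHA256, but via the non-deprecated EVP interface.
// Swap EVP_sha256() for EVP_sha1(), EVP_sha512(), ... for other digests.
bool simpleSHA256_evp(const void* input, size_t length, unsigned char* md, unsigned int* md_len)
{
    EVP_MD_CTX* context = EVP_MD_CTX_new();
    if (!context)
        return false;

    bool ok = EVP_DigestInit_ex(context, EVP_sha256(), nullptr)
           && EVP_DigestUpdate(context, input, length)   // call repeatedly for chunked input
           && EVP_DigestFinal_ex(context, md, md_len);   // md needs SHA256_DIGEST_LENGTH bytes (EVP_MAX_MD_SIZE to be safe)

    EVP_MD_CTX_free(context);
    return ok;
}
The same three EVP calls also cover the chunked case: just call EVP_DigestUpdate once per chunk.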
Adaptation of @AndiDog's version for big files:
#include <fstream>
#include <iomanip>
#include <optional>
#include <sstream>
#include <string>
#include <openssl/sha.h>

static const int K_READ_BUF_SIZE{ 1024 * 16 };
std::optional<std::string> CalcSha256(std::string filename)
{
// Initialize openssl
SHA256_CTX context;
if(!SHA256_Init(&context))
{
return std::nullopt;
}
// Read file and update calculated SHA
char buf[K_READ_BUF_SIZE];
std::ifstream file(filename, std::ifstream::binary);
while (file.good())
{
file.read(buf, sizeof(buf));
if(!SHA256_Update(&context, buf, file.gcount()))
{
return std::nullopt;
}
}
// Get Final SHA
unsigned char result[SHA256_DIGEST_LENGTH];
if(!SHA256_Final(result, &context))
{
return std::nullopt;
}
// Transform byte-array to string
std::stringstream shastr;
shastr << std::hex << std::setfill('0');
for (const auto &byte: result)
{
shastr << std::setw(2) << (int)byte;
}
return shastr.str();
}
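Usage might look something like this (the path is just an example):
#include <iostream>

int main()
{
    if (auto digest = CalcSha256("/tmp/file1"))
        std::cout << *digest << "\n";
    else
        std::cerr << "hashing failed\n";
    return 0;
}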
The correct syntax at the command line should be
echo -n "compute sha1" | openssl sha1
otherwise you'll hash the trailing newline character as well.
Here is an OpenSSL example of calculating a SHA-1 digest using BIO:
#include <openssl/bio.h>
#include <openssl/evp.h>
#include <stdexcept>
#include <string>
#include <vector>
std::string sha1(const std::string &input)
{
BIO * p_bio_md = nullptr;
BIO * p_bio_mem = nullptr;
try
{
// make chain: p_bio_md <-> p_bio_mem
p_bio_md = BIO_new(BIO_f_md());
if (!p_bio_md) throw std::bad_alloc();
BIO_set_md(p_bio_md, EVP_sha1());
p_bio_mem = BIO_new_mem_buf((void*)input.c_str(), input.length());
if (!p_bio_mem) throw std::bad_alloc();
BIO_push(p_bio_md, p_bio_mem);
// read through p_bio_md
// read sequence: buf <<-- p_bio_md <<-- p_bio_mem
std::vector<char> buf(input.size());
for (;;)
{
auto nread = BIO_read(p_bio_md, buf.data(), buf.size());
if (nread < 0) { throw std::runtime_error("BIO_read failed"); }
if (nread == 0) { break; } // eof
}
// get result
char md_buf[EVP_MAX_MD_SIZE];
auto md_len = BIO_gets(p_bio_md, md_buf, sizeof(md_buf));
if (md_len <= 0) { throw std::runtime_error("BIO_gets failed"); }
std::string result(md_buf, md_len);
// clean
BIO_free_all(p_bio_md);
return result;
}
catch (...)
{
if (p_bio_md) { BIO_free_all(p_bio_md); }
throw;
}
}
Though it's longer than just calling the SHA1 function from OpenSSL, it's more universal and can be reworked for use with file streams (and thus can process data of any length).
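Usage of the sha1() helper above, printing the raw 20-byte digest as hex (a small example of my own):
#include <cstdio>
#include <string>

int main()
{
    std::string digest = sha1("compute sha1");  // 20 raw bytes
    for (unsigned char c : digest)
        std::printf("%02x", c);
    std::printf("\n");
    return 0;
}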
C version of @Nayfe's code, generating a SHA1 hash from a file:
#include <stdio.h>
#include <stdlib.h>
#include <openssl/sha.h>
static const int K_READ_BUF_SIZE = { 1024 * 16 };
unsigned char* calculateSHA1(char *filename)
{
if (!filename) {
return NULL;
}
FILE *fp = fopen(filename, "rb");
if (fp == NULL) {
return NULL;
}
unsigned char* sha1_digest = malloc(sizeof(char)*SHA_DIGEST_LENGTH);
SHA_CTX context;
if(!SHA1_Init(&context))
return NULL;
unsigned char buf[K_READ_BUF_SIZE];
while (!feof(fp))
{
size_t total_read = fread(buf, 1, sizeof(buf), fp);
if(!SHA1_Update(&context, buf, total_read))
{
return NULL;
}
}
fclose(fp);
if(!SHA1_Final(sha1_digest, &context))
return NULL;
return sha1_digest;
}
It can be used as follows:
unsigned char *sha1digest = calculateSHA1("/tmp/file1");
The sha1digest variable contains the SHA1 hash.
You can print it on the screen using the following for-loop:
char *sha1hash = (char *)malloc(sizeof(char) * 41);
sha1hash[40] = '\0';
int i;
for (i = 0; i < SHA_DIGEST_LENGTH; i++)
{
sprintf(&sha1hash[i*2], "%02x", sha1digest[i]);
}
printf("SHA1 HASH: %s\n", sha1hash);