Im going to send a int64 over tcp and need to serialize&deserialize it.
First i cast it to a uin64.
I byteshift it into an uint8 array.
Then i byteshift the array into a uint64
And finally cast it back to a int.
But it returns a different value than i put in...
I have checked the hex values, but they are supposed to be correct...
Code:
#include <math.h>
#include <string.h>
#include <iostream>
#include <iomanip>
//SER & D-SER int64
std::array<uint8_t, 8> int64ToBytes(int64_t val)
{
uint64_t v = (uint64_t)val;
std::array<uint8_t, 8> bytes;
bytes[0] = (v&0xFF00000000000000)>>56;
bytes[1] = (v&0x00FF000000000000)>>48;
bytes[2] = (v&0x0000FF0000000000)>>40;
bytes[3] = (v&0x000000FF00000000)>>32;
bytes[4] = (v&0x00000000FF000000)>>24;
bytes[5] = (v&0x0000000000FF0000)>>16;
bytes[6] = (v&0x000000000000FF00)>>8;
bytes[7] = (v&0x00000000000000FF);
return bytes;
}
int64_t bytesToInt64(uint8_t bytes[8])
{
uint64_t v = 0;
v |= bytes[0]; v <<= 8;
v |= bytes[1]; v <<= 8;
v |= bytes[3]; v <<= 8;
v |= bytes[4]; v <<= 8;
v |= bytes[5]; v <<= 8;
v |= bytes[6]; v <<= 8;
v |= bytes[7]; v <<= 8;
v |= bytes[8];
return (int64_t)v;
}
int main() {
uint8_t bytes[8] = {0};
int64_t val = 1234567890;
//Print value to be received on the other side
std::cout << std::dec << "INPUT: " << val << std::endl;
//Serialize
memcpy(&bytes, int64ToBytes(val).data(), 8);
//Deserialize
int64_t val2 = bytesToInt64(bytes);
//print deserialized int64
std::cout << std::dec << "RESULT: " << val2 << std::endl;
}
Output:
INPUT: 1234567890
RESULT: 316049379840
Been trying to solve this for a day now, cant find the problem
Thanks.
Try using the uint64_t htobe64(uint64_t host_64bits) and uint64_t be64toh(uint64_t big_endian_64bits) functions to convert from host to big endian (network order) and from network order to host order respectively.
You are shifting the entire value. Try something like:
(bytes[0] << 56) |
(bytes[1] << 48) |
... (bytes[7])
There is no 9th byte (ie. byte[8]).
you are missing a bit shift in the bytesToInt64 function:
below you find the corrected bytesToInt64 function:
int64_t bytesToInt64(uint8_t bytes[8])
{
uint64_t v = 0;
v |= bytes[0]; v <<= 8;
v |= bytes[1]; v <<= 8;
v |= bytes[2]; v <<= 8;
v |= bytes[3]; v <<= 8;
v |= bytes[4]; v <<= 8;
v |= bytes[5]; v <<= 8;
v |= bytes[6]; v <<= 8;
v |= bytes[7];
return (int64_t)v;
}
If you're transferring data between machines with the same endianness you don't need to serialize the data byte by byte, you can just send the data as it is represented in memory. In this case you don't need anything like that you can just use your memcpy call like this:
// Serialize
memcpy(&bytes, &val, sizeof(val));
// Deserialize
int64_t val2;
memcpy(&val2, &bytes, sizeof(val));
If you're sending data between hosts with different endianness you should send it as you find it in the aswer from Roger, basically you have to make sure the data is represented in the same way on both ends.
here's a variant which not only serializes but will work with any type of int and across any platforms
#include <iostream>
#include <type_traits>
using namespace std;
template <typename T> enable_if_t<is_integral_v<T>> serialize(T t, char *buf)
{
for(auto i = 0U; i < sizeof(t); ++i) {
buf[i] = t & 0xff;
t >>= 8;
}
}
template <typename T> enable_if_t<is_integral_v<T>> deserialize(T &t, char const *buf)
{
for(auto i = 0U; i < sizeof(t); ++i) {
t <<= 8;
t |= buf[sizeof(t) - 1 - i];
}
}
int main() {
int64_t t1 = 0x12345678;
int64_t t2{0};
char buffer[sizeof(t1)];
serialize(t1, buffer);
deserialize(t2, buffer);
cout << "I got " << hex << t2 << endl;
}
you should probably use containers and parts to serialize/deserialize data to make sure you don't overflow your buffer (considering you are transferring more than one int at a time)
This should work. You may also need to check the input array is the right size in your bytesToInt64 function.
std::array<uint8_t, 8> int64ToBytes(int64_t val)
{
uint64_t v = (uint64_t)val;
std::array<uint8_t, 8> bytes;
for (size_t i = 0; i < 8; i++)
{
bytes[i] = (v >> (8 * (7 - i))) & 0xFF;
}
return bytes;
}
int64_t bytesToInt64(uint8_t bytes[8])
{
uint64_t v = 0;
for (size_t i = 0; i < 8; i++)
{
v |= (bytes[i] << (8 * (7 - i)));
}
return (int64_t)v;
}
Related
The first four variables are unsigned char with value 0 or 1.
The messageType is unsigned char, example: 48.
I was wondering if there is some way to do this code more clearer.
I am trying to not use any library.
std::vector<unsigned char> packet(2);
packet.at(0) |= (retain) << 0;
packet.at(0) |= (qosLevel >> 0) << 1;
packet.at(0) |= (qosLevel >> 1) << 2;
packet.at(0) |= (dupFlag) << 3;
packet.at(0) |= ((messageType >> 4) & 1) << 4;
packet.at(0) |= ((messageType >> 5) & 1) << 5;
packet.at(0) |= ((messageType >> 6) & 1) << 6;
packet.at(0) |= ((messageType >> 7) & 1) << 7;
If you are sure that the variables don't contain any extra bits then you can write:
unsigned char packet[2] = { 0 };
packet[0] |= retain;
packet[0] |= qosLevel << 1;
packet[0] |= dupFlag << 3;
packet[0] |= messageType;
Alternatively you could mask out any potential extra bits:
unsigned char packet[2] = { 0 };
packet[0] |= retain & 1;
packet[0] |= (qosLevel & 3) << 1;
packet[0] |= (dupFlag & 1) << 3;
packet[0] |= messageType & 0xf0;
Closed. This question needs details or clarity. It is not currently accepting answers.
Want to improve this question? Add details and clarify the problem by editing this post.
Closed 3 years ago.
Improve this question
Given this checksum calculation function in C++
int calcCrcPartial(unsigned short* lpdwBlockData, unsigned short dwBlockSizeInBytes, int iInitialCrc)
{
unsigned short dwBlocksSizeInWords;
int bIsSigned;
signed int j;
signed int i;
unsigned short dwDataItem;
bool bIsNegative;
// number of WORD blocks
dwBlocksSizeInWords = dwBlockSizeInBytes >> 1;
for (i = 0; ; ++i)
{
if (dwBlocksSizeInWords <= i)
{
break;
}
dwDataItem = lpdwBlockData[i];
if (dwDataItem != 0)
{
bInvalidCrc = false;
}
for (j = 0; j <= 15; ++j)
{
bIsSigned = (dwDataItem & 0x8000u) != 0;
dwDataItem <<= 1;
bIsNegative = iInitialCrc < 0;
iInitialCrc <<= 1;
iInitialCrc += bIsSigned;
if (bIsNegative)
{
iInitialCrc ^= 0x400007u;
}
}
}
return iInitialCrc;
}
Task:
How to write a function to generate a valid block of data lpdwBlockData (512 bytes) that will make function calcCrcPartial() to return 0 for any given iInitialCrc (calculated from previous calls to this function)?
The CRC is not stored in the block.
The generated block of data (512 bytes) can contain any data.
I have tried to fill the buffer with random data in hope it will hit the 0 checksum value after CRC calculation, but I guess it's not the way...
How to reverse this algorithm and generate valid data to make the calcCrcPartial() on the generated buffer data and provided iInitialCrc to return value 0?
This is not a normal CRC. The initial CRC is cycled left 16 times, then the first short is XOR'ed to the lower 16 bits of the CRC, which is then cycled again 16 times, and the next short is XOR'ed to the lower 16 bits of the CRC. If the data is just 2 shorts, it's the same as cycling the initial CRC 32 times, then XOR'ing the 2 shorts to the cycled CRC. To get the CRC==0, just set the 2 shorts to the initial CRC cycled 32 times. Example code below.
How to calculate data buffer to zero checksum value with as little data as possible
Just need 2 shorts to do this. Set the 2 shorts = 0, calculate CRC, then set the 2 shorts to the calculated CRC. This will result in a CRC of 0 for any initial CRC.
I switched to a table driven version of the checksum function, but the code below also includes a "cleaned up" version of the questions example CRC function.
This code compares the CRC outputs from the questions code, an alternate version, and a table driven version:
#include <iostream>
#include <iomanip>
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;
uint32_t crctbl[65536];
void gentbl()
{
uint32_t crc;
int i, j;
for(j = 0; j < 0x10000; j++){
crc = j<<16;
for(i = 0; i < 16; i++)
// assumes twos complement
crc = (crc<<1)^((0-(crc>>31))&0x400007u);
crctbl[j] = crc;
}
}
int calcCrcPartial(unsigned short* lpdwBlockData, unsigned short dwBlockSizeInBytes, int iInitialCrc)
{
unsigned short dwBlocksSizeInWords;
int bIsSigned;
signed int j;
signed int i;
unsigned short dwDataItem;
bool bIsNegative;
// number of WORD blocks
dwBlocksSizeInWords = dwBlockSizeInBytes >> 1;
for (i = 0; ; ++i)
{
if (dwBlocksSizeInWords <= i)
{
break;
}
dwDataItem = lpdwBlockData[i];
// bInvalidCrc not delcared and not used
// if (dwDataItem != 0)
// {
// bInvalidCrc = false;
// }
for (j = 0; j <= 15; ++j)
{
bIsSigned = (dwDataItem & 0x8000u) != 0;
dwDataItem <<= 1;
bIsNegative = iInitialCrc < 0;
iInitialCrc <<= 1;
iInitialCrc += bIsSigned;
if (bIsNegative)
{
iInitialCrc ^= 0x400007u;
}
}
}
return iInitialCrc;
}
// alternate version of calcCrcPartial
uint32_t calcCrcPartiala(uint16_t* lpwBlockData, uint16_t wBlockSizeInBytes, uint32_t iInitialCrc)
{
int sz = wBlockSizeInBytes >> 1;
int i;
while(sz--){
for(i = 0; i < 16; i++)
// assumes twos complement
iInitialCrc = (iInitialCrc<<1)^((0-(iInitialCrc>>31))&0x400007u);
iInitialCrc ^= *lpwBlockData++;
}
return iInitialCrc;
}
// table version of calcCrcPartial
uint32_t calcCrcPartialt(uint16_t* lpwBlockData, uint16_t wBlockSizeInBytes, uint32_t iInitialCrc)
{
int sz = wBlockSizeInBytes >> 1;
while(sz--)
iInitialCrc = (iInitialCrc<<16)^crctbl[iInitialCrc>>16]^*lpwBlockData++;
return iInitialCrc;
}
int main()
{
uint16_t data[] = {0x0000, 0x0000};
uint32_t iCrc, oCrc, oCra, oCrt;
gentbl();
iCrc = 0x00000000u;
do{
oCrc = calcCrcPartial (data, sizeof(data), iCrc);
oCra = calcCrcPartiala(data, sizeof(data), iCrc);
oCrt = calcCrcPartiala(data, sizeof(data), iCrc);
if(oCrc != oCra || oCrc != oCrt){
std::cout << "mismatch" << std::endl;
break;}
if ((iCrc & 0x0ffffffu) == 0)
std::cout << std::hex << iCrc << std::endl;
}while(++iCrc != 0x10000000u);
return 0;
}
This code tests all 4 billion possible initial CRCs.
#include <iostream>
#include <iomanip>
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;
uint32_t crctbl[65536];
void gentbl()
{
uint32_t crc;
int i, j;
for(j = 0; j < 0x10000; j++){
crc = j<<16;
for(i = 0; i < 16; i++)
// assumes twos complement
crc = (crc<<1)^((0-(crc>>31))&0x400007u);
crctbl[j] = crc;
}
}
uint32_t calcCrcPartial(uint16_t* lpwBlockData, uint16_t wBlockSizeInBytes, uint32_t iInitialCrc)
{
int sz = wBlockSizeInBytes >> 1;
while(sz--)
iInitialCrc = (iInitialCrc<<16)^crctbl[iInitialCrc>>16]^*lpwBlockData++;
return iInitialCrc;
}
// alternate version of questions code
uint32_t calcCrcPartialx(uint16_t* lpwBlockData, uint16_t wBlockSizeInBytes, uint32_t iInitialCrc)
{
int sz = wBlockSizeInBytes >> 1;
int i;
while(sz--){
for(i = 0; i < 16; i++)
// assumes twos complement
iInitialCrc = (iInitialCrc<<1)^((0-(iInitialCrc>>31))&0x400007u);
iInitialCrc ^= *lpwBlockData++;
}
return iInitialCrc;
}
int main()
{
uint16_t data[] = {0x0000, 0x0000};
uint32_t iCrc, oCrc;
gentbl();
iCrc = 0x00000000u;
do{
// oCrc = iCrc cycled 32 times
data[0] = 0x0000;
data[1] = 0x0000;
oCrc = calcCrcPartial(data, 4, iCrc);
// store oCrc and verify new crc == 0
data[0] = (oCrc>>16);
data[1] = (oCrc>> 0);
oCrc = calcCrcPartial(data, 4, iCrc);
if (oCrc != 0) {
std::cout << "error" << std::endl;
break;
}
if ((iCrc & 0xfffffffu) == 0)
std::cout << std::hex << iCrc << std::endl;
}while(++iCrc != 0x00000000u);
return 0;
}
How to calculate data buffer to zero checksum value with as little data as possible
If this means the minimum number of error bits, then in a buffer of 34 shorts, all zero, and initial CRC = 0, 6 bits in specific locations (based on poly and initial CRC) need to be toggled to also produce a CRC = 0:
0x8000, 0x0000, 0x0000, 0x0000, 0x0000,0x0000,0x0000,0x0000,
0x0000, 0x0000, 0x8000, 0x0000, 0x0000,0x0000,0x0000,0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000,0x0000,0x0000,0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000,0x0000,0x0020,0x8003,
0x0000, 0x0000
or simple still use the CRC polynomial which only has 5 bits = 1, only 5 words needed:
0x0100, 0x4000, 0x0700, 0x0000, 0x0000
This is how this version of CRC could be used for 512 bytes of data and 4 bytes of CRC:
#include <stdlib.h> // for rand()
// # of shorts in data, not including CRC
#define COUNT 256
int main()
{
uint16_t data[COUNT+2];
uint32_t iCrc, oCrc;
int i;
gentbl();
// fill data with psuedo random values
for(i = 0; i < COUNT; i++)
data[i] = ((rand()>>4)&0xff)|((rand()<<4)&0xff00);
iCrc = 0x00000000u;
do{
// generate crc
data[COUNT+0] = 0x0000u;
data[COUNT+1] = 0x0000u;
oCrc = calcCrcPartial(data, sizeof(data), iCrc);
// append crc to data
data[COUNT+0] = (oCrc>>16);
data[COUNT+1] = (oCrc>> 0);
// check crc
oCrc = calcCrcPartial(data, sizeof(data), iCrc);
if (oCrc != 0) {
std::cout << "error" << std::endl;
break;
}
if ((iCrc & 0xfffffu) == 0)
std::cout << std::hex << iCrc << std::endl;
}while(++iCrc != 0x01000000u);
return 0;
}
Is it possible (if so, how) to convert a struct of integers into a bitmask. One bit for each integer (0 if the int is 0, otherwise 1). For example
struct Int_List_t
{
uint64_t int1;
uint64_t int2;
uint64_t int3;
uint64_t int4;
} int_list={10,0,5,0};
char int_mask = somefunction(int_list);
//Would contain 1010
||||
|||+-- int4 is 0
||+--- int3 is not 0
|+---- int2 is 0
+----- int1 is not 0
You could just do it explicitly:
char mask(const Int_List_t& vals)
{
return (vals.int1 ? 0x8 : 0x0) |
(vals.int2 ? 0x4 : 0x0) |
(vals.int3 ? 0x2 : 0x0) |
(vals.int4 ? 0x1 : 0x0);
}
If you passed in an array instead of a struct, you could write a loop:
template <size_t N>
uint64_t mask(uint64_t (&vals)[N])
{
uint64_t result = 0;
uint64_t mask = 1 << (N - 1);
for (size_t i = 0; i < N; ++i, mask >>= 1) {
result |= (vals[i] ? mask : 0);
}
return result;
}
If you're open to completely bypassing any type safety whatsoever, you could even implement the above by just reinterpreting your object to be a pointer, although I wouldn't necessarily recommend it:
template <typename T>
uint64_t mask(const T& obj)
{
const uint64_t* p = reinterpret_cast<const uint64_t*>(&obj);
const uint64_t N = sizeof(T)/8;
uint64_t result = 0;
uint64_t mask = 1 << (N - 1);
for (size_t i = 0; i < N; ++i, ++p, mask >>= 1) {
result |= (*p ? mask : 0);
}
return result;
}
I have two for loops that I want to write in a function as one. The problem is that it differ only in one instruction
for (int i = 1; i <= fin_cabecera - 1 ; i++ ){
buffer[i] &= 0xfe;
if (bitsLetraRestantes < 0) {
bitsLetraRestantes = 7;
mask = 0x80;
letra = sms[++indiceLetra]; //*differs here*
}
char c = (letra & mask) >> bitsLetraRestantes--;
mask >>= 1;
buffer[i] ^= c;
}
And the other
for (int i = datos_fichero; i <= tamanio_en_bits + datos_fichero; i++){
buffer[i] &= 0xfe;
if (bitsLetraRestantes < 0) {
bitsLetraRestantes = 7;
mask = 0x80;
f.read(&letra, 1); //*differs here*
}
char c = (letra & mask) >> bitsLetraRestantes--;
mask >>= 1;
buffer[i] ^= c;
}
I thought in something like this:
void write_bit_by_bit(unsigned char buffer[], int from, int to, bool type) {
for (int i = to; i <= from; i++) {
buffer[i] &= 0xfe;
if (bitsLetraRestantes < 0) {
bitsLetraRestantes = 7;
mask = 0x80;
type ? (letra = sms[++indiceLetra]) : f.read(&letra, 1);
}
char c = (letra & mask) >> bitsLetraRestantes--;
mask >>= 1;
buffer[i] ^= c;
}
}
But I think there has to be a better method.
Context:
I will give more context (I will try explain it as better as I can within my language limitations). I have to read one byte each time because The Buffer variable represents a image pixel. sms is a message that have to be hidden within the image, and letra is a single char of that message. In order to not modify the aspect of the image, each bit of each character have to be written in the last bit of each pixel. Let me give you and example.
letra = 'H' // 01001000 in binary
buffer[0] = 255 // white pixel 11111111
In order to hide the H char, I will need 8 pixel:
The result will be like:
buffer[0] //11111110,
buffer[1] //11111111
buffer[2] //11111110
buffer[3] //11111110
buffer[4] //11111111
buffer[5] //11111110
buffer[6]//11111110
buffer[7]//11111110
The H is hidden in the last bit of the image. I hope I explained well.
[Solution]
Thanks to #anatolyg I've rewrited the code and now works just as I wanted. Here is how it looks:
void write_bit_by_bit(unsigned char buffer[], ifstream& f,int from, int to, char sms[], bool type){
unsigned short int indiceLetra = 0;
short int bitsLetraRestantes = 7;
unsigned char mask = 0x80; //Empezamos por el bit más significativo (10000000)
char* file_buffer;
if(type){ //Write file data
int number_of_bytes_to_read = get_file_size(f);
file_buffer = new char[number_of_bytes_to_read];
f.read(file_buffer, number_of_bytes_to_read);
}
const char* place_to_get_stuff_from = type ? file_buffer : sms;
char letra = place_to_get_stuff_from[0];
for (int i = from; i <= to; i++) {
buffer[i] &= 0xfe; //hacemos 0 último bit con máscara 11111110
//TODO: Hacer con dos for
if (bitsLetraRestantes < 0) {
bitsLetraRestantes = 7;
mask = 0x80;
letra = place_to_get_stuff_from[++indiceLetra];//letra = sms[++indiceLetra];
}
char c = (letra & mask) >> bitsLetraRestantes--;
mask >>= 1;
buffer[i] ^= c; //Almacenamos en el ultimo bit del pixel el valor del caracter
}
}
int ocultar(unsigned char buffer[],int tamImage, char sms[], int tamSms){
ifstream f(sms);
if (f) {
strcpy(sms,basename(sms));
buffer[0] = 0xff;
int fin_cabecera = strlen(sms)*8 + 1;
buffer[fin_cabecera] = 0xff;
write_bit_by_bit(buffer, f, 1, fin_cabecera -1, sms, WRITE_FILE_NAME);
int tamanio_en_bits = get_file_size(f) * 8;
int datos_fichero = fin_cabecera + 1;
write_bit_by_bit(buffer, f, datos_fichero, tamanio_en_bits + datos_fichero, sms, WRITE_FILE_DATA);
unsigned char fin_contenido = 0xff;
short int bitsLetraRestantes = 7;
unsigned char mask = 0x80;
for (int i = tamanio_en_bits + datos_fichero + 1;
i < tamanio_en_bits + datos_fichero + 1 + 8; i++) {
buffer[i] &= 0xfe;
char c = (fin_contenido & mask) >> bitsLetraRestantes--;
mask >>= 1;
buffer[i] ^= c;
}
}
return 0;
}
Since you are talking about optimization here, consider performing the read outside the loop. This will be a major optimization (reading 10 bytes at once must be quicker than reading 1 byte 10 times). This will require an additional buffer for (the file?) f.
if (!type)
{
char f_buffer[ENOUGH_SPACE];
number = calc_number_of_bytes_to_read();
f.read(f_buffer, number);
}
for (...) {
// your code
}
After you have done this, your original question is easy to answer:
const char* place_to_get_stuff_from = type ? sms : f_buffer;
for (...) {
...
letra = place_to_get_stuff_from[++indiceLetra];
...
}
I want a fixed length string from a number just like struct.pack present in python but in c++. I thought of itoa (i,buffer,2) but problem can be that its length will depend on platform. Is there any way to make it independent of platform ?
If you're looking for a complete solution similar to Python's struct package, you might check out Google's Protocol Buffers Library. Using that will take care of a lot of issues (e.g. endian-ness, language-portability, cross-version compatibility) for you.
Here's a start:
typedef std::vector<uint8_t> byte_buffer;
template <std::size_t N>
void append_fixed_width(byte_buffer& buf, uintmax_t val) {
int shift = ((N - 1) * 8);
while (shift >= 0) {
uintmax_t mask = (0xff << shift);
buf.push_back(uint8_t((val & mask) >> shift));
shift -= 8;
}
}
template <typename IntType>
void append_bytes(byte_buffer& buf, IntType val) {
append_fixed_width<sizeof(IntType)>(buf, uintmax_t(val));
}
int main() { // usage example
byte_buffer bytes;
append_bytes(bytes, 1); // appends sizeof(int) bytes
append_bytes(bytes, 1ul); // appends sizeof(unsigned long) bytes
append_bytes(bytes, 'a'); // appends sizeof(int) bytes :p
append_bytes(bytes, char('a')); // appends 1 byte
return 0;
}
Append_bytes will append any integer type into a byte buffer represented using a std::vector<uint8_t>. Values are packed in big endian byte order. If you need to change this, then tweak append_fixed_width to traverse the value in a different order.
These functions build a raw byte buffer so whomever is decoding it is responsible for knowing what is in there. IIRC, this is what struct.pack does as well; in other words, the caller of struct.unpack needs to provide the same format string. You can write a variant of append_fixed_width to pack a TLV instead:
template <typename TagType, typename ValueType>
void append_tlv(byte_buffer& buf, TagType t, ValueType val) {
append_fixed_width<sizeof(TagType)>(buf, uintmax_t(t));
append_fixed_width<sizeof(std::size_t)>(buf, uintmax_t(sizeof(ValueType)));
append_fixed_width<sizeof(ValueType)>(buf, uintmax_t(val));
}
I would take a serious look at Jeremy's suggestion though. I wish that it had existed when I wrote all of the binary packing code that I have now.
You need to define an exact-width integer type through a typedef; you do that in a platform-specific manner. If you use C99, int16_t is predefined in <stdint.h>. You can then cast to that type, and type the memory representation of a variable:
int16_t val = (int16_t) orig_val;
void *buf = &val;
Notice that you still need to deal with endianness.
If you don't have C99, you can either use compile-time or run-time size tests. For compile-time tests, consider using autoconf, which already computes the sizes of the various primitive types, so that you can select a good type at compile time. At run-time, just have a series of sizeof tests. Notice that this is somewhat inappropriate for run-time, as the test will always come out with the same result. As an alternative to autoconf, you can also use compiler/system identification macros for a compile-time test.
The C++ way would be to use stringstream:
stringstream ss;
int number=/*your number here*/;
ss<<number;
and to get the buffer you'd use ss.str().c_str().
I made this implementation in c/c++ to compare the execution time of the pack function between python/php/dart/c++
https://github.com/dart-lang/sdk/issues/50708
#include <span>
#include <vector>
#include <cstdio>
#include <cstdint>
#include <iomanip>
#include <iostream>
#include "time.h"
#include <map>
#define STRUCT_ENDIAN_NOT_SET 0
#define STRUCT_ENDIAN_BIG 1
#define STRUCT_ENDIAN_LITTLE 2
static int myendian = STRUCT_ENDIAN_NOT_SET;
void debug_print2(const char *str, std::vector<unsigned char> vec)
{
std::cout << str;
for (auto i : vec)
std::cout << i;
std::cout << "\r\n";
}
int struct_get_endian(void)
{
int i = 0x00000001;
if (((char *)&i)[0])
{
return STRUCT_ENDIAN_LITTLE;
}
else
{
return STRUCT_ENDIAN_BIG;
}
}
static void struct_init(void)
{
myendian = struct_get_endian();
}
static void pack_int16_t(unsigned char **bp, uint16_t val, int endian)
{
if (endian == myendian)
{
*((*bp)++) = val;
*((*bp)++) = val >> 8;
}
else
{
*((*bp)++) = val >> 8;
*((*bp)++) = val;
}
}
static void pack_int32_t(unsigned char **bp, uint32_t val, int endian)
{
if (endian == myendian)
{
*((*bp)++) = val;
*((*bp)++) = val >> 8;
*((*bp)++) = val >> 16;
*((*bp)++) = val >> 24;
}
else
{
*((*bp)++) = val >> 24;
*((*bp)++) = val >> 16;
*((*bp)++) = val >> 8;
*((*bp)++) = val;
}
}
static void pack_int64_t(unsigned char **bp, uint64_t val, int endian)
{
if (endian == myendian)
{
*((*bp)++) = val;
*((*bp)++) = val >> 8;
*((*bp)++) = val >> 16;
*((*bp)++) = val >> 24;
*((*bp)++) = val >> 32;
*((*bp)++) = val >> 40;
*((*bp)++) = val >> 48;
*((*bp)++) = val >> 56;
}
else
{
*((*bp)++) = val >> 56;
*((*bp)++) = val >> 48;
*((*bp)++) = val >> 40;
*((*bp)++) = val >> 32;
*((*bp)++) = val >> 24;
*((*bp)++) = val >> 16;
*((*bp)++) = val >> 8;
*((*bp)++) = val;
}
}
static int pack(void *b, const char *fmt, long long *values, int offset = 0)
{
unsigned char *buf = (unsigned char *)b;
int idx = 0;
const char *p;
unsigned char *bp;
int ep = myendian;
int endian;
bp = buf + offset;
auto bpp = &bp;
if (STRUCT_ENDIAN_NOT_SET == myendian)
{
struct_init();
}
for (p = fmt; *p != '\0'; p++)
{
auto value = values[idx];
switch (*p)
{
case '=': // native
ep = myendian;
break;
case '<': // little-endian
endian = STRUCT_ENDIAN_LITTLE;
ep = endian;
break;
case '>': // big-endian
endian = STRUCT_ENDIAN_BIG;
ep = endian;
break;
case '!': // network (= big-endian)
endian = STRUCT_ENDIAN_BIG;
ep = endian;
break;
case 'b':
*bp++ = value;
break;
case 'c':
*bp++ = value;
break;
case 'i':
if (ep == STRUCT_ENDIAN_LITTLE)
{
*bp++ = value;
*bp++ = value >> 8;
*bp++ = value >> 16;
*bp++ = value >> 24;
}
else
{
*bp++ = value >> 24;
*bp++ = value >> 16;
*bp++ = value >> 8;
*bp++ = value;
}
break;
case 'h':
if (ep == STRUCT_ENDIAN_LITTLE)
{
*bp++ = value;
*bp++ = value >> 8;
}
else
{
*bp++ = value >> 8;
*bp++ = value;
}
break;
case 'q':
if (ep == STRUCT_ENDIAN_LITTLE)
{
*bp++ = value;
*bp++ = value >> 8;
*bp++ = value >> 16;
*bp++ = value >> 24;
*bp++ = value >> 32;
*bp++ = value >> 40;
*bp++ = value >> 48;
*bp++ = value >> 56;
}
else
{
*bp++ = value >> 56;
*bp++ = value >> 48;
*bp++ = value >> 40;
*bp++ = value >> 32;
*bp++ = value >> 24;
*bp++ = value >> 16;
*bp++ = value >> 8;
*bp++ = value;
}
break;
}
idx++;
}
return (bp - buf);
}
int main()
{
time_t start, end;
time(&start);
// std::ios_base::sync_with_stdio(false);
std::vector<unsigned char> myVector{};
myVector.reserve(100000000 * 16);
for (int i = 0; i < 100000000; i++) // 100000000
{
char bytes[BUFSIZ] = {'\0'};
long long values[4] = {64, 65, 66, 67};
pack(bytes, "iiii", values);
for (int j = 0; j < 16; j++)
{
myVector.push_back(bytes[j]);
}
}
time(&end);
auto v2 = std::vector<unsigned char>(myVector.begin(), myVector.begin() + 16);
debug_print2("result: ", v2);
double time_taken = double(end - start);
std::cout << "pack time: " << std::fixed
<< time_taken << std::setprecision(5);
std::cout << " sec " << std::endl;
return 0;
}