I'm trying to read formatted hex data into unsigned ints using the >> operator. The code I'm using is
#include <iostream>
#include <fstream>
using namespace std;

int main(int argc, char** argv)
{
    ifstream in(argv[1]);
    unsigned int addr;
    unsigned int op;
    unsigned int data;
    do
    {
        in >> hex >> addr >> hex >> op >> hex >> data;
        cout << addr << " " << op << " " << data << '\n';
        if (in.eof()) break;
    } while (1);
    return 0;
}
This works just fine on a 300-line file, but when I try it on a different file, it reads the 5th line repeatedly and then segfaults, and I cannot figure out why. The first five lines are
FD2C FF EB
4FE9 FF 32
276E FF 6E
5C09 FF A3
7739 FF 36
The offending line is
7739 FF 36
Any help is appreciated. Thanks!
Edit:
I modified my code so it looks like
#include <iostream>
#include <fstream>
using namespace std;

int main(int argc, char** argv)
{
    ifstream in(argv[1]);
    unsigned int addr;
    unsigned int op;
    unsigned int data;
    while (in >> hex >> addr >> hex >> op >> hex >> data)
    {
        cout << addr << " " << op << " " << data << '\n';
    }
    return 0;
}
This solves the problem of reading the 5th line over and over again, but it still segfaults, albeit on another line. I'm going to look further and see if I can pin it down.
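One thing worth checking (an assumption on my part, since the failing input isn't shown): if the program is run without a filename argument, argv[1] is a null pointer, and constructing an ifstream from it is undefined behavior and a classic source of segfaults. A minimal sketch with a guard:

#include <iostream>
#include <fstream>
using namespace std;

int main(int argc, char** argv)
{
    if (argc < 2)   // otherwise argv[1] would be a null pointer
    {
        cerr << "usage: " << argv[0] << " <file>\n";
        return 1;
    }
    ifstream in(argv[1]);
    unsigned int addr, op, data;
    while (in >> hex >> addr >> op >> data)   // hex is sticky; one mention is enough
        cout << addr << " " << op << " " << data << '\n';
    return 0;
}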
FF is the form-feed (new page) symbol, and it is followed by 36, which is the ASCII code of the dollar sign $; maybe something downstream is treating that value specially. Try changing unsigned int to unsigned char.
EDIT: The following reads everything in without problems, and the output is in hex values. Good luck.
#include <iostream>
#include <fstream>

int main(int argc, char** argv)
{
    std::ifstream in;
    in.open("tst.tst", std::ios::in);
    unsigned char addr;
    unsigned char op;
    unsigned char data;
    // note: operator>> into an unsigned char extracts a single character,
    // so std::hex has no effect on the extraction itself
    while (in >> addr >> op >> data)
    {
        std::cout << std::hex << (int)addr << " " << (int)op << " " << (int)data << "\n";
    }
    return 0;
}
The code below takes a hex string (every byte represented as its corresponding hex value), converts it to an unsigned char* buffer, and then converts it back to a hex string.
It tests the conversion from an unsigned char* buffer to a hex string, which I need in order to send the buffer over the network to a receiver process.
I chose a hex string because an unsigned char can be in the range 0 to 255 and there are no printable characters after 127.
The code below shows just the portion that bugs me; it's in the comment.
#include <iostream>
#include <sstream>
#include <iomanip>
using namespace std;

// converts a hex string to the corresponding integer, e.g. "c0" -> 192
int convertHexStringToInt(const string& hexString)
{
    stringstream geek;
    int x = 0;
    geek << std::hex << hexString;
    geek >> x;
    return x;
}

// converts a complete hex string to an unsigned char* buffer
// (assumes the string has an even number of characters)
void convertHexStringToUnsignedCharBuffer(string hexString, unsigned char* hexBuffer)
{
    int i = 0;
    while (hexString.length())
    {
        string hexStringPart = hexString.substr(0, 2);
        hexString = hexString.substr(2);
        int hexStringOneByte = convertHexStringToInt(hexStringPart);
        hexBuffer[i] = static_cast<unsigned char>(hexStringOneByte & 0xFF);
        i++;
    }
}

int main()
{
    // The hex string below is a hex representation of an unsigned char* buffer.
    // It is generated by an encryption algorithm in unsigned char* format;
    // I convert it to a hex string to make it printable for verification purposes,
    // and take the hex string as input here to test the conversion logic.
    string inputHexString = "552027e33844dd7b71676b963c0b8e20";
    string outputHexString;
    stringstream geek;
    unsigned char* hexBuffer = new unsigned char[inputHexString.length() / 2];
    convertHexStringToUnsignedCharBuffer(inputHexString, hexBuffer);
    for (size_t i = 0; i < inputHexString.length() / 2; i++)
    {
        geek << std::hex << std::setw(2) << std::setfill('0') << (0xFF & hexBuffer[i]); // this works
        //geek << std::hex << std::setw(2) << std::setfill('0') << (hexBuffer[i]); --> this does not work
        // I am not able to figure out why I need to do the bit-wise AND "0xFF & hexBuffer[i]".
        // Without it, the conversion does not work for individual bytes with values above 127.
    }
    geek >> outputHexString;
    cout << "input hex string: " << inputHexString << endl;
    cout << "output hex string: " << outputHexString << endl;
    if (0 == inputHexString.compare(outputHexString))
        cout << "hex encoding successful" << endl;
    else
        cout << "hex encoding failed" << endl;
    delete[] hexBuffer;
    return 0;
}
// output
// Can someone explain? I am sure it's something silly that I am missing.
The C++20 way:
// needs <algorithm>, <format>, <iostream>, <iterator>, <span>, <string>
const char text[] = "Hello world\n\t\r";   // sizeof(text) counts the trailing '\0'
auto sp = std::span(reinterpret_cast<const unsigned char*>(text), sizeof(text));
std::transform(sp.begin(), sp.end(),
               std::ostream_iterator<std::string>(std::cout),
               [](unsigned char c) -> std::string {
                   return std::format("{:02X}", int(c));
               });
Or if you want to store the result in a string:
std::string result;
result.reserve(sp.size() * 2);
// std::transform with std::back_inserter(result) would try to assign a whole
// std::string to a single char, so append the formatted bytes with std::format_to:
for (unsigned char c : sp)
    std::format_to(std::back_inserter(result), "{:02X}", int(c));
Output:
48656C6C6F20776F726C640A090D00
The output of an unsigned char is like the output of a char, which obviously does not do what the OP expects.
I tested the following on coliru:
#include <iomanip>
#include <iostream>

int main()
{
    std::cout << "Output of (unsigned char)0xc0: "
              << std::hex << std::setw(2) << std::setfill('0') << (unsigned char)0xc0 << '\n';
    return 0;
}
and got:
Output of (unsigned char)0xc0: 0�
This is caused by the overload of operator<<() that is chosen from the available candidates. I looked on cppreference at
operator<<(std::basic_ostream) and
std::basic_ostream::operator<<
and found
template< class Traits >
basic_ostream<char,Traits>& operator<<( basic_ostream<char,Traits>& os,
                                        unsigned char ch );
in the former (with a little bit of help from M.M).
The OP suggested a fix: a bit-wise AND with 0xff, which seemed to work. Checking this on coliru.com:
#include <iomanip>
#include <iostream>

int main()
{
    std::cout << "Output of (unsigned char)0xc0: "
              << std::hex << std::setw(2) << std::setfill('0') << (0xff & (unsigned char)0xc0) << '\n';
    return 0;
}
Output:
Output of (unsigned char)0xc0: c0
Really, this seems to work. Why?
0xff is an int constant (strictly speaking: an integer literal) and has type int. Hence, the bit-wise AND promotes (unsigned char)0xc0 to int as well, yields a result of type int, and thus the operator<< overload for int is applied.
This is one option to solve this. I can provide another one: just convert the unsigned char to unsigned.
While the promotion to int might raise worries about sign extension (undesired in this case), nothing of the kind can happen when an unsigned char is converted to unsigned; in fact, the promotion of unsigned char to int is value-preserving as well. The output stream operator for unsigned provides the intended output too:
#include <iomanip>
#include <iostream>

int main()
{
    std::cout << "Output of (unsigned char)0xc0: "
              << std::hex << std::setw(2) << std::setfill('0') << (unsigned)(unsigned char)0xc0 << '\n';
    const unsigned char c = 0xc0;
    std::cout << "Output of unsigned char c = 0xc0: "
              << std::hex << std::setw(2) << std::setfill('0') << (unsigned)c << '\n';
    return 0;
}
Output:
Output of (unsigned char)0xc0: c0
Output of unsigned char c = 0xc0: c0
Live Demo on coliru
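A third spelling of the same idea (not from the answer above, just a common idiom) is unary plus, which also triggers the integer promotion:

#include <iomanip>
#include <iostream>

int main()
{
    const unsigned char c = 0xc0;
    // unary + promotes the unsigned char to int, so the int overload of << is used
    std::cout << std::hex << std::setw(2) << std::setfill('0') << +c << '\n';  // prints: c0
    return 0;
}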
I am writing a little program that reads a disk image file in binary and then checks its partition entry tables to display each partition, its type, start sector, and size.
So far it reads the first 16 bytes accurately, but the rest of the partition entries are not recognized or have some kind of error.
EDIT: The first line of the output is supposed to look like this:
`Partition 0: Type: FAT-16 Start: 63 Size: 518760`
What am I missing? How do I fix the code so that all the partition entries give the appropriate result?
#include <iostream>
#include <fstream>
#include <cstring>   // for strcpy
using namespace std;

struct Partition { char type; int start_sect; int size; } part_entry[4]; // 4 x partition table entry

int main(int argc, char* argv[])
{
    //DECLARATIONS
    int i, offset = 26, not_exist = 0;
    char buf_part_table[64], vol_type[12];
    char* diskdata;
    int n;
    streampos begin, end;
    ifstream diskimage;
    diskimage.open("Sample_1.dd", ios::in | ios::binary);
    begin = diskimage.tellg();
    diskdata = new char[begin];
    diskimage.seekg(446, ios::beg);
    diskimage.read(buf_part_table, 64);
    for (i = 0; i < 4; i++)
    {
        part_entry[i].type = *(char*)(buf_part_table + 0x04 + (i * offset));
        if (part_entry[i].type == 0) not_exist++;
        part_entry[i].start_sect = *(int*)(buf_part_table + 0x08 + (i * offset));
        part_entry[i].size = *(int*)(buf_part_table + 0x0C + (i * offset));
        switch (part_entry[i].type)
        {
        case 0x00: strcpy(vol_type, "NOT-VALID");   break;
        case 0x06: strcpy(vol_type, "FAT-16");      break;
        case 0x07: strcpy(vol_type, "NTFS");        break;
        case 0x0B: strcpy(vol_type, "FAT-32");      break;
        default:   strcpy(vol_type, "NOT-DECODED"); break;
        }
        cout << "Partition " << i << ":" << " Type: " << vol_type
             << " Start: " << part_entry[i].start_sect
             << " Size: " << part_entry[i].size << endl;
    }
    return 0;
}
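For what it's worth, the likely immediate bug (assuming a standard MBR layout) is offset = 26: partition table entries are 16 bytes apart, and the intra-entry offsets the code already uses (type at 0x04, starting LBA at 0x08, sector count at 0x0C) are relative to the start of each 16-byte entry. A sketch of the corrected loop indexing:

// MBR partition table entries are 16 bytes apart, not 26
const int entry_size = 16;
for (int i = 0; i < 4; i++)
{
    part_entry[i].type       = *(char*)(buf_part_table + i * entry_size + 0x04);
    part_entry[i].start_sect = *(int*) (buf_part_table + i * entry_size + 0x08);
    part_entry[i].size       = *(int*) (buf_part_table + i * entry_size + 0x0C);
}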
You have unnecessarily made the program unreadable and harder to debug.
You can read the whole boot sector at once and then display the desired content.
Here is my quick example (it does not check whether the file exists, and some may complain that it should use memcpy for some fields, etc.):
#include <iostream>
#include <fstream>
#include <cstdint>
#include <cstddef>
#include <iomanip>
using namespace std;

struct partition_t {
    uint8_t status;
    uint8_t start_CHS[3];
    uint8_t type;
    uint8_t end_CHS[3];
    uint32_t start_LBA;
    uint32_t size_LBA;
} __attribute__((packed));

struct mbr_t
{
    uint8_t bootstrap[446];
    partition_t partitions[4];
    uint16_t signature;
} __attribute__((packed));

int main(int argc, char* argv[])
{
    mbr_t mbr;
    ifstream diskimage;
    diskimage.open("/tmp/mbr.dd", ios::in | ios::binary);
    diskimage.read(reinterpret_cast<char*>(&mbr), sizeof(mbr));
    diskimage.close();
    for (int idx = 0; idx < 4; idx++)
    {
        string bootable = (mbr.partitions[idx].status == 128) ? "yes" : "no";
        cout << " bootable : " << setw(5) << bootable
             << " type : " << setw(5) << (int)mbr.partitions[idx].type
             << " start LBA : " << setw(10) << mbr.partitions[idx].start_LBA
             << " size : " << setw(10) << mbr.partitions[idx].size_LBA << endl;
    }
    return 0;
}
It is easier to read, right?
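One portability note: __attribute__((packed)) is a GCC/Clang extension. If the code also needs to build with MSVC, the #pragma pack form (used again further down this page) is a more portable spelling of the same struct:

#include <cstdint>

#pragma pack(push, 1)   // no padding between members
struct partition_t {
    uint8_t  status;
    uint8_t  start_CHS[3];
    uint8_t  type;
    uint8_t  end_CHS[3];
    uint32_t start_LBA;
    uint32_t size_LBA;
};
#pragma pack(pop)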
I am essentially trying to use a union to cast a structure of data pieces with varying bit-widths into a nice clean array of integers. I have written a small program to illustrate my issue.
#include <stdio.h>
#include <iostream>
using namespace std;

union {
    struct {
        long blah1;
        short blah2;
        long blah3;
        short blah4;
        int blah5;
    } data;
    int buffer[6];
} db;

int main(int argc, char** argv)
{
    db.data.blah1 = 0x1111111111111111;
    db.data.blah2 = 0x2222;
    db.data.blah3 = 0x3333333333333333;
    db.data.blah4 = 0x4444;
    db.data.blah5 = 0x55555555;
    for (int i = 0; i < 6; i++)
        cout << "Word " << i << ": " << std::hex << db.buffer[i] << endl;
}
Output:
Word 0: 11111111
Word 1: 11111111
Word 2: 2222
Word 3: 0
Word 4: 33333333
Word 5: 33333333
Expected Output:
Word 0: 11111111
Word 1: 11111111
Word 2: 33332222
Word 3: 33333333
Word 4: 44443333
Word 5: 55555555
I compiled using gcc version 4.1.2 20080704 (Red Hat 4.1.2-54).
Do I have something formatted incorrectly, or am I trying to use this functionality for something other than what it was intended for? Is there another way to achieve my expected output without resorting to bit-wise manipulation and endless shifting?
As pointed out by @happydave and Floris, the expected result can be achieved by using #pragma pack with a value of 1, which stops the padding the compiler inserts for alignment.
#include <stdio.h>
#include <iostream>
using namespace std;

#pragma pack(push, 1)
union {
    struct {
        long blah1;
        short blah2;
        long blah3;
        short blah4;
        int blah5;
    } data;
    int buffer[6];
} db;
#pragma pack(pop)

int main(int argc, char** argv)
{
    db.data.blah1 = 0x1111111111111111;
    db.data.blah2 = 0x2222;
    db.data.blah3 = 0x3333333333333333;
    db.data.blah4 = 0x4444;
    db.data.blah5 = 0x55555555;
    for (int i = 0; i < 6; i++)
        cout << "Word " << i << ": " << std::hex << db.buffer[i] << endl;
}
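A side note on the union itself: reading a union member other than the one most recently written is technically undefined behavior in C++ (GCC documents it as an extension). A sketch of the same word dump without the union, copying the packed struct's bytes with memcpy (this assumes the #pragma pack(1) db declaration above, whose data member is 24 bytes on a 64-bit target, matching int buffer[6]):

#include <cstring>
#include <iostream>

int buffer[6];
std::memcpy(buffer, &db.data, sizeof(buffer));   // well-defined byte copy
for (int i = 0; i < 6; i++)
    std::cout << "Word " << i << ": " << std::hex << buffer[i] << std::endl;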
[Rewritten for clarity.]
I need to write and read doubles to and from files, in a format that will always have the same number of characters. The format doesn't need to be human-readable: it just needs to be quick to load (with as little dynamic memory use and conversion work as possible; file space is important but doesn't matter quite as much).
Is there a standard (or at least safe and reliable) way to get at the components of a double, so that I can store the exponent sign and mantissa sign as a '1' or '0' and the exponent and mantissa separately in a hex format with a constant length?
Essentially, how can I grab the specific bit components of a double? Is it even possible to do this across separate systems (assuming the same OS family, such as Windows), or is the layout of a double's components not enforced per OS?
I am using MinGW and of course compiling for Windows. I'd like to use the C standard library where possible, not the C++ standard library. Also, I'd like to avoid other libraries (like Boost), but if there are specific Windows functions then those would help a lot.
The most direct way of doing so would be to open your fstream in binary mode, and then use the write() and read() methods of fstream to move your double to and from the stream:
#include <fstream>
#include <iostream>

int main( int argc, char** argv ) {
    std::fstream fp( "foo", std::fstream::in |
                            std::fstream::out |
                            std::fstream::trunc |
                            std::fstream::binary );
    double d1, d2;
    d1 = 3.14;
    fp.write( (char*)&d1, sizeof( d1 ) );
    fp.seekg( 0, std::fstream::beg );
    fp.read( (char*)&d2, sizeof( d2 ) );
    std::cout << "d1 = " << d1 << " d2 = " << d2 << std::endl;
}
Probably you want something like this:
#include <iostream>
#include <sstream>
#include <iomanip>
using namespace std;

template <typename T>
string convertToHex(const T& x)
{
    // use unsigned char so bytes >= 0x80 don't sign-extend when cast to int
    const unsigned char* xc = (const unsigned char*)&x;
    ostringstream s;
    for (const unsigned char* c = xc; c < xc + sizeof(x); ++c)
        s << hex << setw(2) << setfill('0') << static_cast<int>(*c) << " ";
    return s.str();
}

template <typename T>
void convertFromHex(string s, T& x)
{
    unsigned char* xc = (unsigned char*)&x;
    istringstream is(s);
    for (unsigned char* c = xc; c < xc + sizeof(x); ++c)
    {
        int tmp;
        is >> hex >> tmp;
        *c = static_cast<unsigned char>(tmp);
    }
}

int main()
{
    double a = 10;
    string as = convertToHex(a);
    cout << "a: " << as << endl;
    double b;
    convertFromHex(as, b);
    cout << "b: " << b << endl;
}
Output:
a: 00 00 00 00 00 00 24 40
b: 10
(The bytes appear in little-endian order: 10.0 is 0x4024000000000000 as an IEEE-754 binary64, so the most significant bytes, 24 40, come last.)
Here is a very, very simple example with Boost.Serialization (http://www.boost.org/doc/libs/1_54_0/libs/serialization/doc/index.html). I am using boost::archive::text_iarchive and boost::archive::text_oarchive, but you can switch to boost::archive::binary_iarchive and boost::archive::binary_oarchive; it should work the same way.
#define _USE_MATH_DEFINES   // must come before any header that pulls in <cmath>
#include <cmath>
#include <boost/archive/text_iarchive.hpp>
#include <boost/archive/text_oarchive.hpp>
#include <iostream>
#include <sstream>
#include <string>
using namespace std;

int main()
{
    double a = M_PI;
    string text;
    {
        ostringstream textStream;
        boost::archive::text_oarchive oa(textStream);
        oa << a;
        text = textStream.str();
    }
    cout << "a: " << text << endl;
    double b;
    {
        istringstream textStream(text);
        boost::archive::text_iarchive ia(textStream);
        ia >> b;
    }
    cout << "b: " << b << endl;
}
Output:
a: 22 serialization::archive 9 3.1415926535897931
b: 3.14159
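Since the question also asks how to grab the individual components of a double, here is a hedged sketch that assumes IEEE-754 binary64 (which holds on Windows/MinGW) and uses only the C standard library, as the question prefers:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    double d = -3.14;
    uint64_t bits;
    memcpy(&bits, &d, sizeof bits);                        /* portable way to get the bits */
    unsigned sign = (unsigned)(bits >> 63);                /* 1 bit */
    unsigned exponent = (unsigned)((bits >> 52) & 0x7FF);  /* 11 bits, biased by 1023 */
    uint64_t mantissa = bits & 0xFFFFFFFFFFFFFULL;         /* 52 bits */
    /* constant width: 1 + 3 + 13 hex digits; prints "1 400 91EB851EB851F" */
    printf("%u %03X %013llX\n", sign, exponent, (unsigned long long)mantissa);
    return 0;
}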
Hello
I know this has been asked many times, but I haven't found an answer to my specific question.
I want to convert only strings that contain only decimal numbers:
for example, 256 is OK but 256a is not.
Can it be done without checking the string?
Thanks
The simplest way I can think of that makes error checking optional is this:
/* needs <stdlib.h>; str is the NUL-terminated input string */
char *endptr;
long x = strtol(str, &endptr, 10);                /* base 10: decimal digits only */
int error = (endptr == str || *endptr != '\0');   /* empty input or trailing junk */
The C++ way is to use a stringstream:
#include <iostream>
#include <string>
#include <sstream>
using namespace std;

int main()
{
    stringstream sstr;
    int a = -1;
    sstr << 256 << 'a';
    sstr >> a;
    if (sstr.fail())   // note: testing sstr.failbit would always be true, it is just a constant
    {
        cout << "Either no character was extracted, or the characters can't represent a proper value." << endl;
    }
    if (sstr.bad())
    {
        cout << "Error on stream.\n";
    }
    cout << "Extracted number " << a << endl;
    return 0;
}
Another way, C++ style: we check the number of digits to know whether the string was valid:
#include <iostream>
#include <sstream>
#include <string>
#include <cmath>

int main(int argc, char* argv[]) {
    std::string a("256");
    std::istringstream buffer(a);
    int number;
    buffer >> number; // OK, the conversion is done!
    // Now let's check whether the string was valid.
    // Quick way to compute the number of digits
    // (caveat: this breaks for 0, negative numbers, and leading zeros)
    size_t num_of_digits = (size_t)std::floor(std::log10(std::abs(number))) + 1;
    if (num_of_digits != a.length()) {
        std::cout << "Not a valid string!" << std::endl;
    }
    else {
        std::cout << "Valid conversion to " << number << std::endl;
    }
}
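For completeness, a common variant of the stream approach (my sketch, not from the answers above) is to require that extraction both succeeds and consumes the entire string:

#include <iostream>
#include <sstream>
#include <string>

// returns true and sets `out` only if `s` is entirely a decimal integer
bool parseDecimal(const std::string& s, int& out)
{
    std::istringstream iss(s);
    return static_cast<bool>(iss >> out) && iss.eof();
}

int main()
{
    int n;
    std::cout << parseDecimal("256", n) << '\n';   // 1, n == 256
    std::cout << parseDecimal("256a", n) << '\n';  // 0
}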