I have a uint32_t variable and I want to randomly modify the 10 least significant bits (0-9), and then, again randomly, modify bits 10 through 23. I wrote this simple program in C++ and it works for the first 10 bits but not for the others. I can't understand why.
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <iostream>
#include <math.h>
using namespace std;
void printuint(uint32_t value);
int main(){
    uint32_t initval=0xFFFFFFFF;
    uint32_t address;
    uint32_t value;
    uint32_t final;
    address=rand()%1024;
    address<<=23;
    printf("address \n");
    printuint(address);
    printf("final\n");
    final = (initval & address);
    printuint(final);
    return 0;
}
void printuint (uint32_t value){
    while (value) {
        printf("%d", value & 1);
        value >>= 1;
    }
    cout<<endl;
}
Adding this:
value = rand() % 16384;
printuint(value);
and modifying the last line to final = (initval & address) & value; still doesn't work.
Here's an example of flipping random bits:
#include <climits>
#include <cstdlib>
#include <ctime>
#include <iostream>

int main(void)
{
    std::srand(std::time(nullptr));
    unsigned int value = 0;
    for (unsigned int iterations = 0; iterations < 10; ++iterations)
    {
        // sizeof() counts bytes, so multiply by CHAR_BIT to get a random *bit* position
        unsigned int bit_position_to_change = std::rand() % (sizeof(unsigned int) * CHAR_BIT);
        unsigned int bit_value = 1u << bit_position_to_change;
        value = value ^ bit_value; // flip the bit.
        std::cout << "Iteration: " << iterations
                  << ", value: 0x" << std::hex << value
                  << "\n";
    }
    return EXIT_SUCCESS;
}
The exclusive-OR function, represented by operator ^, is good for flipping bits.
Another method is to replace bits:
unsigned int bit_pattern;
unsigned int bit_mask; // contains a 1 bit in each position to replace.
value = value & ~bit_mask; // Clear bits using the mask
value = value | bit_pattern; // Put new bit pattern in place.
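Putting the two techniques together for the original question, here is a minimal sketch (my own example, assuming the goal is to randomize bits 0-9 and bits 10-23 of a uint32_t):

#include <cstdint>
#include <cstdlib>
#include <ctime>
#include <iostream>

int main()
{
    std::srand(std::time(nullptr));
    uint32_t value = 0xFFFFFFFF;

    // Replace bits 0-9: clear them with the mask, then OR in a 10-bit random pattern.
    const uint32_t low_mask = 0x3FFu;                // ones in positions 0-9
    value = (value & ~low_mask) | uint32_t(std::rand() % 1024);

    // Replace bits 10-23: clear them, then OR in a 14-bit random pattern shifted into place.
    const uint32_t mid_mask = 0x3FFFu << 10;         // ones in positions 10-23
    value = (value & ~mid_mask) | (uint32_t(std::rand() % 16384) << 10);

    std::cout << std::hex << value << "\n";
    return 0;
}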
Sorry, I solved my problem with more patience. What I meant to do is this:
#include <bitset>
#include <cstdint>
#include <iostream>
using namespace std;

int main()
{
    uint32_t initval;
    uint32_t address(1023);
    bitset<32> bits(address);
    cout << bits.to_string() << endl;
    uint32_t value(16383);
    value <<= 10;
    bitset<32> bitsvalue(value);
    cout << bitsvalue.to_string() << endl;
    initval = address | value;
    bitset<32> bitsinit(initval);
    cout << bitsinit.to_string() << endl;
    return 0;
}
Related
I am writing a program to convert decimal to binary, but the answer I am getting is not correct. I have checked it multiple times but couldn't figure out what is wrong.
#include<iostream>
#include<math.h>
using namespace std;
int decitobin(int n){
    int ans=0;
    int i=0;
    while(n!=0){
        int bit=n&1;
        ans=((bit * pow(10,i))+ans);
        n=n>>1;
        i++;
    }
    return ans;
}
int main(){
    int n;
    cin>>n;
    if(n<0){
        n=n*(-1);
        int newans=decitobin(n);
        //1stcomp
        newans=(~newans);
        newans=newans+1;
        cout<<newans<<endl;
    }
    else{
        cout<<decitobin(n);
    }
}
I am getting the output 100 for 5, 99 for 4, and -109 for -6.
I have checked each line against the solution but could not figure it out.
Note that in C++ there is an easier way (though that is probably not what your teacher asked for):
#include <bitset>
#include <iostream>
int main()
{
    std::size_t value{ 112ul };
    std::bitset<8> bits{ value };
    std::cout << bits;
    return 0;
}
Another way of doing it in code without even needing base 10 logic.
Just to show you numbers in memory are already in binary format.
Often in dealing with binary data you will need masks and shift operations.
#include <cstddef>
#include <iostream>
#include <string>

auto get_number_of_bits(int value)
{
    std::size_t n{ 1ul };
    value >>= 1;
    while (value != 0)
    {
        ++n;
        value >>= 1;
    }
    return n;
}

// note value will already be a binary number in memory
// we just need to "walk" over all the bits and
// insert a '0' or '1' to the string
std::string to_bin(const int value)
{
    // calculate the number of bits present in the number
    const auto number_of_bits{ get_number_of_bits(value) };
    // allocate a string to hold the correct/minimal number of bits in the output
    std::string string(number_of_bits, '0');
    unsigned int mask{ 0x01u << (number_of_bits - 1ul) }; // select which bit we want from the number
    // loop over the bits
    for (std::size_t n{ 0ul }; n < number_of_bits; ++n)
    {
        string[n] = (value & mask) ? '1' : '0'; // test if the bit is set: insert a '1' if so, otherwise a '0'
        mask >>= 1;
    }
    return string;
}

int main()
{
    std::cout << to_bin(5) << "\n";
    std::cout << to_bin(12345) << "\n";
    return 0;
}
I am currently working on a Hash function and need to interpret a 4 byte string as a 4 byte integer number. Specifically I want every char value of the string interpreted as part of the integer number.
You can just copy the 4 bytes into a 32 bit unsigned integer variable to be able to interpret it as a 4 byte integer:
#include <cstdint>
#include <cstring>
#include <iostream>
#include <stdexcept>
#include <string>

std::uint32_t foo(const std::string& str) {
    if (str.size() != 4) throw std::runtime_error("wrong string length");
    std::uint32_t rv;
    std::memcpy(&rv, str.c_str(), 4);
    return rv;
}

int main() {
    std::cout << (1 + 256 + 65536 + 16777216) << '\n'; // 16843009
    std::cout << foo("\001\001\001\001") << '\n';      // 16843009
    // The result is platform dependent, so these are possible results:
    std::cout << foo("\004\003\002\001") << '\n';      // 16909060 or 67305985
    std::cout << foo("\001\002\003\004") << '\n';      // 16909060 or 67305985
}
This function takes a string and interprets its char values as a 4 byte integer number.
Only tested strings with a maximum length of 4.
uint32_t buffToInteger( std::string buffer )
{
    uint32_t b = 0;
    for ( uint64_t i = 0; i < buffer.length(); i++ )
    {
        // convert the char to an unsigned byte *before* shifting, then widen so the shift fits in 32 bits
        b |= static_cast< uint32_t >( static_cast< unsigned char >( buffer.at( i ) ) )
             << (24 - (4 - buffer.length() + i) * 8);
    }
    return b;
}
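For example (my own usage sketch, assuming the function above is in scope and <cstdint>, <iostream>, <string> are included):

int main()
{
    // the shifts assemble the bytes MSB-first, so the result does not depend on the platform
    std::cout << buffToInteger("\x01\x02\x03\x04") << std::endl; // prints 16909060 (0x01020304)
    return 0;
}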
First of all, I want to clarify that this question is different from the questions:
How to store a 64 bit integer in two 32 bit integers and convert back again
Is it possible to store 2 32-bit values in one long int variable?
How to combine two 32-bit integers into one 64-bit integer?
The difference is that this question is about both storing and using the values, which means I can do this:
int64_t score = make_score(-15, 15);
score += make_score(-5, 5); //I can use (add, subtract) the score
int32_t a = get_a(score);
assert(a == -20); //-15 -5 = -20
int32_t b = get_b(score);
assert(b == 20);//15 + 5= 20
This is achievable for two 16-bit ints in one 32-bit int (Stockfish does this):
/// Score enum stores a middlegame and an endgame value in a single integer (enum).
/// The least significant 16 bits are used to store the middlegame value and the
/// upper 16 bits are used to store the endgame value. We have to take care to
/// avoid left-shifting a signed int to avoid undefined behavior.
enum Score : int { SCORE_ZERO };
constexpr Score make_score(int mg, int eg) {
return Score((int)((unsigned int)eg << 16) + mg);
}
/// Extracting the signed lower and upper 16 bits is not so trivial because
/// according to the standard a simple cast to short is implementation defined
/// and so is a right shift of a signed integer.
inline Value eg_value(Score s) {
union { uint16_t u; int16_t s; } eg = { uint16_t(unsigned(s + 0x8000) >> 16) };
return Value(eg.s);
}
inline Value mg_value(Score s) {
union { uint16_t u; int16_t s; } mg = { uint16_t(unsigned(s)) };
return Value(mg.s);
}
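For instance, working through the 16-bit version by hand (my own arithmetic, just to illustrate the trick):
make_score(-20, 20) = ((unsigned)20 << 16) + (-20) = 0x00140000 - 0x14 = 0x0013FFEC
mg_value: uint16_t(0x0013FFEC) = 0xFFEC, reinterpreted through the union as int16_t = -20
eg_value: uint16_t(unsigned(0x0013FFEC + 0x8000) >> 16) = uint16_t(0x00147FEC >> 16) = 0x0014 = 20
Without the +0x8000 correction the upper half would read back as 0x0013 = 19, because the negative mg borrows from it.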
I'm trying to upgrade mg and eg from int16_t to int32_t, but I can't figure out how to do it: whenever I add ScoreA + ScoreB, the eg and mg inside the Score get ruined.
Here is what I tried, which failed:
enum Score : int64_t { SCORE_ZERO };
constexpr Score make_score(int mg, int eg) {
return Score((int)((uint64_t)eg << 32) + mg);
}
inline Value eg_value(Score s) {
union { uint32_t u; int32_t s; } eg = { uint32_t(unsigned(s + 0x80000000) >> 32) };
return Value(eg.s);
}
inline Value mg_value(Score s) {
union { uint32_t u; int32_t s; } mg = { uint32_t(unsigned(s)) };
return Value(mg.s);
}
Use memcpy.
As the comment on the original solution pointed out, this kind of bit manipulation is a minefield of potential undefined behavior. memcpy allows you to avoid that, and it is well understood by modern compilers, so it will still result in efficient machine code.
#include <cstdint>
#include <cstring>

enum Score : int64_t { SCORE_ZERO };
enum Value : int32_t { FORTYTWO };

inline Score make_score(int32_t mg, int32_t eg) {
    int64_t combined;
    std::memcpy(&combined, &eg, 4);
    std::memcpy(reinterpret_cast<char*>(&combined) + 4, &mg, 4);
    return Score(combined);
}

inline Value eg_value(Score s) {
    int32_t eg;
    std::memcpy(&eg, &s, 4);
    return Value(eg);
}

inline Value mg_value(Score s) {
    int32_t mg;
    std::memcpy(&mg, reinterpret_cast<char*>(&s) + 4, 4);
    return Value(mg);
}
Try it on godbolt.
The problem is that you still have some "int" and "unsigned" keywords left that still convert the value down to 32 bits. So replace each "int" with "int64_t" and each "unsigned" with "uint64_t" and it should work as expected.
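Applied to the code from the question, that substitution would look roughly like this (only a sketch; the Value enum is borrowed from the memcpy answer above):

#include <cstdint>

enum Score : int64_t { SCORE_ZERO };
enum Value : int32_t { FORTYTWO };

constexpr Score make_score(int mg, int eg) {
    // the (int) cast became (int64_t) so the shifted eg is not truncated
    return Score((int64_t)((uint64_t)eg << 32) + mg);
}

inline Value eg_value(Score s) {
    // unsigned(...) became uint64_t(...) so the >> 32 still sees the upper half
    union { uint32_t u; int32_t s; } eg = { uint32_t(uint64_t(s + 0x80000000) >> 32) };
    return Value(eg.s);
}

inline Value mg_value(Score s) {
    union { uint32_t u; int32_t s; } mg = { uint32_t(uint64_t(s)) };
    return Value(mg.s);
}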
This can be a different approach to this question:
#include<iostream>
#include<cstdint>
#include<bitset>
using namespace std;

int main()
{
    bitset<32> bit32[2] = { bitset<32>(45), bitset<32>(-415152545) };
    bitset<64> bit64;

    // store in the 64 bit variable
    int index=0;
    int position=0;
    for(int i=0;i<64;i++)
    {
        bit64[i] = bit32[index][i-position];
        if(i==31)
        { index = 1; position=32; }
    }

    // reset the 32 bit containers, index and position
    bit32[0].reset();
    bit32[1].reset();
    index=0;
    position=0;

    // fetch the data back into the 32 bit containers from the 64 bit one and assign it to a and b
    int32_t a;
    int32_t b;
    for(int i=0;i<64;i++)
    {
        bit32[index][i-position] = bit64[i];
        if(i==31)
        { index = 1; position=32; }
    }
    a = bit32[0].to_ulong();
    b = bit32[1].to_ulong();
    cout<<a<<" "<<b;
}
You could use union as well:
#include <stdint.h>
#include <iostream>
union Score {
int64_t u64;
int32_t u32[2];
Score() : u64(0) {}
Score(int64_t v) : u64(v) {}
Score(int32_t a, int32_t b): u32{a, b} {}
Score & operator=(Score const & original) { if(&original != this) { u64 = original.u64; } return *this; }
int32_t get_a() {return u32[0];}
int32_t get_b() {return u32[1];}
int64_t get() {return u64;}
Score operator+(Score const & other) {
return Score(u32[0] + other.u32[0], u32[1] + other.u32[1]);
}
Score & operator+=(Score const & other) {
u32[0] += other.u32[0];
u32[1] += other.u32[1];
return *this;
}
};
int main() {
Score v(-15, 15);
std::cout << "The size is: " << sizeof(Score) << " Bytes" << std::endl;
std::cout << "A is: " << v.get_a() << std::endl;
std::cout << "B is: " << v.get_b() << std::endl;
std::cout << "adding -5, +5" << std::endl;
Score v1 = v + Score(-5, 5);
std::cout << "A is: " << v1.get_a() << std::endl;
std::cout << "B is: " << v1.get_b() << std::endl;
std::cout << "adding -10, +10" << std::endl;
v += Score(-10, 10);
std::cout << "A is: " << v.get_a() << std::endl;
std::cout << "B is: " << v.get_b() << std::endl;
return 0;
}
Output:
The size is: 8 Bytes
A is: -15
B is: 15
adding -5, +5
A is: -20
B is: 20
adding -10, +10
A is: -25
B is: 25
It's easy.
int64_t value;
int32_t* value1 = (int32_t*)&value;
int32_t* value2 = (int32_t*)&value + 1;
Example:
#include <cstdint>
int main() {
int64_t value;
int32_t* value1 = (int32_t*)&value;
int32_t* value2 = (int32_t*)&value + 1;
*value1 = 10; // Example
*value2 = 20;
return 0;
}
I am a beginner starting out in C++ and I am trying to turn a decimal byte into a binary number, but there is something wrong with my syntax or logic and I cannot figure it out. When debugging, I believe the error is around userValue5, but I'm not sure why. Any tips are appreciated; I am using VS2015.
#include "stdafx.h"
#include <iostream>
#include <stdint.h>
#include <cmath>
//finds where each column is a 0 or a 1
int binaryDigit(uint16_t y, uint16_t power)
{
if ((y / (pow(2, power))) > 1)
return 1;
else
return 0;
}
int difference(uint16_t y, int x, uint16_t power)
{
if (x == 1)
return y - pow(2, power);
else
return y;
}
//takes a decimal byte and turns it into binary
int main()
{
using namespace std;
cout << "Please insert a number between 0 and 255 so that I can convert it to binary: ";
uint16_t userValue(0), power(7);
cin >> userValue;
int firstDigit = binaryDigit(userValue, power);
uint16_t userValue2 = difference(userValue, firstDigit, power);
--power;
int secondDigit = binaryDigit(userValue2, power);
uint16_t userValue3 = difference(userValue2, secondDigit, power);
--power;
int thirdDigit = binaryDigit(userValue3, power);
uint16_t userValue4 = difference(userValue3, thirdDigit, power);
--power;
int fourthDigit = binaryDigit(userValue4, power);
uint16_t userValue5 = difference(userValue4, thirdDigit, power);
--power;
int fifthDigit = binaryDigit(userValue5, power);
uint16_t userValue6 = difference(userValue5, thirdDigit, power);
--power;
int sixthDigit = binaryDigit(userValue6, power);
uint16_t userValue7 = difference(userValue6, thirdDigit, power);
--power;
int seventhDigit = binaryDigit(userValue7, power);
uint16_t userValue8 = difference(userValue7, thirdDigit, power);
--power;
int eigthDigit = binaryDigit(userValue8, power);
cout << "The number " << userValue << " in binary is ";
cout << firstDigit << secondDigit << thirdDigit << fourthDigit << " " << fifthDigit << sixthDigit << seventhDigit << eigthDigit << endl;
return 0;
}
bitset is your friend.
#include <bitset>
#include <climits>
#include <iostream>
using namespace std;

int main()
{
    int userValue = 0;
    cout << "Please insert a number between " << INT_MIN << " and " << INT_MAX << " so that I can convert it to binary: ";
    cin >> userValue;
    cout << bitset<32>(userValue) << endl;
    return 0;
}
However, if you want to convert it to a string, you'll probably need a stringstream (from <sstream>):
stringstream ss;
ss << bitset<32>(userValue);
string str = ss.str();
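As a side note (not part of the original answer), std::bitset also has a to_string() member, so the stringstream isn't strictly required:
string str = bitset<32>(userValue).to_string();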
I figured it out. When I copied and pasted the same code to save time, I forgot to edit the arguments, so that's why it wasn't working properly. I fixed it and it works great. Thank you to everyone who commented and posted things like the table of exponents and the bit masking; I've learned a lot and cannot wait to write more programs.
bitset is the way to go, but in general, maybe this is more helpful...
#include <cstddef>
#include <sstream>
#include <string>

std::string intToBinaryString(int x)
{
    // a useful class that allows easy concatenation
    std::stringstream ss;
    // create an integer that only has a 1 bit in its top position.
    unsigned int mask = 1u << (sizeof(int) * 8 - 1);
    // for each bit in x, starting at the top, check to see if its non zero...
    for(std::size_t i = 0; i < sizeof(int) * 8; ++i)
    {
        // see if x's (top - i)'th bit is non zero
        bool nonZeroBit = x & mask;
        // assign the (top - i)'th "bit" of ss to be '1' if non zero and '0' otherwise
        ss << (nonZeroBit ? '1' : '0');
        // shift the mask down by 1.
        mask >>= 1;
    }
    // What we get on the first iteration is
    //
    //   mask    = 10000...0000
    // & x       = 10101...0010
    // -----------------------------
    //   nonZero = 10000...0000 = true
    //
    // the second iteration is
    //
    //   mask    = 01000...0000
    // & x       = 10101...0010
    // -----------------------------
    //   nonZero = 00000...0000 = false
    //
    // the third iteration is
    //
    //   mask    = 00100...0000
    // & x       = 10101...0010
    // -----------------------------
    //   nonZero = 00100...0000 = true
    //
    // ...
    //
    // then we just add these results into ss
    // finally, convert it into a normal string
    return ss.str();
}
I want to convert an unsigned short value from MSB first to LSB first. I wrote the code below but it's not working. Can someone point out the error I made?
#include <iostream>
using namespace std;
int main()
{
unsigned short value = 0x000A;
char *m_pCurrent = (char *)&value;
short temp;
temp = *(m_pCurrent+1);
temp = (temp << 8) | *(unsigned char *)m_pCurrent;
m_pCurrent += sizeof(short);
cout << "temp " << temp << endl;
return 0;
}
Here's a simple but slow implementation:
#include <climits>
#include <cstddef>

const std::size_t USHORT_BIT = CHAR_BIT * sizeof(unsigned short);

unsigned short ConvertMsbFirstToLsbFirst(const unsigned short input) {
    unsigned short output = 0;
    for (std::size_t offset = 0; offset < USHORT_BIT; ++offset) {
        output |= ((input >> offset) & 1) << (USHORT_BIT - 1 - offset);
    }
    return output;
}
You could easily template this to work with any numeric type.
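A minimal sketch of that templated version (my own generalization of the loop above, assuming an unsigned integer type T):

#include <climits>
#include <cstddef>

template <typename T>
T ReverseBits(const T input) {
    const std::size_t bit_count = CHAR_BIT * sizeof(T);
    T output = 0;
    for (std::size_t offset = 0; offset < bit_count; ++offset) {
        // take bit 'offset' of the input and place it mirrored from the top
        output |= ((input >> offset) & T(1)) << (bit_count - 1 - offset);
    }
    return output;
}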
What was wrong is that you first assigned the value's MSB to temp's LSB, then shifted it back up to the MSB and assigned the value's LSB to the LSB. Basically, you had interchanged *(m_pCurrent + 1) and *m_pCurrent, so the whole thing had no effect.
The simplified code:
#include <iostream>
using namespace std;
int main()
{
unsigned short value = 0x00FF;
short temp = ((char*) &value)[0]; // assign value's LSB
temp = (temp << 8) | ((char*) &value)[1]; // shift LSB to MSB and add value's MSB
cout << "temp " << temp << endl;
return 0;
}