I am trying to set a range of bits in a 64-bit integer to ones.
As you can see in the loop in main, I'm setting bits 40 to 47 to 1 using the setBit function.
For a reason I don't understand, bits 8 to 15 are also set to 1, as you can see from the program's output (printed least-significant bit first):
0000000011111111000000000000000000000000111111110000000000000000
I couldn't reproduce the same behavior with a regular int.
By the way, I also tried using an unsigned long long instead of int64_t, with the same problem.
What am I missing?
#include <iostream>
#include <cstdint>

using namespace std;

int64_t x = 0;

void setBit(int64_t *num, int index)
{
    *num |= (1 << index);
}

bool retrieveBit(int64_t *num, int index)
{
    return *num & (1 << index);
}

int main()
{
    for (int i = 40; i < 48; ++i)
        setBit(&x, i);

    for (int i = 0; i < 64; ++i)
    {
        int digit = retrieveBit(&x, i);
        cout << digit;
    }
    return 0;
}
In the sub-expression:
(1 << index)
the type of the constant 1 is int, so this shift is done in an int. If your int isn't 64 bits wide (it probably isn't), then this shift has undefined behaviour.
You need to use a constant that is at least 64 bits wide:
(1LL << index)
(you need to do this in both the setBit() and retrieveBit() functions).
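For reference, a minimal corrected sketch of the two functions (assuming the rest of the program is unchanged):

#include <cstdint>

void setBit(int64_t *num, int index)
{
    *num |= (1LL << index);       // 64-bit constant, so the shift is done in 64 bits
}

bool retrieveBit(int64_t *num, int index)
{
    return *num & (1LL << index); // same fix on the read side
}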
If I have a very large array of bytes and want to find the indices of all 1-bits (counting from the leftmost bit), how do I do this efficiently, ideally using SIMD?
(For finding the first 1-bit, see an earlier question. This question asks for an array of outputs instead of a single index.)
Of course I can do the following 512-bit non-SIMD version using C++20:
#include <cstdint>
#include <iostream>
#include <bit>

int Find1s512(uint64_t const *p, uint16_t *idxs) {
    int rpos = 0;
    for (int i = 0; i < 8; ++i) {
        uint64_t n = p[i];
        while (true) {
            int const j = std::countr_zero(n);
            if (j >= 64)
                break;
            idxs[rpos++] = i * 64 + j;
            n &= n - 1;
        }
    }
    return rpos;
}

int main() {
    uint64_t a[8] = {(1ULL << 17) | (1ULL << 63),
                     1ULL << 19, 1ULL << 23};
    uint16_t b[512] = {};
    int const cnt = Find1s512(a, b);
    for (int i = 0; i < cnt; ++i)
        std::cout << int(b[i]) << " ";
    // Printed result: 17 63 83 151
}
And I can use the above 512-bit version as a building block to collect the 1-bit positions of the whole large array.
But I'd like to find out the most efficient way to do this, especially using 128-, 256-, or 512-bit SIMD.
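For what it's worth, here is a minimal AVX2-flavoured sketch of one direction (the name Find1sAVX2 and the chunk-skipping strategy are my own assumptions, not a known-optimal answer; the uint16_t indices also cap the array at 65536 bits): it uses _mm256_testz_si256 to skip all-zero 256-bit chunks cheaply, and only runs the scalar countr_zero loop where set bits exist.

#include <immintrin.h>
#include <bit>
#include <cstdint>
#include <cstddef>

int Find1sAVX2(uint64_t const *p, size_t nWords, uint16_t *idxs) {
    int rpos = 0;
    size_t i = 0;
    // Process 4 words (256 bits) at a time; skip chunks that are entirely zero.
    for (; i + 4 <= nWords; i += 4) {
        __m256i v = _mm256_loadu_si256(reinterpret_cast<__m256i const *>(p + i));
        if (_mm256_testz_si256(v, v))   // true when all 256 bits are zero
            continue;
        for (size_t k = i; k < i + 4; ++k) {
            uint64_t n = p[k];
            while (n) {
                idxs[rpos++] = static_cast<uint16_t>(k * 64 + std::countr_zero(n));
                n &= n - 1;             // clear the lowest set bit
            }
        }
    }
    for (; i < nWords; ++i) {           // scalar tail for the remaining words
        uint64_t n = p[i];
        while (n) {
            idxs[rpos++] = static_cast<uint16_t>(i * 64 + std::countr_zero(n));
            n &= n - 1;
        }
    }
    return rpos;
}

This pays off when most chunks are zero; for dense inputs the movemask-style test adds little and a purely scalar loop may be just as fast.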
I'm trying to copy the contents of a 32-bit unsigned int to be used as a float: not casting it, just reinterpreting the integer's bits as a float. I'm aware memcpy is the most-suggested option for this. However, when I memcpy from a uint32_t to a float and print out the individual bits, I see they are quite different.
Here is my code snippet:
#include <iostream>
#include <stdint.h>
#include <cstring>

using namespace std;

void print_bits(unsigned n) {
    unsigned i;
    for (i = 1u << 31; i > 0; i /= 2)
        (n & i) ? printf("1") : printf("0");
}

union {
    uint32_t u_int;
    float u_float;
} my_union;

int main()
{
    uint32_t my_int = 0xc6f05705;
    float my_float;

    //Method 1 using memcpy
    memcpy(&my_float, &my_int, sizeof(my_float));

    //Print using function
    print_bits(my_int);
    printf("\n");
    print_bits(my_float);

    //Print using printf
    printf("\n%0x\n", my_int);
    printf("%0x\n", my_float);

    //Method 2 using unions
    my_union.u_int = 0xc6f05705;
    printf("union int = %0x\n", my_union.u_int);
    printf("union float = %0x\n", my_union.u_float);
    return 0;
}
Outputs:
11000110111100000101011100000101
11111111111111111000011111010101
c6f05705
400865
union int = c6f05705
union float = 40087b
Can someone explain what's happening? I expected the bits to match. Using a union didn't work either.
The bits differ because print_bits takes an unsigned, so passing a float converts the value arithmetically instead of reinterpreting its bits (and printf("%0x", my_float) is undefined behaviour: the float is promoted to double, which %x cannot print). You need to change print_bits so that it takes a pointer and a size:
#include <stdio.h>
#include <stdint.h>
#include <limits.h>

inline
int is_big_endian(void)
{
    const union
    {
        uint32_t i;
        char c[sizeof(uint32_t)];
    } e = { 0x01000000 };

    return e.c[0];
}

void print_bits(const void *src, unsigned int size)
{
    //Check the byte order the target uses in memory:
    int t, c;
    if (is_big_endian())
    {
        t = 0;
        c = 1;
    }
    else
    {
        t = size - 1;
        c = -1;
    }
    for (; t >= 0 && t <= (int)size - 1; t += c)
    {   //print the bits of each byte from the MSB to the LSB
        unsigned char i;
        unsigned char n = ((unsigned char *)src)[t];
        for (i = 1 << (CHAR_BIT - 1); i > 0; i /= 2)
        {
            printf("%d", (n & i) != 0);
        }
    }
    printf("\n");
}
and call it like this:
int a = 7;
print_bits(&a, sizeof(a));
That way there is no implicit type conversion when you call print_bits, and it works for objects of any size.
EDIT: I replaced 7 with CHAR_BIT - 1 because a byte can be wider than 8 bits.
EDIT 2: I added support for both little-endian and big-endian targets.
Also, as @M.M suggested in the comments, you can use a template so that the call becomes print_bits(a) instead of print_bits(&a, sizeof(a)).
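A minimal sketch of that template wrapper (my naming; it simply forwards to the two-argument print_bits above):

template <typename T>
void print_bits(const T &value)
{
    print_bits(&value, sizeof(value));
}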
I want to make a simple program that takes a number of bits from the input and shows all binary numbers that can be written on that many bits (example: if I type 3, it shows 000, 001, 010, 011, 100, 101, 110, 111).
The only problem I get is in the second for-loop, where I try to use a variable as the bitset<bits> size, but it wants a constant number.
If you could help me find the solution I would be really grateful.
Here's the code:
#include <iostream>
#include <bitset>
#include <cmath>

using namespace std;

int main() {
    int maximum_value = 0, x_temp = 10;
    //cin >> x_temp;
    int const bits = x_temp;
    for (int i = 1; i <= bits; i++) {
        maximum_value += pow(2, bits - i);
    }
    for (int i = maximum_value; i >= 0; i--)
        cout << bitset<bits>(maximum_value - i) << endl;
    return 0;
}
A numeric ("non-type", as C++ calls it) template parameter must be a compile-time constant, so you cannot use a user-supplied number. Use a large constant number (e.g. 64) instead. You need another integer that will limit your output:
int x_temp = 10;
cin >> x_temp;
int const bits = 64;
...
Here 64 is a sensible maximum, because bitset has a constructor taking an unsigned long long argument, which has at least 64 bits (possibly more).
However, if you use int for your intermediate calculations, your code reliably supports a maximum of 14 bits (without overflow). If you want to support more than 14 bits (e.g. 64), use a larger type, like uint32_t or uint64_t.
A problem with holding more bits than needed is that the additional bits will be displayed. To cut them out, use substr:
cout << bitset<64>(...).to_string().substr(64 - x_temp);
Here to_string converts the bitset to a string of 64 characters, and substr keeps only the last x_temp of them, discarding the leading padding.
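Putting it together, a minimal sketch of the fixed program under these assumptions (64 as the hard upper limit, and an input in the range 1 to 63):

#include <iostream>
#include <bitset>
#include <string>
#include <cstdint>

int main() {
    int x_temp = 3;
    std::cin >> x_temp;                 // assume 1 <= x_temp < 64
    const int bits = 64;                // compile-time constant for bitset
    uint64_t count = 1ULL << x_temp;    // 2^x_temp values to print
    for (uint64_t v = 0; v < count; ++v)
        std::cout << std::bitset<bits>(v).to_string().substr(bits - x_temp) << '\n';
    return 0;
}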
Alternatively, you can define bits as a global compile-time constant:
#include <iostream>
#include <math.h>
#include <bitset>

using namespace std;

const unsigned bits = 10;

int main() {
    int maximum_value = 0, x_temp = 10;
    for (int i = 1; i <= bits; i++) {
        maximum_value += pow(2, bits - i);
    }
    for (int i = maximum_value; i >= 0; i--)
        cout << bitset<bits>(maximum_value - i) << endl;
    return 0;
}
I've written a program that shows the binary representation of a particular integer value, using bitwise operators in C++. For even numbers it works as I expect, but for odd numbers it adds a 1 to the left of the binary representation.
#include <iostream>
#include <cstdlib>

using std::cout;
using std::cin;
using std::endl;

int main()
{
    unsigned int a = 128;
    for (int i = sizeof(a) * 8; i >= 0; --i) {
        if (a & (1UL << i)) { // if the i-th digit is 1
            cout << 1;        // output 1
        }
        else {
            cout << 0;        // otherwise output 0
        }
    }
    cout << endl;
    system("pause");
    return 0;
}
Results:
For a = 128: 000000000000000000000000010000000,
For a = 127: 100000000000000000000000001111111
You might prefer the CHAR_BIT macro instead of a raw 8 (#include <climits>).
Consider your start value! Assuming unsigned int has 32 bits, your start value is i = 4 * 8 = 32, so 1UL << i shifts the value out of range. This is undefined behaviour and could result in anything; evidently, your specific compiler or hardware masks the shift count with % 32, so the first digit printed is actually the lowest bit (a & 1), which is 1 for every odd number. That is your unexpected leading 1. Did you notice that you actually printed 33 digits instead of 32?
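A minimal sketch of the corrected loop under that advice (start at the highest valid bit index and use CHAR_BIT):

#include <climits>
#include <iostream>

int main()
{
    unsigned int a = 127;
    // Start at the highest valid bit index, not at the bit width itself:
    for (int i = sizeof(a) * CHAR_BIT - 1; i >= 0; --i)
        std::cout << ((a >> i) & 1u);
    std::cout << '\n';   // prints exactly 32 digits for a 32-bit unsigned int
    return 0;
}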
The problem is here:
for (int i = sizeof(a) * 8; i >= 0; --i) {
it should be:
for (int i = sizeof(a) * 8; i-- ; ) {

Here the condition tests i and then decrements it, so the body sees i = 31 down to 0, and exactly 32 bits are printed.
http://coliru.stacked-crooked.com/a/8cb2b745063883fa
I made a function that converts numbers to binary. For some reason it's not working: it gives the wrong output. The output is in binary format, but it always gives the wrong result for binary numbers that end with a zero (at least that's what I noticed).
unsigned long long to_binary(unsigned long long x)
{
    int rem;
    unsigned long long converted = 0;

    while (x > 1)
    {
        rem = x % 2;
        x /= 2;
        converted += rem;
        converted *= 10;
    }
    converted += x;

    return converted;
}
Please help me fix it, this is really frustrating.
Thanks!
Use std::bitset to do the translation:
#include <iostream>
#include <bitset>
#include <limits.h>

int main()
{
    int val;
    std::cin >> val;

    std::bitset<sizeof(int) * CHAR_BIT> bits(val);
    std::cout << bits << "\n";
}
You're reversing the bits, and you cannot use the remaining value of x as the indicator of when to terminate the loop.
Consider e.g. 4.
After the first loop iteration:
rem == 0
converted == 0
x == 2
After the second loop iteration:
rem == 0
converted == 0
x == 1
And then you set converted to 1.
Try:
int i = sizeof(x) * 8; // i is now number of bits in x
while (i>0) {
--i;
converted *= 10;
converted |= (x >> i) & 1;
// Shift x right to get bit number i in the rightmost position,
// then and with 1 to remove any bits left of bit number i,
// and finally or it into the rightmost position in converted
}
Running the above code with x as an unsigned char (8 bits) with value 129 (binary 10000001):
We start with i = 8 (sizeof(unsigned char) * 8). In the first loop iteration i becomes 7. We then shift x (129) right by 7 bits, which gives the value 1. This is OR'ed into converted, which becomes 1. In the next iteration, we first multiply converted by 10 (so it is now 10), then shift x right by 6 bits (giving 2) and AND it with 1 (giving 0). We OR 0 into converted, which stays 10. The 3rd through 7th iterations do the same: converted is multiplied by 10 and one specific bit of x is extracted and OR'ed in. After these iterations, converted is 1000000.
In the last iteration, converted is first multiplied by 10 and becomes 10000000; we shift x right by 0 bits, yielding the original value 129. We AND x with 1, which gives 1. That 1 is OR'ed into converted, which becomes 10000001.
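For reference, a self-contained sketch of that walkthrough (x typed as unsigned char, so the decimal result fits easily in an unsigned long long):

#include <iostream>

unsigned long long to_binary(unsigned char x) {
    unsigned long long converted = 0;
    int i = sizeof(x) * 8;          // 8 bits
    while (i > 0) {
        --i;
        converted *= 10;
        converted |= (x >> i) & 1;  // append bit number i
    }
    return converted;
}

int main() {
    std::cout << to_binary(129) << '\n';   // prints 10000001
    return 0;
}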
You're doing it wrong ;)
http://www.bellaonline.com/articles/art31011.asp
The remainder of the first division is the rightmost bit of the binary form; with your function it becomes the leftmost bit.
You can do something like this:
unsigned long long to_binary(unsigned long long x)
{
    int rem;
    unsigned long long converted = 0;
    unsigned long long multiplicator = 1;

    while (x > 0)
    {
        rem = x % 2;
        x /= 2;
        converted += rem * multiplicator;
        multiplicator *= 10;
    }

    return converted;
}
Edit: the code proposed by CygnusX1 is a little more efficient, but less easy to follow I think; I'd advise taking his version.
Improvement: I changed the stop condition of the while loop to x > 0, so the line adding x at the end can be removed.
You are actually reversing the binary number!
to_binary(2) will return 01 instead of 10; once the leading zeroes are truncated, it looks the same as 1.
How about doing it this way:
unsigned long long digit = 1;
while (x > 0) {
    if (x % 2)
        converted += digit;
    x /= 2;
    digit *= 10;
}
What about std::bitset?
http://www.cplusplus.com/reference/stl/bitset/to_string/
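A minimal sketch of that approach (here with an 8-bit width for brevity):

#include <bitset>
#include <iostream>
#include <string>

int main() {
    unsigned long long x = 6;
    std::string s = std::bitset<8>(x).to_string();   // "00000110"
    std::cout << s << '\n';
    return 0;
}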
If you want to display your number as binary, you need to format it as a string. The easiest way to do this that I know of is to use the STL bitset.
#include <bitset>
#include <iostream>
#include <sstream>
#include <string>
#include <cstdint>

typedef std::bitset<64> bitset64;

std::string to_binary(const unsigned long long int &n)
{
    // Take the halves as unsigned 32-bit values so no sign extension occurs.
    const static unsigned long long mask = 0xffffffff;
    uint32_t upper = (n >> 32) & mask;
    uint32_t lower = n & mask;
    bitset64 upper_bs(upper);
    bitset64 lower_bs(lower);
    bitset64 result = (upper_bs << 32) | lower_bs;

    std::stringstream ss;
    ss << result;
    return ss.str();
}

int main()
{
    for (int i = 0; i < 10; ++i)
    {
        std::cout << i << ": " << to_binary(i) << "\n";
    }
    return 0;
}
The output from this program is:
0: 0000000000000000000000000000000000000000000000000000000000000000
1: 0000000000000000000000000000000000000000000000000000000000000001
2: 0000000000000000000000000000000000000000000000000000000000000010
3: 0000000000000000000000000000000000000000000000000000000000000011
4: 0000000000000000000000000000000000000000000000000000000000000100
5: 0000000000000000000000000000000000000000000000000000000000000101
6: 0000000000000000000000000000000000000000000000000000000000000110
7: 0000000000000000000000000000000000000000000000000000000000000111
8: 0000000000000000000000000000000000000000000000000000000000001000
9: 0000000000000000000000000000000000000000000000000000000000001001
If your purpose is only to display numbers in their binary representation, then you may try itoa (note that it is non-standard) or std::bitset:
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <bitset>
#include <limits>

using namespace std;

int main()
{
    unsigned long long x = 1234567890;

    // C way (itoa is non-standard; leave room for the terminating NUL)
    char buffer[sizeof(x) * 8 + 1];
    itoa(x, buffer, 2);
    printf("binary: %s\n", buffer);

    // C++ way
    cout << bitset<numeric_limits<unsigned long long>::digits>(x) << endl;

    return EXIT_SUCCESS;
}
// Writes num in the given base (2..16) into buff as a NUL-terminated string.
void To(long long num, char *buff, int base)
{
    if (buff == NULL || base < 2 || base > 16) return;

    long long m = 0, no = num, i = 1;
    while ((no /= base) > 0) i++;   // count the digits
    buff[i] = '\0';

    if (num == 0) { buff[0] = '0'; return; }   // handle zero explicitly

    no = num;
    while (no > 0)
    {
        m = no % base;
        no = no / base;
        buff[--i] = (m > 9) ? ('A' + m - 10) : ('0' + m);   // digit to character
    }
}
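A hypothetical usage example (the buffer is sized for up to 64 binary digits plus the terminator):

#include <cstdio>

int main()
{
    char buff[65];
    To(241, buff, 2);            // assumes the To function above is in scope
    std::printf("%s\n", buff);   // prints 11110001
    return 0;
}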
Here is a simple solution.
#include <iostream>

using namespace std;

int main()
{
    int num = 241;

    // Print the low 16 bits, MSB first
    for (int i = 15; i >= 0; i--) cout << ((num >> i) & 1);
    cout << endl;

    // Print the low 16 bits, LSB first (reversed)
    for (int i = 0; i < 16; i++) cout << ((num >> i) & 1);
    cout << endl;

    return 0;
}