I'm trying to set bits in a binary string. I start with an empty string, and I need to set the given bit (i) in the string (s).
for the given example, the output should be 0x3001 as:
pos: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
bit: 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 1
^ ^
MSB LSB
Which, in hex is 3001.
#include<iostream>
#include<string>
#include<sstream>
#include<iomanip>
using namespace std;
void generate_string(string& s,int i){
    // Byte index that holds bit i (8 bits per byte).
    int x = i/8;
    // Zero-pad with real NUL bytes so index x exists. The original
    // "sl < x" test left the string one byte short, and the app-only
    // stringstream never carried the padding back into s, so s[x] was
    // an out-of-range write.
    if((int)s.length() <= x)
        s.resize(x + 1, '\0');
    // Per the example, bit 0 is the MOST significant bit of a byte
    // (bits 2,3 -> 0x30 and bit 15 -> 0x01), so mirror the in-byte offset.
    s[x] |= char(1 << (7 - i%8));
}
int main(){
    string s = "";
    generate_string(s,15);
    generate_string(s,2);
    generate_string(s,3);
    // Print each byte as exactly two hex digits. Masking with 0xff
    // stops a negative char from sign-extending ("ffffff80"-style
    // output), and setw/setfill keep "01" from collapsing to "1".
    for(string::size_type i=0;i<s.length();i++)
        cout<<hex<<setfill('0')<<setw(2)<<(int(s[i])&0xff);
    return 0;
}
But this program is not showing any output.
It's actually much simpler than you think. The only complicated part is calculating the bit number to be set in the byte.
Oh, and why use a string for this? Why not a vector?
Here's my solution, using std::vector instead:
void set_bit(std::vector<uint8_t>& bits, unsigned bit)
{
    // Should really use std::numeric_limits<uint8_t>::digits
    constexpr unsigned bit_count = 8;
    const unsigned index = bit / bit_count;
    // Grow with zero-filled bytes until byte 'index' exists.
    if (bits.size() <= index)
        bits.resize(index + 1, 0);
    // Bit 0 is the byte's most significant bit in this numbering, so
    // mirror the in-byte position: (bit_count - 1) - bit % bit_count
    // maps 0..7 onto 7..0.
    bits[index] |= uint8_t(1u << (bit_count - 1 - bit % bit_count));
}
You can use a similar solution using std::string too, but I don't see why.
Maybe like this?
#include<iostream>
#include<string>
using namespace std;
void set_bit(string& s,int i){
    // One '0'/'1' character per bit. Round the needed length up to the
    // whole byte that actually CONTAINS bit i: the original
    // ((i + 7) / 8) * 8 under-allocates when i is a multiple of 8
    // (i == 8 produced length 8, then wrote s[8] out of range).
    string::size_type bits = (i / 8 + 1) * 8;
    if (bits > s.length())
    {
        s += std::string(bits - s.length(), '0');
    }
    s[i] = '1';
}
int main(){
    string s;
    // Set the requested bits; the order of the calls does not matter.
    for (int bit : {2, 3, 15})
        set_bit(s, bit);
    cout << s << endl;
    return 0;
}
expected output:
0011000000000001
update: attempt 2 :-)
#include<iostream>
#include<iomanip>
#include<string>
using namespace std;
void set_bit(string& s,int i){
    // Bytes needed so that byte i/8 exists. The original (i + 7) / 8
    // under-allocates when i is a multiple of 8: for i == 8 it yields
    // one byte and then writes s[1] out of range. i/8 + 1 is always
    // enough and also fixes the signed/unsigned comparison below.
    string::size_type bytes = i / 8 + 1;
    if (bytes > s.length())
    {
        s += std::string(bytes - s.length(), 0);
    }
    // Bit 0 is the MSB of its byte, hence the 7 - (i % 8) mirror.
    s[i / 8] |= char(1 << (7-(i%8)));
}
int main(){
    string s;
    set_bit(s, 2);
    set_bit(s, 3);
    set_bit(s, 15);

    // Hex dump: two zero-padded digits per byte, masked so a negative
    // char cannot sign-extend.
    std::cout << "as hex: ";
    for (string::size_type i = 0; i < s.size(); ++i) {
        cout << hex << setfill('0') << setw(2) << (int(s[i]) & 0xff);
    }
    cout << endl;

    // Binary dump: one character per bit, MSB first, space-separated.
    std::cout << "as binary: ";
    const char* sep = "";
    for (string::size_type i = 0; i < s.size(); ++i) {
        unsigned char byte = s[i];
        for (int b = 7; b >= 0; --b)
        {
            cout << sep << (((byte >> b) & 1) ? '1' : '0');
            sep = " ";
        }
    }
    cout << endl;
    return 0;
}
expected output:
as hex: 3001
as binary: 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 1
I don't quite understand what the output should be in your problem, because you are mixing most/least significant bits with nibble order in the sample input/output, but if you would like to print a number in hex as a string you can do something like this:
#include <iostream>
#include <string>
#include <sstream>
#include <algorithm>
// Formats x as a fixed-width lowercase hex string (two digits per byte
// of int), most significant digit first.
void feed(std::string& s, int x){
    // Work on an unsigned copy: >> on a negative signed int is
    // implementation-defined.
    unsigned int ux = static_cast<unsigned int>(x);
    const unsigned int mask = 15;
    // Two hex digits (nibbles) per byte of the int being formatted.
    // The original used sizeof(void*)*16 (128 digits on LP64) and
    // streamed nibbles >= 10 as two DECIMAL characters ("10".."15").
    int nibbles = sizeof(x) * 2;
    std::stringstream ss;
    while(nibbles--){
        ss << std::hex << (ux & mask);
        ux >>= 4;
    }
    s = ss.str();
    // Digits were emitted least significant first; flip to normal order.
    std::reverse(s.begin(), s.end());
}
int main(){
    std::string hex_digits;
    feed(hex_digits, 99); // 99 == 0x63
    std::cout << hex_digits <<std::endl;
}
Related
I'm doing practice of bit manipulation in arduino with a 74HC595 shift register.
I would like to create an algorithm that allows the binary digit to perform this way:
1 0 0 0 0 0 0 1
0 1 0 0 0 0 1 0
0 0 1 0 0 1 0 0
.
.
.
1 0 0 0 0 0 0 1
In this type of function the decimal values are: (129,66,36,24,24,36,66,129) and so on in a loop.
How can I perform this type of shifting? I'm not fluent in thinking about this type of operation; I have only performed a circular shift with "an algorithm" like:
//my circular shift
myByte = myByte*128 + myByte/2
But I don't know how to perform the output that I showed.
How can I do this? Thanks
For example you can use the following approach
#include <iostream>
#include <iomanip>
#include <limits>
int main()
{
    unsigned char b = 0b10000001;
    const int width = std::numeric_limits<unsigned char>::digits / 2;

    // First half: print, then move the two "walking" bits toward the
    // middle (high nibble shifts right, low nibble shifts left).
    for ( int step = 0; step < width; step++ )
    {
        std::cout << std::hex << static_cast<int>( b ) << " - "
                  << std::dec << static_cast<int>( b ) << '\n';
        const unsigned char high = b & ( 0b1111 << width );
        const unsigned char low  = b & 0b1111;
        b = ( high >> 1 ) | ( low << 1 );
    }
    // Second half: same idea, bits move back toward the edges.
    for ( int step = 0; step < width; step++ )
    {
        std::cout << std::hex << static_cast<int>( b ) << " - "
                  << std::dec << static_cast<int>( b ) << '\n';
        const unsigned char high = b & ( 0b1111 << width );
        const unsigned char low  = b & 0b1111;
        b = ( high << 1 ) | ( low >> 1 );
    }
    return 0;
}
The program output is
81 - 129
42 - 66
24 - 36
18 - 24
18 - 24
24 - 36
42 - 66
81 - 129
You're looking for a single operation that can be applied to an 8 bit number and result in the given pattern.
You want
x_(n+1) = f(x_(n))
for all given inputs and outputs. The problem is that there are a few potential inputs that have one of two possible outputs. You want both
36 = f(66)
and
129 = f(66)
This can't be done using only one variable. You can either implement a lookup table for the sequence you want (which is what I suggest). Or you can take two variables, implement circular shifts (in opposite directions) on each, and take the bitwise OR of the results.
uint8_t n1 = 128, n2 = 1;
for(;;)
{
    // Parentheses are required: << binds tighter than |, so the
    // original "std::cout << n1 | n2" does not even compile.
    // (n1 | n2) also promotes to int, so the value prints as a number
    // rather than as a character.
    std::cout << (n1 | n2) << "\n";
    n1 = circular_right_shift(n1);
    n2 = circular_left_shift(n2);
}
Noticing that:
129,66,36,24,24,36,66,129 = 128+1; 64+2 ; 32+4; 16+8; 16+8; 32+4; 64+2; 128+1;
I ended up with this code:
int latchPin = 11;        // latch pin of the 74HC595 (toggled around shiftOut in loop())
int clockPin = 9;         // shift-clock pin fed to shiftOut
int dataPin = 12;         // serial-data pin fed to shiftOut
int dt = 2000;            // delay in ms — declared but not used below
uint8_t n1 = 128, n2 = 1; // not used below; leftover from the two-counter idea
byte myByte = 0b10000001; //in BIN — the starting 129 pattern
// One-time Arduino initialization: serial for the debug prints in
// loop(), and the three shift-register pins as outputs.
void setup() {
Serial.begin(9600);
pinMode(latchPin,OUTPUT);
pinMode(dataPin,OUTPUT);
pinMode(clockPin,OUTPUT);
}
//circular shift to the left
// Emits the 129,66,36,24,24,36,66,129 "walking bits" sequence to the
// 74HC595 once per pass, printing each value in BIN/HEX/DEC.
void loop() {
  digitalWrite(latchPin,LOW);
  shiftOut(dataPin,clockPin,LSBFIRST,myByte);
  digitalWrite(latchPin,HIGH);
  myByte = 0b10000001; //restarting the value of 129
  Serial.print("BIN: ");
  Serial.print(myByte,BIN);
  Serial.print(" --> ");
  Serial.print("HEX: ");
  Serial.print(myByte,HEX);
  Serial.print(" --> ");
  Serial.print("DEC: ");
  Serial.println(myByte,DEC);
  delay(200);
  for (int i = 0; i < 7; i++) {
    Serial.print("i: ");
    Serial.println(i);
    // Next pattern in exact integer arithmetic (the original used
    // floating-point pow() plus a +0.5 rounding fudge): drop the low
    // walking bit 2^i, shift what remains right by one, then add the
    // new low walking bit 2^(i+1). The subtraction always leaves an
    // even value, so the shift is exact.
    myByte = ((myByte - (1 << i)) >> 1) + (1 << (i + 1));
    Serial.print("BIN: ");
    Serial.print(myByte,BIN);
    Serial.print(" --> ");
    Serial.print("HEX: ");
    Serial.print(myByte,HEX);
    Serial.print(" --> ");
    Serial.print("DEC: ");
    Serial.println(myByte,DEC);
    digitalWrite(latchPin,LOW);
    shiftOut(dataPin,clockPin,LSBFIRST,myByte);
    digitalWrite(latchPin,HIGH);
    delay(100);
  }
}
And it works.
I'm having trouble understanding how this divide and conquer function compares data elements:
int main()
{
    const int arr[] = { -5, -10, -16, -3, -7 };
    // Search the whole array: [0, number-of-elements).
    const int largest = largest_element(arr, 0, sizeof(arr) / sizeof(int));
    cout << "Largest element in arr is " << largest << ".\n";
    return 0;
}
// Divide and conquer: the maximum of a[begin, past_end) is the larger
// of the maxima of its two halves; a one-element range is its own max.
int largest_element(const int *a, int begin, int past_end)
{
    assert(begin >= 0);
    assert(begin < past_end);
    // Base case: a single element.
    if (past_end - begin == 1)
        return a[begin];
    // Recurse on both halves and keep the larger result (ties go left).
    const int mid = (begin + past_end) / 2;
    const int left_large  = largest_element(a, begin, mid);
    const int right_large = largest_element(a, mid, past_end);
    //STOP! draw tree here when begin == 1, and past_end == 2.
    return left_large >= right_large ? left_large : right_large;
}
I understand that the array is simply divided into smaller sub arrays, and that once the base case is reached, it'll return a[begin]. However, based on my diagram, I don't understand how the values are truly compared if when we have an array of two elements, it simply returns the first value. For example, how will the right most element in an array be compared if it has nothing else to compare with?
Here is my diagram. I have no other diagrams to compare mine with.
I don't understand how the values are truly compared if when we have
an array of two elements, it simply returns the first value.
I am unable to reproduce that finding. When arr has only 2 elements, the function still returns the larger of the two.
Your debugger is an appropriate way to evaluate your code and identify what you are misunderstanding.
However, it might be fun to add diagnostics and allow the code to report its progress. Here is one technique:
#include <iostream>
#include <iomanip>
#include <sstream>
#include <string>
#include <cassert>
// Diagnostic harness: runs largest_element on two sample arrays while
// printing an indented trace of every recursive call.
// Fix: the non-standard POSIX typedef `uint` is replaced with the
// standard `unsigned int` so the class compiles on any conforming
// toolchain.
class T548_t
{
    int depth; // current recursion depth; set to 0 in exec() before each run
public:
    T548_t() = default;
    ~T548_t() = default;
    // Runs both demos; always returns 0 (suitable as main's exit code).
    int exec()
    {
        {
            depth = 0; // 0 1 2 3 4
            int arr[] = { -5, -10, -16, -3, -7 }; // -3 is largest
            unsigned int sz = sizeof(arr) / sizeof(int);
            std::cout << hdr(sz);
            std::cout << "\n\n Largest element in arr is "
            << largest_element(arr, 0, sz)
            << ".\n" << std::endl;
        }
        {
            depth = 0;
            int arr[] = { -10, -5 }; // -5 is largest
            unsigned int sz = sizeof(arr) / sizeof(int);
            std::cout << hdr(sz);
            std::cout << "\n\n Largest element in arr is "
            << largest_element(arr, 0, sz)
            << ".\n" << std::endl;
        }
        return 0;
    }
private: // methods
    // Same divide-and-conquer max as the question's code, plus a trace
    // line (via show()) on entry and a depth decrement on exit.
    int largest_element(const int* a, int begin, int past_end)
    {
        int result;
        int mid;
        int left_large;
        int right_large;
        assert(begin >= 0);
        assert(begin < past_end);
        std::cout << show(a, begin, past_end) << std::flush;
        if (past_end - begin == 1)
            result = a[begin];
        else {
            mid = (begin + past_end) / 2;
            left_large = largest_element(a, begin, mid);
            right_large = largest_element(a, mid, past_end);
            if (left_large >= right_large)
                result = left_large;
            else
                result = right_large;
        }
        //STOP! draw tree here when begin == 1, and past_end == 2.
        depth -= 1;
        return result;
    }
    // One trace line: the sub-range, the depth, and the elements,
    // shifted right by 4 columns per skipped leading element.
    std::string show(const int* a, int b, int e)
    {
        std::stringstream ss;
        depth += 1;
        ss << "\n a[" << b << " .. " << e-1 << "] "
        << depth << " ";
        if(b > 0)
            ss << std::setw(4*b) << " ";
        for (int i = b; i < e; ++i)
            ss << std::setw(4) << a[i];
        return ss.str();
    }
    // Column-header line listing the element indices 0..sz-1.
    std::string hdr(unsigned int sz)
    {
        std::stringstream ss;
        ss << "\n "
        << " b .. e"
        << std::setw(8) << "depth"
        << std::setw(4) << " ";
        for (unsigned int i=0; i<sz; ++i)
            ss << i << " ";
        return ss.str();
    }
}; // class T548_t
int main(int, char**)
{
T548_t t548;
return t548.exec();
}
Here is the output:
b .. e depth 0 1 2 3 4
a[0 .. 4] 1 -5 -10 -16 -3 -7
a[0 .. 1] 2 -5 -10
a[0 .. 0] 3 -5
a[1 .. 1] 3 -10
a[2 .. 4] 2 -16 -3 -7
a[2 .. 2] 3 -16
a[3 .. 4] 3 -3 -7
a[3 .. 3] 4 -3
a[4 .. 4] 4 -7
Largest element in arr is -3.
b .. e depth 0 1
a[0 .. 1] 1 -10 -5
a[0 .. 0] 2 -10
a[1 .. 1] 2 -5
Largest element in arr is -5.
something like this is only going to help you if the array is sorted and you can ignore half of the results on every recursion.
just loop through the elements to find the largest number. you have to check every value
How can I make a program that checks if binary representation of a given integer is periodical with period length m >= 2?
For example:
Binary representation of 10 is periodical
10 base 10 = 1010 base 2, periodical with period length 2
Binary representation of 9 is not periodical
9 base 10 = 1001 base 2
Binary representation of 153 is periodical
153 base 10 = 10011001 base 2, periodical with period length 4
Is there any specific algorithm for doing this?
I am working in C++.
What you can do is rotate the bits and check each time if the numbers are equal. i.e. the rotated one and the one you start with
// it is being assumed the size of int is 4 bytes
int num = 153;
int temp = num;
unsigned int i = 0;
for (i=0; i<(8*sizeof(int))/2; i++){
    // Rotate the 32-bit pattern right by one. INT_MAX (0x7fffffff)
    // clears the bit dragged in by the arithmetic shift of a signed
    // int; the original used LONG_MAX (too wide on LP64) and was
    // missing this statement's semicolon entirely.
    temp = ((temp >> 1) & INT_MAX) | (temp << 31);
    if (!(temp ^ num)){ // identical again after i+1 rotations
        break;
    }
}
// NOTE(review): this only detects periods of patterns that span the
// full 32 bits — rotation wraps through the leading zero bits, so a
// value like 153 (0b10011001) is reported "Not periodic" here.
if (i<(8*sizeof(int))/2)
    std::cout << "Period is " << i + 1 << "\n"; // i+1 rotations matched
else
    std::cout << "Not periodic";
The complexity is linear in the number of bits.
KMP is the specific algorithm to find the period of any string, of course including the binary representation of a number, which is just a string. It runs in O(n) time.
#include <iostream>
#include <algorithm>
using namespace std;
// Returns how many times the smallest period of s repeats across the
// whole string (1 when s is not periodic). Uses the KMP failure
// function, O(n).
int calc_period(string s) {
    // Guard: the original wrote pre[0] unconditionally, which is an
    // out-of-bounds write when s is empty.
    if (s.empty()) {
        return 1;
    }
    vector<int> pre(s.size());
    // this condition is keeped true in the for-loop:
    // s[0..pre[i]] is the longest suffix of s[0..i] among s[0..i]'s
    // proper prefixes (pre[i] == -1 when there is none) — the KMP
    // failure function.
    int k = -1;
    pre[0] = -1;
    for (int i = 1; i < int(s.size()); ++i) {
        while (k >= 0 && s[k + 1] != s[i]) {
            k = pre[k];
        }
        if (s[k + 1] == s[i]) {
            ++k;
        }
        pre[i] = k;
    }
    // l is the smallest period length; it divides the total length
    // exactly iff the string is l-periodic.
    int l = s.size() - 1 - pre[s.size() - 1];
    return s.size() % l == 0 ? s.size() / l : 1;
}
// Binary representation of x (no leading zeros; "0" for zero).
string to_binary(long long x) {
    if (x == 0) {
        return "0";
    }
    string s;
    // Collect bits least significant first, then flip.
    for (; x; x >>= 1) {
        s.push_back((x & 1) ? '1' : '0');
    }
    reverse(s.begin(), s.end());
    return s;
}
int main() {
    int x;
    cin >> x;
    // Report how many times the smallest period of x's binary form
    // repeats across the whole representation.
    const int repetitions = calc_period(to_binary(x));
    cout << repetitions << endl;
    return 0;
}
You can try this code out to see how it works. If you want to learn deeper about KMP, read its wiki page or related text books such as "Introduction to Algorithm".
I'm trying to implement a function that writes double to binary file in little endian byte order.
So far I have class BinaryWriter implementation:
// Opens the member stream for binary output: binary mode prevents any
// newline translation, and the classic "C" locale keeps the output
// independent of the global locale.
// NOTE(review): String is a project-defined type — assumed here to
// expose c_str(); confirm against its declaration.
void BinaryWriter::open_file_stream( const String& path )
{
// open output stream
m_fstream.open( path.c_str(), std::ios_base::out | std::ios_base::binary);
m_fstream.imbue(std::locale::classic());
}
// Writes v as 4 bytes, least significant first (little endian),
// independent of the host's byte order.
void BinaryWriter::write( int v )
{
    char data[4];
    for (int byte = 0; byte < 4; ++byte)
        data[byte] = static_cast<char>((v >> (8 * byte)) & 0xFF);
    m_fstream.write(data, 4);
}
void BinaryWriter::write( double v )
{
// TBD
}
void BinaryWriter::write( int v ) was implemented using Sven answer to What is the correct way to output hex data to a file? post.
Not sure how to implement void BinaryWriter::write( double v ).
I tried naively follow void BinaryWriter::write( int v ) implementation but it didn't work. I guess I don't fully understand the implementation.
Thank you guys
You didn't write this, but I'm assuming the machine you're running on is BIG endian, otherwise writing a double is the same as writing an int, only it's 8 bytes.
const int __one__ = 1; // NOTE(review): "__one__" is a reserved identifier (double underscore); consider renaming
const bool isCpuLittleEndian = 1 == *(char*)(&__one__); // CPU endianness
const bool isFileLittleEndian = false; // output endianness - you choose :)
// Writes v in the chosen file endianness, byte-swapping only when the
// host and the file disagree.
void BinaryWriter::write( double v )
{
    if (isCpuLittleEndian ^ isFileLittleEndian) {
        // Reverse the 8 bytes of v into a scratch buffer.
        const char* src = (char*)(double*)(&v);
        char data[8];
        for (int i = 0; i < 8; ++i)
            data[i] = src[7-i];
        m_fstream.write(data, 8);
    }
    else
        m_fstream.write((char*)(&v), 8);
}
But don't forget generally int is 4 octects and double is 8 octets.
Other problem is static_cast. See this example :
double d = 6.1;
char c = static_cast<char>(d); //c == 6  (fixed: static_cast needs its <char> template argument)
Solution reinterpret value with pointer :
double d = 6.1;
char* c = reinterpret_cast<char*>(&d); // views d's raw bytes; no value conversion happens
Afterwards, you can use write( Int_64 *v ), which is an extension of write( Int_t v ).
You can use this method with :
double d = 45612.9874
binary_writer.write64(reinterpret_cast<int_64*>(&d));
Don't forget that sizeof(double) depends on the system.
A little program converting doubles to an IEEE little endian representation.
Besides the test in to_little_endian, it should work on any machine.
#include <cmath>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <limits>
#include <random>
#include <sstream>
// Builds the little-endian IEEE-754 binary64 encoding of 'value' by
// hand (sign, exponent, and fraction extracted with portable math),
// then checks it against the host's native representation. Returns
// true when the round-trip reproduces 'value'; on mismatch prints both
// bit patterns field by field and returns false.
bool to_little_endian(double value) {
    enum { zero_exponent = 0x3ff }; // binary64 exponent bias (1023)
    uint8_t sgn = 0; // 1 bit
    uint16_t exponent = 0; // 11 bits
    uint64_t fraction = 0; // 52 bits
    double d = value;
    // signbit (rather than "d < 0") so -0.0 keeps its sign.
    if(std::signbit(d)) {
        sgn = 1;
        d = -d;
    }
    if(std::isinf(d)) {
        exponent = 0x7ff; // all-ones exponent, zero fraction: infinity
    }
    else if(std::isnan(d)) {
        exponent = 0x7ff;
        fraction = 0x8000000000000; // top fraction bit set: NaN
    }
    else if(d) {
        int e;
        double f = frexp(d, &e); // d == f * 2^e, f in [0.5, 1)
        // A leading one is implicit.
        // Hence 1.0 has a zero fraction and the zero_exponent:
        exponent = uint16_t(e + zero_exponent - 1);
        unsigned bits = 0;
        // Peel off mantissa bits one at a time, most significant first.
        while(f) {
            f *= 2;
            fraction <<= 1;
            if (1 <= f) {
                fraction |= 1;
                f -= 1;
            }
            ++bits;
        }
        // Left-align to 53 bits, then mask off the implicit leading one.
        fraction = (fraction << (53 - bits)) & ((uint64_t(1) << 52) - 1);
    }
    // Little endian representation.
    // Bytes 0..5: low 48 fraction bits, least significant byte first.
    uint8_t data[sizeof(double)];
    for(unsigned i = 0; i < 6; ++i) {
        data[i] = fraction & 0xFF;
        fraction >>= 8;
    }
    // Byte 6: low exponent nibble + remaining top 4 fraction bits.
    // Byte 7: sign bit + high 7 exponent bits.
    data[6] = (exponent << 4) | fraction;
    data[7] = (sgn << 7) | (exponent >> 4);
    // This test works on a little endian machine, only.
    double result = *(double*) &data; // NOTE(review): type-punning cast; UB under strict aliasing
    if(result == value || (std::isnan(result) && std::isnan(value))) return true;
    else {
        // Mismatch: overlay a bit-field struct (again little-endian-host
        // specific) to print each field of both encodings.
        struct DoubleLittleEndian {
            uint64_t fraction : 52;
            uint64_t exp : 11;
            uint64_t sgn : 1;
        };
        DoubleLittleEndian little_endian;
        std::memcpy(&little_endian, &data, sizeof(double));
        std::cout << std::hex
            << " Result: " << result << '\n'
            << "Fraction: " << little_endian.fraction << '\n'
            << " Exp: " << little_endian.exp << '\n'
            << " Sgn: " << little_endian.sgn << '\n'
            << std::endl;
        std::memcpy(&little_endian, &value, sizeof(value));
        std::cout << std::hex
            << " Value: " << value << '\n'
            << "Fraction: " << little_endian.fraction << '\n'
            << " Exp: " << little_endian.exp << '\n'
            << " Sgn: " << little_endian.sgn
            << std::endl;
        return false;
    }
}
int main()
{
    // Fixed edge cases first: ±1, ±0, ±infinity, NaN.
    to_little_endian(+1.0);
    to_little_endian(+0.0);
    to_little_endian(-0.0);
    to_little_endian(+std::numeric_limits<double>::infinity());
    to_little_endian(-std::numeric_limits<double>::infinity());
    to_little_endian(std::numeric_limits<double>::quiet_NaN());
    // Then 10000 deterministic pseudo-random doubles in [-100, +100].
    std::default_random_engine random;
    std::uniform_real_distribution<double> distribute(-100, +100);
    for (unsigned loop = 0; loop != 10000; ++loop)
        to_little_endian(distribute(random));
    return 0;
}
I wonder how to reverse something like this. So having a mask where auto mask = 1ULL << 20; how to get 20 out from mask?
Loop-free
Many years ago when I was writing a bit-wise arithmetic for a chess engine, I found a fast implementation which is useful for your requirement, it's loop-free. This method will return the position of the first 1-bit from right-to-left (Least Significant Bit):
// Index (0-based) of the least-significant set bit of 'value', computed
// without a loop; returns (unsigned)-1, i.e. 0xFFFFFFFF, for value == 0.
inline unsigned int lsb(unsigned long long value)
{
    if (!value)
        return -1; // wraps to 0xFFFFFFFF: the return type is unsigned
    // Two's-complement trick: value & -value isolates the lowest set bit.
    value &= -value;
    // Fold the isolated bit into 32 bits; the discarded high word is
    // accounted for by the "(value >> 32) != 0" term below.
    unsigned int lsb = (unsigned) value | (unsigned) (value >> 32);
    // Each mask test yields one bit of the index (32, 16, 8, 4, 2, 1),
    // assembled most-significant-first by the repeated "<< 1" steps:
    // e.g. 0xaaaaaaaa covers every odd bit position, so its test is the
    // index's lowest bit.
    return (((((((((((unsigned) (value >> 32) != 0) << 1)
    + ((lsb & 0xffff0000) != 0)) << 1)
    + ((lsb & 0xff00ff00) != 0)) << 1)
    + ((lsb & 0xf0f0f0f0) != 0)) << 1)
    + ((lsb & 0xcccccccc) != 0)) << 1)
    + ((lsb & 0xaaaaaaaa) != 0);
}
int main()
{
    const unsigned long long x = 1ULL << 20;
    std::cout << lsb(x) << std::endl;
}
Output
20
I think, I had found it here.
Using log:
#include <iostream>
#include <cmath>
int main() {
    const auto mask = 1ULL << 20;
    // For a single-bit value, log2 is exactly the bit's index.
    const double bit_index = log2(mask);
    std::cout << bit_index << std::endl;
    return 0;
}
or loop and shift:
#include <iostream>
int main() {
    auto mask = 1ULL << 20;
    // Shift right until the single bit falls off the end; the iteration
    // count at that moment is the bit's index.
    for (unsigned int c = 0; c < sizeof(mask) * 8 && mask; c++) {
        mask >>= 1;
        if (!mask)
            std::cout << c << std::endl;
    }
    return 0;
}
If it's a 64-bit mask, you can compute it modulo 67 and do a table lookup.
To wit:
// Powers of two are pairwise distinct modulo the prime 67, so for a
// 64-bit single-bit mask one modulo plus one lookup recovers the bit
// index. Unused residues hold -1.
static int table[67] = {
-1, 0, 1,39, 2,15,40,23, 3,12,
16,59,41,19,24,54, 4,-1,13,10,
17,62,60,28,42,30,20,51,25,44,
55,47, 5,32,-1,38,14,22,11,58,
18,53,63, 9,61,27,29,50,43,46,
31,37,21,57,52, 8,26,49,45,36,
56, 7,48,35, 6,34,33};
int unmask(unsigned long long ull) {
    const unsigned int residue = ull % 67;
    return table[residue];
}
//first if you want to make sure only 1 bit is "on" you can do that:
// mask & (mask-1) clears the lowest set bit, so a nonzero result means
// more than one bit was on. NOTE(review): mask == 0 also passes this
// test even though no bit is set.
if ((mask & mask-1) != 0)
{
//you have more than 1 bit "on", deal with it...
}
//finding which bit is "on" can be achieved in a loop
int count = 0; // fixed: "int count 0;" was a syntax error
while (mask > 1)
{
mask>>=1;
count++;
}
//At this point count will have the required value (20 in your example)
Option 1: iterate
while (mask && !(mask & 1)) { mask>>=1; count++; }
Option 2: iterate multiple bits at a time:
unsigned long long a=0xFFFFFFFFULL; int b=32;
// Binary search for the set bit: when the low half (mask 'a') is
// empty the bit lies above it, so record b and shift it away; then
// halve both the step and the half-mask. (The original shifted 'mask'
// unconditionally and never shrank 'a', which discarded the bit's
// position — it reported 0 for 1ULL<<20.)
while (mask > 1) {
    if (!(mask & a)) { count+=b; mask>>=b; }
    b>>=1;
    a>>=b;
}
Option 3: Convert the mask to double or float and extract the exponent.
// Overlay the float's bits with a bit-field struct and read the
// exponent. For a single-bit mask m == 2^n (n <= 63), (float)m is
// exact and its biased exponent is 127 + n; only 7 of the 8 exponent
// bits are kept below, so the field reads (127 + n) mod 128 == n - 1
// as a signed 7-bit value (and -1 for n == 0) — hence the "+ 1".
// NOTE(review): reading a union member other than the one last written
// is type punning (UB in standard C++), and bit-field layout is
// implementation-defined; this works on common ABIs only.
union {
struct {
int mantissa:23;
int exp:7;
int sign:1;
} s;
float f;
} u = { (float) mask };
return u.s.exp + 1;
A simple loop will be quite okay:
for (int bit = 0; bit < sizeof(mask) * 8; bit++)
{
if ((1ULL << bit) & mask)
std::cout << "Bit " << bit << " is set in the mask\n";
}
How about a TMP solution:
#include <iostream>
// Compile-time bit index: shifting the mask right once lowers the
// index by exactly one, so recurse until the mask is 1.
template < unsigned long long MASK >
struct MaskIndex
{
    enum { i = 1 + MaskIndex < (MASK >> 1) >::i };
};
// Base case: mask 1 is bit 0.
template <>
struct MaskIndex < 1 >
{
    enum { i = 0 };
};
int main()
{
    const unsigned long long mask = 1ULL << 20;
    // The index is computed entirely at compile time.
    std::cout << MaskIndex < mask >::i << std::endl;
    return 0;
}
You can try this..
if (mask & (1ULL << 20)) {
    cout << "20th bit is set";
}