Binary code in 8 bits - c++

So I'm writing code that converts decimal numbers to binary. I just want my binary number to be in 8 bits. Any suggestions?
#include <stdio.h>
#include <stdlib.h>
using namespace std;
long decimalToBinary(long n);
int main() {
long decimal;
printf("Enter a decimal number: ");
scanf_s("%ld", &decimal);
printf("Binary number = %ld\n", decimalToBinary(decimal));
return 0;
}
long decimalToBinary(long n) {
int remainder;
long binary = 0, i = 1;
while (n != 0) {
remainder = n % 2;
n = n / 2;
binary = binary + (remainder*i);
i = i * 10;
}
return binary;
}

long decimalToBinary(long n) {
return n & 0xFF;
}
or (n & 0xFF00) >> 8 or (n & 0xFF0000) >> 16 or (n & 0xFF000000) >> 24. Depends on which 8 bits you want.
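If the goal is simply to print the value as exactly eight binary digits, std::bitset can do the zero-padding for you. A minimal sketch (my addition, not from the answers above; like the masking answer, it keeps only the low 8 bits):
#include <bitset>
#include <stdio.h>
#include <iostream>
int main() {
long decimal;
printf("Enter a decimal number: ");
if (scanf("%ld", &decimal) != 1) return 1;
// bitset<8> keeps only the low 8 bits and always prints exactly 8 digits
std::cout << "Binary number = " << std::bitset<8>(decimal) << '\n';
return 0;
}
With this, 5 prints as 00000101 and 255 prints as 11111111.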

Related

How to take input 128 bit unsigned integer in c++

I am new to C++. I want to read an unsigned 128-bit integer using scanf and print it using printf. As I am new to C++, these are the only input/output methods I know. Can someone help me out?
You could use Boost, but you have to install this library set yourself:
#include <boost/multiprecision/cpp_int.hpp>
#include <iostream>
int main()
{
using namespace boost::multiprecision;
uint128_t v = 0;
std::cin >> v; // read
std::cout << v << std::endl; // write
return 0;
}
If you want to get by without Boost, you can store the value in two uint64_t variables, like this:
std::string input;
std::cin >> input;
uint64_t high = 0, low = 0, tmp;
for(char c : input)
{
high *= 10;
tmp = low * 10;
if(tmp / 10 != low)
{
high += ((low >> 32) * 10 + ((low & 0xffffffff) * 10 >> 32)) >> 32; // carry of low * 10 out of the low 64 bits
}
low = tmp;
tmp = low + c - '0';
high += tmp < low;
low = tmp;
}
Printing, however, gets uglier:
std::vector<uint64_t> v;
while(high | low)
{
uint64_t const pow10 = 100000000;
uint64_t const mod = (((uint64_t)1 << 32) % pow10) * (((uint64_t)1 << 32) % pow10) % pow10;
tmp = high % pow10;
uint64_t temp = tmp * mod % pow10 + low % pow10;
v.push_back((tmp * mod + low) % pow10);
low = low / pow10 + tmp * 184467440737 + tmp * /*0*/9551616 / pow10 + (temp >= pow10);
high /= pow10;
}
std::vector<uint64_t>::reverse_iterator i = v.rbegin();
while(i != v.rend() && *i == 0)
{
++i;
}
if(i == v.rend())
{
std::cout << 0;
}
else
{
std::cout << *i << std::setfill('0');
for(++i; i != v.rend(); ++i)
{
std::cout << std::setw(8) << *i;
}
}
The above solution works up to (and including)
340282366920938463463374516198409551615
= 0x ffff ffff ffff ffff ffff ad06 1410 beff
Above that value, there is an error.
Note: pow10 can be varied; some other constants then need to be adjusted, e.g. for pow10 = 10:
low = low / pow10 + tmp * 1844674407370955161 + tmp * 6 / pow10 + (temp >= pow10);
and
std::cout << std::setw(1) << *i; // setw also can be dropped in this case
Increasing pow10 reduces the maximum number for which printing still works correctly; decreasing it raises the maximum. With pow10 = 10, the maximum is
340282366920938463463374607431768211425
= ffff ffff ffff ffff ffff ffff ffff ffe1
I don't know yet where the error for the very highest numbers comes from, possibly some unconsidered overflow. Any suggestions are appreciated; then I'll improve the algorithm. Until then, I'd reduce pow10 to 10 and introduce special handling for the highest 30 failing numbers:
std::string const specialValues[30] = { /*...*/ }; // the 30 failing values
if(high == 0xffffffffffffffff && low > 0xffffffffffffffe1)
{
std::cout << specialValues[low - 0xffffffffffffffe2];
}
else
{
/* ... */
}
That way we can at least handle all valid 128-bit values correctly.
You can try from_string_128_bits and to_string_128_bits with 128-bit unsigned integers in C:
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
__uint128_t from_string_128_bits(const char *str) {
__uint128_t res = 0;
for (; *str; res = res * 10 + *str++ - '0');
return res;
}
static char *to_string_128_bits(__uint128_t num) {
__uint128_t mask = -1;
size_t a, b, c = 1, d;
char *s = malloc(2);
strcpy(s, "0");
for (mask -= mask / 2; mask; mask >>= 1) {
for (a = (num & mask) != 0, b = c; b;) {
d = ((s[--b] - '0') << 1) + a;
s[b] = "0123456789"[d % 10];
a = d / 10;
}
for (; a; s = realloc(s, ++c + 1), memmove(s + 1, s, c), *s = "0123456789"[a % 10], a /= 10);
}
return s;
}
int main(void) {
__uint128_t n = from_string_128_bits("10000000000000000000000000000000000001");
n *= 7;
char *s = to_string_128_bits(n);
puts(s);
free(s); // string must be freed
// prints 70000000000000000000000000000000000007
}
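If you really want to go through scanf and printf, as the question asks, one possible wiring is to read the digits as a string first. This is a sketch of my own (it replaces the demo main above and assumes the two helper functions are in scope):
int main(void) {
char buf[64];
if (scanf("%39s", buf) != 1) return 1; // a 128-bit value has at most 39 decimal digits
__uint128_t n = from_string_128_bits(buf);
char *s = to_string_128_bits(n);
printf("%s\n", s); // printf cannot format __uint128_t directly, so print the string form
free(s);
return 0;
}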

Convert C++ double to DEC double

I want to be able to take a user-given double and write it out in the DEC 64 dpfp format (http://www.wsmr.army.mil/RCCsite/Documents/106%20Previous%20Versions/106-07/appendixO.pdf). I'm having trouble getting this to line up correctly; does anyone have experience with, or conversion functions for, DEC types?
This seems pretty straightforward; let me take a shot at it. Note that I don't have any way of testing this for correctness.
std::vector<unsigned char> ToDEC64Float(double d)
{
uint64_t dec_bits = 0ULL;
if (d != 0.0)
{
assert(sizeof(double) == sizeof(uint64_t));
uint64_t bits = *reinterpret_cast<uint64_t*>(&d);
uint64_t fraction = bits & 0x000fffffffffffffULL;
int exp = (int)((bits >> 52) & 0x7ff) - 1023;
bool sign = (bool)(bits & 0x8000000000000000ULL);
// convert the individual values for the new format
fraction <<= 3;
exp += 1 + 128;
if (exp > 255)
throw std::overflow_error("overflow");
if (exp < 0 || (exp == 0 && fraction != 0))
throw std::underflow_error("underflow");
dec_bits = (uint64_t)sign << 63 | (uint64_t)exp << 55 | fraction;
}
std::vector<unsigned char> result;
for (int i = 0; i < 64; i+=8)
result.push_back((unsigned char)((dec_bits >> i) & 0xff));
return result;
}
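A quick usage sketch of my own (not part of the answer): it prints the resulting bytes in hex, and it assumes ToDEC64Float above is defined in the same file, which also needs <cstdint>, <cassert> and <stdexcept> to compile.
#include <cstdio>
#include <vector>
int main() {
std::vector<unsigned char> dec = ToDEC64Float(3.14159); // any nonzero sample value
for (unsigned char byte : dec)
printf("%02x ", (unsigned)byte); // least significant byte first, as pushed by the loop above
printf("\n");
return 0;
}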
double static const DECBytesToDouble(uint64_t value)
{
//DEC Byte Conversion Constants
static const float MANTISSA_CONSTANT = 0.5;
static const int32_t EXPONENT_BIAS = 128;
uint8_t * byte_array = (uint8_t*)&value;
uint8_t first = byte_array[0];
uint8_t second = byte_array[1];
uint8_t third = byte_array[2];
uint8_t fourth = byte_array[3];
uint8_t fifth = byte_array[4];
uint8_t sixth = byte_array[5];
uint8_t seventh = byte_array[6];
uint8_t eighth = byte_array[7];
// |second |first|fourth|third|sixth|fifth|eighth|seventh|
// |s|exponent|mantissa |
bool sign = second & 0x80;
std::cout<<"(DECBytesToDouble) Sign: "<<sign<<std::endl;
int32_t exponent = ((second & 0x7F) << 1) + ((first >> 7) & 0x1);
std::cout<<"(DECBytesToDouble) Exponent: "<<exponent<<std::endl;
int64_t mantissa = ((int64_t)(first & 0x7F) << 48) + ((int64_t)fourth << 40)
+ ((int64_t)third << 32) + ((int64_t)sixth << 24) + ((int64_t)fifth << 16)
+ ((int64_t)eighth << 8) + (int64_t) seventh;
std::cout<<"(DECBytesToDouble) Fraction: "<<mantissa<<std::endl;
double fraction = MANTISSA_CONSTANT;
for (int32_t i=0; i<55; i++)
{
fraction += ((mantissa >> i) & 0x1) * pow(2,i-56);
}//for
return pow(-1,sign)*fraction*pow(2,exponent-EXPONENT_BIAS);
}//DECBytesToDouble
uint64_t static const DoubleToDECBytes(double value)
{
static const int32_t EXPONENT_BIAS = 128;
uint64_t dec_bits = 0ULL;
if (value != 0.0)
{
uint64_t bits = *reinterpret_cast<uint64_t*>(&value);
uint64_t fraction = bits & 0x000fffffffffffffULL;
int exp = (int)((bits >> 52) & 0x7ff) - 1023;
bool sign = false;
if(value < 0)
{
sign = true;
}//if
std::cout<<"(DoubleToDECBytes) Sign: "<<sign<<std::endl;
// convert the individual values for the new format
fraction <<= 3;
exp += EXPONENT_BIAS + 1;
std::cout<<"(DoubleToDECBytes) Exponent: "<<exp<<std::endl;
std::cout<<"(DoubleToDECBytes) Fraction: "<<fraction<<std::endl;
if (exp > 255)
throw std::overflow_error("overflow");
if (exp < 0 || (exp == 0 && fraction != 0))
throw std::underflow_error("underflow");
dec_bits = (uint64_t)(sign << 63) | (uint64_t)(exp << 55) | fraction; // note: the shifts here are done on plain ints before the cast, unlike in ToDEC64Float above, so the sign/exponent land in the wrong bits (see the output below)
//|second |first|fourth|third|sixth|fifth|eighth|seventh|
uint8_t * byte_array = (uint8_t*)&dec_bits;
uint8_t first = byte_array[0];
uint8_t second = byte_array[1];
uint8_t third = byte_array[2];
uint8_t fourth = byte_array[3];
uint8_t fifth = byte_array[4];
uint8_t sixth = byte_array[5];
uint8_t seventh = byte_array[6];
uint8_t eighth = byte_array[7];
byte_array[7] = second;
byte_array[6] = first;
byte_array[5] = fourth;
byte_array[4] = third;
byte_array[3] = sixth;
byte_array[2] = fifth;
byte_array[1] = eighth;
byte_array[0] = seventh;
std::cout<<"(DoubleToDECBytes) Guess ="<<dec_bits<<std::endl;
}//if
/*std::vector<unsigned char> result;
for (int i = 0; i < 64; i+=8)
{
result.push_back((unsigned char)((dec_bits >> i) & 0xff));
}//for
uint64_t final_result = 0;
memcpy(&final_result, &result[0], sizeof(uint64_t));
std::cout<<"Final result: "<<final_result<<std::endl;*/
return dec_bits;
}//DoubleToDECBytes
Output:
input uint64_t value: 9707381994276473045
(DECBytesToDouble) Sign: 0
(DECBytesToDouble) Exponent: 145
(DECBytesToDouble) Fraction: 24184718387676855
output double value: 109527.7465
(DoubleToDECBytes) Sign: 0
(DoubleToDECBytes) Exponent: 145
(DoubleToDECBytes) Fraction: 24184718387676848
(DoubleToDECBytes) Guess =9705411669439479893
Converted double, uint64_t: 9705411669439479893
uint64_t difference: 1970324836993152
(DECBytesToDouble) Sign: 0
(DECBytesToDouble) Exponent: 0
(DECBytesToDouble) Fraction: 24184718387676848
output double value: 0.0000
I came to find that integrating the libvaxdata C library into my C++ solution was the best way to go. In my use case all that was required was some byte flipping, and the routines work flawlessly.
I recommend the libvaxdata library when dealing with conversions to/from IEEE/DEC types.
http://pubs.usgs.gov/of/2005/1424/

how to convert digits from a integer in a byte array in C++

I tried to convert the digits of a number like 9140 into a char array of bytes. I finally did it, but for some reason one of the digits comes out wrong.
The idea is to separate each digit and convert it into a byte[4], saving it in a global array of bytes; that means the array holds one digit every 4 positions. I insert each digit at the end of the array and finally insert the number of digits at the end of the array.
The problem happens at random with some values: for example, for the value 25 it works, but for 9140 it returns 9040. What could be the problem? This is the code:
void convertCantToByteArray4Digits(unsigned char *bufferDigits,int cant){
//char bufferDigits[32];
int bufferPos=20;
double cantAux=cant;
int digit=0,cantDigits=0;
double subdigit=0;
while(cantAux > 0){
cout<<"VUELTA"<<endl;
cantAux/=10;
cout<<"cantAux/=10:"<<cantAux<<endl;
cout<<"floor"<<floor(cantAux)<<endl;
subdigit=cantAux-floor(cantAux);
cout<<"subdigit"<<subdigit<<endl;
digit=static_cast<int>(subdigit*10);
cout<<"digit:"<<subdigit*10<<endl;
cantAux=cantAux-subdigit;
cout<<"cantAux=cantAux-subdigit:"<<cantAux<<endl;
bufferDigits[bufferPos-4] = (digit >> 24) & 0xFF;
std::cout<<static_cast<int>(bufferDigits[bufferPos-4])<<std::endl;
bufferDigits[bufferPos-3] = (digit >> 16) & 0xFF;
std::cout<<static_cast<int>(bufferDigits[bufferPos-3])<<std::endl;
bufferDigits[bufferPos-2] = (digit >> 8) & 0xFF;
std::cout<<static_cast<int>(bufferDigits[bufferPos-2])<<std::endl;
bufferDigits[bufferPos-1] = (digit) & 0xFF;
std::cout<<static_cast<int>(bufferDigits[bufferPos-1])<<std::endl;
/*bufferDigits[0] = digit >> 24;
std::cout<<bufferDigits[0]<<std::endl;
bufferDigits[1] = digit >> 16;
bufferDigits[2] = digit >> 8;
bufferDigits[3] = digit;*/
bufferPos-=4;
cantDigits++;
}
cout<<"sizeof"<<sizeof(bufferDigits)<<endl;
cout<<"cantDigits"<<cantDigits<<endl;
bufferPos=24;
bufferDigits[bufferPos-4] = (cantDigits) >> 24;
//std::cout<<bufferDigits[bufferPos-4]<<std::endl;
bufferDigits[bufferPos-3] = (cantDigits) >> 16;
bufferDigits[bufferPos-2] = (cantDigits) >> 8;
bufferDigits[bufferPos-1] = (cantDigits);
}
bufferDigits has a size of 24 bytes, and the cant parameter is the number to convert. I welcome any questions about my code.
I feel this is the most C++ way of doing it, and it probably answers your question, if I understood correctly:
#include <string>
#include <iterator>
#include <iostream>
#include <algorithm>
template <typename It>
It tochars(unsigned int i, It out)
{
It save = out;
do *out++ = '0' + i%10;
while (i/=10);
std::reverse(save, out);
return out;
}
int main()
{
char buf[10];
char* end = tochars(9140, buf);
*end = 0; // null terminate
std::cout << buf << std::endl;
}
Instead of using a double and the floor function, just use an int and the modulus operator.
void convertCantToByteArray4Digits(unsigned char *bufferDigits,int cant)
{
int bufferPos=20;
int cantAux=cant;
int digit=0,cantDigits=0;
while(cantAux > 0)
{
cout<<"VUELTA"<<endl;
digit = cantAux % 10;
cout<<"digit:"<<digit<<endl;
cantAux /= 10;
cout<<"cantAux/=10:"<<cantAux<<endl;
bufferDigits[bufferPos-4] = (digit >> 24) & 0xFF;
std::cout<<static_cast<int>(bufferDigits[bufferPos-4])<<std::endl;
bufferDigits[bufferPos-3] = (digit >> 16) & 0xFF;
std::cout<<static_cast<int>(bufferDigits[bufferPos-3])<<std::endl;
bufferDigits[bufferPos-2] = (digit >> 8) & 0xFF;
std::cout<<static_cast<int>(bufferDigits[bufferPos-2])<<std::endl;
bufferDigits[bufferPos-1] = (digit) & 0xFF;
std::cout<<static_cast<int>(bufferDigits[bufferPos-1])<<std::endl;
bufferPos-=4;
cantDigits++;
}
// ... then store cantDigits at the end of the buffer, as in the question's code
}
Why not use a union?
union {
int i;
char c[4];
};
i = 2530;
// now c is set appropriately
Or memcpy?
memcpy(bufferDigits, &cant, sizeof(int));
Why so complicated? Just divide and take remainders. Here's a reentrant example to which you provide a buffer, and you get back a pointer to the beginning of the converted string:
char * to_string(unsigned int n, char * buf, unsigned int len)
{
if (len < 1) return buf;
buf[--len] = 0;
if (n == 0 && len > 0) { buf[--len] = '0'; }
while (n != 0 && len > 0) { buf[--len] = '0' + (n % 10); n /= 10; }
return &buf[len];
}
Usage: char buf[100]; char * s = to_string(4160, buf, 100);

Decimal to Binary Conversion

I was writing a function for conversion between Decimal and Binary base number systems and here's my original code:
void binary(int number)
{
vector<int> binary;
while (number == true)
{
binary.insert(binary.begin(), (number % 2) ? 1 : 0);
number /= 2;
}
for (int access = 0; access < binary.size(); access++)
cout << binary[access];
}
It didn't work, however, until I changed it to this:
while(number)
What's wrong with
while(number == true)
and what's the difference between the two forms?
Thanks in advance.
When you say while (number), number, which is an int, is converted to type bool. If it is zero it becomes false and if it is nonzero it becomes true.
When you say while (number == true), the true is converted to an int (to become 1) and it is the same as if you had said while (number == 1).
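A tiny illustration of the difference (my own snippet, not from the answer):
#include <iostream>
int main() {
int number = 5;
std::cout << (number == true) << '\n'; // prints 0: true becomes 1, and 5 != 1
std::cout << (number != 0) << '\n'; // prints 1: this is what while (number) effectively tests
return 0;
}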
Here is my code....
#include<stdio.h>
#include<string.h>
#include<stdlib.h>
#include<math.h>
#include<unistd.h>
#include<assert.h>
#include<stdbool.h>
#define max 10000
#define RLC(num,pos) ((num << pos)|(num >> (32 - pos)))
#define RRC(num,pos) ((num >> pos)|(num << (32 - pos)))
void tobinstr(int value, int bitsCount, char* output)
{
int i;
output[bitsCount] = '\0';
for (i = bitsCount - 1; i >= 0; --i, value >>= 1)
{
output[i] = (value & 1) + '0';
}
}
int main()
{
char s[50];
tobinstr(65536,32, s);
printf("%s\n", s);
return 0;
}

Implement division with bit-wise operator

How can I implement division using bit-wise operators (not just division by powers of 2)?
Describe it in detail.
The standard way to do division is by implementing binary long-division. This involves subtraction, so as long as you don't discount this as not a bit-wise operation, then this is what you should do. (Note that you can of course implement subtraction, very tediously, using bitwise logical operations.)
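(If you do want subtraction built from bitwise operations alone, a sketch along these lines works; this illustration is mine, not part of the original answer.)
// Add using only XOR (sum without carries) and AND/shift (the carries).
unsigned int bitwise_add(unsigned int a, unsigned int b) {
while (b != 0) {
unsigned int carry = (a & b) << 1; // positions where a carry is generated
a = a ^ b; // add without the carries
b = carry; // fold the carries in on the next pass
}
return a;
}
// a - b is a + (~b + 1) in two's complement
unsigned int bitwise_sub(unsigned int a, unsigned int b) {
return bitwise_add(a, bitwise_add(~b, 1u));
}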
In essence, if you're doing Q = N/D:
Align the most-significant ones of N and D.
Compute t = (N - D);.
If (t >= 0), then set the least significant bit of Q to 1, and set N = t.
Left-shift N by 1.
Left-shift Q by 1.
Go to step 2.
Loop for as many output bits (including fractional) as you require, then apply a final shift to undo what you did in Step 1.
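A minimal sketch of those steps for plain unsigned integer quotients (no fractional bits, divisor assumed nonzero; this illustration is mine, not the answerer's code):
unsigned int long_div(unsigned int n, unsigned int d) {
unsigned int q = 0;
int shift = 0;
// Step 1: align the most significant set bit of d with that of n
while ((d << 1) <= n && (d << 1) > d) { d <<= 1; ++shift; }
// Steps 2-6: one quotient bit per position, subtracting wherever the divisor still fits
for (; shift >= 0; --shift) {
q <<= 1;
if (n >= d) { n -= d; q |= 1; } // t = N - D was >= 0, so this bit is 1
d >>= 1;
}
return q; // n now holds the remainder
}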
Division of two numbers using bitwise operators.
#include <stdio.h>
int remainder, divisor;
int division(int tempdividend, int tempdivisor) {
int quotient = 1;
if (tempdivisor == tempdividend) {
remainder = 0;
return 1;
} else if (tempdividend < tempdivisor) {
remainder = tempdividend;
return 0;
}
while ((tempdivisor << 1) <= tempdividend) { // stop before overshooting the dividend
tempdivisor = tempdivisor << 1;
quotient = quotient << 1;
}
/* Call division recursively */
quotient = quotient + division(tempdividend - tempdivisor, divisor);
return quotient;
}
int main() {
int dividend;
printf ("\nEnter the Dividend: ");
scanf("%d", &dividend);
printf("\nEnter the Divisor: ");
scanf("%d", &divisor);
printf("\n%d / %d: quotient = %d", dividend, divisor, division(dividend, divisor));
printf("\n%d / %d: remainder = %d", dividend, divisor, remainder);
return 0;
}
int remainder =0;
int division(int dividend, int divisor)
{
int quotient = 1;
int neg = 1;
if ((dividend>0 &&divisor<0)||(dividend<0 && divisor>0))
neg = -1;
// Convert to positive
unsigned int tempdividend = (dividend < 0) ? -dividend : dividend;
unsigned int tempdivisor = (divisor < 0) ? -divisor : divisor;
if (tempdivisor == tempdividend) {
remainder = 0;
return 1*neg;
}
else if (tempdividend < tempdivisor) {
if (dividend < 0)
remainder = tempdividend*neg;
else
remainder = tempdividend;
return 0;
}
while (tempdivisor<<1 <= tempdividend)
{
tempdivisor = tempdivisor << 1;
quotient = quotient << 1;
}
// Call division recursively
if(dividend < 0)
quotient = quotient*neg + division(-(tempdividend-tempdivisor), divisor);
else
quotient = quotient*neg + division(tempdividend-tempdivisor, divisor);
return quotient;
}
int main()
{
int dividend,divisor;
char ch = 's';
while(ch != 'x')
{
printf ("\nEnter the Dividend: ");
scanf("%d", &dividend);
printf("\nEnter the Divisor: ");
scanf("%d", &divisor);
printf("\n%d / %d: quotient = %d", dividend, divisor, division(dividend, divisor));
printf("\n%d / %d: remainder = %d", dividend, divisor, remainder);
_getch();
}
}
I assume we are discussing division of integers.
Consider that I have two numbers, 1502 and 30, and I want to calculate 1502/30. This is how we do it:
First we align 30 with 1502 at its most significant digit: 30 becomes 3000. Comparing 1502 with 3000, 1502 contains 0 of 3000. Then we compare 1502 with 300: it contains 5 of 300. Then we compare (1502 - 5*300) with 30, which it contains 0 times. So at last we get 5*(10^1) = 50 as the result of this division.
Now convert both 1502 and 30 into binary digits. Then, instead of multiplying 30 by (10^x) to align it with 1502, we multiply 30 (in base 2) by 2^n to align it. And multiplying by 2^n is just a left shift by n positions.
Here is the code:
int divide(int a, int b){
if (b == 0)
return 0; // guard: cannot divide by zero
//To check if a or b are negative.
bool neg = false;
if ((a>0 && b<0)||(a<0 && b>0))
neg = true;
//Convert to positive
unsigned int new_a = (a < 0) ? -a : a;
unsigned int new_b = (b < 0) ? -b : b;
//Check the largest n such that b >= 2^n, and assign the n to n_pwr
int n_pwr = 0;
for (int i = 0; i < 32; i++)
{
if (((1 << i) & new_b) != 0)
n_pwr = i;
}
//So that 'a' could only contain 2^(31-n_pwr) many b's,
//start from here to try the result
unsigned int res = 0;
for (int i = 31 - n_pwr; i >= 0; i--){
if ((new_b << i) <= new_a){
res += (1 << i);
new_a -= (new_b << i);
}
}
return neg ? -res : res;
}
Didn't test it, but you get the idea.
This solution works perfectly.
#include <stdio.h>
int division(int dividend, int divisor, int origdiv, int * remainder)
{
int quotient = 1;
if (dividend == divisor)
{
*remainder = 0;
return 1;
}
else if (dividend < divisor)
{
*remainder = dividend;
return 0;
}
while (divisor <= dividend)
{
divisor = divisor << 1;
quotient = quotient << 1;
}
if (dividend < divisor)
{
divisor >>= 1;
quotient >>= 1;
}
quotient = quotient + division(dividend - divisor, origdiv, origdiv, remainder);
return quotient;
}
int main()
{
int n = 377;
int d = 7;
int rem = 0;
printf("Quotient : %d\n", division(n, d, d, &rem));
printf("Remainder: %d\n", rem);
return 0;
}
Implement division without a division operator:
You will need to include subtraction. But then it is just like doing it by hand (only in base 2). The code below provides a short function that does exactly this.
uint32_t udiv32(uint32_t n, uint32_t d) {
// n is dividend, d is divisor
// store the result in q: q = n / d
uint32_t q = 0;
// as long as the divisor fits into the remainder there is something to do
while (n >= d) {
uint32_t i = 0, d_t = d;
// determine to which power of two the divisor still fits the dividend
//
// i.e.: we intend to subtract the divisor multiplied by powers of two
// which in turn gives us a one in the binary representation
// of the result
while (n >= (d_t << 1) && ++i)
d_t <<= 1;
// set the corresponding bit in the result
q |= 1 << i;
// subtract the multiple of the divisor to be left with the remainder
n -= d_t;
// repeat until the divisor does not fit into the remainder anymore
}
return q;
}
The method below implements binary division assuming both numbers are positive. If subtraction is a concern, we can implement that as well using bitwise operators.
Code
-(int)binaryDivide:(int)numerator with:(int)denominator
{
if (numerator == 0 || denominator == 1) {
return numerator;
}
if (denominator == 0) {
#ifdef DEBUG
NSAssert(denominator != 0, @"denominator should be greater than 0");
#endif
return INFINITY;
}
// if (numerator <0) {
// numerator = abs(numerator);
// }
int maxBitDenom = [self getMaxBit:denominator];
int maxBitNumerator = [self getMaxBit:numerator];
int msbNumber = [self getMSB:maxBitDenom ofNumber:numerator];
int qoutient = 0;
int subResult = 0;
int remainingBits = maxBitNumerator-maxBitDenom;
if (msbNumber >= denominator) {
qoutient |=1;
subResult = msbNumber - denominator;
}
else {
subResult = msbNumber;
}
while (remainingBits>0) {
int msbBit = (numerator & (1 << (remainingBits-1)))>0 ? 1 : 0;
subResult = (subResult << 1) |msbBit;
if (subResult >= denominator) {
subResult = subResult-denominator;
qoutient = (qoutient << 1) | 1;
}
else {
qoutient = qoutient << 1;
}
remainingBits--;
}
return qoutient;
}
-(int)getMaxBit:(int)inputNumber
{
int maxBit =0;
BOOL isMaxBitSet = NO;
for (int i=0; i<sizeof(inputNumber)*8; i++) {
if (inputNumber & (1 << i) ) {
maxBit = i;
isMaxBitSet=YES;
}
}
if (isMaxBitSet) {
maxBit += 1;
}
return maxBit;
}
-(int)getMSB:(int)bits ofNumber:(int)number
{
int numbeMaxBit = [self getMaxBit:number];
return number >> (numbeMaxBit -bits);
}
For integers:
public class Division {
public static void main(String[] args) {
System.out.println("Division: " + divide(100, 9));
}
public static int divide(int num, int divisor) {
int sign = 1;
if((num > 0 && divisor < 0) || (num < 0 && divisor > 0))
sign = -1;
return divide(Math.abs(num), Math.abs(divisor), Math.abs(divisor)) * sign;
}
public static int divide(int num, int divisor, int sum) {
if (sum > num) {
return 0;
}
return 1 + divide(num, divisor, sum + divisor);
}
}
With the usual caveats about C's behaviour with shifts, this ought to work for unsigned quantities regardless of the native size of an int...
static unsigned int udiv(unsigned int a, unsigned int b) {
unsigned int c = 1, result = 0;
if (b == 0) return (unsigned int)-1 /*infinity*/;
while (((int)b > 0) && (b < a)) { b = b<<1; c = c<<1; }
do {
if (a >= b) { a -= b; result += c; }
b = b>>1; c = c>>1;
} while (c);
return result;
}
This is my solution to implement division with only bitwise operations:
int align(int a, int b) {
while (b < a) b <<= 1;
return b;
}
int divide(int a, int b) {
int temp = b;
int result = 0;
b = align(a, b);
do {
result <<= 1;
if (a >= b) {
// sub(a,b) is a self-defined bitwise function for a minus b
a = sub(a,b);
result = result | 1;
}
b >>= 1;
} while (b >= temp);
return result;
}
Unsigned Long Division (JavaScript) - based on Wikipedia article: https://en.wikipedia.org/wiki/Division_algorithm:
"Long division is the standard algorithm used for pen-and-paper division of multi-digit numbers expressed in decimal notation. It shifts gradually from the left to the right end of the dividend, subtracting the largest possible multiple of the divisor (at the digit level) at each stage; the multiples then become the digits of the quotient, and the final difference is then the remainder.
When used with a binary radix, this method forms the basis for the (unsigned) integer division with remainder algorithm below."
Function divideWithoutDivision at the end wraps it to allow negative operands. I used it to solve leetcode problem "Product of Array Except Self"
function longDivision(N, D) {
let Q = 0; //quotient and remainder
let R = 0;
let n = mostSignificantBitIn(N);
for (let i = n; i >= 0; i--) {
R = R << 1;
R = setBit(R, 0, getBit(N, i));
if (R >= D) {
R = R - D;
Q = setBit(Q, i, 1);
}
}
//return [Q, R];
return Q;
}
function mostSignificantBitIn(N) {
for (let i = 31; i >= 0; i--) {
if (N & (1 << i))
return i ;
}
return 0;
}
function getBit(N, i) {
return (N & (1 << i)) >> i;
}
function setBit(N, i, value) {
return N | (value << i);
}
function divideWithoutDivision(dividend, divisor) {
let negativeResult = (dividend < 0) ^ (divisor < 0);
dividend = Math.abs(dividend);
divisor = Math.abs(divisor);
let quotient = longDivision(dividend, divisor);
return negativeResult ? -quotient : quotient;
}
All these solutions are too long. The basic idea is to write the quotient (for example, 5 = 101) as 100 + 00 + 1 = 101.
public static Point divide(int a, int b) {
if (a < b)
return new Point(0,a);
if (a == b)
return new Point(1,0);
int q = b;
int c = 1;
while (q<<1 < a) {
q <<= 1;
c <<= 1;
}
Point r = divide(a-q, b);
return new Point(c + r.x, r.y);
}
public static class Point {
int x;
int y;
public Point(int x, int y) {
this.x = x;
this.y = y;
}
public int compare(Point b) {
if (b.x - x != 0) {
return x - b.x;
} else {
return y - b.y;
}
}
@Override
public String toString() {
return " (" + x + " " + y + ") ";
}
}
Since bitwise operations work on bits that are either 0 or 1, each bit represents a power of 2, so if I have the bits
1010
that value is 10.
Each bit is a power of two, so if we shift the bits to the right, we divide by 2
1010 --> 0101
0101 is 5
So, in general, if you want to divide by some power of 2, you shift right by the exponent you raise two to in order to get that value.
For instance, to divide by 16 you would shift right by 4, since 2^4 = 16.
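For example (illustrative snippet):
#include <stdio.h>
int main(void) {
unsigned int n = 240;
printf("%u\n", n >> 4); // 240 / 16 = 15
printf("%u\n", n / 16); // same result for unsigned values
return 0;
}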