Implicit conversion or cast? - c++

I have a function that interleaves the bits of 32 bit words and returns a 64 bit result. For this simple test case, the bottom 3 bytes are correct, and the contents of the top 5 bytes are incorrect. intToBin_32 and intToBin_64 are convenience functions to see the binary representation of the arguments and return val. I've placed casts from the 32 bit type to the 64 bit type everywhere I think they are needed, but I'm still seeing this unexpected (to me, at least) behavior. Is there an implicit conversion going on here, or is there some other reason this doesn't work correctly?
#include <stdint.h>
#include <stdio.h>
struct intString_32 {char bstr [32 + 1 + 8];};
struct intString_64 { char bstr [64 + 1 + 8];};
intString_32 intToBin_32(int a)
{
intString_32 b;
for (int i = 0; i < 8; i++)
{
for (int j = 0; j < 5; j++)
{
if (j != 4)
{
b.bstr[5*i + j] = * ((a & (1 << (31 - (4*i + j)))) ? "1" : "0");
}
else
{
b.bstr[5*i + j] = 0x20;
}
}
}
b.bstr[40] = * ( "\0" );
return b;
}
intString_64 intToBin_64(long long a)
{
intString_64 b;
for (int i = 0; i < 8; i++)
{
for (int j = 0; j < 9; j++)
{
if (j != 8)
{
b.bstr[9*i + j] = * ((a & (1 << (63 - (8*i + j)))) ? "1" : "0");
}
else
{
b.bstr[9*i + j] = 0x20;
}
}
}
b.bstr[72] = * ( "\0" );
return b;
}
uint64_t interleaveBits(unsigned int a, unsigned int b)
{
uint64_t retVal = 0;
for (unsigned int i = 0; i < 32; i++)
{
retVal |= (uint64_t)((uint64_t)((a >> i) & 0x1)) << (2*i);
retVal |= (uint64_t)((uint64_t)((b >> i) & 0x1)) << (2*i + 1);
}
return retVal;
}
int main(int arc, char* argv)
{
unsigned int foo = 0x0004EDC7;
unsigned int bar = 0x5A5A00FF;
uint64_t bat = interleaveBits(foo, bar);
printf("foo: %s \n", intToBin_32(foo).bstr);
printf("bar: %s \n", intToBin_32(bar).bstr);
printf("bat: %s \n\n", intToBin_64(bat).bstr);
}

Through debugging I noticed it's your intToBin_64 which is wrong, to be specific, in this line:
b.bstr[9*i + j] = * ((a & (1 << (63 - (8*i + j)))) ? "1" : "0");
take a closer look on the shift:
(1 << (63 - (8*i + j)))
The literal 1 is a integer, however, shifting a integer by more than 31 bits is undefined behavior. Shift a longlong instead:
b.bstr[9*i + j] = * ((a & (1ll << (63 - (8*i + j)))) ? "1" : "0");

Related

Copy 80 bit hex number from char array to uint16_t vector or array

Say I have a text file containing the 80bit hex number
0xabcdef0123456789abcd
My C++ program reads that using fstream into a char array called buffer.
But then I want to store it in a uint16_t array such that:
uint16_t * key = {0xabcd, 0xef01, 0x2345, 0x6789, 0xabcd}
I have tried several approaches, but I continue to get decimal integers, for instance:
const std::size_t strLength = strlen(buffer);
std::vector<uint16_t> arr16bit((strLength / 2) + 1);
for (std::size_t i = 0; i < strLength; ++i)
{
arr16bit[i / 2] <<= 8;
arr16bit[i / 2] |= buffer[i];
}
Yields:
arr16bit = {24930, 25444, 25958, 12337, 12851}
There must be an easy way to do this that I'm just not seeing.
Here is the full solution I came up with based on the comments:
int hex_char_to_int(char c) {
if (int(c) < 58) //numbers
return c - 48;
else if (int(c) < 91) //capital letters
return c - 65 + 10;
else if (int(c) < 123) //lower case letters
return c - 97 + 10;
}
uint16_t ints_to_int16(int i0, int i1, int i2, int i3) {
return (i3 * 16 * 16 * 16) + (i2 * 16 * 16) + (i1 * 16) + i0;
}
void readKey() {
const int bufferSize = 25;
char buffer[bufferSize] = { NULL };
ifstream* pStream = new ifstream("key.txt");
if (pStream->is_open() == true)
{
pStream->read(buffer, bufferSize);
}
cout << buffer << endl;
const size_t strLength = strlen(buffer);
int* hex_to_int = new int[strLength - 2];
for (int i = 2; i < strLength; i++) {
hex_to_int[i - 2] = hex_char_to_int(buffer[i]);
}
cout << endl;
uint16_t* key16 = new uint16_t[5];
int j = 0;
for (int i = 0; i < 5; i++) {
key16[i] = ints_to_int16(hex_to_int[j++], hex_to_int[j++], hex_to_int[j++], hex_to_int[j++]);
cout << "0x" << hex << key16[i] << " ";
}
cout << endl;
}
This outputs:
0xabcdef0123456789abcd
0xabcd 0xef01 0x2345 0x6789 0xabcd

How to take input 128 bit unsigned integer in c++

I am new to c++. I want to take input a unsigned 128 bit integer using scanf and print it using printf. As I am new to c++ , I only know these two methods for input output. Can someone help me out?
You could use boost, but this library set must be installed yourself:
#include <boost/multiprecision/cpp_int.hpp>
#include <iostream>
int main()
{
using namespace boost::multiprecision;
uint128_t v = 0;
std::cin >> v; // read
std::cout << v << std::endl; // write
return 0;
}
If you want to get along without boost, you can store the value into two uint64_t as such:
std::string input;
std::cin >> input;
uint64_t high = 0, low = 0, tmp;
for(char c : input)
{
high *= 10;
tmp = low * 10;
if(tmp / 10 != low)
{
high += ((low >> 32) * 10 + ((low & 0xf) * 10 >> 32)) >> 32;
}
low = tmp;
tmp = low + c - '0';
high += tmp < low;
low = tmp;
}
Printing then, however, gets more ugly:
std::vector<uint64_t> v;
while(high | low)
{
uint64_t const pow10 = 100000000;
uint64_t const mod = (((uint64_t)1 << 32) % pow10) * (((uint64_t)1 << 32) % pow10) % pow10;
tmp = high % pow10;
uint64_t temp = tmp * mod % pow10 + low % pow10;
v.push_back((tmp * mod + low) % pow10);
low = low / pow10 + tmp * 184467440737 + tmp * /*0*/9551616 / pow10 + (temp >= pow10);
high /= pow10;
}
std::vector<uint64_t>::reverse_iterator i = v.rbegin();
while(i != v.rend() && *i == 0)
{
++i;
}
if(i == v.rend())
{
std::cout << 0;
}
else
{
std::cout << *i << std::setfill('0');
for(++i; i != v.rend(); ++i)
{
std::cout << std::setw(8) << *i;
}
}
Above solution works up to (including)
340282366920938463463374516198409551615
= 0x ffff ffff ffff ffff ffff ad06 1410 beff
Above, there is an error.
Note: pow10 can be varied, then some other constants need to be adjusted, e. g. pow10 = 10:
low = low / pow10 + tmp * 1844674407370955161 + tmp * 6 / pow10 + (temp >= pow10);
and
std::cout << std::setw(1) << *i; // setw also can be dropped in this case
Increasing results in reducing the maximum number for which printing still works correctly, decreasing raises the maximum. With pow10 = 10, maximum is
340282366920938463463374607431768211425
= ffff ffff ffff ffff ffff ffff ffff ffe1
I don't know where the error for the very highest numbers comes from, yet, possibly some unconsidered overflow. Any suggestions appreciated, then I'll improve the algorithm. Until then, I'd reduce pow10 to 10 and introduce a special handling for the highest 30 failing numbers:
std::string const specialValues[0] = { /*...*/ };
if(high == 0xffffffffffffffff && low > 0xffffffffffffffe1)
{
std::cout << specialValues[low - 0xffffffffffffffe2];
}
else
{
/* ... */
}
So at least, we can handle all valid 128-bit values correctly.
You can try from_string_128_bits and to_string_128_bits with 128 bits unsigned integers in C :
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
__uint128_t from_string_128_bits(const char *str) {
__uint128_t res = 0;
for (; *str; res = res * 10 + *str++ - '0');
return res;
}
static char *to_string_128_bits(__uint128_t num) {
__uint128_t mask = -1;
size_t a, b, c = 1, d;
char *s = malloc(2);
strcpy(s, "0");
for (mask -= mask / 2; mask; mask >>= 1) {
for (a = (num & mask) != 0, b = c; b;) {
d = ((s[--b] - '0') << 1) + a;
s[b] = "0123456789"[d % 10];
a = d / 10;
}
for (; a; s = realloc(s, ++c + 1), memmove(s + 1, s, c), *s = "0123456789"[a % 10], a /= 10);
}
return s;
}
int main(void) {
__uint128_t n = from_string_128_bits("10000000000000000000000000000000000001");
n *= 7;
char *s = to_string_128_bits(n);
puts(s);
free(s); // string must be freed
// print 70000000000000000000000000000000000007
}

Can you explain what this binary swapping operation is doing?

I'm currently trying to solve this programing programing puzzle. The puzzle is about encrypting messages using the following C++ code:
int main()
{
int size;
cin >> size;
unsigned int* a = new unsigned int[size / 16]; // <- input tab to encrypt
unsigned int* b = new unsigned int[size / 16]; // <- output tab
for (int i = 0; i < size / 16; i++) { // Read size / 16 integers to a
cin >> hex >> a[i];
}
for (int i = 0; i < size / 16; i++) { // Write size / 16 zeros to b
b[i] = 0;
}
for (int i = 0; i < size; i++)
for (int j = 0; j < size; j++) {
b[(i + j) / 32] ^= ( (a[i / 32] >> (i % 32)) &
(a[j / 32 + size / 32] >> (j % 32)) & 1 ) << ((i + j) % 32); // Magic centaurian operation
}
for(int i = 0; i < size / 16; i++) {
if (i > 0) {
cout << ' ';
}
cout << setfill('0') << setw(8) << hex << b[i]; // print result
}
cout << endl;
/*
Good luck humans
*/
return 0;
}
The objective is to reverse this encoding (that should be a known mathematical operation when identified). The problem i'm facing is that i cannot understand what the encoding works and what all these binary operations are doing. Can you explain me how this encoding works?
Thank you!
To learn what the operations are, break it down loop-by-loop and line-by-line, then apply the rules of precedence. Nothing more, nothing less. If I haven't lost track somewhere in the bitwise swamp, the effect of which all boils down to exclusive XOR'ing the orignal value at index b[(i + j) / 32] by a power of 2 in the range of a signed integer (or 0). The analysis would look something like this:
for (int i = 0; i < size; i++)
for (int j = 0; j < size; j++) {
b[(i + j) / 32] ^=
( (a[i / 32] >> (i % 32)) &
(a[j / 32 + size / 32] >>
(j % 32)) & 1 ) <<
((i + j) % 32); // Magic centaurian operation
}
}
What is the first operation:
b[(i + j) / 32] ^=
This in an exclusive OR of the value at that index. If you just let idx represent the jumble that computes the index, you can write it as:
b[idx] ^= stuff
which applying the rules of precedence (right-to-left for ^=) is the same as writing:
b[idx] = b[idx] ^ stuff
The order of precedence tells us me need to figure out stuff before we can apply it to the value of b[idx]. Looking at stuff you have:
| A | << | B |
| C | & | D | | |
| | | E | & 1 | | |
+-----------------+---+-----------------------+-----+----+-------------+
( (a[i/32]>>(i%32)) & (a[j/32+size/32]>>(j%32)) & 1 ) << ( (i+j) % 32 );
Breaking in down, you have A << B, which can be further broken down as:
( C & D ) << B
or finally:
(C & E & 1) << B
The rules of precedence relevant to (C & E & 1) << B are all applied left-to-right giving deference to the parenthesis grouping.
So what is B? It is just a number that the grouping (C & E & 1) will be shifted to the left by. In terms of the index values i and j modded with the number of bits in an integer, it will simply shift the bits in grouping (C & E & 1) to the left by 0-31 bits depending on the combined value of i+j.
The grouping (C & E & 1) is an entirely similar analysis. a[i/32]>>(i%32) is nothing more than the value at a[i/32] shifted to the right by (i%32). E is the same with slightly differnt index manipulation: (a[j/32+size/32]>>(j%32)) which is just the value at that index shifted right by (j%32). The result of both of those shifts are then ANDED with 1. What that means is the entire grouping (C & E & 1) will only have a value if both C & E are odd number values.
Why only odd values? From a binary standpoint, odd numbers are the only values that will have the one-bit 1. (e.g. 5 & 7 & 1 (101 & 111 & 1) = 1). If any of the values are even or 0, then the whole grouping will be 0.
Understanding the grouping (C & E & 1) (or what we have largely grouped as A), you can now look at:
A << B
Knowing A will be 0 or 1, you know the only way the result of the shift will have value is if A is 1, and then the result of the group will just be the value of 1 shifted left by B bits. Knowing B has the range of 0-31, then the range of values for A << B are between 0 - 2147483648, but since you are shifting by between 0 - 31, the values for A << B will only be the positive powers of two between 0 - 2147483648 (binary: 0, 1, 10, 100, 1000, etc...)
Then that finally brings us to
b[idx] = b[idx] ^ stuff
which when you exclusively OR anything by a power of two, you only serve to flip the bit at the power of two in that number. (e.g. 110101 (26) ^ 1000 (8) = 111101 (61)). All other bits are unchanged. So the final effect of all the operations is to make:
b[idx] = b[idx] ^ stuff
nothing more than:
b[idx] = b[idx] ^ (power of two)
or
b[idx] = b[idx] ^ 0 /* which is nothing more than b[idx] to begin with */
Let me know if you have any questions. You can easily dump the index calculations to look at the values, but this should cover the operations at issue.
This code snippet is doing a Carry-free Multiplication Operation on the first half parts of the array (a[0:size/32]) and the second half parts of the array (a[size/32:size/16]).
I write an equivalent version in binary below the original version, hope this might help you.
#include <iostream>
#include <iomanip>
#include <ios>
using namespace std;
int main() {
int size;
cin >> size;
unsigned int* a = new unsigned int[size / 16]; // <- input tab to encrypt
unsigned int* b = new unsigned int[size / 16]; // <- output tab
bool *a1 = new bool[size];
bool *a2 = new bool[size];
bool *bb = new bool[size * 2];
for (int i = 0; i < size / 16; i++) { // Read size / 16 integers to a
cin >> hex >> a[i];
}
for (int i = 0; i < size * 2; i++) {
if (i < size) {
a1[i] = (a[i / 32] & (1 << (i % 32))) > 0; // first `size` bits are for a1
} else {
a2[i - size] = (a[i / 32] & (1 << (i % 32))) > 0; // rest `size` bits are for a2
}
}
for (int i = 0; i < size / 16; i++) { // Write size / 16 zeros to b
b[i] = 0;
}
for (int i = 0; i < size * 2; i++) {
bb[i] = 0;
}
for (int i = 0; i < size; i++)
for (int j = 0; j < size; j++) {
b[(i + j) / 32] ^= ( (a[i / 32] >> (i % 32)) &
(a[j / 32 + size / 32] >> (j % 32)) & 1 ) << ((i + j) % 32); // Magic centaurian operation
}
for (int i = 0; i < size; i++)
for (int j = 0; j < size; j++) {
bb[i + j] ^= (a1[i] & a2[j] & 1); // some operation as multiply (*) do
}
for(int i = 0; i < size / 16; i++) {
if (i > 0) {
cout << ' ';
}
cout << setfill('0') << setw(8) << hex << b[i]; // print result
}
cout << endl;
for(int i = 0; i < size / 32 * 2; i++) {
if (i > 0) {
cout << ' ';
}
unsigned int hex_number = 0;
for (int j = 0; j < 32; j++) hex_number += bb[i * 32 + j] << j;
cout << setfill('0') << setw(8) << hex << hex_number; // print result
}
cout << endl;
return 0;
}

getline with unsigned char string

So, I did some searching for why my compiler gives an error saying:
49 ~\C++\SHA-1\main.cpp invalid conversion from `unsigned char*' to `char*'
And I found out that you cannot convert between unsigned char to char because they are completely different types. So this lead me to the problem of needing a getline function for and unsigned char string in my code.
#include <iostream>
#include <stdint.h>
using namespace std;
uint32_t rotl( uint32_t value, int shift)
{
if ((shift &= sizeof(value)*8 - 1) == 0) return value;
return (value << shift) | (value >> (sizeof(value)*8 - shift));
}
uint32_t rotr( uint32_t value, int shift)
{
if ((shift &= sizeof(value)*8 - 1) == 0) return value;
return (value >> shift) | (value << (sizeof(value)*8 - shift));
}
int textInput();
int hexInput();
int binInput();
unsigned char message[64];
int SHA_1();
int main()
{
int selection;
cout<<"Select Input type:\n\n\t1. Text String\n\t2. Hex String\n\t3. Binary String\n";
cin>>selection;
cin.ignore();
switch(selection)
{
case 1: textInput(); break;
case 2: hexInput(); break;
case 3: binInput(); break;
}
SHA_1();
cout<<"\ndone";
cin.get();
return 0;
}
int textInput()
{
unsigned char input[63] = {0};
cout<<"Enter a text string to be hashed\n\n";
cin.getline(input, 62, '\n');
cin.ignore();
for(int x = 0; x <= 63; x++)
{
//cout<<x<<"\n";
if (input[x] == 0x00)
{
message[x] = 0x00000080;
message[63] = x; //This might be wrong.
//cout<<std::hex<<message;
break;
}
else message[x] = input[x];
}
return 0;
}
int hexInput()
{
return 0;
}
int binInput()
{
return 0;
}
int SHA_1()
{
uint32_t h0 = 0x67452301;
uint32_t h1 = 0xEFCDAB89;
uint32_t h2 = 0x98BADCFE;
uint32_t h3 = 0x10325476;
uint32_t h4 = 0xC3D2E1F0;
uint32_t a;
uint32_t b;
uint32_t c;
uint32_t d;
uint32_t e;
uint32_t f;
uint32_t k;
uint32_t temp;
uint32_t w[80];
/*for( int m = 0; m <= 63; m++)
{
cout<<"message["<<m<<"]="<<std::hex<<int(message[m])<<std::dec<<"\n";
}*/
for( int i = 0; i <= 15; i++)
{
w[i] = ((message[(i*4)] << 24) | (message[(i*4) + 1] << 16) | (message[(i*4) + 2] << 8) | (message[(i*4) + 3]));
//cout<<"W["<<i<<"]="<<std::hex<<w[i]<<std::dec<<"\n";
}
for( int i = 16; i <= 79; i++)
{
w[i] = rotl((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1);
}
a = h0;
b = h1;
c = h2;
d = h3;
e = h4;
for(int iteration = 0; iteration <= 79; iteration++)
{
if((0 <= iteration) && (iteration <= 19))
{
f = ((b & c) | ((~b) & d));
k = 0x5A827999;
}
else if((20 <= iteration) && (iteration <= 39))
{
f = (b ^ c ^ d);
k = 0x6ED9EBA1;
}
else if((40 <= iteration) && (iteration <= 59))
{
f = ((b & c) | (b & d) | (c & d));
k = 0x8F1BBCDC;
}
else if((60 <= iteration) && (iteration <= 79))
{
f = (b ^ c ^ d);
k = 0xCA62C1D6;
}
temp = (rotl( a, 5) + f + e + k + w[iteration]);
e = d;
d = c;
c = rotl( b, 30);
b = a;
a = temp;
}
h0 = h0 + a;
h1 = h1 + b;
h2 = h2 + c;
h3 = h3 + d;
h4 = h4 + e;
cout<<hex<<h0<<" "<<h1<<" "<<h2<<" "<<h3<<" "<<h4;
return 0;
}
If anyone could give me some suggestions that would be helpful.
Thanks
This is related to strict-aliasing constraints: you're not permitted to reference an unsigned char array via a char pointer.
#include <iostream>
using namespace std;
void aliasing_fun(char* arr) {
arr[0] = 42;
}
int main() {
unsigned char arr[10] = {0};
aliasing_fun(arr); // Not allowed
return 0;
}
http://ideone.com/r4OVZi
You might cast your array to char* in order to do that.
Cast your array to (char *) when calling getline:
getline((char *)input, ...);

Assignment operator that calls a constructor is broken

I've implemented some of the changes suggested in this question, and (thanks very much) it works quite well, however... in the process I've seemed to break the post-declaration assignment operator. With the following code:
#include <cstdio>
#include "ucpp"
main() {
ustring a = "test";
ustring b = "ing";
ustring c = "- -";
ustring d = "cafe\xcc\x81";
printf("%s\n", (a + b + c[1] + d).encode());
}
I get a nice "testing cafeĢ" message. However, if I modify the code slightly so that the const char * conversion is done separately, post-declaration:
#include <cstdio>
#include "ucpp"
main() {
ustring a = "test";
ustring b = "ing";
ustring c = "- -";
ustring d;
d = "cafe\xcc\x81";
printf("%s\n", (a + b + c[1] + d).encode());
}
the ustring named d becomes blank, and all that is output is "testing ". My new code has three constructors, one void (which is probably the one being incorrectly used, and is used in the operator+ function), one that takes a const ustring &, and one that takes a const char *. The following is my new library code:
#include <cstdlib>
#include <cstring>
class ustring {
int * values;
long len;
public:
long length() {
return len;
}
ustring() {
len = 0;
values = (int *) malloc(0);
}
ustring(const ustring &input) {
len = input.len;
values = (int *) malloc(sizeof(int) * len);
for (long i = 0; i < len; i++)
values[i] = input.values[i];
}
ustring operator=(ustring input) {
ustring result(input);
return result;
}
ustring(const char * input) {
values = (int *) malloc(0);
long s = 0; // s = number of parsed chars
int a, b, c, d, contNeed = 0, cont = 0;
for (long i = 0; input[i]; i++)
if (input[i] < 0x80) { // ASCII, direct copy (00-7f)
values = (int *) realloc(values, sizeof(int) * ++s);
values[s - 1] = input[i];
} else if (input[i] < 0xc0) { // this is a continuation (80-bf)
if (cont == contNeed) { // no need for continuation, use U+fffd
values = (int *) realloc(values, sizeof(int) * ++s);
values[s - 1] = 0xfffd;
}
cont = cont + 1;
values[s - 1] = values[s - 1] | ((input[i] & 0x3f) << ((contNeed - cont) * 6));
if (cont == contNeed) cont = contNeed = 0;
} else if (input[i] < 0xc2) { // invalid byte, use U+fffd (c0-c1)
values = (int *) realloc(values, sizeof(int) * ++s);
values[s - 1] = 0xfffd;
} else if (input[i] < 0xe0) { // start of 2-byte sequence (c2-df)
contNeed = 1;
values = (int *) realloc(values, sizeof(int) * ++s);
values[s - 1] = (input[i] & 0x1f) << 6;
} else if (input[i] < 0xf0) { // start of 3-byte sequence (e0-ef)
contNeed = 2;
values = (int *) realloc(values, sizeof(int) * ++s);
values[s - 1] = (input[i] & 0x0f) << 12;
} else if (input[i] < 0xf5) { // start of 4-byte sequence (f0-f4)
contNeed = 3;
values = (int *) realloc(values, sizeof(int) * ++s);
values[s - 1] = (input[i] & 0x07) << 18;
} else { // restricted or invalid (f5-ff)
values = (int *) realloc(values, sizeof(int) * ++s);
values[s - 1] = 0xfffd;
}
len = s;
}
ustring operator=(const char * input) {
ustring result(input);
return result;
}
ustring operator+(ustring input) {
ustring result;
result.len = len + input.len;
result.values = (int *) malloc(sizeof(int) * result.len);
for (long i = 0; i < len; i++)
result.values[i] = values[i];
for (long i = 0; i < input.len; i++)
result.values[i + len] = input.values[i];
return result;
}
ustring operator[](long index) {
ustring result;
result.len = 1;
result.values = (int *) malloc(sizeof(int));
result.values[0] = values[index];
return result;
}
char * encode() {
char * r = (char *) malloc(0);
long s = 0;
for (long i = 0; i < len; i++) {
if (values[i] < 0x80)
r = (char *) realloc(r, s + 1),
r[s + 0] = char(values[i]),
s += 1;
else if (values[i] < 0x800)
r = (char *) realloc(r, s + 2),
r[s + 0] = char(values[i] >> 6 | 0x60),
r[s + 1] = char(values[i] & 0x3f | 0x80),
s += 2;
else if (values[i] < 0x10000)
r = (char *) realloc(r, s + 3),
r[s + 0] = char(values[i] >> 12 | 0xe0),
r[s + 1] = char(values[i] >> 6 & 0x3f | 0x80),
r[s + 2] = char(values[i] & 0x3f | 0x80),
s += 3;
else
r = (char *) realloc(r, s + 4),
r[s + 0] = char(values[i] >> 18 | 0xf0),
r[s + 1] = char(values[i] >> 12 & 0x3f | 0x80),
r[s + 2] = char(values[i] >> 6 & 0x3f | 0x80),
r[s + 3] = char(values[i] & 0x3f | 0x80),
s += 4;
}
return r;
}
};
operator= should modify *this. The returned value (which you better make a reference) is only used in chaining situations:
a = b = c;
(a = b).foo();
//etc.
Both assignment operators are broken, e.g., this:
ustring operator=(const char * input) {
ustring result(input);
return result;
}
Does nothing to the target object. It just creates a local temporary and returns that. Write them like this instead:
ustring& operator=(ustring input) {
swap(input);
return *this;
}
ustring& operator=(const char * input) {
swap(ustring(input));
return *this;
}
void swap(ustring& s) {
int* tv = values; values = s.values; s.values = tv;
long tl = len; len = s.len; s.len = tl;
}