Set specific byte in DWORD - c++

How can I set a specific byte in a DWORD variable that is 4 bytes long?
DWORD color_argb;
unsigned char a = 11; // first byte
unsigned char r = 22; // second byte
unsigned char g = 33; // third byte
unsigned char b = 44; // fourth byte
zumalifeguard, if I understand you correctly, I can use the following macros:
#define SET_COLOR_A(color, a) color |= (a << 24)
#define SET_COLOR_R(color, r) color |= (r << 16)
#define SET_COLOR_G(color, g) color |= (g << 8)
#define SET_COLOR_B(color, b) color |= (b << 0)
?

Try these macros instead:
#define SET_COLOR_A(color, a) color = (DWORD(color) & 0x00FFFFFF) | ((DWORD(a) & 0xFF) << 24)
#define SET_COLOR_R(color, r) color = (DWORD(color) & 0xFF00FFFF) | ((DWORD(r) & 0xFF) << 16)
#define SET_COLOR_G(color, g) color = (DWORD(color) & 0xFFFF00FF) | ((DWORD(g) & 0xFF) << 8)
#define SET_COLOR_B(color, b) color = (DWORD(color) & 0xFFFFFF00) | (DWORD(b) & 0xFF)
The important thing is to preserve existing bits that are not being manipulated, while removing existing bits that are being replaced. Simply OR'ing the new bits is not enough if there are already bits present in the location being assigned to.
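For example, a quick check with the values from the question (a minimal sketch; the starting value 0xDEADBEEF is only there to show that the old bytes really get cleared):
DWORD color = 0xDEADBEEF;
SET_COLOR_A(color, 11); // color == 0x0BADBEEF
SET_COLOR_R(color, 22); // color == 0x0B16BEEF
SET_COLOR_G(color, 33); // color == 0x0B1621EF
SET_COLOR_B(color, 44); // color == 0x0B16212C, i.e. bytes 11, 22, 33, 44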

DWORD color_argb;
unsigned char a = 11; // first byte
unsigned char r = 22; // second byte
unsigned char g = 33; // third byte
unsigned char b = 44; // fourth byte
color_argb = 0;
int byte_number; // first byte = 1, second byte = 2, etc.
// Set first byte to a
byte_number = 1;
color_argb |= ( a << (8 * (4 - byte_number) ) );
// Set second byte to r
byte_number = 2;
color_argb |= ( r << (8 * (4 - byte_number) ) );
// Set third byte to g
byte_number = 3;
color_argb |= ( g << (8 * (4 - byte_number) ) );
// Set fourth byte to b
byte_number = 4;
color_argb |= ( b << (8 * (4 - byte_number) ) );
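If color_argb may already hold data, the same byte_number idea can be combined with the mask-first trick from the previous answer. A minimal sketch (set_byte is a hypothetical helper, not from the question):
// first byte = 1 ... fourth byte = 4, counted from the most significant byte
DWORD set_byte(DWORD value, int byte_number, unsigned char byte)
{
    int shift = 8 * (4 - byte_number);
    value &= ~(DWORD(0xFF) << shift); // clear the old byte
    value |= DWORD(byte) << shift;    // drop in the new one
    return value;
}
// usage: color_argb = set_byte(color_argb, 2, r);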

Copy n Bits from 8-bit Array into 64-bit Integer?

I am trying to copy n bits from any position of an array of uint8_ts into a single 64-bit integer. Here is a working solution that can copy an arbitrary number of bits into a 64-bit integer starting at the beginning of the array, but I want to be able to start at any position of the array.
For example I might want to copy bits 2 through 11 of the array:
{7, 128, 7}
In binary that would be:
00000111 10000000 00000111
And I want an integer with value:
0001111000
std::uint64_t key_reg(std::uint8_t* bytes, std::size_t n)
{
std::uint64_t reg = 0;
// The number of whole bytes (array elements completely covered by the n bits)
// e.g., if I'm copying 17 bits, even_bytes == 2
std::size_t even_bytes = (n - (n % 8)) / 8;
// what's left over after the even bytes
// in this case, remainder == 1
std::size_t remainder = n - even_bytes * 8;
// copy each byte into the integer
for(std::size_t i = 0; i < even_bytes; ++i)
if(remainder)
reg |= (std::uint64_t)bytes[i] << (8 * (even_bytes - i));
else
reg |= (std::uint64_t)bytes[i] << (8 * (even_bytes - i - 1));
// if there is an uneven number of bits, copy them in
if(remainder)
reg |= (std::uint64_t)bytes[even_bytes];
return reg;
}
Do you have any idea how to implement
std::uint64_t key_reg(std::uint8_t* bytes, std::size_t pos, std::size_t n);
I didn't think anyone would answer so fast, so here was a solution I came up with in the same style. I found this bitfieldmask function on stackoverflow, but I'm unable to find the question to credit the author.
template<typename R>
static constexpr R bitfieldmask(unsigned int const a, unsigned int const b)
{
return ((static_cast<R>(-1) >> (((sizeof(R) * CHAR_BIT) - 1) - (b)))
& ~((1 << (a)) - 1));
}
std::uint64_t key_reg(std::uint8_t* bytes, std::size_t pos, std::size_t n)
{
std::uint64_t reg = 0;
std::size_t starting_byte = (pos < 8) ? 0 : ((pos - (pos % 8)) / 8);
std::size_t even_bytes = (n - (n % 8)) / 8;
std::size_t remainder = n - even_bytes * 8;
for(std::size_t i = 0; i < even_bytes; ++i)
if(remainder)
reg |= (std::uint64_t)bytes[starting_byte + i] << (8 * (even_bytes - i));
else
reg |= (std::uint64_t)bytes[starting_byte + i] << (8 * (even_bytes - i - 1));
if(remainder)
reg |= (std::uint64_t)bytes[even_bytes];
// mask out anything before the first bit
if(pos % 8 != 0) {
std::size_t a = n - pos;
std::size_t b = n;
auto mask = bitfieldmask<std::uint64_t>(a, b);
reg = (reg & ~mask);
}
return reg;
}
I think it is just simpler to copy all necessary bytes and then mask extra bits:
std::uint64_t key_reg(std::uint8_t* bytes, std::size_t n)
{
std::uint64_t reg = 0;
std::reverse_copy( bytes, bytes + n / 8 + ( n % 8 != 0 ),
reinterpret_cast<char *>( &reg ) );
reg >>= n % 8;
reg &= ~( -1UL << n );
return reg;
}
Using pos would be a little more complex:
std::uint64_t key_reg(std::uint8_t* bytes, std::size_t pos, std::size_t n)
{
std::uint64_t reg = 0;
auto endpos = pos + n;
auto start = bytes + pos / 8;
auto end = bytes + endpos / 8 + ( endpos % 8 != 0 );
std::reverse_copy( start, end, reinterpret_cast<char *>( &reg ) );
reg >>= endpos % 8;
reg &= ~( -1UL << n );
return reg;
}
live example
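As a quick sanity check against the example from the question (this relies on a little-endian host, which the reverse_copy trick assumes):
std::uint8_t bytes[] = { 7, 128, 7 };
std::uint64_t v = key_reg(bytes, 2, 10); // bits 2 through 11
// v == 0b0001111000 == 0x78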
Your basic approach looks sound. To handle bit offsets that aren't multiples of 8, you just need to first read in a single partial byte and then proceed with the rest:
uint64_t key_reg(const uint8_t* bytes, size_t pos, size_t n) {
const uint8_t* ptr = bytes + pos / 8;
uint64_t result = 0;
if (pos % 8 > 0) {
/* read the first partial byte, masking off unwanted bits */
result = *(ptr++) & (0xFF >> (pos % 8));
if (n <= 8 - pos % 8) {
/* we need no more bits; shift off any excess and return early */
return result >> (8 - pos % 8 - n);
} else {
/* reduce the requested bit count by the number we got from this byte */
n -= 8 - pos % 8;
}
}
/* read and shift in as many whole bytes as we need */
while (n >= 8) {
result = (result << 8) + *(ptr++);
n -= 8;
}
/* finally read and shift in the last partial byte */
if (n > 0) {
result = (result << n) + (*ptr >> (8-n));
}
return result;
}
Here's an online demo with a simple test harness, demonstrating that this code indeed works correctly in all the edge cases I could find, such as reading a full 64 bits starting from the middle of a byte or reading only part of a single byte (which is actually a non-trivial special case, handled in a separate branch with its own return statement in the code above).
(Note that I wrote the code above in plain C since, like your original code, it doesn't really make use of any C++ specific features. Feel free to "C++ify" it by adding std:: where appropriate.)
One feature that the test harness doesn't check, but which I believe this code should possess, is that it never reads more bytes from the input array than necessary. In particular, the bytes array is not accessed at all if n == 0 (although a pointer to pos / 8 bytes after the start of the array is still calculated).
I have the following
struct MyType
{
std::array<uint8_t, 892> m_rguID;
uint16_t m_bitLength;
void GetBits(uint16_t startBit, uint16_t nBits, uint64_t & bits) const
};
void MyType::GetBits(uint16_t startBit, uint16_t nBits, uint64_t & bits) const
{
if(startBit + nBits > m_bitLength)
throw std::runtime_error("Index is out of range");
uint32_t num1 = startBit % 8U;
uint32_t num2 = 8U - num1;
uint32_t num3 = nBits >= num2 ? num2 : nBits;
uint32_t num4 = startBit >> 3;
bits = (uint64_t)(((int64_t)((uint64_t)m_rguID[num4] >> (8 - num3 - num1)) & (int64_t)((1 << num3) - 1)) << (nBits - num3));
uint32_t num5 = num4 + 1U;
int num6 = nBits - num3;
if(num6 <= 0)
return;
int num7 = num6 - 8;
int num8 = 8 - num6;
do
{
if(num6 >= 8)
{
bits |= (uint64_t)m_rguID[num5] << num7;
++num5;
}
else
{
bits |= (uint64_t)m_rguID[num5] >> num8;
++num5;
}
num6 += -8;
num7 += -8;
num8 += 8;
} while(num6 > 0);
}

Convert X8B8G8R8 to R8G8B8 C++ code

I would like to convert a hardware pixel buffer that is in the X8B8G8R8 format into an unsigned int 24-bit memory buffer.
Here is my attempt:
// pixels is uint32_t;
src.pixels = new pixel_t[src.width*src.height];
readbuffer->lock( Ogre::HardwareBuffer::HBL_DISCARD );
const Ogre::PixelBox &pb = readbuffer->getCurrentLock();
/// Update the contents of pb here
/// Image data starts at pb.data and has format pb.format
uint32 *data = static_cast<uint32*>(pb.data);
size_t height = pb.getHeight();
size_t width = pb.getWidth();
size_t pitch = pb.rowPitch; // Skip between rows of image
for ( size_t y = 0; y<height; ++y )
{
for ( size_t x = 0; x<width; ++x )
{
src.pixels[pitch*y + x] = data[pitch*y + x];
}
}
This should do
uint32_t BGRtoRGB(uint32_t col) {
return (col & 0x0000ff00) | ((col & 0x000000ff) << 16) | ((col & 0x00ff0000) >> 16);
}
With
src.pixels[pitch*y + x] = BGRtoRGB(data[pitch*y + x]);
Note: BGRtoRGB here converts both ways if you want it to, but remember that it throws away whatever you have in the X8 bits (alpha?); the color values themselves are kept.
To convert the other way around with an alpha of 0xff
uint32_t RGBtoXBGR(uint32_t col) {
return 0xff000000 | (col & 0x0000ff00) | ((col & 0x000000ff) << 16) | ((col & 0x00ff0000) >> 16);
}
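A quick sanity check of the two helpers (the pixel value is illustrative):
uint32_t xbgr = 0xFF112233;       // X=FF, B=11, G=22, R=33
uint32_t rgb  = BGRtoRGB(xbgr);   // 0x00332211 -> R=33, G=22, B=11
uint32_t back = RGBtoXBGR(rgb);   // 0xFF112233 again, alpha forced to 0xFF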

reverseBytes using Bitwise operators

reverseBytes - reverse bytes
Example: reverseBytes(0x0123456789abcdef) = 0xefcdab8967452301
Legal ops: ! ~ & ^ | + << >>
I'm required to solve the above problem. There is no limit on the number of operators. I already have a different solution, but I would like to know what's wrong with the following solution I came up with. Thank you.
long reverseBytes(long x) {
int a = x; //Get first bytes, first 8 bits
int b = (a >> 8); //Get 2nd byte
int c = (b >> 8); //3rd byte
int d = (c >> 8); //4th
int e = (d >> 8); //5th
int f = (e >> 8); //6th
int g = (f >> 8); //7th
int h = (g >> 8); //8th
a = a & 0xFF; //Remove the rest except LSB byte
b = b & 0xFF; // same
c = c & 0xFF;
d = d & 0xFF;
e = e & 0xFF;
f = f & 0xFF;
g = g & 0xFF;
h = h & 0xFF;
return ( (a << 56) + (b << 48) + (c << 40) + (d << 32) + (e << 24) + (f << 16) + (g << 8) + (h) );
}
For practical applications, you can use the library function bswap64().
For student practice, you need to write a simple loop like:
unsigned long reverseBytes(unsigned long x) {
unsigned long rc = 0;
for(int i = 0; i < 8; i++, x >>= 8)
rc = (rc << 8) | (unsigned char)x;
return rc;
}
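Assuming a 64-bit unsigned long, the loop gives the expected result:
unsigned long r = reverseBytes(0x0123456789abcdefUL);
// r == 0xefcdab8967452301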

Convert 0x1234 to 0x11223344

How do I expand the hexadecimal number 0x1234 to 0x11223344 in a high-performance way?
unsigned int c = 0x1234, b;
b = (c & 0xff) << 4 | c & 0xf | (c & 0xff0) << 8
| (c & 0xff00) << 12 | (c & 0xf000) << 16;
printf("%p -> %p\n", c, b);
Output:
0x1234 -> 0x11223344
I need this for color conversion. Users provide their data in the form 0xARGB, and I need to convert it to 0xAARRGGBB. And yes, there could be millions of them, because each could be a pixel: 1000x1000 pixels is already one million.
The actual case is even more complicated, because a single 32-bit value contains both a foreground and a background color. So 0xARGBargb becomes: [ 0xAARRGGBB, 0xaarrggbb ]
Oh yes, one more thing: in a real application I also negate the alpha, because in OpenGL 0xFF is fully opaque and 0x00 is fully transparent, which is inconvenient in most cases, because usually you just need the RGB part and transparency is assumed to be absent.
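Putting the whole per-pixel step together looks roughly like this; expand() stands for any of the 0xARGB-to-0xAARRGGBB routines in the answers below, the names are just placeholders:
uint32_t expand(uint32_t argb); // any 0xARGB -> 0xAARRGGBB routine
void split(uint32_t packed, uint32_t &fg, uint32_t &bg) // packed is 0xARGBargb
{
    fg = expand(packed >> 16) ^ 0xFF000000;     // foreground, alpha inverted
    bg = expand(packed & 0xFFFF) ^ 0xFF000000;  // background, alpha inverted
}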
This can be done using SSE2 as follows:
void ExpandSSE2(unsigned __int64 in, unsigned __int64 &outLo, unsigned __int64 &outHi) {
__m128i const mask = _mm_set1_epi16((short)0xF00F);
__m128i const mul0 = _mm_set1_epi16(0x0011);
__m128i const mul1 = _mm_set1_epi16(0x1000);
__m128i v;
v = _mm_cvtsi64_si128(in); // Move the 64-bit value to a 128-bit register
v = _mm_unpacklo_epi8(v, v); // 0x12 -> 0x1212
v = _mm_and_si128(v, mask); // 0x1212 -> 0x1002
v = _mm_mullo_epi16(v, mul0); // 0x1002 -> 0x1022
v = _mm_mulhi_epu16(v, mul1); // 0x1022 -> 0x0102
v = _mm_mullo_epi16(v, mul0); // 0x0102 -> 0x1122
outLo = _mm_extract_epi64(v, 0);
outHi = _mm_extract_epi64(v, 1);
}
Of course you’d want to put the guts of the function in an inner loop and pull out the constants. You will also want to skip the x64 registers and load values directly into 128-bit SSE registers. For an example of how to do this, refer to the SSE2 implementation in the performance test below.
At its core, there are five instructions, which perform the operation on four color values at a time. So, that is only about 1.25 instructions per color value. It should also be noted that SSE2 is available anywhere x64 is available.
Performance tests for an assortment of the solutions here
A few people have mentioned that the only way to know what's faster is to run the code, and this is unarguably true. So I've compiled a few of the solutions into a performance test so we can compare apples to apples. I chose solutions which I felt were significantly different from the others enough to require testing. All the solutions read from memory, operate on the data, and write back to memory. In practice some of the SSE solutions will require additional care around the alignment and handling cases when there aren't another full 16 bytes to process in the input data. The code I tested is x64 compiled under release using Visual Studio 2013 running on a 4+ GHz Core i7.
Here are my results:
ExpandOrig: 56.234 seconds // From asker's original question
ExpandSmallLUT: 30.209 seconds // From Dmitry's answer
ExpandLookupSmallOneLUT: 33.689 seconds // from Dmitry's answer
ExpandLookupLarge: 51.312 seconds // A straightforward lookup table
ExpandAShelly: 43.829 seconds // From AShelly's answer
ExpandAShellyMulOp: 43.580 seconds // AShelly's answer with an optimization
ExpandSSE4: 17.854 seconds // My original SSE4 answer
ExpandSSE4Unroll: 17.405 seconds // My original SSE4 answer with loop unrolling
ExpandSSE2: 17.281 seconds // My current SSE2 answer
ExpandSSE2Unroll: 17.152 seconds // My current SSE2 answer with loop unrolling
In the test results above you'll see I included the asker's code, three lookup table implementations including the small lookup table implementation proposed in Dmitry's answer. AShelly's solution is included too, as well as a version with an optimization I made (an operation can be eliminated). I included my original SSE4 implementation, as well as a superior SSE2 version I made later (now reflected as the answer), as well as unrolled versions of both since they were the fastest here, and I wanted to see how much unrolling sped them up. I also included an SSE4 implementation of AShelly's answer.
So far I have to declare myself the winner. But the source is below, so anyone can test it out on their platform, and include their own solution into the testing to see if they've made a solution that's even faster.
#define DATA_SIZE_IN ((unsigned)(1024 * 1024 * 128))
#define DATA_SIZE_OUT ((unsigned)(2 * DATA_SIZE_IN))
#define RERUN_COUNT 500
#include <cstdlib>
#include <ctime>
#include <iostream>
#include <utility>
#include <emmintrin.h> // SSE2
#include <tmmintrin.h> // SSSE3
#include <smmintrin.h> // SSE4
void ExpandOrig(unsigned char const *in, unsigned char const *past, unsigned char *out) {
unsigned u, v;
do {
// Read in data
u = *(unsigned const*)in;
v = u >> 16;
u &= 0x0000FFFF;
// Do computation
u = (u & 0x00FF) << 4
| (u & 0x000F)
| (u & 0x0FF0) << 8
| (u & 0xFF00) << 12
| (u & 0xF000) << 16;
v = (v & 0x00FF) << 4
| (v & 0x000F)
| (v & 0x0FF0) << 8
| (v & 0xFF00) << 12
| (v & 0xF000) << 16;
// Store data
*(unsigned*)(out) = u;
*(unsigned*)(out + 4) = v;
in += 4;
out += 8;
} while (in != past);
}
unsigned LutLo[256],
LutHi[256];
void MakeLutLo(void) {
for (unsigned i = 0, x; i < 256; ++i) {
x = i;
x = ((x & 0xF0) << 4) | (x & 0x0F);
x |= (x << 4);
LutLo[i] = x;
}
}
void MakeLutHi(void) {
for (unsigned i = 0, x; i < 256; ++i) {
x = i;
x = ((x & 0xF0) << 20) | ((x & 0x0F) << 16);
x |= (x << 4);
LutHi[i] = x;
}
}
void ExpandLookupSmall(unsigned char const *in, unsigned char const *past, unsigned char *out) {
unsigned u, v;
do {
// Read in data
u = *(unsigned const*)in;
v = u >> 16;
u &= 0x0000FFFF;
// Do computation
u = LutHi[u >> 8] | LutLo[u & 0xFF];
v = LutHi[v >> 8] | LutLo[v & 0xFF];
// Store data
*(unsigned*)(out) = u;
*(unsigned*)(out + 4) = v;
in += 4;
out += 8;
} while (in != past);
}
void ExpandLookupSmallOneLUT(unsigned char const *in, unsigned char const *past, unsigned char *out) {
unsigned u, v;
do {
// Read in data
u = *(unsigned const*)in;
v = u >> 16;
u &= 0x0000FFFF;
// Do computation
u = ((LutLo[u >> 8] << 16) | LutLo[u & 0xFF]);
v = ((LutLo[v >> 8] << 16) | LutLo[v & 0xFF]);
// Store data
*(unsigned*)(out) = u;
*(unsigned*)(out + 4) = v;
in += 4;
out += 8;
} while (in != past);
}
unsigned LutLarge[256 * 256];
void MakeLutLarge(void) {
for (unsigned i = 0; i < (256 * 256); ++i)
LutLarge[i] = LutHi[i >> 8] | LutLo[i & 0xFF];
}
void ExpandLookupLarge(unsigned char const *in, unsigned char const *past, unsigned char *out) {
unsigned u, v;
do {
// Read in data
u = *(unsigned const*)in;
v = u >> 16;
u &= 0x0000FFFF;
// Do computation
u = LutLarge[u];
v = LutLarge[v];
// Store data
*(unsigned*)(out) = u;
*(unsigned*)(out + 4) = v;
in += 4;
out += 8;
} while (in != past);
}
void ExpandAShelly(unsigned char const *in, unsigned char const *past, unsigned char *out) {
unsigned u, v, w, x;
do {
// Read in data
u = *(unsigned const*)in;
v = u >> 16;
u &= 0x0000FFFF;
// Do computation
w = (((u & 0xF0F) * 0x101) & 0xF000F) + (((u & 0xF0F0) * 0x1010) & 0xF000F00);
x = (((v & 0xF0F) * 0x101) & 0xF000F) + (((v & 0xF0F0) * 0x1010) & 0xF000F00);
w += w * 0x10;
x += x * 0x10;
// Store data
*(unsigned*)(out) = w;
*(unsigned*)(out + 4) = x;
in += 4;
out += 8;
} while (in != past);
}
void ExpandAShellyMulOp(unsigned char const *in, unsigned char const *past, unsigned char *out) {
unsigned u, v;
do {
// Read in data
u = *(unsigned const*)in;
v = u >> 16;
u &= 0x0000FFFF;
// Do computation
u = ((((u & 0xF0F) * 0x101) & 0xF000F) + (((u & 0xF0F0) * 0x1010) & 0xF000F00)) * 0x11;
v = ((((v & 0xF0F) * 0x101) & 0xF000F) + (((v & 0xF0F0) * 0x1010) & 0xF000F00)) * 0x11;
// Store data
*(unsigned*)(out) = u;
*(unsigned*)(out + 4) = v;
in += 4;
out += 8;
} while (in != past);
}
void ExpandSSE4(unsigned char const *in, unsigned char const *past, unsigned char *out) {
__m128i const mask0 = _mm_set1_epi16((short)0x8000),
mask1 = _mm_set1_epi8(0x0F),
mul = _mm_set1_epi16(0x0011);
__m128i u, v, w, x;
do {
// Read input into low 8 bytes of u and v
u = _mm_load_si128((__m128i const*)in);
v = _mm_unpackhi_epi8(u, u); // Expand each single byte to two bytes
u = _mm_unpacklo_epi8(u, u); // Do it again for v
w = _mm_srli_epi16(u, 4); // Copy the value into w and shift it right half a byte
x = _mm_srli_epi16(v, 4); // Do it again for v
u = _mm_blendv_epi8(u, w, mask0); // Select odd bytes from w, and even bytes from v, giving the desired value in the upper nibble of each byte
v = _mm_blendv_epi8(v, x, mask0); // Do it again for v
u = _mm_and_si128(u, mask1); // Clear the all the upper nibbles
v = _mm_and_si128(v, mask1); // Do it again for v
u = _mm_mullo_epi16(u, mul); // Multiply each 16-bit value by 0x0011 to duplicate the lower nibble in the upper nibble of each byte
v = _mm_mullo_epi16(v, mul); // Do it again for v
// Write output
_mm_store_si128((__m128i*)(out ), u);
_mm_store_si128((__m128i*)(out + 16), v);
in += 16;
out += 32;
} while (in != past);
}
void ExpandSSE4Unroll(unsigned char const *in, unsigned char const *past, unsigned char *out) {
__m128i const mask0 = _mm_set1_epi16((short)0x8000),
mask1 = _mm_set1_epi8(0x0F),
mul = _mm_set1_epi16(0x0011);
__m128i u0, v0, w0, x0,
u1, v1, w1, x1,
u2, v2, w2, x2,
u3, v3, w3, x3;
do {
// Read input into low 8 bytes of u and v
u0 = _mm_load_si128((__m128i const*)(in ));
u1 = _mm_load_si128((__m128i const*)(in + 16));
u2 = _mm_load_si128((__m128i const*)(in + 32));
u3 = _mm_load_si128((__m128i const*)(in + 48));
v0 = _mm_unpackhi_epi8(u0, u0); // Expand each single byte to two bytes
u0 = _mm_unpacklo_epi8(u0, u0); // Do it again for v
v1 = _mm_unpackhi_epi8(u1, u1); // Do it again
u1 = _mm_unpacklo_epi8(u1, u1); // Again for u1
v2 = _mm_unpackhi_epi8(u2, u2); // Again for v1
u2 = _mm_unpacklo_epi8(u2, u2); // Again for u2
v3 = _mm_unpackhi_epi8(u3, u3); // Again for v2
u3 = _mm_unpacklo_epi8(u3, u3); // Again for u3
w0 = _mm_srli_epi16(u0, 4); // Copy the value into w and shift it right half a byte
x0 = _mm_srli_epi16(v0, 4); // Do it again for v
w1 = _mm_srli_epi16(u1, 4); // Again for u1
x1 = _mm_srli_epi16(v1, 4); // Again for v1
w2 = _mm_srli_epi16(u2, 4); // Again for u2
x2 = _mm_srli_epi16(v2, 4); // Again for v2
w3 = _mm_srli_epi16(u3, 4); // Again for u3
x3 = _mm_srli_epi16(v3, 4); // Again for v3
u0 = _mm_blendv_epi8(u0, w0, mask0); // Select even bytes from w, and odd bytes from v, giving the desired value in the upper nibble of each byte
v0 = _mm_blendv_epi8(v0, x0, mask0); // Do it again for v
u1 = _mm_blendv_epi8(u1, w1, mask0); // Again for u1
v1 = _mm_blendv_epi8(v1, x1, mask0); // Again for v1
u2 = _mm_blendv_epi8(u2, w2, mask0); // Again for u2
v2 = _mm_blendv_epi8(v2, x2, mask0); // Again for v2
u3 = _mm_blendv_epi8(u3, w3, mask0); // Again for u3
v3 = _mm_blendv_epi8(v3, x3, mask0); // Again for v3
u0 = _mm_and_si128(u0, mask1); // Clear the all the upper nibbles
v0 = _mm_and_si128(v0, mask1); // Do it again for v
u1 = _mm_and_si128(u1, mask1); // Again for u1
v1 = _mm_and_si128(v1, mask1); // Again for v1
u2 = _mm_and_si128(u2, mask1); // Again for u2
v2 = _mm_and_si128(v2, mask1); // Again for v2
u3 = _mm_and_si128(u3, mask1); // Again for u3
v3 = _mm_and_si128(v3, mask1); // Again for v3
u0 = _mm_mullo_epi16(u0, mul); // Multiply each 16-bit value by 0x0011 to duplicate the lower nibble in the upper nibble of each byte
v0 = _mm_mullo_epi16(v0, mul); // Do it again for v
u1 = _mm_mullo_epi16(u1, mul); // Again for u1
v1 = _mm_mullo_epi16(v1, mul); // Again for v1
u2 = _mm_mullo_epi16(u2, mul); // Again for u2
v2 = _mm_mullo_epi16(v2, mul); // Again for v2
u3 = _mm_mullo_epi16(u3, mul); // Again for u3
v3 = _mm_mullo_epi16(v3, mul); // Again for v3
// Write output
_mm_store_si128((__m128i*)(out ), u0);
_mm_store_si128((__m128i*)(out + 16), v0);
_mm_store_si128((__m128i*)(out + 32), u1);
_mm_store_si128((__m128i*)(out + 48), v1);
_mm_store_si128((__m128i*)(out + 64), u2);
_mm_store_si128((__m128i*)(out + 80), v2);
_mm_store_si128((__m128i*)(out + 96), u3);
_mm_store_si128((__m128i*)(out + 112), v3);
in += 64;
out += 128;
} while (in != past);
}
void ExpandSSE2(unsigned char const *in, unsigned char const *past, unsigned char *out) {
__m128i const mask = _mm_set1_epi16((short)0xF00F),
mul0 = _mm_set1_epi16(0x0011),
mul1 = _mm_set1_epi16(0x1000);
__m128i u, v;
do {
// Read input into low 8 bytes of u and v
u = _mm_load_si128((__m128i const*)in);
v = _mm_unpackhi_epi8(u, u); // Expand each single byte to two bytes
u = _mm_unpacklo_epi8(u, u); // Do it again for v
u = _mm_and_si128(u, mask);
v = _mm_and_si128(v, mask);
u = _mm_mullo_epi16(u, mul0);
v = _mm_mullo_epi16(v, mul0);
u = _mm_mulhi_epu16(u, mul1); // This can also be done with a right shift of 4 bits, but this seems to measure faster
v = _mm_mulhi_epu16(v, mul1);
u = _mm_mullo_epi16(u, mul0);
v = _mm_mullo_epi16(v, mul0);
// write output
_mm_store_si128((__m128i*)(out ), u);
_mm_store_si128((__m128i*)(out + 16), v);
in += 16;
out += 32;
} while (in != past);
}
void ExpandSSE2Unroll(unsigned char const *in, unsigned char const *past, unsigned char *out) {
__m128i const mask = _mm_set1_epi16((short)0xF00F),
mul0 = _mm_set1_epi16(0x0011),
mul1 = _mm_set1_epi16(0x1000);
__m128i u0, v0,
u1, v1;
do {
// Read input into low 8 bytes of u and v
u0 = _mm_load_si128((__m128i const*)(in ));
u1 = _mm_load_si128((__m128i const*)(in + 16));
v0 = _mm_unpackhi_epi8(u0, u0); // Expand each single byte to two bytes
u0 = _mm_unpacklo_epi8(u0, u0); // Do it again for v
v1 = _mm_unpackhi_epi8(u1, u1); // Do it again
u1 = _mm_unpacklo_epi8(u1, u1); // Again for u1
u0 = _mm_and_si128(u0, mask);
v0 = _mm_and_si128(v0, mask);
u1 = _mm_and_si128(u1, mask);
v1 = _mm_and_si128(v1, mask);
u0 = _mm_mullo_epi16(u0, mul0);
v0 = _mm_mullo_epi16(v0, mul0);
u1 = _mm_mullo_epi16(u1, mul0);
v1 = _mm_mullo_epi16(v1, mul0);
u0 = _mm_mulhi_epu16(u0, mul1);
v0 = _mm_mulhi_epu16(v0, mul1);
u1 = _mm_mulhi_epu16(u1, mul1);
v1 = _mm_mulhi_epu16(v1, mul1);
u0 = _mm_mullo_epi16(u0, mul0);
v0 = _mm_mullo_epi16(v0, mul0);
u1 = _mm_mullo_epi16(u1, mul0);
v1 = _mm_mullo_epi16(v1, mul0);
// write output
_mm_store_si128((__m128i*)(out ), u0);
_mm_store_si128((__m128i*)(out + 16), v0);
_mm_store_si128((__m128i*)(out + 32), u1);
_mm_store_si128((__m128i*)(out + 48), v1);
in += 32;
out += 64;
} while (in != past);
}
void ExpandAShellySSE4(unsigned char const *in, unsigned char const *past, unsigned char *out) {
__m128i const zero = _mm_setzero_si128(),
v0F0F = _mm_set1_epi32(0x0F0F),
vF0F0 = _mm_set1_epi32(0xF0F0),
v0101 = _mm_set1_epi32(0x0101),
v1010 = _mm_set1_epi32(0x1010),
v000F000F = _mm_set1_epi32(0x000F000F),
v0F000F00 = _mm_set1_epi32(0x0F000F00),
v0011 = _mm_set1_epi32(0x0011);
__m128i u, v, w, x;
do {
// Read in data
u = _mm_load_si128((__m128i const*)in);
v = _mm_unpackhi_epi16(u, zero);
u = _mm_unpacklo_epi16(u, zero);
// original source: ((((a & 0xF0F) * 0x101) & 0xF000F) + (((a & 0xF0F0) * 0x1010) & 0xF000F00)) * 0x11;
w = _mm_and_si128(u, v0F0F);
x = _mm_and_si128(v, v0F0F);
u = _mm_and_si128(u, vF0F0);
v = _mm_and_si128(v, vF0F0);
w = _mm_mullo_epi32(w, v0101); // _mm_mullo_epi32 is what makes this require SSE4 instead of SSE2
x = _mm_mullo_epi32(x, v0101);
u = _mm_mullo_epi32(u, v1010);
v = _mm_mullo_epi32(v, v1010);
w = _mm_and_si128(w, v000F000F);
x = _mm_and_si128(x, v000F000F);
u = _mm_and_si128(u, v0F000F00);
v = _mm_and_si128(v, v0F000F00);
u = _mm_add_epi32(u, w);
v = _mm_add_epi32(v, x);
u = _mm_mullo_epi32(u, v0011);
v = _mm_mullo_epi32(v, v0011);
// write output
_mm_store_si128((__m128i*)(out ), u);
_mm_store_si128((__m128i*)(out + 16), v);
in += 16;
out += 32;
} while (in != past);
}
int main() {
unsigned char *const indat = new unsigned char[DATA_SIZE_IN ],
*const outdat0 = new unsigned char[DATA_SIZE_OUT],
*const outdat1 = new unsigned char[DATA_SIZE_OUT],
* curout = outdat0,
* lastout = outdat1,
* place;
unsigned start,
stop;
place = indat + DATA_SIZE_IN - 1;
do {
*place = (unsigned char)rand();
} while (place-- != indat);
MakeLutLo();
MakeLutHi();
MakeLutLarge();
for (unsigned testcount = 0; testcount < 1000; ++testcount) {
// Solution posted by the asker
start = clock();
for (unsigned rerun = 0; rerun < RERUN_COUNT; ++rerun)
ExpandOrig(indat, indat + DATA_SIZE_IN, curout);
stop = clock();
std::cout << "ExpandOrig:\t\t\t" << (((stop - start) / 1000) / 60) << ':' << (((stop - start) / 1000) % 60) << ":." << ((stop - start) % 1000) << std::endl;
std::swap(curout, lastout);
// Dmitry's small lookup table solution
start = clock();
for (unsigned rerun = 0; rerun < RERUN_COUNT; ++rerun)
ExpandLookupSmall(indat, indat + DATA_SIZE_IN, curout);
stop = clock();
std::cout << "ExpandSmallLUT:\t\t\t" << (((stop - start) / 1000) / 60) << ':' << (((stop - start) / 1000) % 60) << ":." << ((stop - start) % 1000) << std::endl;
std::swap(curout, lastout);
if (memcmp(outdat0, outdat1, DATA_SIZE_OUT))
std::cout << "INCORRECT OUTPUT" << std::endl;
// Dmitry's small lookup table solution using only one lookup table
start = clock();
for (unsigned rerun = 0; rerun < RERUN_COUNT; ++rerun)
ExpandLookupSmallOneLUT(indat, indat + DATA_SIZE_IN, curout);
stop = clock();
std::cout << "ExpandLookupSmallOneLUT:\t" << (((stop - start) / 1000) / 60) << ':' << (((stop - start) / 1000) % 60) << ":." << ((stop - start) % 1000) << std::endl;
std::swap(curout, lastout);
if (memcmp(outdat0, outdat1, DATA_SIZE_OUT))
std::cout << "INCORRECT OUTPUT" << std::endl;
// Large lookup table solution
start = clock();
for (unsigned rerun = 0; rerun < RERUN_COUNT; ++rerun)
ExpandLookupLarge(indat, indat + DATA_SIZE_IN, curout);
stop = clock();
std::cout << "ExpandLookupLarge:\t\t" << (((stop - start) / 1000) / 60) << ':' << (((stop - start) / 1000) % 60) << ":." << ((stop - start) % 1000) << std::endl;
std::swap(curout, lastout);
if (memcmp(outdat0, outdat1, DATA_SIZE_OUT))
std::cout << "INCORRECT OUTPUT" << std::endl;
// AShelly's Interleave bits by Binary Magic Numbers solution
start = clock();
for (unsigned rerun = 0; rerun < RERUN_COUNT; ++rerun)
ExpandAShelly(indat, indat + DATA_SIZE_IN, curout);
stop = clock();
std::cout << "ExpandAShelly:\t\t\t" << (((stop - start) / 1000) / 60) << ':' << (((stop - start) / 1000) % 60) << ":." << ((stop - start) % 1000) << std::endl;
std::swap(curout, lastout);
if (memcmp(outdat0, outdat1, DATA_SIZE_OUT))
std::cout << "INCORRECT OUTPUT" << std::endl;
// AShelly's Interleave bits by Binary Magic Numbers solution optimizing out an addition
start = clock();
for (unsigned rerun = 0; rerun < RERUN_COUNT; ++rerun)
ExpandAShellyMulOp(indat, indat + DATA_SIZE_IN, curout);
stop = clock();
std::cout << "ExpandAShellyMulOp:\t\t" << (((stop - start) / 1000) / 60) << ':' << (((stop - start) / 1000) % 60) << ":." << ((stop - start) % 1000) << std::endl;
std::swap(curout, lastout);
if (memcmp(outdat0, outdat1, DATA_SIZE_OUT))
std::cout << "INCORRECT OUTPUT" << std::endl;
// My SSE4 solution
start = clock();
for (unsigned rerun = 0; rerun < RERUN_COUNT; ++rerun)
ExpandSSE4(indat, indat + DATA_SIZE_IN, curout);
stop = clock();
std::cout << "ExpandSSE4:\t\t\t" << (((stop - start) / 1000) / 60) << ':' << (((stop - start) / 1000) % 60) << ":." << ((stop - start) % 1000) << std::endl;
std::swap(curout, lastout);
if (memcmp(outdat0, outdat1, DATA_SIZE_OUT))
std::cout << "INCORRECT OUTPUT" << std::endl;
// My SSE4 solution unrolled
start = clock();
for (unsigned rerun = 0; rerun < RERUN_COUNT; ++rerun)
ExpandSSE4Unroll(indat, indat + DATA_SIZE_IN, curout);
stop = clock();
std::cout << "ExpandSSE4Unroll:\t\t" << (((stop - start) / 1000) / 60) << ':' << (((stop - start) / 1000) % 60) << ":." << ((stop - start) % 1000) << std::endl;
std::swap(curout, lastout);
if (memcmp(outdat0, outdat1, DATA_SIZE_OUT))
std::cout << "INCORRECT OUTPUT" << std::endl;
// My SSE2 solution
start = clock();
for (unsigned rerun = 0; rerun < RERUN_COUNT; ++rerun)
ExpandSSE2(indat, indat + DATA_SIZE_IN, curout);
stop = clock();
std::cout << "ExpandSSE2:\t\t\t" << (((stop - start) / 1000) / 60) << ':' << (((stop - start) / 1000) % 60) << ":." << ((stop - start) % 1000) << std::endl;
std::swap(curout, lastout);
if (memcmp(outdat0, outdat1, DATA_SIZE_OUT))
std::cout << "INCORRECT OUTPUT" << std::endl;
// My SSE2 solution unrolled
start = clock();
for (unsigned rerun = 0; rerun < RERUN_COUNT; ++rerun)
ExpandSSE2Unroll(indat, indat + DATA_SIZE_IN, curout);
stop = clock();
std::cout << "ExpandSSE2Unroll:\t\t" << (((stop - start) / 1000) / 60) << ':' << (((stop - start) / 1000) % 60) << ":." << ((stop - start) % 1000) << std::endl;
std::swap(curout, lastout);
if (memcmp(outdat0, outdat1, DATA_SIZE_OUT))
std::cout << "INCORRECT OUTPUT" << std::endl;
// AShelly's Interleave bits by Binary Magic Numbers solution implemented using SSE2
start = clock();
for (unsigned rerun = 0; rerun < RERUN_COUNT; ++rerun)
ExpandAShellySSE4(indat, indat + DATA_SIZE_IN, curout);
stop = clock();
std::cout << "ExpandAShellySSE4:\t\t" << (((stop - start) / 1000) / 60) << ':' << (((stop - start) / 1000) % 60) << ":." << ((stop - start) % 1000) << std::endl;
std::swap(curout, lastout);
if (memcmp(outdat0, outdat1, DATA_SIZE_OUT))
std::cout << "INCORRECT OUTPUT" << std::endl;
}
delete[] indat;
delete[] outdat0;
delete[] outdat1;
return 0;
}
NOTE:
I had an SSE4 implementation here initially. I found a way to implement this using SSE2, which is better because it will run on more platforms. The SSE2 implementation is also faster. So, the solution presented at the top is now the SSE2 implementation and not the SSE4 one. The SSE4 implementation can still be seen in the performance tests or in the edit history.
I'm not sure what the most efficient way would be, but this is a little shorter:
#include <stdio.h>
int main()
{
unsigned x = 0x1234;
x = (x << 8) | x;
x = ((x & 0x00f000f0) << 4) | (x & 0x000f000f);
x = (x << 4) | x;
printf("0x1234 -> 0x%08x\n",x);
return 0;
}
If you need to do this repeatedly and very quickly, as suggested in your edit, you could consider generating a lookup table and using that instead. The following function dynamically allocates and initializes such a table:
unsigned *makeLookupTable(void)
{
unsigned *tbl = malloc(sizeof(unsigned) * 65536);
if (!tbl) return NULL;
int i;
for (i = 0; i < 65536; i++) {
unsigned x = i;
x |= (x << 8);
x = ((x & 0x00f000f0) << 4) | (x & 0x000f000f);
x |= (x << 4);
/* Uncomment next line to invert the high byte as mentioned in the edit. */
/* x = x ^ 0xff000000; */
tbl[i] = x;
}
return tbl;
}
After that each conversion is just something like:
result = lookuptable[input];
..or maybe:
result = lookuptable[input & 0xffff];
Or a smaller, more cache-friendly lookup table (or pair) could be used with one lookup each for the high and low bytes (as noted by @LưuVĩnhPhúc in the comments). In that case, table generation code might be:
unsigned *makeLookupTableLow(void)
{
unsigned *tbl = malloc(sizeof(unsigned) * 256);
if (!tbl) return NULL;
int i;
for (i = 0; i < 256; i++) {
unsigned x = i;
x = ((x & 0xf0) << 4) | (x & 0x0f);
x |= (x << 4);
tbl[i] = x;
}
return tbl;
}
...and an optional second table:
unsigned *makeLookupTableHigh(void)
{
unsigned *tbl = malloc(sizeof(unsigned) * 256);
if (!tbl) return NULL;
int i;
for (i = 0; i < 256; i++) {
unsigned x = i;
x = ((x & 0xf0) << 20) | ((x & 0x0f) << 16);
x |= (x << 4);
/* uncomment next line to invert high byte */
/* x = x ^ 0xff000000; */
tbl[i] = x;
}
return tbl;
}
...and to convert a value with two tables:
result = hightable[input >> 8] | lowtable[input & 0xff];
...or with one (just the low table above):
result = (lowtable[input >> 8] << 16) | lowtable[input & 0xff];
result ^= 0xff000000; /* to invert high byte */
If the upper part of the value (alpha?) doesn't change much, even the single large table might perform well since consecutive lookups would be closer together in the table.
I took the performance test code @Apriori posted, made some adjustments, and added tests for the other responses that he hadn't included originally... then compiled three versions of it with different settings. One is 64-bit code with SSE4.1 enabled, where the compiler can make use of SSE for optimizations... and then two 32-bit versions, one with SSE and one without. Although all three were run on the same fairly recent processor, the results show how the optimal solution can change depending on the processor features:
64b SSE4.1 32b SSE4.1 32b no SSE
-------------------------- ---------- ---------- ----------
ExpandOrig time: 3.502 s 3.501 s 6.260 s
ExpandLookupSmall time: 3.530 s 3.997 s 3.996 s
ExpandLookupLarge time: 3.434 s 3.419 s 3.427 s
ExpandIsalamon time: 3.654 s 3.673 s 8.870 s
ExpandIsalamonOpt time: 3.784 s 3.720 s 8.719 s
ExpandChronoKitsune time: 3.658 s 3.463 s 6.546 s
ExpandEvgenyKluev time: 6.790 s 7.697 s 13.383 s
ExpandIammilind time: 3.485 s 3.498 s 6.436 s
ExpandDmitri time: 3.457 s 3.477 s 5.461 s
ExpandNitish712 time: 3.574 s 3.800 s 6.789 s
ExpandAdamLiss time: 3.673 s 5.680 s 6.969 s
ExpandAShelly time: 3.524 s 4.295 s 5.867 s
ExpandAShellyMulOp time: 3.527 s 4.295 s 5.852 s
ExpandSSE4 time: 3.428 s
ExpandSSE4Unroll time: 3.333 s
ExpandSSE2 time: 3.392 s
ExpandSSE2Unroll time: 3.318 s
ExpandAShellySSE4 time: 3.392 s
The executables were compiled on 64-bit Linux with gcc 4.8.1, using -m64 -O3 -march=core2 -msse4.1, -m32 -O3 -march=core2 -msse4.1 and -m32 -O3 -march=core2 -mno-sse respectively. @Apriori's SSE tests were omitted for the 32-bit builds (crashed on 32-bit with SSE enabled, and obviously won't work with SSE disabled).
Among the adjustments made was to use actual image data instead of random values (photos of objects with transparent backgrounds), which greatly improved the performance of the large lookup table but made little difference for the others.
Essentially, the lookup tables win by a landslide when SSE is unavailable (or unused)... and the manually coded SSE solutions win otherwise. However, it's also noteworthy that when the compiler could use SSE for optimizations, most of the bit manipulation solutions were almost as fast as the manually coded SSE -- still slower, but only marginally.
Here's another attempt, using eight operations:
b = (((c & 0x0F0F) * 0x0101) & 0x00F000F) +
(((c & 0xF0F0) * 0x1010) & 0xF000F00);
b += b * 0x10;
printf("%x\n",b); //Shows '0x11223344'
Note: this post originally contained quite different code, based on Interleave bits by Binary Magic Numbers from Sean Anderson's bithacks page. But that wasn't quite what the OP was asking for, so it has been removed. The majority of the comments below refer to that missing version.
I wanted to add this link into the answer pool because I think it is extremely important, when talking about optimization, to remember the hardware we are running on, as well as the technologies compiling our code for that platform.
The blog post Playing with the CPU pipeline looks into optimizing a piece of code for the CPU pipeline. It shows an example where the author simplifies the math down to the fewest actual mathematical operations, yet it was FAR from the most optimal solution in terms of time. I have seen a couple of answers here speaking to that effect, and they may be correct, they may not. The only way to know is to actually measure the time from start to finish of your particular snippet of code, in comparison to others. Read this blog; it is EXTREMELY interesting.
I should mention that in this particular case I am not going to put ANY code up here unless I have truly made multiple attempts, and actually gotten one that is measurably faster.
I think that the lookup table approach suggested by Dimitri is a good choice, but I suggest going one step further and generating the table at compile time; doing the work at compile time will obviously reduce the execution time.
First, we create a compile-time value, using any of the suggested methods:
constexpr unsigned int transform1(unsigned int x)
{
return ((x << 8) | x);
}
constexpr unsigned int transform2(unsigned int x)
{
return (((x & 0x00f000f0) << 4) | (x & 0x000f000f));
}
constexpr unsigned int transform3(unsigned int x)
{
return ((x << 4) | x);
}
constexpr unsigned int transform(unsigned int x)
{
return transform3(transform2(transform1(x)));
}
// Dimitri version, using constexprs
template <unsigned int argb> struct aarrggbb_dimitri
{
static const unsigned int value = transform(argb);
};
// Adam Liss version
template <unsigned int argb> struct aarrggbb_adamLiss
{
static const unsigned int value =
(argb & 0xf000) * 0x11000 +
(argb & 0x0f00) * 0x01100 +
(argb & 0x00f0) * 0x00110 +
(argb & 0x000f) * 0x00011;
};
And then, we create the compile-time lookup table with whatever method we have available. I'd like to use the C++14 integer sequence, but I don't know which compiler the OP will be using. So another possible approach would be to use a pretty ugly macro:
#define EXPAND16(x) aarrggbb<x + 0>::value, \
aarrggbb<x + 1>::value, \
aarrggbb<x + 2>::value, \
aarrggbb<x + 3>::value, \
aarrggbb<x + 4>::value, \
aarrggbb<x + 5>::value, \
aarrggbb<x + 6>::value, \
... and so on
#define EXPAND EXPAND16(0), \
EXPAND16(0x10), \
EXPAND16(0x20), \
EXPAND16(0x30), \
EXPAND16(0x40), \
... and so on
... and so on
See demo here.
PS: The Adam Liss approach could be used without C++11.
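With a C++14 compiler, the integer-sequence approach mentioned above could look roughly like this (a sketch, not the code from the demo; generating all 65536 entries in one pack expansion can be heavy on compile time, so a pair of 256-entry per-byte tables may be preferable in practice):
#include <array>
#include <cstddef>
#include <utility>
template <std::size_t... Is>
constexpr std::array<unsigned int, sizeof...(Is)> make_table(std::index_sequence<Is...>)
{
    // expand the pack: one transform() call per table index
    return {{ transform(static_cast<unsigned int>(Is))... }};
}
constexpr auto lookup = make_table(std::make_index_sequence<65536>{});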
If multiplication is cheap and 64-bit arithmetic is available, you could use this code:
uint64_t x = 0x1234;
x *= 0x0001000100010001ull;
x &= 0xF0000F0000F0000Full;
x *= 0x0000001001001001ull;
x &= 0xF0F0F0F000000000ull;
x = (x >> 36) * 0x11;
std::cout << std::hex << x << '\n';
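For x = 0x1234 the steps work out as follows (traced by hand):
// x * 0x0001000100010001 -> 0x1234123412341234
// x & 0xF0000F0000F0000F -> 0x1000020000300004
// x * 0x0000001001001001 -> 0x1320324304304004
// x & 0xF0F0F0F000000000 -> 0x1020304000000000
// (x >> 36) * 0x11       -> 0x01020304 * 0x11 = 0x11223344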
In fact, it uses the same idea as the original attempt by AShelly.
This works and may be easier to understand, but bit manipulations are so cheap that I wouldn't worry much about efficiency.
#include <stdio.h>
#include <stdlib.h>
int main() {
unsigned int c = 0x1234, b;
b = (c & 0xf000) * 0x11000 + (c & 0x0f00) * 0x01100 +
(c & 0x00f0) * 0x00110 + (c & 0x000f) * 0x00011;
printf("%x -> %x\n", c, b);
}
Assuming that you always want to convert 0xWXYZ to 0xWWXXYYZZ, I believe the solution below would be a little faster than the one you suggested:
unsigned int c = 0x1234;
unsigned int b = (c & 0xf) | ((c & 0xf0) << 4) |
((c & 0xf00) << 8) | ((c & 0xf000) << 12);
b |= (b << 4);
Notice that, one &(and) operation is saved from your solution. :-)
Demo.
Another way is:
DWORD OrVal(DWORD & nible_pos, DWORD input_val, DWORD temp_val, int shift)
{
if (nible_pos==0)
nible_pos = 0x0000000F;
else
nible_pos = nible_pos << 4;
DWORD nible = input_val & nible_pos;
temp_val |= (nible << shift);
temp_val |= (nible << (shift + 4));
return temp_val;
}
DWORD Converter2(DWORD input_val)
{
DWORD nible_pos = 0x00000000;
DWORD temp_val = 0x00000000;
temp_val = OrVal(nible_pos, input_val, temp_val, 0);
temp_val = OrVal(nible_pos, input_val, temp_val, 4);
temp_val = OrVal(nible_pos, input_val, temp_val, 8);
temp_val = OrVal(nible_pos, input_val, temp_val, 12);
return temp_val;
}
DWORD val2 = Converter2(0x1234);
An optimized version (3 times faster):
DWORD Converter3(DWORD input_val)
{
DWORD nible_pos = 0;
DWORD temp_val = 0;
int shift = 0;
DWORD bit_nible[4] = { 0x000F, 0x000F0, 0x0F00, 0xF000 };
for ( ; shift < 16; shift+=4 )
{
if (nible_pos==0)
nible_pos = 0x0000000F;
else
nible_pos = nible_pos << 4;
DWORD nible = input_val & nible_pos;
temp_val |= (nible << shift);
temp_val |= (nible << (shift + 4));
}
return temp_val;
}
Perhaps this could be simpler and more efficient.
unsigned int g = 0x1234;
unsigned int ans = 0;
ans = ( ( g & 0xf000 ) << 16) + ( (g & 0xf00 ) << 12)
+ ( ( g&0xf0 ) << 8) + ( ( g&0xf ) << 4);
ans = ( ans | ans>>4 );
printf("%p -> %p\n", g, ans);
unsigned long transform(unsigned long n)
{
/* n: 00AR
* 00GB
*/
n = ((n & 0xff00) << 8) | (n & 0x00ff);
/* n: 0AR0
* 0GB0
*/
n <<= 4;
/* n: AAR0
* GGB0
*/
n |= (n & 0x0f000f00L) << 4;
/* n: AARR
* GGBB
*/
n |= (n & 0x00f000f0L) >> 4;
return n;
}
The alpha and red components are shifted into the higher 2 bytes where they belong, and the result is then shifted left by 4 bits, resulting in every component being exactly where it needs to be.
With a form of 0AR0 0GB0, a bit mask and left-shift combination is OR'ed with the current value. This copies the A and G components to the position just left of them. The same thing is done for the R and B components, except in the opposite direction.
If you are going to do this for OpenGL, I suggest using a glTexImageXD function with the type parameter set to GL_UNSIGNED_SHORT_4_4_4_4. Your OpenGL driver should do the rest. As for the transparency inversion, you can always manipulate blending via the glBlendFunc and glBlendEquation functions.
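A minimal sketch of that upload (width, height and pixels are placeholders; depending on how your nibbles are packed you may need the _REV variant of the type instead):
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0,
             GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4, pixels);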
While others operate on hard-core optimization...
Take this as your best bet:
std::string toAARRGGBB(const std::string &argb)
{
std::string ret("0x");
int start = 2; //"0x####";
// ^^ skipped
for (int i = start;i < argb.length(); ++i)
{
ret += argb[i];
ret += argb[i];
}
return ret;
}
int main()
{
std::string argb = toAARRGGBB("0xACED"); //!!!
}
Haha

A memory-efficient SHA1 implementation

I'm working with a very restrictive embedded processor, which only has 128 bytes of ram. I'd like to implement SHA1 on it. RFC3174 describes, in 'method 2', a way of implementing SHA1 that doesn't require allocating an array of 80 32-bit words (which, at 320 bytes, is obviously not practical), and seems like it ought to be usable on my processor. I'm unable to find any implementations of 'method 2', though, and the sample code in the RFC only implements the default method.
Is anyone aware of a memory-efficient implementation of SHA1 in C or C++?
You should be able to quickly adapt the method 1 source to method 2. The function to change is Sha1ProcessMessageBlock() in method 1. Initialize w[0:15] from the message, then loop from 0 to 79, where you only do the w[] manipulation from iteration 16 onward, and the temp calculation depends on t's value (0-19 uses one, 20-39 uses another, etc.). The important thing to remember is to use index % 16 or index & 0x0f whenever you are addressing the w[] array.
A quick modification would be something like this (double check all accesses to w to make sure I haven't missed the t & 0x0f):
void SHA1ProcessMessageBlock(SHA1Context *context)
{
const uint32_t K[] = { /* Constants defined in SHA-1 */
0x5A827999,
0x6ED9EBA1,
0x8F1BBCDC,
0xCA62C1D6
};
int t; /* Loop counter */
uint32_t temp; /* Temporary word value */
uint32_t W[16]; /* Word sequence */
uint32_t A, B, C, D, E; /* Word buffers */
/*
* Initialize the first 16 words in the array W. You can move this to your
* context.
*/
for(t = 0; t < 16; t++)
{
W[t] = context->Message_Block[t * 4] << 24;
W[t] |= context->Message_Block[t * 4 + 1] << 16;
W[t] |= context->Message_Block[t * 4 + 2] << 8;
W[t] |= context->Message_Block[t * 4 + 3];
}
A = context->Intermediate_Hash[0];
B = context->Intermediate_Hash[1];
C = context->Intermediate_Hash[2];
D = context->Intermediate_Hash[3];
E = context->Intermediate_Hash[4];
for(t = 0; t < 80; t++) {
if (t >= 16) {
W[t&0xf] = SHA1CircularShift(1,W[(t-3)&0xf] ^ W[(t-8)&0xf] ^ W[(t-14)&0xf] ^ W[t&0xf]);
}
if (t<20) {
temp = SHA1CircularShift(5,A) +
((B & C) | ((~B) & D)) + E + W[t&0xf] + K[0];
}
else if (t<40) {
temp = SHA1CircularShift(5,A) + (B ^ C ^ D) + E + W[t&0xf] + K[1];
}
else if (t < 60) {
temp = SHA1CircularShift(5,A) +
((B & C) | (B & D) | (C & D)) + E + W[t&0xf] + K[2];
}
else {
temp = SHA1CircularShift(5,A) + (B ^ C ^ D) + E + W[t&0xf] + K[3];
}
E = D;
D = C;
C = SHA1CircularShift(30,B);
B = A;
A = temp;
}
context->Intermediate_Hash[0] += A;
context->Intermediate_Hash[1] += B;
context->Intermediate_Hash[2] += C;
context->Intermediate_Hash[3] += D;
context->Intermediate_Hash[4] += E;
context->Message_Block_Index = 0;
}
There are still savings to be made: get rid of the W[] array on the stack and put it in the context, pre-initialized with the data you get.
Also, you need a lot of pre-processing before calling this function. For example, if all your messages are less than 55 bytes, you can put it in W array, add padding, and process immediately. If not, you'll have to call process twice: first with your partially padded input, and again with the rest of the pad, etc. That sort of thing would be very application specific, and I doubt you'll be able to find the code to do it for you.
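For the short-message case described above (the whole message is shorter than 56 bytes), the single-block padding looks roughly like this; block is the 64-byte message block, msg and len are placeholders:
memset(block, 0, 64);
memcpy(block, msg, len);
block[len] = 0x80;                 /* the mandatory '1' padding bit */
uint64_t bits = (uint64_t)len * 8; /* message length in bits */
for (int i = 0; i < 8; i++)        /* stored big-endian in the last 8 bytes */
    block[63 - i] = (uint8_t)(bits >> (8 * i));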
By the way, the SHA1ProcessMessageBlock code above is a straight adaptation of the method 1 source from your link. You can probably squeeze a bit more out of it if you try to optimize it further.
I couldn't think of a way to get any savings on the intermediate hash, so you will need a total of 108 bytes for this (109 if the counter is also in RAM), 24 of which are local to this function and can be reused in other places, so long as they are also temporary. So it is very hard to do what you want to do.
EDIT: If all your messages are less than 55 bytes, you can save another 20 bytes in your context by getting rid of the intermediate_hash[] storage. Simply initialize A-E from the constants, and add the constants at the end. Finally, instead of storing them in a separate variable, overwrite your input when this function ends.
I have implemented SHA-1 for several memory-constrained environments. You can get by with
DWORD W[16] ; // instead of W[80]
DWORD H[5] ; // Intermediate hash value
DWORD BitCount[2] ; // Probably a single DWORD is enough here
plus a few bytes of housekeeping. W is updated on the fly, as a circular buffer, instead of being generated at the start of each round.
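In that scheme, the schedule update for rounds 16 through 79 is the same index-mod-16 trick shown in the adaptation above (a sketch, reusing the SHA1CircularShift helper from that code):
W[t & 15] = SHA1CircularShift(1, W[(t - 3) & 15] ^ W[(t - 8) & 15] ^ W[(t - 14) & 15] ^ W[t & 15]);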
working example:
#include<iostream>
#include<stdio.h>
#include<stdlib.h>
#include<string>
using namespace std;
unsigned CircularShift(int bits, unsigned word)
{
return ((word << bits) & 0xFFFFFFFF) | ((word & 0xFFFFFFFF) >> (32-bits));
}
int main(void)
{
string mess;
cin >> mess;
unsigned int lm = mess.length();
unsigned int lmb = lm*8;
unsigned char *messc;
messc=(unsigned char*)malloc((sizeof(unsigned char))*64);
for (unsigned short int i =0;i<64;i++)
{
messc[i]=char(0x00);
}
for(int i=0;i<mess.length();i++)
{
messc[i]=mess[i];
}
messc[lm]=(unsigned char)128;
// bytes 56-59 stay zero: they hold the upper half of the 64-bit bit length,
// which is 0 for any message shorter than 2^32 bits
messc[60] = (lmb >> 24) & 0xFF;
messc[61] = (lmb >> 16) & 0xFF;
messc[62] = (lmb >> 8) & 0xFF;
messc[63] = (lmb) & 0xFF;
for(int i =0 ;i<64;i++)
{
cout<< hex << (int)messc[i] << " ";
}
unsigned *H;
H=(unsigned*)malloc(5*sizeof(unsigned));
H[0] = 0x67452301;
H[1] = 0xEFCDAB89;
H[2] = 0x98BADCFE;
H[3] = 0x10325476;
H[4] = 0xC3D2E1F0;
const unsigned K[]={0x5A827999,0x6ED9EBA1,0x8F1BBCDC,0xCA62C1D6};
int t;
unsigned temp;
unsigned *W;
unsigned A, B, C, D, E;
W=(unsigned*)malloc(80*sizeof(unsigned));
unsigned char *messh;
messh=(unsigned char*)malloc(64*sizeof(unsigned char));
int k;
for(t = 0; t < 16; t++)
{
W[t] = ((unsigned) messc[t * 4]) << 24;
W[t] |= ((unsigned) messc[t * 4 + 1])<< 16;
W[t] |= ((unsigned) messc[t * 4 + 2]) << 8;
W[t] |= ((unsigned) messc[t * 4 + 3]);
}
for(t = 16; t < 80; t++)
{
W[t] = CircularShift(1,W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]);
}
A = H[0];
B = H[1];
C = H[2];
D = H[3];
E = H[4];
for(t = 0; t < 20; t++)
{
temp = CircularShift(5,A) + ((B & C) | ((~B) & D)) + E + W[t] + K[0];
temp &= 0xFFFFFFFF;
E = D;
D = C;
C = CircularShift(30,B);
B = A;
A = temp;
}
for(t = 20; t < 40; t++)
{
temp = CircularShift(5,A) + (B ^ C ^ D) + E + W[t] + K[1];
temp &= 0xFFFFFFFF;
E = D;
D = C;
C = CircularShift(30,B);
B = A;
A = temp;
}
for(t = 40; t < 60; t++)
{
temp = CircularShift(5,A) +
((B & C) | (B & D) | (C & D)) + E + W[t] + K[2];
temp &= 0xFFFFFFFF;
E = D;
D = C;
C = CircularShift(30,B);
B = A;
A = temp;
}
for(t = 60; t < 80; t++)
{
temp = CircularShift(5,A) + (B ^ C ^ D) + E + W[t] + K[3];
temp &= 0xFFFFFFFF;
E = D;
D = C;
C = CircularShift(30,B);
B = A;
A = temp;
}
H[0] = (H[0] + A) & 0xFFFFFFFF;
H[1] = (H[1] + B) & 0xFFFFFFFF;
H[2] = (H[2] + C) & 0xFFFFFFFF;
H[3] = (H[3] + D) & 0xFFFFFFFF;
H[4] = (H[4] + E) & 0xFFFFFFFF;
cout <<"\nTHIS IS SHHHHHAAAAAAAAAAA\n";
for(int i=0;i<5;i++)
{
cout << hex << H[i] << " ";
}
//Message_Block_Index = 0;
}
All things considered, looking at your requirements, I think you are going to have to change your specs. Either a bigger chip, or a simpler algorithm. Even implementing SHA-1 (without HMAC) would be a challenge, but it should be doable.