Performant way to create a checkerboard pattern - C++

So I have an image that I want to overlay with a checkerboard pattern.
This is what I have come up with so far:
for (uint_8 nRow = 0; nRow < image.width(); ++nRow)
    for (uint_8 nCol = 0; nCol < image.height(); ++nCol)
        if (((nRow/20 + nCol/20) % 2) == 0)
            memset(&image.data[nCol + nRow], 0, 1);
Produces a white image, unfortunately. I don't think this is very performant, because memset is called for every single pixel in the image instead of once for a whole run of pixels.
Why does this code not produce a checkerboard pattern? How would you improve it?

For better performance, don't treat the image as a 2-dimensional entity. Instead, look at it as a 1D array of contiguous data, where all lines of the image are arranged one after the other.
With this approach, you can write the pattern in one go with a single loop, where in every iteration you memset() multiple adjacent pixels and increase the index by twice the number of pixels you set:
int data_size = image.width() * image.height();
for (auto it = image.data; it < image.data + data_size; it += 20) {
    memset(it, 0, 20);   // blacken one 20-pixel block (1 byte per pixel)
    if (((it - image.data) + 40) % (20 * 400) == 0) {
        it += 40;
    } else if (((it - image.data) + 20) % (20 * 400) != 0) {
        it += 20;
    }
}
(Replace auto with the type of image.data if you're not using C++11; I suspect it's unsigned char*.)
This is quite friendly to the CPU's cache prefetcher. It's also friendly to the compiler, which can potentially vectorize and/or unroll the loop.
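For illustration, here is the same idea as a standalone sketch (my own, not part of the original answer), assuming 1 byte per pixel and the question's 20-pixel squares; overlay_checker is a made-up name:
#include <algorithm>
#include <cstring>

// Darken alternating 20-pixel squares, one contiguous run per memset call.
// Assumes 1 byte per pixel; 'data' points at the first pixel of the image.
void overlay_checker(unsigned char* data, int width, int height, int square = 20)
{
    for (int row = 0; row < height; ++row) {
        unsigned char* line = data + row * width;      // start of this row
        int phase = ((row / square) % 2) * square;     // shift the pattern every 'square' rows
        for (int col = phase; col < width; col += 2 * square)
            std::memset(line + col, 0, std::min(square, width - col));
    }
}
With the question's interface this would be called as overlay_checker(image.data, image.width(), image.height()).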

If your image's dimensions are a multiple of the checker square size:
(I coded it in C, but it is fairly easy to port to C++.)
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#define uint unsigned int
#define WIDTH 40
#define HEIGHT 40
#define BLOCK_SIZE 5
void create_checker_row(uint* row, uint size_block, uint nb_col, uint offset)
{
    uint ic;
    for (ic = size_block*offset; ic < nb_col; ic += 2*size_block)
    {
        memset(row + ic, 0, size_block*sizeof(uint));
    }
}
int main()
{
    uint ir, ic;
    // image creation
    uint* pixels = (uint*) malloc(WIDTH*HEIGHT*sizeof(uint));
    for (ir = 0; ir < HEIGHT; ir++)
    {
        for (ic = 0; ic < WIDTH; ic++)
        {
            // arbitrary numbers
            pixels[ir*WIDTH + ic] = (ir*WIDTH + ic) % 57;
            printf("%d,", pixels[ir*WIDTH + ic]);
        }
        printf("\n");
    }
    for (ir = 0; ir < HEIGHT; ir++)
    {
        create_checker_row( pixels + ir*WIDTH,      // pointer to the beginning of the n-th row
                            BLOCK_SIZE,             // horizontal length of a square
                            WIDTH,                  // image width
                            (ir/BLOCK_SIZE) % 2 );  // offset to create the checker pattern
    }
    // validation
    printf("\n");
    printf("Validation\n");
    printf("\n");
    for (ir = 0; ir < HEIGHT; ir++)
    {
        for (ic = 0; ic < WIDTH; ic++)
        {
            printf("%d,", pixels[ir*WIDTH + ic]);
        }
        printf("\n");
    }
    free(pixels);
    return 0;
}
Seems pretty checkered to me: http://ideone.com/gp9so6

I use this together with stb_image_write.h:
#define STB_IMAGE_WRITE_IMPLEMENTATION   /* needed in exactly one translation unit */
#include <stdlib.h>
#include <stb_image_write.h>
int main(int argc, char *argv[])
{
    const int w = 256, h = 256, ch = 4, segments = 8, box_sz = w / segments;
    unsigned char rgba_fg[4] = {255, 255, 0, 255}; // yellow
    unsigned char rgba_bg[4] = {255, 0, 0, 255};   // red
    unsigned char* data = calloc(w * h * ch, sizeof(unsigned char));
    int swap = 0;
    int fill = 0; /* set to 1 to fill fg first */
    unsigned char* col = NULL;
    for (int i = 0; i < w * h; i++)
    {
        if (i % (w * box_sz) == 0 && i != 0)
            swap = !swap;
        if (i % box_sz == 0 && i != 0)
            fill = !fill;
        if (fill)
        {
            if (swap)
                col = rgba_bg;
            else
                col = rgba_fg;
        }
        else
        {
            if (swap)
                col = rgba_fg;
            else
                col = rgba_bg;
        }
        for (int j = 0; j < ch; j++)
        {
            data[i*ch + j] = col[j];
        }
    }
    stbi_write_png("checker.png", w, h, ch, data, 0);
    free(data);
    return 0;
}
It's a bit slow with large images, but it gets the job done if you cache them.
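If the speed ever matters, one option is to precompute the two possible row patterns once and then copy whole rows. A sketch using the same variables as the code above (it additionally needs #include <string.h>):
/* Build the two row phases once, then copy whole rows instead of single bytes. */
unsigned char* row_even = (unsigned char*)malloc(w * ch);  /* rows in even box-rows: bg first */
unsigned char* row_odd  = (unsigned char*)malloc(w * ch);  /* rows in odd box-rows: fg first */
for (int x = 0; x < w; x++)
{
    int block = (x / box_sz) % 2;
    memcpy(row_even + x * ch, block ? rgba_fg : rgba_bg, ch);
    memcpy(row_odd  + x * ch, block ? rgba_bg : rgba_fg, ch);
}
for (int y = 0; y < h; y++)
    memcpy(data + y * w * ch, ((y / box_sz) % 2) ? row_odd : row_even, w * ch);
free(row_even);
free(row_odd);
This writes each output row with a single memcpy instead of w per-pixel copies.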

Related

How do I fill the gaps between pixels in a BMP file after loading the pixel array into an array with padded rows?

I wrote a program in C++ to draw the pixels of a BMP image onto the console using the SetPixel Windows function, but after loading the pixel array the image got printed on the console with gaps between the pixels. Thanks in advance for your help!
This is the output of the printed image on the console.
This is the original image I provided to it.
As you can see, the image width also changes when printed on the console.
// bmp bitmap
#include <stdlib.h>
#include <stdio.h>
#include <windows.h>
using namespace std;
#pragma pack(1)
struct BitmapFileHeader {
unsigned short type;
unsigned int size;
unsigned short reserved1;
unsigned short reserved2;
unsigned int offset;
};
#pragma pack(0)
unsigned char grayScale(unsigned char r, unsigned char g, unsigned char b) {
return ((r + g + b) / 3);
}
int main() {
char *data;
FILE *filePointer;
int **ImageArray;
BitmapFileHeader *bmp = (struct BitmapFileHeader*)malloc(sizeof(struct BitmapFileHeader));
BITMAPINFOHEADER *BitmapInfoHeader = (BITMAPINFOHEADER*)malloc(sizeof(BITMAPINFOHEADER));
HWND console = GetConsoleWindow();
HDC context = ::GetDC(console) ;
filePointer = fopen("tom.bmp", "rb");
if(!filePointer) {
perror("");
}
fread(reinterpret_cast<BitmapFileHeader*>(bmp), sizeof(BitmapFileHeader), 1, filePointer);
fread(reinterpret_cast<BITMAPINFOHEADER*>(BitmapInfoHeader), sizeof(BITMAPINFOHEADER), 1, filePointer);
if(BitmapInfoHeader->biSize == 40 && BitmapInfoHeader->biCompression == BI_BITFIELDS) {
printf("This types of image uses Extra bit masks\n");
}
// row pading
int RowSize = ((BitmapInfoHeader->biBitCount * BitmapInfoHeader->biWidth + 31) / 32) * 4;
int PixelArraySize = RowSize * BitmapInfoHeader->biHeight;
int height = BitmapInfoHeader->biHeight * 5;
int width = BitmapInfoHeader->biWidth * 5;
printf("RowSize: %d PixelArraySize: %d\n", RowSize, PixelArraySize);
ImageArray = (int**)malloc(sizeof(int*)*height);
// memory allocation
for(int i = 0; i < height; i++)
ImageArray[i] = (int*)malloc(sizeof(int)*width);
data = (char*)malloc(PixelArraySize);
fseek(filePointer, bmp->offset, SEEK_SET);
// set image into array
for(int ii = 0; ii < height; ii+=3) {
fread(data, RowSize, 3, filePointer);
for(int jj = 0; jj < width; jj+=3) {
ImageArray[ii][jj] = grayScale(data[jj+2], data[jj+1], data[jj]);
SetPixel(context, -jj+1000, -ii+500, RGB(data[jj+2], data[jj+1], data[jj]));
}
}
fclose(filePointer);
return 0;
}
Here is the code which I wrote.
A pixel is described by three bytes, one for each RGB channel. You are dealing with two indices here: the index of the pixel in the row data and the position of the pixel in the width direction. You place the pixel and access the row data with the same index.
So:
for (int jj = 0; jj < width; jj++) { // jj: position
    int kk = 3 * jj;                 // kk: data index
    ImageArray[ii][jj] = grayScale(data[kk + 2], data[kk + 1], data[kk]);
    SetPixel(context, -jj + 1000, -ii + 500, RGB(data[kk + 2], data[kk + 1], data[kk]));
}
The vertical gaps, i.e. the blank lines, come from incrementing by 3, where you should just increment by 1. (You have no "data index" here, because you read your data row-wise for the current row ii.)
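Put together, the corrected read loop might look like this (a sketch based on the question's variables; it reads one padded row per iteration and stops at the source height):
for (int ii = 0; ii < BitmapInfoHeader->biHeight; ii++) {
    fread(data, RowSize, 1, filePointer);   // exactly one padded row
    for (int jj = 0; jj < BitmapInfoHeader->biWidth; jj++) {
        int kk = 3 * jj;                    // data index of pixel jj
        ImageArray[ii][jj] = grayScale(data[kk + 2], data[kk + 1], data[kk]);
        SetPixel(context, -jj + 1000, -ii + 500, RGB(data[kk + 2], data[kk + 1], data[kk]));
    }
}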
If you want to enlarge your image, as the multiplication of width and height by 5 suggests, you must add a third index: You now have two positions, the source and target positions. This will be easier if you separate your loops: Create ImageArray of the source image in a first nested loop, then draw your scaled target image to the console with a loop over the target coordinates:
int scale = 5;
int w = BitmapInfoHeader->biWidth, h = BitmapInfoHeader->biHeight;
int ww = scale * w;
int hh = scale * h;
// read ImageArray (no drawing in this loop)
for (int y = 0; y < h; y++) {
    fread(data, RowSize, 1, filePointer);    // one padded row per source row
    for (int x = 0; x < w; x++) {
        ImageArray[y][x] = ...;              // grayScale(...) of the pixel at data index 3 * x
    }
}
// draw the scaled target image from ImageArray
for (int yy = 0; yy < hh; yy++) {
    for (int xx = 0; xx < ww; xx++) {
        int x = xx / scale;                  // source position
        int y = yy / scale;
        SetPixel(context, xx, yy, RGB(ImageArray[y][x], ImageArray[y][x], ImageArray[y][x]));
    }
}
(Here, single letters are source values, double letters are target values.)

How to reorder raw image color data to achieve a specific 2 by 2 format from four images? (C++)

I have the raw color data for four images, let's call them 1, 2, 3, and 4. I am storing the data in an unsigned char * with allocated memory. Individually I can manipulate or encode the images but when trying to concatenate or order them into a single image it works but takes more time than I would like.
I would like to create a 2 by 2 of the raw image data to encode as a single image.
1 2
3 4
For my example each image is 400 by 225 with RGBA (360,000 bytes). I'm doing a for loop with memcpy, where
for (int j = 0; j < 225; j++)
{
    std::memcpy(dest + (j * (400 + 400) * 4), src + (j * 400 * 4), 400 * 4);
}
is repeated for each image with an offset for the starting position added in (the example above would only work for the top left, of course).
This works but I'm wondering if this is a solved problem with a better solution, either in an algorithm described somewhere or a small library.
#include <iostream>
const int width = 6;
const int height = 4;
constexpr int n = width * height;
int main()
{
unsigned char a[n], b[n], c[n], d[n];
unsigned char dst[n * 4];
int i = 0, j = 0;
/* init data */
for (; i < n; i++) {
a[i] = 'a';
b[i] = 'b';
c[i] = 'c';
d[i] = 'd';
}
/* re-order */
i = 0;
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++, i++, j++) {
dst[i ] = a[j];
dst[i + width] = b[j];
dst[i + n * 2 ] = c[j];
dst[i + n * 2 + width] = d[j];
}
i += width;
}
/* print result */
i = 0;
for (int y = 0; y < height * 2; y++) {
for (int x = 0; x < width * 2; x++, i++)
std::cout << dst[i];
std::cout << '\n';
}
return 0;
}
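For the RGBA case in the question (four 400 by 225 images), essentially the same reordering can be done with one memcpy per source row, which generalizes the loop from the question to all four quadrants. A sketch; tile2x2 is a made-up helper name:
#include <cstring>

// Copy four equally sized images (w x h, ch bytes per pixel) into a
// (2*w) x (2*h) destination laid out as:  src[0] src[1]
//                                         src[2] src[3]
void tile2x2(unsigned char* dst, const unsigned char* const src[4],
             int w, int h, int ch)
{
    const int srcRow = w * ch;       // bytes per source row
    const int dstRow = 2 * srcRow;   // destination rows are twice as wide
    for (int img = 0; img < 4; ++img) {
        const int xOff = (img % 2) * srcRow;   // left or right half
        const int yOff = (img / 2) * h;        // top or bottom half
        for (int y = 0; y < h; ++y)
            std::memcpy(dst + (yOff + y) * dstRow + xOff,
                        src[img] + y * srcRow,
                        srcRow);
    }
}
That is still one memcpy per source row (4 * 225 calls for the example sizes), which is roughly as few as this interleaved layout allows.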

Generate Image from generated byte array in UWP vc++

Reference: this question & answer by @Decade Moon.
How can I use that method to generate an image from a byte array instead of an image file?
I tried the code below, but nothing works; no image is shown.
std::vector<char> data = std::vector<char>(imgx->Height * imgx->Width * 4);
int offset;
for (int row = 0; row < imgx->Height; row++)
{
    for (int col = 0; col < imgx->Width; col++)
    {
        offset = (row * (int)(imgx->Width * 4)) + (col * 4);
        data[offset] = 0x58;     // Red
        data[offset + 1] = 0x58; // Green
        data[offset + 2] = 0x58; // Blue
        data[offset + 3] = 0x58; // Alpha
    }
}
My approach is a little bit different from the reply you referred to, but it works pretty well.
#include <wrl.h>
#include <robuffer.h>
#include <ppltasks.h>   // for concurrency::create_async
using namespace Windows::UI::Xaml::Media::Imaging;
using namespace Windows::Storage::Streams;
using namespace Microsoft::WRL;
using namespace concurrency;
typedef uint8 byte;
byte* GetPointerToPixelData(IBuffer^ pixelBuffer, unsigned int *length)
{
if (length != nullptr)
{
*length = pixelBuffer ->Length;
}
// Query the IBufferByteAccess interface.
ComPtr<IBufferByteAccess> bufferByteAccess;
reinterpret_cast<IInspectable*>(pixelBuffer)->QueryInterface(IID_PPV_ARGS(&bufferByteAccess));
// Retrieve the buffer data.
byte* pixels = nullptr;
bufferByteAccess->Buffer(&pixels);
return pixels;
}
MainPage::MainPage()
{
InitializeComponent();
auto bitmap = ref new WriteableBitmap(50, 50);
image->Source = bitmap;
unsigned int length;
byte* sourcePixels = GetPointerToPixelData(bitmap->PixelBuffer, &length);
const unsigned int width = bitmap->PixelWidth;
const unsigned int height = bitmap->PixelHeight;
create_async([this, width, height, sourcePixels] {
byte* temp = sourcePixels;
// generate RED - BLUE gradient
for(unsigned int k = 0; k < height; k++) {
for (unsigned int i = 0; i < (width * 4); i += 4) {
int pos = k * (width * 4) + (i);
temp[pos] = (byte)(0xFF * k / (float)height); // B
temp[pos + 1] = 0x0; // G
temp[pos + 2] = 0xFF - (byte)(0xFF * k / (float)height); // R
temp[pos + 3] = 0xFF; // A
}
}
});
}
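To connect this to the byte array from the question, a sketch (my assumption, not part of the original answer) would be to reuse GetPointerToPixelData() and copy the generated bytes straight into the bitmap's pixel buffer; note that WriteableBitmap interprets the buffer as BGRA, not RGBA:
// 'imgx', 'data' and 'image' are the names used in the question.
auto bitmap = ref new WriteableBitmap((int)imgx->Width, (int)imgx->Height);

unsigned int length;
byte* sourcePixels = GetPointerToPixelData(bitmap->PixelBuffer, &length);
memcpy(sourcePixels, data.data(), length);   // copy the std::vector<char> contents

bitmap->Invalidate();     // tell the bitmap its pixel buffer changed
image->Source = bitmap;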

How do I create a dynamic array of arrays (of arrays)?

I'm trying to create a dynamic array of arrays (of arrays). But for some reason the data gets corrupted. I'm using the data to generate a texture in an OpenGL application.
The following code works fine:
unsigned char imageData[64][64][3];
for (int i = 0; i < 64; i++)
{
for (int j = 0; j < 64; j++)
{
unsigned char r = 0, g = 0, b = 0;
if (i < 32)
{
if (j < 32)
r = 255;
else
b = 255;
}
else
{
if (j < 32)
g = 255;
}
imageData[i][j][0] = r;
imageData[i][j][1] = g;
imageData[i][j][2] = b;
}
std::cout << std::endl;
}
glTexImage2D(target, 0, GL_RGB, 64, 64, 0, GL_RGB, GL_UNSIGNED_BYTE, imageData);
Problem is, I want to be able to create a texture of any size (not just 64*64). So I'm trying this:
unsigned char*** imageData = new unsigned char**[64]();
for (int i = 0; i < 64; i++)
{
imageData[i] = new unsigned char*[64]();
for (int j = 0; j < 64; j++)
{
imageData[i][j] = new unsigned char[3]();
unsigned char r = 0, g = 0, b = 0;
if (i < 32)
{
if (j < 32)
r = 255;
else
b = 255;
}
else
{
if (j < 32)
g = 255;
}
imageData[i][j][0] = r;
imageData[i][j][1] = g;
imageData[i][j][2] = b;
}
std::cout << std::endl;
}
glTexImage2D(target, 0, GL_RGB, 64, 64, 0, GL_RGB, GL_UNSIGNED_BYTE, imageData);
But that doesn't work; the image gets all messed up, so I assume I'm creating the array of arrays (of arrays) incorrectly? What am I doing wrong?
Also, I guess I should be using vectors instead. But how can I cast the vector of vectors of vectors data into a (void*)?
This line contains multiple bugs:
unsigned char* pixel = &(imageData[(y * height) + x]);
You should multiply x by height and add y. And there's also the fact that each pixel is actually 3 bytes. Some issues that led to this bug in your code (and will lead to others):
You should also be using std::vector. You can call std::vector::data to get a pointer to the underlying data to interface to C API's.
You should have a class that represents a pixel. This will handle the offsetting correctly and give things names and made the code clearer.
Whenever you are working with a multi dimensional array that you encode into a single dimensional one, you should try to carefully write an access function that takes care of indexing so you can test it separately.
#include <vector>

struct Pixel {
    unsigned char red;
    unsigned char green;
    unsigned char blue;   // member order matches the GL_RGB byte order
};

struct TwoDimPixelArray {
    TwoDimPixelArray(int width, int height)
        : m_width(width), m_height(height)
    {
        m_vector.resize(m_width * m_height);
    }

    Pixel& get(int x, int y) {
        return m_vector[x * m_height + y];
    }

    Pixel* data() { return m_vector.data(); }

private:
    int m_width;
    int m_height;
    std::vector<Pixel> m_vector;
};

int width = 64;
int height = 64;

TwoDimPixelArray imageData(width, height);

for (int x = 0; x != width; ++x) {
    for (int y = 0; y != height; ++y) {
        auto& pixel = imageData.get(x, y);
        // ... pixel.red = something, pixel.green = something, etc.
    }
}

glTexImage2D(target, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, imageData.data());
You need to use contiguous memory for it to work with OpenGL.
My solution is inspired by previous answers, with a different indexing system
unsigned char* imageData = new unsigned char[width*height*3];
unsigned char r, g, b;
const unsigned int row_size_bytes = width * 3;
for (unsigned int x = 0; x < width; x++) {
    unsigned int current_row_offset_bytes = x * 3;
    for (unsigned int y = 0; y < height; y++) {
        // set r, g, b for pixel (x, y) here
        unsigned int one_dim_offset = y * row_size_bytes + current_row_offset_bytes;
        unsigned char* pixel = &(imageData[one_dim_offset]);
        pixel[0] = r;
        pixel[1] = g;
        pixel[2] = b;
    }
}
Unfortunately it's untested, but I'm confident, assuming sizeof(char) is 1.
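For completeness, a minimal sketch (mine, not from either answer) that fills the question's red/blue/green quadrant pattern into a flat buffer of arbitrary size and uploads it; makeQuadrants is a made-up name and target is the texture target from the question:
#include <vector>

// Fill a width x height RGB buffer with the 4-quadrant pattern from the question.
std::vector<unsigned char> makeQuadrants(int width, int height)
{
    std::vector<unsigned char> img(static_cast<size_t>(width) * height * 3, 0);
    for (int y = 0; y < height; ++y) {
        for (int x = 0; x < width; ++x) {
            unsigned char* p = &img[(static_cast<size_t>(y) * width + x) * 3];
            if (y < height / 2)
                p[x < width / 2 ? 0 : 2] = 255;   // top half: red left, blue right
            else if (x < width / 2)
                p[1] = 255;                       // bottom-left: green
        }
    }
    return img;
}

// usage:
// auto imageData = makeQuadrants(width, height);
// glPixelStorei(GL_UNPACK_ALIGNMENT, 1);  // needed when width*3 is not a multiple of 4
// glTexImage2D(target, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, imageData.data());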

Generate a plane with triangle strips

What would be the best algorithm to generate a list of vertices to draw a plane using triangle strips?
I'm looking for a function which receives the plane's width and height and returns a float array containing correctly indexed vertices.
width represents the number of vertices per row.
height represents the number of vertices per column.
float* getVertices( int width, int height ) {
    ...
}

void render() {
    glEnableClientState(GL_VERTEX_ARRAY);
    glVertexPointer(3, GL_FLOAT, 0, getVertices(width, height));
    glDrawArrays(GL_TRIANGLE_STRIP, 0, width*height);
    glDisableClientState(GL_VERTEX_ARRAY);
}
Thank you all. I've coded this. Is it correct, or is the generated strip somehow wrong?
int width;
int height;
float* vertices = 0;
int* indices = 0;

int getVerticesCount( int width, int height ) {
    return width * height * 3;
}

int getIndicesCount( int width, int height ) {
    return (width*height) + (width-1)*(height-2);
}

float* getVertices( int width, int height ) {
    if ( vertices ) return vertices;

    vertices = new float[ getVerticesCount( width, height ) ];
    int i = 0;
    for ( int row=0; row<height; row++ ) {
        for ( int col=0; col<width; col++ ) {
            vertices[i++] = (float) col;
            vertices[i++] = 0.0f;
            vertices[i++] = (float) row;
        }
    }
    return vertices;
}

int* getIndices( int width, int height ) {
    if ( indices ) return indices;

    indices = new int[ getIndicesCount( width, height ) ];
    int i = 0;
    for ( int row=0; row<height-1; row++ ) {
        if ( (row&1)==0 ) { // even rows
            for ( int col=0; col<width; col++ ) {
                indices[i++] = col + row * width;
                indices[i++] = col + (row+1) * width;
            }
        } else { // odd rows
            for ( int col=width-1; col>0; col-- ) {
                indices[i++] = col + (row+1) * width;
                indices[i++] = col - 1 + row * width;
            }
        }
    }
    if ( (height&1) && height>2 ) {
        indices[i++] = (height-1) * width;
    }
    return indices;
}

void render() {
    glEnableClientState( GL_VERTEX_ARRAY );
    glVertexPointer( 3, GL_FLOAT, 0, getVertices(width,height) );
    glDrawElements( GL_TRIANGLE_STRIP, getIndicesCount(width,height), GL_UNSIGNED_INT, getIndices(width,height) );
    glDisableClientState( GL_VERTEX_ARRAY );
}
With width=4 and height=4 this is what I got:
And here I'm modifying some vertex height:
Here is some code that does this (not tested, but you get the idea at least):
void make_plane(int rows, int columns, float *vertices, int *indices) {
    // Set up vertices
    for (int r = 0; r < rows; ++r) {
        for (int c = 0; c < columns; ++c) {
            int index = r*columns + c;
            vertices[3*index + 0] = (float) c;
            vertices[3*index + 1] = (float) r;
            vertices[3*index + 2] = 0.0f;
        }
    }

    // Set up indices
    int i = 0;
    for (int r = 0; r < rows - 1; ++r) {
        indices[i++] = r * columns;
        for (int c = 0; c < columns; ++c) {
            indices[i++] = r * columns + c;
            indices[i++] = (r + 1) * columns + c;
        }
        indices[i++] = (r + 1) * columns + (columns - 1);
    }
}
The first loop sets up the vertex array in a standard rectangular grid. There are R*C vertices.
The second loop sets up the indices. In general, there are two vertices per square in the grid. Each vertex will cause a new triangle to be drawn (with the previous two vertices), so each square is drawn with two triangles.
The first and last vertex at the start and end of each row is duplicated. This means there are two triangles of zero area (degenerate triangles) between each row. This allows us to draw the entire grid in one big triangle strip. This technique is called stitching.
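For reference, the buffers for make_plane above might be sized and drawn roughly like this (a sketch; GL headers and a current context are assumed, as in the question's render()):
#include <vector>

int rows = 4, columns = 4;
int vertexCount = rows * columns;                    // R*C vertices
int indexCount  = (rows - 1) * (2 * columns + 2);    // 2 per column + 2 duplicates per row strip
std::vector<float> vertices(3 * vertexCount);
std::vector<int>   indices(indexCount);
make_plane(rows, columns, vertices.data(), indices.data());

glEnableClientState(GL_VERTEX_ARRAY);
glVertexPointer(3, GL_FLOAT, 0, vertices.data());
glDrawElements(GL_TRIANGLE_STRIP, indexCount, GL_UNSIGNED_INT, indices.data());
glDisableClientState(GL_VERTEX_ARRAY);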
None of the code above gives a correct mesh generation. A very good article about how to make a strip of triangles on a simple plane: http://www.learnopengles.com/android-lesson-eight-an-introduction-to-index-buffer-objects-ibos/
Here is my test code, which is actually tested and fully working:
int plane_width = 4; // amount of columns
int plane_height = 2; // amount of rows
int total_vertices = (plane_width + 1) * (plane_height + 1);
planeVert = new CIwFVec2[total_vertices];
memset(planeVert, 0, sizeof(CIwFVec2) * total_vertices);
int numIndPerRow = plane_width * 2 + 2;
int numIndDegensReq = (plane_height - 1) * 2;
int total_indices = numIndPerRow * plane_height + numIndDegensReq;
planeInd = new uint16[total_indices];
make_plane(plane_width, plane_height, planeVert, planeInd);
...
void make_plane(int width, int height, CIwFVec2 *vertices, uint16 *indices)
{
width++;
height++;
int size = sizeof(CIwFVec2);
// Set up vertices
for(int y = 0; y < height; y++)
{
int base = y * width;
for(int x = 0; x < width; x++)
{
int index = base + x;
CIwFVec2 *v = vertices + index;
v->x = (float) x;
v->y = (float) y;
Debug::PrintDebug("%d: %f, %f", index, v->x, v->y);
}
}
Debug::PrintDebug("-------------------------");
// Set up indices
int i = 0;
height--;
for(int y = 0; y < height; y++)
{
int base = y * width;
//indices[i++] = (uint16)base;
for(int x = 0; x < width; x++)
{
indices[i++] = (uint16)(base + x);
indices[i++] = (uint16)(base + width + x);
}
// add a degenerate triangle (except in a last row)
if(y < height - 1)
{
indices[i++] = (uint16)((y + 1) * width + (width - 1));
indices[i++] = (uint16)((y + 1) * width);
}
}
for(int ind=0; ind < i; ind++)
Debug::PrintDebug("%d ", indices[ind]);
}
I was doing something similar, and using the first two answers I have come up with this (tested; C#, XNA):
// center x,z on origin
float offset = worldSize / 2.0f, scale = worldSize / (float)vSize;
// create local vertices
VertexPositionColor[] vertices = new VertexPositionColor[vSize * vSize];
for (uint z = 0; z < vSize; z++) {
for (uint x = 0; x < vSize; x++) {
uint index = x + (z * vSize);
vertices[index].Position = new Vector3((scale*(float)x) - offset,
heightValue,
(scale*(float)z) - offset);
vertices[index].Color = Color.White;
}
}
// create local indices
var indices = new System.Collections.Generic.List<IndexType>();
for (int z = 0; z < vSize - 1; z++) {
// degenerate index on non-first row
if (z != 0) indices.Add((IndexType)(z * vSize));
// main strip
for (int x = 0; x < vSize; x++) {
indices.Add((IndexType)(z * vSize + x));
indices.Add((IndexType)((z + 1) * vSize + x));
}
// degenerate index on non-last row
if (z != (vSize-2)) indices.Add((IndexType)((z + 1) * vSize + (vSize - 1)));
}
This is easily convertible to C++; just make indices a std::vector (a sketch of that conversion follows the notes below).
The notable features of my solution are:
a) It doesn't need to change the winding order per substrip - adding two points creates two degenerate triangles, so the order is correct for the next substrip.
b) You should conditionally add the first and last degenerate triangle vertices.
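A rough C++ translation of the index generation above (a sketch under the same assumptions; vSize is the number of vertices per side, buildStripIndices is a made-up name, and 16-bit indices are kept as in the C# version):
#include <vector>

// Build triangle-strip indices for a vSize x vSize vertex grid, inserting the
// degenerate indices between row strips exactly as the C# loop above does.
std::vector<unsigned short> buildStripIndices(int vSize)
{
    std::vector<unsigned short> indices;
    for (int z = 0; z < vSize - 1; ++z) {
        // degenerate index at the start of every row strip except the first
        if (z != 0)
            indices.push_back(static_cast<unsigned short>(z * vSize));
        // main strip: one vertex from this row and one from the next, per column
        for (int x = 0; x < vSize; ++x) {
            indices.push_back(static_cast<unsigned short>(z * vSize + x));
            indices.push_back(static_cast<unsigned short>((z + 1) * vSize + x));
        }
        // degenerate index at the end of every row strip except the last
        if (z != vSize - 2)
            indices.push_back(static_cast<unsigned short>((z + 1) * vSize + (vSize - 1)));
    }
    return indices;
}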