Rotate and flip an image pixel by pixel - C++

I have the following image, and I would like to rotate and flip it so that it fits the full screen resolution.
I'm doing the following conversion to rotate it, but it doesn't work.
Here is the source image.
size_t bpp = Ogre::PixelUtil::getNumElemBytes(source.getFormat());
const unsigned char *srcData = source.getData();
unsigned char *dstData = new unsigned char[width * height * bpp];
size_t srcPitch = source.getRowSpan();
size_t dstPitch = width * bpp;
ImageDescriptor sourceImage(source.getWidth(), source.getHeight(), bpp);
ImageDescriptor rotatedTarget(source.getHeight(), source.getWidth(), bpp); // note width/height swap
unsigned char *rotated = new unsigned char[source.getHeight() * source.getWidth() * bpp];
for (std::size_t row = 0; row < rotatedTarget.mHeight; ++row) {
    for (std::size_t col = 0; col < rotatedTarget.mWidth; ++col) {
        for (std::size_t chan = 0; chan < rotatedTarget.mChannels; ++chan) {
            rotated[rotatedTarget.offset(col, row, chan)] =
                srcData[sourceImage.offset(row, col, chan)];
        }
    }
}
struct ImageDescriptor {
    std::size_t mWidth;
    std::size_t mHeight;
    std::size_t mChannels;

    ImageDescriptor(std::size_t width, std::size_t height, std::size_t channels)
    {
        mWidth = width;
        mHeight = height;
        mChannels = channels;
    }

    std::size_t stride() const { return mWidth * mChannels; }

    std::size_t offset(std::size_t row, std::size_t col, std::size_t chan) const {
        assert(0 <= row && row < mHeight);
        assert(0 <= col && col < mWidth);
        assert(0 <= chan && chan < mChannels);
        // return row*stride() + col*mChannels + chan;
        // or, depending on your coordinate system ...
        return (mHeight - row - 1)*stride() + col*mChannels + chan;
    }

    std::size_t size() const { return mHeight * stride(); }
};
Here is the result image.
Any ideas?

Simple enough: invert the rows. Change
rotated[rotatedTarget.offset(col, row, chan)] =
    srcData[sourceImage.offset(row, col, chan)];
to
rotated[rotatedTarget.offset(rotatedTarget.mWidth - col - 1, row, chan)] =
    srcData[sourceImage.offset(row, col, chan)];
Something like that anyway. I'm a bit confused about your code.
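Putting it together, here is a minimal, untested sketch of the whole thing with that mirror applied. It follows the offset(row, col, chan) parameter order of the ImageDescriptor shown in the question, so the exact orientation may still need tweaking depending on which return statement you use inside offset():
// Sketch only: transpose plus a mirrored destination column (a 90-degree rotation).
// Assumes ImageDescriptor, source, srcData, and bpp exactly as in the question.
ImageDescriptor sourceImage(source.getWidth(), source.getHeight(), bpp);
ImageDescriptor rotatedTarget(source.getHeight(), source.getWidth(), bpp); // width/height swapped
unsigned char *rotated = new unsigned char[rotatedTarget.size()];
for (std::size_t row = 0; row < rotatedTarget.mHeight; ++row) {
    for (std::size_t col = 0; col < rotatedTarget.mWidth; ++col) {
        for (std::size_t chan = 0; chan < rotatedTarget.mChannels; ++chan) {
            // mirror the destination column, read the transposed source pixel
            rotated[rotatedTarget.offset(row, rotatedTarget.mWidth - col - 1, chan)] =
                srcData[sourceImage.offset(col, row, chan)];
        }
    }
}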

Related

Generate gradient with CImg and dynamic array

I'm trying to generate a PNG at different resolutions, but if I use a dynamic array it generates only a gray area. This is the source of my code (C++ 16-bit grayscale gradient image from a 2D array):
void generate_horizontal_gradient(char fileName[], int width, int height, int offset, bool direction)
{
    unsigned short** buffer = new unsigned short* [height];
    for (int i = 0; i < height; i++)
    {
        buffer[i] = new unsigned short[width];
    }
    for (int i = 0; i < height; i++)
    {
        unsigned short temp_data = 65535;
        if (direction == true) {
            for (int j = width; j > 0; j--)
            {
                buffer[i][j] = temp_data;
                if (j < width - offset)
                {
                    temp_data -= 65535 / (width - offset);
                }
            }
        }
        else
        {
            for (int j = 0; j < width; j++)
            {
                buffer[i][j] = temp_data;
                if (j > offset)
                {
                    temp_data -= 65535 / (width - offset);
                }
            }
        }
    }
    auto hold_arr = (unsigned short*) &buffer[0][0];
    cimg_library::CImg<unsigned short> img(hold_arr, width, height);
    img.save_png(fileName);
}
Apparently I don't yet understand something about two-dimensional arrays. I solved the problem with a one-dimensional array:
void generate_horizontal_gradient(char fileName[], int width, int height, int offset, bool direction)
{
    unsigned short* buffer = new unsigned short[height * width];
    // Add values to the array.
    for (int i = 0; i < height; i++)
    {
        unsigned short temp_data = 65535;
        if (direction == true) {
            for (int j = width - 1; j >= 0; j--)   // stay inside [0, width)
            {
                buffer[i * width + j] = temp_data;
                if (j < width - offset) temp_data -= 65535 / (width - offset);
            }
        }
        else
        {
            for (int j = 0; j < width; j++)
            {
                buffer[i * width + j] = temp_data;
                if (j > offset) temp_data -= 65535 / (width - offset);
            }
        }
    }
    cimg_library::CImg<unsigned short> img(buffer, width, height);
    img.save_png(fileName);
}
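If you would rather skip manual new/delete entirely, a std::vector gives you the same contiguous memory CImg expects while avoiding the indexing pitfalls of nested arrays. This is only a rough, untested sketch of the same idea (it assumes width > offset, like the original):
#include <vector>
#include "CImg.h"

// Sketch: horizontal gradient backed by a single contiguous std::vector.
void generate_horizontal_gradient(const char* fileName, int width, int height,
                                  int offset, bool direction)
{
    std::vector<unsigned short> buffer(static_cast<size_t>(width) * height);
    for (int i = 0; i < height; i++)
    {
        unsigned short value = 65535;
        for (int j = 0; j < width; j++)
        {
            // write left-to-right, or mirror the column for the other direction
            int col = direction ? (width - 1 - j) : j;
            buffer[static_cast<size_t>(i) * width + col] = value;
            if (j > offset) value -= 65535 / (width - offset);
        }
    }
    cimg_library::CImg<unsigned short> img(buffer.data(), width, height);
    img.save_png(fileName);
}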

How to correct a horizontal flip function to prevent it from altering the image's colors

stb_image.h provides a function to flip an image vertically, and it works fine. I tried to implement a horizontal flip (a mirror), but it alters the image's colors.
On pictures that only have three colors you can get bluish, reddish, or even magenta-tinted pictures instead of their actual colors. It's the same whether we're talking about JPEG or PNG images; you get the same strange results. Curiously, if you flip that very same image vertically, its colors look normal.
I've tried pretty much every function you could find here, and the code I'm providing is the only one that got me close to my actual goal.
// Function I've been trying to implement to enable Horizontal Flip
static void stbi_horizontal_flip(void *image, int w, int h, int bytes_per_pixel)
{
    size_t line_bytes = (size_t)w * bytes_per_pixel;
    stbi_uc temp[line_bytes];
    stbi_uc *bytes = (stbi_uc *)image;
    Debug() << line_bytes;
    for (int col = 0; col < h; col++) {
        stbi_uc *line = bytes + col * line_bytes;
        memcpy(&temp, line, line_bytes);
        for (int row = 0; row < line_bytes; row++) {
            line[row] = temp[line_bytes - row - bytes_per_pixel];
        }
    }
    stbi_horizontally_flip_on_load = false;
}
// stb_image's function for Vertical Flip
static void stbi__vertical_flip(void *image, int w, int h, int bytes_per_pixel)
{
    int row;
    size_t bytes_per_row = (size_t)w * bytes_per_pixel;
    stbi_uc temp[2048];
    stbi_uc *bytes = (stbi_uc *)image;
    for (row = 0; row < (h>>1); row++) {
        stbi_uc *row0 = bytes + row * bytes_per_row;
        stbi_uc *row1 = bytes + (h - row - 1) * bytes_per_row;
        size_t bytes_left = bytes_per_row;
        while (bytes_left) {
            size_t bytes_copy = (bytes_left < sizeof(temp)) ? bytes_left : sizeof(temp);
            memcpy(temp, row0, bytes_copy);
            memcpy(row0, row1, bytes_copy);
            memcpy(row1, temp, bytes_copy);
            row0 += bytes_copy;
            row1 += bytes_copy;
            bytes_left -= bytes_copy;
        }
    }
}
static unsigned char *stbi__load_and_postprocess_8bit(stbi__context *s, int *x, int *y, int *comp, int req_comp)
{
    stbi__result_info ri;
    void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 8);
    if (result == NULL) return NULL;
    if (ri.bits_per_channel != 8) {
        STBI_ASSERT(ri.bits_per_channel == 16);
        result = stbi__convert_16_to_8((stbi__uint16 *) result, *x, *y, req_comp == 0 ? *comp : req_comp);
        ri.bits_per_channel = 8;
    }
    // #TODO: move stbi__convert_format to here
    if (stbi_horizontally_flip_on_load) {
        int channels = req_comp ? req_comp : *comp;
        stbi_horizontal_flip(result, *x, *y, channels * sizeof(stbi_uc));
    }
    if (stbi__vertically_flip_on_load) {
        int channels = req_comp ? req_comp : *comp;
        stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi_uc));
    }
    return (unsigned char *) result;
}
STBIDEF stbi_uc *stbi_load_from_file(FILE *f, int *x, int *y, int *comp, int req_comp)
{
    unsigned char *result;
    stbi__context s;
    stbi__start_file(&s, f);
    result = stbi__load_and_postprocess_8bit(&s, x, y, comp, req_comp);
    if (result) {
        // need to 'unget' all the characters in the IO buffer
        fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR);
    }
    return result;
}
STBIDEF stbi_uc *stbi_load(char const *filename, int *x, int *y, int *comp, int req_comp)
{
    FILE *f = stbi__fopen(filename, "rb");
    unsigned char *result;
    if (!f) return stbi__errpuc("can't fopen", "Unable to open file");
    result = stbi_load_from_file(f, x, y, comp, req_comp);
    fclose(f);
    return result;
}
STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp)
{
    stbi__context s;
    stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user);
    return stbi__load_and_postprocess_8bit(&s, x, y, comp, req_comp);
}
void Gosu::load_image_file(Gosu::Bitmap& bitmap, const string& filename)
{
    Buffer buffer;
    load_file(buffer, filename);
    load_image_file(bitmap, buffer.front_reader());
}
void Gosu::load_image_file(Gosu::Bitmap& bitmap, Reader input)
{
    bool needs_color_key = is_bmp(input);
    stbi_io_callbacks callbacks;
    callbacks.read = read_callback;
    callbacks.skip = skip_callback;
    callbacks.eof = eof_callback;
    int x, y, n;
    stbi_uc* bytes = stbi_load_from_callbacks(&callbacks, &input, &x, &y, &n, STBI_rgb_alpha);
    if (bytes == nullptr) {
        throw runtime_error("Cannot load image: " + string(stbi_failure_reason()));
    }
    bitmap.resize(x, y);
    printf("Channels %d, Gosu Color size %d, unsigned char size %d, bytes array size %d",
           n, sizeof(Gosu::Color), sizeof(stbi_uc), sizeof(bytes));
    // Output: Channels 3 or 4, Gosu Color size 4, unsigned char size 1, bytes array 8
    memcpy(bitmap.data(), bytes, x * y * sizeof(Gosu::Color));
    stbi_image_free(bytes);
    if (needs_color_key) apply_color_key(bitmap, Gosu::Color::FUCHSIA);
}
// Output: Channels 3 or 4, Gosu Color size 4, unsigned char size 1, bytes array 8
That is what I got back from stb_image, but I'd prefer to get an 8-bit array instead. Even so, what actually matters is to get rid of that unexpected color change.
Thanks to Igor's comment I could focus on my immediate problem, and not long after I came up with the code I've posted below.
What I've been wondering since I finally managed to flip the images horizontally is why the other methods I found, either on the web or as part of image processors' code, didn't work as expected. O_o? Sometimes I copied and pasted them, only changing some variable names or types to match stb_image's, and they still failed either to compile or to display a decent result.
By the way, I had tried before to subtract positions to reach the right value, to no avail, but it made me think some of them could be used as nice color-blend effects. XD
// Horizontal Flip by Kyonides Arkanthes shared under GPLv2 or v3
static void stbi_kyon_horizontal_flip(void *image, int w, int h, int bytes_per_pixel)
{
    size_t line_bytes = (size_t)w * bytes_per_pixel;
    stbi_uc temp[line_bytes];
    stbi_uc *bytes = (stbi_uc *)image;
    int lpos, rpos;
    for (int col = 0; col < h; col++) {
        stbi_uc *line = bytes + col * line_bytes;
        memcpy(&temp, line, line_bytes);
        for (int row = 0; row < w; row++) {
            lpos = row * bytes_per_pixel;
            rpos = line_bytes - row * bytes_per_pixel - 1;
            line[lpos] = temp[rpos - 3];
            line[lpos + 1] = temp[rpos - 2];
            line[lpos + 2] = temp[rpos - 1];
            line[lpos + 3] = temp[rpos];
        }
    }
    stbi_kyon_horizontally_flip_on_load = false;
}
You just reversed the order of the RGBA channels. Try this instead; I tested it, and the result looks normal.
for (int row = 0; row < Qimg2.width(); row++) {
    lpos = row * bytes_per_pixel;
    rpos = line_bytes - row * bytes_per_pixel - 1;
    line[lpos] = temp[rpos - 2];
    line[lpos + 1] = temp[rpos - 1];
    line[lpos + 2] = temp[rpos - 3];
    line[lpos + 3] = temp[rpos];
}
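A more general approach, regardless of channel count, is to swap whole pixels rather than individual bytes, so the channel order is never touched. Here is a rough, untested sketch (plain C-style; stbi_uc is just unsigned char):
#include <string.h>

// Sketch: mirror each row in place by swapping whole pixels; the channel order
// is preserved for any bytes_per_pixel (1, 3, 4, ...).
static void horizontal_flip_pixels(void *image, int w, int h, int bytes_per_pixel)
{
    size_t line_bytes = (size_t)w * bytes_per_pixel;
    unsigned char *bytes = (unsigned char *)image;
    unsigned char temp[16]; // holds one pixel; assumes bytes_per_pixel <= 16

    for (int y = 0; y < h; y++) {
        unsigned char *line = bytes + (size_t)y * line_bytes;
        for (int x = 0; x < w / 2; x++) {
            unsigned char *left  = line + (size_t)x * bytes_per_pixel;
            unsigned char *right = line + (size_t)(w - 1 - x) * bytes_per_pixel;
            memcpy(temp, left, bytes_per_pixel);
            memcpy(left, right, bytes_per_pixel);
            memcpy(right, temp, bytes_per_pixel);
        }
    }
}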

Rotate an image by 90 degrees clockwise [closed]

I'm trying to rotate an image pixel by pixel by 90 degrees. It seems there is a math problem I couldn't figure out, and I get an array out-of-bounds exception.
Here is my attempt:
const unsigned char *srcData = source.getData();
unsigned char *dstData = new unsigned char[width * height * bpp];
size_t srcPitch = source.getRowSpan();
size_t dstPitch = width * bpp;
for (int i = 0; i < height; i++)
{
    for (int j = 0; j < width * bpp; j++)
    {
        rotatedData[(i * dstPitch) + j] = dstData[(height - i) * dstPitch + j];
    }
}
First, let's build an image descriptor to keep track of the dimensions.
struct ImageDescriptor {
    std::size_t width;
    std::size_t height;
    std::size_t channels;

    std::size_t stride() const { return width * channels; }

    std::size_t offset(std::size_t row, std::size_t col, std::size_t chan) const {
        assert(0 <= row && row < height);
        assert(0 <= col && col < width);
        assert(0 <= chan && chan < channels);
        return row*stride() + col*channels + chan;
        // or, depending on your coordinate system ...
        // return (height - row - 1)*stride() + col*channels + chan;
    }

    std::size_t size() const { return height * stride(); }
};
Now we'll need two ImageDescriptors to keep track of the dimensions of our two images. Note that, unless the original image is square, the rotated image will have a different width and height (and thus stride). Specifically, the width of the rotated image will be the height of the source image (and vice versa).
const ImageDescriptor source{width, height, bpp};
ImageDescriptor target{height, width, bpp};  // note width/height swap
A common way to do a transformation is to loop over the destination pixels and look up the source pixels.
unsigned char *rotated = new unsigned char[target.size()];
for (std::size_t row = 0; row < target.height; ++row) {
    for (std::size_t col = 0; col < target.width; ++col) {
        for (std::size_t chan = 0; chan < target.channels; ++chan) {
            rotated[target.offset(row, col, chan)] =
                original[source.offset(col, row, chan)];
        }
    }
}
Once you get it right, you can work to eliminate unnecessary computation. The first opportunity is to simply step through the destination image, since all of that is in memory order. The second opportunity is to hoist the source offset calculation out of the channel loop. Finally, if bpp is a constant, you can unroll the innermost loop.
unsigned char *p = rotated;
for (std::size_t row = 0; row < target.height; ++row) {
    for (std::size_t col = 0; col < target.width; ++col) {
        const std::size_t base = source.offset(col, row, 0);
        for (std::size_t chan = 0; chan < target.channels; ++chan) {
            *p++ = original[base + chan];
        }
    }
}
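For the last step, here is a rough sketch of what the unrolled inner loop could look like, assuming a fixed 4-channel (e.g. RGBA) image; adjust the channel count to your pixel format:
// Sketch: innermost loop unrolled for a known bpp of 4.
unsigned char *p = rotated;
for (std::size_t row = 0; row < target.height; ++row) {
    for (std::size_t col = 0; col < target.width; ++col) {
        const std::size_t base = source.offset(col, row, 0);
        *p++ = original[base + 0];
        *p++ = original[base + 1];
        *p++ = original[base + 2];
        *p++ = original[base + 3];
    }
}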
Try this one:
for (int j = 0; j < width * bpp; j++)
{
    for (int i = 0; i < height; i++)
    {
        rotatedData[(height)*(dstPitch - j - 1) + i] = dstData[(i * dstPitch) + j];
    }
}
and if dstData isn't square:
//define rotatedData_height before.
rotatedData[(rotatedData_height)*(dstPitch - j - 1) + i] = dstData[(i * dstPitch) + j];

How do I create a dynamic array of arrays (of arrays)?

I'm trying to create a dynamic array of arrays (of arrays), but for some reason the data gets corrupted. I'm using the data to generate a texture in an OpenGL application.
The following code works fine:
unsigned char imageData[64][64][3];
for (int i = 0; i < 64; i++)
{
    for (int j = 0; j < 64; j++)
    {
        unsigned char r = 0, g = 0, b = 0;
        if (i < 32)
        {
            if (j < 32)
                r = 255;
            else
                b = 255;
        }
        else
        {
            if (j < 32)
                g = 255;
        }
        imageData[i][j][0] = r;
        imageData[i][j][1] = g;
        imageData[i][j][2] = b;
    }
    std::cout << std::endl;
}
glTexImage2D(target, 0, GL_RGB, 64, 64, 0, GL_RGB, GL_UNSIGNED_BYTE, imageData);
Problem is, I want to be able to create a texture of any size (not just 64*64). So I'm trying this:
unsigned char*** imageData = new unsigned char**[64]();
for (int i = 0; i < 64; i++)
{
    imageData[i] = new unsigned char*[64]();
    for (int j = 0; j < 64; j++)
    {
        imageData[i][j] = new unsigned char[3]();
        unsigned char r = 0, g = 0, b = 0;
        if (i < 32)
        {
            if (j < 32)
                r = 255;
            else
                b = 255;
        }
        else
        {
            if (j < 32)
                g = 255;
        }
        imageData[i][j][0] = r;
        imageData[i][j][1] = g;
        imageData[i][j][2] = b;
    }
    std::cout << std::endl;
}
glTexImage2D(target, 0, GL_RGB, 64, 64, 0, GL_RGB, GL_UNSIGNED_BYTE, imageData);
But that doesn't work, the image gets all messed up so I assume I'm creating the array of arrays (of arrays) incorrectly? What am I doing wrong?
Also, I guess I should be using vectors instead. But how can I cast the vector of vectors of vectors data into a (void *) ?
This line contains multiple bugs:
unsigned char* pixel = &(imageData[(y * height) + x]);
You should multiply x by height and add y, and each pixel is actually 3 bytes. Some issues that led to this bug in your code (and will lead to others):
- You should be using std::vector. You can call std::vector::data to get a pointer to the underlying data when interfacing with C APIs.
- You should have a class that represents a pixel. It will handle the offsetting correctly, give things names, and make the code clearer.
- Whenever you work with a multi-dimensional array encoded into a single-dimensional one, carefully write an access function that takes care of the indexing so you can test it separately (see the sketch below).
#include <vector>

struct Pixel {
    unsigned char red;
    unsigned char green;
    unsigned char blue;   // keep RGB order so the memory layout matches GL_RGB
};

struct TwoDimPixelArray {
    TwoDimPixelArray(int width, int height)
        : m_width(width), m_height(height)
    {
        m_vector.resize(m_width * m_height);
    }

    Pixel& get(int x, int y) {
        return m_vector[x * m_height + y];
    }

    Pixel* data() { return m_vector.data(); }

private:
    int m_width;
    int m_height;
    std::vector<Pixel> m_vector;
};
int width = 64;
int height = 64;
TwoDimPixelArray imageData(width, height);
for (int x = 0; x != width; ++x) {
    for (int y = 0; y != height; ++y) {
        auto& pixel = imageData.get(x, y);
        // ... pixel.red = something, pixel.blue = something, etc.
    }
}
glTexImage2D(target, 0, GL_RGB, 64, 64, 0, GL_RGB, GL_UNSIGNED_BYTE, imageData.data());
You need to use contiguous memory for it to work with OpenGL.
My solution is inspired by previous answers, with a different indexing system
unsigned char* imageData = new unsigned char[width * height * 3];
unsigned char r, g, b;   // assign r, g, b per pixel before the writes below
const unsigned int row_size_bytes = width * 3;
for (unsigned int x = 0; x < width; x++) {
    unsigned int current_row_offset_bytes = x * 3;
    for (unsigned int y = 0; y < height; y++) {
        unsigned int one_dim_offset = y * row_size_bytes + current_row_offset_bytes;
        unsigned char* pixel = &(imageData[one_dim_offset]);
        pixel[0] = r;
        pixel[1] = g;
        pixel[2] = b;
    }
}
Unfortunately it's untested, but I'm confident, assuming sizeof(char) is 1.
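To tie this back to the std::vector part of the question: a flat std::vector<unsigned char> of width * height * 3 bytes works the same way, and vector::data() gives you the pointer to pass to glTexImage2D. A rough, untested sketch of the original 64x64 pattern generalized to any size:
#include <vector>

// Sketch: contiguous RGB buffer in a std::vector; .data() goes straight to glTexImage2D.
std::vector<unsigned char> imageData(static_cast<size_t>(width) * height * 3);
for (int i = 0; i < height; i++) {
    for (int j = 0; j < width; j++) {
        unsigned char r = 0, g = 0, b = 0;
        if (i < height / 2) {
            if (j < width / 2) r = 255; else b = 255;
        } else {
            if (j < width / 2) g = 255;
        }
        const size_t idx = (static_cast<size_t>(i) * width + j) * 3;
        imageData[idx + 0] = r;
        imageData[idx + 1] = g;
        imageData[idx + 2] = b;
    }
}
// For RGB data whose row size isn't a multiple of 4, you may also need
// glPixelStorei(GL_UNPACK_ALIGNMENT, 1) before the upload.
glTexImage2D(target, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, imageData.data());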

Row-major or column-major access of thread index in CUDA?

I'm confused about whether an image is stored in row-major or column-major order in the device's global memory.
I'm getting two different outputs for an image when accessing it in the two orders.
When accessing in row-major order:
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
int m = numCols * y + x;
if (x >= numCols || y >= numRows)
    return;
// marking column boundaries
if (x <= 2) {
    d_Image[m].x = 255;
    d_Image[m].y = 0;
    d_Image[m].z = 0;
}
else if (x >= numCols - 2) {
    d_Image[m].x = 0;
    d_Image[m].y = 0;
    d_Image[m].z = 255;
}
else {
    d_Image[m].x = d_sample[m].x;
    d_Image[m].y = d_sample[m].y;
    d_Image[m].z = d_sample[m].z;
}
d_Image[m].w = d_sample[m].w;
output using row-major
When accessing in column-major order:
int m = x * numRows + y;
output using col-major
Dimensions:
const dim3 blockSize(16,16);
const dim3 gridSize(numCols/16+1, numRows/16+1, 1);
blur << < gridSize, blockSize >> >(d_Image, d_sample, numRows, numCols);
I'm loading and saving the image using OpenCV.
In the first output, red and blue dots are scattered all over the image. In the second output (column-major), the boundary rows are marked while I'm trying to mark the columns. I'm very confused.
Edit
void helper(uchar4* d_sample, uchar4* d_Image, size_t numRows, size_t numCols);
cv::Mat sample;
cv::Mat Image;
size_t numRows() { return sample.rows; }
size_t numCols() { return sample.cols; }
__global__ void blur(const uchar4 *d_sample, uchar4* d_Image, size_t numRows, size_t numCols){
    int x = threadIdx.x + blockDim.x * blockIdx.x;
    int y = threadIdx.y + blockDim.y * blockIdx.y;
    int m = y * numCols + x;
    if (x >= numCols || y >= numRows)
        return;
    if (x <= 2){
        d_Image[m].x = 255;
        d_Image[m].y = 0;
        d_Image[m].z = 0;
    }
    else if (x >= (numCols - 2)){
        d_Image[m].x = 0;
        d_Image[m].y = 0;
        d_Image[m].z = 255;
    }
    else{
        d_Image[m].x = d_sample[m].x;
        d_Image[m].y = d_sample[m].y;
        d_Image[m].z = d_sample[m].z;
    }
    d_Image[m].w = d_sample[m].w;
}
int main(){
    uchar4 *h_sample, *d_sample, *d_Image, *h_Image;
    int filter[9];
    sample = cv::imread("sample.jpg", CV_LOAD_IMAGE_COLOR);
    if (sample.empty()){
        std::cout << "error in loading image.";
        system("pause");
    }
    cv::cvtColor(sample, sample, CV_BGR2RGBA);
    Image.create(numRows(), numCols(), CV_8UC4);
    if (!sample.isContinuous() || !Image.isContinuous()) {
        std::cerr << "Images aren't continuous!! Exiting." << std::endl;
        system("pause");
        exit(1);
    }
    cv::cvtColor(Image, Image, CV_BGR2RGBA);
    h_sample = (uchar4*)sample.data;
    h_Image = (uchar4*)Image.data;
    size_t numPixels = numRows() * numCols();
    // allocate memory on device
    checkCudaErrors(cudaMalloc((void**)&d_sample, sizeof(uchar4) * numPixels));
    checkCudaErrors(cudaMalloc((void**)&d_Image, sizeof(uchar4) * numPixels));
    checkCudaErrors(cudaMemset(d_sample, 0, sizeof(uchar4) * numPixels));
    checkCudaErrors(cudaMemset(d_Image, 0, sizeof(uchar4) * numPixels));
    // copy to device
    checkCudaErrors(cudaMemcpy(d_sample, h_sample, sizeof(uchar4) * numPixels, cudaMemcpyHostToDevice));
    helper(d_sample, d_Image, numCols(), numRows());
    // copy back to host
    checkCudaErrors(cudaMemcpy(h_Image, d_Image, sizeof(uchar4) * numPixels, cudaMemcpyDeviceToHost));
    cv::cvtColor(Image, Image, CV_RGBA2BGR);
    cv::namedWindow("Image", CV_WINDOW_AUTOSIZE);
    cv::imshow("Image", Image);
    cv::waitKey(0);
    cv::imwrite("sample.jpg", Image);
    return 0;
}
void helper(uchar4* d_sample, uchar4* d_Image, size_t numRows, size_t numCols){
    const dim3 blockSize(16, 16);
    const dim3 gridSize(numCols/16 + 1, numRows/16 + 1, 1);
    blur << < gridSize, blockSize >> >(d_sample, d_Image, numRows, numCols);
    cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
Your helper is declared as
void helper(uchar4* d_sample, uchar4* d_Image, size_t numRows, size_t numCols)
and you call it as
helper(d_sample, d_Image, numCols(), numRows());
I think you may have switched cols and rows when you call helper...
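If that is the case, a minimal fix would be to pass them in the order the declaration expects:
// Pass rows and cols in the same order the helper declaration expects.
helper(d_sample, d_Image, numRows(), numCols());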