I'm trying to rotate an image using openFrameworks, but I have a problem. My rotated image is red instead of its original color.
void testApp::setup(){
image.loadImage("abe2.jpg");
rotatedImage.allocate(image.width, image.height, OF_IMAGE_COLOR);
imageCenterX = image.getWidth() / 2;
imageCenterY = image.getHeight() / 2;
w = image.getWidth();
h = image.getHeight();
int degrees = 180;
float radians = (degrees*(PI / 180));
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
int index = image.getPixelsRef().getPixelIndex(x, y);
int newX = (cos(radians) * (x - imageCenterX) - sin(radians) * (y - imageCenterY) + imageCenterX);
int newY = (sin(radians) * (x - imageCenterX) + cos(radians) * (y - imageCenterY) + imageCenterY);
int newIndex = rotatedImage.getPixelsRef().getPixelIndex(newX, newY);
rotatedImage.getPixelsRef()[newIndex] = image.getPixelsRef()[index];
}
}
rotatedImage.update();
}
void testApp::update(){
}
void testApp::draw(){
image.draw(0,0);
rotatedImage.draw(0,400);
}
Can someone tell me what I am doing wrong?
If your image has three color components (red, green, blue), you need to copy all three of them, not just the first; the original loop copies only the first (red) channel, which is why the result looks red. The following should do the trick:
rotatedImage.getPixelsRef()[newIndex] = image.getPixelsRef()[index];
rotatedImage.getPixelsRef()[newIndex+1] = image.getPixelsRef()[index+1];
rotatedImage.getPixelsRef()[newIndex+2] = image.getPixelsRef()[index+2];
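For context, here is a minimal sketch of how the three copies fit into the original loop (assuming a 3-channel OF_IMAGE_COLOR image, so getPixelIndex() returns the index of the red component and green and blue follow it; a bounds check on the rotated coordinates is also added, since they can land just outside the image for some angles):
for (int y = 0; y < h; y++) {
    for (int x = 0; x < w; x++) {
        int index = image.getPixelsRef().getPixelIndex(x, y);
        int newX = cos(radians) * (x - imageCenterX) - sin(radians) * (y - imageCenterY) + imageCenterX;
        int newY = sin(radians) * (x - imageCenterX) + cos(radians) * (y - imageCenterY) + imageCenterY;
        // skip destination coordinates that fall outside the image
        if (newX < 0 || newX >= w || newY < 0 || newY >= h) continue;
        int newIndex = rotatedImage.getPixelsRef().getPixelIndex(newX, newY);
        // copy the red, green and blue components of the source pixel
        rotatedImage.getPixelsRef()[newIndex]     = image.getPixelsRef()[index];
        rotatedImage.getPixelsRef()[newIndex + 1] = image.getPixelsRef()[index + 1];
        rotatedImage.getPixelsRef()[newIndex + 2] = image.getPixelsRef()[index + 2];
    }
}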
I have a 1D array (size = 4 * width * height + 1) of pixels from an RGBA PNG image. I want to rotate the image by X degrees clockwise. I already know how to do it for 90 degrees, but I guess I have some problem with the trigonometry.
Here's the code:
std::pair<int, int> move(int x, int y, double rad) {
return {x * cos(rad) - y * sin(rad), x * cos(rad) + y * sin(rad)};
}
void turn(int deg) {
if (deg < 0) {
deg = 360 + deg;
}
double rad = deg * (M_PI / (double)180);
unsigned int oldWidth = width;
width = lround(sqrt(height * height + width * width));
height = lround(sqrt(height * height + oldWidth * oldWidth));
std::vector<unsigned char> output(rawPixels.size());
for (int X = 0; X < width; ++X) {
for (int Y = 0; Y < height; ++Y) {
for (int chan = 0; chan < CHANNELS_COUNT; ++chan) {
std::pair<int, int> xy = move(X, Y, rad);
output[(X * height + Y) * CHANNELS_COUNT + chan] = rawPixels[
((height - 1 - xy.second) * width + xy.first) * CHANNELS_COUNT + chan];
}
}
}
rawPixels = output;
}
It's OK to use additional arrays, but I don't want to use OpenCV or any other libraries.
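For reference, the standard rotation of a point (x, y) by an angle rad about a centre (cx, cy) is (cx + dx*cos(rad) - dy*sin(rad), cy + dx*sin(rad) + dy*cos(rad)), where dx = x - cx and dy = y - cy; with image coordinates (y pointing down) a positive angle appears clockwise on screen. A minimal standalone sketch of that mapping (rotatePoint is a made-up name, not the poster's move):
#include <cmath>
#include <utility>

// Rotate (x, y) by rad radians about the centre (cx, cy).
std::pair<int, int> rotatePoint(int x, int y, int cx, int cy, double rad) {
    double dx = x - cx;
    double dy = y - cy;
    int newX = static_cast<int>(std::lround(cx + dx * std::cos(rad) - dy * std::sin(rad)));
    int newY = static_cast<int>(std::lround(cy + dx * std::sin(rad) + dy * std::cos(rad)));
    return {newX, newY};
}
When filling the output it is usually easier to loop over the destination pixels and apply the inverse rotation (angle -rad) to find the source pixel; mapping source to destination the other way round leaves holes for most angles.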
Hello, I'm currently trying to rotate blocks that are within a square, but when I rotate them, weird spaces start to appear between the blocks. Could you help me fix the problem of the spaces between blocks? Here are some code and screenshots showing how it looks.
https://imgur.com/a/BLuO7FF
I have already checked whether all angles and radii are calculated correctly, and I don't see any problem there.
World.h
#pragma once
#include <SFML/Graphics.hpp>
class World
{
public:
World(sf::Vector2f Wpos);
float AngleToRadian(int angle);
void RotateWorld();
void draw(sf::RenderWindow &window);
sf::Texture tx;
sf::Sprite** Block;
sf::Vector2f Pos;
sf::Vector2i Size;
float** radius;
float** angle;
};
World.cpp
#include "World.h"
#include <SFML/Graphics.hpp>
#include <iostream>
#include <cmath>
#define PI 3.14159
World::World(sf::Vector2f Wpos)
{
Pos = Wpos;
Size = sf::Vector2i(10, 10);
Block = new sf::Sprite*[Size.y];
radius = new float*[Size.y];
angle = new float*[Size.y];
for (int i = 0; i < Size.y; i++)
{
Block[i] = new sf::Sprite[Size.x];
radius[i] = new float[Size.x];
angle[i] = new float[Size.x];
}
tx.loadFromFile("Img/Block.png");
sf::Vector2i off(Size.x * tx.getSize().x / 2, Size.y * tx.getSize().y / 2); //tx size is 32px x 32px
for (int y = 0; y < Size.y; y++)
{
for (int x = 0; x < Size.x; x++)
{
Block[y][x].setTexture(tx);
Block[y][x].setOrigin(tx.getSize().x / 2, tx.getSize().y / 2);
Block[y][x].setPosition(x*tx.getSize().x + Wpos.x - off.x + Block[y][x].getOrigin().x, y*tx.getSize().y + Wpos.y - off.y + Block[y][x].getOrigin().y);
radius[y][x] = sqrt(pow(Pos.x - Block[y][x].getPosition().x, 2) + pow(Pos.y - Block[y][x].getPosition().y, 2));
angle[y][x] = (atan2(Block[y][x].getPosition().y - Pos.y, Block[y][x].getPosition().x - Pos.x) * 180.0) / PI;
if ((atan2(Block[y][x].getPosition().y - Pos.y, Block[y][x].getPosition().x - Pos.x) * 180.0) / PI < 0)
{
angle[y][x] += 360;
}
//angle[y][x] = round(angle[y][x]);
/*radius[y][x] = round(radius[y][x]);*/
}
}
}
void World::RotateWorld()
{
float dx = 0, dy = 0;
if (sf::Keyboard::isKeyPressed(sf::Keyboard::E))
{
for (int y = 0; y < Size.y; y++)
{
for (int x = 0; x < Size.x; x++)
{
Block[y][x].rotate(1);
if (angle[y][x] >= 360)
{
angle[y][x] = 0;
}
angle[y][x]++;
dx = cos(AngleToRadian(angle[y][x])) * radius[y][x];
dy = sin(AngleToRadian(angle[y][x])) * radius[y][x];
Block[y][x].setPosition(Pos.x + dx, Pos.y + dy);
}
}
}
if (sf::Keyboard::isKeyPressed(sf::Keyboard::Q))
{
for (int y = 0; y < Size.y; y++)
{
for (int x = 0; x < Size.x; x++)
{
Block[y][x].rotate(-1);
if (angle[y][x] >= 360)
{
angle[y][x] = 0;
}
angle[y][x]--;
dx = cos(AngleToRadian(angle[y][x])) * radius[y][x];
dy = sin(AngleToRadian(angle[y][x])) * radius[y][x];
Block[y][x].setPosition(Pos.x + dx, Pos.y + dy);
}
}
}
}
I expected it to rotate without any spaces in between. I would be really thankful if someone could help me.
I would try setting the origin of the sf::Sprite using its getGlobalBounds() method instead of the sf::Texture size getter.
The difference seems minor, but something like that might be the cause.
Block[y][x].setTexture(tx);
Block[y][x].setOrigin(Block[y][x].getGlobalBounds().width / 2, Block[y][x].getGlobalBounds().height / 2);
Block[y][x].setPosition(x*Block[y][x].getGlobalBounds().width + Wpos.x - off.x + Block[y][x].getOrigin().x, y*Block[y][x].getGlobalBounds().height + Wpos.y - off.y + Block[y][x].getOrigin().y);
I have this sample of code that I'm trying to understand:
__global__ void
d_boxfilter_rgba_x(unsigned int *od, int w, int h, int r)
{
float scale = 1.0f / (float)((r << 1) + 1);
unsigned int y = blockIdx.x*blockDim.x + threadIdx.x;
if (y < h)
{
float4 t = make_float4(0.0f);
for (int x = -r; x <= r; x++)
{
t += tex2D(rgbaTex, x, y);
}
od[y * w] = rgbaFloatToInt(t * scale);
for (int x = 1; x < w; x++)
{
t += tex2D(rgbaTex, x + r, y);
t -= tex2D(rgbaTex, x - r - 1, y);
od[y * w + x] = rgbaFloatToInt(t * scale);
}
}
}
__global__ void
d_boxfilter_rgba_y(unsigned int *id, unsigned int *od, int w, int h, int r)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
id = &id[x];
od = &od[x];
float scale = 1.0f / (float)((r << 1) + 1);
float4 t;
// left side
t = rgbaIntToFloat(id[0]) * r;
for (int y = 0; y < (r + 1); y++)
{
t += rgbaIntToFloat(id[y*w]);
}
od[0] = rgbaFloatToInt(t * scale);
for (int y = 1; y < (r + 1); y++)
{
t += rgbaIntToFloat(id[(y + r) * w]);
t -= rgbaIntToFloat(id[0]);
od[y * w] = rgbaFloatToInt(t * scale);
}
// main loop
for (int y = (r + 1); y < (h - r); y++)
{
t += rgbaIntToFloat(id[(y + r) * w]);
t -= rgbaIntToFloat(id[((y - r) * w) - w]);
od[y * w] = rgbaFloatToInt(t * scale);
}
// right side
for (int y = h - r; y < h; y++)
{
t += rgbaIntToFloat(id[(h - 1) * w]);
t -= rgbaIntToFloat(id[((y - r) * w) - w]);
od[y * w] = rgbaFloatToInt(t * scale);
}
}
This should be a box filter in CUDA.
From what I have read, it should compute an average over a given radius.
But d_boxfilter_rgba_y does something like this:
od[0] = rgbaFloatToInt(t * scale);
I don't understand why this scale is used, and why there are all these loops when there should be just one: compute the sum from -r to +r and divide it by the number of pixels.
Can somebody help me?
To calculate the average of a box with radius 1 (3 values), you do:
(box[0] + box[1] + box[2]) / 3      // which is equal to
(box[0] + box[1] + box[2]) * 1/3    // where 1/3 is your scale factor
The calculation of scale is:
1.0f / (float)((r << 1) + 1); // equal to
1 / ((r * 2) + 1) // equal to
1 / (2r + 1) // 2r because you go r to the left and r to the right, plus 1 for the middle (e.g. r = 2 gives a 5-pixel-wide box and scale = 1/5)
The two for loops are used because the "sliding window" optimisation is applied. First, the sum of the initial box is calculated:
for (int x = -r; x <= r; x++)
{
t += tex2D(rgbaTex, x, y);
}
Then, for each step to the right, the value just to the right of the box is added and the leftmost value of the box is removed. That way you can update the sum of the box with just 2 operations instead of 2*r + 1 operations.
for (int x = 1; x < w; x++)
{
t += tex2D(rgbaTex, x + r, y);
t -= tex2D(rgbaTex, x - r - 1, y);
od[y * w + x] = rgbaFloatToInt(t * scale);
}
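To make the sliding-window idea concrete, here is a minimal CPU sketch of the same trick for one row of grayscale values (plain C++, not the CUDA kernel; the names boxFilterRow, row and radius are made up for illustration, and out-of-range indices are simply clamped, much like the texture addressing does in the kernel):
#include <algorithm>
#include <vector>

// 1D sliding-window box filter over one row: the first output costs 2r+1 additions,
// every following output costs only one addition and one subtraction.
std::vector<float> boxFilterRow(const std::vector<float>& row, int radius) {
    const int w = static_cast<int>(row.size());
    const float scale = 1.0f / (2 * radius + 1);
    std::vector<float> out(w);

    // initial window sum around x = 0, clamping out-of-range indices
    float sum = 0.0f;
    for (int x = -radius; x <= radius; ++x)
        sum += row[std::clamp(x, 0, w - 1)];
    out[0] = sum * scale;

    // slide the window: add the entering value, remove the leaving one
    for (int x = 1; x < w; ++x) {
        sum += row[std::clamp(x + radius, 0, w - 1)];
        sum -= row[std::clamp(x - radius - 1, 0, w - 1)];
        out[x] = sum * scale;
    }
    return out;
}
The same pattern appears in d_boxfilter_rgba_y, just walking down a column (stride w) instead of along a row.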
I am using the Objective-C language.
I want to convert my image to a cylindrical shape. I am using the C++ code below (from a .cpp file) to convert the image.
cv::Mat CylindricalWarper2 (Mat img)
{
cv::Mat destImgMat(img.size(), CV_8U);
for(int y = 0; y < img.rows; y++)
{
for(int x = 0; x < img.cols; x++)
{
cv::Point2f current_pos(x,y);
current_pos = convert_pt1dd(current_pos, img.cols, img.rows);
cv::Point2i top_left((int)current_pos.x,(int)current_pos.y);
if(top_left.x < 0 || top_left.x > img.cols-2 || top_left.y < 0 ||
top_left.y > img.rows-2)
{
continue;
}
//bilinear interpolation
float dx = current_pos.x-top_left.x;
float dy = current_pos.y-top_left.y;
float weight_tl = (1.0 - dx) * (1.0 - dy);
float weight_tr = (dx) * (1.0 - dy);
float weight_bl = (1.0 - dx) * (dy);
float weight_br = (dx) * (dy);
uchar value = weight_tl * img.at<uchar>(top_left) +
weight_tr * img.at<uchar>(top_left.y,top_left.x+1) +
weight_bl * img.at<uchar>(top_left.y+1,top_left.x) +
weight_br * img.at<uchar>(top_left.y+1,top_left.x+1);
destImgMat.at<uchar>(y,x) = value;
}
}
return destImgMat;
}
cv::Point2f convert_pt1dd(cv::Point2f point,int w,int h)
{
cv::Point2f pc(point.x-w/2,point.y-h/2);
float f = w;
float r = w;
float omega = w/2;
float z0 = f - sqrt(r*r-omega*omega);
float zc = (2*z0+sqrt(4*z0*z0-4*(pc.x*pc.x/(f*f)+1)*(z0*z0-r*r)))/(2*
(pc.x*pc.x/(f*f)+1));
cv::Point2f final_point(pc.x*zc/f,pc.y*zc/f);
final_point.x += w/2;
final_point.y += h/2;
return final_point;
}
With this code I get the cylindrical shape, but my image is cut off; I do not get the full cylindrical projection. My image looks like the one below:
Image link
I want to display my full image in cylindrical shape.
If some source or help is provided, it would be greatly appreciated.
Thanks in advance.
For an embedded design I am attempting to implement Sobel edge detection on a board without the use of a frame buffer, i.e. I am reading from and writing directly to the screen. I can, however, store about one or two image widths' worth of data to be referenced later; this is due to limitations set by the board. However, I have run into an issue: all I receive is noise, regardless of whether I attempt Sobel or another edge detection algorithm. The code is below; does anyone have any suggestions?
Version 1
void sobelEdgeDetection2() {
int GX[3][3];
int GY[3][3];
int sumX[3];
int sumY[3];
int SUM[3];
int piX = 0;
int piY = 0;
//uint8_t R, G, B = 0;
int I, J = 0;
//UnpackedColour pixVal;
uint16_t *buffer;
// allocate space for even scan lines and odd scan lines
buffer = new uint16_t[_gl->getWidth()];
//buffer for previous line
uint16_t *bufT;
// allocate space for even scan lines and odd scan lines
bufT = new uint16_t[_gl->getWidth()];
// Masks //////////////////////////////////////
//X//
GX[0][0] = -1;
GX[0][1] = 0;
GX[0][2] = 1;
GX[1][0] = -2;
GX[1][1] = 0;
GX[1][2] = 2;
GX[2][0] = -1;
GX[2][1] = 0;
GX[2][2] = 1;
//Y//
GY[0][0] = 1;
GY[0][1] = 2;
GY[0][2] = 1;
GY[1][0] = 0;
GY[1][1] = 0;
GY[1][2] = 0;
GY[2][0] = -1;
GY[2][1] = -2;
GY[2][2] = -1;
for (int Y = 0; Y < _gl->getHeight(); Y++) {
for (int X = 0; X < _gl->getWidth(); X++) {
sumX[0] = sumX[1] = sumX[2] = 0;
sumY[0] = sumY[1] = sumY[2] = 0;
if (Y == 0 || Y == _gl->getHeight() - 1) {
SUM[0] = SUM[1] = SUM[2] = 0;
} else if (X == 0 || X == _gl->getWidth() - 1) {
SUM[0] = SUM[1] = SUM[2] = 0;
} else {
for (I = -1; I <= 1; I++) {
for (J = -1; J <= 1; J++) {
piX = J + X;
piY = I + Y;
pixel16 pix = getPixel(piX, piY);
uint8_t Red = pix.Red;
uint8_t Green = pix.Green;
uint8_t Blue = pix.Blue;
sumX[0] += (Red) * GX[J + 1][I + 1];
sumX[1] += (Green) * GX[J + 1][I + 1];
sumX[2] += (Blue) * GX[J + 1][I + 1];
sumY[0] += (Red) * GY[J + 1][I + 1];
sumY[1] += (Green) * GY[J + 1][I + 1];
sumY[2] += (Blue) * GY[J + 1][I + 1];
}
}
SUM[0] = abs(sumX[0]) + abs(sumY[0]);
SUM[1] = abs(sumX[1]) + abs(sumY[1]);
SUM[2] = abs(sumX[2]) + abs(sumY[2]);
}
if (SUM[0] > 255)
SUM[0] = 255;
if (SUM[0] < 0)
SUM[0] = 0;
if (SUM[1] > 255)
SUM[1] = 255;
if (SUM[1] < 0)
SUM[1] = 0;
if (SUM[2] > 255)
SUM[2] = 255;
if (SUM[2] < 0)
SUM[2] = 0;
int newPixel[3];
newPixel[0] = (255 - ((unsigned char) (SUM[0])));
newPixel[1] = (255 - ((unsigned char) (SUM[1])));
newPixel[2] = (255 - ((unsigned char) (SUM[2])));
pixel16 pix(newPixel[0], newPixel[1], newPixel[2]);
buffer[X] = packColour(pix).packed565;
}
//Need to move cursor back
// draw it
this->paintRow(Point(0, Y), buffer, _gl->getWidth());
}
delete[] buffer;
}
Version 2
/**
* https://www.cl.cam.ac.uk/projects/raspberrypi/tutorials/image-processing/edge_detection.html
* 1 Iterate over every pixel in the image
* 2 Apply the x gradient kernel
* 3 Apply the y gradient kernel
* 4 Find the length of the gradient using pythagoras' theorem
* 5 Normalise the gradient length to the range 0-255
* 6 Set the pixels to the new values
*/
void sobelEdgeDetection4() {
UnpackedColour colour;
for (int x = 1; x < _gl->getWidth() - 1; x++) {
for (int y = 1; y < _gl->getHeight() - 1; y++) {
// initialise Gx and Gy to 0
int Gx = 0;
int Gy = 0;
unsigned int intensity = 0;
// Left column
pixel16 pixel = this->getPixel(x - 1, y - 1);
intensity = pixel.Red + pixel.Green + pixel.Blue;
Gx += -intensity;
Gy += -intensity;
pixel = this->getPixel(x - 1, y);
intensity = pixel.Red + pixel.Green + pixel.Blue;
Gx += -2 * intensity;
pixel = this->getPixel(x - 1, y + 1);
intensity = pixel.Red + pixel.Green + pixel.Blue;
Gx += -intensity;
Gy += +intensity;
// middle column
pixel = this->getPixel(x, y - 1);
intensity = pixel.Red + pixel.Green + pixel.Blue;
Gy += -2 * intensity;
pixel = this->getPixel(x, y + 1);
intensity = pixel.Red + pixel.Green + pixel.Blue;
Gy += +2 * intensity;
// right column
pixel = this->getPixel(x + 1, y - 1);
intensity = pixel.Red + pixel.Green + pixel.Blue;
Gx += +intensity;
Gy += -intensity;
pixel = this->getPixel(x + 1, y);
intensity = pixel.Red + pixel.Green + pixel.Blue;
Gx += +2 * intensity;
pixel = this->getPixel(x + 1, y + 1);
intensity = pixel.Red + pixel.Green + pixel.Blue;
Gx += +intensity;
Gy += +intensity;
// calculate the gradient length
unsigned int length = (unsigned int) sqrt(
(float) (Gx * Gx) + (float) (Gy * Gy));
// normalise the length to 0 to 255
length = length / 17;
// draw the pixel on the edge image
pixel16 pixel2(length,length,length);
this->setPixel(x, y, pixel2);
}
}
}
Version 3
// sobel map for the x axis
const double _SOBEL_Gx[3][3] = { { -1.0, +0.0, +1.0 }, { -2.0, +0.0, +2.0 },
{ -1.0, +0.0, +1.0 } };
// sobel map for the y axis
const double _SOBEL_Gy[3][3] = { { +1.0, +2.0, +1.0 }, { +0.0, +0.0, +0.0 },
{ -1.0, -2.0, -1.0 } };
double get_sobel_gradient(int width, int height, int x, int y) {
double sobel_gradient_x = 0, sobel_gradient_y = 0;
int mx = 0, my = 0, sx = 0, sy = 0;
for (mx = x; mx < x + 3; mx++) {
sy = 0;
for (my = y; my < y + 3; my++) {
if (mx < width && my < height) {
//int r, g, b, idx;
int idx = (mx + width * my) * 3;
pixel16 pixVal = this->getPixel(idx);
//r = pixVal.Red;
//g = pixVal.Green;
//b = pixVal.Blue;
UnpackedColour col = this->packColour(pixVal);
sobel_gradient_x += col.packed565 * _SOBEL_Gx[sx][sy];
sobel_gradient_y += col.packed565 * _SOBEL_Gy[sx][sy];
}
sy++;
}
sx++;
}
return abs(sobel_gradient_x) + abs(sobel_gradient_y);
}
void sobelEdgeDetection3() {
double threshold = 50000.0;
UnpackedColour colour;
for (int y = 0; y < _gl->getHeight(); y++) {
for (int x = 0; x < _gl->getWidth(); x++) {
if (get_sobel_gradient(_gl->getWidth(), _gl->getHeight(), x, y)
>= threshold) {
colour.packed565 = 0x0000; // edge above threshold: black in RGB565
} else {
colour.packed565 = 0xFFFF; // otherwise: white
}
this->setPixel(x, y, colour);
}
}
}
For Version 1, after you allocate 2 buffers (just use buffer and bufT), create 2 pointers to point to the current and previous rows, like this:
uint16_t *currentRow = buffer;
uint16_t *prevRow = bufT;
Inside the row loop, write to currentRow instead of buffer:
pixel16 pix(newPixel[0], newPixel[1], newPixel[2]);
currentRow[X] = packColour(pix).packed565;
Because the Sobel filter reads from the previous row, you can't overwrite a row until you have finished calculating the filtered values for the row after it. So at the end of the loop, where you currently call paintRow(), draw the previous row (if one exists), and then swap the buffers so that the current row becomes the previous one and the previous buffer becomes the new current row (to be overwritten on the next pass through the loop). On the last row, also draw the current row, because otherwise it would never be drawn, since the outer loop is about to terminate.
if(Y > 0) // draw the previous row if this is not the first row:
this->paintRow(Point(0, Y-1), prevRow, _gl->getWidth());
if(Y == _gl->getHeight()-1) // draw the current row if it is the last:
this->paintRow(Point(0, Y), currentRow, _gl->getWidth());
// swap row pointers:
uint16_t *temp = prevRow;
prevRow = currentRow;
currentRow = temp;
The same strategy should work for the other versions.
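Putting those pieces together, the skeleton of the Version 1 row loop would look roughly like this (just a sketch; _gl, getPixel, packColour, pixel16, Point and paintRow are the poster's own types and helpers and are assumed to behave as in the original code, and the Sobel sums are elided):
uint16_t *currentRow = buffer;   // row currently being computed
uint16_t *prevRow = bufT;        // last fully computed, not yet painted row

for (int Y = 0; Y < _gl->getHeight(); Y++) {
    for (int X = 0; X < _gl->getWidth(); X++) {
        // ... compute SUM[0], SUM[1], SUM[2] exactly as before, reading with getPixel() ...
        pixel16 pix(255 - (unsigned char)SUM[0],
                    255 - (unsigned char)SUM[1],
                    255 - (unsigned char)SUM[2]);
        currentRow[X] = packColour(pix).packed565;   // write to the row buffer, not the screen
    }
    if (Y > 0)                          // row Y-1 will never be read again, safe to paint
        this->paintRow(Point(0, Y - 1), prevRow, _gl->getWidth());
    if (Y == _gl->getHeight() - 1)      // last row: there is no later pass, paint it now
        this->paintRow(Point(0, Y), currentRow, _gl->getWidth());
    // swap the row pointers so the old previous buffer is reused as the next current row
    uint16_t *temp = prevRow;
    prevRow = currentRow;
    currentRow = temp;
}
delete[] buffer;
delete[] bufT;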