I am using SFML and it has a color function that takes values in RGB, for example (255, 0, 0). I would like to be able to cycle these numbers through a loop so that the displayed colour cycles through the hue...
So if I am using (76, 204, 63), the function will adjust those 3 numbers. I need the function to take RGB, convert to HSV, adjust the hue, and then return RGB.
Any ideas how I would go about this?
the sfml code I wish to use is...
_sprite.setColor(sf::Color(76, 204, 63)); This will set the sprite to a colour. I am trying to work out, once that is done with those numbers, how to cycle the colour through the hue.
With a bit of googling I've found this answer and converted the code to C++ with SFML in mind.
I'm casting around pretty badly, so feel free to make it better. I guess it should even be possible to replace the 3x3 array.
sf::Uint8 clampAndConvert(float v)
{
if(v < 0)
return 0;
if(v > 255)
return 255;
return static_cast<sf::Uint8>(v);
}
sf::Color RGBRotate(sf::Color old, float degrees)
{
float cosA = cos(degrees*3.14159265f/180);
float sinA = sin(degrees*3.14159265f/180);
// Rotation about the R=G=B axis: the diagonal term is shared, but the two
// off-diagonal terms differ by the sign of the sqrt(1/3)*sinA component.
float diag = cosA + (1.0f - cosA) / 3.0f;
float offPlus = 1.f/3.f * (1.0f - cosA) + sqrt(1.f/3.f) * sinA;
float offMinus = 1.f/3.f * (1.0f - cosA) - sqrt(1.f/3.f) * sinA;
float rx = old.r * diag + old.g * offMinus + old.b * offPlus;
float gx = old.r * offPlus + old.g * diag + old.b * offMinus;
float bx = old.r * offMinus + old.g * offPlus + old.b * diag;
return sf::Color(clampAndConvert(rx), clampAndConvert(gx), clampAndConvert(bx), old.a);
}
Edit: Removed unnecessary casts.
Edit: Got rid of the matrix.
Edit: As I've noticed, the code didn't really work as wanted at first, so here's also a hardcoded solution that works perfectly; it just isn't that compact and nice.
#include <SFML/Graphics.hpp>
int main()
{
sf::RenderWindow Screen (sf::VideoMode (800, 600, 32), "Game", sf::Style::Close);
Screen.setFramerateLimit(60);
sf::RectangleShape rect(sf::Vector2f(350.f, 350.f));
rect.setPosition(150, 150);
int dr = 0;
int dg = 0;
int db = 0;
sf::Uint8 r = 255, g = 0, b = 0;
while (Screen.isOpen())
{
sf::Event Event;
while (Screen.pollEvent (Event))
{
if (Event.type == sf::Event::Closed)
Screen.close();
}
r += dr;
g += dg;
b += db;
if(r == 255 && g == 0 && b == 0)
{
dr = 0; dg = 1; db = 0;
}
if(r == 255 && g == 255 && b == 0)
{
dr = -1; dg = 0; db = 0;
}
if(r == 0 && g == 255 && b == 0)
{
dr = 0; dg = 0; db = 1;
}
if(r == 0 && g == 255 && b == 255)
{
dr = 0; dg = -1; db = 0;
}
if(r == 0 && g == 0 && b == 255)
{
dr = 1; dg = 0; db = 0;
}
if(r == 255 && g == 0 && b == 255)
{
dr = 0; dg = 0; db = -1;
}
rect.setFillColor(sf::Color(r, g, b));
Screen.clear();
Screen.draw(rect);
Screen.display();
}
return 0;
}
Convert RGB to HSL or HSV, modify the hue, then convert the result back to RGB.
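For example, here is a minimal sketch of that round trip for SFML (my own helper using the standard RGB/HSV conversion formulas; shiftHue and its parameter names are illustrative):
#include <SFML/Graphics.hpp>
#include <algorithm>
#include <cmath>

// Shift the hue of an sf::Color by shiftDegrees, keeping saturation and value.
sf::Color shiftHue(sf::Color c, float shiftDegrees)
{
    float r = c.r / 255.f, g = c.g / 255.f, b = c.b / 255.f;
    float mx = std::max({r, g, b}), mn = std::min({r, g, b});
    float d = mx - mn, v = mx, s = (mx == 0.f) ? 0.f : d / mx;
    // RGB -> HSV: recover the hue angle in degrees
    float h = 0.f;
    if (d > 0.f) {
        if (mx == r)      h = 60.f * std::fmod((g - b) / d, 6.f);
        else if (mx == g) h = 60.f * ((b - r) / d + 2.f);
        else              h = 60.f * ((r - g) / d + 4.f);
    }
    h = std::fmod(h + shiftDegrees + 360.f, 360.f); // rotate the hue
    // HSV -> RGB
    float chroma = v * s;
    float x = chroma * (1.f - std::fabs(std::fmod(h / 60.f, 2.f) - 1.f));
    float m = v - chroma;
    float rr = 0.f, gg = 0.f, bb = 0.f;
    if      (h < 60.f)  { rr = chroma; gg = x; }
    else if (h < 120.f) { rr = x; gg = chroma; }
    else if (h < 180.f) { gg = chroma; bb = x; }
    else if (h < 240.f) { gg = x; bb = chroma; }
    else if (h < 300.f) { rr = x; bb = chroma; }
    else                { rr = chroma; bb = x; }
    return sf::Color(sf::Uint8((rr + m) * 255), sf::Uint8((gg + m) * 255),
                     sf::Uint8((bb + m) * 255), c.a);
}
Each frame you could then call something like _sprite.setColor(shiftHue(sf::Color(76, 204, 63), angle)); and increment angle to sweep through the hue.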
Jerry's answer above is one correct way. If you don't care about preserving luminance (and if you do, don't use HSV either, since it doesn't preserve luminance), you can simply rotate your RGB color around the R=G=B axis. This is just a matrix multiply, and it saves you the conversion to and from HSL or HSV space.
Related
I have a vector<vector<double>> heightmap, dynamically loaded from a CSV file of GPS data, that is around 4000x4000. However, the file only provides 140,799 points.
It produces a greyscale map as shown below:
I wish to interpolate the heights between all the points to generate a height map of the area.
The code below visits every known point and looks in a 10m radius around it for any other known points. If another point is found, it linearly interpolates between the two points. Interpolated points are marked by a negative height, and unset values are defined as -1337.
This approach is incredibly slow, and I am sure there are better ways to achieve this.
bool run_interp = true;
bool interp_interp = false;
int counter = 0;
while (run_interp)
{
for (auto x = 0; x < map.size(); x++)
{
for (auto y = 0; y < map.at(x).size(); y++)
{
const auto height = map.at(x).at(y);
if (height == -1337) continue;
if (!interp_interp && height < 0) continue;
//Look in a 10m radius of a known value to see if there
//Is another known value to linearly interp between
//Set height to a negative if it has been interped
const int radius = (1 / resolution) * 10;
for (auto rxi = 0; rxi < radius * 2; rxi++)
{
//since we want to expand outwards
const int rx = x + ((rxi % 2 == 0) ? rxi / 2 : -(rxi - 1) / 2);
if (rx < 0 || rx >= map.size()) continue;
for (auto ryi = 0; ryi < radius * 2; ryi++)
{
const int ry = y + ((ryi % 2 == 0) ? ryi / 2 : -(ryi - 1) / 2);
if (ry < 0 || ry >= map.at(x).size()) continue;
const auto new_height = map.at(rx).at(ry);
if (new_height == -1337) continue;
//First go around we don't want to interp
//Interps
if (!interp_interp && new_height < 0) continue;
//We have found a known point within 10m
const auto delta = new_height - height;
const auto distance = sqrt((rx - x) * (rx - x) + (ry - y) * (ry - y));
const auto angle = atan2(ry - y, rx - x);
const auto ratio = delta / distance;
//Backtrack from the found point until we get to the known point
for (auto radi = 0; radi < distance; radi++)
{
const auto new_x = static_cast<int>(x + radi * cos(angle));
const auto new_y = static_cast<int>(y + radi * sin(angle));
if (new_x < 0 || new_x >= map.size()) continue;
if (new_y < 0 || new_y >= map.at(new_x).size()) continue;
const auto interp_height = map.at(new_x).at(new_y);
//If it is a known height don't interp it
if (interp_height > 0)
continue;
counter++;
//Store the interpolated height as negative to mark it as interpolated
set_height(new_x, new_y, -(height + ratio * radi));
}
}
}
}
std::cout << x << " " << counter << std::endl;
}
if (interp_interp)
run_interp = false;
interp_interp = true;
}
void set_height(const int x, const int y, const double height)
{
//First time data being set
if (map.at(x).at(y) == -1337)
{
map.at(x).at(y) = height;
}
else // Data set already so average it
{
//While this isn't technically correct and weights
//later data significantly more favourably,
//it should be fine
//TODO: fix it.
map.at(x).at(y) += height;
map.at(x).at(y) /= 2;
}
}
If you put the points into a kd-tree, finding the closest point becomes much faster: building the tree is O(n log n), and each nearest-neighbour query is O(log n) on average.
I'm not sure that will solve all your issues, but it is a start.
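For illustration, here is a minimal 2D k-d tree sketch (my own code, not taken from the question) that stores the tree implicitly in a sorted vector; Pt and the function names are assumptions:
#include <algorithm>
#include <cmath>
#include <vector>

struct Pt { double x, y, height; };

// Sort points[lo, hi) into k-d tree order in place: the median sits at the
// midpoint and the split axis alternates with depth. Build is O(n log n).
void build(std::vector<Pt>& pts, int lo, int hi, int axis) {
    if (hi - lo <= 1) return;
    int mid = (lo + hi) / 2;
    std::nth_element(pts.begin() + lo, pts.begin() + mid, pts.begin() + hi,
        [axis](const Pt& a, const Pt& b) {
            return axis == 0 ? a.x < b.x : a.y < b.y;
        });
    build(pts, lo, mid, 1 - axis);
    build(pts, mid + 1, hi, 1 - axis);
}

// Nearest neighbour of (qx, qy): O(log n) on average, pruning any subtree
// that cannot contain a point closer than the best squared distance so far.
void nearest(const std::vector<Pt>& pts, int lo, int hi, int axis,
             double qx, double qy, double& bestD2, const Pt*& best) {
    if (lo >= hi) return;
    int mid = (lo + hi) / 2;
    const Pt& p = pts[mid];
    double dx = qx - p.x, dy = qy - p.y, d2 = dx * dx + dy * dy;
    if (d2 < bestD2) { bestD2 = d2; best = &p; }
    double diff = (axis == 0) ? dx : dy; // signed offset from the split plane
    bool goLeft = diff < 0;
    nearest(pts, goLeft ? lo : mid + 1, goLeft ? mid : hi, 1 - axis, qx, qy, bestD2, best);
    if (diff * diff < bestD2) // the far side may still hold a closer point
        nearest(pts, goLeft ? mid + 1 : lo, goLeft ? hi : mid, 1 - axis, qx, qy, bestD2, best);
}

// Usage sketch: build(pts, 0, (int)pts.size(), 0); then per unknown cell:
// double bestD2 = 1e300; const Pt* best = nullptr;
// nearest(pts, 0, (int)pts.size(), 0, cellX, cellY, bestD2, best);
You would insert only the known points and query each unset cell for its nearest known neighbours, instead of scanning a fixed radius of the grid.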
I'd like to convert this existing color detection from red to gray. I grabbed the code from this project (Flame Detection System).
I have tried to implement my own algorithm, but I think I'm nowhere near what I'm trying to achieve. I got the algorithm from this link.
Below is the original code fragment with slight modification:
void TargetExtractor::colorDetect(int redThreshold, double saturationThreshold) {
Mat temp;
GaussianBlur(mFrame, temp, Size(3, 3), 0);
uchar grayThreshold = 80;
for (int i = 0; i < temp.rows; i++) {
for (int j = 0; j < temp.cols; j++) {
if (mMask.at<uchar>(i, j) == 255) {
Vec3b& v = temp.at<Vec3b>(i, j);
uchar b = v[0];
uchar g = v[1];
uchar r = v[2];
//if (abs(r - g) < grayThreshold) {
// mMask.at<uchar>(i, j) = 0;
//}
double s = 1 - 3.0 * min(b, min(g, r)) / (b + g + r);
if (!(r > redThreshold && r >= g && g > b &&
s >= ((255 - r) * saturationThreshold / redThreshold))) {
mMask.at<uchar>(i, j) = 0;
}
}
}
}
}
The commented part is my attempt to detect gray regions, but it's certainly not working for me.
Detecting moving red objects from the original code:
Detecting moving gray objects:
Gray has the property that all three components have roughly the same value. You can check that the differences between all pairs of the three color components are below a threshold, and zero out every pixel that fails that test:
if (!(abs(r - g) < grayThreshold && abs(r - b) < grayThreshold && abs(b - g) < grayThreshold)) {
mMask.at<uchar>(i, j) = 0;
}
I have an OpenCV C++ application.
I have segmented an image with pyrMeanShiftFiltering function.
Now I need to count the pixel in a segment and the number of pixel having the most frequent value in the same segment in order to compute a ratio between them. How could I do that?
I am using the Tsukuba image, and the code is:
Mat image, segmented;
image = imread("TsukubaL.jpg", 1 );
pyrMeanShiftFiltering(image, segmented, 16, 32);
The segmented image is:
If I consider a pixel in a single segment, the part where I count the pixel in that segment is:
int cont=0;
Vec3b x = segmented.at<Vec3b>(160, 136);
for(int i = 160; i < segmented.rows; ++i) { //check right-down
for(int j = 136; j < segmented.cols; ++j) {
if(segmented.at<Vec3b>(i, j) == x)
cont++;
else
continue;
}
}
for(int i = 160; i > 0; --i) { //check right-up
for(int j = 136; j < segmented.cols; ++j) {
if(segmented.at<Vec3b>(i, j) == x)
cont++;
else
continue;
}
}
for(int i = 160; i < segmented.rows; ++i) { //check down-left
for(int j = 136; j > 0; --j) {
if(segmented.at<Vec3b>(i, j) == x)
cont++;
else
continue;
}
}
for(int i = 160; i > 0; --i) { //check up-left
for(int j = 136; j > 0; --j) {
if(segmented.at<Vec3b>(i, j) == x)
cont++;
else
continue;
}
}
cout<<"Pixel "<<x<<"cont = "<<cont<<endl;
In this example, I consider a white pixel at position (160, 136) and count the pixels equal to the central one in the four directions starting from it; the output is:
Pixel [206, 222, 240]cont = 127
Could this be a good way to do it?
First you need to define a mask with the pixels having the same color as your initial point (called the seed here). You can use inRange with a given tolerance. Assuming a seed on the head, you'll get something like:
Now you need to find the connected component that contains your seed. You can do this in many ways. Here I modified a generative labeling algorithm (it can be found here). You get the list of points of the blob that contains the seed, and you can then make a mask with these points:
Now that you have all the points, it's trivial to find the number of points in the segment. To find the most frequent color you can make a histogram of the BGR values contained in the segment. Since a histogram with all RGB values would have 256*256*256 bins, it's more practical to use a map. I modified the code found here to compute the histogram over a given mask.
Now you just need to find the color value with the highest frequency.
For this example, I got:
# points in segment: 2860
Most frequent color: [209, 226, 244] #: 168
Take a look at the code:
#include <opencv2/opencv.hpp>
#include <vector>
#include <stack>
#include <map>
using namespace cv;
using namespace std;
vector<Point> connected_components(const Mat1b& img, Point seed)
{
Mat1b src = img > 0;
int label = 0;
int w = src.cols;
int h = src.rows;
int i;
cv::Point point;
// Start from seed
std::stack<int, std::vector<int>> stack2;
i = seed.x + seed.y*w;
stack2.push(i);
// Current component
std::vector<cv::Point> comp;
while (!stack2.empty())
{
i = stack2.top();
stack2.pop();
int x2 = i%w;
int y2 = i / w;
src(y2, x2) = 0;
point.x = x2;
point.y = y2;
comp.push_back(point);
// 4 connected
if (x2 > 0 && (src(y2, x2 - 1) != 0))
{
stack2.push(i - 1);
src(y2, x2 - 1) = 0;
}
if (y2 > 0 && (src(y2 - 1, x2) != 0))
{
stack2.push(i - w);
src(y2 - 1, x2) = 0;
}
if (y2 < h - 1 && (src(y2 + 1, x2) != 0))
{
stack2.push(i + w);
src(y2 + 1, x2) = 0;
}
if (x2 < w - 1 && (src(y2, x2 + 1) != 0))
{
stack2.push(i + 1);
src(y2, x2 + 1) = 0;
}
// 8 connected
if (x2 > 0 && y2 > 0 && (src(y2 - 1, x2 - 1) != 0))
{
stack2.push(i - w - 1);
src(y2 - 1, x2 - 1) = 0;
}
if (x2 > 0 && y2 < h - 1 && (src(y2 + 1, x2 - 1) != 0))
{
stack2.push(i + w - 1);
src(y2 + 1, x2 - 1) = 0;
}
if (x2 < w - 1 && y2>0 && (src(y2 - 1, x2 + 1) != 0))
{
stack2.push(i - w + 1);
src(y2 - 1, x2 + 1) = 0;
}
if (x2 < w - 1 && y2 < h - 1 && (src(y2 + 1, x2 + 1) != 0))
{
stack2.push(i + w + 1);
src(y2 + 1, x2 + 1) = 0;
}
}
return comp;
}
struct lessVec3b
{
bool operator()(const Vec3b& lhs, const Vec3b& rhs) {
return (lhs[0] != rhs[0]) ? (lhs[0] < rhs[0]) : ((lhs[1] != rhs[1]) ? (lhs[1] < rhs[1]) : (lhs[2] < rhs[2]));
}
};
map<Vec3b, int, lessVec3b> getPalette(const Mat3b& src, const Mat1b& mask)
{
map<Vec3b, int, lessVec3b> palette;
for (int r = 0; r < src.rows; ++r)
{
for (int c = 0; c < src.cols; ++c)
{
if (mask(r, c))
{
Vec3b color = src(r, c);
// std::map's operator[] value-initializes missing counts to 0
palette[color]++;
}
}
}
return palette;
}
int main()
{
// Read the image
Mat3b image = imread("tsukuba.jpg");
// Segment
Mat3b segmented;
pyrMeanShiftFiltering(image, segmented, 16, 32);
// Seed
Point seed(140, 160);
// Define a tolerance
Vec3b tol(10,10,10);
// Extract mask of pixels with same value as seed
Mat1b mask;
inRange(segmented, segmented(seed) - tol, segmented(seed) + tol, mask);
// Find the connected component containing the seed
vector<Point> pts = connected_components(mask, seed);
// Number of pixels in the segment
int n_of_pixels_in_segment = pts.size();
Mat1b mask_segment(image.rows, image.cols, uchar(0));
for (const auto& pt : pts)
{
mask_segment(pt) = uchar(255);
}
// Get palette
map<Vec3b, int, lessVec3b> palette = getPalette(segmented, mask_segment);
// Get most frequent color
Vec3b most_frequent_color;
int freq = 0;
for (const auto& pal : palette)
{
if (pal.second > freq)
{
most_frequent_color = pal.first;
freq = pal.second;
}
}
cout << "# points in segment: " << n_of_pixels_in_segment << endl;
cout << "Most frequent color: " << most_frequent_color << " \t#: " << freq << endl;
return 0;
}
After creating the required mask, as shown in the previous answer or by any other means, you can create a contour around the mask image. This will allow you to directly count the number of pixels within the segment by using the contourArea function.
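A minimal sketch of that idea, assuming the single-segment mask built in the previous answer (note that contourArea returns the area of the contour polygon, which approximates rather than exactly equals the pixel count):
#include <opencv2/opencv.hpp>
#include <algorithm>
#include <vector>
using namespace cv;

int countSegmentPixels(const Mat1b& mask_segment)
{
    std::vector<std::vector<Point>> contours;
    // clone() because some OpenCV versions modify the input of findContours
    findContours(mask_segment.clone(), contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
    double area = 0;
    for (const auto& c : contours)
        area = std::max(area, contourArea(c)); // keep the largest blob
    return static_cast<int>(area);
}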
You can segment out the selected area into a new submat and calculate a histogram on it to get the most frequent values. If you are concerned with color values only and not intensity, you should also convert your image into the HSV, Lab, or YCbCr color space as required.
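For example, a sketch of the histogram route (mask_segment is assumed from the earlier answer; hue is binned over OpenCV's [0, 180) range):
#include <opencv2/opencv.hpp>
using namespace cv;

int mostFrequentHueBin(const Mat3b& segmented, const Mat1b& mask_segment)
{
    Mat3b hsv;
    cvtColor(segmented, hsv, COLOR_BGR2HSV);
    int histSize = 180;              // OpenCV stores hue as [0, 180)
    float range[] = { 0, 180 };
    const float* ranges[] = { range };
    int channels[] = { 0 };          // hue channel only
    Mat hist;
    calcHist(&hsv, 1, channels, mask_segment, hist, 1, &histSize, ranges);
    Point maxLoc;
    minMaxLoc(hist, 0, 0, 0, &maxLoc);
    return maxLoc.y;                 // bin index of the most frequent hue
}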
I'm trying to match a scene from Sketchup in Processing/OpenGL but can't seem to get the measurements right.
I'm using these simple commands in the Ruby Console in Sketchup:
model = Sketchup.active_model
cam = model.active_view.camera
print cam.eye, cam.direction, cam.fov
Which prints these values for my file:
(1668.854717mm, -1723.414322mm, 131.550996mm)(-0.688802494154077, 0.649067164730165, 0.322897723306109)63.6653435710446nil
The FOV seems to work, but I don't think I've figured out the camera position (units) yet.
Here's my attempt:
float eyeScale = 1.0f;
float camEyeX = 1668.854717f * eyeScale;
float camEyeY = -1723.414322f * eyeScale;
float camEyeZ = 131.550996f * eyeScale;
float camTargetX = -0.688802494154077f;
float camTargetY = 0.649067164730165f;
float camTargetZ = 0.322897723306109f;
float camFOV = 63.665f;
float div = 10;
void setup(){
size(1280,720,P3D);
}
void draw(){
background(255);
perspective(radians(camFOV), (float)width/height, camEyeZ * 0.1f, camEyeZ * 10); // cast avoids integer division in the aspect ratio
camera(camEyeX/div, camEyeY/div, camEyeZ/div, camTargetX, camTargetY, camTargetZ, 1, 0, 0);
drawGrid(20,10,10,0);
drawGrid(20,10,10,1);
drawGrid(20,10,10,2);
}
void keyPressed(){
if(keyCode == UP) div++;
if(keyCode == DOWN) div--;
}
void drawGrid(int size,int w,int h,int plane){
pushStyle();
noFill();
if(plane == 0) stroke(255,0,0);
if(plane == 1) stroke(0,255,0);
if(plane == 2) stroke(0,0,255);
int total = w * h;
int tw = w * size;
int th = h * size;
beginShape(LINES);
for(int i = 0 ; i < total; i++){
int x = (i % w) * size;
int y = (i / w) * size;
if(plane == 0){
vertex(0,x,0);vertex(0,x,th);
vertex(0,0,y);vertex(0,tw,y);
}
if(plane == 1){
vertex(x,0,0);vertex(x,0,th);
vertex(0,0,y);vertex(tw,0,y);
}
if(plane == 2){
vertex(x,0,0);vertex(x,th,0);
vertex(0,y,0);vertex(tw,y,0);
}
}
endShape();
popStyle();
}
The grids look OK above using the perspective() call, but if I comment the camera() call back in, the scene disappears.
Any hints or tips will help. Processing code is ok, but I don't mind raw GL calls/matrices either.
Update
Based on #Majlik's notes, it's worth pointing out the differences between the Sketchup camera API and Processing's camera API, and on top of that the difference in coordinate systems (Sketchup is Z-up, while Processing's default has Y pointing down the screen).
I've tried playing with the coordinate system a bit using keys, but I'm not understanding the conversion from Sketchup to Processing/OpenGL:
float eyeScale = 1.0f;
float camEyeX = 1668.854717f * eyeScale;
float camEyeY = -1723.414322f * eyeScale;
float camEyeZ = 131.550996f * eyeScale;
float camTargetX = -0.688802494154077f;
float camTargetY = 0.649067164730165f;
float camTargetZ = 0.322897723306109f;
float camFOV = 63.665f;
float div = 10;
PImage bg;
int axis = 0;
boolean flipAxis = false;
void setup(){
size(1280,720,P3D);
bg = loadImage("SketchupCam.png");
}
void draw(){
background(bg);
perspective(radians(camFOV), (float)width/height, camEyeZ * 0.1f, camEyeZ * 10); // cast avoids integer division in the aspect ratio
camera(camEyeX/div, camEyeY/div, camEyeZ/div,
camTargetX+camEyeX/div, camTargetY+camEyeY/div, camTargetZ+camEyeZ/div,
axis == 0 ? (flipAxis ? -1 : 1) : 0,
axis == 1 ? (flipAxis ? -1 : 1) : 0,
axis == 2 ? (flipAxis ? -1 : 1) : 0);
drawGrid(20,10,10,0);
drawGrid(20,10,10,1);
drawGrid(20,10,10,2);
}
void keyPressed(){
if(keyCode == UP) div++;
if(keyCode == DOWN) div--;
println(div);
if(key == 'x') axis = 0;
if(key == 'y') axis = 1;
if(key == 'z') axis = 2;
if(key == ' ') flipAxis = !flipAxis;
}
void drawGrid(int size,int w,int h,int plane){
pushStyle();
noFill();
if(plane == 0) stroke(255,0,0);
if(plane == 1) stroke(0,255,0);
if(plane == 2) stroke(0,0,255);
int total = w * h;
int tw = w * size;
int th = h * size;
beginShape(LINES);
for(int i = 0 ; i < total; i++){
int x = (i % w) * size;
int y = (i / w) * size;
if(plane == 0){
vertex(0,x,0);vertex(0,x,th);
vertex(0,0,y);vertex(0,tw,y);
}
if(plane == 1){
vertex(x,0,0);vertex(x,0,th);
vertex(0,0,y);vertex(tw,0,y);
}
if(plane == 2){
vertex(x,0,0);vertex(x,th,0);
vertex(0,y,0);vertex(tw,y,0);
}
}
endShape();
popStyle();
}
And here is a Sketchup screenshot I'm trying to match in Processing:
The smaller box is 1000mm in Sketchup.
How could I match the view from Sketchup in Processing/OpenGL?
I am trying to make a program in OpenCV to convert an image into matrix form, with each value representing an image pixel. I have converted the image into binary form, and now I want to convert its pixel values into a matrix.
If you need to use a CvMat object, you may want to try the cvCopy function. It takes CvArr* as its arguments, so both IplImage and CvMat will fit. If you would rather leave the C API for something more modern, you can use a cv::Mat object to load the image into and use the C++ threshold function.
The question is why you want to convert the format of a matrix you already have (an IplImage, like all the others, is a matrix already). If you want a matrix of bool type, use the Matx or Mat_ template class for this.
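For example, a minimal sketch of the Mat_ route, assuming an 8-bit grayscale input (the function name is illustrative):
#include <opencv2/opencv.hpp>
using namespace cv;

// Threshold a grayscale image into a typed matrix of 0/1 values,
// indexable directly as bin(row, col).
Mat_<uchar> toBinaryMatrix(const Mat& gray)
{
    Mat_<uchar> bin;
    threshold(gray, bin, 128, 1, THRESH_BINARY);
    return bin;
}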
First glance at your question raises more questions... try to be a bit more specific (I don't seem to be able to see your code example; I'm new to Stack Overflow).
Such as your OpenCV version and IDE (like Code::Blocks or Microsoft Visual Studio); please include it in your question. What I would also like to know is the purpose of this: why do you need a matrix, and so forth :)
attempted answer
from what I can gather
"but I have installed OpenCV version 2.3.1 on Visual C++ 2010 – Ayesha Khan"
OpenCV uses a class called Mat, which you should have encountered a lot. This class is essentially a matrix already. If I remember correctly, it is very similar to vectors, which I won't cover here.
So if you need to access any pixel value in, let's say,
Mat Img;
you would use a member function on this instance of the class, as such:
cout << (int)Img.at<uchar>(y, x); // at<>() takes (row, column); the cast prints a number, not a character
This will access and print the value of the pixel at coordinates (x, y) to the console. In this example I use uchar inside the angle brackets <>. uchar is used for 8-bit pictures; you will have to change this if you work with images of more detail (more bits).
When using a binary picture, OpenCV will most likely allocate 8 bits per pixel, which means you need the example above.
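Putting it together, a minimal, self-contained sketch (the file name is just an example):
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;

int main()
{
    Mat img = imread("binary.png", 0); // 0 = load as 8-bit grayscale
    if (img.empty()) return -1;
    for (int y = 0; y < img.rows; ++y) {
        for (int x = 0; x < img.cols; ++x)
            std::cout << (int)img.at<uchar>(y, x) << ' '; // (row, col)
        std::cout << '\n';
    }
    return 0;
}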
I'd like to give more details, but not before you've specified what exactly it is that you are attempting to do.
Regards Scrub # Stackoverflow
Your code uses OpenCV version 1. I'll let someone else answer, since it's not my forte. In my opinion, the 2.0 template-based interface is much more intuitive, and I recommend using it for all new endeavors.
Have a look at the way I use imread() in this program...
Please inspect the type of value returned from imread()...
Also, search in the code for originalColor = imageArg(/*row*/chosenX, /*column*/chosenY); it's a way to index into the matrix returned from imread.
// HW1 Intro to Digital Image Processing
// used OpenCV 2.3.1 and VS2010 SP1 to develop this solution
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <sstream>   // std::stringstream (used in str2int)
#include <stdexcept> // std::runtime_error
#include <cassert>
using namespace cv;
Mat_<Vec3b> image;
int discreteAngles = 512;
void on_mouse(int eventCode, int centerX, int centerY, int flags, void* params);
int str2int(const std::string &str);
int main(int argc, char* argv[])
{
// command itself is one element of argument array...
if(argc != 1 && argc != 3)
{
std::cout << "Expecting two arguments to the application: angular granularity as a whole number and a file name." << std::endl;
exit(0);
}
std::string discreteAnglesStr, fileName;
if(argc == 3)
{
discreteAnglesStr = argv[1];
fileName = argv[2];
}
else
{
discreteAnglesStr = "64";
fileName = "boats.tif";
}
try
{
discreteAngles = str2int(discreteAnglesStr);
auto image_ = imread(fileName);
int channels = image_.channels();
assert(channels == 3);
image = image_;
if(image.rows == 0)
throw std::runtime_error("could not load image"); // throw by value, not by pointer
auto originalImageStr = "Original Image";
namedWindow(originalImageStr);
setMouseCallback(originalImageStr, on_mouse);
imshow(originalImageStr, image);
}
catch(const std::exception& e)
{
std::cout << "could not load image." << std::endl;
}
waitKey(0);
return 0;
}
// borrowed from http://stackoverflow.com/q/194465/90475, courtesy of Luka Marinko
int str2int(const std::string &str)
{
std::stringstream ss(str);
int num;
if((ss >> num).fail())
{
throw std::runtime_error("could not parse user input!");
}
return num;
}
double compute_max_radius(int imageRows, int imageCols, int centerX, int centerY)
{
auto otherX = imageCols - centerX;
auto otherY = imageRows - centerY;
auto a = sqrt((double)centerX * centerX + centerY * centerY);
auto b = sqrt((double)otherX * otherX + centerY * centerY);
auto c = sqrt((double)centerX * centerX + otherY * otherY);
auto d = sqrt((double)otherX * otherX + otherY * otherY);
return max(max(a,b), max(c,d));
}
Vec3b interpolate_with_nearest(const Mat_<Vec3b>& imageArg, double x, double y)
{
auto x0 = static_cast<int>(floor(x)); auto y0 = static_cast<int>(floor(y));
auto x1 = static_cast<int>(ceil(x)); auto y1 = static_cast<int>(ceil(y));
// Rolls over to the other side, esp. for angles
if(x0 < 0) x0 = imageArg.rows - 1;
if(y0 < 0) y0 = imageArg.cols - 1;
if (x1 == imageArg.rows) x1 = 0;
if (y1 == imageArg.cols) y1 = 0;
int chosenX, chosenY;
if (x - x0 < 0.5) chosenX = x0; else chosenX = x1;
if (y - y0 < 0.5) chosenY = y0; else chosenY = y1;
Vec3b originalColor = Vec3b(0, 0, 0);
if (chosenX >= 0 && chosenX < imageArg.rows &&
chosenY >= 0 && chosenY < imageArg.cols)
{
originalColor = imageArg(/*row*/chosenX, /*column*/chosenY);
}
return originalColor;
}
Vec3b interpolate_with_bilinear(const Mat_<Vec3b>& imageArg, double x, double y)
{
auto x0 = static_cast<int>(floor(x)); auto y0 = static_cast<int>(floor(y));
auto x1 = static_cast<int>(ceil(x)); auto y1 = static_cast<int>(ceil(y));
// Rolls over to the other side, esp. for angles
if(x0 < 0) x0 = imageArg.rows - 1;
if(y0 < 0) y0 = imageArg.cols - 1;
if (x1 == imageArg.rows) x1 = 0;
if (y1 == imageArg.cols) y1 = 0;
if (!(
x0 >= 0 && x0 < imageArg.rows &&
x1 >= 0 && x1 < imageArg.rows &&
y0 >= 0 && y0 < imageArg.cols &&
y1 >= 0 && y1 < imageArg.cols))
return Vec3b(0, 0, 0);
// Work in float: Vec3b arithmetic saturates, so uchar differences
// would clamp negative values to zero and break the interpolation
Vec3f f00 = imageArg(x0, y0);
Vec3f f01 = imageArg(x0, y1);
Vec3f f10 = imageArg(x1, y0);
Vec3f f11 = imageArg(x1, y1);
Vec3f b1 = f00;
Vec3f b2 = f10 - f00;
Vec3f b3 = f01 - f00;
Vec3f b4 = f00 + f11 - f01 - f10;
x = x - x0;
y = y - y0;
Vec3f result = b1 + b2 * x + b3 * y + b4 * (x * y);
return Vec3b(saturate_cast<uchar>(result[0]),
saturate_cast<uchar>(result[1]),
saturate_cast<uchar>(result[2]));
}
void on_mouse(int eventCode, int centerX, int centerY, int flags, void* params)
{
if(eventCode == 0)
return;
switch( eventCode )
{
case CV_EVENT_LBUTTONDOWN:
{
std::cout << "Center was (" << centerX << ", " << centerY << ")" << std::endl;
auto maxRadiusXY = compute_max_radius(image.rows, image.cols, centerX, centerY);
int discreteRadii = static_cast<int>(floor(maxRadiusXY));
Mat_<Vec3b> polarImg1;
polarImg1.create(/*rows*/discreteRadii, /*cols*/discreteAngles);
Mat_<Vec3b> polarImg2;
polarImg2.create(/*rows*/discreteRadii, /*cols*/discreteAngles);
for (int radius = 0; radius < discreteRadii; radius++) // radii
{
for (int discreteAngle = 0; discreteAngle < discreteAngles; discreteAngle++) // discreteAngles
{
// 3
auto angleRad = discreteAngle * 2.0 * CV_PI / discreteAngles;
// 2
auto xTranslated = cos(angleRad) * radius;
auto yTranslated = sin(angleRad) * radius;
// 1
auto x = centerX + xTranslated;
auto y = centerY - yTranslated;
polarImg1(/*row*/ radius, /*column*/ discreteAngle) = interpolate_with_nearest(image, /*row*/y, /*column*/x);
polarImg2(/*row*/ radius, /*column*/ discreteAngle) = interpolate_with_bilinear(image, /*row*/y, /*column*/x);
}
}
auto polarImage1Str = "Polar (nearest)";
namedWindow(polarImage1Str);
imshow(polarImage1Str, polarImg1);
auto polarImage2Str = "Polar (bilinear)";
namedWindow(polarImage2Str);
imshow(polarImage2Str, polarImg2);
Mat_<Vec3b> reprocessedImg1;
reprocessedImg1.create(Size(image.cols, image.rows)); // Size is (width, height)
Mat_<Vec3b> reprocessedImg2;
reprocessedImg2.create(Size(image.cols, image.rows));
for(int y = 0; y < image.rows; y++)
{
for(int x = 0; x < image.cols; x++)
{
// 1
auto xTranslated = x - centerX;
auto yTranslated = -(y - centerY);
// 2
auto radius = sqrt((double)xTranslated * xTranslated + yTranslated * yTranslated);
double angleRad;
if(xTranslated != 0)
{
angleRad = atan((double)abs(yTranslated) / abs(xTranslated));
// I Quadrant
if (xTranslated > 0 && yTranslated > 0)
angleRad = angleRad;
// II Quadrant
if (xTranslated < 0 && yTranslated > 0)
angleRad = CV_PI - angleRad;
// III Quadrant
if (xTranslated < 0 && yTranslated < 0)
angleRad = CV_PI + angleRad;
/// IV Quadrant
if (xTranslated > 0 && yTranslated < 0)
angleRad = 2 * CV_PI - angleRad;
if (yTranslated == 0)
if (xTranslated > 0) angleRad = 0;
else angleRad = CV_PI;
}
else
{
if (yTranslated > 0) angleRad = CV_PI / 2;
else angleRad = 3 * CV_PI / 2;
}
// 3
auto discreteAngle = angleRad * discreteAngles / (2.0 * CV_PI);
reprocessedImg1(/*row*/ y, /*column*/ x) = interpolate_with_nearest(polarImg1, /*row*/radius, /*column*/discreteAngle);
reprocessedImg2(/*row*/ y, /*column*/ x) = interpolate_with_bilinear(polarImg2, /*row*/radius, /*column*/discreteAngle);
}
}
auto reprocessedImg1Str = "Re-processed (nearest)";
namedWindow(reprocessedImg1Str);
imshow(reprocessedImg1Str, reprocessedImg1);
auto reprocessedImg2Str = "Re-processed (bilinear)";
namedWindow(reprocessedImg2Str);
imshow(reprocessedImg2Str, reprocessedImg2);
} break;
}
}