Histogram of oriented gradients - C++

For a project I'm writing some code to compute the HoG of some images, but I'm stuck on the fact that my orientations only fall between 0 and 90 degrees, even though I'm using the atan2 function.
I'm guessing that this problem is caused by OpenCV's filter2D function, but I'm not sure whether that is the reason or whether I'm doing something else wrong:
Vector<Vector<Mat_<float>>> HoG(Mat image) {
    Mat img_x;
    Mat img_y;
    IplImage img = image;
    Mat kern_x = (Mat_<char>(1, 3) << -1, 0, 1);
    Mat kern_y = (Mat_<char>(3, 1) << -1, 0, 1);
    filter2D(image, img_x, image.depth(), kern_x);
    filter2D(image, img_y, image.depth(), kern_y);

    Vector<Vector<Mat_<float>>> histograms;
    for(int y = 0; y < image.rows - size; y += size) {
        Vector<Mat_<float>> temp_hist;
        for(int x = 0; x < image.cols - size; x += size) {
            float total_mag = 0;
            Mat hist = Mat::zeros(1, 8, CV_32FC1);
            for(int i = y; i < y + size; ++i) {
                for(int j = x; j < x + size; ++j) {
                    float grad_x = (float)img_x.at<uchar>(i, j);
                    float grad_y = (float)img_y.at<uchar>(i, j);
                    double ori = myatan2(grad_x, grad_y);
                    float mag = sqrt(pow(grad_x, 2) + pow(grad_y, 2));
                    int bin = round(ori/45);
                    hist.at<float>(0, (bin - 1 < 0 ? 7 : bin - 1)) += - (float)(ori - ((round(ori/45) - 1) * 45.0 + 22.5)) / 45.0f;
                    hist.at<float>(0, bin) += -(float)(ori - ((round(ori/45) - 1) * 45.0 + 22.5)) / 45.0f;
                    total_mag += mag;
                }
            }
            // Normalize the histogram
            for(int i = 0; i < 8; ++i) {
                hist.at<float>(0, i) = hist.at<float>(0, i) / total_mag;
            }
            temp_hist.push_back(hist);
        }
        histograms.push_back(temp_hist);
    }
    return histograms;
}
Any other tips for speeding up my code, or anything else, are of course also welcome.

I notice this:
float grad_x = (float)img_x.at<uchar>(i, j);
float grad_y = (float)img_y.at<uchar>(i, j);
You seem to be reading the gradient images as uchar. Should this not be char? More importantly, because filter2D is called with a destination depth of image.depth() (CV_8U), all negative filter responses are saturated to zero before you ever read them, so grad_x and grad_y are always non-negative and atan2 can only return angles between 0 and 90 degrees. Give filter2D a signed destination depth such as CV_32F and read the gradients back as float.
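As a minimal sketch of that idea (my suggestion, not code from the question; the CV_32F depth and the cv::fastAtan2 call are assumptions):

void signedGradients(const cv::Mat& image, cv::Mat& img_x, cv::Mat& img_y)
{
    cv::Mat kern_x = (cv::Mat_<float>(1, 3) << -1, 0, 1);
    cv::Mat kern_y = (cv::Mat_<float>(3, 1) << -1, 0, 1);
    // A CV_32F destination keeps negative filter responses; a CV_8U
    // destination saturates them to 0, which limits atan2 to 0..90 degrees.
    cv::filter2D(image, img_x, CV_32F, kern_x);
    cv::filter2D(image, img_y, CV_32F, kern_y);
}

// Usage inside the cell loops:
//   float grad_x = img_x.at<float>(i, j);
//   float grad_y = img_y.at<float>(i, j);
//   float ori = cv::fastAtan2(grad_y, grad_x); // angle in degrees, in [0, 360)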

Related

Warp Image by Diagonal Sine Wave

I'm trying to warp a colour image using the sin function in OpenCV, and I was successful in doing so. However, how can I make a 'diagonal' warp using a sine wave?
My code is this:
Mat result = src.clone();
for (int i = 0; i < src.rows; i++) { // to y
    for (int j = 0; j < src.cols; j++) { // to x
        for (int ch = 0; ch < 3; ch++) { // each colour
            int offset_x = 0;
            int offset_y = (int)(25.0 * sin(3.14 * j / 150));
            if (i + offset_y < src.rows) {
                result.at<Vec3b>(i, j)[ch] = src.at<Vec3b>((i + offset_y) % src.rows, j)[ch];
            }
            else
                result.at<Vec3b>(i, j)[ch] = 0.0;
        }
    }
}
imshow("result", result);
How can I do this? Not drawing a sine graph, but warping an image.
Solved this! A while ago, someone sent me a message saying that the image was stolen. It was from Google, actually, but I've deleted it to avoid causing any problems. Thanks!
I think it should look like this:
void deform()
{
    float alpha = 45 * CV_PI / 180.0; // wave direction
    float ox = cos(alpha);
    float oy = sin(alpha);
    cv::Mat src = cv::imread("F:/ImagesForTest/lena.jpg");
    // draw a grid so the deformation is visible
    for (int i = 0; i < src.rows; i += 8)
    {
        cv::line(src, cv::Point(i, 0), cv::Point(i, src.rows), cv::Scalar(255, 255, 255));
    }
    for (int j = 0; j < src.cols; j += 8)
    {
        cv::line(src, cv::Point(0, j), cv::Point(src.cols, j), cv::Scalar(255, 255, 255));
    }
    cv::Mat result = src.clone();
    for (int i = 0; i < src.rows; i++)
    { // to y
        for (int j = 0; j < src.cols; j++)
        { // to x
            float t = (i * oy) + (j * ox); // wave parameter
            for (int ch = 0; ch < 3; ch++)
            { // each colour
                int offset_x = ox * (int)(25.0 * (sin(3.14 * t / 150)));
                int offset_y = oy * (int)(25.0 * (sin(3.14 * t / 150)));
                if (i + offset_y < src.rows && j + offset_x < src.cols && i + offset_y >= 0 && j + offset_x >= 0)
                {
                    result.at<cv::Vec3b>(i, j)[ch] = src.at<cv::Vec3b>(i + offset_y, j + offset_x)[ch];
                }
                else
                    result.at<cv::Vec3b>(i, j)[ch] = 0.0;
            }
        }
    }
    cv::imshow("result", result);
    cv::imwrite("result.jpg", result);
    cv::waitKey();
}
The result:
BTW, it may be better to use cv::remap?
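For example, a minimal cv::remap sketch of the same diagonal wave (assuming the alpha/ox/oy and the amplitude/period from the code above; a sketch, not tested against the original):

cv::Mat map_x(src.size(), CV_32FC1);
cv::Mat map_y(src.size(), CV_32FC1);
for (int i = 0; i < src.rows; i++)
{
    for (int j = 0; j < src.cols; j++)
    {
        float t = i * oy + j * ox;                         // wave parameter
        float offset = (float)(25.0 * sin(CV_PI * t / 150.0)); // same amplitude/period
        map_x.at<float>(i, j) = j + ox * offset;           // source x for dst (i, j)
        map_y.at<float>(i, j) = i + oy * offset;           // source y for dst (i, j)
    }
}
cv::remap(src, result, map_x, map_y, cv::INTER_LINEAR,
          cv::BORDER_CONSTANT, cv::Scalar(0, 0, 0));

remap also gives you bilinear interpolation and border handling for free, instead of the nearest-pixel copy in the loops above.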

Problem while copying an Opencv Image into a bigger one

I'm trying to copy an OpenCV image into a bigger image, but I'm running into some problems. I want to create a bigger image with a border of a specific size, without touching that border. So I'd like to change just the center part of the bigger image, which has the same size as the original.
Here is the code I'm using.
main.cpp
#include <iostream>
#include "useful_tools.h"

int main()
{
    Useful_Tools ut;
    cv::Mat image = cv::imread("/home/felippe/Codigos_Cpp/Image_Registration_2D/square_landscape.jpeg");
    cv::Mat gray_image(image.rows, image.cols, image.type());
    cv::cvtColor(image, gray_image, cv::COLOR_BGR2GRAY);

    cv::Mat sobel_x(3, 3, CV_64F);

    //Filling the Sobel Filter
    sobel_x.at<double>(0, 0) = -1;
    sobel_x.at<double>(0, 1) = -2;
    sobel_x.at<double>(0, 2) = -1;
    sobel_x.at<double>(1, 0) = 0;
    sobel_x.at<double>(1, 1) = 0;
    sobel_x.at<double>(1, 2) = 0;
    sobel_x.at<double>(2, 0) = 1;
    sobel_x.at<double>(2, 1) = 2;
    sobel_x.at<double>(2, 2) = 1;

    cv::Mat edge = ut.correlation(gray_image, sobel_x, "zeros");
    return 0;
}
function
cv::Mat PadImage(cv::Mat image, int k_cols, int k_rows)
{
    //There is an error when k_rows or k_cols are even numbers.
    //cv::Mat image_padded(image.rows + k_rows - 1, image.cols + k_cols - 1, image.type());
    //Fixing:
    cv::Mat image_padded(image.rows + (k_rows/2)*2, image.cols + (k_cols/2)*2, image.type());
    image_padded = 0;

    //if (!padding_type.compare("zeros"))
    //{
    //Naming conventions are: x applies to cols, and y applies to rows
    //int x_add = k_rows / 2, y_add = k_cols / 2;
    int y_add = k_rows / 2, x_add = k_cols / 2;

    for (int i = y_add; i < image.rows + y_add; i++)
        for (int j = x_add; j < image.cols + x_add; j++)
            image_padded.at<double>(i, j) = image.at<double>(i - y_add, j - x_add);
    //}

    return image_padded;
}
cv::Mat Useful_Tools::correlation(cv::Mat image, cv::Mat kernel, std::string padding_type)
{
    cv::Mat image_padded(image.rows + kernel.rows-1, image.cols + kernel.cols-1, image.type());
    image_padded = 0;
    cv::Mat result(image.rows, image.cols, image.type());
    result = 0;

    cv::Mat image_padded2 = PadImage(image, 3, 3);
    showImage(image, "Original Image");
    showImage(image_padded2, "Image Padded");

    if (!padding_type.compare("zeros"))
    {
        int x_add = kernel.rows/2, y_add = kernel.cols/2;
        for (int i = x_add; i < image.rows + x_add; i++)
            for (int j = y_add; j < image.cols + y_add; j++)
                image_padded.at<double>(i, j) = image.at<double>(i-x_add, j-y_add);
    }
    /*else if (!padding_type.compare("repeat"))
    {
        cv::Mat result(image.rows + kernel.rows/2, image.cols + kernel.cols/2, image.type());
        for (int i = (kernel.rows-1)/2; i < image.rows + (kernel.rows-1)/2; i++)
            for (int j = (kernel.cols-1)/2; j < image.cols + (kernel.cols-1)/2; j++)
                result.at<double>(i, j) = image.at<double>(i-(kernel.rows-1)/2, j-((kernel.cols-1)/2));
    }*/
    else if (!padding_type.compare("without"))
    {
        image_padded.release();
        cv::Mat image_padded = image;
    }
    else
        std::cerr << "Please enter with a valid padding value." << std::endl;

    //showImage(image_padded, "Testing Padding");
    cv::imwrite("Padding_image.jpg", image_padded);

    for (int i = 0; i < result.rows; i++)
        for (int j = 0; j < result.cols; j++)
            for (int m = 0; m < kernel.rows; m++)
                for (int n = 0; n < kernel.cols; n++)
                {
                    std::cout << image_padded.at<double>(i+m+kernel.rows/2, j+n+kernel.cols/2) << std::endl
                              << kernel.at<double>(m, n) << std::endl;
                    result.at<double>(i, j) += image_padded.at<double>(i+m+kernel.rows/2, j+n+kernel.cols/2)*kernel.at<double>(m, n);
                    std::cout << std::endl;
                }
    return result;
}
Here is the input image that I'm using.
Here is an example of the image that I'm getting as a result.
I have done some other examples using vectors and the result seems correct, so what is wrong with this code?
Thanks in advance.
The only issue I could find is in case kernel.rows or kernel.cols are even.
The size of your output image is (image.rows + kernel.rows-1, image.cols + kernel.cols-1).
Size is going to be too small when kernel.rows or kernel.cols are even.
For example: in case kernel.rows = 0, the size of the output is going to be smaller than the input.
Suggested correction:
cv::Mat image_padded(image.rows + (kernel.rows/2)*2, image.cols + (kernel.cols/2)*2, image.type());
Dividing the (integer) value by 2 and then multiplying by 2 covers both the even and odd cases: for example, kernel.rows = 3 gives (3/2)*2 = 2, and kernel.rows = 4 gives (4/2)*2 = 4.
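For reference, the same zero padding can also be done with OpenCV's built-in cv::copyMakeBorder (a sketch, using the corrected half-kernel sizes):

int pad_y = kernel.rows / 2, pad_x = kernel.cols / 2;
cv::Mat image_padded;
cv::copyMakeBorder(image, image_padded, pad_y, pad_y, pad_x, pad_x,
                   cv::BORDER_CONSTANT, cv::Scalar(0));

This handles the even-kernel case and works for any element type, so the manual at<double> copy loop is not needed.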
Note about naming conventions:
The naming convention is: x applies to cols, and y applies to rows.
You have swapped the names, which makes the code difficult to read.
I am not sure if your issue is related to the problem I found.
It could also be a problem in the input or output (problem in the code parts you are not showing).
Here is a test sample code (I put some of your code in comments):
cv::Mat PadImage(cv::Mat image, int k_cols, int k_rows)
{
    //There is an error when k_rows or k_cols are even numbers.
    //cv::Mat image_padded(image.rows + k_rows - 1, image.cols + k_cols - 1, image.type());
    //Fixing:
    cv::Mat image_padded(image.rows + (k_rows/2)*2, image.cols + (k_cols/2)*2, image.type());
    image_padded = 0;

    //if (!padding_type.compare("zeros"))
    //{
    //Naming conventions are: x applies to cols, and y applies to rows
    //int x_add = k_rows / 2, y_add = k_cols / 2;
    int y_add = k_rows / 2, x_add = k_cols / 2;

    for (int i = y_add; i < image.rows + y_add; i++)
        for (int j = x_add; j < image.cols + x_add; j++)
            image_padded.at<double>(i, j) = image.at<double>(i - y_add, j - x_add);
    //}

    return image_padded;
}
int main()
{
    //Read input image as Grayscale (one byte per pixel).
    cv::Mat Iu8 = cv::imread("img.png", cv::IMREAD_GRAYSCALE);

    //Draw a white rectangle around the input image (for testing)
    cv::rectangle(Iu8, cv::Rect(0, 0, Iu8.cols - 1, Iu8.rows - 1), cv::Scalar(255), 1);

    cv::Mat I;
    Iu8.convertTo(I, CV_64FC1); //Convert from uint8 to double

    //Execute padding function
    cv::Mat J = PadImage(I, 101, 0);

    cv::Mat Ju8;
    J.convertTo(Ju8, CV_8UC1); //Convert from double to uint8

    //Display input and output
    cv::imshow("Iu8", Iu8);
    cv::imshow("Ju8", Ju8);
    cv::waitKey(0);
    cv::destroyAllWindows();
    return 0;
}
Update
After you posted your main, the problem can be found:
You are using at<double>, but the image type is uchar (one byte per pixel).
Solution:
Replace at<double> with at<uchar> when reading from and writing to the image.
Keep at<double> for the kernel, because the kernel type is double.
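Alternatively (my suggestion, not part of the fix above), you could convert the image to CV_64F once up front, so that at<double> is type-correct everywhere:

cv::Mat image64;
gray_image.convertTo(image64, CV_64F);
// image64.at<double>(i, j) is now valid; convert back with
// result.convertTo(edge, CV_8U) for display and saving.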
Here is the modified testing code (just for reference):
cv::Mat correlationPad(cv::Mat image, cv::Mat kernel, std::string padding_type)
{
    cv::Mat image_padded(image.rows + kernel.rows - 1, image.cols + kernel.cols - 1, image.type());
    image_padded = 0;
    cv::Mat result(image.rows, image.cols, image.type());
    result = 0;
    //cv::Mat image_padded2 = PadImage(image, 3, 3);
    //showImage(image, "Original Image");
    //showImage(image_padded2, "Image Padded");
    if (!padding_type.compare("zeros"))
    {
        int x_add = kernel.rows / 2, y_add = kernel.cols / 2;
        for (int i = x_add; i < image.rows + x_add; i++)
            for (int j = y_add; j < image.cols + y_add; j++)
                image_padded.at<uchar>(i, j) = image.at<uchar>(i - x_add, j - y_add);
    }
    /*else if (!padding_type.compare("repeat"))
    {
        cv::Mat result(image.rows + kernel.rows/2, image.cols + kernel.cols/2, image.type());
        for (int i = (kernel.rows-1)/2; i < image.rows + (kernel.rows-1)/2; i++)
            for (int j = (kernel.cols-1)/2; j < image.cols + (kernel.cols-1)/2; j++)
                result.at<double>(i, j) = image.at<double>(i-(kernel.rows-1)/2, j-((kernel.cols-1)/2));
    }*/
    else if (!padding_type.compare("without"))
    {
        //image_padded.release();
        //cv::Mat image_padded = image;
    }
    else
        std::cerr << "Please enter with a valid padding value." << std::endl;
    //showImage(image_padded, "Testing Padding");
    //cv::imwrite("Padding_image.jpg", image_padded);
    //for (int i = 0; i < result.rows; i++)
    //    for (int j = 0; j < result.cols; j++)
    //        for (int m = 0; m < kernel.rows; m++)
    //            for (int n = 0; n < kernel.cols; n++)
    //            {
    //                std::cout << image_padded.at<double>(i + m + kernel.rows / 2, j + n + kernel.cols / 2) << std::endl
    //                    << kernel.at<double>(m, n) << std::endl;
    //                result.at<double>(i, j) += image_padded.at<double>(i + m + kernel.rows / 2, j + n + kernel.cols / 2)*kernel.at<double>(m, n);
    //                std::cout << std::endl;
    //            }
    return image_padded;
}
int main()
{
    //Read input image and convert it to grayscale.
    cv::Mat image = cv::imread("square_landscape.jpeg");
    cv::Mat gray_image(image.rows, image.cols, image.type());
    cv::cvtColor(image, gray_image, cv::COLOR_BGR2GRAY);

    //Draw a white rectangle around the input image (for testing)
    //cv::rectangle(gray_image, cv::Rect(0, 0, gray_image.cols - 1, gray_image.rows - 1), cv::Scalar(255), 1);

    cv::Mat sobel_x(3, 3, CV_64F);

    //Filling the Sobel Filter
    sobel_x.at<double>(0, 0) = -1;
    sobel_x.at<double>(0, 1) = -2;
    sobel_x.at<double>(0, 2) = -1;
    sobel_x.at<double>(1, 0) = 0;
    sobel_x.at<double>(1, 1) = 0;
    sobel_x.at<double>(1, 2) = 0;
    sobel_x.at<double>(2, 0) = 1;
    sobel_x.at<double>(2, 1) = 2;
    sobel_x.at<double>(2, 2) = 1;

    cv::Mat edge = correlationPad(gray_image, sobel_x, "zeros");
    cv::imwrite("edge.jpg", edge); //Save result.

    //Display input and output
    cv::imshow("gray_image", gray_image);
    cv::imshow("edge", edge);
    cv::waitKey(0);
    cv::destroyAllWindows();
    return 0;
}
edge:

Laplacian Filter OpenCV C++

I was learning about filters in OpenCV, but I'm a little confused about the Laplacian filter, because my result is very different from the Laplacian filter in the OpenCV library.
First, I apply a Gaussian filter to the image:
Mat filtroGauss(Mat src){
    Mat gauss = src.clone();
    Mat temp(src.rows+2, src.cols+2, DataType<uchar>::type);
    int y, x;
    for (y = 0; y < src.rows; y++){
        for (x = 0; x < src.cols; x++) temp.at<uchar>(y+1, x+1) = src.at<uchar>(y, x);
    }
    int mask[lenMask*lenMask]; // lenMask is assumed to be a constant equal to 3 (3x3 kernel)
    mask[0] = mask[2] = mask[6] = mask[8] = 1;
    mask[1] = mask[3] = mask[5] = mask[7] = 2;
    mask[4] = 4;
    int denominatore = 0;
    for (int i = 0; i < lenMask*lenMask; i++) denominatore += mask[i];
    int value[lenMask*lenMask];
    for (y = 0; y < src.rows; y++){
        for (x = 0; x < src.cols; x++){
            value[0] = temp.at<uchar>(y-1, x-1)*mask[0];
            value[1] = temp.at<uchar>(y-1, x)*mask[1];
            value[2] = temp.at<uchar>(y-1, x+1)*mask[2];
            value[3] = temp.at<uchar>(y, x-1)*mask[3];
            value[4] = temp.at<uchar>(y, x)*mask[4];
            value[5] = temp.at<uchar>(y, x+1)*mask[5];
            value[6] = temp.at<uchar>(y+1, x-1)*mask[6];
            value[7] = temp.at<uchar>(y+1, x)*mask[7];
            value[8] = temp.at<uchar>(y+1, x+1)*mask[8];
            int avg = 0;
            for (int i = 0; i < lenMask*lenMask; i++) avg += value[i];
            avg = avg/denominatore;
            gauss.at<uchar>(y, x) = avg;
        }
    }
    return gauss;
}
Then I apply the Laplacian function:
L(y,x) = f(y-1,x) + f(y+1,x) + f(y,x-1) + f(y,x+1) - 4*f(y,x)
Mat filtroLaplace(Mat src){
    Mat output = src.clone();
    Mat temp = src.clone();
    int y, x;
    for (y = 1; y < src.rows - 1; y++){
        for (x = 1; x < src.cols - 1; x++){
            output.at<uchar>(y, x) = temp.at<uchar>(y-1, x) + temp.at<uchar>(y+1, x) + temp.at<uchar>(y, x-1) + temp.at<uchar>(y, x+1) - 4*(temp.at<uchar>(y, x));
        }
    }
    return output;
}
And here is the final result from my code:
OpenCV result:
Let's rewrite the function a little, so it's easier to discuss:
cv::Mat filtroLaplace(cv::Mat src)
{
    cv::Mat output = src.clone();
    for (int y = 1; y < src.rows - 1; y++) {
        for (int x = 1; x < src.cols - 1; x++) {
            int sum = src.at<uchar>(y - 1, x)
                + src.at<uchar>(y + 1, x)
                + src.at<uchar>(y, x - 1)
                + src.at<uchar>(y, x + 1)
                - 4 * src.at<uchar>(y, x);
            output.at<uchar>(y, x) = sum;
        }
    }
    return output;
}
The source of your problem is sum. Let's examine its range within this algorithm by taking the two extremes:
Black pixel, surrounded by 4 white. That means 255 + 255 + 255 + 255 - 4 * 0 = 1020.
White pixel, surrounded by 4 black. That means 0 + 0 + 0 + 0 - 4 * 255 = -1020.
When you perform output.at<uchar>(y, x) = sum; there's an implicit cast of the int back to unsigned char -- the high-order bits simply get chopped off and the value wraps around (for example, 1020 becomes 1020 mod 256 = 252, and -1020 wraps around to 4).
The correct approach to handling this situation (which OpenCV takes) is to perform saturation before the actual cast. Essentially:
if (sum < 0) {
    sum = 0;
} else if (sum > 255) {
    sum = 255;
}
OpenCV provides the function cv::saturate_cast<T> to do just this.
There's an additional problem: you're not handling the edge rows/columns of the input image -- you just leave them at the original value. Since you're not asking about that, I'll leave solving it as an exercise for the reader.
Code:
cv::Mat filtroLaplace(cv::Mat src)
{
    cv::Mat output = src.clone();
    for (int y = 1; y < src.rows - 1; y++) {
        for (int x = 1; x < src.cols - 1; x++) {
            int sum = src.at<uchar>(y - 1, x)
                + src.at<uchar>(y + 1, x)
                + src.at<uchar>(y, x - 1)
                + src.at<uchar>(y, x + 1)
                - 4 * src.at<uchar>(y, x);
            output.at<uchar>(y, x) = cv::saturate_cast<uchar>(sum);
        }
    }
    return output;
}
Sample input:
Output of corrected filtroLaplace:
Output of cv::Laplacian:

Painterly Rendering, Clipping Line, I have an error

I'm making a painterly rendering, and now I'm working on the line clipping part.
But I got this error:
(unsigned)(pt.x*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels())
And
template<typename _Tp> inline const _Tp& Mat::at(int i0, int i1) const
{
    CV_DbgAssert( dims <= 2 && data && (unsigned)i0 < (unsigned)size.p[0] &&
        (unsigned)(i1*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels()) &&
        CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1());
    return ((const _Tp*)(data + step.p[0]*i0))[i1];
}
Maybe the error is in Lineclipping().
Please tell me another good way to clip a line.
This is my code. I'm just a student, so my coding skills are very much at a beginner level.
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <sstream>
#include <cmath>
#include <stdio.h>
#include <cstdlib>
#include <time.h>
#include <random>

using namespace cv;
using namespace std;

random_device rd;
mt19937_64 rng(rd());

double PI = 3.141592;
int perturbLength = (rand() % 6) + 1;
int perturbRadius = ((rand() % 5) + 0) / 10;
int perturbAngle = (rand() % 15) + (-15);
int Maxlength = 10 - perturbLength;
int radius = 2 - perturbRadius;
int angle = 45 - perturbAngle;
double theta = angle*(PI / 180);

void Lineclipping(int x, int y, double theta, int len, Point2d& pt1, Point2d& pt2, Mat& EdgeMap)
{
    double length = ceil(len);
    float detectPT = len / length;
    for (int i = detectPT; i <= len;)
    {
        Point2d Mpt1(x + length*cos(theta), y + length*sin(theta));
        if (EdgeMap.at<uchar>(Mpt1.y, Mpt1.x) > 0)
        {
            pt1.x = Mpt1.x;
            pt1.y = Mpt1.y;
        }
        else if (i == length)
        {
            pt1.x = Mpt1.x;
            pt1.y = Mpt1.y;
        }
        i = i + detectPT;
    }
    for (int i = detectPT; i <= len;)
    {
        Point2d Mpt2(x - length*cos(theta), y - length*sin(theta));
        if (EdgeMap.at<uchar>(Mpt2.y, Mpt2.x) > 0)
        {
            pt2.x = Mpt2.x;
            pt2.y = Mpt2.y;
        }
        else if (i == length)
        {
            pt2.x = Mpt2.x;
            pt2.y = Mpt2.y;
        }
        i = i + detectPT;
    }
}
Mat EdgeDetect(Mat& referenceimg, Mat& Edge)
{
    Mat image = referenceimg.clone();
    //Make Edge Map
    Mat IntensityImg(image.size(), CV_8U, 255);
    Mat sobelx, sobely;
    for (int i = 0; i < image.rows; i++)
    {
        for (int j = 0; j < image.cols; j++)
        {
            Vec3b intensity = image.at<Vec3b>(j, i);
            uchar blue = intensity.val[0];
            uchar green = intensity.val[1];
            uchar red = intensity.val[2];
            IntensityImg.at<uchar>(j, i) = (30 * red + 59 * green + 11 * blue) / 100;
        }
    }
    GaussianBlur(IntensityImg, IntensityImg, Size(5, 5), 0.1, 0.1);
    Sobel(IntensityImg, sobelx, CV_32F, 1, 0);
    Sobel(IntensityImg, sobely, CV_32F, 0, 1);
    Mat magnitudeXY = abs(sobelx), abs(sobely);
    magnitudeXY.convertTo(Edge, CV_8U);
    Mat mask(3, 3, CV_8UC1, 1);
    morphologyEx(Edge, Edge, MORPH_ERODE, mask);
    for (int i = 0; i < image.rows; i++)
    {
        for (int j = 0; j < image.cols; j++)
        {
            Edge.at<uchar>(j, i) = (Edge.at<uchar>(j, i) > 20 ? 255 : 0);
        }
    }
    imshow("intensity", Edge);
    return Edge;
}
void paint(Mat &image, int snum)
{
    Mat Edge;
    EdgeDetect(image, Edge);
    for (int n = 0; n < snum; n++)
    {
        int x = rand() % image.cols;
        int y = rand() % image.rows;
        if (image.channels() == 1)
        {
            image.at<uchar>(x, y) = 255;
        }
        else if (image.channels() == 3)
        {
            int length = Maxlength / 2;
            Point2d pt1(x + length*cos(theta), y + length*sin(theta));
            Point2d pt2(x - length*cos(theta), y - length*sin(theta));
            Lineclipping(x, y, theta, length, fpt1, fpt2, Edge);
            //draw line
            Scalar color(image.at<Vec3b>(y, x)[0], image.at<Vec3b>(y, x)[1], image.at<Vec3b>(y, x)[2]);
            line(image, pt1, pt2, color, radius);
        }
    }
}
int main()
{
    Mat Img = imread("fruit.jpg", IMREAD_COLOR);
    CV_Assert(Img.data);
    Mat resultImage = Img.clone();
    Mat sobel = Img.clone();
    int num = Img.rows*Img.cols;
    paint(resultImage, num);
    imshow("result", resultImage);
    waitKey();
    return 0;
}
And these are the error parts:
for (int i = detectPT; i <= len;)
{
    Point2d Mpt1(x + length*cos(theta), y + length*sin(theta));
    if (EdgeMap.at<uchar>(Mpt1.y, Mpt1.x) > 0)
    {
        pt1.x = Mpt1.x;
        pt1.y = Mpt1.y;
    }
    else if (i == length)
    {
        pt1.x = Mpt1.x;
        pt1.y = Mpt1.y;
    }
    i = i + detectPT;
}
for (int i = detectPT; i <= len;)
{
    Point2d Mpt2(x - length*cos(theta), y - length*sin(theta));
    if (EdgeMap.at<uchar>(Mpt2.y, Mpt2.x) > 0)
    {
        pt2.x = Mpt2.x;
        pt2.y = Mpt2.y;
    }
    else if (i == length)
    {
        pt2.x = Mpt2.x;
        pt2.y = Mpt2.y;
    }
    i = i + detectPT;
}
Thank you!
Since I can't compile this and run it, I am going to run through a possible execution and show you where you can hit this out-of-range error.
int perturbLength = (rand() % 6) + 1; // Range is 1 to 6, let's assume 4
int perturbAngle = (rand() % 15) + (-15); // Range is -15 to -1 let's assume -14
int Maxlength = 10 - perturbLength; // 6
int angle = 45 - perturbAngle; // 44
double theta = angle*(PI / 180); // .7679
Now we get into this code inside the paint method:
int x = rand() % image.cols; // Let's assume image.cols - 2
int y = rand() % image.rows; // Let's assume image.rows - 1
Inside of paint we will reach this code:
int length = Maxlength / 2; // Maxlength is 6 so this is 3
Lineclipping(x, y, theta, length, fpt1, fpt2, Edge);
Which leads to the Lineclipping method and here we get a problem:
Point2d Mpt1(x + length*cos(theta), y + length*sin(theta));
if (EdgeMap.at<uchar>(Mpt1.y, Mpt1.x) > 0)
This is the problem. Remember, x is image.cols - 2. Now we perform the operation x + length * cos(theta), which is (image.cols - 2) + 3 * cos(.7679). 3 * cos(.7679) is about 2.16, which gives 2 whether you floor it or round it; added to image.cols - 2 that makes image.cols, and since valid column indices only run up to image.cols - 1, we go beyond the bounds of the array.
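One possible guard (a sketch of my own, not the original poster's code) is to clamp the sample point to the image bounds before calling at<uchar>:

#include <algorithm> // std::min, std::max

// Clamp (Mpt1.x, Mpt1.y) into the valid index range of EdgeMap.
int sx = std::min(std::max((int)Mpt1.x, 0), EdgeMap.cols - 1);
int sy = std::min(std::max((int)Mpt1.y, 0), EdgeMap.rows - 1);
if (EdgeMap.at<uchar>(sy, sx) > 0)
{
    pt1.x = sx;
    pt1.y = sy;
}

(and the same for Mpt2). Whether clamping or skipping out-of-range points is correct depends on what the clipping is supposed to do.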

Implementation of Harris Corner Detector using Sobel and Gaussian Blur in C++

I want to implement the Harris corner detector. I found this page very helpful, since it shows how the detector is implemented using basic OpenCV functions (like GaussianBlur and Sobel):
https://compvisionlab.wordpress.com/2013/03/02/harris-interest-point-detection-implementation-opencv/
Now I want to implement Gaussian blur and Sobel myself as well. If I run my Gaussian or Sobel over some images it works, but in combination with my corner detector it does not. Can anybody please help me? The full code is below, thanks.
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
/// Global variables
Mat src, src_gray, dst;
int thresh = 200;
int max_thresh = 255;
char* source_window = "Source Image";
char* corners_window = "Corner Image";
/// Function header
void cornerHarris_demo(int, void*);
void cornerHarrisMe(int, int, double);
int xGradient(Mat, int, int);
int yGradient(Mat, int, int);
void SobelMe(Mat&,Mat&,int,int);
int borderCheck(int M, int x);
void SepGaussian(Mat&, Mat&, int, int);
/** @function main */
int main(int argc, char** argv)
{
    /// Load source image and convert it to gray
    src = imread("data/a-real-big-church.jpg", 1);
    //Mat src_gray(src.size(), CV_8UC1);
    cvtColor(src, src_gray, CV_BGR2GRAY);

    /// Create a window and a trackbar
    namedWindow(source_window, CV_WINDOW_AUTOSIZE);
    createTrackbar("Threshold: ", source_window, &thresh, max_thresh, cornerHarris_demo);
    imshow(source_window, src);

    cornerHarris_demo(0, 0);
    waitKey(0);
    return(0);
}
/** @function cornerHarris_demo */
void cornerHarris_demo(int, void*)
{
    Mat dst_norm, dst_norm_scaled;

    /// Detector parameters
    int blockSize = 2;
    int apertureSize = 3;
    double k = 0.04;

    /// Detecting corners
    cornerHarrisMe(blockSize, apertureSize, k);

    /// Normalizing
    normalize(dst, dst_norm, 0, 255, NORM_MINMAX, CV_32FC1, Mat());
    convertScaleAbs(dst_norm, dst_norm_scaled);

    /// Drawing a circle around corners
    for (int j = 0; j < dst_norm.rows; j++)
    {
        for (int i = 0; i < dst_norm.cols; i++)
        {
            if ((int)dst_norm.at<float>(j, i) > thresh)
            {
                circle(dst_norm_scaled, Point(i, j), 5, Scalar(255), 2, 8, 0);
            }
        }
    }

    /// Showing the result
    namedWindow(corners_window, CV_WINDOW_AUTOSIZE);
    imshow(corners_window, dst_norm_scaled);
}
void cornerHarrisMe(int blockSize, int apertureSize, double k)
{
    Mat x2y2, xy, mtrace, x_der, y_der, x2_der, y2_der, xy_der, x2g_der, y2g_der, xyg_der;

    //1: calculate x and y derivative of image via Sobel
    SobelMe(src_gray, x_der, 1, 0);
    SobelMe(src_gray, y_der, 0, 1);

    //2: calculate other three images in M
    pow(x_der, blockSize, x2_der);
    pow(y_der, blockSize, y2_der);
    multiply(x_der, y_der, xy_der);

    //3: gaussian
    SepGaussian(x2_der, x2g_der, 1, 0);
    SepGaussian(y2_der, y2g_der, 0, 1);
    SepGaussian(xy_der, xyg_der, 1, 1);

    //4: calculating R with k
    multiply(x2g_der, y2g_der, x2y2);
    multiply(xyg_der, xyg_der, xy);
    pow((x2g_der + y2g_der), blockSize, mtrace);
    dst = (x2y2 - xy) - k * mtrace;
}
// gradient in the x direction
int xGradient(Mat image, int x, int y)
{
    return image.at<uchar>(y - 1, x - 1) +
           2 * image.at<uchar>(y, x - 1) +
           image.at<uchar>(y + 1, x - 1) -
           image.at<uchar>(y - 1, x + 1) -
           2 * image.at<uchar>(y, x + 1) -
           image.at<uchar>(y + 1, x + 1);
}

// gradient in the y direction
int yGradient(Mat image, int x, int y)
{
    return image.at<uchar>(y - 1, x - 1) +
           2 * image.at<uchar>(y - 1, x) +
           image.at<uchar>(y - 1, x + 1) -
           image.at<uchar>(y + 1, x - 1) -
           2 * image.at<uchar>(y + 1, x) -
           image.at<uchar>(y + 1, x + 1);
}
void SobelMe(Mat& source, Mat& destination, int xOrder, int yOrder){
    int gradX, gradY, sum;
    destination = source.clone();
    if (xOrder == 1 && yOrder == 0){
        for (int y = 1; y < source.rows - 1; y++){
            for (int x = 1; x < source.cols - 1; x++){
                gradX = xGradient(source, x, y);
                sum = abs(gradX);
                sum = sum > 255 ? 255 : sum;
                sum = sum < 0 ? 0 : sum;
                destination.at<uchar>(y, x) = sum;
            }
        }
    }
    else if (xOrder == 0 && yOrder == 1){
        for (int y = 1; y < source.rows - 1; y++){
            for (int x = 1; x < source.cols - 1; x++){
                gradY = yGradient(source, x, y);
                sum = abs(gradY);
                sum = sum > 255 ? 255 : sum;
                sum = sum < 0 ? 0 : sum;
                destination.at<uchar>(y, x) = sum;
            }
        }
    }
    else if (xOrder == 1 && yOrder == 1){
        for (int y = 1; y < source.rows - 1; y++){
            for (int x = 1; x < source.cols - 1; x++){
                gradX = xGradient(source, x, y);
                gradY = yGradient(source, x, y);
                sum = abs(gradX) + abs(gradY);
                sum = sum > 255 ? 255 : sum;
                sum = sum < 0 ? 0 : sum;
                destination.at<uchar>(y, x) = sum;
            }
        }
    }
}
int borderCheck(int M, int x){
    if (x < 0)
        return -x - 1;
    if (x >= M)
        return 2 * M - x - 1;
    return x;
}
void SepGaussian(Mat& source, Mat& destination, int sigmaX, int sigmaY){
    // coefficients of 1D gaussian kernel with sigma = 1
    double coeffs[] = { 0.0545, 0.2442, 0.4026, 0.2442, 0.0545 };
    Mat tempX, tempY;
    float sum, x1, y1;
    destination = source.clone();
    tempY = source.clone();
    tempX = source.clone();
    // along y - direction
    if (sigmaX == 0 && sigmaY == 1){
        for (int y = 0; y < source.rows; y++){
            for (int x = 0; x < source.cols; x++){
                sum = 0.0;
                for (int i = -2; i <= 2; i++){
                    y1 = borderCheck(source.rows, y - i);
                    sum = sum + coeffs[i + 2] * source.at<uchar>(y1, x);
                }
                destination.at<uchar>(y, x) = sum;
            }
        }
    }
    // along x - direction
    else if (sigmaX == 1 && sigmaY == 0){
        for (int y = 0; y < source.rows; y++){
            for (int x = 0; x < source.cols; x++){
                sum = 0.0;
                for (int i = -2; i <= 2; i++){
                    x1 = borderCheck(source.cols, x - i);
                    sum = sum + coeffs[i + 2] * source.at<uchar>(y, x1);
                }
                destination.at<uchar>(y, x) = sum;
            }
        }
    }
    // along xy - direction
    else if (sigmaX == 1 && sigmaY == 1){
        for (int y = 0; y < source.rows; y++){
            for (int x = 0; x < source.cols; x++){
                sum = 0.0;
                for (int i = -2; i <= 2; i++){
                    y1 = borderCheck(source.rows, y - i);
                    sum = sum + coeffs[i + 2] * source.at<uchar>(y1, x);
                }
                tempY.at<uchar>(y, x) = sum;
            }
        }
        for (int y = 0; y < source.rows; y++){
            for (int x = 0; x < source.cols; x++){
                sum = 0.0;
                for (int i = -2; i <= 2; i++){
                    x1 = borderCheck(source.cols, x - i);
                    sum = sum + coeffs[i + 2] * tempY.at<uchar>(y, x1);
                }
                destination.at<uchar>(y, x) = sum;
            }
        }
    }
}
The result:
Here is a picture of the result.
The result is now the other way around: it detects areas where there are no corners.
In case there are any questions, feel free to ask.