Image Shearing C++

I'm trying to shear an image along the X-axis, using OpenCV to load the image and the following algorithm to shear it: x' = x + y·Bx. But for some reason, I end up with the following shear:
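As an aside, the same X-shear can also be written as a single affine warp. This is only a minimal sketch, not the original code; it assumes OpenCV's cv::warpAffine and Bx >= 0, so the canvas only needs to grow to the right:
// Sketch: X-shear via cv::warpAffine (x' = x + Bx*y, y' = y).
// Needs <opencv2/opencv.hpp> and <cmath>; assumes Bx >= 0.
cv::Mat shearX(const cv::Mat& src, float Bx)
{
    cv::Mat M = (cv::Mat_<double>(2, 3) << 1, Bx, 0,
                                           0, 1,  0);
    int newWidth = src.cols + (int)std::ceil(Bx * src.rows); // widen the canvas to fit
    cv::Mat dst;
    cv::warpAffine(src, dst, M, cv::Size(newWidth, src.rows));
    return dst;
}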
My source code looks like this:
#include "stdafx.h"
#include "opencv2\opencv.hpp"
using namespace std;
using namespace cv;
int main()
{
Mat src = imread("B2DBy.jpg", 1);
if (src.empty())
cout << "Error: Loading image" << endl;
int r1, c1; // tranformed point
int rows, cols; // original image rows and columns
rows = src.rows;
cols = src.cols;
float Bx = 2; // amount of shearing in x-axis
float By = 0; // amount of shearing in y-axis
int maxXOffset = abs(cols * Bx);
int maxYOffset = abs(rows * By);
Mat out = Mat::ones(src.rows + maxYOffset, src.cols + maxXOffset, src.type()); // create output image to be the same as the source
for (int r = 0; r < out.rows; r++) // loop through the image
{
for (int c = 0; c < out.cols; c++)
{
r1 = r + c * By - maxYOffset; // map old point to new
c1 = r * Bx + c - maxXOffset;
if (r1 >= 0 && r1 <= out.rows && c1 >= 0 && c1 <= out.cols) // check if the point is within the boundaries
{
out.at<uchar>(r, c) = src.at<uchar>(r1, c1); // set value
}
}
}
namedWindow("Source image", CV_WINDOW_AUTOSIZE);
namedWindow("Rotated image", CV_WINDOW_AUTOSIZE);
imshow("Source image", src);
imshow("Rotated image", out);
waitKey(0);
return 0;
}
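A side note, anticipating the answer below: src is loaded with flag 1, i.e. as a 3-channel BGR image, so reading and writing it with at<uchar> touches only one byte per pixel. A minimal sketch of the two consistent alternatives (fragment, reusing r1/c1 from above):
// Option 1: load as grayscale and keep single-byte access.
Mat gray = imread("B2DBy.jpg", 0);
uchar v = gray.at<uchar>(r1, c1);
// Option 2: keep the color image and read whole pixels as cv::Vec3b.
Mat color = imread("B2DBy.jpg", 1);
Vec3b px = color.at<Vec3b>(r1, c1);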
EDIT
Fixed it myself.
Didn't need to subtract the offset. Here's the updated source code:
Mat forward(Mat img) {
    Mat umg = img;
    int y1, x1;     // transformed point
    int rows, cols; // original image rows and columns
    rows = umg.rows;
    cols = umg.cols;
    float Bx = 0.7; // amount of shearing in x-axis
    float By = 0;   // amount of shearing in y-axis
    int maxXOffset = abs(rows * Bx);
    int maxYOffset = abs(cols * By);
    Mat out = Mat::ones(rows + maxYOffset, cols + maxXOffset, umg.type()); // create output image to be the same as the source
    for (int y = 0; y < rows; y++) // loop through the image
    {
        for (int x = 0; x < cols; x++)
        {
            y1 = y + x * By; // map old point to new
            x1 = y * Bx + x;
            out.at<uchar>(y1, x1) = umg.at<uchar>(y, x); // set value
        }
    }
    return out;
}

Mat backwards(Mat img) {
    Mat umg = img;
    int y1, x1;     // transformed point
    int rows, cols; // original image rows and columns
    rows = umg.rows;
    cols = umg.cols;
    float Bx = 0.7; // amount of shearing in x-axis
    float By = 0;   // amount of shearing in y-axis
    int maxXOffset = abs(rows * Bx);
    int maxYOffset = abs(cols * By);
    Mat out = Mat::ones(rows + maxYOffset, cols + maxXOffset, umg.type()); // create output image to be the same as the source
    for (int y = 0; y < rows; y++) // loop through the image
    {
        for (int x = 0; x < cols; x++)
        {
            //y1 = y + x * By; // map old point to new
            //x1 = y * Bx + x;
            y1 = (1 / (1 - Bx * By)) * (y + x * By);
            x1 = (1 / (1 - Bx * By)) * (y * Bx + x);
            out.at<uchar>(y1, x1) = umg.at<uchar>(y, x); // set value
        }
    }
    return out;
}

int main()
{
    Mat src = imread("B2DBy.jpg", 0);
    if (src.empty())
        cout << "Error: Loading image" << endl;
    Mat forwards = forward(src);
    Mat back = backwards(src);
    namedWindow("Source image", CV_WINDOW_NORMAL);
    imshow("Source image", src);
    imshow("back", back);
    imshow("forward image", forwards);
    waitKey(0);
    return 0;
}
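One caveat on this updated code (an editorial note, not the original poster's): y * Bx is truncated toward zero when assigned to the int x1, and a forward mapping with fractional Bx can leave unwritten pixels in the output. Rounding is the easy half of that fix, sketched here as a fragment of the loop above using OpenCV's cvRound:
// Round instead of truncating when mapping to integer pixel coordinates.
y1 = cvRound(y + x * By);
x1 = cvRound(y * Bx + x);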

I found some time to work on this.
Now I understand what you tried to achieve with the offset computation, but I'm not sure whether yours is correct.
Just change all the cv::Vec3b to unsigned char or uchar and load as grayscale, if wanted.
Please try this code and maybe you'll find your error:
// no interpolation yet
// cv::Vec3b only
cv::Mat shear(const cv::Mat & input, float Bx, float By)
{
    if (Bx * By == 1)
    {
        throw("Shearing: Bx*By==1 is forbidden");
    }
    if (input.type() != CV_8UC3) return cv::Mat();
    // shearing:
    // x' = x + y*Bx
    // y' = y + x*By
    // shear the extreme positions to find out the new image size:
    std::vector<cv::Point2f> extremePoints;
    extremePoints.push_back(cv::Point2f(0, 0));
    extremePoints.push_back(cv::Point2f(input.cols, 0));
    extremePoints.push_back(cv::Point2f(input.cols, input.rows));
    extremePoints.push_back(cv::Point2f(0, input.rows));
    for (unsigned int i = 0; i < extremePoints.size(); ++i)
    {
        cv::Point2f & pt = extremePoints[i];
        pt = cv::Point2f(pt.x + pt.y * Bx, pt.y + pt.x * By);
    }
    cv::Rect offsets = cv::boundingRect(extremePoints);
    cv::Point2f offset = -offsets.tl();
    cv::Size resultSize = offsets.size();
    cv::Mat shearedImage = cv::Mat::zeros(resultSize, input.type()); // every pixel here is implicitly shifted by "offset"
    // perform the shearing by back-transformation
    for (int j = 0; j < shearedImage.rows; ++j)
    {
        for (int i = 0; i < shearedImage.cols; ++i)
        {
            cv::Point2f pp(i, j);
            pp = pp - offset; // go back to the original coordinate system
            // go back to the original pixel:
            // x' = x + y*Bx
            // y' = y + x*By
            // y = y' - x*By
            // x = x' - (y' - x*By)*Bx
            // x = x*By*Bx - y'*Bx + x'
            // x*(1 - By*Bx) = -y'*Bx + x'
            // x = (-y'*Bx + x') / (1 - By*Bx)
            cv::Point2f p;
            p.x = (-pp.y * Bx + pp.x) / (1 - By * Bx);
            p.y = pp.y - p.x * By;
            if ((p.x >= 0 && p.x < input.cols) && (p.y >= 0 && p.y < input.rows))
            {
                // TODO: interpolate, if wanted (p has floating point precision and can land between two pixels)!
                shearedImage.at<cv::Vec3b>(j, i) = input.at<cv::Vec3b>(p);
            }
        }
    }
    return shearedImage;
}
int main(int argc, char* argv[])
{
    cv::Mat input = cv::imread("C:/StackOverflow/Input/Lenna.png");
    cv::Mat output = shear(input, 0.7, 0);
    //cv::Mat output = shear(input, -0.7, 0);
    //cv::Mat output = shear(input, 0, 0.7);
    cv::imshow("input", input);
    cv::imshow("output", output);
    cv::waitKey(0);
    return 0;
}
Giving me these outputs for the 3 sample lines:
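Beyond those sample outputs, the TODO about interpolation could be filled in like this. A minimal sketch (an addition, not part of the original answer) assuming the same CV_8UC3 input as the shear function above; it replaces the nearest-neighbor read with a bilinear sample:
// Bilinear sample at a floating-point position p; assumes p lies inside img.
// Needs <cmath> and <algorithm>.
cv::Vec3b sampleBilinear(const cv::Mat& img, cv::Point2f p)
{
    int x0 = (int)std::floor(p.x), y0 = (int)std::floor(p.y);
    int x1 = std::min(x0 + 1, img.cols - 1);
    int y1 = std::min(y0 + 1, img.rows - 1);
    float fx = p.x - x0, fy = p.y - y0;
    cv::Vec3f a = img.at<cv::Vec3b>(y0, x0), b = img.at<cv::Vec3b>(y0, x1);
    cv::Vec3f c = img.at<cv::Vec3b>(y1, x0), d = img.at<cv::Vec3b>(y1, x1);
    cv::Vec3f v = (a * (1 - fx) + b * fx) * (1 - fy) + (c * (1 - fx) + d * fx) * fy;
    return cv::Vec3b(v); // saturating cast back to 8 bit
}
// usage inside the loop: shearedImage.at<cv::Vec3b>(j, i) = sampleBilinear(input, p);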

Related

OpenCV Using a loop to sum a part of your image error?

I want to move through an image and take a 5x5 grid centered on each pixel. I then want to sum that grid and compare it to a threshold.
int main()
{
    Mat element = getStructuringElement(MORPH_RECT, Size(7, 7));
    Mat im = imread("blob.png", IMREAD_GRAYSCALE);
    bool fromCenter = false;
    namedWindow("Crop frame", WINDOW_NORMAL);
    Rect2d r = selectROI("Crop frame", im, fromCenter);
    im = im(r);
    erode(im, im, element);
    Mat clone = im;
    int sectionSize = 4;
    int width = im.cols - sectionSize / 2;
    int height = im.rows - sectionSize / 2;
    int sum = 0;
    int counter = 0;
    for (int i = sectionSize / 2; i < width; i++) {
        for (int j = sectionSize / 2; j < height; j++) {
            Rect rect = Rect(i, j, sectionSize, sectionSize);
            rect -= Point(rect.width / 2, rect.height / 2);
            Mat temp = im(rect);
            for (int x = 0; x < temp.cols; x++) {
                for (int y = 0; y < temp.rows; y++) {
                    int pixelValue = (int)temp.at<uchar>(y, x);
                    sum += pixelValue;
                }
            }
            cout << sum << endl;
            if (sum > 3800) {
                clone.at<uchar>(j, i) = 255;
            }
            else {
                clone.at<uchar>(j, i) = 0;
            }
            namedWindow("erode", WINDOW_NORMAL);
            imshow("erode", clone);
            waitKey(1);
            sum = 0;
        }
    }
}
I am getting fluctuations in the pixel sum based on where I select my ROI in the image, even when both selections are over white space. Also, my pixel sum changes when I change the value of the clone pixel in this section of the code, which I do not understand at all:
if (sum > 3800) {
    clone.at<uchar>(j, i) = 255;
}
else {
    clone.at<uchar>(j, i) = 0;
}
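No answer is recorded here, but one likely explanation of the second symptom, offered as a hedged aside: in OpenCV, Mat clone = im; only copies the Mat header, so clone and im share the same pixel buffer, and writing 255/0 into clone also rewrites im, and therefore the sums of later overlapping windows. A deep copy avoids that feedback:
Mat shallow = im;           // header copy: shallow and im share one pixel buffer
Mat deep = im.clone();      // deep copy: separate buffer, writes don't feed back
deep.at<uchar>(0, 0) = 255; // changes deep only; im is untouched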

Gaussian smoothing output misaligned

I am trying to perform Gaussian smoothing on this image without using any OpenCV function (except for displaying the image).
However, the output I got after convolving the image with the Gaussian kernel is as follows:
The output image seems to be misaligned and looks very weird. Any idea what is happening?
Generate the Gaussian kernel:
double gaussian(int x, int y, double sigma){
    return (1/(2*M_PI*pow(sigma,2)))*exp(-1*(pow(x,2)+pow(y,2))/(2*pow(sigma,2)));
}

double generateFilter(vector<vector<double>> & kernel, int width, double sigma){
    int value = 0;
    double total = 0;
    if(width%2 == 1){
        value = (width-1)/2;
    }else{
        value = width/2;
    }
    double smallest = gaussian(-1*value, -1*value, sigma);
    for(int i = -1*value; i <= value; i++){
        vector<double> temp;
        for(int k = -1*value; k <= value; k++){
            int gVal = round(gaussian(i,k,sigma)/smallest);
            temp.push_back(gVal);
            total += gVal;
        }
        kernel.push_back(temp);
    }
    cout << total << endl;
    return total;
}
Convolution:
vector<vector<unsigned int>> convolution(vector<vector<unsigned int>> src, vector<vector<double>> kernel, double total){
    int kCenterX = floor(kernel.size() / 2); //center of kernel
    int kCenterY = kCenterX;   //center of kernel
    int kRows = kernel.size(); //height of kernel
    int kCols = kRows;         //width of kernel
    int imgRows = src.size();  //height of input image
    int imgCols = src[0].size(); //width of input image
    vector<vector<unsigned int>> dst = vector<vector<unsigned int>>(imgRows, vector<unsigned int>(imgCols, 0));
    for ( size_t row = 0; row < imgRows; row++ ) {
        for ( size_t col = 0; col < imgCols; col++ ) {
            float accumulation = 0;
            float weightsum = 0;
            for ( int i = -1*kCenterX; i <= 1*kCenterX; i++ ) {
                for ( int j = -1*kCenterY; j <= 1*kCenterY; j++ ) {
                    int k = 0;
                    if((row+i) >= 0 && (row+i) < imgRows && (col+j) >= 0 && (col+j) < imgCols){
                        k = src[row+i][col+j];
                        weightsum += kernel[kCenterX+i][kCenterY+j];
                    }
                    accumulation += k * kernel[kCenterX+i][kCenterY+j];
                }
            }
            dst[row][col] = round(accumulation/weightsum);
        }
    }
    return dst;
}
Thank you.
The convolution function is basically correct, so the issue is with the input and output format.
Make sure you are reading the image as Grayscale (and not RGB):
cv::Mat I = cv::imread("img.png", cv::IMREAD_GRAYSCALE);
You are passing a vector<vector<unsigned int>> argument to convolution.
I can't say whether it's part of the problem or not, but it's recommended to pass an argument of type cv::Mat (and return a cv::Mat):
cv::Mat convolution(cv::Mat src, vector<vector<double>> kernel, double total)
I assume you can convert the input to and from vector<vector<unsigned int>>, but it's not necessary.
Here is a working code sample:
#include <vector>
#include <iostream>
#include "opencv2/opencv.hpp"
#include "opencv2/highgui.hpp"
using namespace std;
double gaussian(int x, int y, double sigma) {
return (1 / (2 * 3.141592653589793*pow(sigma, 2)))*exp(-1 * (pow(x, 2) + pow(y, 2)) / (2 * pow(sigma, 2)));
}
double generateFilter(vector<vector<double>> & kernel, int width, double sigma)
{
int value = 0;
double total = 0;
if (width % 2 == 1) {
value = (width - 1) / 2;
}
else {
value = width / 2;
}
double smallest = gaussian(-1 * value, -1 * value, sigma);
for (int i = -1 * value; i <= value; i++) {
vector<double> temp;
for (int k = -1 * value; k <= value; k++) {
int gVal = round(gaussian(i, k, sigma) / smallest);
temp.push_back(gVal);
total += gVal;
}
kernel.push_back(temp);
}
cout << total << endl;
return total;
}
//vector<vector<unsigned int>> convolution(vector<vector<unsigned int>> src, vector<vector<double>> kernel, double total) {
cv::Mat convolution(cv::Mat src, vector<vector<double>> kernel, double total) {
int kCenterX = floor(kernel.size() / 2); //center of kernel
int kCenterY = kCenterX; //center of kernel
int kRows = kernel.size(); //height of kernel
int kCols = kRows; //width of kernel
int imgRows = src.rows;//src.size(); //height of input image
int imgCols = src.cols;//src[0].size(); //width of input image
//vector<vector<unsigned int>> dst = vector<vector<unsigned int>> (imgRows, vector<unsigned int>(imgCols ,0));
cv::Mat dst = cv::Mat::zeros(src.size(), CV_8UC1); //Create destination matrix, and fill with zeros (dst is Grayscale image with byte per pixel).
for (size_t row = 0; row < imgRows; row++) {
for (size_t col = 0; col < imgCols; col++) {
double accumulation = 0;
double weightsum = 0;
for (int i = -1 * kCenterX; i <= 1 * kCenterX; i++) {
for (int j = -1 * kCenterY; j <= 1 * kCenterY; j++) {
int k = 0;
if ((row + i) >= 0 && (row + i) < imgRows && (col + j) >= 0 && (col + j) < imgCols) {
//k = src[row+i][col+j];
k = (int)src.at<uchar>(row + i, col + j); //Read pixel from row [row + i] and column [col + j]
weightsum += kernel[kCenterX + i][kCenterY + j];
}
accumulation += (double)k * kernel[kCenterX + i][kCenterY + j];
}
}
//dst[row][col] = round(accumulation/weightsum);
dst.at<uchar>(row, col) = (uchar)round(accumulation / weightsum); //Write pixel from to row [row] and column [col]
//dst.at<uchar>(row, col) = src.at<uchar>(row, col);
}
}
return dst;
}
int main()
{
vector<vector<double>> kernel;
double total = generateFilter(kernel, 11, 3.0);
//Read input image as Grayscale (one byte per pixel).
cv::Mat I = cv::imread("img.png", cv::IMREAD_GRAYSCALE);
cv::Mat J = convolution(I, kernel, total);
//Display input and output
cv::imshow("I", I);
cv::imshow("J", J);
cv::waitKey(0);
cv::destroyAllWindows();
return 0;
}
Result:
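As a sanity check (a suggestion, not part of the original answer), the hand-rolled pipeline can be compared against OpenCV's built-in cv::GaussianBlur with the same kernel width and sigma; apart from the image borders, where the border handling differs, the two outputs should be close:
cv::Mat ref, diff;
cv::GaussianBlur(I, ref, cv::Size(11, 11), 3.0); // same width/sigma as generateFilter(kernel, 11, 3.0)
cv::absdiff(J, ref, diff);
double minVal, maxVal;
cv::minMaxLoc(diff, &minVal, &maxVal);
std::cout << "max abs difference: " << maxVal << std::endl;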

Cannot segment an RGB image by pointer-based access to pixel intensities

I define a function void segRgb(Mat &src, Mat &dst, Rect roi), with which I try to segment the region of interest (ROI) of an input RGB image by simply thresholding a lumped pixel intensity derived from the R, G and B channels. Here is the code of the function:
void segRgb(Mat &src, Mat &dst, Rect roi)
{
    uchar *bgrdata = src.data;
    uchar *outdata = dst.data;
    int ystart = roi.y;
    int yend = roi.y + roi.height;
    int xstart = roi.x;
    int xend = roi.x + roi.width;
    int step1 = src.cols - roi.width;
    int step3 = 3 * step1;
    int start1 = roi.y*src.cols + roi.x;
    int start3 = 3 * start1;
    bgrdata += start3;
    outdata += start1;
    uchar r, g, b;
    double score = 0.0;
    for (int i = ystart; i < yend; i++)
    {
        qDebug() << "Rows: " << i;
        for (int j = xstart; j < xend; j++)
        {
            b = *bgrdata++;
            g = *bgrdata++;
            r = *bgrdata++;
            score = 0.21*r + 0.72*g + 0.07*b; //a simple rule to lump RGB values
            if (score > 100)
            {
                *outdata = 255;
            }
            else
            {
                *outdata = 0;
            }
            outdata++;
        }
        outdata += step1;
        bgrdata += step3;
    }
}
Following is my test code for the function:
Rect cvRect = Rect(10, 50, 256, 256);
Mat dst;
segRgb(im, dst, cvRect); //im is a loaded Matrix of 427*640*3, CV_8UC3
namedWindow("Thresholded");
imshow("Thresholded", dst);
I run the code above, but the function segRgb does not work for some reason. No image is shown, and in fact the loop inside segRgb does not even proceed. Can anyone point out the problem and help me debug the code a bit? Thanks!
void segRgb(Mat &src, Mat &dst, Rect roi)
{
    uchar *bgrdata = src.data;
    uchar *outdata = dst.data;
    int ystart = roi.y;
    int yend = roi.y + roi.height;
    int xstart = roi.x;
    int xend = roi.x + roi.width;
    int step1 = src.cols - roi.width;
    int step3 = 3 * step1;
    int start1 = roi.y*src.cols + roi.x;
    int start3 = 3 * start1;
    bgrdata += start3;
    outdata += start1;
    uchar r, g, b;
    double score = 0.0;
    for (int i = ystart; i < yend; i++)
    {
        cout << "Rows: " << i;
        for (int j = xstart; j < xend; j++)
        {
            b = *bgrdata++;
            g = *bgrdata++;
            r = *bgrdata++;
            score = 0.21*r + 0.72*g + 0.07*b; //a simple rule to lump RGB values
            if (score > 100)
            {
                *outdata = 255;
            }
            else
            {
                *outdata = 0;
            }
            outdata++;
        }
        outdata += step1;
        bgrdata += step3;
    }
}

int main() {
    Mat im = imread("urimage");
    Rect cvRect = Rect(10, 50, 256, 256);
    // you have to allocate a size for the dst Mat, otherwise the uchar* output pointer above will point at garbage
    Mat dst(im.size(), im.type());
    segRgb(im, dst, cvRect); //im is a loaded Matrix of 427*640*3, CV_8UC3
    // Resize your dst, or change your function parameters a bit to get it directly
    dst = Mat(dst, cvRect);
    namedWindow("Thresholded");
    imshow("Thresholded", dst);
    waitKey(0);
}
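As a further aside beyond the fix above: Mat::ptr gives a per-row pointer and copes with padded rows (Mat::step may be larger than cols*channels, which the running-pointer arithmetic above silently assumes away). A minimal sketch of the same thresholding written with row pointers, assuming a CV_8UC3 input and producing a CV_8UC1 output:
void segRgbPtr(const cv::Mat& src, cv::Mat& dst, cv::Rect roi)
{
    dst = cv::Mat::zeros(src.size(), CV_8UC1);
    for (int y = roi.y; y < roi.y + roi.height; ++y)
    {
        const uchar* bgr = src.ptr<uchar>(y) + 3 * roi.x; // start of the ROI in this row
        uchar* out = dst.ptr<uchar>(y) + roi.x;
        for (int x = 0; x < roi.width; ++x, bgr += 3)
        {
            double score = 0.21 * bgr[2] + 0.72 * bgr[1] + 0.07 * bgr[0]; // R, G, B in BGR order
            out[x] = score > 100 ? 255 : 0;
        }
    }
}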

Implementation of Harris Corner Detector using Sobel and Gaussian Blur in C++

I want to implement the Harris corner detector. I found this page to be very helpful, since it shows how the detector is implemented using basic OpenCV functions (like GaussianBlur and Sobel):
https://compvisionlab.wordpress.com/2013/03/02/harris-interest-point-detection-implementation-opencv/
Now I want to implement Gaussian blur and Sobel myself as well. If I run my Gaussian or my Sobel over some images on its own, it works, but in combination with my corner detector it does not. Can anybody please help me? The full code is below, thanks.
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
/// Global variables
Mat src, src_gray, dst;
int thresh = 200;
int max_thresh = 255;
char* source_window = "Source Image";
char* corners_window = "Corner Image";
/// Function header
void cornerHarris_demo(int, void*);
void cornerHarrisMe(int, int, double);
int xGradient(Mat, int, int);
int yGradient(Mat, int, int);
void SobelMe(Mat&,Mat&,int,int);
int borderCheck(int M, int x);
void SepGaussian(Mat&, Mat&, int, int);
/** #function main */
int main(int argc, char** argv)
{
/// Load source image and convert it to gray
src = imread("data/a-real-big-church.jpg", 1);
//Mat src_gray(src.size(), CV_8UC1);
cvtColor(src, src_gray, CV_BGR2GRAY);
/// Create a window and a trackbar
namedWindow(source_window, CV_WINDOW_AUTOSIZE);
createTrackbar("Threshold: ", source_window, &thresh, max_thresh, cornerHarris_demo);
imshow(source_window, src);
cornerHarris_demo(0, 0);
waitKey(0);
return(0);
}
/** @function cornerHarris_demo */
void cornerHarris_demo(int, void*)
{
    Mat dst_norm, dst_norm_scaled;
    /// Detector parameters
    int blockSize = 2;
    int apertureSize = 3;
    double k = 0.04;
    /// Detecting corners
    cornerHarrisMe(blockSize, apertureSize, k);
    /// Normalizing
    normalize(dst, dst_norm, 0, 255, NORM_MINMAX, CV_32FC1, Mat());
    convertScaleAbs(dst_norm, dst_norm_scaled);
    /// Drawing a circle around corners
    for (int j = 0; j < dst_norm.rows; j++)
    {
        for (int i = 0; i < dst_norm.cols; i++)
        {
            if ((int)dst_norm.at<float>(j, i) > thresh)
            {
                circle(dst_norm_scaled, Point(i, j), 5, Scalar(255), 2, 8, 0);
            }
        }
    }
    /// Showing the result
    namedWindow(corners_window, CV_WINDOW_AUTOSIZE);
    imshow(corners_window, dst_norm_scaled);
}
void cornerHarrisMe(int blockSize, int apertureSize, double k)
{
    Mat x2y2, xy, mtrace, x_der, y_der, x2_der, y2_der, xy_der, x2g_der, y2g_der, xyg_der;
    //1: calculate the x and y derivatives of the image via Sobel
    SobelMe(src_gray, x_der, 1, 0);
    SobelMe(src_gray, y_der, 0, 1);
    //2: calculate the other three images in M
    pow(x_der, blockSize, x2_der);
    pow(y_der, blockSize, y2_der);
    multiply(x_der, y_der, xy_der);
    //3: Gaussian
    SepGaussian(x2_der, x2g_der, 1, 0);
    SepGaussian(y2_der, y2g_der, 0, 1);
    SepGaussian(xy_der, xyg_der, 1, 1);
    //4: calculate R with k
    multiply(x2g_der, y2g_der, x2y2);
    multiply(xyg_der, xyg_der, xy);
    pow((x2g_der + y2g_der), blockSize, mtrace);
    dst = (x2y2 - xy) - k * mtrace;
}
// gradient in the x direction
int xGradient(Mat image, int x, int y)
{
    return image.at<uchar>(y - 1, x - 1) +
           2 * image.at<uchar>(y, x - 1) +
           image.at<uchar>(y + 1, x - 1) -
           image.at<uchar>(y - 1, x + 1) -
           2 * image.at<uchar>(y, x + 1) -
           image.at<uchar>(y + 1, x + 1);
}

// gradient in the y direction
int yGradient(Mat image, int x, int y)
{
    return image.at<uchar>(y - 1, x - 1) +
           2 * image.at<uchar>(y - 1, x) +
           image.at<uchar>(y - 1, x + 1) -
           image.at<uchar>(y + 1, x - 1) -
           2 * image.at<uchar>(y + 1, x) -
           image.at<uchar>(y + 1, x + 1);
}
void SobelMe(Mat& source, Mat& destination, int xOrder, int yOrder){
    int gradX, gradY, sum;
    destination = source.clone();
    if (xOrder == 1 && yOrder == 0){
        for (int y = 1; y < source.rows - 1; y++){
            for (int x = 1; x < source.cols - 1; x++){
                gradX = xGradient(source, x, y);
                sum = abs(gradX);
                sum = sum > 255 ? 255 : sum;
                sum = sum < 0 ? 0 : sum;
                destination.at<uchar>(y, x) = sum;
            }
        }
    }
    else if (xOrder == 0 && yOrder == 1){
        for (int y = 1; y < source.rows - 1; y++){
            for (int x = 1; x < source.cols - 1; x++){
                gradY = yGradient(source, x, y);
                sum = abs(gradY);
                sum = sum > 255 ? 255 : sum;
                sum = sum < 0 ? 0 : sum;
                destination.at<uchar>(y, x) = sum;
            }
        }
    }
    else if (xOrder == 1 && yOrder == 1){
        for (int y = 1; y < source.rows - 1; y++){
            for (int x = 1; x < source.cols - 1; x++){
                gradX = xGradient(source, x, y);
                gradY = yGradient(source, x, y);
                sum = abs(gradX) + abs(gradY);
                sum = sum > 255 ? 255 : sum;
                sum = sum < 0 ? 0 : sum;
                destination.at<uchar>(y, x) = sum;
            }
        }
    }
}
int borderCheck(int M, int x){
    if (x < 0)
        return -x - 1;
    if (x >= M)
        return 2 * M - x - 1;
    return x;
}
void SepGaussian(Mat& source, Mat& destination, int sigmaX, int sigmaY){
    // coefficients of the 1D Gaussian kernel with sigma = 1
    double coeffs[] = { 0.0545, 0.2442, 0.4026, 0.2442, 0.0545 };
    Mat tempX, tempY;
    float sum, x1, y1;
    destination = source.clone();
    tempY = source.clone();
    tempX = source.clone();
    // along the y direction
    if (sigmaX == 0 && sigmaY == 1){
        for (int y = 0; y < source.rows; y++){
            for (int x = 0; x < source.cols; x++){
                sum = 0.0;
                for (int i = -2; i <= 2; i++){
                    y1 = borderCheck(source.rows, y - i);
                    sum = sum + coeffs[i + 2] * source.at<uchar>(y1, x);
                }
                destination.at<uchar>(y, x) = sum;
            }
        }
    }
    // along the x direction
    else if (sigmaX == 1 && sigmaY == 0){
        for (int y = 0; y < source.rows; y++){
            for (int x = 0; x < source.cols; x++){
                sum = 0.0;
                for (int i = -2; i <= 2; i++){
                    x1 = borderCheck(source.cols, x - i);
                    sum = sum + coeffs[i + 2] * source.at<uchar>(y, x1);
                }
                destination.at<uchar>(y, x) = sum;
            }
        }
    }
    // along both directions
    else if (sigmaX == 1 && sigmaY == 1){
        for (int y = 0; y < source.rows; y++){
            for (int x = 0; x < source.cols; x++){
                sum = 0.0;
                for (int i = -2; i <= 2; i++){
                    y1 = borderCheck(source.rows, y - i);
                    sum = sum + coeffs[i + 2] * source.at<uchar>(y1, x);
                }
                tempY.at<uchar>(y, x) = sum;
            }
        }
        for (int y = 0; y < source.rows; y++){
            for (int x = 0; x < source.cols; x++){
                sum = 0.0;
                for (int i = -2; i <= 2; i++){
                    x1 = borderCheck(source.cols, x - i);
                    sum = sum + coeffs[i + 2] * tempY.at<uchar>(y, x1);
                }
                destination.at<uchar>(y, x) = sum;
            }
        }
    }
}
The result:
Here is a picture of the result.
The result is now the other way around: it detects areas where there are no corners.
In case there are any questions, feel free to ask.
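No answer is recorded here, but one hedged observation: SobelMe clones the 8-bit source, so the signed gradients, and later the pow()/multiply() results, are stored as uchar, which clips negative values and saturates squares at 255; that alone could invert or destroy the corner response. A minimal sketch of working in floats instead (a guess at the fix, not a confirmed one):
// Keep gradients in CV_32F so they can be negative and squares don't saturate.
cv::Mat gray32;
src_gray.convertTo(gray32, CV_32F, 1.0 / 255.0); // scale to [0, 1]
cv::Mat x_der = cv::Mat::zeros(gray32.size(), CV_32F);
for (int y = 1; y < gray32.rows - 1; y++) {
    for (int x = 1; x < gray32.cols - 1; x++) {
        x_der.at<float>(y, x) =
              gray32.at<float>(y - 1, x - 1) + 2 * gray32.at<float>(y, x - 1) + gray32.at<float>(y + 1, x - 1)
            - gray32.at<float>(y - 1, x + 1) - 2 * gray32.at<float>(y, x + 1) - gray32.at<float>(y + 1, x + 1);
    }
}
// note: no abs() and no clamping to [0, 255]; the sign matters for x2_der, y2_der and xy_der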

Corruption of the heap

I have one function dealing with an image. In that function, I am trying to find several particular ellipses. The code works if I call it individually in a separate project, but in the whole project it crashes when it returns.
I used many vectors in the processing to store some information along the way.
The error information:
Windows has triggered a breakpoint in KinectBridgeWithOpenCVBasics-D2D.exe.
This may be due to a corruption of the heap, which indicates a bug in KinectBridgeWithOpenCVBasics-D2D.exe or any of the DLLs it has loaded.
This may also be due to the user pressing F12 while KinectBridgeWithOpenCVBasics-D2D.exe has focus.
The output window may have more diagnostic information.
Could anyone tell me what is wrong and causing this crash? Even weirder, it works in the separate project.
The code is a little long, but it is really nothing, just looking for several particular ellipses with some pattern.
Thank you.
int FindNao(Mat* pImg, double* x, double* y)
{
    // Fail if pointer is invalid
    if (!pImg)
    {
        return 2;
    }
    // Fail if Mat contains no data
    if (pImg->empty())
    {
        return 3;
    }
    //*x = 0; *y = 0;
    Mat localMat = *pImg; // save a local copy of the image
    cvtColor(~localMat, localMat, CV_BGR2GRAY); // Convert to gray image
    threshold(localMat, localMat, 165, 255, THRESH_BINARY); // Convert into black-white image
    Mat elementOpen = getStructuringElement(MORPH_ELLIPSE, Size(5, 5), Point(-1, -1));
    morphologyEx(localMat, localMat, MORPH_OPEN, elementOpen, Point(-1, -1), 1);
    // Find all the contours in the black-white image
    vector<vector<Point>> contours;
    findContours(localMat.clone(), contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
    localMat.release();
    // Calculate the area of each contour
    vector<double> areas; int num = contours.size();
    /* If no contours are found, return S_OK */
    if (num < 1)
        return 1;
    for (int i = 0; i < num; i++)
    {
        areas.push_back(contourArea(contours[i]));
    }
    // First round of selection:
    // the area is small, they are like an ellipse, around the middle in the X direction and in the upper part of the image
    vector<RotatedRect> selected_ellipses; // store the ellipse fitted to each potential contour
    vector<double> selected_areas;         // store the contour area of each potential contour
    int imgX = localMat.cols; int imgY = localMat.rows; // get the dimensions of the image
    for (int i = 0; i < num - 1; i++)
    {
        if (areas[i] < 350 && areas[i] > 10)
        {
            // fit an ellipse
            RotatedRect ellipse1 = fitEllipse(Mat(contours[i]));
            // it is a reasonable ellipse, and the area should be close to it
            double length1 = ellipse1.size.height;
            double length2 = ellipse1.size.width;
            if (abs(1 - length1 / length2) <= 0.8 &&
                abs(1 - areas[i] / (CV_PI * length1 * length2 / 4)) <= 0.2)
            {
                selected_ellipses.push_back(ellipse1);
                selected_areas.push_back(areas[i]);
            }
        }
    }
    /************ Second round of selection **************/
    // Calculate each ellipse's dimension
    vector<double> diff_dimension;
    vector<double> ave_dimention;
    /* If no contours are found, return S_OK */
    if (selected_ellipses.size() < 1)
        return 1;
    for (int i = 0; i < selected_ellipses.size(); i++)
    {
        double difference = abs(1 - selected_ellipses[i].size.height / selected_ellipses[i].size.width);
        diff_dimension.push_back(difference);
        double average = (selected_ellipses[i].size.height + selected_ellipses[i].size.width) / 2;
        ave_dimention.push_back(average);
    }
    vector<vector<int>> eyematches;
    vector<vector<int>> cammatches;
    // go over all the ellipses to find the matches with close area and dimension
    for (int i = 0; i < selected_ellipses.size() - 1; i++)
    {
        for (int j = i + 1; j < selected_ellipses.size(); j++)
        {
            // looking for the eyes
            if (diff_dimension[i] < 0.05 && diff_dimension[j] < 0.05)
            {
                double diff_area = abs(1 - selected_areas[i] / selected_areas[j]);
                if (diff_area < 0.05)
                {
                    double diff_y = abs(selected_ellipses[i].center.y - selected_ellipses[j].center.y);
                    if (diff_y < 10)
                    {
                        vector<int> match1;
                        match1.push_back(i); match1.push_back(j);
                        eyematches.push_back(match1);
                    }
                }
            }
            // looking for the cameras
            double diff_x = abs(selected_ellipses[i].center.x - selected_ellipses[j].center.x);
            if (diff_x < 10)
            {
                vector<int> match2;
                match2.push_back(i); match2.push_back(j);
                cammatches.push_back(match2);
            }
        }
    }
    /* Last check */
    int num_eyes = eyematches.size();
    int num_cams = cammatches.size();
    if (num_eyes == 0 || num_cams == 0)
        return 1;
    // Calculate the vector between two eyes and the center
    vector<Point> vector_eyes; vector<Point> center_eyes;
    vector<vector<int>>::iterator ite = eyematches.begin();
    while (ite < eyematches.end())
    {
        Point point;
        point.x = selected_ellipses[(*ite)[0]].center.x - selected_ellipses[(*ite)[1]].center.x;
        point.y = selected_ellipses[(*ite)[0]].center.y - selected_ellipses[(*ite)[1]].center.y;
        vector_eyes.push_back(point);
        point.x = (selected_ellipses[(*ite)[0]].center.x + selected_ellipses[(*ite)[1]].center.x) / 2;
        point.y = (selected_ellipses[(*ite)[0]].center.y + selected_ellipses[(*ite)[1]].center.y) / 2;
        center_eyes.push_back(point);
        ite++;
    }
    // Calculate the vector between two cameras and the center
    vector<Point> vector_cams; vector<Point> center_cams;
    ite = cammatches.begin();
    while (ite < cammatches.end())
    {
        Point point;
        point.x = selected_ellipses[(*ite)[0]].center.x - selected_ellipses[(*ite)[1]].center.x;
        point.y = selected_ellipses[(*ite)[0]].center.y - selected_ellipses[(*ite)[1]].center.y;
        vector_cams.push_back(point);
        point.x = (selected_ellipses[(*ite)[0]].center.x + selected_ellipses[(*ite)[1]].center.x) / 2;
        point.y = (selected_ellipses[(*ite)[0]].center.y + selected_ellipses[(*ite)[1]].center.y) / 2;
        center_cams.push_back(point);
        ite++;
    }
    // Match the eyes and cameras by calculating the center distances and the intersection angle
    vector<vector<int>> matches_eye_cam;
    vector<vector<double>> matches_parameters;
    for (int i = 0; i < num_eyes; i++)
    {
        for (int j = 0; j < num_cams; j++)
        {
            vector<int> temp1;
            vector<double> temp2;
            // calculate the distance
            double distance = sqrt(double((center_eyes[i].x - center_cams[j].x)^2 + (center_eyes[i].y - center_cams[j].y)^2));
            // calculate the cosine of the intersection angle
            double cosAngle = vector_eyes[i].x * vector_cams[j].x + vector_eyes[i].y * vector_cams[j].y;
            // store everything
            temp1.push_back(i); temp1.push_back(j);
            temp2.push_back(distance); temp2.push_back(cosAngle);
            matches_eye_cam.push_back(temp1);
            matches_parameters.push_back(temp2);
        }
    }
    // go over everything to find the minimum
    int min_dis = 0; int min_angle = 0;
    vector<vector<double>>::iterator ite_para = matches_parameters.begin();
    /* If no contours are found, return S_OK */
    if (matches_parameters.size() < 1)
        return 1;
    for (int i = 1; i < matches_parameters.size(); i++)
    {
        if ((*(ite_para + min_dis))[0] > (*(ite_para + i))[0])
            min_dis = i;
        if ((*(ite_para + min_angle))[1] > (*(ite_para + i))[1])
            min_angle = i;
    }
    // get the indices of the best match of eyes and cameras
    int eyes_index, cams_index;
    vector<vector<int>>::iterator ite_match_eye_cam = matches_eye_cam.begin();
    if (min_dis == min_angle)
    {
        // perfect match
        eyes_index = (*(ite_match_eye_cam + min_dis))[0];
        cams_index = (*(ite_match_eye_cam + min_dis))[1];
    }
    else
    {
        // tried to fuse them and find a better solution, but it did not work out, so
        // go with min_dis
        eyes_index = (*(ite_match_eye_cam + min_dis))[0];
        cams_index = (*(ite_match_eye_cam + min_dis))[1];
    }
    vector<vector<int>>::iterator ite_eyes = eyematches.begin();
    vector<vector<int>>::iterator ite_cams = cammatches.begin();
    // draw the eyes
    ellipse(*pImg, selected_ellipses[(*(ite_eyes + eyes_index))[0]], Scalar(0, 255, 255), 2, 8);
    ellipse(*pImg, selected_ellipses[(*(ite_eyes + eyes_index))[1]], Scalar(0, 255, 255), 2, 8);
    // draw the cameras
    ellipse(*pImg, selected_ellipses[(*(ite_cams + cams_index))[0]], Scalar(0, 255, 0), 2, 8);
    ellipse(*pImg, selected_ellipses[(*(ite_cams + cams_index))[1]], Scalar(0, 255, 0), 2, 8);
    imshow("show", *pImg);
    // find the upper camera
    int m1 = (*(ite_cams + cams_index))[0];
    int m2 = (*(ite_cams + cams_index))[1];
    int upper;
    if (selected_ellipses[m1].center.y < selected_ellipses[m2].center.y)
        upper = m1;
    else
        upper = m2;
    *x = selected_ellipses[upper].center.x;
    *y = selected_ellipses[upper].center.y;
    return 1;
}
int main()
{
    Mat imO = imread("Capture.PNG");
    double x, y;
    FindNao(&imO, &x, &y);
    cout << x << " " << y << endl;
    cvWaitKey(0);
}
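Two hedged observations, since no answer is recorded here. First, in C++ the ^ operator is bitwise XOR, not exponentiation, so (center_eyes[i].x - center_cams[j].x)^2 in the distance computation above does not square anything; a fixed fragment:
// '^' is XOR in C++, so square explicitly.
double dx = center_eyes[i].x - center_cams[j].x;
double dy = center_eyes[i].y - center_cams[j].y;
double distance = sqrt(dx * dx + dy * dy);
Second, a crash on return that only appears when the function is embedded in a larger project is a classic symptom of modules built against different C runtimes (for example, a debug project linking release OpenCV DLLs), so the vectors' memory is freed by a different heap than the one that allocated it. That is only a guess, but it is worth checking the project's runtime-library settings before hunting inside this function.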