I have a function that processes an image, trying to find several particular ellipses in it. The code works when I call it on its own in a separate project, but inside the whole project it crashes when the function returns.
I use many vectors during the processing to store intermediate information.
The error information:
Windows has triggered a breakpoint in KinectBridgeWithOpenCVBasics-D2D.exe.
This may be due to a corruption of the heap, which indicates a bug in KinectBridgeWithOpenCVBasics-D2D.exe or any of the DLLs it has loaded.
This may also be due to the user pressing F12 while KinectBridgeWithOpenCVBasics-D2D.exe has focus.
The output window may have more diagnostic information.
Could anyone tell me what is wrong and causes this crash? What is even stranger is that it works in the separate project.
The code is a little long, but it really does nothing special, just looking for several particular ellipses in a certain pattern.
Thank you.
int FindNao(Mat* pImg, double* x, double* y)
{
// Fail if pointer is invalid
if (!pImg)
{
return 2;
}
// Fail if Mat contains no data
if (pImg->empty())
{
return 3;
}
//*x = 0; *y = 0;
Mat localMat = *pImg; // note: this is a shallow copy that shares pixel data with *pImg, not a deep copy
cvtColor(~localMat, localMat, CV_BGR2GRAY); // Invert the image and convert it to gray
threshold(localMat, localMat, 165, 255, THRESH_BINARY); // Convert into black-white image
Mat elementOpen = getStructuringElement(MORPH_ELLIPSE, Size(5,5), Point(-1,-1));
morphologyEx(localMat, localMat, MORPH_OPEN, elementOpen, Point(-1,-1), 1);
// Find all the contours in the black-and-white image
vector<vector<Point>> contours;
findContours(localMat.clone(), contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
localMat.release();
// Calculate the area of each contour
vector<double> areas; int num = contours.size();
// If no contours were found, return early
if(num < 1)
return 1;
for(int i = 0; i < num; i++)
{
areas.push_back(contourArea(contours[i]));
}
// First round of selection
// They have a small area, look like an ellipse, sit around the middle in the X direction, and lie in the upper part of the image
vector<RotatedRect> selected_ellipses; // store the fitted ellipse fitted to the potential contour
vector<double> selected_areas; // store the contour area of the potential contour
int imgX = pImg->cols; int imgY = pImg->rows; // get the image dimensions from pImg (localMat was released above, so its cols/rows are 0)
for(int i = 0; i < num; i++) // iterate over every contour, including the last one
{
if(areas[i] < 350 && areas[i] > 10)
{
// fit an ellipse (fitEllipse needs at least 5 points, otherwise it throws)
if(contours[i].size() < 5)
continue;
RotatedRect ellipse1 = fitEllipse(Mat(contours[i]));
// accept it if it is a reasonable ellipse whose contour area is close to the area of the fitted ellipse
double length1 = ellipse1.size.height;
double length2 = ellipse1.size.width;
if( abs(1 - length1/length2) <= 0.8 &&
abs(1 - areas[i] / (CV_PI * length1 * length2 / 4) ) <= 0.2 )
{
selected_ellipses.push_back(ellipse1);
selected_areas.push_back(areas[i]);
}
}
}
/************ Second round of selection **************/
// Calculate each ellipse's dimension
vector<double> diff_dimension;
vector<double> ave_dimension;
// If no candidate ellipses remain, return early
if(selected_ellipses.size() < 1)
return 1;
for(int i = 0; i < selected_ellipses.size(); i++)
{
double difference = abs(1 - selected_ellipses[i].size.height / selected_ellipses[i].size.width);
diff_dimension.push_back(difference);
double average = (selected_ellipses[i].size.height + selected_ellipses[i].size.width) / 2;
ave_dimension.push_back(average);
}
vector<vector<int>> eyematches;
vector<vector<int>> cammatches;
// go over all the ellipses to find the matches with close area and dimension.
for(int i = 0; i < selected_ellipses.size() - 1; i++)
{
for(int j = i+1; j < selected_ellipses.size(); j++)
{
// looking for the eyes
if(diff_dimension[i] < 0.05 && diff_dimension[j] < 0.05)
{
double diff_area = abs( 1 - selected_areas[i] / selected_areas[j] );
if (diff_area < 0.05)
{
double diff_y = abs(selected_ellipses[i].center.y - selected_ellipses[j].center.y);
if(diff_y < 10)
{
vector<int> match1;
match1.push_back(i); match1.push_back(j);
eyematches.push_back(match1);
}
}
}
// looking for the cameras
double diff_x = abs(selected_ellipses[i].center.x - selected_ellipses[j].center.x);
if (diff_x < 10)
{
vector<int> match2;
match2.push_back(i); match2.push_back(j);
cammatches.push_back(match2);
}
}
}
/* Last check */
int num_eyes = eyematches.size();
int num_cams = cammatches.size();
if(num_eyes == 0 || num_cams == 0)
return 1;
// Calculate the vector between two eyes and the center
vector<Point> vector_eyes; vector<Point> center_eyes;
vector<vector<int>>::iterator ite = eyematches.begin();
while(ite < eyematches.end())
{
Point point;
point.x = selected_ellipses[(*ite)[0]].center.x - selected_ellipses[(*ite)[1]].center.x;
point.y = selected_ellipses[(*ite)[0]].center.y - selected_ellipses[(*ite)[1]].center.y;
vector_eyes.push_back(point);
point.x = (selected_ellipses[(*ite)[0]].center.x + selected_ellipses[(*ite)[1]].center.x)/2;
point.y = (selected_ellipses[(*ite)[0]].center.y + selected_ellipses[(*ite)[1]].center.y)/2;
center_eyes.push_back(point);
ite++;
}
// Calculate the vector between two cameras and the center
vector<Point> vector_cams; vector<Point> center_cams;
ite = cammatches.begin();
while(ite < cammatches.end())
{
Point point;
point.x = selected_ellipses[(*ite)[0]].center.x - selected_ellipses[(*ite)[1]].center.x;
point.y = selected_ellipses[(*ite)[0]].center.y - selected_ellipses[(*ite)[1]].center.y;
vector_cams.push_back(point);
point.x = (selected_ellipses[(*ite)[0]].center.x + selected_ellipses[(*ite)[1]].center.x)/2;
point.y = (selected_ellipses[(*ite)[0]].center.y + selected_ellipses[(*ite)[1]].center.y)/2;
center_cams.push_back(point);
ite++;
}
// Match the eyes and cameras, by calculating the center distances and intersection angle
vector<vector<int>> matches_eye_cam;
vector<vector<double>> matches_parameters;
for(int i = 0; i < num_eyes; i++)
{
for(int j = 0; j < num_cams; j++)
{
vector<int> temp1;
vector<double> temp2;
// calculate the distance between the centers (note: '^' is bitwise XOR in C++, not exponentiation, so square by multiplying)
double dx = center_eyes[i].x - center_cams[j].x;
double dy = center_eyes[i].y - center_cams[j].y;
double distance = sqrt(dx * dx + dy * dy);
// calculate the dot product of the two vectors (the unnormalized cosine of the intersection angle)
double cosAngle = vector_eyes[i].x * vector_cams[j].x + vector_eyes[i].y * vector_cams[j].y;
// store everything
temp1.push_back(i); temp1.push_back(j);
temp2.push_back(distance); temp2.push_back(cosAngle);
matches_eye_cam.push_back(temp1);
matches_parameters.push_back(temp2);
}
}
// go over to find the minimum
int min_dis = 0; int min_angle = 0;
vector<vector<double>>::iterator ite_para = matches_parameters.begin();
// If no matches were found, return early
if(matches_parameters.size() < 1)
return 1;
for(int i = 1; i < matches_parameters.size(); i++)
{
if( (*(ite_para+min_dis))[0] > (*(ite_para+i))[0] )
min_dis = i;
if( (*(ite_para+min_angle))[1] > (*(ite_para+i))[1] )
min_angle = i;
}
// get the indices of the best eye and camera match
int eyes_index, cams_index;
vector<vector<int>>::iterator ite_match_eye_cam = matches_eye_cam.begin();
if(min_dis == min_angle)
{
// perfect match
eyes_index = (*(ite_match_eye_cam + min_dis))[0];
cams_index = (*(ite_match_eye_cam + min_dis))[1];
}
else
{
// tried to fuse them and find a better solution, but it did not work out, so
// go with the min_dis
eyes_index = (*(ite_match_eye_cam + min_dis))[0];
cams_index = (*(ite_match_eye_cam + min_dis))[1];
}
vector<vector<int>>::iterator ite_eyes = eyematches.begin();
vector<vector<int>>::iterator ite_cams = cammatches.begin();
// draw the eyes
ellipse(*pImg, selected_ellipses[(*(ite_eyes+eyes_index))[0]], Scalar(0, 255, 255), 2, 8);
ellipse(*pImg, selected_ellipses[(*(ite_eyes+eyes_index))[1]], Scalar(0, 255, 255), 2, 8);
// draw the camera
ellipse(*pImg, selected_ellipses[(*(ite_cams+cams_index))[0]], Scalar(0, 255, 0), 2, 8);
ellipse(*pImg, selected_ellipses[(*(ite_cams+cams_index))[1]], Scalar(0, 255, 0), 2, 8);
imshow("show", *pImg);
// find the upper camera
int m1 = (*(ite_cams+cams_index))[0];
int m2 = (*(ite_cams+cams_index))[1];
int upper;
if(selected_ellipses[m1].center.y < selected_ellipses[m2].center.y)
upper = m1;
else
upper = m2;
*x = selected_ellipses[upper].center.x;
*y = selected_ellipses[upper].center.y;
return 1;
}
int main()
{
Mat imO = imread("Capture.PNG");
double x, y;
FindNao(&imO, &x, &y);
cout<<x<<" "<<y<<endl;
waitKey(0); // use the C++ API's waitKey rather than the legacy cvWaitKey
return 0;
}
Related
I want to move through an image and take a 5x5 grid centered on each pixel. I then want to sum that grid and compare the sum to a threshold.
int main()
{
Mat element = getStructuringElement(MORPH_RECT, Size(7, 7));
Mat im = imread("blob.png", IMREAD_GRAYSCALE);
bool fromCenter = false;
namedWindow("Crop frame", WINDOW_NORMAL);
Rect2d r = selectROI("Crop frame", im, fromCenter);
im = im(r);
erode(im, im, element);
Mat clone = im; // note: this is a shallow copy; clone and im share the same pixel data
int sectionSize = 4; // note: 4 gives a 4x4 window, not the 5x5 grid described above
int width = im.cols - sectionSize/2;
int height = im.rows - sectionSize/2;
int sum = 0;
int counter = 0;
for (int i = sectionSize/2; i < width; i++) {
for (int j = sectionSize/2; j < height; j++) {
Rect rect = Rect(i, j, sectionSize, sectionSize);
rect -= Point(rect.width / 2, rect.height / 2);
Mat temp = im(rect);
for (int x = 0; x < temp.cols; x++) {
for (int y = 0; y < temp.rows; y++) {
int pixelValue = (int)temp.at<uchar>(y, x);
sum += pixelValue;
}
}
cout << sum << endl;
if (sum > 3800) {
clone.at<uchar>(j, i) = 255;
}
else {
clone.at<uchar>(j, i) = 0;
}
namedWindow("erode", WINDOW_NORMAL);
imshow("erode", clone);
waitKey(1);
sum = 0;
}
}
}
I am getting fluctuations in the pixel sum depending on where I select my ROI, even when both selections are over white space. Also, my pixel sum changes when I change the value of the clone pixel in this section of the code, which I do not understand at all:
if (sum > 3800) {
clone.at<uchar>(j, i) = 255;
}
else {
clone.at<uchar>(j, i) = 0;
}
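As an aside, the whole sliding-window sum can be computed in a single call with cv::boxFilter, which also sidesteps the shared-data pitfall above because the result is written to a separate Mat that never feeds back into the summation. This is a minimal sketch, assuming the file name and the 3800 threshold from the question:
#include <opencv2/opencv.hpp>
using namespace cv;
int main()
{
    Mat im = imread("blob.png", IMREAD_GRAYSCALE);
    if (im.empty())
        return -1;
    // Sum every 5x5 neighborhood; normalize=false keeps raw sums instead of means
    Mat sums;
    boxFilter(im, sums, CV_32F, Size(5, 5), Point(-1, -1), false);
    // 255 where the window sum exceeds the threshold, 0 elsewhere
    Mat result = (sums > 3800);
    imshow("result", result);
    waitKey(0);
    return 0;
}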
I have a circular brush with a diameter of 200 px and a hardness of 0 (the brush is a circular gradient). The spacing between brush stamps is 25% of the brush diameter. However, when I compare the stroke my program draws with the stroke Photoshop draws, with all settings equal...
It is clear that Photoshop's is much smoother! I can't reduce the spacing, because that makes the edges harder.
How can I make my stroke look like Photoshop's?
Here is the relevant code from my program...
//defining a circle
Mat alphaBrush(2*outerRadius,2*outerRadius,CV_32FC1);
float floatInnerRadius = outerRadius * hardness;
for(int i = 0; i < alphaBrush.rows; i++ ){
for(int j=0; j<alphaBrush.cols; j++ ){
int x = outerRadius - i;
int y = outerRadius - j;
float radius=hypot((float) x, (float) y );
auto& pixel = alphaBrush.at<float>(i,j);
if(radius>outerRadius){ pixel=0.0; continue;} // transparent
if(radius<floatInnerRadius){ pixel=1.0; continue;} // solid
pixel=1-((radius-floatInnerRadius)/(outerRadius-floatInnerRadius)); // partial
}
}
/*
(...irrelevant stuff)
*/
//drawing the brush onto the canvas
for (int j = 0; j < inMatROI.rows; j++) {
Vec3b *thisBgRow = inMatROI.ptr<Vec3b>(j);
float *thisAlphaRow = brushROI.ptr<float>(j);
for (int i = 0; i < inMatROI.cols; i++) {
for (int c = 0; c < 3; c++) {
thisBgRow[i][c] = saturate_cast<uchar>((brightness * thisAlphaRow[i]) + ((1.0 - thisAlphaRow[i]) * thisBgRow[i][c]));
}
}
}
I have also tried resultValue = max(backgroundValue, brushValue), but the intersection between the two circles is pretty obvious.
This is the approach: draw a solid thin line and afterwards compute the distance of each pixel to that line.
As you can see, there are some artifacts, probably mostly because cv::distanceTransform only returns approximated distance values. If you compute the distances precisely (and maybe in double precision) you should get very smooth results.
int main()
{
cv::Mat canvas = cv::Mat(768, 768, CV_8UC3, cv::Scalar::all(255));
cv::Mat canvasMask = cv::Mat::zeros(canvas.size(), CV_8UC1);
// make sure the stroke always has >= 2 points, otherwise the cv::line approach will not work...
std::vector<cv::Point> strokeSampling;
strokeSampling.push_back(cv::Point(250, 100));
strokeSampling.push_back(cv::Point(250, 200));
strokeSampling.push_back(cv::Point(600, 300));
strokeSampling.push_back(cv::Point(600, 400));
strokeSampling.push_back(cv::Point(250, 500));
strokeSampling.push_back(cv::Point(250, 650));
for (int i = 0; i < strokeSampling.size() - 1; ++i)
cv::line(canvasMask, strokeSampling[i], strokeSampling[i + 1], cv::Scalar::all(255));
// computing a distance map:
cv::Mat tmp1 = 255 - canvasMask;
cv::Mat distMap;
cv::distanceTransform(tmp1, distMap, CV_DIST_L2, CV_DIST_MASK_PRECISE);
float outerRadius = 50;
float innerRadius = 10;
cv::Scalar strokeColor = cv::Scalar::all(0);
for (int y = 0; y < distMap.rows; ++y)
for (int x = 0; x < distMap.cols; ++x)
{
float percentage = 0.0f;
float radius = distMap.at<float>(y, x);
if (radius>outerRadius){ percentage = 0.0; } // transparent
else
if (radius<innerRadius){ percentage = 1.0; } // solid
else
{
percentage = 1 - ((radius - innerRadius) / (outerRadius - innerRadius)); // partial
}
if (percentage > 0)
{
// here you could use the canvasMask if you like to, instead of directly drawing on the canvas
cv::Vec3b canvasColor = canvas.at<cv::Vec3b>(y, x);
cv::Vec3b cColor = cv::Vec3b(strokeColor[0], strokeColor[1], strokeColor[2]);
canvas.at<cv::Vec3b>(y, x) = percentage*cColor + (1 - percentage) * canvasColor;
}
}
cv::imshow("out", canvas);
cv::imwrite("C:/StackOverflow/Output/stroke.png", canvas);
cv::waitKey(0);
}
How can I calculate the percentage of white pixels inside a cv::RotatedRect? That is, how do I access a single pixel inside my cv::RotatedRect? If I could do that, I'd know what to do next. Thanks.
I've tried the solution from this thread, but I got exceptions: https://stackoverflow.com/a/28780359
std::vector<cv::RotatedRect> minRect(count.size());
for (int i = 0; i < count.size(); i++)
{
minRect[i] = cv::minAreaRect(cv::Mat(count[i]));
}
for (size_t i = 0; i < count.size(); i++){
if (cv::contourArea(count[i]) > 200) {
cv::Point2f rect_points[4];
minRect[i].points(rect_points);
// Here I'd like to calculate the percentage of white pixels inside the RotatedRect; if the returned value were smaller than 30%, continue;
for (int j = 0; j < 4; j++) {
cv::line(mask, rect_points[j], rect_points[(j + 1) % 4], cv::Scalar(0, 255, 0), 1, 8);
}
}
}
You can:
Work on the sub-image defined by cv::boundingRect
create the mask where all points inside the rotated rect are white with cv::fillConvexPoly
logical AND with the original image
count the number of white pixels with cv::countNonZero
The method proposed by John Henkel works, but in my (very quick) tests it was somewhere between 10 and 40 times slower.
Below is the code with both methods. You'll find small differences in the results, because the white pixels on the border of the rotated rect are handled differently.
#include <opencv2\opencv.hpp>
#include <chrono>
int main()
{
// Create a binary image with random black/white pixels
cv::Mat1b img(5000, 5000);
cv::randu(img, cv::Scalar(0), cv::Scalar(256));
img = img > 127;
// Define a rotated rect
cv::Point2f center(2000, 2000);
cv::Size2f sz(1000, 500);
float angle = 30.f;
cv::RotatedRect rr(center, sz, angle);
// Get points
std::vector<cv::Point2f> points(4);
rr.points(points.data());
// Work on ROI
cv::Rect roi = rr.boundingRect();
// Area
float area = rr.size.width * rr.size.height;
//// DEBUG, Show rect
//cv::Mat3b out;
//cv::cvtColor(img, out, cv::COLOR_GRAY2BGR);
//for (int i = 0; i < 4; ++i) {
// cv::line(out, points[i], points[(i + 1) % 4], cv::Scalar(0, 0, 255));
//}
{
// --------------------
// Method #Miki
// --------------------
auto tic = std::chrono::high_resolution_clock::now();
cv::Mat1b sub_img = img(roi);
// Create rotated rect mask
cv::Mat1b mask(roi.size(), uchar(0));
std::vector<cv::Point> points_in_sub_image(4);
for (int i = 0; i < 4; ++i) {
points_in_sub_image[i] = cv::Point(points[i]) - roi.tl();
}
cv::fillConvexPoly(mask, points_in_sub_image, cv::Scalar(255));
// AND sub image with mask
cv::Mat1b inside_roi = sub_img & mask;
//// DEBUG, Draw green points
//for (int r = 0; r < sub_img.rows; ++r) {
// for (int c = 0; c < sub_img.cols; ++c) {
// if (inside_roi(r, c) > 0)
// {
// out(r + roi.y, c + roi.x) = cv::Vec3b(0, 255, 0);
// }
// }
//}
// Get actual count
int cnz = cv::countNonZero(inside_roi);
auto toc = std::chrono::high_resolution_clock::now();
auto elapsed = std::chrono::duration_cast<std::chrono::microseconds>(toc - tic);
float percent_white_pixels = cnz / area;
std::cout << "percent_white_pixels: " << percent_white_pixels << " in " << elapsed.count() << " us" << std::endl;
}
{
// --------------------
// Method #John Henkel
// --------------------
auto tic = std::chrono::high_resolution_clock::now();
int cnz = 0;
for (int y = roi.y; y < roi.y + roi.height; ++y) {
for (int x = roi.x; x < roi.x + roi.width; ++x) {
if (
(img(y, x) > 0) &&
(cv::pointPolygonTest(points, cv::Point2f(x, y), false) >= 0.0)
)
{
// DEBUG, Draw blue points
//out(y, x) = cv::Vec3b(255, 0, 0);
++cnz;
}
}
}
auto toc = std::chrono::high_resolution_clock::now();
auto elapsed = std::chrono::duration_cast<std::chrono::microseconds>(toc - tic);
float percent_white_pixels = cnz / area;
std::cout << "percent_white_pixels: " << percent_white_pixels << " in " << elapsed.count() << " us" << std::endl;
}
getchar();
return 0;
}
The best way I can think of to get the individual pixels would be to first obtain the bounding box of your rotated rectangle and then iterate through each of the pixels inside the box to see if they are in the rotated rectangle with pointPolygonTest. I'm not sure if there's a more efficient way to do it, but this should give you the results you're looking for.
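As a standalone illustration of that idea, here is a minimal sketch (the helper name whiteRatio is hypothetical; img is assumed to be a binary CV_8UC1 image and rr the cv::RotatedRect in question):
#include <opencv2/opencv.hpp>
// Ratio of white pixels among the pixels inside the rotated rect,
// scanning its bounding box and filtering with cv::pointPolygonTest
double whiteRatio(const cv::Mat1b& img, const cv::RotatedRect& rr)
{
    cv::Point2f corners[4];
    rr.points(corners);
    std::vector<cv::Point2f> poly(corners, corners + 4);
    // Clip the bounding box to the image so we never index out of range
    cv::Rect box = rr.boundingRect() & cv::Rect(0, 0, img.cols, img.rows);
    int inside = 0, white = 0;
    for (int y = box.y; y < box.y + box.height; ++y)
        for (int x = box.x; x < box.x + box.width; ++x)
            if (cv::pointPolygonTest(poly, cv::Point2f((float)x, (float)y), false) >= 0)
            {
                ++inside;
                if (img(y, x) > 0)
                    ++white;
            }
    return inside > 0 ? (double)white / inside : 0.0;
}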
I'm trying to shear an image along the X-axis, using OpenCV to load the image and the following mapping to shear it: x′ = x + y·Bx. But for some reason, I end up with the following shear:
My source code looks like this:
#include "stdafx.h"
#include "opencv2\opencv.hpp"
using namespace std;
using namespace cv;
int main()
{
Mat src = imread("B2DBy.jpg", 0); // load as grayscale, since the loop below reads single-channel uchar pixels
if (src.empty())
cout << "Error: Loading image" << endl;
int r1, c1; // transformed point
int rows, cols; // original image rows and columns
rows = src.rows;
cols = src.cols;
float Bx = 2; // amount of shearing in x-axis
float By = 0; // amount of shearing in y-axis
int maxXOffset = abs(cols * Bx);
int maxYOffset = abs(rows * By);
Mat out = Mat::ones(src.rows + maxYOffset, src.cols + maxXOffset, src.type()); // create an output image large enough to hold the sheared result
for (int r = 0; r < out.rows; r++) // loop through the image
{
for (int c = 0; c < out.cols; c++)
{
r1 = r + c * By - maxYOffset; // map old point to new
c1 = r * Bx + c - maxXOffset;
if (r1 >= 0 && r1 < rows && c1 >= 0 && c1 < cols) // check that the source point is within the source image boundaries
{
out.at<uchar>(r, c) = src.at<uchar>(r1, c1); // set value
}
}
}
namedWindow("Source image", CV_WINDOW_AUTOSIZE);
namedWindow("Rotated image", CV_WINDOW_AUTOSIZE);
imshow("Source image", src);
imshow("Rotated image", out);
waitKey(0);
return 0;
}
EDIT
Fixed it myself.
I didn't need to subtract the offset. Here's the updated source code:
Mat forward(Mat img) {
Mat umg = img;
int y1, x1; // transformed point
int rows, cols; // original image rows and columns
rows = umg.rows;
cols = umg.cols;
float Bx = 0.7; // amount of shearing in x-axis
float By = 0; // amount of shearing in y-axis
int maxXOffset = abs(rows * Bx);
int maxYOffset = abs(cols * By);
Mat out = Mat::ones(rows + maxYOffset, cols + maxXOffset, umg.type()); // create an output image large enough to hold the sheared result
for (int y = 0; y < rows; y++) // loop through the image
{
for (int x = 0; x < cols; x++)
{
y1 = y + x * By; // map old point to new
x1 = y * Bx + x;
out.at<uchar>(y1, x1) = umg.at<uchar>(y, x); // set value
}
}
return out;
}
Mat backwards(Mat img) {
Mat umg = img;
int y1, x1; // transformed point
int rows, cols; // original image rows and columns
rows = umg.rows;
cols = umg.cols;
float Bx = 0.7; // amount of shearing in x-axis
float By = 0; // amount of shearing in y-axis
int maxXOffset = abs(rows * Bx);
int maxYOffset = abs(cols * By);
Mat out = Mat::ones(rows + maxYOffset, cols + maxXOffset, umg.type()); // create an output image large enough to hold the sheared result
for (int y = 0; y < rows; y++) // loop through the image
{
for (int x = 0; x < cols; x++)
{
//y1 = y + x * By; // map old point to new
//x1 = y * Bx + x;
y1 = (1 / (1 - Bx*By)) * (y + x * By);
x1 = (1 / (1 - Bx*By)) * (y * Bx + x);
out.at<uchar>(y1, x1) = umg.at<uchar>(y, x); // set value
}
}
return out;
}
int main()
{
Mat src = imread("B2DBy.jpg", 0);
if (src.empty())
cout << "Error: Loading image" << endl;
Mat forwards = forward(src);
Mat back = backwards(src);
namedWindow("Source image", CV_WINDOW_NORMAL);
imshow("Source image", src);
imshow("back", back);
imshow("forward image", forwards);
waitKey(0);
return 0;
}
I found some time to work on this.
Now I understand what you tried to achieve with the offset computation, but I'm not sure whether yours is correct.
If you want grayscale, just change all the cv::Vec3b to unsigned char or uchar and load the image as grayscale.
Please try this code and maybe you'll find your error:
// no interpolation yet
// cv::Vec3b only
cv::Mat shear(const cv::Mat & input, float Bx, float By)
{
if (Bx*By == 1)
{
throw("Shearing: Bx*By==1 is forbidden");
}
if (input.type() != CV_8UC3) return cv::Mat();
// shearing:
// x'=x+y·Bx
// y'=y+x*By
// shear the extreme positions to find out new image size:
std::vector<cv::Point2f> extremePoints;
extremePoints.push_back(cv::Point2f(0, 0));
extremePoints.push_back(cv::Point2f(input.cols, 0));
extremePoints.push_back(cv::Point2f(input.cols, input.rows));
extremePoints.push_back(cv::Point2f(0, input.rows));
for (unsigned int i = 0; i < extremePoints.size(); ++i)
{
cv::Point2f & pt = extremePoints[i];
pt = cv::Point2f(pt.x + pt.y*Bx, pt.y + pt.x*By);
}
cv::Rect offsets = cv::boundingRect(extremePoints);
cv::Point2f offset = -offsets.tl();
cv::Size resultSize = offsets.size();
cv::Mat shearedImage = cv::Mat::zeros(resultSize, input.type()); // every pixel here is implicitely shifted by "offset"
// perform the shearing by back-transformation
for (int j = 0; j < shearedImage.rows; ++j)
{
for (int i = 0; i < shearedImage.cols; ++i)
{
cv::Point2f pp(i, j);
pp = pp - offset; // go back to original coordinate system
// go back to original pixel:
// x'=x+y·Bx
// y'=y+x*By
// y = y'-x*By
// x = x' -(y'-x*By)*Bx
// x = +x*By*Bx - y'*Bx +x'
// x*(1-By*Bx) = -y'*Bx +x'
// x = (-y'*Bx +x')/(1-By*Bx)
cv::Point2f p;
p.x = (-pp.y*Bx + pp.x) / (1 - By*Bx);
p.y = pp.y - p.x*By;
if ((p.x >= 0 && p.x < input.cols) && (p.y >= 0 && p.y < input.rows))
{
// TODO: interpolate, if wanted (p is floating point precision and can be placed between two pixels)!
shearedImage.at<cv::Vec3b>(j, i) = input.at<cv::Vec3b>(p);
}
}
}
return shearedImage;
}
int main(int argc, char* argv[])
{
cv::Mat input = cv::imread("C:/StackOverflow/Input/Lenna.png");
cv::Mat output = shear(input, 0.7, 0);
//cv::Mat output = shear(input, -0.7, 0);
//cv::Mat output = shear(input, 0, 0.7);
cv::imshow("input", input);
cv::imshow("output", output);
cv::waitKey(0);
return 0;
}
Giving me these outputs for the 3 sample lines:
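For comparison, OpenCV can apply the same shear in one call with cv::warpAffine, which also takes care of interpolation. The 2x3 matrix encodes x' = x + y·Bx and y' = y + x·By. A minimal sketch, assuming non-negative Bx and By so that no extra translation is needed:
#include <opencv2/opencv.hpp>
int main()
{
    cv::Mat input = cv::imread("B2DBy.jpg"); // file name taken from the question
    float Bx = 0.7f, By = 0.0f;
    // Forward affine matrix of the shear; warpAffine inverts it internally
    cv::Mat M = (cv::Mat_<double>(2, 3) << 1, Bx, 0,
                                           By, 1, 0);
    // Enlarge the canvas so the sheared content is not clipped
    cv::Size outSize(cvRound(input.cols + Bx * input.rows),
                     cvRound(input.rows + By * input.cols));
    cv::Mat output;
    cv::warpAffine(input, output, M, outSize, cv::INTER_LINEAR);
    cv::imshow("warpAffine shear", output);
    cv::waitKey(0);
    return 0;
}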
I am using this code from the FingertipTuio3D project:
std::vector<cv::Point2i> detectFingertips(cv::Mat1f z, float zMin = 0.0f, float zMax = 0.75f, cv::Mat1f& debugFrame = cv::Mat1f()) {
using namespace cv;
using namespace std;
bool debug = !debugFrame.empty();
vector<Point2i> fingerTips;
Mat handMask = z < zMax & z > zMin;
std::vector<std::vector<cv::Point>> contours;
findContours(handMask.clone(), contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE); // we clone here, since the method modifies its input image
if (contours.size()) {
for (int i=0; i<contours.size(); i++) {
vector<Point> contour = contours[i];
Mat contourMat = Mat(contour);
double area = cv::contourArea(contourMat);
if (area > 3000) { // possible hand
Scalar center = mean(contourMat);
Point centerPoint = Point(center.val[0], center.val[1]);
vector<Point> approxCurve;
cv::approxPolyDP(contourMat, approxCurve, 20, true);
vector<int> hull;
cv::convexHull(Mat(approxCurve), hull);
// find upper and lower bounds of the hand and define cutoff threshold (don't consider lower vertices as fingers)
int upper = 640, lower = 0;
for (int j=0; j<hull.size(); j++) {
int idx = hull[j]; // corner index
if (approxCurve[idx].y < upper) upper = approxCurve[idx].y;
if (approxCurve[idx].y > lower) lower = approxCurve[idx].y;
}
float cutoff = lower - (lower - upper) * 0.1f;
// find interior angles of hull corners
for (int j=0; j<hull.size(); j++) {
int idx = hull[j]; // corner index
int pdx = idx == 0 ? approxCurve.size() - 1 : idx - 1; // predecessor of idx
int sdx = idx == approxCurve.size() - 1 ? 0 : idx + 1; // successor of idx
Point v1 = approxCurve[sdx] - approxCurve[idx];
Point v2 = approxCurve[pdx] - approxCurve[idx];
float angle = acos( (v1.x*v2.x + v1.y*v2.y) / (norm(v1) * norm(v2)) );
// low interior angle + within upper 90% of region -> we got a finger
if (angle < 1 && approxCurve[idx].y < cutoff) {
int u = approxCurve[idx].x;
int v = approxCurve[idx].y;
fingerTips.push_back(Point2i(u,v));
if (debug) {
cv::circle(debugFrame, approxCurve[idx], 10, Scalar(1), -1);
}
}
}
if (debug) {
// draw cutoff threshold
cv::line(debugFrame, Point(center.val[0]-100, cutoff), Point(center.val[0]+100, cutoff), Scalar(1.0f));
// draw approxCurve
for (int j=0; j<approxCurve.size(); j++) {
cv::circle(debugFrame, approxCurve[j], 10, Scalar(1.0f));
if (j != 0) {
cv::line(debugFrame, approxCurve[j], approxCurve[j-1], Scalar(1.0f));
} else {
cv::line(debugFrame, approxCurve[0], approxCurve[approxCurve.size()-1], Scalar(1.0f));
}
}
// draw approxCurve hull
for (int j=0; j<hull.size(); j++) {
cv::circle(debugFrame, approxCurve[hull[j]], 10, Scalar(1.0f), 3);
if(j == 0) {
cv::line(debugFrame, approxCurve[hull[j]], approxCurve[hull[hull.size()-1]], Scalar(1.0f));
} else {
cv::line(debugFrame, approxCurve[hull[j]], approxCurve[hull[j-1]], Scalar(1.0f));
}
}
}
}
}
}
return fingerTips;
}
When the code reaches the line vector<Point> contour = contours[i];, it fails with an access violation:
Unhandled exception at 0x00b85039 in FingertipTuio3d.exe: 0xC0000005: Access violation reading location 0x00000008.
That location is in the size_type size() const function of std::vector.
Any idea what is causing the problem, and how it might be fixed?
Does your findContours actually put anything into each contour?
You are checking that the array exists, but not that the individual elements are valid.
Can you access the elements via an iterator? In other words, if you add the following code immediately after the call to findContours, what output do you get?
int i = 0;
for (std::vector<std::vector<cv::Point> >::iterator it = contours.begin();
it != contours.end();
++it)
{
std::cout << "contours[" << i << "].size() == " << it->size() << std::endl;
++i; // keep the printed index in step with the iterator
}
Because contours is a vector of vectors (std::vector<std::vector<cv::Point>> contours;)
and contour is a vector of points (std::vector<cv::Point> contour;),
you cannot do std::vector<cv::Point> contour = contours[i];.
If you want to copy contours[i], you need to use another vector of vectors.
This is a build/link error. Look at this:
#ifdef _DEBUG
#pragma comment(lib,"opencv_core249d.lib")
#else
#pragma comment(lib,"opencv_core249.lib") // <<---- check which one you linked!!
#endif
:-)