How to draw a line in OpenCV - C++

Hello everyone, I have a problem.
Here is my code. Can anyone teach me how to draw a straight line at the center of the video?
And how can I reduce the number of green lines in the video? I want to find the vanishing point.
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
using namespace cv;
using namespace std;
Point2f center(0,0);
Point2f computeIntersect(Vec4i a, Vec4i b)
{
int x1 = a[0], y1 = a[1], x2 = a[2], y2 = a[3], x3 = b[0], y3 = b[1], x4 = b[2], y4 = b[3];
if (float d = ((float)(x1 - x2) * (y3 - y4)) - ((y1 - y2) * (x3 - x4)))
{
Point2f pt;
pt.x = ((x1 * y2 - y1 * x2) * (x3 - x4) - (x1 - x2) * (x3 * y4 - y3 * x4)) / d;
pt.y = ((x1 * y2 - y1 * x2) * (y3 - y4) - (y1 - y2) * (x3 * y4 - y3 * x4)) / d;
return pt;
}
else
return Point2f(-1, -1);
}
void sortCorners(std::vector<Point2f>& corners, Point2f center)
{
std::vector<Point2f> top, bot;
for (int i = 0; i < corners.size(); i++)
{
if (corners[i].y < center.y)
top.push_back(corners[i]);
else
bot.push_back(corners[i]);
}
Point2f tl = top[0].x > top[1].x ? top[1] : top[0];
Point2f tr = top[0].x > top[1].x ? top[0] : top[1];
Point2f bl = bot[0].x > bot[1].x ? bot[1] : bot[0];
Point2f br = bot[0].x > bot[1].x ? bot[0] : bot[1];
corners.clear();
corners.push_back(tl);
corners.push_back(tr);
corners.push_back(br);
corners.push_back(bl);
}
Mat src;
int main()
{
VideoCapture cap("C://pic/way.mp4");
while ( cap.isOpened() )
{
cap >> src;
//Mat src = imread("D:\\11.jpg");
if (src.empty())
return -1;
Mat bw;
cvtColor(src, bw, CV_BGR2GRAY);
blur(bw, bw, Size(3, 3));
Canny(bw, bw, 150, 100, 3);
std::vector<Vec4i> lines;
HoughLinesP(bw, lines, 1, CV_PI/180, 70, 30, 10);
// Expand the lines
for (int i = 0; i < lines.size(); i++)
{
Vec4i v = lines[i];
lines[i][0] = 0;
lines[i][1] = ((float)v[1] - v[3]) / (v[0] - v[2]) * -v[0] + v[1];
lines[i][2] = src.cols;
lines[i][3] = ((float)v[1] - v[3]) / (v[0] - v[2]) * (src.cols - v[2]) + v[3];
}
std::vector<Point2f> corners;
for (int i = 0; i < lines.size(); i++)
{
for (int j = i+1; j < lines.size(); j++)
{
Point2f pt = computeIntersect(lines[i], lines[j]);
if (pt.x >= 0 && pt.y >= 0)
corners.push_back(pt);
}
}
std::vector<Point2f> approx;
approxPolyDP(Mat(corners), approx, arcLength(Mat(corners), true) * 0.02, true);
// Get mass center
for (int i = 0; i < corners.size(); i++)
center += corners[i];
center *= (1. / corners.size());
sortCorners(corners, center);
Mat dst = src.clone();
// Draw lines
for (int i = 0; i < lines.size(); i++)
{
Vec4i v = lines[i];
line(dst, Point(v[0], v[1]), Point(v[2], v[3]), CV_RGB(0,255,0));
}
// Draw corner points
circle(dst, corners[3], 3, CV_RGB(255,255,255), 2);
cout << "Detected " << corners[3] << "\n";
// Draw mass center
circle(dst, center, 3, CV_RGB(255,255,0), 2);
Mat quad = Mat::zeros(300, 220, CV_8UC3);
std::vector<Point2f> quad_pts;
quad_pts.push_back(Point2f(0, 0));
quad_pts.push_back(Point2f(quad.cols, 0));
quad_pts.push_back(Point2f(quad.cols, quad.rows));
quad_pts.push_back(Point2f(0, quad.rows));
Mat transmtx = getPerspectiveTransform(corners, quad_pts);
warpPerspective(src, quad, transmtx, quad.size());
imshow("image", bw);
imshow("images", dst);
if (waitKey(27) >= 0) break;
}
return 0;
}

Check this out. Hope it helps; let me know.
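To draw a straight line through the center of each frame, and to thin out the green Hough lines before intersecting them, here is a minimal sketch. It assumes it runs inside your capture loop, on the dst frame you already pass to imshow; the 20-degree cutoff is an arbitrary choice, not a fixed rule:
// 1) Draw a vertical and a horizontal line through the frame center.
line(dst, Point(dst.cols / 2, 0), Point(dst.cols / 2, dst.rows), Scalar(0, 0, 255), 2);
line(dst, Point(0, dst.rows / 2), Point(dst.cols, dst.rows / 2), Scalar(0, 0, 255), 2);
// 2) Keep only clearly sloped segments, since only the slanted road edges
//    converge on the vanishing point. Run this right after HoughLinesP and
//    before the "Expand the lines" loop.
std::vector<Vec4i> sloped;
for (size_t i = 0; i < lines.size(); i++)
{
    Vec4i v = lines[i];
    double ang = atan2((double)(v[3] - v[1]), (double)(v[2] - v[0])) * 180.0 / CV_PI;
    if (fabs(ang) > 20.0 && fabs(ang) < 160.0) // drop near-horizontal segments
        sloped.push_back(v);
}
lines.swap(sloped);
With fewer, cleaner lines, the intersection points collected in corners should cluster much more tightly around the vanishing point.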

Related

Extracting the information of angle and axis

Here I have code that extracts the orientation of an object in an image. I am new to OpenCV and C++, but I need to get this done.
My question is: how do I extract and write out the angle and axis information in this code?
#include "pch.h"
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>
using namespace std;
using namespace cv;
// Function declarations
void drawAxis(Mat&, Point, Point, Scalar, const float);
double getOrientation(const vector<Point> &, Mat&);
void drawAxis(Mat& img, Point p, Point q, Scalar colour, const float scale = 0.2)
{
double angle = atan2((double)p.y - q.y, (double)p.x - q.x); // angle in radians
double hypotenuse = sqrt((double)(p.y - q.y) * (p.y - q.y) + (p.x - q.x) * (p.x - q.x));
// Here we lengthen the arrow by a factor of scale
q.x = (int)(p.x - scale * hypotenuse * cos(angle));
q.y = (int)(p.y - scale * hypotenuse * sin(angle));
line(img, p, q, colour, 1, LINE_AA);
// create the arrow hooks
p.x = (int)(q.x + 9 * cos(angle + CV_PI / 4));
p.y = (int)(q.y + 9 * sin(angle + CV_PI / 4));
line(img, p, q, colour, 1, LINE_AA);
p.x = (int)(q.x + 9 * cos(angle - CV_PI / 4));
p.y = (int)(q.y + 9 * sin(angle - CV_PI / 4));
line(img, p, q, colour, 1, LINE_AA);
}
double getOrientation(const vector<Point> &pts, Mat &img)
{
//Construct a buffer used by the pca analysis
int sz = static_cast<int>(pts.size());
Mat data_pts = Mat(sz, 2, CV_64F);
for (int i = 0; i < data_pts.rows; i++)
{
data_pts.at<double>(i, 0) = pts[i].x;
data_pts.at<double>(i, 1) = pts[i].y;
}
//Perform PCA analysis
PCA pca_analysis(data_pts, Mat(), PCA::DATA_AS_ROW);
//Store the center of the object
Point cntr = Point(static_cast<int>(pca_analysis.mean.at<double>(0, 0)),
static_cast<int>(pca_analysis.mean.at<double>(0, 1)));
//Store the eigenvalues and eigenvectors
vector<Point2d> eigen_vecs(2);
vector<double> eigen_val(2);
for (int i = 0; i < 2; i++)
{
eigen_vecs[i] = Point2d(pca_analysis.eigenvectors.at<double>(i, 0),
pca_analysis.eigenvectors.at<double>(i, 1));
eigen_val[i] = pca_analysis.eigenvalues.at<double>(i);
}
// Draw the principal components
circle(img, cntr, 3, Scalar(255, 0, 255), 2);
Point p1 = cntr + 0.02 * Point(static_cast<int>(eigen_vecs[0].x * eigen_val[0]), static_cast<int>(eigen_vecs[0].y * eigen_val[0]));
Point p2 = cntr - 0.02 * Point(static_cast<int>(eigen_vecs[1].x * eigen_val[1]), static_cast<int>(eigen_vecs[1].y * eigen_val[1]));
drawAxis(img, cntr, p1, Scalar(0, 255, 0), 1);
drawAxis(img, cntr, p2, Scalar(255, 255, 0), 5);
double angle = atan2(eigen_vecs[0].y, eigen_vecs[0].x); // orientation in radians
return angle;
}
int main(int argc, char** argv)
{
// Load image
CommandLineParser parser(argc, argv, "{#input | joint2.bmp | input image}");
parser.about("This program demonstrates how to use OpenCV PCA to extract the orientation of an object.\n");
parser.printMessage();
Mat src = imread(parser.get<String>("#input"));
// Check if image is loaded successfully
if (src.empty())
{
cout << "Problem loading image!!!" << endl;
return EXIT_FAILURE;
}
imshow("src", src);
// Convert image to grayscale
Mat gray;
cvtColor(src, gray, COLOR_BGR2GRAY);
// Convert image to binary
Mat bw;
threshold(gray, bw, 200, 255, THRESH_BINARY | THRESH_OTSU);
// Find all the contours in the thresholded image
vector<vector<Point> > contours;
findContours(bw, contours, RETR_EXTERNAL, CHAIN_APPROX_NONE);
for (size_t i = 0; i < contours.size(); i++)
{
// Calculate the area of each contour
double area = contourArea(contours[i]);
// Ignore contours that are too small or too large
if (area < 1e2 || 1e5 < area) continue;
// Draw each contour only for visualisation purposes
drawContours(src, contours, static_cast<int>(i), Scalar(0, 0, 255), 2);
// Find the orientation of each shape
getOrientation(contours[i], src);
}
imshow("output", src);
waitKey();
return 0;
}
Here is the image of the object:
And here is the result:
As you can see, it finds the orientation correctly, but I need the angle, and which arrow is which axis, to be written out.
I will be very grateful if someone knows how to do it!
EDIT: I have figured out how to find the information about the center, area, and the angle.
/// Get the moments
vector<Moments> mu(contours.size());
for (size_t i = 0; i < contours.size(); i++)
{
mu[i] = moments(contours[i]);
}
/// Get the mass centers
vector<Point2f> mc(contours.size());
for (size_t i = 0; i < contours.size(); i++)
{
//add 1e-5 to avoid division by zero
mc[i] = Point2f(static_cast<float>(mu[i].m10 / (mu[i].m00 + 1e-5)),
static_cast<float>(mu[i].m01 / (mu[i].m00 + 1e-5)));
}
imshow("output", src);
cout << "\t Info: Area and angle \n";
for (size_t i = 0; i < contours.size(); i++)
{
cout << " * Contour[" << i << "] - Center: "<< mc[i]
<< " - Area: " << contourArea(contours[i]) << " - Angle: " << getOrientation(contours[i],src)*180/CV_PI << endl;
}
But I still don't know how to denote which arrow is which axis in the image.
So I have figured out everything I need (almost).
Here is the final code:
#include "pch.h"
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>
using namespace std;
using namespace cv;
// Function declarations
void drawAxis(Mat&, Point, Point, Scalar, const float);
double getOrientation(const vector<Point> &, Mat&);
string s = "";
void drawAxis(Mat& img, Point p, Point q, Scalar colour, const float scale = 0.2)
{
double angle = atan2((double)p.y - q.y, (double)p.x - q.x); // angle in radians
double hypotenuse = sqrt((double)(p.y - q.y) * (p.y - q.y) + (p.x - q.x) * (p.x - q.x));
// Here we lengthen the arrow by a factor of scale
q.x = (int)(p.x - scale * hypotenuse * cos(angle));
q.y = (int)(p.y - scale * hypotenuse * sin(angle));
line(img, p, q, colour, 1, LINE_AA);
// create the arrow hooks
p.x = (int)(q.x + 9 * cos(angle + CV_PI / 4));
p.y = (int)(q.y + 9 * sin(angle + CV_PI / 4));
line(img, p, q, colour, 1, LINE_AA);
p.x = (int)(q.x + 9 * cos(angle - CV_PI / 4));
p.y = (int)(q.y + 9 * sin(angle - CV_PI / 4));
line(img, p, q, colour, 1, LINE_AA);
}
double getOrientation(const vector<Point> &pts, Mat &img)
{
//Construct a buffer used by the pca analysis
int sz = static_cast<int>(pts.size());
Mat data_pts = Mat(sz, 2, CV_64F);
for (int i = 0; i < data_pts.rows; i++)
{
data_pts.at<double>(i, 0) = pts[i].x;
data_pts.at<double>(i, 1) = pts[i].y;
}
//Perform PCA analysis
PCA pca_analysis(data_pts, Mat(), PCA::DATA_AS_ROW);
//Store the center of the object
Point cntr = Point(static_cast<int>(pca_analysis.mean.at<double>(0, 0)),
static_cast<int>(pca_analysis.mean.at<double>(0, 1)));
//Store the eigenvalues and eigenvectors
vector<Point2d> eigen_vecs(2);
vector<double> eigen_val(2);
for (int i = 0; i < 2; i++)
{
eigen_vecs[i] = Point2d(pca_analysis.eigenvectors.at<double>(i, 0),
pca_analysis.eigenvectors.at<double>(i, 1));
eigen_val[i] = pca_analysis.eigenvalues.at<double>(i);
}
// Draw the principal components
circle(img, cntr, 3, Scalar(255, 0, 255), 2);
Point p1 = cntr + 0.01 * Point(static_cast<int>(eigen_vecs[0].x * eigen_val[0]), static_cast<int>(eigen_vecs[0].y * eigen_val[0]));
Point p2 = cntr - 0.005 * Point(static_cast<int>(eigen_vecs[1].x * eigen_val[1]), static_cast<int>(eigen_vecs[1].y * eigen_val[1]));
drawAxis(img, cntr, p1, Scalar(0, 255, 0), 1);
putText(img, s = "Y-axis", p1, cv::FONT_HERSHEY_COMPLEX_SMALL, 1, cv::Scalar(255, 0, 100));
drawAxis(img, cntr, p2, Scalar(255, 255, 0), 5);
putText(img, s = "X-axis", p2/1.1 , cv::FONT_HERSHEY_COMPLEX_SMALL, 1, cv::Scalar(255, 0, 255));
double angle = atan2(eigen_vecs[0].y, eigen_vecs[0].x); // orientation in radians
return angle;
}
int main(int argc, char** argv)
{
// Load image
CommandLineParser parser(argc, argv, "{#input | circle3.bmp | input image}");
parser.about("This program demonstrates how to use OpenCV PCA to extract the orientation of an object.\n");
parser.printMessage();
Mat src = imread(parser.get<String>("#input"));
// Check if image is loaded successfully
if (src.empty())
{
cout << "Problem loading image!!!" << endl;
return EXIT_FAILURE;
}
imshow("src", src);
// Convert image to grayscale
Mat gray;
cvtColor(src, gray, COLOR_BGR2GRAY);
// Convert image to binary
Mat bw;
threshold(gray, bw, 70, 255, THRESH_BINARY | THRESH_OTSU);
// Find all the contours in the thresholded image
vector<vector<Point> > contours;
findContours(bw, contours, RETR_EXTERNAL, CHAIN_APPROX_NONE);
for (size_t i = 0; i < contours.size(); i++)
{
// Calculate the area of each contour
double area = contourArea(contours[i]);
// Ignore contours that are too small or too large
if (area < 1e2 || 1e5 < area) continue;
// Draw each contour only for visualisation purposes
drawContours(src, contours, static_cast<int>(i), Scalar(0, 0, 255), 2);
// Find the orientation of each shape
getOrientation(contours[i], src);
}
/// Get the moments
vector<Moments> mu(contours.size());
for (size_t i = 0; i < contours.size(); i++)
{
mu[i] = moments(contours[i]);
}
/// Get the mass centers
vector<Point2f> mc(contours.size());
for (size_t i = 0; i < contours.size(); i++)
{
//add 1e-5 to avoid division by zero
mc[i] = Point2f(static_cast<float>(mu[i].m10 / (mu[i].m00 + 1e-5)),
static_cast<float>(mu[i].m01 / (mu[i].m00 + 1e-5)));
}
// Label each contour with its index. This loop was previously nested inside
// the loop above, which shadowed i and read mass centers before they were computed.
for (size_t i = 0; i < contours.size(); i++)
{
std::stringstream ss; ss << i;
putText(src, ss.str(), mc[i] + Point2f(10,-10), cv::FONT_HERSHEY_COMPLEX_SMALL, 1, cv::Scalar(255, 0, 255));
}
imshow("output", src);
cout << "\t Info: Area and angle \n";
for (size_t i = 0; i < contours.size(); i++)
{
cout << " * Contour[" << i << "] - Center: "<< mc[i]
<< " - Area: " << contourArea(contours[i]) << " - Angle X: " << getOrientation(contours[i],src)*180/CV_PI << endl;
}
waitKey();
return 0;
}
The only thing I still want to know is how to draw the coordinate system in the corner of the image, because I don't understand the angle results.
The results of the final code: https://imgur.com/l7t9bns
And relevant information: https://imgur.com/OuE79rR
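For that remaining question, here is a minimal sketch of drawing a reference coordinate system in the corner; drawImageAxes is a hypothetical helper, not part of the code above. Keep in mind that in OpenCV images the origin is the top-left corner and y grows downward, which is usually why atan2 angles look inverted compared to the usual math convention:
// Hypothetical helper: draw the image coordinate frame in the top-left corner.
// Origin is top-left; x grows right, y grows DOWN, so positive atan2 angles
// turn clockwise on screen.
void drawImageAxes(cv::Mat& img)
{
    cv::Point origin(30, 30);
    cv::arrowedLine(img, origin, origin + cv::Point(40, 0), cv::Scalar(0, 0, 255), 2); // +x
    cv::arrowedLine(img, origin, origin + cv::Point(0, 40), cv::Scalar(0, 255, 0), 2); // +y
    cv::putText(img, "x", origin + cv::Point(45, 5), cv::FONT_HERSHEY_COMPLEX_SMALL, 1, cv::Scalar(0, 0, 255));
    cv::putText(img, "y", origin + cv::Point(-8, 55), cv::FONT_HERSHEY_COMPLEX_SMALL, 1, cv::Scalar(0, 255, 0));
}
Calling drawImageAxes(src) just before imshow("output", src) would make it visible why an arrow pointing up-right reports a negative angle.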

Painterly rendering, clipping line: I have an error

I'm making a painterly rendering, and now I'm working on the line clipping part.
But I got this error (the assertion condition from Mat::at that fails):
(unsigned)(pt.x*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels())
And
template<typename _Tp> inline const _Tp& Mat::at(int i0, int i1) const
{
CV_DbgAssert( dims <= 2 && data && (unsigned)i0 < (unsigned)size.p[0] &&
(unsigned)(i1*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels()) &&
CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1());
return ((const _Tp*)(data + step.p[0]*i0))[i1];
}
Maybe the error is in Lineclipping().
Please suggest another good way to clip the line.
This is my code. I'm just a student, so my coding skill is very much at beginner level.
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <sstream>
#include <cmath>
#include <stdio.h>
#include <cstdlib>
#include <time.h>
#include <random>
using namespace cv;
using namespace std;
random_device rd;
mt19937_64 rng(rd());
double PI = 3.141592;
int perturbLength = (rand() % 6) + 1;
int perturbRadius = ((rand() % 5) + 0) / 10;
int perturbAngle = (rand() % 15) + (-15);
int Maxlength = 10 - perturbLength;
int radius = 2 - perturbRadius;
int angle = 45 - perturbAngle;
double theta = angle*(PI / 180);
void Lineclipping(int x, int y, double theta, int len, Point2d& pt1, Point2d& pt2, Mat& EdgeMap)
{
double length = ceil(len);
float detectPT = len / length;
for (int i = detectPT; i <= len;)
{
Point2d Mpt1(x + length*cos(theta), y + length*sin(theta));
if (EdgeMap.at<uchar>(Mpt1.y, Mpt1.x) > 0)
{
pt1.x = Mpt1.x;
pt1.y = Mpt1.y;
}
else if (i == length)
{
pt1.x = Mpt1.x;
pt1.y = Mpt1.y;
}
i = i + detectPT;
}
for (int i = detectPT; i <= len;)
{
Point2d Mpt2(x - length*cos(theta), y - length*sin(theta));
if (EdgeMap.at<uchar>(Mpt2.y, Mpt2.x) > 0)
{
pt2.x = Mpt2.x;
pt2.y = Mpt2.y;
}
else if (i == length)
{
pt2.x = Mpt2.x;
pt2.y = Mpt2.y;
}
i = i + detectPT;
}
}
Mat EdgeDetect(Mat& referenceimg, Mat& Edge)
{
Mat image = referenceimg.clone();
//Make Edge Map
Mat IntensityImg(image.size(), CV_8U, 255);
Mat sobelx, sobely;
for (int i = 0; i < image.rows; i++)
{
for (int j = 0; j < image.cols; j++)
{
Vec3b intensity = image.at<Vec3b>(i, j); // at(row, col): i is the row, j the column
uchar blue = intensity.val[0];
uchar green = intensity.val[1];
uchar red = intensity.val[2];
IntensityImg.at<uchar>(i, j) = (30 * red + 59 * green + 11 * blue) / 100;
}
}
GaussianBlur(IntensityImg, IntensityImg, Size(5, 5), 0.1, 0.1);
Sobel(IntensityImg, sobelx, CV_32F, 1, 0);
Sobel(IntensityImg, sobely, CV_32F, 0, 1);
Mat magnitudeXY = abs(sobelx) + abs(sobely); // |dx| + |dy| approximates the gradient magnitude (the original comma expression did not compile)
magnitudeXY.convertTo(Edge, CV_8U);
Mat mask(3, 3, CV_8UC1, 1);
morphologyEx(Edge, Edge, MORPH_ERODE, mask);
for (int i = 0; i < image.rows; i++)
{
for (int j = 0; j < image.cols; j++)
{
Edge.at<uchar>(i, j) = (Edge.at<uchar>(i, j) > 20 ? 255 : 0); // at(row, col)
}
}
imshow("intensity", Edge);
return Edge;
}
void paint(Mat &image, int snum)
{
Mat Edge;
EdgeDetect(image, Edge);
for (int n = 0; n < snum; n++)
{
int x = rand() % image.cols;
int y = rand() % image.rows;
if (image.channels() == 1)
{
image.at<uchar>(y, x) = 255; // at(row, col) == at(y, x)
}
else if (image.channels() == 3)
{
int length = Maxlength / 2;
Point2d pt1(x + length*cos(theta), y + length*sin(theta));
Point2d pt2(x - length*cos(theta), y - length*sin(theta));
Lineclipping(x, y, theta, length, pt1, pt2, Edge); // pt1/pt2 are declared just above; fpt1/fpt2 did not exist
//draw line
Scalar color(image.at<Vec3b>(y, x)[0], image.at<Vec3b>(y, x)[1], image.at<Vec3b>(y, x)[2]);
line(image, pt1, pt2, color, radius);
}
}
}
int main()
{
Mat Img = imread("fruit.jpg", IMREAD_COLOR);
CV_Assert(Img.data);
Mat resultImage = Img.clone();
Mat sobel = Img.clone();
int num = Img.rows*Img.cols;
paint(resultImage, num);
imshow("result", resultImage);
waitKey();
return 0;
}
And this is the error part:
for (int i = detectPT; i <= len;)
{
Point2d Mpt1(x + length*cos(theta), y + length*sin(theta));
if (EdgeMap.at<uchar>(Mpt1.y, Mpt1.x) > 0)
{
pt1.x = Mpt1.x;
pt1.y = Mpt1.y;
}
else if (i == length)
{
pt1.x = Mpt1.x;
pt1.y = Mpt1.y;
}
i = i + detectPT;
}
for (int i = detectPT; i <= len;)
{
Point2d Mpt2(x - length*cos(theta), y - length*sin(theta));
if (EdgeMap.at<uchar>(Mpt2.y, Mpt2.x) > 0)
{
pt2.x = Mpt2.x;
pt2.y = Mpt2.y;
}
else if (i == length)
{
pt2.x = Mpt2.x;
pt2.y = Mpt2.y;
}
i = i + detectPT;
}
Thank you!
Since I can't compile this and run it, I am going to run through a possible execution and show you where you can hit this out of range error.
int perturbLength = (rand() % 6) + 1; // Range is 1 to 6, let's assume 4
int perturbAngle = (rand() % 15) + (-15); // Range is -15 to -1 let's assume -14
int Maxlength = 10 - perturbLength; // 6
int angle = 45 - perturbAngle; // 45 - (-14) = 59
double theta = angle*(PI / 180); // about 1.0297 radians
Now we get into this code inside the paint method:
int x = rand() % image.cols; // Let's assume image.cols - 1
int y = rand() % image.rows; // Let's assume image.rows - 1
Inside of paint we will reach this code:
int length = Maxlength / 2; // Maxlength is 6 so this is 3
Lineclipping(x, y, theta, length, pt1, pt2, Edge);
Which leads to the Lineclipping method and here we get a problem:
Point2d Mpt1(x + length*cos(theta), y + length*sin(theta));
if (EdgeMap.at<uchar>(Mpt1.y, Mpt1.x) > 0)
This is the problem. Remember, x is image.cols - 1. Now we perform the operation x + length * cos(theta), which is (image.cols - 1) + 3 * cos(1.0297). 3 * cos(1.0297) is about 1.55, so the sum is about image.cols + 0.55; whether it is truncated or rounded, you end up at image.cols or beyond, and the last valid column index is image.cols - 1. Either way we go beyond the bounds of the array (and the same can happen with y against image.rows).
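A simple guard, as a sketch: bounds-check the sample point before calling at. The helper name edgeAt is mine, not from the code above; it assumes EdgeMap is a single-channel 8-bit Mat, as in the question:
// Sketch: read EdgeMap only when the point lies inside the image.
bool edgeAt(const cv::Mat& EdgeMap, double x, double y)
{
    int ix = cvRound(x), iy = cvRound(y);
    if (ix < 0 || iy < 0 || ix >= EdgeMap.cols || iy >= EdgeMap.rows)
        return false;                     // treat out-of-image samples as "no edge"
    return EdgeMap.at<uchar>(iy, ix) > 0; // at(row, col) == at(y, x)
}
Then EdgeMap.at<uchar>(Mpt1.y, Mpt1.x) > 0 becomes edgeAt(EdgeMap, Mpt1.x, Mpt1.y), and the out-of-range access can no longer happen.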

OpenCV 3.0 error: 0xC0000005: Access violation reading location 0x00000000

My code works just fine in debug mode (here is the output). Once I tried it in release mode I got this error:
Unhandled exception at 0x5E3ADF2C (msvcp120d.dll) in Project4.exe: 0xC0000005: Access violation reading location 0x00000000.
From what I read on the internet, I think it has something to do with an uninitialized pointer or some other variable. My bet is on vector<vector<Point>> contours; in the findSquares function. I tried to initialize it with everything I could think of, but no luck so far.
I am using Visual Studio 2013 with OpenCV 3.0.0, the x86 version. Here is the complete code:
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include <sstream>
#include "opencv2/imgproc/imgproc.hpp"
#include <math.h>
#include <string.h>
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#include <algorithm>
using namespace cv;
using namespace std;
// helper function:
// finds a cosine of angle between vectors
// from pt0->pt1 and from pt0->pt2
static double angle(Point pt1, Point pt2, Point pt0)
{
double dx1 = pt1.x - pt0.x;
double dy1 = pt1.y - pt0.y;
double dx2 = pt2.x - pt0.x;
double dy2 = pt2.y - pt0.y;
return (dx1*dx2 + dy1*dy2) / sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) +
1e-10);
}
// returns sequence of squares detected on the image.
// the sequence is stored in the specified memory storage
static void findSquares(const Mat& image, vector<vector<Point> >& squares)
{
squares.clear();
vector<vector<Point>> contours;
// find white and yellow patch
Mat grayscal, grayscal1;
cvtColor(image, grayscal, CV_BGR2GRAY);
// try several threshold levels
for (int l = 0; l < 1; l++)
{
Mat imgThresholded, imgThresholded1, imgThresholded2;
cv::adaptiveThreshold(grayscal, grayscal1, 255,
cv::ADAPTIVE_THRESH_MEAN_C, cv::THRESH_BINARY, 11, 0);
inRange(grayscal, Scalar(100, 100, 100), Scalar(255, 255, 255),
imgThresholded1);
//morphological closing (fill small holes in the foreground)
//dilate(imgThresholded1, imgThresholded1, getStructuringElement(MORPH_RECT, Size(7, 7)));
erode(imgThresholded1, imgThresholded1,
getStructuringElement(MORPH_RECT, Size(7, 7)));
// find contours and store them all as a list
findContours(imgThresholded1, contours, RETR_LIST, CHAIN_APPROX_SIMPLE);
vector<Point> approx;
// test each contour
for (size_t i = 0; i < contours.size(); i++)
{
// approximate contour with accuracy proportional
// to the contour perimeter
approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]),
true)*0.02, true);
// square contours should have 4 vertices after approximation
// relatively large area (to filter out noisy contours)
// and be convex.
// Note: absolute value of an area is used because
// area may be positive or negative - in accordance with the
// contour orientation
if (approx.size() == 4 &&
fabs(contourArea(Mat(approx))) > 4000 &&
fabs(contourArea(Mat(approx))) < 400000 &&
isContourConvex(Mat(approx)))
{
double maxCosine = 0;
for (int j = 2; j < 5; j++)
{
// find the maximum cosine of the angle between joint edges
double cosine = fabs(angle(approx[j % 4], approx[j - 2],
approx[j - 1]));
maxCosine = MAX(maxCosine, cosine);
}
// if cosines of all angles are small
// (all angles are ~90 degree) then write quandrange
// vertices to resultant sequence
if (maxCosine < 0.07)
squares.push_back(approx);
}
}
cout << "size of squares:" << squares.size() << endl;
}
}
// the function draws all the squares in the image
cv::Mat drawSquares(Mat& image, const vector<vector<Point> >& squares)
{
std::vector<cv::Mat> listOfMatrices, listOfMatrices2;
vector<Point> centers;
int m = listOfMatrices.size();
int n = listOfMatrices2.size();
int q = centers.size();
for (size_t i = 0; i < squares.size(); i++)
{
const Point* p = &squares[i][0];
int n = (int)squares[i].size();
Rect r = boundingRect(Mat(squares[i]));
cv::Size inflationSize(2, 2);
r -= inflationSize;
r.x = r.x + r.width / 4;
r.y = r.y + r.height / 4;
r.width = r.width / 2;
r.height = r.height / 2;
//dont detect the border
//Mat roi = image(r);
cv::Mat Image(image);
cv::Mat croppedImage = Image(Rect(r.x, r.y, r.width - 4, r.height - 4));
Point center(r.x + r.width / 2, r.y + r.height / 2);
centers.push_back(center);
q++;
listOfMatrices.push_back(croppedImage);
m++;
}
int maxbleu = 0;
Scalar tempVal0 = mean(listOfMatrices[0]);
double myMAtMeanB0 = tempVal0.val[0];
for (int j = 1; j < q; j++)
{
Scalar tempVal = mean(listOfMatrices[j]);
double myMAtMeanB = tempVal.val[0];
if (myMAtMeanB > myMAtMeanB0)
{
myMAtMeanB0 = myMAtMeanB;
maxbleu = j;
}
}
int maxdistance = 0, indicemax = 0, resmax = 0;
for (int i = 0; i < q; i++)
{
//listOfMatrices[i].release();
double xDiff = abs(centers[maxbleu].x - centers[i].x);
double yDiff = abs(centers[maxbleu].y - centers[i].y);
resmax = sqrt((xDiff * xDiff) + (yDiff * yDiff));
if (i == maxbleu)
{
continue;
}
else if (resmax>maxdistance)
{
maxdistance = resmax;
indicemax = i;
}
}
int mindistance = 1000, indicemin = 0, resmin = 0;
for (int i = 0; i < q; i++)
{
//listOfMatrices[i].release();
double xDiff = abs(centers[maxbleu].x - centers[i].x);
double yDiff = abs(centers[maxbleu].y - centers[i].y);
resmin = sqrt((xDiff * xDiff) + (yDiff * yDiff));
if (i == maxbleu)
{
continue;
}
else if (resmin<mindistance)
{
mindistance = resmin;
indicemin = i;
}
}
cout << "cyan" << centers[indicemax] << endl;
cout << "white" << centers[maxbleu] << endl;
cout << "gray" << centers[indicemin] << endl;
vector<Point> centersV2;
for (int j = 0; j < 4; j++)
{
for (int i = 0; i < 6; i++)
{
if (abs(centers[maxbleu].x - centers[indicemax].x) <
abs(centers[maxbleu].y - centers[indicemax].y))
{
if (centers[maxbleu].y - centers[indicemax].y > 0)
{
if (5 * abs(centers[maxbleu].x - centers[indicemin].x) > 30)
{
Point tmpV2(centers[maxbleu].x - i*(centers[maxbleu].x -
centers[indicemin].x) - j*(centers[maxbleu].x - centers[indicemax].x) / 3.3,
centers[maxbleu].y - i*(abs(centers[maxbleu].y - centers[indicemax].y)) /
5);
centersV2.push_back(tmpV2);
}
else {
Point tmpV2(centers[maxbleu].x - i*(centers[maxbleu].x -
centers[indicemin].x) - j*(centers[maxbleu].x - centers[indicemax].x) / 3,
centers[maxbleu].y - i*(abs(centers[maxbleu].y - centers[indicemax].y)) /
5);
centersV2.push_back(tmpV2);
}
}
else {
if (5 * abs(centers[maxbleu].x - centers[indicemin].x) > 30)
{
Point tmpV2(centers[maxbleu].x - i*
(abs(centers[maxbleu].x - centers[indicemin].x)) - j*(abs(centers[maxbleu].x
- centers[indicemax].x)) / 3.3, centers[maxbleu].y + i*
(abs(centers[maxbleu].y - centers[indicemax].y) / 5));
centersV2.push_back(tmpV2);
}
else {
Point tmpV2(centers[maxbleu].x - i*
(abs(centers[maxbleu].x - centers[indicemin].x)) - j*
(abs(centers[maxbleu].x - centers[indicemax].x)) / 3, centers[maxbleu].y
+ i*(abs(centers[maxbleu].y - centers[indicemax].y) / 5));
centersV2.push_back(tmpV2);
}
}
}
else {
if (centers[maxbleu].x - centers[indicemin].x > 0)
{
if (5 * abs(centers[maxbleu].y - centers[indicemin].y) > 30)
{
Point tmpV2(centers[maxbleu].x - i*
(abs(centers[maxbleu].x - centers[indicemax].x) / 5) + i,
centers[indicemin].y - i*(centers[maxbleu].y - centers[indicemin].y) - j*
(centers[maxbleu].y - centers[indicemax].y) / 3.3);
centersV2.push_back(tmpV2);
}
else {
Point tmpV2(centers[maxbleu].x - i*
(abs(centers[maxbleu].x - centers[indicemax].x) / 5) + i,
centers[indicemin].y - i*(centers[maxbleu].y - centers[indicemin].y) - j*
(centers[maxbleu].y - centers[indicemax].y) / 3);
centersV2.push_back(tmpV2);
}
}
else {
if (5 * abs(centers[maxbleu].y - centers[indicemin].y) > 30)
{
Point tmpV2(centers[maxbleu].x + i*
(abs(centers[maxbleu].x - centers[indicemax].x) / 5) + i,
centers[maxbleu].y - i*((centers[maxbleu].y - centers[indicemin].y)) - j*
(centers[maxbleu].y - centers[indicemax].y) / 3.3);
centersV2.push_back(tmpV2);
}
else
{
Point tmpV2(centers[maxbleu].x + i*
(abs(centers[maxbleu].x - centers[indicemax].x) / 5) + i,
centers[maxbleu].y - i*((centers[maxbleu].y - centers[indicemin].y)) - j*
(centers[maxbleu].y - centers[indicemax].y) / 3);
centersV2.push_back(tmpV2);
}
}
}
}
}
for (int i = 0; i < centersV2.size(); i++)
{
cv::Mat IImage;
image.copyTo(IImage);
cv::Mat roi = IImage(Rect(centersV2[i].x -
0.66*listOfMatrices[maxbleu].size().width / 2, centersV2[i].y -
0.66*listOfMatrices[maxbleu].size().height / 2,
0.66*listOfMatrices[maxbleu].size().width,
0.66*listOfMatrices[maxbleu].size().height));
listOfMatrices2.push_back(roi);
n++;
cout << "centre de patchs :" << i + 1 << " :est:" << centersV2[i] << "
colour :" << mean(listOfMatrices2[i]) << endl;
rectangle(image, Point(centersV2[i].x -
0.66*listOfMatrices[maxbleu].size().width, centersV2[i].y -
0.66*listOfMatrices[maxbleu].size().height), Point(centersV2[i].x +
0.66*listOfMatrices[maxbleu].size().width, centersV2[i].y +
0.66*listOfMatrices[maxbleu].size().height), Scalar(0, 255, 0), 4, 8,
0);
//ellipse(image, centersV2[i], Size(0.66*listOfMatrices[maxbleu].size().width, 0.66*listOfMatrices[maxbleu].size().height), 0, 0, 360, Scalar(0, 255, 0), 2, LINE_AA);
stringstream numero;
numero << i + 1;
putText(image, numero.str(), Point(centersV2[i].x - 15, centersV2[i].y +
5), 5, 2, Scalar(0, 0, 255), 4, 8, false);
}
return image; // drawSquares is declared to return Mat; falling off the end without a return is undefined behavior
}
int main(int /*argc*/, char** /*argv*/)
{
static const char* filename[] = { "E:/Zouhair Jimmouh-Colorimetrie/Example Etudes/Exemple2/AS1606001A-008-R045-HP-01.jpg", 0 };
vector<vector<Point> > Squares;
for (int i = 0; filename[i] != 0; i++)
{
Mat Image = imread(filename[i], 1);
if (Image.empty())
{
cout << "Couldn't load " << endl;
continue; // skip this file instead of running on an empty image
}
Mat blackTOwhite;
findSquares(Image, Squares);
(drawSquares(Image, Squares)).copyTo(blackTOwhite);
//show image with detected patches
namedWindow("RECT", CV_WINDOW_NORMAL);
imshow("RECT", Image);
int c = waitKey();
if ((char)c == 27)
break;
}
return 0;
}
Any help is appreciated! I've been struggling with this for days now.
You're mixing debug and release libraries in your linking settings.
From the comments to the question:
I linked both opencv_world300d.lib and opencv_world300.lib in Additional Dependencies.
You shouldn't link to both of them. Link to:
opencv_world300d.lib in Debug
opencv_world300.lib in Release
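As a sketch of what that looks like in the project settings (menu path from memory, so treat it as an assumption):
Project Properties -> Linker -> Input -> Additional Dependencies
Debug configuration only:    opencv_world300d.lib
Release configuration only:  opencv_world300.lib
The d suffix marks the debug build of the library. Mixing the two pulls both C++ runtimes into one process; note the msvcp120d.dll in the error message, a debug runtime loaded by a release build, which commonly produces exactly this kind of access violation.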

OpenCV: How to iterate each pixel in a specific area of an image

I have an image (see below) that is marked by two parallel green lines. This image is read into an OpenCV Mat in C++; the slope of the green lines and their distances to the image center are already known.
Now I want to iterate over all the pixels in the area between these two green lines. How can I solve this problem? It would be very helpful if somebody could give me a code example.
Many thanks.
The equation of a line in slope-intercept form is the following:
y = mx + b
Since you have two of them you should have two slope formulas:
y1 = m1x1 + b1
y2 = m2x2 + b2
m1, m2, b1, b2 should be known.
All you have to do is start at y = 0 and, for each row y from top to bottom, iterate from x1 to x2.
Example code:
for (int y = 0; y < imageHeight; ++y)
{
int x1 = (y - b1) / m1;
int x2 = (y - b2) / m2;
for (int x = x1; x < x2; ++x)
{
// Do something.
}
}
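Here is a slightly fuller sketch with the details the loop above glosses over: ordering the two x values, clamping them to the image, and the fact that solving y = mx + b for x requires non-horizontal lines (m != 0). As in the answer, m1, b1, m2, b2 are assumed known:
#include <opencv2/core.hpp>
#include <algorithm>
// Visit every pixel between the lines y = m1*x + b1 and y = m2*x + b2.
void iterateBetweenLines(cv::Mat& img, double m1, double b1, double m2, double b2)
{
    for (int y = 0; y < img.rows; ++y)
    {
        int xa = cvRound((y - b1) / m1);
        int xb = cvRound((y - b2) / m2);
        int x1 = std::max(0, std::min(xa, xb));            // left boundary, clamped
        int x2 = std::min(img.cols - 1, std::max(xa, xb)); // right boundary, clamped
        for (int x = x1; x <= x2; ++x)
        {
            cv::Vec3b& pixel = img.at<cv::Vec3b>(y, x);    // at(row, col)
            // Do something with pixel.
        }
    }
}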
You could use LineIterator.
Take a look at the sample code below:
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
using namespace std;
using namespace cv;
int main( int, char** argv )
{
Mat src;
src = imread( argv[1] );
if( src.empty() )
{
return -1;
}
Point pt1 = Point( (src.cols / 5) + (src.cols / 8), src.rows);
Point pt2 = Point( (src.cols ) - (src.cols / 8), 0);
LineIterator it(src, pt1, pt2, 8);
for(int y = 0; y < it.count; y++, ++it)
{
Point it_pos = it.pos();
for(int x = it_pos.x; x < it_pos.x+(src.cols / 5) && x < src.cols; x++) // logical &&, not bitwise &
{
Vec3b & pixel = src.at<Vec3b>(it_pos.y,x);
pixel = pixel * 1.3;
pixel[0] = 0;
}
}
imshow("result", src );
waitKey(0);
return 0;
}
Result images:

Setting transparency in OpenCV not working

I am setting transparency for a four-channel Mat like this (based on some calculations). But when I show the image in a window, nothing about the image changes. Any help would be a great support.
void feather_touch(Rect enclosingRect, Mat frame){
Point center(frame.size().width * 0.5, frame.size().height * 0.5);
int inclussive_circle_radius = (sqrt((frame.cols * frame.cols + frame.rows * frame.rows))) / 2;
for(int i = 0; i < frame.rows; i++){
for(int j = 0; j < frame.cols; j++){
Point point(j, i); // Point is (x, y): j is the column (x), i the row (y)
if(!inRect(point, enclosingRect)){
Vec4b channels = frame.at<Vec4b>(i, j);
int dx = center.x - point.x;
int dy = center.y - point.y;
int dist = sqrt((dx * dx) + (dy * dy));
float alpha = (float)dist/(float)inclussive_circle_radius;
int a = (int)((1 - alpha) * 255);
frame.at<Vec4b>(i, j)[3] = a;
}
}
}
}
bool inRect(cv::Point p,Rect rect) {
return p.x >= rect.x && p.x <= (rect.x + rect.width) && p.y >= rect.y && p.y <= (rect.y + rect.height);
}
I got the answer though: imshow in OpenCV doesn't support transparency. I replaced it with the addWeighted functionality. Now my function looks like this:
float alpha = ((float)dist/(float)inclussive_circle_radius);
//int a = (int)((1 - alpha) * 255);
//frame.at<Vec4b>(i, j)[3] = a;
Rect rect(j, i, 1, 1);
Mat mat = frame(rect);
Mat sub = layer(rect); // 'layer' is the overlay image from the surrounding code (not shown)
if(dist > (enclosingRect.width*0.5)){
addWeighted(mat, alpha, sub, 1 - alpha, 0, mat);
mat.copyTo(frame(rect));
}else{
sub.copyTo(frame(rect));
}
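As a side note, calling addWeighted on 1x1 Rects inside the pixel loop is very slow. Here is a sketch of the same radial feathering done in one pass over the whole image; blendRadial is a hypothetical helper, and it assumes frame and layer are same-size BGR (CV_8UC3) Mats:
#include <cmath>
#include <opencv2/core.hpp>
// Blend 'layer' over 'frame' with alpha growing with distance from the center.
cv::Mat blendRadial(const cv::Mat& frame, const cv::Mat& layer)
{
    CV_Assert(frame.size() == layer.size() && frame.type() == CV_8UC3);
    cv::Point2f center(frame.cols * 0.5f, frame.rows * 0.5f);
    float maxDist = std::sqrt(center.x * center.x + center.y * center.y);
    cv::Mat out(frame.size(), frame.type());
    for (int y = 0; y < frame.rows; ++y)
        for (int x = 0; x < frame.cols; ++x)
        {
            float a = std::hypot(x - center.x, y - center.y) / maxDist; // 0 at center, 1 at corners
            cv::Vec3f f = frame.at<cv::Vec3b>(y, x);
            cv::Vec3f l = layer.at<cv::Vec3b>(y, x);
            out.at<cv::Vec3b>(y, x) = f * (1.0f - a) + l * a;           // saturating cast back to 8-bit
        }
    return out;
}
This avoids allocating two Mat headers per pixel and keeps the blending math in plain floats, which is typically orders of magnitude faster for full-frame video.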