How to detect the pallet on a forklift using findContours - C++

I'm trying to detect the pallet on a forklift, but findContours can't detect the rectangle correctly.
How can I detect the large pallet?
I have tried the Hough transform, but it fails to detect the forklift rectangle, so I'm using findContours instead.
(image: pallet)
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
int main()
{
    cv::Mat input = cv::imread("pallet.jpg");

    // convert to grayscale (you could load as grayscale instead)
    cv::Mat gray;
    cv::cvtColor(input, gray, CV_BGR2GRAY);

    // compute mask (you could use a simple threshold if the image is always as good as the one you provided)
    cv::Mat mask;
    cv::threshold(gray, mask, 0, 255, CV_THRESH_BINARY_INV | CV_THRESH_OTSU);

    // find contours (if always so easy to segment as your image, you could just add the black/rect pixels to a vector)
    std::vector<std::vector<cv::Point> > contours;
    std::vector<cv::Vec4i> hierarchy;
    cv::findContours(mask, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

    /// Draw contours and find biggest contour (if there are other contours in the image, we assume the biggest one is the desired rect)
    // drawing here is only for demonstration!
    int biggestContourIdx = -1;
    float biggestContourArea = 0;
    cv::Mat drawing = cv::Mat::zeros(mask.size(), CV_8UC3);
    for (int i = 0; i < contours.size(); i++)
    {
        cv::Scalar color = cv::Scalar(0, 100, 0);
        drawContours(drawing, contours, i, color, 1, 8, hierarchy, 0, cv::Point());

        float ctArea = cv::contourArea(contours[i]);
        if (ctArea > biggestContourArea)
        {
            biggestContourArea = ctArea;
            biggestContourIdx = i;
        }
    }

    // if no contour found
    if (biggestContourIdx < 0)
    {
        std::cout << "no contour found" << std::endl;
        return 1;
    }

    // compute the rotated bounding rect of the biggest contour! (this is the part that does what you want/need)
    cv::RotatedRect boundingBox = cv::minAreaRect(contours[biggestContourIdx]);
    // one thing to remark: this will compute the OUTER bounding box, so maybe you have to erode/dilate if you want something between the ragged lines

    // draw the rotated rect
    cv::Point2f corners[4];
    boundingBox.points(corners);
    cv::line(drawing, corners[0], corners[1], cv::Scalar(255, 255, 255));
    cv::line(drawing, corners[1], corners[2], cv::Scalar(255, 255, 255));
    cv::line(drawing, corners[2], corners[3], cv::Scalar(255, 255, 255));
    cv::line(drawing, corners[3], corners[0], cv::Scalar(255, 255, 255));

    // display
    cv::imshow("input", input);
    cv::imshow("drawing", drawing);
    cv::waitKey(0);
    cv::imwrite("rotatedRect.png", drawing);
    return 0;
}
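Regarding the remark about the OUTER bounding box in the code above: if you want the box to sit between the ragged lines rather than on the outer edge, one option is to erode the mask before finding the contour. A minimal sketch of that idea (the kernel size and iteration count are assumptions you would tune for your image):

#include <opencv2/opencv.hpp>
#include <vector>

// sketch: compute the rotated rect from an eroded copy of the mask, so the box
// lies a bit inside the ragged outline instead of on it
cv::RotatedRect rotatedRectFromErodedMask(const cv::Mat& mask)
{
    cv::Mat eroded;
    cv::Mat kernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(5, 5));
    cv::erode(mask, eroded, kernel, cv::Point(-1, -1), 1);

    std::vector<std::vector<cv::Point> > contours;
    cv::findContours(eroded, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);

    // keep the biggest contour, same as in the code above
    int biggestIdx = -1;
    double biggestArea = 0;
    for (int i = 0; i < (int)contours.size(); i++)
    {
        double area = cv::contourArea(contours[i]);
        if (area > biggestArea)
        {
            biggestArea = area;
            biggestIdx = i;
        }
    }
    if (biggestIdx < 0)
        return cv::RotatedRect(); // no contour found

    return cv::minAreaRect(contours[biggestIdx]);
}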

Related

Draw mat from contour?

I use OpenCV to recognize contours. Now I want to create a new binary Mat containing all coordinates of this contour.
Canny edge detection applied
Found contours (the red one is the one I'd like to use)
Only the coordinates inside the contour are drawn into the new Mat
This is what I've got so far:
vector<cv::Point> contour; // red marked contour;
cv::Rect boundingBox = cv::boundingRect(contour);
Mat newMat;
vector<cv::Point> insideContour;
for (int i=0; i<contour.size(); i++) {
// get all coordinates inside of contour
// insideContour.push_back(?)
}
for (int y=0; y<boundingBox.height; y++) {
for (int x=0; x<boundingBox.width; x++) {
// newMat
}
}
Any help on how to proceed would be really appreciated because I'm absolutely clueless.
Try this. For simplicity, cv::Point(250, 219) is a point inside the red contour; in reality, use Haar to find the bounding box and its center.
cv::Mat image = imread("Smiley.jpg");
cv::Mat image2 = imread("Smiley2.jpg");
// subtract images and floodfill to prepare red mask
Mat red_contour, red_mask, maskMat, outputMat;
subtract(image2, image, red_contour);
threshold(red_contour, red_mask, 100, 255, THRESH_BINARY);
int filling = cv::floodFill(red_mask, cv::Point(250, 219), cv::Scalar(0, 0, 255), (cv::Rect*)0, cv::Scalar(), cv::Scalar(), 4);
//prepare a grey mask
cv::cvtColor(red_mask, maskMat, CV_BGR2GRAY);
threshold(maskMat, maskMat, 0, 255, THRESH_BINARY);
// use mask to crop original image
image.copyTo(outputMat, maskMat);
cv::namedWindow("Image");
cv::imshow("Image", outputMat);
cv::waitKey();
return 0;
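A simpler alternative, if you already have the red contour as a vector<cv::Point> (as in the question), is to fill that contour into a single-channel mask with drawContours and then use copyTo. A minimal sketch, assuming the contour comes from your own detection step:

#include <opencv2/opencv.hpp>
#include <vector>

int main()
{
    cv::Mat image = cv::imread("Smiley.jpg");
    std::vector<cv::Point> contour; // assumed: the red contour from your detection step

    // fill the contour into a single-channel mask (thickness -1 means filled)
    cv::Mat mask = cv::Mat::zeros(image.size(), CV_8UC1);
    std::vector<std::vector<cv::Point> > contours(1, contour);
    cv::drawContours(mask, contours, 0, cv::Scalar(255), -1);

    // all non-zero mask pixels are the coordinates inside the contour
    std::vector<cv::Point> insideContour;
    cv::findNonZero(mask, insideContour);

    // copy only the masked region of the original image into a new black image
    cv::Mat newMat = cv::Mat::zeros(image.size(), image.type());
    image.copyTo(newMat, mask);

    cv::imshow("newMat", newMat);
    cv::waitKey();
    return 0;
}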

Detect fingers in a hand

I have an image; it is the output of a threshold function.
I want to detect the objects shown above and measure their heights.
My idea is to extract contours and use a convex hull, but my result is not correct.
Does anyone have an idea for this problem?
Regards.
System: Win7 (64-bit), OpenCV 3.1, Visual Studio 2015
My output:
Here is my code:
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
Mat src; Mat src_gray;
int thresh = 100;
int max_thresh = 255;
RNG rng(12345);
/// Function header
void thresh_callback(int, void*);
/** #function main */
int main(int argc, char** argv)
{
/// Load source image and convert it to gray
src = imread("C:/Users/Amin/Desktop/binary.jpg", 1);
/// Convert image to gray and blur it
cvtColor(src, src_gray, CV_BGR2GRAY);
blur(src_gray, src_gray, Size(3, 3));
/// Create Window
char* source_window = "Source";
namedWindow(source_window, CV_WINDOW_AUTOSIZE);
imshow(source_window, src);
createTrackbar(" Threshold:", "Source", &thresh, max_thresh, thresh_callback);
thresh_callback(0, 0);
waitKey(0);
return(0);
}
/** #function thresh_callback */
void thresh_callback(int, void*)
{
Mat src_copy = src.clone();
Mat threshold_output;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
/// Detect edges using Threshold
threshold(src_gray, threshold_output, thresh, 255, THRESH_BINARY);
/// Find contours
findContours(threshold_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
/// Find the convex hull object for each contour
vector<vector<Point> >hull(contours.size());
for (int i = 0; i < contours.size(); i++)
{
convexHull(Mat(contours[i]), hull[i], false);
}
/// Draw contours + hull results
Mat drawing = Mat::zeros(threshold_output.size(), CV_8UC3);
for (int i = 0; i< contours.size(); i++)
{
Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
drawContours(drawing, contours, i, color, 1, 8, vector<Vec4i>(), 0, Point());
drawContours(drawing, hull, i, color, 1, 8, vector<Vec4i>(), 0, Point());
}
/// Show in a window
namedWindow("Hull demo", CV_WINDOW_AUTOSIZE);
imshow("Hull demo", drawing);
}
I achieved this result but don't know how to measure AB.
A is on the contour and B is known.
Thanks.
To measure heights (I think that's what you are asking?) there is little point in using a convex hull, considering this messy output. Instead, I would loop through each contour (1, 2, 3, 4 and 5) and all their points, and detect the widths and heights separately by analysing the X,Y differences. So, while you loop through, let's say, contour 1: if Y is increasing but not X, you can assume it is the height, so count each point. Then, when X starts to increase, assume the height has come to an end. You will want to set a tolerance as the lines are not perfectly straight. Hope this helps.
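A minimal sketch of that idea, assuming the thresholded image from the question and CHAIN_APPROX_NONE so every contour pixel is kept (the file path and the tolerance value are assumptions to tune):

#include <opencv2/opencv.hpp>
#include <algorithm>
#include <cstdlib>
#include <iostream>
#include <vector>

int main()
{
    // the thresholded image from the question (path is an assumption)
    cv::Mat bin = cv::imread("binary.jpg", cv::IMREAD_GRAYSCALE);
    cv::threshold(bin, bin, 128, 255, cv::THRESH_BINARY);

    // CHAIN_APPROX_NONE keeps every contour pixel so we can walk point by point
    std::vector<std::vector<cv::Point> > contours;
    cv::findContours(bin, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_NONE);

    const int tol = 3; // tolerance in X, since the lines are not perfectly straight

    for (size_t i = 0; i < contours.size(); ++i)
    {
        int height = 0;
        size_t runStart = 0; // index where the current near-vertical run began
        for (size_t j = 1; j < contours[i].size(); ++j)
        {
            int drift = std::abs(contours[i][j].x - contours[i][runStart].x);
            if (drift > tol)
            {
                // X started changing: the vertical side has ended
                int runHeight = std::abs(contours[i][j - 1].y - contours[i][runStart].y);
                height = std::max(height, runHeight);
                runStart = j;
            }
        }
        int tailHeight = std::abs(contours[i].back().y - contours[i][runStart].y);
        height = std::max(height, tailHeight);

        std::cout << "contour " << i << ": approx. height " << height << " px" << std::endl;
    }
    return 0;
}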

Detect rectangles drawn on a background image using OpenCV

I'm trying to detect some rectangles (white colored) which are drawn on an image (say, using Paint or some other image editing tool).
As I'm very much a beginner in image processing, I searched the net and the OpenCV sample programs to accomplish the job, but could not get it working perfectly. I'm using the OpenCV C++ library.
Algorithm that I’ve tried
cv::Mat src = cv::imread(argv[1]);
cv::Mat gray;
cv::cvtColor(src, gray, CV_BGR2GRAY);
meanStdDev(gray, mu, sigma);
cv::Mat bw;
cv::Canny(gray, bw, mu.val[0] - sigma.val[0], mu.val[0] + sigma.val[0]);
std::vector<std::vector<cv::Point> > contours;
cv::findContours(bw.clone(), contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
std::vector<cv::Point> approx;
for (int i = 0; i < contours.size(); i++){
cv::approxPolyDP(cv::Mat(contours[i]), approx, cv::arcLength(cv::Mat(contours[i]), true)*0.02, true);
if (approx.size() >= 4 && approx.size() <= 6)
Rect boundRect = boundingRect( Mat(approx) );
rectangle( dst, boundRect.tl(), boundRect.br(), Scalar(255,255,255), 1, 8, 0 );}
Only one rectangle is detected. Can you please guide me or point me to some link for the same?
Input image:
Output image:
I could not compile your code sample because boundRect is declared within the if-block but the rectangle drawing (which accesses boundRect) is outside of the if-block, so I adjusted your code:
int main(int argc, char* argv[])
{
cv::Mat src = cv::imread("C:/StackOverflow/Input/rectangles.png");
cv::Mat dst = src.clone();
cv::Mat gray;
cv::cvtColor(src, gray, CV_BGR2GRAY);
// ADDED: missing declaration of mu and sigma
cv::Scalar mu, sigma;
meanStdDev(gray, mu, sigma);
cv::Mat bw;
cv::Canny(gray, bw, mu.val[0] - sigma.val[0], mu.val[0] + sigma.val[0]);
// ADDED: displaying the canny output
cv::imshow("canny", bw);
std::vector<std::vector<cv::Point> > contours;
cv::findContours(bw.clone(), contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
std::vector<cv::Point> approx;
for (int i = 0; i < contours.size(); i++){
cv::approxPolyDP(cv::Mat(contours[i]), approx, cv::arcLength(cv::Mat(contours[i]), true)*0.02, true);
if (approx.size() >= 4 && approx.size() <= 6)
{
// ADDED: brackets around both lines belonging to the if-block
cv::Rect boundRect = cv::boundingRect(cv::Mat(approx));
cv::rectangle(dst, boundRect.tl(), boundRect.br(), cv::Scalar(255, 255, 255), 3, 8, 0);
}
}
// ADDED: displaying input and results
cv::imshow("input", src);
cv::imshow("dst", dst);
cv::imwrite("C:/StackOverflow/Output/rectangles.png", dst);
cv::waitKey(0);
return 0;
}
With your input image I get this output:
which is probably not what you expected. Look at the Canny output image (it is always good to inspect intermediate results for visual debugging!): there are just too many structures in the image, and contours will cover all of them, so some will be approximated to polygons with 4 to 6 vertices.
Instead you'll have to become a bit smarter. You could try to extract straight lines with cv::HoughLinesP and connect those lines, or you could try to segment the image first by finding white areas (if your rectangles are always white).
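A rough, hedged sketch of the HoughLinesP route (the Canny and Hough thresholds are guesses to tune, and the step of grouping the segments into rectangles is left out):

#include <opencv2/opencv.hpp>
#include <vector>

int main()
{
    cv::Mat src = cv::imread("rectangles.png"); // path is an assumption
    cv::Mat gray, edges;
    cv::cvtColor(src, gray, cv::COLOR_BGR2GRAY);
    cv::Canny(gray, edges, 50, 150);

    // probabilistic Hough transform: returns line segments as (x1, y1, x2, y2)
    std::vector<cv::Vec4i> lines;
    cv::HoughLinesP(edges, lines, 1, CV_PI / 180, 80, 50, 10);

    cv::Mat vis = src.clone();
    for (size_t i = 0; i < lines.size(); ++i)
    {
        cv::line(vis,
                 cv::Point(lines[i][0], lines[i][1]),
                 cv::Point(lines[i][2], lines[i][3]),
                 cv::Scalar(0, 0, 255), 2);
    }
    cv::imshow("lines", vis);
    cv::waitKey(0);
    return 0;
}

Connecting those segments into rectangles is the harder part. The code below takes the second route instead and segments the white areas directly: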
int main(int argc, char* argv[])
{
cv::Mat src = cv::imread("C:/StackOverflow/Input/rectangles.png");
cv::Mat dst = src.clone();
cv::Mat gray;
cv::cvtColor(src, gray, CV_BGR2GRAY);
cv::Mat mask;
// find "white" pixel
cv::inRange(src, cv::Scalar(230, 230, 230), cv::Scalar(255, 255, 255), mask);
cv::imshow("mask", mask);
std::vector<std::vector<cv::Point> > contours;
cv::findContours(mask, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
std::vector<cv::Point> approx;
for (int i = 0; i < contours.size(); i++){
cv::approxPolyDP(cv::Mat(contours[i]), approx, cv::arcLength(cv::Mat(contours[i]), true)*0.02, true);
if (approx.size() >= 4 && approx.size() <= 6)
{
cv::Rect boundRect = cv::boundingRect(cv::Mat(approx));
cv::rectangle(dst, boundRect.tl(), boundRect.br(), cv::Scalar(255, 255, 255), 1, 8, 0);
}
}
cv::imshow("input", src);
cv::imshow("dst", dst);
cv::imwrite("C:/StackOverflow/Output/rectangles2.png", dst);
cv::waitKey(0);
return 0;
}
gives this result:
As you can see, there are other bright regions near white, too. The polygon approximation does not help much either.
In general, it's easier to segment a color (even white) in HSV space. With appropriate thresholds:
inRange(hsv, Scalar(0, 0, 220), Scalar(180, 30, 255), mask);
where we don't care about the Hue, and keep only low Saturation and high Value, I get:
Then you can easily find connected components and discard blobs smaller than a threshold th_blob_size. The resulting rectangles are (in green):
You can eventually apply other filtering stages to account for more difficult situations, but for this image removing small blobs is enough. Please post other images if you need something more robust in general.
Code:
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
Mat3b img = imread("path_to_image");
int th_blob_size = 100;
Mat3b hsv;
cvtColor(img, hsv, COLOR_BGR2HSV);
Mat1b mask;
inRange(hsv, Scalar(0, 0, 220), Scalar(180, 30, 255), mask);
vector<vector<Point>> contours;
findContours(mask.clone(), contours, RETR_EXTERNAL, CHAIN_APPROX_NONE);
Mat3b res = img.clone();
for (int i = 0; i < contours.size(); ++i)
{
// Remove small blobs
if (contours[i].size() < th_blob_size)
{
continue;
}
Rect box = boundingRect(contours[i]);
rectangle(res, box, Scalar(0,255,0), 1);
}
imshow("Result", res);
waitKey();
return 0;
}
Are you sure you are only finding one contour or are you only drawing one contour? It doesn't look like you are looping in the drawing routine so you will only ever draw the first one that is found.
I have a blog, long since dead, that may provide you some good direction on this: http://workingwithcomputervision.blogspot.co.uk/2012/09/game-player-step-2-finding-game-board.html
Should the link die I believe this is the most relevant part of the article which relates to drawing contours:
//Draw contours
for (int i = 0; i < contours.size(); i++) {
Scalar color = Scalar(0, 255, 0);
drawContours(drawing, contours, i, color, 2, 8, hierarchy, 0, Point());
}
I notice you are using bounding rectangles for the drawing. Here is an alternative drawing routine, again from the above link, that does this:
Rect bounds;
Mat drawing = Mat::zeros(purpleOnly.size(), CV_8UC3);
int j = 0;
for (int i = 0; i < contours.size(); i++) {
if (arcLength(contours[i], true) > 500){
Rect temp = boundingRect(contours[i]);
rectangle(drawing, temp, Scalar(255, 0, 0), 2, 8);
if (j == 0) {
bounds = temp;
} else {
bounds = bounds | temp;
}
j++;
}
}
Note that I also do some checks on the size of the contour to filter out noise.

OpenCV copy bounded text area to new image

I am new to OpenCV and I am using this code to bound the text areas in an image. After that I am filtering contours and putting the bounding rectangles into a vector<Rect> to copy these areas to a new image.
Mat large = img1;
Mat rgb;
// upsample the image and use it for processing
pyrUp(large, rgb);
Mat small;
cvtColor(rgb, small, CV_BGR2GRAY);
// morphological gradient
Mat grad;
Mat morphKernel = getStructuringElement(MORPH_ELLIPSE, Size(2, 2));
morphologyEx(small, grad, MORPH_GRADIENT, morphKernel);
// binarize
Mat bw;
threshold(grad, bw, 0.0, 255.0, THRESH_BINARY | THRESH_OTSU);
// connect horizontally oriented regions
Mat connected;
//morphKernel = getStructuringElement(MORPH_RECT, Size(7, 1));
//morphologyEx(bw, connected, MORPH_CLOSE, morphKernel);
// find contours
connected = bw;
Mat mask = Mat::zeros(bw.size(), CV_8UC1);
Mat mask2;
Mat mask3;
vector<vector<Point>> contours;
vector<Vec4i> hierarchy;
findContours(connected, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
/*drawContours(mask2, contours, -1, Scalar(255), CV_FILLED);
Mat Crop(img1.rows, img1.cols, CV_8UC3);
Crop.setTo(Scalar(0, 255, 0));
img1.copyTo(Crop, mask2);
normalize(mask2.clone(), mask2, 0.0, 255.0, CV_MINMAX, CV_8UC1);
*/
vector<Rect> rect1;
int i = 0;
//filter contours
for (int idx = 0; idx >= 0; idx = hierarchy[idx][0])
{
Rect rect = boundingRect(contours[idx]);
Mat maskROI(mask, rect);
maskROI = Scalar(0, 0, 0);
// fill the contour
drawContours(mask, contours, idx, Scalar(255, 255, 255), CV_FILLED);
// ratio of non-zero pixels in the filled region
double r = (double)countNonZero(maskROI) / (rect.width*rect.height);
if (r > .45 /* assume at least 45% of the area is filled if it contains text */
&&
(rect.height > 10 && rect.width > 10 && rect.height<150 && rect.width<150) /* constraints on region size */
/* these two conditions alone are not very robust. better to use something
like the number of significant peaks in a horizontal projection as a third condition */
)
{
//making rectangles on bounded area
rectangle(rgb, rect, Scalar(0, 255, 0), 2);
//pushing bounding rectangles in vector for new mask
rect1.push_back(rect);
}
}
Input and output I am getting after bounding the text areas:
After that I am using this code to copy the bounded area only to new mask
//copying bounded rectangles area from small to new mask2
for (int i = 0; i < rect1.size(); i++){
mask2 = rgb(rect1[i]);
}
but by using this I only get the last bounded text area:
How can I update mask2's rows and columns to get all of the bounded text areas mapped from rgb into mask2?
That's because mask2 will be equal to the last rgb(rect1[i]) called.
You can easily solve this in two ways (using copyTo):
Create a mask (black initialized, same size as input image), where you draw (white) rectangles. Then you copy the original image to a black initialized image of the same size, using the obtained mask.
Copy each sub-image directly to a black initialized image.
Starting from this image, where the red rectangles will be your detected rectangles:
With first approach you'll get a mask like:
and, for both approaches, the final result will be:
Code for first approach:
#include <opencv2/opencv.hpp>
#include <vector>
using namespace std;
using namespace cv;
int main()
{
// Your image
Mat3b img = imread("path_to_image");
// Your rectangles
vector<Rect> rects{Rect(100, 100, 100, 200), Rect(300, 200, 200, 100), Rect(500, 400, 80, 130)};
// Mask for rectangles (black initialized)
Mat1b mask(img.rows, img.cols, uchar(0));
Mat3b dbgRects = img.clone();
for (int i = 0; i < rects.size(); ++i)
{
// Draw white rectangles on mask
rectangle(mask, rects[i], Scalar(255), CV_FILLED);
// Show rectangles
rectangle(dbgRects, rects[i], Scalar(0, 0, 255), 2);
}
// Black initialized result
Mat3b result(img.rows, img.cols, Vec3b(0,0,0));
img.copyTo(result, mask);
imshow("Rectangles", dbgRects);
imshow("Result", result);
waitKey();
return 0;
}
Code for second approach:
#include <opencv2/opencv.hpp>
#include <vector>
using namespace std;
using namespace cv;
int main()
{
// Your image
Mat3b img = imread("path_to_image");
// Your rectangles
vector<Rect> rects{Rect(100, 100, 100, 200), Rect(300, 200, 200, 100), Rect(500, 400, 80, 130)};
// Black initialized result
Mat3b result(img.rows, img.cols, Vec3b(0, 0, 0));
Mat3b dbgRects = img.clone();
for (int i = 0; i < rects.size(); ++i)
{
img(rects[i]).copyTo(result(rects[i]));
// Show rectangles
rectangle(dbgRects, rects[i], Scalar(0, 0, 255), 2);
}
imshow("Rectangles", dbgRects);
imshow("Result", result);
waitKey();
return 0;
}

How to draw contours of each segmented object

I apply watershed segmentation to detect touching objects, and it works okay at that. Now I would like to draw the contours of each object, so I can get their length, area, moments, etc. But the objects in the segmentation result are still touching, so I fail to draw the contours of each one. How can I draw the contours of each object?
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
using namespace std;
using namespace cv;
int main()
{
Mat src = imread("source.png");
// Create binary image from source image
Mat srcGray;
cvtColor(src, srcGray, CV_BGR2GRAY);
Mat srcThresh;
threshold(srcGray, srcThresh, 0, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
// Perform the distance transform algorithm
Mat dist;
distanceTransform(srcThresh, dist, CV_DIST_L2, 3);
// Normalize the distance image for range = {0.0, 1.0}
normalize(dist, dist, 0, 1., NORM_MINMAX);
// Threshold to obtain the peaks
threshold(dist, dist, 0.1, 3.5, CV_THRESH_BINARY);
// Create the CV_8U version of the distance image
Mat dist_8u;
dist.convertTo(dist_8u, CV_8U);
// Find total markers
std::vector<std::vector<Point> > contours;
findContours(dist_8u, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
int ncomp = contours.size();
// Create the marker image for the watershed algorithm
Mat markers = Mat::zeros(dist.size(), CV_32SC1);
// Draw the foreground markers
for (int i = 0; i < ncomp; i++)
drawContours(markers, contours, i, Scalar::all(i + 1), -1);
// Draw the background marker
circle(markers, Point(5, 5), 3, CV_RGB(255, 255, 255), -1);
// Perform the watershed algorithm
watershed(src, markers);
Mat wgResult = (markers.clone()) * 10000;
imshow("Watershed", wgResult);
waitKey(0);
return 0;
}
Source image:
Watershed Result:
The markers matrix returned by watershed contains the indices of the segmented regions, according to the seed. So each component will have the same seed value. You can then create a binary matrix for each seed like:
Mat1b mask = (markers == seed);
Once you have the binary mask for each component, you can easily compute its area, moments, etc...
Code:
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace std;
using namespace cv;
int main()
{
Mat src = imread("D:\\SO\\img\\postit.png");
// Create binary image from source image
Mat srcGray;
cvtColor(src, srcGray, CV_BGR2GRAY);
Mat srcThresh;
threshold(srcGray, srcThresh, 0, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
// Perform the distance transform algorithm
Mat dist;
distanceTransform(srcThresh, dist, CV_DIST_L2, 3);
// Normalize the distance image for range = {0.0, 1.0}
normalize(dist, dist, 0, 1., NORM_MINMAX);
// Threshold to obtain the peaks
threshold(dist, dist, 0.1, 3.5, CV_THRESH_BINARY);
// Create the CV_8U version of the distance image
Mat dist_8u;
dist.convertTo(dist_8u, CV_8U);
// Find total markers
std::vector<std::vector<Point> > contours;
findContours(dist_8u, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
int ncomp = contours.size();
// Create the marker image for the watershed algorithm
Mat markers = Mat::zeros(dist.size(), CV_32SC1);
// Draw the foreground markers
for (int i = 0; i < ncomp; i++)
drawContours(markers, contours, i, Scalar::all(i + 1), -1);
// Draw the background marker
circle(markers, Point(5, 5), 3, CV_RGB(255, 255, 255), -1);
// Perform the watershed algorithm
watershed(src, markers);
for (int seed = 1; seed <= ncomp; ++seed)
{
Mat1b mask = (markers == seed);
// Now you have the mask, you can compute your statistics
imshow("Mask", mask);
waitKey();
}
return 0;
}
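The comment inside the loop says you can compute your statistics there; a small hedged sketch of what that could look like for one component (the helper name is mine):

#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>

// sketch: print pixel area, contour area, perimeter and centroid of one watershed component
void printComponentStats(const cv::Mat& markers, int seed)
{
    cv::Mat1b mask = (markers == seed);
    double pixelArea = cv::countNonZero(mask);

    std::vector<std::vector<cv::Point> > cs;
    cv::findContours(mask, cs, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
    if (cs.empty())
        return;

    double area = cv::contourArea(cs[0]);
    double perimeter = cv::arcLength(cs[0], true);
    cv::Moments m = cv::moments(cs[0]);

    std::cout << "seed " << seed
              << ": pixels=" << pixelArea
              << " contourArea=" << area
              << " perimeter=" << perimeter;
    if (m.m00 > 0)
        std::cout << " centroid=(" << m.m10 / m.m00 << ", " << m.m01 / m.m00 << ")";
    std::cout << std::endl;
}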
There are many ways to do this. For the image that has been shown, you could simply apply erosion and dilation operations in order to separate the objects. However, this will not work if the overlapping area is bigger.
You need a closing operation:
http://docs.opencv.org/2.4/doc/tutorials/imgproc/opening_closing_hats/opening_closing_hats.html
Threshold it.
Apply a closing operation.
Get the contours.
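A minimal hedged sketch of those steps (the kernel size is an assumption to tune; depending on the image, an opening may separate the blobs better than a closing):

#include <opencv2/opencv.hpp>
#include <vector>

int main()
{
    cv::Mat src = cv::imread("source.png", cv::IMREAD_GRAYSCALE); // path from the question

    // 1. threshold it
    cv::Mat bin;
    cv::threshold(src, bin, 0, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);

    // 2. apply a closing operation (dilation followed by erosion)
    cv::Mat kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(5, 5));
    cv::Mat closed;
    cv::morphologyEx(bin, closed, cv::MORPH_CLOSE, kernel);

    // 3. get the contours
    std::vector<std::vector<cv::Point> > contours;
    cv::findContours(closed, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);

    cv::Mat vis = cv::Mat::zeros(src.size(), CV_8UC3);
    for (size_t i = 0; i < contours.size(); ++i)
        cv::drawContours(vis, contours, (int)i, cv::Scalar(0, 255, 0), 1);

    cv::imshow("contours", vis);
    cv::waitKey(0);
    return 0;
}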