Hough Circles, and an overly complicated solution - C++

So I've been working on recognizing a yoga ball with Hough circles. When the image is converted to grayscale, detection works straight away. Unfortunately, I have to take a more complicated approach because there are multiple of these coloured balls and I only want to detect the blue one.
Unfiltered ball:
Filtered ball:
Steps of my algorithm:
convert from BGR to HSV
blur the image
filter HSV for only select values (in my case dark blue to light blue due to lighting)
invert the image
use morphology to fill in the part that was washed out by the light
blur again
threshold the blurred image so it's a solid shape instead of an unrecognisable blurry grayscale
detect with HoughCircles (the Mat is still grayscale, so that isn't the problem)
Code:
#include <iostream>
#include <string>
#include <iomanip>
#include <sstream>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace std;
using namespace cv;
int main(int argc, char *argv[])
{
// Morphology stuff
Mat element5(30, 30, CV_8U, Scalar(1));
int morph_elem = 1; // 2
int morph_size = 33;// 30
int morph_operator = 2; // 2
Mat element = getStructuringElement(morph_elem, Size(2 * morph_size + 1, 2 * morph_size + 1), Point(morph_size, morph_size));
int const max_operator = 4;
int const max_elem = 2;
int const max_kernel_size = 21;
Mat kernel;
// Display Windows Name
namedWindow("Testing Purposes", CV_WINDOW_AUTOSIZE);
Mat src; // loaded image
Mat hsv; // changed src into HSV
Mat Filtered; // filtered w/ inRange for blue ball
Mat Gray; // gray filter for src
Mat dst; // destination for canny edge
Mat detected_edges; // matrix of edges w/ canny
// thresholds for canny
int edgeThresh = 45;
int lowThreshold;
int const max_lowThreshold = 100;
src = imread(argv[1]);
cvtColor(src, Gray, CV_BGR2GRAY);
cvtColor(src, hsv, CV_BGR2HSV);
/*
// CannyEdge Testing
blur(Gray, detected_edges, Size(3, 3)); // blur the grayimage
Canny(detected_edges, detected_edges, lowThreshold, lowThreshold * ratio, kernel_size);
dst = Scalar::all(0);
src.copyTo( dst, detected_edges);
imshow(window_name,dst);
*/
// hsv blur and then thresholds
blur(hsv,hsv,Size(4, 4), Point(-1, -1));
inRange(hsv, Scalar(100, 100, 0), Scalar(200, 200, 255), Filtered); //filtering after blur
vector<Vec3f> circles; //vector for holding info on circles
// houghcircles - attempts to detect circles in the Filtered image we passed it
// morphology definition for kernel
bitwise_not(Filtered, Filtered);
// imwrite("/home/bjacobs/Desktop/Testing.jpg", Filtered);
imwrite("/home/bjacobs/Desktop/Testingg.jpg", Filtered);
morphologyEx(Filtered, dst, MORPH_OPEN, element);
blur(dst, dst, Size(20, 20), Point(-1, -1));
Mat baw = dst > 128;
HoughCircles(baw, circles, CV_HOUGH_GRADIENT, 1, baw.rows / 8, 200, 100, 0, 0);
imwrite("/home/bjacobs/Desktop/Testing.jpg", baw);
// Draw the circles detected onto the SRC file
for(size_t i = 0; i < circles.size(); i++)
{
Point center(cvRound(circles[i][0]), cvRound(circles[i][1])); // Vec3f is (x, y, radius), so index 1, not 3
int radius = cvRound(circles[i][2]);
// circle center
circle(src, center, 3, Scalar(0, 255, 0), -1, 8, 0);
// circle outline
circle(src, center, radius, Scalar(0, 0, 255), 3, 8, 0);
}
imwrite("/home/bjacobs/Desktop/Test.jpg", hsv);
imshow("Testing Purposes", src);
waitKey(0);
}
I've already read as much as I could find online on this matter, and nothing so far has helped. Forgive the sloppy commenting; there are also some failed attempts with Canny edge detection left in, so don't pay too much mind to them. Does anyone know of a solution to this detection issue?

Instead of using HoughCircles you can do the following (a sketch follows below):
Segment the blue color.
Find contours (keep the largest).
Fit a minimum enclosing circle to that contour.
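A minimal sketch of that pipeline, assuming OpenCV 2.x-style constants as in the question; the HSV blue range and the kernel size are assumptions to tune for your lighting:
// Sketch: segment blue, take the largest contour, fit a minimum enclosing circle.
Mat hsv, blueMask;
cvtColor(src, hsv, CV_BGR2HSV);
inRange(hsv, Scalar(100, 100, 50), Scalar(130, 255, 255), blueMask); // assumed blue range
morphologyEx(blueMask, blueMask, MORPH_CLOSE,
             getStructuringElement(MORPH_ELLIPSE, Size(15, 15)));    // assumed kernel size
vector<vector<Point> > contours;
findContours(blueMask.clone(), contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
if (!contours.empty())
{
    // keep the largest contour
    size_t largest = 0;
    for (size_t i = 1; i < contours.size(); i++)
        if (contourArea(contours[i]) > contourArea(contours[largest]))
            largest = i;
    Point2f center;
    float radius;
    minEnclosingCircle(contours[largest], center, radius);
    circle(src, Point(cvRound(center.x), cvRound(center.y)), cvRound(radius),
           Scalar(0, 0, 255), 3);
}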

Related

How to Enlarge each rectangle (boundingbox) from findcontour Opencv C++

I have a program that detects an object in an image using a bounding box. But somehow only the center of the object (which has high saturation) is detected. I want to enlarge the bounding box so that the low-saturation parts of the object are detected too. Here is the resulting image:
The red rectangle is the bounding box produced by findContours. I want a rectangle like the blue one, so the whole object is covered. Here is my code:
#include "stdafx.h"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
using namespace cv;
using namespace std;
int main(){
Mat frame = imread("32.jpg", 1);
Mat mask(frame.size(), CV_8UC1, Scalar::all(0));
Mat hsv;
int a = 5;
cv::cvtColor(frame, hsv, CV_BGR2HSV);
std::vector<cv::Mat> channels;
split(hsv, channels);
cv::Mat H = channels[0];
cv::Mat S = channels[1];
cv::Mat V = channels[2];
Mat th;
threshold(S, th, 90, 255, THRESH_OTSU);
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
/// Find contours
findContours(th, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_NONE, Point(0, 0));
vector<Rect> rekt(contours.size());
/// Draw contours
Mat drawing = Mat::zeros(th.size(), CV_8UC3);
for (int i = 0; i< contours.size(); i++)
{
if (cv::contourArea(contours[i]) < 500) continue;
rekt[i] = boundingRect(Mat(contours[i]));
rectangle(mask, rekt[i].tl(), rekt[i].br(), Scalar(0, 0, 255), 2, 8, 0);
}
imshow("ori", frame);
imshow("olaaa", resImage);
waitKey(0);
}
I have tried other code such as here and here, but the image doesn't show the rectangle and only shows a black background.
Thank you.
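No answer is captured here, but a common way to enlarge each bounding rectangle is to inflate the Rect by a margin and clamp it to the image bounds; a minimal sketch, where the padding value is an assumption:
// Sketch: inflate each bounding box by `pad` pixels and clamp to the image.
int pad = 20; // assumed margin; tune to your object size
for (size_t i = 0; i < contours.size(); i++)
{
    if (contourArea(contours[i]) < 500) continue;
    Rect r = boundingRect(contours[i]);
    r -= Point(pad, pad);                     // shift top-left corner up and left
    r += Size(2 * pad, 2 * pad);              // grow width and height
    r &= Rect(0, 0, frame.cols, frame.rows);  // keep the rectangle inside the image
    rectangle(frame, r.tl(), r.br(), Scalar(0, 0, 255), 2, 8, 0);
}
Drawing on frame (a 3-channel image) rather than the single-channel mask also makes the red rectangle actually visible; Scalar(0, 0, 255) on a CV_8UC1 mask draws value 0, which is black on black.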

How to detect triangle accurately in OpenCv using C++

I am detecting red triangular objects in real-time video using OpenCV C++, but my program is not working properly. I'm not sure whether I am using findContours or approxPolyDP correctly. Here is my code:
#include "opencv2/opencv.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <raspicam/raspicam_cv.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
using namespace std;
int thresh = 100;
int number_pixels;
void drawAllTriangles(Mat&, const vector< vector<Point> >&);
int main(){
//Vectors
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
Mat canny_output,drawing;
//Initialise the image as a matrix container
Mat bgr;
raspicam::RaspiCam_Cv capture; // initialise the raspicam object
capture.open(); // activate the raspicam object
while (1)
{
capture.grab(); //grab the scene using raspicam
capture.retrieve(bgr); // retrieve the captured scene as an image and store it in matrix container
Mat gray,hsv; //Initialise the matrix container for gray color image
resize(bgr, bgr, Size(), .25, 0.25, CV_INTER_AREA);
//cvtColor(bgr, gray, COLOR_BGR2GRAY); //OpenCV code line for converting COLOR to GRAY scale image
//
cvtColor(bgr, hsv, COLOR_BGR2HSV);
//Gaussian noise reduction
Mat blure;
GaussianBlur(hsv,blure,Size(9,9),1.0);
//Gaussian noise reduction end
Mat mask1, mask2;
inRange(blure, Scalar(0,70,50), Scalar(10, 255, 255), mask1);
inRange(blure, Scalar(170, 70, 50), Scalar(180, 255, 255), mask2);
Mat mask3 = mask1 | mask2;
//Mat mask3 = mask1;
//
number_pixels = countNonZero(mask3);
Canny(mask3, canny_output, thresh, thresh*2, 3 );
findContours( canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
drawing = Mat::zeros( canny_output.size(), CV_8UC3 );
imshow("Original frame", bgr); //displaying original frame
imshow("Gray frame", mask3); //displaying original frame
drawAllTriangles(drawing,contours);
imshow("Triangles",drawing);
if (cvWaitKey(20) == 'q') // waitkey
break;
}
capture.release(); // release the raspicam frame grabbing
return 0;
}
void drawAllTriangles(Mat& img, const vector< vector<Point> >& contours){
vector<Point> approxTriangle;
for(size_t i = 0; i < contours.size(); i++){
approxPolyDP(contours[i], approxTriangle, arcLength(Mat(contours[i]), true)*0.05, true);
if(approxTriangle.size() == 3 and number_pixels>200){
drawContours(img, contours, i, Scalar(0, 255, 255), CV_FILLED); // fill yellow
vector<Point>::iterator vertex;
for(vertex = approxTriangle.begin(); vertex != approxTriangle.end(); ++vertex){
circle(img, *vertex, 3, Scalar(0, 0, 255), 1);
}
printf("Triangle \n");
}
else {
printf("None \n");
}
}
}
My program should detect any red triangle-shaped objects in the video. Where am I going wrong? Thank you for your answers.
These are the images; the program cannot detect the triangles and the output is confusing.
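No answer is captured for this question. One common adjustment, offered only as a sketch, is to run findContours directly on the colour mask instead of on the Canny output (Canny on a binary mask tends to produce doubled, broken edges) and to filter candidate contours by area before the triangle test:
// Sketch: contours straight from the red mask, filtered by area before the triangle test.
findContours(mask3.clone(), contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
for (size_t i = 0; i < contours.size(); i++)
{
    if (contourArea(contours[i]) < 200) continue;  // assumed area threshold
    vector<Point> approxTriangle;
    approxPolyDP(contours[i], approxTriangle, 0.05 * arcLength(contours[i], true), true);
    if (approxTriangle.size() == 3)
        drawContours(drawing, contours, (int)i, Scalar(0, 255, 255), CV_FILLED);
}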

Using Opencv and Hough Transform circle to detect circles (subscript error)

As it is my school holiday right now, I decided to pick up some skills, so I'm attempting to learn how to use OpenCV with Visual Studio C++ to detect how many cans are in a carton and group them 4 by 4.
I have tried various demo codes such as "opencv findContours" and template matching (which doesn't work well, as it cannot handle the rotation of the top lid).
The best method I found is to combine Canny edge detection and the Hough circle transform, so that the output of Canny edge detection becomes the input image of the Hough circle transform; the result is shown below.
Unfortunately, not all circles are detected, and if I change
for (int i = 0; i < circles.size(); i++) into
for (int i = 0; i < 24; i++) // 24 is the no. of cans
I get an "Expression: vector subscript out of range" error. I am not sure why it is only able to detect 21 circles.
Source code below:
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
using namespace std;
Mat src, src_gray;
int main()
{
Mat src1;
src1 = imread("cans.jpg", CV_LOAD_IMAGE_COLOR);
namedWindow("Original image", CV_WINDOW_AUTOSIZE);
imshow("Original image", src1);
Mat gray, edge, draw;
cvtColor(src1, gray, CV_BGR2GRAY);
Canny(gray, edge,50, 150, 3);
//50,150,3
edge.convertTo(draw, CV_8U);
namedWindow("Canny Edge", CV_WINDOW_AUTOSIZE);
imshow("Canny Edge", draw);
imwrite("output.jpg", draw);
waitKey(500);
/// Read the image
src = imread("output.jpg", 1);
Size size(932, 558);//the dst image size,e.g.100x100
resize(src, src, size);//resize image
/// Convert it to gray
cvtColor(src, src_gray, CV_BGR2GRAY);
/// Reduce the noise so we avoid false circle detection
GaussianBlur(src_gray, src_gray, Size(9, 9), 2, 2);
vector<Vec3f> circles;
/// Apply the Hough Transform to find the circles
HoughCircles(src_gray, circles, CV_HOUGH_GRADIENT, 1, src_gray.rows / 8,200, 100, 0, 0);
/// Draw the circles detected
for (int i = 0; i < circles.size(); i++)
{
printf("are you um?\n");
Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
int radius = cvRound(circles[i][2]);
// circle center
circle(src, center, 3, Scalar(0, 255, 0), -1, 8, 0);
// circle outline
circle(src, center, radius, Scalar(255, 0, 255), 3, 8, 0);
}
// namedWindow("Hough Circle Transform Demo", CV_WINDOW_NORMAL);
imshow("Hough Circle Transform Demo", src);
line(src, Point(0, 288), Point(1024, 288), Scalar(225, 220, 225), 2, 8);
// middle line
line(src, Point(360, 0), Point(360, 576), Scalar(225, 220, 225), 2, 8);
//break cans into 4 by 4
line(src, Point(600, 0), Point(600, 576), Scalar(225, 220, 225), 2, 8);
// x, y
imshow("Lines", src);
imwrite("lineoutput.jpg", src);
waitKey(0);
return 0;
}
I had also manually typed out the coordinates for the lines to group the cans into 4 x 4.
What should I change so that there is no subscript-out-of-range error and all the circles are detected?
Okay, I solved my own question: I changed CV_BGR2GRAY to CV_RGB2GRAY, made the image ratio smaller, changed the circles' minimum radius, and applied another threshold to get all the circles.
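Independently of the detection tuning, the subscript error itself comes from indexing past circles.size(); a defensive loop, shown here only as a sketch, never reads more entries than were actually detected (std::min needs <algorithm>):
// Sketch: never index past the number of circles actually detected.
size_t expected = 24;                          // the number of cans in the carton
size_t n = std::min(circles.size(), expected); // stop at whichever is smaller
for (size_t i = 0; i < n; i++)
{
    Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
    int radius = cvRound(circles[i][2]);
    circle(src, center, radius, Scalar(255, 0, 255), 3, 8, 0);
}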

OpenCV copy bounded text area to new image

I am new to OpenCV and I am using this code to bound the text areas in an image. After that I filter the contours and push each bounding rectangle into a vector<Rect> so I can copy those regions to a new image.
Mat large = img1;
Mat rgb;
// upsample and use it for processing
pyrUp(large, rgb);
Mat small;
cvtColor(rgb, small, CV_BGR2GRAY);
// morphological gradient
Mat grad;
Mat morphKernel = getStructuringElement(MORPH_ELLIPSE, Size(2, 2));
morphologyEx(small, grad, MORPH_GRADIENT, morphKernel);
// binarize
Mat bw;
threshold(grad, bw, 0.0, 255.0, THRESH_BINARY | THRESH_OTSU);
// connect horizontally oriented regions
Mat connected;
//morphKernel = getStructuringElement(MORPH_RECT, Size(7, 1));
//morphologyEx(bw, connected, MORPH_CLOSE, morphKernel);
// find contours
connected = bw;
Mat mask = Mat::zeros(bw.size(), CV_8UC1);
Mat mask2;
Mat mask3;
vector<vector<Point>> contours;
vector<Vec4i> hierarchy;
findContours(connected, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
/*drawContours(mask2, contours, -1, Scalar(255), CV_FILLED);
Mat Crop(img1.rows, img1.cols, CV_8UC3);
Crop.setTo(Scalar(0, 255, 0));
img1.copyTo(Crop, mask2);
normalize(mask2.clone(), mask2, 0.0, 255.0, CV_MINMAX, CV_8UC1);
*/
vector<Rect> rect1;
int i = 0;
//filter contours
for (int idx = 0; idx >= 0; idx = hierarchy[idx][0])
{
Rect rect = boundingRect(contours[idx]);
Mat maskROI(mask, rect);
maskROI = Scalar(0, 0, 0);
// fill the contour
drawContours(mask, contours, idx, Scalar(255, 255, 255), CV_FILLED);
// ratio of non-zero pixels in the filled region
double r = (double)countNonZero(maskROI) / (rect.width*rect.height);
if (r > .45 /* assume at least 45% of the area is filled if it contains text */
&&
(rect.height > 10 && rect.width > 10 && rect.height<150 && rect.width<150) /* constraints on region size */
/* these two conditions alone are not very robust. better to use something
like the number of significant peaks in a horizontal projection as a third condition */
)
{
//making rectangles on bounded area
rectangle(rgb, rect, Scalar(0, 255, 0), 2);
//pushing bounding rectangles in vector for new mask
rect1.push_back(rect);
}
}
Input image, and the output I am getting after bounding the text areas:
After that I am using this code to copy only the bounded areas to a new mask:
//copying bounded rectangles area from small to new mask2
for (int i = 0; i < rect1.size(); i++){
mask2 = rgb(rect1[i]);
}
but by using this I only get the last bounded text area:
How can I build or update mask2 so that it contains all of the bounded text areas mapped from rgb?
That's because mask2 ends up equal to the last rgb(rect1[i]) assigned.
You can easily solve this in two ways (using copyTo):
Create a mask (black initialized, same size as input image), where you draw (white) rectangles. Then you copy the original image to a black initialized image of the same size, using the obtained mask.
Copy each sub-image directly to a black initialized image.
Starting from this image, where the red rectangles will be your detected rectangles:
With first approach you'll get a mask like:
and, for both approaches, the final result will be:
Code for first approach:
#include <opencv2/opencv.hpp>
#include <vector>
using namespace std;
using namespace cv;
int main()
{
// Your image
Mat3b img = imread("path_to_image");
// Your rectangles
vector<Rect> rects{Rect(100, 100, 100, 200), Rect(300, 200, 200, 100), Rect(500, 400, 80, 130)};
// Mask for rectangles (black initialized)
Mat1b mask(img.rows, img.cols, uchar(0));
Mat3b dbgRects = img.clone();
for (int i = 0; i < rects.size(); ++i)
{
// Draw white rectangles on mask
rectangle(mask, rects[i], Scalar(255), CV_FILLED);
// Show rectangles
rectangle(dbgRects, rects[i], Scalar(0, 0, 255), 2);
}
// Black initialized result
Mat3b result(img.rows, img.cols, Vec3b(0,0,0));
img.copyTo(result, mask);
imshow("Rectangles", dbgRects);
imshow("Result", result);
waitKey();
return 0;
}
Code for second approach:
#include <opencv2/opencv.hpp>
#include <vector>
using namespace std;
using namespace cv;
int main()
{
// Your image
Mat3b img = imread("path_to_image");
// Your rectangles
vector<Rect> rects{Rect(100, 100, 100, 200), Rect(300, 200, 200, 100), Rect(500, 400, 80, 130)};
// Black initialized result
Mat3b result(img.rows, img.cols, Vec3b(0, 0, 0));
Mat3b dbgRects = img.clone();
for (int i = 0; i < rects.size(); ++i)
{
img(rects[i]).copyTo(result(rects[i]));
// Show rectangles
rectangle(dbgRects, rects[i], Scalar(0, 0, 255), 2);
}
imshow("Rectangles", dbgRects);
imshow("Result", result);
waitKey();
return 0;
}

How to draw contours of each segmented object

I apply watershed segmentation to detect touching objects, and it works okay at that. Now I would like to draw the contours of each object, so I can get their length, area, moments, etc. But the objects in the segmentation result are still touching, so I fail to draw the contours of each one. How can I draw the contours of each object?
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
using namespace std;
using namespace cv;
int main()
{
Mat src = imread("source.png");
// Create binary image from source image
Mat srcGray;
cvtColor(src, srcGray, CV_BGR2GRAY);
Mat srcThresh;
threshold(srcGray, srcThresh, 0, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
// Perform the distance transform algorithm
Mat dist;
distanceTransform(srcThresh, dist, CV_DIST_L2, 3);
// Normalize the distance image for range = {0.0, 1.0}
normalize(dist, dist, 0, 1., NORM_MINMAX);
// Threshold to obtain the peaks
threshold(dist, dist, 0.1, 3.5, CV_THRESH_BINARY);
// Create the CV_8U version of the distance image
Mat dist_8u;
dist.convertTo(dist_8u, CV_8U);
// Find total markers
std::vector<std::vector<Point> > contours;
findContours(dist_8u, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
int ncomp = contours.size();
// Create the marker image for the watershed algorithm
Mat markers = Mat::zeros(dist.size(), CV_32SC1);
// Draw the foreground markers
for (int i = 0; i < ncomp; i++)
drawContours(markers, contours, i, Scalar::all(i + 1), -1);
// Draw the background marker
circle(markers, Point(5, 5), 3, CV_RGB(255, 255, 255), -1);
// Perform the watershed algorithm
watershed(src, markers);
Mat wgResult = (markers.clone()) * 10000;
imshow("Watershed", wgResult);
waitKey(0);
return 0;
}
Source image:
Watershed Result:
The markers matrix returned by watershed contains the indices of the segmented regions, according to the seeds, so every pixel of a component has the same value as its seed. You can then create a binary mask for each seed like:
Mat1b mask = (markers == seed);
Once you have the binary mask for each component, you can easily compute its area, moments, etc...
Code:
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace std;
using namespace cv;
int main()
{
Mat src = imread("D:\\SO\\img\\postit.png");
// Create binary image from source image
Mat srcGray;
cvtColor(src, srcGray, CV_BGR2GRAY);
Mat srcThresh;
threshold(srcGray, srcThresh, 0, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
// Perform the distance transform algorithm
Mat dist;
distanceTransform(srcThresh, dist, CV_DIST_L2, 3);
// Normalize the distance image for range = {0.0, 1.0}
normalize(dist, dist, 0, 1., NORM_MINMAX);
// Threshold to obtain the peaks
threshold(dist, dist, 0.1, 3.5, CV_THRESH_BINARY);
// Create the CV_8U version of the distance image
Mat dist_8u;
dist.convertTo(dist_8u, CV_8U);
// Find total markers
std::vector<std::vector<Point> > contours;
findContours(dist_8u, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
int ncomp = contours.size();
// Create the marker image for the watershed algorithm
Mat markers = Mat::zeros(dist.size(), CV_32SC1);
// Draw the foreground markers
for (int i = 0; i < ncomp; i++)
drawContours(markers, contours, i, Scalar::all(i + 1), -1);
// Draw the background marker
circle(markers, Point(5, 5), 3, CV_RGB(255, 255, 255), -1);
// Perform the watershed algorithm
watershed(src, markers);
for (int seed = 1; seed <= ncomp; ++seed)
{
Mat1b mask = (markers == seed);
// Now you have the mask, you can compute your statistics
imshow("Mask", mask);
waitKey();
}
return 0;
}
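As a small illustration (my addition, not part of the answer), inside that seed loop the per-seed mask can be fed back into findContours to get the area, perimeter, and moments of that single component:
// Sketch: statistics for one component from its binary mask (inside the seed loop).
vector<vector<Point> > comps;
findContours(mask.clone(), comps, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
if (!comps.empty())
{
    double area = contourArea(comps[0]);
    double perimeter = arcLength(comps[0], true);
    Moments m = moments(comps[0]);
    cout << "seed " << seed << ": area=" << area
         << " perimeter=" << perimeter
         << " centroid=(" << m.m10 / m.m00 << ", " << m.m01 / m.m00 << ")" << endl;
}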
There are many ways to do this. Depending on the image that has been shown, you can simply apply erosion and dilation operations in order to separate the objects. However, this will not work if the overlapping area is bigger. You need a closing operation (a sketch follows after the list):
http://docs.opencv.org/2.4/doc/tutorials/imgproc/opening_closing_hats/opening_closing_hats.html
Threshold the image.
Apply the closing operation.
Get the contours.
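A minimal sketch of that pipeline, assuming OpenCV 2.x-style constants as in the rest of the thread; the kernel size is an assumption:
// Sketch: threshold, close, then find contours of the separated objects.
Mat gray, bin;
cvtColor(src, gray, CV_BGR2GRAY);
threshold(gray, bin, 0, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
Mat kernel = getStructuringElement(MORPH_ELLIPSE, Size(15, 15)); // assumed kernel size
morphologyEx(bin, bin, MORPH_CLOSE, kernel);
vector<vector<Point> > contours;
findContours(bin, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);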